repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---
marcstreeter/ChatterBlog
|
app/shared/error.py
|
1
|
1203
|
class Errors(Exception):
    # Single-underscore class attributes so that subclasses can override the
    # defaults (name-mangled __attributes are looked up per class, so the
    # overrides in the subclasses below would silently be ignored).
    _status_code = 400
    _error_cat = "General Error"
    _default_message = "System Error Occurred"

    def __init__(self, message=None, status_code=None, payload=None):
        Exception.__init__(self)
        self.category = self._error_cat
        self.message = message or self._default_message
        self.status_code = status_code or self._status_code
        self.payload = payload

    def to_dict(self):
        rv = dict(self.payload or ())
        rv['message'] = self.message
        rv['category'] = self._error_cat
        return rv


# System Errors
class SystemError(Errors):
    _error_cat = "System Error"
    _default_message = "Oops, Pardon us! An engineer has been notified and woken from slumber."
    _status_code = 500


# User Errors
class UserErrors(Errors):
    _error_cat = "User Error"
    _default_message = "Error Caused By User"


class InvalidUsageError(UserErrors):
    _default_message = "Invalid Usage"


class UnauthenticatedError(UserErrors):
    _default_message = "Unauthenticated Usage"
    _status_code = 401


class UnauthorizedError(UserErrors):
    _default_message = "Unauthorized Usage"
    _status_code = 403
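# Illustrative sketch (not part of the original module): how a Flask app might
# surface these exceptions as JSON responses. The Flask app object and jsonify
# are assumptions; only Errors, to_dict() and status_code come from the code
# above.
from flask import Flask, jsonify

app = Flask(__name__)

@app.errorhandler(Errors)
def handle_errors(err):
    # Serialize the exception with to_dict() and honor its status code,
    # e.g. raising UnauthenticatedError() yields a 401 with category "User Error".
    response = jsonify(err.to_dict())
    response.status_code = err.status_code
    return response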
|
apache-2.0
|
xaviercobain88/framework-python
|
build/lib.linux-i686-2.7/openerp/addons/account/project/wizard/account_analytic_cost_ledger_report.py
|
56
|
2042
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv, fields
class account_analytic_cost_ledger(osv.osv_memory):
_name = 'account.analytic.cost.ledger'
_description = 'Account Analytic Cost Ledger'
_columns = {
'date1': fields.date('Start of period', required=True),
'date2': fields.date('End of period', required=True),
}
_defaults = {
'date1': lambda *a: time.strftime('%Y-01-01'),
'date2': lambda *a: time.strftime('%Y-%m-%d')
}
def check_report(self, cr, uid, ids, context=None):
if context is None:
context = {}
data = self.read(cr, uid, ids)[0]
datas = {
'ids': context.get('active_ids',[]),
'model': 'account.analytic.account',
'form': data
}
return {
'type': 'ir.actions.report.xml',
'report_name': 'account.analytic.account.cost_ledger',
'datas': datas,
}
account_analytic_cost_ledger()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
HireAnEsquire/django-rest-framework
|
rest_framework/versioning.py
|
81
|
6651
|
# coding: utf-8
from __future__ import unicode_literals
import re
from django.utils.translation import ugettext_lazy as _
from rest_framework import exceptions
from rest_framework.compat import unicode_http_header
from rest_framework.reverse import _reverse
from rest_framework.settings import api_settings
from rest_framework.templatetags.rest_framework import replace_query_param
from rest_framework.utils.mediatypes import _MediaType
class BaseVersioning(object):
default_version = api_settings.DEFAULT_VERSION
allowed_versions = api_settings.ALLOWED_VERSIONS
version_param = api_settings.VERSION_PARAM
def determine_version(self, request, *args, **kwargs):
msg = '{cls}.determine_version() must be implemented.'
raise NotImplementedError(msg.format(
cls=self.__class__.__name__
))
def reverse(self, viewname, args=None, kwargs=None, request=None, format=None, **extra):
return _reverse(viewname, args, kwargs, request, format, **extra)
def is_allowed_version(self, version):
if not self.allowed_versions:
return True
return (version == self.default_version) or (version in self.allowed_versions)
class AcceptHeaderVersioning(BaseVersioning):
"""
GET /something/ HTTP/1.1
Host: example.com
Accept: application/json; version=1.0
"""
invalid_version_message = _('Invalid version in "Accept" header.')
def determine_version(self, request, *args, **kwargs):
media_type = _MediaType(request.accepted_media_type)
version = media_type.params.get(self.version_param, self.default_version)
version = unicode_http_header(version)
if not self.is_allowed_version(version):
raise exceptions.NotAcceptable(self.invalid_version_message)
return version
# We don't need to implement `reverse`, as the versioning is based
# on the `Accept` header, not on the request URL.
class URLPathVersioning(BaseVersioning):
"""
To the client this is the same style as `NamespaceVersioning`.
The difference is in the backend - this implementation uses
Django's URL keyword arguments to determine the version.
An example URL conf for two views that accept two different versions.
urlpatterns = [
        url(r'^(?P<version>(v1|v2))/users/$', users_list, name='users-list'),
        url(r'^(?P<version>(v1|v2))/users/(?P<pk>[0-9]+)/$', users_detail, name='users-detail')
]
GET /1.0/something/ HTTP/1.1
Host: example.com
Accept: application/json
"""
invalid_version_message = _('Invalid version in URL path.')
def determine_version(self, request, *args, **kwargs):
version = kwargs.get(self.version_param, self.default_version)
if not self.is_allowed_version(version):
raise exceptions.NotFound(self.invalid_version_message)
return version
def reverse(self, viewname, args=None, kwargs=None, request=None, format=None, **extra):
if request.version is not None:
kwargs = {} if (kwargs is None) else kwargs
kwargs[self.version_param] = request.version
return super(URLPathVersioning, self).reverse(
viewname, args, kwargs, request, format, **extra
)
class NamespaceVersioning(BaseVersioning):
"""
To the client this is the same style as `URLPathVersioning`.
The difference is in the backend - this implementation uses
Django's URL namespaces to determine the version.
    An example URL conf that is namespaced into two separate versions
# users/urls.py
urlpatterns = [
url(r'^/users/$', users_list, name='users-list'),
url(r'^/users/(?P<pk>[0-9]+)/$', users_detail, name='users-detail')
]
# urls.py
urlpatterns = [
url(r'^v1/', include('users.urls', namespace='v1')),
url(r'^v2/', include('users.urls', namespace='v2'))
]
GET /1.0/something/ HTTP/1.1
Host: example.com
Accept: application/json
"""
invalid_version_message = _('Invalid version in URL path.')
def determine_version(self, request, *args, **kwargs):
resolver_match = getattr(request, 'resolver_match', None)
if (resolver_match is None or not resolver_match.namespace):
return self.default_version
version = resolver_match.namespace
if not self.is_allowed_version(version):
raise exceptions.NotFound(self.invalid_version_message)
return version
def reverse(self, viewname, args=None, kwargs=None, request=None, format=None, **extra):
if request.version is not None:
viewname = self.get_versioned_viewname(viewname, request)
return super(NamespaceVersioning, self).reverse(
viewname, args, kwargs, request, format, **extra
)
def get_versioned_viewname(self, viewname, request):
return request.version + ':' + viewname
class HostNameVersioning(BaseVersioning):
"""
GET /something/ HTTP/1.1
Host: v1.example.com
Accept: application/json
"""
hostname_regex = re.compile(r'^([a-zA-Z0-9]+)\.[a-zA-Z0-9]+\.[a-zA-Z0-9]+$')
invalid_version_message = _('Invalid version in hostname.')
def determine_version(self, request, *args, **kwargs):
        hostname, separator, port = request.get_host().partition(':')
match = self.hostname_regex.match(hostname)
if not match:
return self.default_version
version = match.group(1)
if not self.is_allowed_version(version):
raise exceptions.NotFound(self.invalid_version_message)
return version
# We don't need to implement `reverse`, as the hostname will already be
# preserved as part of the REST framework `reverse` implementation.
class QueryParameterVersioning(BaseVersioning):
"""
GET /something/?version=0.1 HTTP/1.1
Host: example.com
Accept: application/json
"""
invalid_version_message = _('Invalid version in query parameter.')
def determine_version(self, request, *args, **kwargs):
version = request.query_params.get(self.version_param)
if not self.is_allowed_version(version):
raise exceptions.NotFound(self.invalid_version_message)
return version
def reverse(self, viewname, args=None, kwargs=None, request=None, format=None, **extra):
url = super(QueryParameterVersioning, self).reverse(
viewname, args, kwargs, request, format, **extra
)
if request.version is not None:
return replace_query_param(url, self.version_param, request.version)
return url
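# Configuration sketch (not part of the original module): these classes are
# normally selected through REST framework settings rather than instantiated
# directly. The keys below are the standard settings; the version labels and
# the choice of URLPathVersioning are illustrative assumptions.
REST_FRAMEWORK = {
    'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning',
    'DEFAULT_VERSION': 'v1',
    'ALLOWED_VERSIONS': ['v1', 'v2'],
    'VERSION_PARAM': 'version',
}
# With this in place, determine_version() populates request.version on each
# incoming request, and reverse() re-inserts the version into generated URLs.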
|
bsd-2-clause
|
MattsFleaMarket/python-for-android
|
python-modules/twisted/twisted/test/test_adbapi.py
|
49
|
26501
|
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for twisted.enterprise.adbapi.
"""
from twisted.trial import unittest
import os, stat, new
from twisted.enterprise.adbapi import ConnectionPool, ConnectionLost, safe
from twisted.enterprise.adbapi import Connection, Transaction
from twisted.enterprise.adbapi import _unreleasedVersion
from twisted.internet import reactor, defer, interfaces
from twisted.python.failure import Failure
simple_table_schema = """
CREATE TABLE simple (
x integer
)
"""
class ADBAPITestBase:
"""Test the asynchronous DB-API code."""
openfun_called = {}
if interfaces.IReactorThreads(reactor, None) is None:
skip = "ADB-API requires threads, no way to test without them"
def extraSetUp(self):
"""
Set up the database and create a connection pool pointing at it.
"""
self.startDB()
self.dbpool = self.makePool(cp_openfun=self.openfun)
self.dbpool.start()
def tearDown(self):
d = self.dbpool.runOperation('DROP TABLE simple')
d.addCallback(lambda res: self.dbpool.close())
d.addCallback(lambda res: self.stopDB())
return d
def openfun(self, conn):
self.openfun_called[conn] = True
def checkOpenfunCalled(self, conn=None):
if not conn:
self.failUnless(self.openfun_called)
else:
self.failUnless(self.openfun_called.has_key(conn))
def testPool(self):
d = self.dbpool.runOperation(simple_table_schema)
if self.test_failures:
d.addCallback(self._testPool_1_1)
d.addCallback(self._testPool_1_2)
d.addCallback(self._testPool_1_3)
d.addCallback(self._testPool_1_4)
d.addCallback(lambda res: self.flushLoggedErrors())
d.addCallback(self._testPool_2)
d.addCallback(self._testPool_3)
d.addCallback(self._testPool_4)
d.addCallback(self._testPool_5)
d.addCallback(self._testPool_6)
d.addCallback(self._testPool_7)
d.addCallback(self._testPool_8)
d.addCallback(self._testPool_9)
return d
def _testPool_1_1(self, res):
d = defer.maybeDeferred(self.dbpool.runQuery, "select * from NOTABLE")
d.addCallbacks(lambda res: self.fail('no exception'),
lambda f: None)
return d
def _testPool_1_2(self, res):
d = defer.maybeDeferred(self.dbpool.runOperation,
"deletexxx from NOTABLE")
d.addCallbacks(lambda res: self.fail('no exception'),
lambda f: None)
return d
def _testPool_1_3(self, res):
d = defer.maybeDeferred(self.dbpool.runInteraction,
self.bad_interaction)
d.addCallbacks(lambda res: self.fail('no exception'),
lambda f: None)
return d
def _testPool_1_4(self, res):
d = defer.maybeDeferred(self.dbpool.runWithConnection,
self.bad_withConnection)
d.addCallbacks(lambda res: self.fail('no exception'),
lambda f: None)
return d
def _testPool_2(self, res):
# verify simple table is empty
sql = "select count(1) from simple"
d = self.dbpool.runQuery(sql)
def _check(row):
self.failUnless(int(row[0][0]) == 0, "Interaction not rolled back")
self.checkOpenfunCalled()
d.addCallback(_check)
return d
def _testPool_3(self, res):
sql = "select count(1) from simple"
inserts = []
# add some rows to simple table (runOperation)
for i in range(self.num_iterations):
sql = "insert into simple(x) values(%d)" % i
inserts.append(self.dbpool.runOperation(sql))
d = defer.gatherResults(inserts)
def _select(res):
# make sure they were added (runQuery)
sql = "select x from simple order by x";
d = self.dbpool.runQuery(sql)
return d
d.addCallback(_select)
def _check(rows):
self.failUnless(len(rows) == self.num_iterations,
"Wrong number of rows")
for i in range(self.num_iterations):
self.failUnless(len(rows[i]) == 1, "Wrong size row")
self.failUnless(rows[i][0] == i, "Values not returned.")
d.addCallback(_check)
return d
def _testPool_4(self, res):
# runInteraction
d = self.dbpool.runInteraction(self.interaction)
d.addCallback(lambda res: self.assertEquals(res, "done"))
return d
def _testPool_5(self, res):
# withConnection
d = self.dbpool.runWithConnection(self.withConnection)
d.addCallback(lambda res: self.assertEquals(res, "done"))
return d
def _testPool_6(self, res):
# Test a withConnection cannot be closed
d = self.dbpool.runWithConnection(self.close_withConnection)
return d
def _testPool_7(self, res):
# give the pool a workout
ds = []
for i in range(self.num_iterations):
sql = "select x from simple where x = %d" % i
ds.append(self.dbpool.runQuery(sql))
dlist = defer.DeferredList(ds, fireOnOneErrback=True)
def _check(result):
for i in range(self.num_iterations):
self.failUnless(result[i][1][0][0] == i, "Value not returned")
dlist.addCallback(_check)
return dlist
def _testPool_8(self, res):
# now delete everything
ds = []
for i in range(self.num_iterations):
sql = "delete from simple where x = %d" % i
ds.append(self.dbpool.runOperation(sql))
dlist = defer.DeferredList(ds, fireOnOneErrback=True)
return dlist
def _testPool_9(self, res):
# verify simple table is empty
sql = "select count(1) from simple"
d = self.dbpool.runQuery(sql)
def _check(row):
self.failUnless(int(row[0][0]) == 0,
"Didn't successfully delete table contents")
self.checkConnect()
d.addCallback(_check)
return d
def checkConnect(self):
"""Check the connect/disconnect synchronous calls."""
conn = self.dbpool.connect()
self.checkOpenfunCalled(conn)
curs = conn.cursor()
curs.execute("insert into simple(x) values(1)")
curs.execute("select x from simple")
res = curs.fetchall()
self.failUnlessEqual(len(res), 1)
self.failUnlessEqual(len(res[0]), 1)
self.failUnlessEqual(res[0][0], 1)
curs.execute("delete from simple")
curs.execute("select x from simple")
self.failUnlessEqual(len(curs.fetchall()), 0)
curs.close()
self.dbpool.disconnect(conn)
def interaction(self, transaction):
transaction.execute("select x from simple order by x")
for i in range(self.num_iterations):
row = transaction.fetchone()
self.failUnless(len(row) == 1, "Wrong size row")
self.failUnless(row[0] == i, "Value not returned.")
# should test this, but gadfly throws an exception instead
#self.failUnless(transaction.fetchone() is None, "Too many rows")
return "done"
def bad_interaction(self, transaction):
if self.can_rollback:
transaction.execute("insert into simple(x) values(0)")
transaction.execute("select * from NOTABLE")
def withConnection(self, conn):
curs = conn.cursor()
try:
curs.execute("select x from simple order by x")
for i in range(self.num_iterations):
row = curs.fetchone()
self.failUnless(len(row) == 1, "Wrong size row")
self.failUnless(row[0] == i, "Value not returned.")
# should test this, but gadfly throws an exception instead
#self.failUnless(transaction.fetchone() is None, "Too many rows")
finally:
curs.close()
return "done"
def close_withConnection(self, conn):
conn.close()
def bad_withConnection(self, conn):
curs = conn.cursor()
try:
curs.execute("select * from NOTABLE")
finally:
curs.close()
class ReconnectTestBase:
"""Test the asynchronous DB-API code with reconnect."""
if interfaces.IReactorThreads(reactor, None) is None:
skip = "ADB-API requires threads, no way to test without them"
def extraSetUp(self):
"""
Skip the test if C{good_sql} is unavailable. Otherwise, set up the
database, create a connection pool pointed at it, and set up a simple
schema in it.
"""
if self.good_sql is None:
raise unittest.SkipTest('no good sql for reconnect test')
self.startDB()
self.dbpool = self.makePool(cp_max=1, cp_reconnect=True,
cp_good_sql=self.good_sql)
self.dbpool.start()
return self.dbpool.runOperation(simple_table_schema)
def tearDown(self):
d = self.dbpool.runOperation('DROP TABLE simple')
d.addCallback(lambda res: self.dbpool.close())
d.addCallback(lambda res: self.stopDB())
return d
def testPool(self):
d = defer.succeed(None)
d.addCallback(self._testPool_1)
d.addCallback(self._testPool_2)
if not self.early_reconnect:
d.addCallback(self._testPool_3)
d.addCallback(self._testPool_4)
d.addCallback(self._testPool_5)
return d
def _testPool_1(self, res):
sql = "select count(1) from simple"
d = self.dbpool.runQuery(sql)
def _check(row):
self.failUnless(int(row[0][0]) == 0, "Table not empty")
d.addCallback(_check)
return d
def _testPool_2(self, res):
# reach in and close the connection manually
self.dbpool.connections.values()[0].close()
def _testPool_3(self, res):
sql = "select count(1) from simple"
d = defer.maybeDeferred(self.dbpool.runQuery, sql)
d.addCallbacks(lambda res: self.fail('no exception'),
lambda f: None)
return d
def _testPool_4(self, res):
sql = "select count(1) from simple"
d = self.dbpool.runQuery(sql)
def _check(row):
self.failUnless(int(row[0][0]) == 0, "Table not empty")
d.addCallback(_check)
return d
def _testPool_5(self, res):
self.flushLoggedErrors()
sql = "select * from NOTABLE" # bad sql
d = defer.maybeDeferred(self.dbpool.runQuery, sql)
d.addCallbacks(lambda res: self.fail('no exception'),
lambda f: self.failIf(f.check(ConnectionLost)))
return d
class DBTestConnector:
"""A class which knows how to test for the presence of
and establish a connection to a relational database.
To enable test cases which use a central, system database,
you must create a database named DB_NAME with a user DB_USER
and password DB_PASS with full access rights to database DB_NAME.
"""
TEST_PREFIX = None # used for creating new test cases
DB_NAME = "twisted_test"
DB_USER = 'twisted_test'
DB_PASS = 'twisted_test'
DB_DIR = None # directory for database storage
nulls_ok = True # nulls supported
trailing_spaces_ok = True # trailing spaces in strings preserved
can_rollback = True # rollback supported
test_failures = True # test bad sql?
escape_slashes = True # escape \ in sql?
good_sql = ConnectionPool.good_sql
early_reconnect = True # cursor() will fail on closed connection
can_clear = True # can try to clear out tables when starting
num_iterations = 50 # number of iterations for test loops
# (lower this for slow db's)
def setUp(self):
self.DB_DIR = self.mktemp()
os.mkdir(self.DB_DIR)
if not self.can_connect():
raise unittest.SkipTest('%s: Cannot access db' % self.TEST_PREFIX)
return self.extraSetUp()
def can_connect(self):
"""Return true if this database is present on the system
and can be used in a test."""
raise NotImplementedError()
def startDB(self):
"""Take any steps needed to bring database up."""
pass
def stopDB(self):
"""Bring database down, if needed."""
pass
def makePool(self, **newkw):
"""Create a connection pool with additional keyword arguments."""
args, kw = self.getPoolArgs()
kw = kw.copy()
kw.update(newkw)
return ConnectionPool(*args, **kw)
def getPoolArgs(self):
"""Return a tuple (args, kw) of list and keyword arguments
that need to be passed to ConnectionPool to create a connection
to this database."""
raise NotImplementedError()
class GadflyConnector(DBTestConnector):
TEST_PREFIX = 'Gadfly'
nulls_ok = False
can_rollback = False
escape_slashes = False
good_sql = 'select * from simple where 1=0'
num_iterations = 1 # slow
def can_connect(self):
try: import gadfly
except: return False
if not getattr(gadfly, 'connect', None):
gadfly.connect = gadfly.gadfly
return True
def startDB(self):
import gadfly
conn = gadfly.gadfly()
conn.startup(self.DB_NAME, self.DB_DIR)
# gadfly seems to want us to create something to get the db going
cursor = conn.cursor()
cursor.execute("create table x (x integer)")
conn.commit()
conn.close()
def getPoolArgs(self):
args = ('gadfly', self.DB_NAME, self.DB_DIR)
kw = {'cp_max': 1}
return args, kw
class SQLiteConnector(DBTestConnector):
TEST_PREFIX = 'SQLite'
escape_slashes = False
num_iterations = 1 # slow
def can_connect(self):
try: import sqlite
except: return False
return True
def startDB(self):
self.database = os.path.join(self.DB_DIR, self.DB_NAME)
if os.path.exists(self.database):
os.unlink(self.database)
def getPoolArgs(self):
args = ('sqlite',)
kw = {'database': self.database, 'cp_max': 1}
return args, kw
class PyPgSQLConnector(DBTestConnector):
TEST_PREFIX = "PyPgSQL"
def can_connect(self):
try: from pyPgSQL import PgSQL
except: return False
try:
conn = PgSQL.connect(database=self.DB_NAME, user=self.DB_USER,
password=self.DB_PASS)
conn.close()
return True
except:
return False
def getPoolArgs(self):
args = ('pyPgSQL.PgSQL',)
kw = {'database': self.DB_NAME, 'user': self.DB_USER,
'password': self.DB_PASS, 'cp_min': 0}
return args, kw
class PsycopgConnector(DBTestConnector):
TEST_PREFIX = 'Psycopg'
def can_connect(self):
try: import psycopg
except: return False
try:
conn = psycopg.connect(database=self.DB_NAME, user=self.DB_USER,
password=self.DB_PASS)
conn.close()
return True
except:
return False
def getPoolArgs(self):
args = ('psycopg',)
kw = {'database': self.DB_NAME, 'user': self.DB_USER,
'password': self.DB_PASS, 'cp_min': 0}
return args, kw
class MySQLConnector(DBTestConnector):
TEST_PREFIX = 'MySQL'
trailing_spaces_ok = False
can_rollback = False
early_reconnect = False
def can_connect(self):
try: import MySQLdb
except: return False
try:
conn = MySQLdb.connect(db=self.DB_NAME, user=self.DB_USER,
passwd=self.DB_PASS)
conn.close()
return True
except:
return False
def getPoolArgs(self):
args = ('MySQLdb',)
kw = {'db': self.DB_NAME, 'user': self.DB_USER, 'passwd': self.DB_PASS}
return args, kw
class FirebirdConnector(DBTestConnector):
TEST_PREFIX = 'Firebird'
test_failures = False # failure testing causes problems
escape_slashes = False
good_sql = None # firebird doesn't handle failed sql well
can_clear = False # firebird is not so good
num_iterations = 5 # slow
def can_connect(self):
try: import kinterbasdb
except: return False
try:
self.startDB()
self.stopDB()
return True
except:
return False
def startDB(self):
import kinterbasdb
self.DB_NAME = os.path.join(self.DB_DIR, DBTestConnector.DB_NAME)
os.chmod(self.DB_DIR, stat.S_IRWXU + stat.S_IRWXG + stat.S_IRWXO)
sql = 'create database "%s" user "%s" password "%s"'
sql %= (self.DB_NAME, self.DB_USER, self.DB_PASS);
conn = kinterbasdb.create_database(sql)
conn.close()
def getPoolArgs(self):
args = ('kinterbasdb',)
kw = {'database': self.DB_NAME, 'host': '127.0.0.1',
'user': self.DB_USER, 'password': self.DB_PASS}
return args, kw
def stopDB(self):
import kinterbasdb
conn = kinterbasdb.connect(database=self.DB_NAME,
host='127.0.0.1', user=self.DB_USER,
password=self.DB_PASS)
conn.drop_database()
def makeSQLTests(base, suffix, globals):
"""
Make a test case for every db connector which can connect.
@param base: Base class for test case. Additional base classes
                 will be a DBTestConnector subclass and unittest.TestCase
@param suffix: A suffix used to create test case names. Prefixes
                 are defined in the DBTestConnector subclasses.
"""
connectors = [GadflyConnector, SQLiteConnector, PyPgSQLConnector,
PsycopgConnector, MySQLConnector, FirebirdConnector]
for connclass in connectors:
name = connclass.TEST_PREFIX + suffix
klass = new.classobj(name, (connclass, base, unittest.TestCase), base.__dict__)
globals[name] = klass
# GadflyADBAPITestCase SQLiteADBAPITestCase PyPgSQLADBAPITestCase
# PsycopgADBAPITestCase MySQLADBAPITestCase FirebirdADBAPITestCase
makeSQLTests(ADBAPITestBase, 'ADBAPITestCase', globals())
# GadflyReconnectTestCase SQLiteReconnectTestCase PyPgSQLReconnectTestCase
# PsycopgReconnectTestCase MySQLReconnectTestCase FirebirdReconnectTestCase
makeSQLTests(ReconnectTestBase, 'ReconnectTestCase', globals())
class DeprecationTestCase(unittest.TestCase):
"""
Test deprecations in twisted.enterprise.adbapi
"""
def test_safe(self):
"""
Test deprecation of twisted.enterprise.adbapi.safe()
"""
result = self.callDeprecated(_unreleasedVersion,
safe, "test'")
# make sure safe still behaves like the original
self.assertEqual(result, "test''")
class FakePool(object):
"""
A fake L{ConnectionPool} for tests.
@ivar connectionFactory: factory for making connections returned by the
C{connect} method.
@type connectionFactory: any callable
"""
reconnect = True
noisy = True
def __init__(self, connectionFactory):
self.connectionFactory = connectionFactory
def connect(self):
"""
Return an instance of C{self.connectionFactory}.
"""
return self.connectionFactory()
def disconnect(self, connection):
"""
Do nothing.
"""
class ConnectionTestCase(unittest.TestCase):
"""
Tests for the L{Connection} class.
"""
def test_rollbackErrorLogged(self):
"""
If an error happens during rollback, L{ConnectionLost} is raised but
the original error is logged.
"""
class ConnectionRollbackRaise(object):
def rollback(self):
raise RuntimeError("problem!")
pool = FakePool(ConnectionRollbackRaise)
connection = Connection(pool)
self.assertRaises(ConnectionLost, connection.rollback)
errors = self.flushLoggedErrors(RuntimeError)
self.assertEquals(len(errors), 1)
self.assertEquals(errors[0].value.args[0], "problem!")
class TransactionTestCase(unittest.TestCase):
"""
Tests for the L{Transaction} class.
"""
def test_reopenLogErrorIfReconnect(self):
"""
If the cursor creation raises an error in L{Transaction.reopen}, it
        reconnects but logs the error that occurred.
"""
class ConnectionCursorRaise(object):
count = 0
def reconnect(self):
pass
def cursor(self):
if self.count == 0:
self.count += 1
raise RuntimeError("problem!")
pool = FakePool(None)
transaction = Transaction(pool, ConnectionCursorRaise())
transaction.reopen()
errors = self.flushLoggedErrors(RuntimeError)
self.assertEquals(len(errors), 1)
self.assertEquals(errors[0].value.args[0], "problem!")
class NonThreadPool(object):
def callInThreadWithCallback(self, onResult, f, *a, **kw):
success = True
try:
result = f(*a, **kw)
except Exception, e:
success = False
result = Failure()
onResult(success, result)
class DummyConnectionPool(ConnectionPool):
"""
    A testable L{ConnectionPool}.
"""
threadpool = NonThreadPool()
def __init__(self):
"""
Don't forward init call.
"""
self.reactor = reactor
class EventReactor(object):
"""
Partial L{IReactorCore} implementation with simple event-related
methods.
@ivar _running: A C{bool} indicating whether the reactor is pretending
to have been started already or not.
@ivar triggers: A C{list} of pending system event triggers.
"""
def __init__(self, running):
self._running = running
self.triggers = []
def callWhenRunning(self, function):
if self._running:
function()
else:
return self.addSystemEventTrigger('after', 'startup', function)
def addSystemEventTrigger(self, phase, event, trigger):
handle = (phase, event, trigger)
self.triggers.append(handle)
return handle
def removeSystemEventTrigger(self, handle):
self.triggers.remove(handle)
class ConnectionPoolTestCase(unittest.TestCase):
"""
Unit tests for L{ConnectionPool}.
"""
def test_runWithConnectionRaiseOriginalError(self):
"""
If rollback fails, L{ConnectionPool.runWithConnection} raises the
        original exception and logs the error from the rollback.
"""
class ConnectionRollbackRaise(object):
def __init__(self, pool):
pass
def rollback(self):
raise RuntimeError("problem!")
def raisingFunction(connection):
raise ValueError("foo")
pool = DummyConnectionPool()
pool.connectionFactory = ConnectionRollbackRaise
d = pool.runWithConnection(raisingFunction)
d = self.assertFailure(d, ValueError)
def cbFailed(ignored):
errors = self.flushLoggedErrors(RuntimeError)
self.assertEquals(len(errors), 1)
self.assertEquals(errors[0].value.args[0], "problem!")
d.addCallback(cbFailed)
return d
def test_closeLogError(self):
"""
L{ConnectionPool._close} logs exceptions.
"""
class ConnectionCloseRaise(object):
def close(self):
raise RuntimeError("problem!")
pool = DummyConnectionPool()
pool._close(ConnectionCloseRaise())
errors = self.flushLoggedErrors(RuntimeError)
self.assertEquals(len(errors), 1)
self.assertEquals(errors[0].value.args[0], "problem!")
def test_runWithInteractionRaiseOriginalError(self):
"""
If rollback fails, L{ConnectionPool.runInteraction} raises the
        original exception and logs the error from the rollback.
"""
class ConnectionRollbackRaise(object):
def __init__(self, pool):
pass
def rollback(self):
raise RuntimeError("problem!")
class DummyTransaction(object):
def __init__(self, pool, connection):
pass
def raisingFunction(transaction):
raise ValueError("foo")
pool = DummyConnectionPool()
pool.connectionFactory = ConnectionRollbackRaise
pool.transactionFactory = DummyTransaction
d = pool.runInteraction(raisingFunction)
d = self.assertFailure(d, ValueError)
def cbFailed(ignored):
errors = self.flushLoggedErrors(RuntimeError)
self.assertEquals(len(errors), 1)
self.assertEquals(errors[0].value.args[0], "problem!")
d.addCallback(cbFailed)
return d
def test_unstartedClose(self):
"""
If L{ConnectionPool.close} is called without L{ConnectionPool.start}
having been called, the pool's startup event is cancelled.
"""
reactor = EventReactor(False)
pool = ConnectionPool('twisted.test.test_adbapi', cp_reactor=reactor)
# There should be a startup trigger waiting.
self.assertEquals(reactor.triggers, [('after', 'startup', pool._start)])
pool.close()
# But not anymore.
self.assertFalse(reactor.triggers)
def test_startedClose(self):
"""
If L{ConnectionPool.close} is called after it has been started, but
not by its shutdown trigger, the shutdown trigger is cancelled.
"""
reactor = EventReactor(True)
pool = ConnectionPool('twisted.test.test_adbapi', cp_reactor=reactor)
# There should be a shutdown trigger waiting.
self.assertEquals(reactor.triggers, [('during', 'shutdown', pool.finalClose)])
pool.close()
# But not anymore.
self.assertFalse(reactor.triggers)
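# Quick usage sketch (not part of the test module): the ConnectionPool API that
# the tests above exercise. The sqlite3 driver name and the database path are
# assumptions; runOperation/runQuery both return Deferreds that fire from a
# worker thread once the statement completes.
from twisted.enterprise import adbapi
from twisted.internet import reactor

pool = adbapi.ConnectionPool('sqlite3', 'example.db', check_same_thread=False)

def _show(rows):
    print rows           # runQuery fires its Deferred with the fetched rows
    return pool.close()  # shut the pool down once we are done with it

d = pool.runOperation("create table if not exists simple (x integer)")
d.addCallback(lambda _: pool.runQuery("select count(1) from simple"))
d.addCallback(_show)
d.addBoth(lambda _: reactor.stop())
reactor.run()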
|
apache-2.0
|
p4datasystems/CarnotKE
|
jyhton/lib-python/2.7/test/test_generators.py
|
72
|
50768
|
tutorial_tests = """
Let's try a simple generator:
>>> def f():
... yield 1
... yield 2
>>> for i in f():
... print i
1
2
>>> g = f()
>>> g.next()
1
>>> g.next()
2
"Falling off the end" stops the generator:
>>> g.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
StopIteration
"return" also stops the generator:
>>> def f():
... yield 1
... return
... yield 2 # never reached
...
>>> g = f()
>>> g.next()
1
>>> g.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 3, in f
StopIteration
>>> g.next() # once stopped, can't be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
"raise StopIteration" stops the generator too:
>>> def f():
... yield 1
... raise StopIteration
... yield 2 # never reached
...
>>> g = f()
>>> g.next()
1
>>> g.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>> g.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
However, they are not exactly equivalent:
>>> def g1():
... try:
... return
... except:
... yield 1
...
>>> list(g1())
[]
>>> def g2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print list(g2())
[42]
This may be surprising at first:
>>> def g3():
... try:
... return
... finally:
... yield 1
...
>>> list(g3())
[1]
Let's create an alternate range() function implemented as a generator:
>>> def yrange(n):
... for i in range(n):
... yield i
...
>>> list(yrange(5))
[0, 1, 2, 3, 4]
Generators always return to the most recent caller:
>>> def creator():
... r = yrange(5)
... print "creator", r.next()
... return r
...
>>> def caller():
... r = creator()
... for i in r:
... print "caller", i
...
>>> caller()
creator 0
caller 1
caller 2
caller 3
caller 4
Generators can call other generators:
>>> def zrange(n):
... for i in yrange(n):
... yield i
...
>>> list(zrange(5))
[0, 1, 2, 3, 4]
"""
# The examples from PEP 255.
pep_tests = """
Specification: Yield
Restriction: A generator cannot be resumed while it is actively
running:
>>> def g():
... i = me.next()
... yield i
>>> me = g()
>>> me.next()
Traceback (most recent call last):
...
File "<string>", line 2, in g
ValueError: generator already executing
Specification: Return
Note that return isn't always equivalent to raising StopIteration: the
difference lies in how enclosing try/except constructs are treated.
For example,
>>> def f1():
... try:
... return
... except:
... yield 1
>>> print list(f1())
[]
because, as in any function, return simply exits, but
>>> def f2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print list(f2())
[42]
because StopIteration is captured by a bare "except", as is any
exception.
Specification: Generators and Exception Propagation
>>> def f():
... return 1//0
>>> def g():
... yield f() # the zero division exception propagates
... yield 42 # and we'll never get here
>>> k = g()
>>> k.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
File "<stdin>", line 2, in f
ZeroDivisionError: integer division or modulo by zero
>>> k.next() # and the generator cannot be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>>
Specification: Try/Except/Finally
>>> def f():
... try:
... yield 1
... try:
... yield 2
... 1//0
... yield 3 # never get here
... except ZeroDivisionError:
... yield 4
... yield 5
... raise
... except:
... yield 6
... yield 7 # the "raise" above stops this
... except:
... yield 8
... yield 9
... try:
... x = 12
... finally:
... yield 10
... yield 11
>>> print list(f())
[1, 2, 4, 5, 8, 9, 10, 11]
>>>
Guido's binary tree example.
>>> # A binary tree class.
>>> class Tree:
...
... def __init__(self, label, left=None, right=None):
... self.label = label
... self.left = left
... self.right = right
...
... def __repr__(self, level=0, indent=" "):
... s = level*indent + repr(self.label)
... if self.left:
... s = s + "\\n" + self.left.__repr__(level+1, indent)
... if self.right:
... s = s + "\\n" + self.right.__repr__(level+1, indent)
... return s
...
... def __iter__(self):
... return inorder(self)
>>> # Create a Tree from a list.
>>> def tree(list):
... n = len(list)
... if n == 0:
... return []
... i = n // 2
... return Tree(list[i], tree(list[:i]), tree(list[i+1:]))
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # A recursive generator that generates Tree labels in in-order.
>>> def inorder(t):
... if t:
... for x in inorder(t.left):
... yield x
... yield t.label
... for x in inorder(t.right):
... yield x
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # Print the nodes of the tree in in-order.
>>> for x in t:
... print x,
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
>>> # A non-recursive generator.
>>> def inorder(node):
... stack = []
... while node:
... while node.left:
... stack.append(node)
... node = node.left
... yield node.label
... while not node.right:
... try:
... node = stack.pop()
... except IndexError:
... return
... yield node.label
... node = node.right
>>> # Exercise the non-recursive generator.
>>> for x in t:
... print x,
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
"""
# Examples from Iterator-List and Python-Dev and c.l.py.
email_tests = """
The difference between yielding None and returning it.
>>> def g():
... for i in range(3):
... yield None
... yield None
... return
>>> list(g())
[None, None, None, None]
Ensure that explicitly raising StopIteration acts like any other exception
in try/except, not like a return.
>>> def g():
... yield 1
... try:
... raise StopIteration
... except:
... yield 2
... yield 3
>>> list(g())
[1, 2, 3]
Next one was posted to c.l.py.
>>> def gcomb(x, k):
... "Generate all combinations of k elements from list x."
...
... if k > len(x):
... return
... if k == 0:
... yield []
... else:
... first, rest = x[0], x[1:]
... # A combination does or doesn't contain first.
... # If it does, the remainder is a k-1 comb of rest.
... for c in gcomb(rest, k-1):
... c.insert(0, first)
... yield c
... # If it doesn't contain first, it's a k comb of rest.
... for c in gcomb(rest, k):
... yield c
>>> seq = range(1, 5)
>>> for k in range(len(seq) + 2):
... print "%d-combs of %s:" % (k, seq)
... for c in gcomb(seq, k):
... print " ", c
0-combs of [1, 2, 3, 4]:
[]
1-combs of [1, 2, 3, 4]:
[1]
[2]
[3]
[4]
2-combs of [1, 2, 3, 4]:
[1, 2]
[1, 3]
[1, 4]
[2, 3]
[2, 4]
[3, 4]
3-combs of [1, 2, 3, 4]:
[1, 2, 3]
[1, 2, 4]
[1, 3, 4]
[2, 3, 4]
4-combs of [1, 2, 3, 4]:
[1, 2, 3, 4]
5-combs of [1, 2, 3, 4]:
From the Iterators list, about the types of these things.
>>> def g():
... yield 1
...
>>> type(g)
<type 'function'>
>>> i = g()
>>> type(i)
<type 'generator'>
>>> [s for s in dir(i) if not s.startswith('_')]
['close', 'gi_code', 'gi_frame', 'gi_running', 'next', 'send', 'throw']
>>> from test.test_support import HAVE_DOCSTRINGS
>>> print(i.next.__doc__ if HAVE_DOCSTRINGS else 'x.next() -> the next value, or raise StopIteration')
x.next() -> the next value, or raise StopIteration
>>> iter(i) is i
True
>>> import types
>>> isinstance(i, types.GeneratorType)
True
And more, added later.
>>> i.gi_running
0
>>> type(i.gi_frame)
<type 'frame'>
>>> i.gi_running = 42
Traceback (most recent call last):
...
TypeError: readonly attribute
>>> def g():
... yield me.gi_running
>>> me = g()
>>> me.gi_running
0
>>> me.next()
1
>>> me.gi_running
0
A clever union-find implementation from c.l.py, due to David Eppstein.
Sent: Friday, June 29, 2001 12:16 PM
To: [email protected]
Subject: Re: PEP 255: Simple Generators
>>> class disjointSet:
... def __init__(self, name):
... self.name = name
... self.parent = None
... self.generator = self.generate()
...
... def generate(self):
... while not self.parent:
... yield self
... for x in self.parent.generator:
... yield x
...
... def find(self):
... return self.generator.next()
...
... def union(self, parent):
... if self.parent:
... raise ValueError("Sorry, I'm not a root!")
... self.parent = parent
...
... def __str__(self):
... return self.name
>>> names = "ABCDEFGHIJKLM"
>>> sets = [disjointSet(name) for name in names]
>>> roots = sets[:]
>>> import random
>>> gen = random.WichmannHill(42)
>>> while 1:
... for s in sets:
... print "%s->%s" % (s, s.find()),
... print
... if len(roots) > 1:
... s1 = gen.choice(roots)
... roots.remove(s1)
... s2 = gen.choice(roots)
... s1.union(s2)
... print "merged", s1, "into", s2
... else:
... break
A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->K L->L M->M
merged D into G
A->A B->B C->C D->G E->E F->F G->G H->H I->I J->J K->K L->L M->M
merged C into F
A->A B->B C->F D->G E->E F->F G->G H->H I->I J->J K->K L->L M->M
merged L into A
A->A B->B C->F D->G E->E F->F G->G H->H I->I J->J K->K L->A M->M
merged H into E
A->A B->B C->F D->G E->E F->F G->G H->E I->I J->J K->K L->A M->M
merged B into E
A->A B->E C->F D->G E->E F->F G->G H->E I->I J->J K->K L->A M->M
merged J into G
A->A B->E C->F D->G E->E F->F G->G H->E I->I J->G K->K L->A M->M
merged E into G
A->A B->G C->F D->G E->G F->F G->G H->G I->I J->G K->K L->A M->M
merged M into G
A->A B->G C->F D->G E->G F->F G->G H->G I->I J->G K->K L->A M->G
merged I into K
A->A B->G C->F D->G E->G F->F G->G H->G I->K J->G K->K L->A M->G
merged K into A
A->A B->G C->F D->G E->G F->F G->G H->G I->A J->G K->A L->A M->G
merged F into A
A->A B->G C->A D->G E->G F->A G->G H->G I->A J->G K->A L->A M->G
merged A into G
A->G B->G C->G D->G E->G F->G G->G H->G I->G J->G K->G L->G M->G
"""
# Emacs turd '
# Fun tests (for sufficiently warped notions of "fun").
fun_tests = """
Build up to a recursive Sieve of Eratosthenes generator.
>>> def firstn(g, n):
... return [g.next() for i in range(n)]
>>> def intsfrom(i):
... while 1:
... yield i
... i += 1
>>> firstn(intsfrom(5), 7)
[5, 6, 7, 8, 9, 10, 11]
>>> def exclude_multiples(n, ints):
... for i in ints:
... if i % n:
... yield i
>>> firstn(exclude_multiples(3, intsfrom(1)), 6)
[1, 2, 4, 5, 7, 8]
>>> def sieve(ints):
... prime = ints.next()
... yield prime
... not_divisible_by_prime = exclude_multiples(prime, ints)
... for p in sieve(not_divisible_by_prime):
... yield p
>>> primes = sieve(intsfrom(2))
>>> firstn(primes, 20)
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71]
Another famous problem: generate all integers of the form
2**i * 3**j * 5**k
in increasing order, where i,j,k >= 0. Trickier than it may look at first!
Try writing it without generators, and correctly, and without generating
3 internal results for each result output.
>>> def times(n, g):
... for i in g:
... yield n * i
>>> firstn(times(10, intsfrom(1)), 10)
[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
>>> def merge(g, h):
... ng = g.next()
... nh = h.next()
... while 1:
... if ng < nh:
... yield ng
... ng = g.next()
... elif ng > nh:
... yield nh
... nh = h.next()
... else:
... yield ng
... ng = g.next()
... nh = h.next()
The following works, but is doing a whale of a lot of redundant work --
it's not clear how to get the internal uses of m235 to share a single
generator. Note that me_times2 (etc) each need to see every element in the
result sequence. So this is an example where lazy lists are more natural
(you can look at the head of a lazy list any number of times).
>>> def m235():
... yield 1
... me_times2 = times(2, m235())
... me_times3 = times(3, m235())
... me_times5 = times(5, m235())
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Don't print "too many" of these -- the implementation above is extremely
inefficient: each call of m235() leads to 3 recursive calls, and in
turn each of those 3 more, and so on, and so on, until we've descended
enough levels to satisfy the print stmts. Very odd: when I printed 5
lines of results below, this managed to screw up Win98's malloc in "the
usual" way, i.e. the heap grew over 4Mb so Win98 started fragmenting
address space, and it *looked* like a very slow leak.
>>> result = m235()
>>> for i in range(3):
... print firstn(result, 15)
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
Heh. Here's one way to get a shared list, complete with an excruciating
namespace renaming trick. The *pretty* part is that the times() and merge()
functions can be reused as-is, because they only assume their stream
arguments are iterable -- a LazyList is the same as a generator to times().
>>> class LazyList:
... def __init__(self, g):
... self.sofar = []
... self.fetch = g.next
...
... def __getitem__(self, i):
... sofar, fetch = self.sofar, self.fetch
... while i >= len(sofar):
... sofar.append(fetch())
... return sofar[i]
>>> def m235():
... yield 1
... # Gack: m235 below actually refers to a LazyList.
... me_times2 = times(2, m235)
... me_times3 = times(3, m235)
... me_times5 = times(5, m235)
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Print as many of these as you like -- *this* implementation is memory-
efficient.
>>> m235 = LazyList(m235())
>>> for i in range(5):
... print [m235[j] for j in range(15*i, 15*(i+1))]
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
Ye olde Fibonacci generator, LazyList style.
>>> def fibgen(a, b):
...
... def sum(g, h):
... while 1:
... yield g.next() + h.next()
...
... def tail(g):
... g.next() # throw first away
... for x in g:
... yield x
...
... yield a
... yield b
... for s in sum(iter(fib),
... tail(iter(fib))):
... yield s
>>> fib = LazyList(fibgen(1, 2))
>>> firstn(iter(fib), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
Running after your tail with itertools.tee (new in version 2.4)
The algorithms "m235" (Hamming) and Fibonacci presented above are both
examples of a whole family of FP (functional programming) algorithms
where a function produces and returns a list while the production algorithm
assumes the list has already been produced by recursively calling itself.
For these algorithms to work, they must:
- produce at least a first element without presupposing the existence of
the rest of the list
- produce their elements in a lazy manner
To work efficiently, the beginning of the list must not be recomputed over
and over again. This is ensured in most FP languages as a built-in feature.
In Python, we have to explicitly maintain a list of already computed results
and abandon genuine recursion.
This is what had been attempted above with the LazyList class. One problem
with that class is that it keeps a list of all of the generated results and
therefore continually grows. This partially defeats the goal of the generator
concept, viz. produce the results only as needed instead of producing them
all and thereby wasting memory.
Thanks to itertools.tee, it is now clear "how to get the internal uses of
m235 to share a single generator".
>>> from itertools import tee
>>> def m235():
... def _m235():
... yield 1
... for n in merge(times(2, m2),
... merge(times(3, m3),
... times(5, m5))):
... yield n
... m1 = _m235()
... m2, m3, m5, mRes = tee(m1, 4)
... return mRes
>>> it = m235()
>>> for i in range(5):
... print firstn(it, 15)
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
The "tee" function does just what we want. It internally keeps a generated
result for as long as it has not been "consumed" from all of the duplicated
iterators, whereupon it is deleted. You can therefore print the Hamming
sequence for hours with little or no growth in memory usage.
The beauty of it is that recursive running-after-their-tail FP algorithms
are quite straightforwardly expressed with this Python idiom.
Ye olde Fibonacci generator, tee style.
>>> def fib():
...
... def _isum(g, h):
... while 1:
... yield g.next() + h.next()
...
... def _fib():
... yield 1
... yield 2
... fibTail.next() # throw first away
... for res in _isum(fibHead, fibTail):
... yield res
...
... realfib = _fib()
... fibHead, fibTail, fibRes = tee(realfib, 3)
... return fibRes
>>> firstn(fib(), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
"""
# syntax_tests mostly provokes SyntaxErrors. Also fiddling with #if 0
# hackery.
syntax_tests = """
>>> def f():
... return 22
... yield 1
Traceback (most recent call last):
..
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[0]>, line 3)
>>> def f():
... yield 1
... return 22
Traceback (most recent call last):
..
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[1]>, line 3)
"return None" is not the same as "return" in a generator:
>>> def f():
... yield 1
... return None
Traceback (most recent call last):
..
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[2]>, line 3)
These are fine:
>>> def f():
... yield 1
... return
>>> def f():
... try:
... yield 1
... finally:
... pass
>>> def f():
... try:
... try:
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... pass
... finally:
... pass
>>> def f():
... try:
... try:
... yield 12
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... try:
... x = 12
... finally:
... yield 12
... except:
... return
>>> list(f())
[12, 666]
>>> def f():
... yield
>>> type(f())
<type 'generator'>
>>> def f():
... if 0:
... yield
>>> type(f())
<type 'generator'>
>>> def f():
... if 0:
... yield 1
>>> type(f())
<type 'generator'>
>>> def f():
... if "":
... yield None
>>> type(f())
<type 'generator'>
>>> def f():
... return
... try:
... if x==4:
... pass
... elif 0:
... try:
... 1//0
... except SyntaxError:
... pass
... else:
... if 0:
... while 12:
... x += 1
... yield 2 # don't blink
... f(a, b, c, d, e)
... else:
... pass
... except:
... x = 1
... return
>>> type(f())
<type 'generator'>
>>> def f():
... if 0:
... def g():
... yield 1
...
>>> type(f())
<type 'NoneType'>
>>> def f():
... if 0:
... class C:
... def __init__(self):
... yield 1
... def f(self):
... yield 2
>>> type(f())
<type 'NoneType'>
>>> def f():
... if 0:
... return
... if 0:
... yield 2
>>> type(f())
<type 'generator'>
>>> def f():
... if 0:
... lambda x: x # shouldn't trigger here
... return # or here
... def f(i):
... return 2*i # or here
... if 0:
... return 3 # but *this* sucks (line 8)
... if 0:
... yield 2 # because it's a generator (line 10)
Traceback (most recent call last):
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[24]>, line 10)
This one caused a crash (see SF bug 567538):
>>> def f():
... for i in range(3):
... try:
... continue
... finally:
... yield i
...
>>> g = f()
>>> print g.next()
0
>>> print g.next()
1
>>> print g.next()
2
>>> print g.next()
Traceback (most recent call last):
StopIteration
Test the gi_code attribute
>>> def f():
... yield 5
...
>>> g = f()
>>> g.gi_code is f.func_code
True
>>> g.next()
5
>>> g.next()
Traceback (most recent call last):
StopIteration
>>> g.gi_code is f.func_code
True
Test the __name__ attribute and the repr()
>>> def f():
... yield 5
...
>>> g = f()
>>> g.__name__
'f'
>>> repr(g) # doctest: +ELLIPSIS
'<generator object f at ...>'
Lambdas shouldn't have their usual return behavior.
>>> x = lambda: (yield 1)
>>> list(x())
[1]
>>> x = lambda: ((yield 1), (yield 2))
>>> list(x())
[1, 2]
"""
# conjoin is a simple backtracking generator, named in honor of Icon's
# "conjunction" control structure. Pass a list of no-argument functions
# that return iterable objects. Easiest to explain by example: assume the
# function list [x, y, z] is passed. Then conjoin acts like:
#
# def g():
# values = [None] * 3
# for values[0] in x():
# for values[1] in y():
# for values[2] in z():
# yield values
#
# So some 3-lists of values *may* be generated, each time we successfully
# get into the innermost loop. If an iterator fails (is exhausted) before
# then, it "backtracks" to get the next value from the nearest enclosing
# iterator (the one "to the left"), and starts all over again at the next
# slot (pumps a fresh iterator). Of course this is most useful when the
# iterators have side-effects, so that which values *can* be generated at
# each slot depend on the values iterated at previous slots.
def simple_conjoin(gs):
values = [None] * len(gs)
def gen(i):
if i >= len(gs):
yield values
else:
for values[i] in gs[i]():
for x in gen(i+1):
yield x
for x in gen(0):
yield x
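# Tiny illustration (not in the original file): three slots, each drawing from
# [0, 1], enumerate the 3-bit binary numbers in order -- the same exercise the
# conjoin_tests below work through. The yielded list is reused, so copy it if
# you need to keep it.
def _demo_simple_conjoin():
    for values in simple_conjoin([lambda: [0, 1]] * 3):
        print list(values)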
# That works fine, but recursing a level and checking i against len(gs) for
# each item produced is inefficient. By doing manual loop unrolling across
# generator boundaries, it's possible to eliminate most of that overhead.
# This isn't worth the bother *in general* for generators, but conjoin() is
# a core building block for some CPU-intensive generator applications.
def conjoin(gs):
n = len(gs)
values = [None] * n
# Do one loop nest at time recursively, until the # of loop nests
# remaining is divisible by 3.
def gen(i):
if i >= n:
yield values
elif (n-i) % 3:
ip1 = i+1
for values[i] in gs[i]():
for x in gen(ip1):
yield x
else:
for x in _gen3(i):
yield x
# Do three loop nests at a time, recursing only if at least three more
# remain. Don't call directly: this is an internal optimization for
# gen's use.
def _gen3(i):
assert i < n and (n-i) % 3 == 0
ip1, ip2, ip3 = i+1, i+2, i+3
g, g1, g2 = gs[i : ip3]
if ip3 >= n:
# These are the last three, so we can yield values directly.
for values[i] in g():
for values[ip1] in g1():
for values[ip2] in g2():
yield values
else:
# At least 6 loop nests remain; peel off 3 and recurse for the
# rest.
for values[i] in g():
for values[ip1] in g1():
for values[ip2] in g2():
for x in _gen3(ip3):
yield x
for x in gen(0):
yield x
# And one more approach: For backtracking apps like the Knight's Tour
# solver below, the number of backtracking levels can be enormous (one
# level per square, for the Knight's Tour, so that e.g. a 100x100 board
# needs 10,000 levels). In such cases Python is likely to run out of
# stack space due to recursion. So here's a recursion-free version of
# conjoin too.
# NOTE WELL: This allows large problems to be solved with only trivial
# demands on stack space. Without explicitly resumable generators, this is
# much harder to achieve. OTOH, this is much slower (up to a factor of 2)
# than the fancy unrolled recursive conjoin.
def flat_conjoin(gs): # rename to conjoin to run tests with this instead
n = len(gs)
values = [None] * n
iters = [None] * n
_StopIteration = StopIteration # make local because caught a *lot*
i = 0
while 1:
# Descend.
try:
while i < n:
it = iters[i] = gs[i]().next
values[i] = it()
i += 1
except _StopIteration:
pass
else:
assert i == n
yield values
# Backtrack until an older iterator can be resumed.
i -= 1
while i >= 0:
try:
values[i] = iters[i]()
# Success! Start fresh at next level.
i += 1
break
except _StopIteration:
# Continue backtracking.
i -= 1
else:
assert i < 0
break
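# Illustrative cross-check (not in the original file): for callables returning
# fresh iterators, the recursion-free flat_conjoin visits the same assignments
# in the same order as the recursive conjoin above.
def _demo_flat_conjoin():
    bits = [lambda: iter([0, 1])] * 3
    assert [list(v) for v in flat_conjoin(bits)] == \
           [list(v) for v in conjoin(bits)]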
# A conjoin-based N-Queens solver.
class Queens:
def __init__(self, n):
self.n = n
rangen = range(n)
# Assign a unique int to each column and diagonal.
# columns: n of those, range(n).
# NW-SE diagonals: 2n-1 of these, i-j unique and invariant along
# each, smallest i-j is 0-(n-1) = 1-n, so add n-1 to shift to 0-
# based.
# NE-SW diagonals: 2n-1 of these, i+j unique and invariant along
# each, smallest i+j is 0, largest is 2n-2.
# For each square, compute a bit vector of the columns and
# diagonals it covers, and for each row compute a function that
        # generates the possibilities for the columns in that row.
self.rowgenerators = []
for i in rangen:
rowuses = [(1L << j) | # column ordinal
(1L << (n + i-j + n-1)) | # NW-SE ordinal
(1L << (n + 2*n-1 + i+j)) # NE-SW ordinal
for j in rangen]
def rowgen(rowuses=rowuses):
for j in rangen:
uses = rowuses[j]
if uses & self.used == 0:
self.used |= uses
yield j
self.used &= ~uses
self.rowgenerators.append(rowgen)
# Generate solutions.
def solve(self):
self.used = 0
for row2col in conjoin(self.rowgenerators):
yield row2col
def printsolution(self, row2col):
n = self.n
assert n == len(row2col)
sep = "+" + "-+" * n
print sep
for i in range(n):
squares = [" " for j in range(n)]
squares[row2col[i]] = "Q"
print "|" + "|".join(squares) + "|"
print sep
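# Usage sketch (not in the original file): enumerate and pretty-print the two
# solutions of the 4-queens puzzle with the generator-based solver above.
def _demo_queens():
    q = Queens(4)
    for row2col in q.solve():
        q.printsolution(row2col)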
# A conjoin-based Knight's Tour solver. This is pretty sophisticated
# (e.g., when used with flat_conjoin above, and passing hard=1 to the
# constructor, a 200x200 Knight's Tour was found quickly -- note that we're
# creating 10s of thousands of generators then!), and is lengthy.
class Knights:
def __init__(self, m, n, hard=0):
self.m, self.n = m, n
# solve() will set up succs[i] to be a list of square #i's
# successors.
succs = self.succs = []
# Remove i0 from each of its successor's successor lists, i.e.
# successors can't go back to i0 again. Return 0 if we can
# detect this makes a solution impossible, else return 1.
def remove_from_successors(i0, len=len):
# If we remove all exits from a free square, we're dead:
# even if we move to it next, we can't leave it again.
# If we create a square with one exit, we must visit it next;
# else somebody else will have to visit it, and since there's
# only one adjacent, there won't be a way to leave it again.
            # Finally, if we create more than one free square with a
# single exit, we can only move to one of them next, leaving
# the other one a dead end.
ne0 = ne1 = 0
for i in succs[i0]:
s = succs[i]
s.remove(i0)
e = len(s)
if e == 0:
ne0 += 1
elif e == 1:
ne1 += 1
return ne0 == 0 and ne1 < 2
# Put i0 back in each of its successor's successor lists.
def add_to_successors(i0):
for i in succs[i0]:
succs[i].append(i0)
# Generate the first move.
def first():
if m < 1 or n < 1:
return
# Since we're looking for a cycle, it doesn't matter where we
# start. Starting in a corner makes the 2nd move easy.
corner = self.coords2index(0, 0)
remove_from_successors(corner)
self.lastij = corner
yield corner
add_to_successors(corner)
# Generate the second moves.
def second():
corner = self.coords2index(0, 0)
assert self.lastij == corner # i.e., we started in the corner
if m < 3 or n < 3:
return
assert len(succs[corner]) == 2
assert self.coords2index(1, 2) in succs[corner]
assert self.coords2index(2, 1) in succs[corner]
# Only two choices. Whichever we pick, the other must be the
# square picked on move m*n, as it's the only way to get back
# to (0, 0). Save its index in self.final so that moves before
# the last know it must be kept free.
for i, j in (1, 2), (2, 1):
this = self.coords2index(i, j)
final = self.coords2index(3-i, 3-j)
self.final = final
remove_from_successors(this)
succs[final].append(corner)
self.lastij = this
yield this
succs[final].remove(corner)
add_to_successors(this)
# Generate moves 3 thru m*n-1.
def advance(len=len):
# If some successor has only one exit, must take it.
# Else favor successors with fewer exits.
candidates = []
for i in succs[self.lastij]:
e = len(succs[i])
assert e > 0, "else remove_from_successors() pruning flawed"
if e == 1:
candidates = [(e, i)]
break
candidates.append((e, i))
else:
candidates.sort()
for e, i in candidates:
if i != self.final:
if remove_from_successors(i):
self.lastij = i
yield i
add_to_successors(i)
# Generate moves 3 thru m*n-1. Alternative version using a
# stronger (but more expensive) heuristic to order successors.
# Since the # of backtracking levels is m*n, a poor move early on
# can take eons to undo. Smallest square board for which this
# matters a lot is 52x52.
def advance_hard(vmid=(m-1)/2.0, hmid=(n-1)/2.0, len=len):
# If some successor has only one exit, must take it.
# Else favor successors with fewer exits.
# Break ties via max distance from board centerpoint (favor
# corners and edges whenever possible).
candidates = []
for i in succs[self.lastij]:
e = len(succs[i])
assert e > 0, "else remove_from_successors() pruning flawed"
if e == 1:
candidates = [(e, 0, i)]
break
i1, j1 = self.index2coords(i)
d = (i1 - vmid)**2 + (j1 - hmid)**2
candidates.append((e, -d, i))
else:
candidates.sort()
for e, d, i in candidates:
if i != self.final:
if remove_from_successors(i):
self.lastij = i
yield i
add_to_successors(i)
# Generate the last move.
def last():
assert self.final in succs[self.lastij]
yield self.final
if m*n < 4:
self.squaregenerators = [first]
else:
self.squaregenerators = [first, second] + \
[hard and advance_hard or advance] * (m*n - 3) + \
[last]
def coords2index(self, i, j):
assert 0 <= i < self.m
assert 0 <= j < self.n
return i * self.n + j
def index2coords(self, index):
assert 0 <= index < self.m * self.n
return divmod(index, self.n)
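# For example, on a 10x10 board coords2index(2, 3) == 23 and
# index2coords(23) == (2, 3); squares are numbered row by row.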
def _init_board(self):
succs = self.succs
del succs[:]
m, n = self.m, self.n
c2i = self.coords2index
offsets = [( 1, 2), ( 2, 1), ( 2, -1), ( 1, -2),
(-1, -2), (-2, -1), (-2, 1), (-1, 2)]
rangen = range(n)
for i in range(m):
for j in rangen:
s = [c2i(i+io, j+jo) for io, jo in offsets
if 0 <= i+io < m and
0 <= j+jo < n]
succs.append(s)
# Generate solutions.
def solve(self):
self._init_board()
for x in conjoin(self.squaregenerators):
yield x
def printsolution(self, x):
m, n = self.m, self.n
assert len(x) == m*n
w = len(str(m*n))
format = "%" + str(w) + "d"
squares = [[None] * n for i in range(m)]
k = 1
for i in x:
i1, j1 = self.index2coords(i)
squares[i1][j1] = format % k
k += 1
sep = "+" + ("-" * w + "+") * n
print sep
for i in range(m):
row = squares[i]
print "|" + "|".join(row) + "|"
print sep
conjoin_tests = """
Generate the 3-bit binary numbers in order. This illustrates dumbest-
possible use of conjoin, just to generate the full cross-product.
>>> for c in conjoin([lambda: iter((0, 1))] * 3):
... print c
[0, 0, 0]
[0, 0, 1]
[0, 1, 0]
[0, 1, 1]
[1, 0, 0]
[1, 0, 1]
[1, 1, 0]
[1, 1, 1]
For efficiency in typical backtracking apps, conjoin() yields the same list
object each time. So if you want to save away a full account of its
generated sequence, you need to copy its results.
>>> def gencopy(iterator):
... for x in iterator:
... yield x[:]
>>> for n in range(10):
... all = list(gencopy(conjoin([lambda: iter((0, 1))] * n)))
... print n, len(all), all[0] == [0] * n, all[-1] == [1] * n
0 1 True True
1 2 True True
2 4 True True
3 8 True True
4 16 True True
5 32 True True
6 64 True True
7 128 True True
8 256 True True
9 512 True True
And run an 8-queens solver.
>>> q = Queens(8)
>>> LIMIT = 2
>>> count = 0
>>> for row2col in q.solve():
... count += 1
... if count <= LIMIT:
... print "Solution", count
... q.printsolution(row2col)
Solution 1
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
Solution 2
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
>>> print count, "solutions in all."
92 solutions in all.
And run a Knight's Tour on a 10x10 board. Note that there are about
20,000 solutions even on a 6x6 board, so don't dare run this to exhaustion.
>>> k = Knights(10, 10)
>>> LIMIT = 2
>>> count = 0
>>> for x in k.solve():
... count += 1
... if count <= LIMIT:
... print "Solution", count
... k.printsolution(x)
... else:
... break
Solution 1
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 91| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 88| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 92| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 89| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
Solution 2
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 89| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 92| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 88| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 91| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
"""
weakref_tests = """\
Generators are weakly referencable:
>>> import weakref
>>> def gen():
... yield 'foo!'
...
>>> wr = weakref.ref(gen)
>>> wr() is gen
True
>>> p = weakref.proxy(gen)
Generator-iterators are weakly referencable as well:
>>> gi = gen()
>>> wr = weakref.ref(gi)
>>> wr() is gi
True
>>> p = weakref.proxy(gi)
>>> list(p)
['foo!']
"""
coroutine_tests = """\
Sending a value into a started generator:
>>> def f():
... print (yield 1)
... yield 2
>>> g = f()
>>> g.next()
1
>>> g.send(42)
42
2
Sending a value into a new generator produces a TypeError:
>>> f().send("foo")
Traceback (most recent call last):
...
TypeError: can't send non-None value to a just-started generator
Yield by itself yields None:
>>> def f(): yield
>>> list(f())
[None]
An obscene abuse of a yield expression within a generator expression:
>>> list((yield 21) for i in range(4))
[21, None, 21, None, 21, None, 21, None]
And a more sane, but still weird usage:
>>> def f(): list(i for i in [(yield 26)])
>>> type(f())
<type 'generator'>
A yield expression with augmented assignment.
>>> def coroutine(seq):
... count = 0
... while count < 200:
... count += yield
... seq.append(count)
>>> seq = []
>>> c = coroutine(seq)
>>> c.next()
>>> print seq
[]
>>> c.send(10)
>>> print seq
[10]
>>> c.send(10)
>>> print seq
[10, 20]
>>> c.send(10)
>>> print seq
[10, 20, 30]
Check some syntax errors for yield expressions:
>>> f=lambda: (yield 1),(yield 2)
Traceback (most recent call last):
...
File "<doctest test.test_generators.__test__.coroutine[21]>", line 1
SyntaxError: 'yield' outside function
>>> def f(): return lambda x=(yield): 1
Traceback (most recent call last):
...
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.coroutine[22]>, line 1)
>>> def f(): x = yield = y
Traceback (most recent call last):
...
File "<doctest test.test_generators.__test__.coroutine[23]>", line 1
SyntaxError: assignment to yield expression not possible
>>> def f(): (yield bar) = y
Traceback (most recent call last):
...
File "<doctest test.test_generators.__test__.coroutine[24]>", line 1
SyntaxError: can't assign to yield expression
>>> def f(): (yield bar) += y
Traceback (most recent call last):
...
File "<doctest test.test_generators.__test__.coroutine[25]>", line 1
SyntaxError: can't assign to yield expression
Now check some throw() conditions:
>>> def f():
... while True:
... try:
... print (yield)
... except ValueError,v:
... print "caught ValueError (%s)" % (v),
>>> import sys
>>> g = f()
>>> g.next()
>>> g.throw(ValueError) # type only
caught ValueError ()
>>> g.throw(ValueError("xyz")) # value only
caught ValueError (xyz)
>>> g.throw(ValueError, ValueError(1)) # value+matching type
caught ValueError (1)
>>> g.throw(ValueError, TypeError(1)) # mismatched type, rewrapped
caught ValueError (1)
>>> g.throw(ValueError, ValueError(1), None) # explicit None traceback
caught ValueError (1)
>>> g.throw(ValueError(1), "foo") # bad args
Traceback (most recent call last):
...
TypeError: instance exception may not have a separate value
>>> g.throw(ValueError, "foo", 23) # bad args
Traceback (most recent call last):
...
TypeError: throw() third argument must be a traceback object
>>> def throw(g,exc):
... try:
... raise exc
... except:
... g.throw(*sys.exc_info())
>>> throw(g,ValueError) # do it with traceback included
caught ValueError ()
>>> g.send(1)
1
>>> throw(g,TypeError) # terminate the generator
Traceback (most recent call last):
...
TypeError
>>> print g.gi_frame
None
>>> g.send(2)
Traceback (most recent call last):
...
StopIteration
>>> g.throw(ValueError,6) # throw on closed generator
Traceback (most recent call last):
...
ValueError: 6
>>> f().throw(ValueError,7) # throw on just-opened generator
Traceback (most recent call last):
...
ValueError: 7
>>> f().throw("abc") # throw on just-opened generator
Traceback (most recent call last):
...
TypeError: exceptions must be classes, or instances, not str
Now let's try closing a generator:
>>> def f():
... try: yield
... except GeneratorExit:
... print "exiting"
>>> g = f()
>>> g.next()
>>> g.close()
exiting
>>> g.close() # should be no-op now
>>> f().close() # close on just-opened generator should be fine
>>> def f(): yield # an even simpler generator
>>> f().close() # close before opening
>>> g = f()
>>> g.next()
>>> g.close() # close normally
And finalization:
>>> def f():
... try: yield
... finally:
... print "exiting"
>>> g = f()
>>> g.next()
>>> del g
exiting
>>> class context(object):
... def __enter__(self): pass
... def __exit__(self, *args): print 'exiting'
>>> def f():
... with context():
... yield
>>> g = f()
>>> g.next()
>>> del g
exiting
GeneratorExit is not caught by except Exception:
>>> def f():
... try: yield
... except Exception: print 'except'
... finally: print 'finally'
>>> g = f()
>>> g.next()
>>> del g
finally
Now let's try some ill-behaved generators:
>>> def f():
... try: yield
... except GeneratorExit:
... yield "foo!"
>>> g = f()
>>> g.next()
>>> g.close()
Traceback (most recent call last):
...
RuntimeError: generator ignored GeneratorExit
>>> g.close()
Our ill-behaved code should be invoked during GC:
>>> import sys, StringIO
>>> old, sys.stderr = sys.stderr, StringIO.StringIO()
>>> g = f()
>>> g.next()
>>> del g
>>> sys.stderr.getvalue().startswith(
... "Exception RuntimeError: 'generator ignored GeneratorExit' in "
... )
True
>>> sys.stderr = old
And errors thrown during closing should propagate:
>>> def f():
... try: yield
... except GeneratorExit:
... raise TypeError("fie!")
>>> g = f()
>>> g.next()
>>> g.close()
Traceback (most recent call last):
...
TypeError: fie!
Ensure that various yield expression constructs make their
enclosing function a generator:
>>> def f(): x += yield
>>> type(f())
<type 'generator'>
>>> def f(): x = yield
>>> type(f())
<type 'generator'>
>>> def f(): lambda x=(yield): 1
>>> type(f())
<type 'generator'>
>>> def f(): x=(i for i in (yield) if (yield))
>>> type(f())
<type 'generator'>
>>> def f(d): d[(yield "a")] = d[(yield "b")] = 27
>>> data = [1,2]
>>> g = f(data)
>>> type(g)
<type 'generator'>
>>> g.send(None)
'a'
>>> data
[1, 2]
>>> g.send(0)
'b'
>>> data
[27, 2]
>>> try: g.send(1)
... except StopIteration: pass
>>> data
[27, 27]
"""
refleaks_tests = """
Prior to adding cycle-GC support to itertools.tee, this code would leak
references. We add it to the standard suite so the routine refleak-tests
would trigger if it starts being uncleanable again.
>>> import itertools
>>> def leak():
... class gen:
... def __iter__(self):
... return self
... def next(self):
... return self.item
... g = gen()
... head, tail = itertools.tee(g)
... g.item = head
... return head
>>> it = leak()
Make sure to also test the involvement of the tee-internal teedataobject,
which stores returned items.
>>> item = it.next()
This test leaked at one point due to generator finalization/destruction.
It was copied from Lib/test/leakers/test_generator_cycle.py before the file
was removed.
>>> def leak():
... def gen():
... while True:
... yield g
... g = gen()
>>> leak()
This test isn't really generator related, but rather exception-in-cleanup
related. The coroutine tests (above) just happen to cause an exception in
the generator's __del__ (tp_del) method. We can also test for this
explicitly, without generators. We do have to redirect stderr to avoid
printing warnings and to doublecheck that we actually tested what we wanted
to test.
>>> import sys, StringIO
>>> old = sys.stderr
>>> try:
... sys.stderr = StringIO.StringIO()
... class Leaker:
... def __del__(self):
... raise RuntimeError
...
... l = Leaker()
... del l
... err = sys.stderr.getvalue().strip()
... err.startswith(
... "Exception RuntimeError: RuntimeError() in <"
... )
... err.endswith("> ignored")
... len(err.splitlines())
... finally:
... sys.stderr = old
True
True
1
These refleak tests should perhaps be in a testfile of their own,
test_generators just happened to be the test that drew these out.
"""
__test__ = {"tut": tutorial_tests,
"pep": pep_tests,
"email": email_tests,
"fun": fun_tests,
"syntax": syntax_tests,
"conjoin": conjoin_tests,
"weakref": weakref_tests,
"coroutine": coroutine_tests,
"refleaks": refleaks_tests,
}
# Magic test name that regrtest.py invokes *after* importing this module.
# This worms around a bootstrap problem.
# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
# so this works as expected in both ways of running regrtest.
def test_main(verbose=None):
from test import test_support, test_generators
test_support.run_doctest(test_generators, verbose)
# This part isn't needed for regrtest, but for running the test directly.
if __name__ == "__main__":
test_main(1)
|
apache-2.0
|
lokeshjindal15/gem5_transform
|
tests/configs/pc-simple-timing.py
|
52
|
2346
|
# Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.objects import *
from x86_generic import *
root = LinuxX86FSSystemUniprocessor(mem_mode='timing',
mem_class=DDR3_1600_x64,
cpu_class=TimingSimpleCPU).create_root()
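# A hypothetical sketch of the analogous atomic-mode configuration, assuming
# the same x86_generic helper accepts it (not exercised by this test):
# root = LinuxX86FSSystemUniprocessor(mem_mode='atomic',
#                                     mem_class=SimpleMemory,
#                                     cpu_class=AtomicSimpleCPU).create_root()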
|
bsd-3-clause
|
ruibarreira/linuxtrail
|
usr/lib/python2.7/encodings/cp500.py
|
593
|
13377
|
""" Python Character Mapping Codec cp500 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP500.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp500',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\xa0' # 0x41 -> NO-BREAK SPACE
u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
u'[' # 0x4A -> LEFT SQUARE BRACKET
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'!' # 0x4F -> EXCLAMATION MARK
u'&' # 0x50 -> AMPERSAND
u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
u']' # 0x5A -> RIGHT SQUARE BRACKET
u'$' # 0x5B -> DOLLAR SIGN
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'^' # 0x5F -> CIRCUMFLEX ACCENT
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xa6' # 0x6A -> BROKEN BAR
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
u'`' # 0x79 -> GRAVE ACCENT
u':' # 0x7A -> COLON
u'#' # 0x7B -> NUMBER SIGN
u'@' # 0x7C -> COMMERCIAL AT
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'"' # 0x7F -> QUOTATION MARK
u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
u'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
u'\xb8' # 0x9D -> CEDILLA
u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
u'\xa4' # 0x9F -> CURRENCY SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'~' # 0xA1 -> TILDE
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
u'\xbf' # 0xAB -> INVERTED QUESTION MARK
u'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
u'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
u'\xae' # 0xAF -> REGISTERED SIGN
u'\xa2' # 0xB0 -> CENT SIGN
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'\xac' # 0xBA -> NOT SIGN
u'|' # 0xBB -> VERTICAL LINE
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'{' # 0xC0 -> LEFT CURLY BRACKET
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
u'}' # 0xD0 -> RIGHT CURLY BRACKET
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\\' # 0xE0 -> REVERSE SOLIDUS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
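# Usage sketch (values can be checked against the tables above):
# u'caf\xe9'.encode('cp500') == '\x83\x81\x86Q'
# '\x83\x81\x86Q'.decode('cp500') == u'caf\xe9'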
|
gpl-3.0
|
Senseg/Py4A
|
python3-alpha/extra_modules/pyxmpp2/mainloop/glib.py
|
46
|
12001
|
#
# (C) Copyright 2011 Jacek Konieczny <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""GLib main loop integration.
"""
__docformat__ = "restructuredtext en"
import inspect
import sys
import logging
import glib
import functools
from .interfaces import HandlerReady, PrepareAgain
from .base import MainLoopBase
logger = logging.getLogger("pyxmpp2.mainloop.glib")
def hold_exception(method):
"""Decorator for glib callback methods of GLibMainLoop used to store the
exception raised."""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
"""Wrapper for methods decorated with `hold_exception`."""
# pylint: disable=W0703
try:
return method(self, *args, **kwargs)
except Exception:
if self.exc_info:
raise
if not self._stack:
logger.debug('@hold_exception wrapped method {0!r} called'
' from outside of the main loop'.format(method))
raise
self.exc_info = sys.exc_info()
logger.debug("exception in glib main loop callback:",
exc_info = self.exc_info)
# pylint: disable=W0212
main_loop = self._stack[-1]
if main_loop is not None:
main_loop.quit()
return False
return wrapper
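# In effect a decorated callback behaves roughly like the sketch below; the
# real wrapper above additionally handles nested/outside-of-loop calls and
# logging (illustrative only):
#
# def callback(self, *args):
#     try:
#         return real_callback(self, *args)
#     except Exception:
#         self.exc_info = sys.exc_info()  # re-raised later by loop()/loop_iteration()
#         self._stack[-1].quit()          # stop the innermost glib main loop
#         return False                    # unregister the glib source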
class GLibMainLoop(MainLoopBase):
"""Wrapper for the GLib main event loop.
"""
# pylint: disable=R0902
def __init__(self, settings = None, handlers = None):
self._unprepared_handlers = {}
self._io_sources = {}
self._timer_sources = {}
self._prepare_sources = {}
self._stack = []
self.exc_info = None
self._anything_done = False
self._unprepared_pending = set()
MainLoopBase.__init__(self, settings, handlers)
def __del__(self):
for tag in list(self._prepare_sources.values()):
glib.source_remove(tag)
for tag in list(self._io_sources.values()):
glib.source_remove(tag)
for tag in list(self._timer_sources.values()):
glib.source_remove(tag)
def _add_io_handler(self, handler):
"""Add an I/O handler to the loop."""
self._unprepared_handlers[handler] = None
self._configure_io_handler(handler)
def _configure_io_handler(self, handler):
"""Register an io-handler with the glib main loop."""
if self.check_events():
return
if handler in self._unprepared_handlers:
old_fileno = self._unprepared_handlers[handler]
prepared = self._prepare_io_handler(handler)
else:
old_fileno = None
prepared = True
fileno = handler.fileno()
if old_fileno is not None and fileno != old_fileno:
tag = self._io_sources.pop(handler, None)
if tag is not None:
glib.source_remove(tag)
if not prepared:
self._unprepared_handlers[handler] = fileno
if fileno is None:
logger.debug(" {0!r}.fileno() is None, not polling"
.format(handler))
return
events = 0
if handler.is_readable():
logger.debug(" {0!r} readable".format(handler))
events |= glib.IO_IN | glib.IO_ERR
if handler.is_writable():
logger.debug(" {0!r} writable".format(handler))
events |= glib.IO_OUT | glib.IO_HUP | glib.IO_ERR
if events:
logger.debug(" registering {0!r} handler fileno {1} for"
" events {2}".format(handler, fileno, events))
glib.io_add_watch(fileno, events, self._io_callback, handler)
@hold_exception
def _io_callback(self, fileno, condition, handler):
"""Called by glib on I/O event."""
# pylint: disable=W0613
self._anything_done = True
logger.debug("_io_callback called for {0!r}, cond: {1}".format(handler,
condition))
try:
if condition & glib.IO_HUP:
handler.handle_hup()
if condition & glib.IO_IN:
handler.handle_read()
elif condition & glib.IO_ERR:
handler.handle_err()
if condition & glib.IO_OUT:
handler.handle_write()
if self.check_events():
return False
finally:
self._io_sources.pop(handler, None)
self._configure_io_handler(handler)
self._prepare_pending()
return False
def _prepare_io_handler(self, handler):
"""Call the `interfaces.IOHandler.prepare` method and
remove the handler from unprepared handler list when done.
"""
logger.debug(" preparing handler: {0!r}".format(handler))
self._unprepared_pending.discard(handler)
ret = handler.prepare()
logger.debug(" prepare result: {0!r}".format(ret))
if isinstance(ret, HandlerReady):
del self._unprepared_handlers[handler]
prepared = True
elif isinstance(ret, PrepareAgain):
if ret.timeout == 0:
tag = glib.idle_add(self._prepare_io_handler_cb, handler)
self._prepare_sources[handler] = tag
elif ret.timeout is not None:
timeout = ret.timeout
timeout = int(timeout * 1000)
if not timeout:
timeout = 1
tag = glib.timeout_add(timeout, self._prepare_io_handler_cb,
handler)
self._prepare_sources[handler] = tag
else:
self._unprepared_pending.add(handler)
prepared = False
else:
raise TypeError("Unexpected result type from prepare()")
return prepared
def _prepare_pending(self):
"""Prepare pending handlers.
"""
if not self._unprepared_pending:
return
for handler in list(self._unprepared_pending):
self._configure_io_handler(handler)
self.check_events()
@hold_exception
def _prepare_io_handler_cb(self, handler):
"""Timeout callback called to try prepare an IOHandler again."""
self._anything_done = True
logger.debug("_prepar_io_handler_cb called for {0!r}".format(handler))
self._configure_io_handler(handler)
self._prepare_sources.pop(handler, None)
return False
def _remove_io_handler(self, handler):
"""Remove an i/o-handler."""
if handler in self._unprepared_handlers:
del self._unprepared_handlers[handler]
tag = self._prepare_sources.pop(handler, None)
if tag is not None:
glib.source_remove(tag)
tag = self._io_sources.pop(handler, None)
if tag is not None:
glib.source_remove(tag)
def _add_timeout_handler(self, handler):
"""Add a `TimeoutHandler` to the main loop."""
# pylint: disable=W0212
for dummy, method in inspect.getmembers(handler, callable):
if not hasattr(method, "_pyxmpp_timeout"):
continue
tag = glib.timeout_add(int(method._pyxmpp_timeout * 1000),
self._timeout_cb, method)
self._timer_sources[method] = tag
def _remove_timeout_handler(self, handler):
"""Remove `TimeoutHandler` from the main loop."""
for dummy, method in inspect.getmembers(handler, callable):
if not hasattr(method, "_pyxmpp_timeout"):
continue
tag = self._timer_sources.pop(method, None)
if tag is not None:
glib.source_remove(tag)
@hold_exception
def _timeout_cb(self, method):
"""Call the timeout handler due.
"""
self._anything_done = True
logger.debug("_timeout_cb() called for: {0!r}".format(method))
result = method()
# pylint: disable=W0212
rec = method._pyxmpp_recurring
if rec:
self._prepare_pending()
return True
if rec is None and result is not None:
logger.debug(" auto-recurring, restarting in {0} s"
.format(result))
tag = glib.timeout_add(int(result * 1000), self._timeout_cb, method)
self._timer_sources[method] = tag
else:
self._timer_sources.pop(method, None)
self._prepare_pending()
return False
def loop(self, timeout = None):
main_loop = glib.MainLoop()
self._stack.append(main_loop)
try:
self._prepare_pending()
if timeout is None:
logger.debug("Calling main_loop.run()")
main_loop.run()
logger.debug("..main_loop.run() exited")
else:
tag = glib.timeout_add(int(timeout * 1000),
self._loop_timeout_cb, main_loop)
try:
logger.debug("Calling main_loop.run()")
main_loop.run()
logger.debug("..main_loop.run() exited")
finally:
glib.source_remove(tag)
finally:
self._stack.pop()
if self.exc_info:
(exc_type, exc_value, ext_stack), self.exc_info = (self.exc_info,
None)
raise exc_type(exc_value).with_traceback(ext_stack)
def loop_iteration(self, timeout = 1):
self._stack.append(None)
try:
if self.check_events():
return
self._prepare_pending()
def dummy_cb():
"Dummy callback function to force event if none are pending."
self._anything_done = True
logger.debug("Dummy timeout func called")
return False
if not glib.main_context_default().pending():
glib.timeout_add(int(timeout * 1000), dummy_cb)
self._anything_done = False
logger.debug("Calling main_context_default().iteration()")
while not self._anything_done:
glib.main_context_default().iteration(True)
logger.debug("..main_context_default().iteration() exited")
finally:
self._stack.pop()
if self.exc_info:
(exc_type, exc_value, ext_stack), self.exc_info = (self.exc_info,
None)
raise exc_type(exc_value).with_traceback(ext_stack)
def _loop_timeout_cb(self, main_loop):
"""Stops the loop after the time specified in the `loop` call.
"""
self._anything_done = True
logger.debug("_loop_timeout_cb() called")
main_loop.quit()
def check_events(self):
result = MainLoopBase.check_events(self)
if result:
main_loop = self._stack[-1]
if main_loop:
main_loop.quit()
return result
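# Minimal usage sketch (illustrative; assumes `settings` and an IOHandler or
# TimeoutHandler instance as expected by MainLoopBase -- the names here are
# hypothetical):
# main_loop = GLibMainLoop(settings, [my_handler])
# main_loop.loop(timeout = 60)   # process events until stopped or 60 s elapse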
|
apache-2.0
|
easycoin-core/Easycoin
|
qa/rpc-tests/p2p-segwit.py
|
9
|
92403
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, WITNESS_COMMITMENT_HEADER
from test_framework.key import CECKey, CPubKey
import time
import random
from binascii import hexlify
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_PERIOD = 144
VB_ACTIVATION_THRESHOLD = 108
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 80000
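# A block signalling for segwit therefore sets
# nVersion = VB_TOP_BITS | (1 << VB_WITNESS_BIT) == 0x20000002, which is the
# version build_next_block() is given in the activation tests below.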
'''
SegWit p2p test.
'''
# Calculate the virtual size of a witness block:
# (base + witness/4)
def get_virtual_size(witness_block):
base_size = len(witness_block.serialize())
total_size = len(witness_block.serialize(with_witness=True))
# the "+3" is so we round up
vsize = int((3*base_size + total_size + 3)/4)
return vsize
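# For example, a block whose base serialization is 1000 bytes and whose
# witness-included serialization is 1601 bytes has
# vsize == int((3*1000 + 1601 + 3)/4) == 1151, i.e. 1000 + 601/4 rounded up.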
# Note: we can reduce code by using SingleNodeConnCB (in master, not 0.12)
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong(0)
self.sleep_time = 0.05
self.getdataset = set()
self.last_reject = None
def add_connection(self, conn):
self.connection = conn
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_inv(self, conn, message):
self.last_inv = message
def on_block(self, conn, message):
self.last_block = message.block
self.last_block.calc_sha256()
def on_getdata(self, conn, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
self.last_getdata = message
def on_pong(self, conn, message):
self.last_pong = message
def on_reject(self, conn, message):
self.last_reject = message
#print (message)
# Syncing helpers
def sync(self, test_function, timeout=60):
while timeout > 0:
with mininode_lock:
if test_function():
return
time.sleep(self.sleep_time)
timeout -= self.sleep_time
raise AssertionError("Sync failed to complete")
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_pong.nonce == self.ping_counter
self.sync(test_function, timeout)
self.ping_counter += 1
return
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_block != None and self.last_block.sha256 == blockhash
self.sync(test_function, timeout)
return
def wait_for_getdata(self, timeout=60):
test_function = lambda: self.last_getdata != None
self.sync(test_function, timeout)
def wait_for_inv(self, expected_inv, timeout=60):
test_function = lambda: self.last_inv != expected_inv
self.sync(test_function, timeout)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60):
with mininode_lock:
self.last_getdata = None
self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
self.wait_for_getdata(timeout)
return
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with mininode_lock:
self.last_getdata = None
if use_header:
msg = msg_headers()
msg.headers = [ CBlockHeader(block) ]
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
self.wait_for_getdata()
return
def announce_block(self, block, use_header):
with mininode_lock:
self.last_getdata = None
if use_header:
msg = msg_headers()
msg.headers = [ CBlockHeader(block) ]
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
def request_block(self, blockhash, inv_type, timeout=60):
with mininode_lock:
self.last_block = None
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
return self.last_block
def test_transaction_acceptance(self, tx, with_witness, accepted, reason=None):
tx_message = msg_tx(tx)
if with_witness:
tx_message = msg_witness_tx(tx)
self.send_message(tx_message)
self.sync_with_ping()
assert_equal(tx.hash in self.connection.rpc.getrawmempool(), accepted)
if (reason != None and not accepted):
# Check the rejection reason as well.
with mininode_lock:
assert_equal(self.last_reject.reason, reason)
# Test whether a witness block had the correct effect on the tip
def test_witness_block(self, block, accepted, with_witness=True):
if with_witness:
self.send_message(msg_witness_block(block))
else:
self.send_message(msg_block(block))
self.sync_with_ping()
assert_equal(self.connection.rpc.getbestblockhash() == block.hash, accepted)
# Used to keep track of anyone-can-spend outputs that we can use in the tests
class UTXO(object):
def __init__(self, sha256, n, nValue):
self.sha256 = sha256
self.n = n
self.nValue = nValue
# Helper for getting the script associated with a P2PKH
def GetP2PKHScript(pubkeyhash):
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
# Add signature for a P2PK witness program.
def sign_P2PK_witness_input(script, txTo, inIdx, hashtype, value, key):
tx_hash = SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, value)
signature = key.sign(tx_hash) + chr(hashtype).encode('latin-1')
txTo.wit.vtxinwit[inIdx].scriptWitness.stack = [signature, script]
txTo.rehash()
class SegWitTest(BitcoinTestFramework):
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-logtimemicros=1", "-whitelist=127.0.0.1"]))
# Start a node for testing IsStandard rules.
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-logtimemicros=1", "-whitelist=127.0.0.1", "-acceptnonstdtxn=0"]))
connect_nodes(self.nodes[0], 1)
# Disable segwit's bip9 parameter to simulate upgrading after activation.
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-bip9params=segwit:0:0"]))
connect_nodes(self.nodes[0], 2)
''' Helpers '''
# Build a block on top of node0's tip.
def build_next_block(self, nVersion=4):
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = nVersion
block.rehash()
return block
# Adds list of transactions to block, adds witness commitment, then solves.
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
return
''' Individual tests '''
def test_witness_services(self):
print("\tVerifying NODE_WITNESS service bit")
assert((self.test_node.connection.nServices & NODE_WITNESS) != 0)
# See if sending a regular transaction works, and create a utxo
# to use in later tests.
def test_non_witness_transaction(self):
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
print("\tTesting non-witness transaction")
block = self.build_next_block(nVersion=1)
block.solve()
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping() # make sure the block was processed
txid = block.vtx[0].sha256
self.nodes[0].generate(99) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(49*100000000, CScript([OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
self.test_node.send_message(msg_witness_tx(tx))
self.test_node.sync_with_ping() # make sure the tx was processed
assert(tx.hash in self.nodes[0].getrawmempool())
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, 49*100000000))
self.nodes[0].generate(1)
# Verify that blocks with witnesses are rejected before activation.
def test_unnecessary_witness_before_segwit_activation(self):
print("\tTesting behavior of unnecessary witnesses")
# For now, rely on earlier tests to have created at least one utxo for
# us to use
assert(len(self.utxo) > 0)
assert(get_bip9_status(self.nodes[0], 'segwit')['status'] != 'active')
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert(tx.sha256 != tx.calc_sha256(with_witness=True))
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(nVersion=(VB_TOP_BITS|(1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
self.test_node.test_witness_block(block, accepted=False)
# TODO: fix synchronization so we can test reject reason
# Right now, bitcoind defers sending reject messages for blocks,
# which makes synchronizing on the reject reason here difficult.
#assert_equal(self.test_node.last_reject.reason, "unexpected-witness")
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping()
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
sync_blocks(self.nodes)
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# Now check that unnecessary witnesses can't be used to blind a node
# to a transaction, eg by violating standardness checks.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, scriptPubKey))
tx2.rehash()
self.test_node.test_transaction_acceptance(tx2, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness before
# segwit activation doesn't blind a node to a transaction. Transactions
# rejected for having a witness before segwit activation shouldn't be added
# to the rejection cache.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-100000, scriptPubKey))
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [b'a'*400000]
tx3.rehash()
# Note that this should be rejected for the premature witness reason,
# rather than a policy check, since segwit hasn't activated yet.
self.std_node.test_transaction_acceptance(tx3, True, False, b'no-witness-yet')
# If we send without witness, it should be accepted.
self.std_node.test_transaction_acceptance(tx3, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), CScript([p2sh_program])))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-100000, CScript([OP_TRUE])))
tx4.rehash()
self.test_node.test_transaction_acceptance(tx3, False, True)
self.test_node.test_transaction_acceptance(tx4, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx4.sha256, 0, tx4.vout[0].nValue))
# Mine enough blocks for segwit's vb state to be 'started'.
def advance_to_segwit_started(self):
height = self.nodes[0].getblockcount()
# Will need to rewrite the tests here if we are past the first period
assert(height < VB_PERIOD - 1)
# Genesis block is 'defined'.
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'defined')
# Advance to end of period, status should now be 'started'
self.nodes[0].generate(VB_PERIOD-height-1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Mine enough blocks to lock in segwit, but don't activate.
# TODO: we could verify that lockin only happens at the right threshold of
# signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_lockin(self):
height = self.nodes[0].getblockcount()
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Advance to end of period, and verify lock-in happens at the end
self.nodes[0].generate(VB_PERIOD-1)
height = self.nodes[0].getblockcount()
assert((height % VB_PERIOD) == VB_PERIOD - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
# Mine enough blocks to activate segwit.
# TODO: we could verify that activation only happens at the right threshold
# of signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_active(self):
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
height = self.nodes[0].getblockcount()
self.nodes[0].generate(VB_PERIOD - (height%VB_PERIOD) - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active')
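# Taken together, the three helpers above walk the BIP9 'segwit' deployment
# through defined -> started -> locked_in -> active, one VB_PERIOD (144 block)
# boundary at a time.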
# This test can only be run after segwit has activated
def test_witness_commitments(self):
print("\tTesting witness commitments")
# First try a correct witness commitment.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Test the test -- witness serialization should be different
assert(msg_witness_block(block).serialize() != msg_block(block).serialize())
# This empty block should be valid.
self.test_node.test_witness_block(block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
add_witness_commitment(block_2, nonce=28)
block_2.solve()
# The commitment should have changed!
assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1])
# This should also be valid.
self.test_node.test_witness_block(block_2, accepted=True)
# Now test commitments with actual transactions
assert (len(self.utxo) > 0)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# Let's construct a witness program
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
# tx2 will spend tx1, and send back to a regular anyone-can-spend address
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
# Add an extra OP_RETURN output that matches the witness commitment template,
# even though it has extra data after the incorrect commitment.
# This block should fail.
block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
block_3.solve()
self.test_node.test_witness_block(block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
# This should succeed (nValue shouldn't affect finding the
# witness commitment).
add_witness_commitment(block_3, nonce=0)
block_3.vtx[0].vout[0].nValue -= 1
block_3.vtx[0].vout[-1].nValue += 1
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
assert(len(block_3.vtx[0].vout) == 4) # 3 OP_returns
block_3.solve()
self.test_node.test_witness_block(block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
self.test_node.test_witness_block(block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
def test_block_malleability(self):
print("\tTesting witness block malleability")
# Make sure that a block that has too big a virtual size
# because of a too-large coinbase witness is not permanently
# marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a'*5000000)
assert(get_virtual_size(block) > MAX_BLOCK_SIZE)
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert(get_virtual_size(block) < MAX_BLOCK_SIZE)
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() == block.hash)
# Now make sure that malleating the witness nonce doesn't
# result in a block permanently marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Change the nonce -- should not cause the block to be permanently
# failed
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(1) ]
self.test_node.test_witness_block(block, accepted=False)
# Changing the witness nonce doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(0) ]
self.test_node.test_witness_block(block, accepted=True)
def test_witness_block_size(self):
print("\tTesting witness block size limit")
# TODO: Test that non-witness carrying blocks can't exceed 1MB
# Skipping this test for now; this is covered in p2p-fullblocktest.py
# Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
block = self.build_next_block()
assert(len(self.utxo) > 0)
# Create a P2WSH transaction.
# The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
NUM_DROPS = 200 # 201 max ops per script!
NUM_OUTPUTS = 50
witness_program = CScript([OP_2DROP]*NUM_DROPS + [OP_TRUE])
witness_hash = uint256_from_str(sha256(witness_program))
scriptPubKey = CScript([OP_0, ser_uint256(witness_hash)])
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value/NUM_OUTPUTS)
for i in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, scriptPubKey))
parent_tx.vout[0].nValue -= 50000
assert(parent_tx.vout[0].nValue > 0)
parent_tx.rehash()
child_tx = CTransaction()
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for i in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a'*195]*(2*NUM_DROPS) + [witness_program]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
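# Weight counts each base byte four times but each witness byte only once, so
# one vsize unit corresponds to four witness bytes; multiplying the vsize
# shortfall by 4 gives the extra witness bytes needed to reach the limit
# (the loop below overshoots by one byte to exceed it).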
additional_bytes = (MAX_BLOCK_SIZE - vsize)*4
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_SIZE+1
extra_bytes = min(additional_bytes+1, 55)
block.vtx[-1].wit.vtxinwit[int(i/(2*NUM_DROPS))].scriptWitness.stack[i%(2*NUM_DROPS)] = b'a'*(195+extra_bytes)
additional_bytes -= extra_bytes
i += 1
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert(len(block.serialize(True)) > 2*1024*1024)
self.test_node.test_witness_block(block, accepted=False)
# Now resize the second transaction to make the block fit.
cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(cur_length-1)
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
assert(get_virtual_size(block) == MAX_BLOCK_SIZE)
self.test_node.test_witness_block(block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
# submitblock will try to add the nonce automatically, so that mining
# software doesn't need to worry about doing so itself.
def test_submit_block(self):
block = self.build_next_block()
# Try using a custom nonce and then don't supply it.
# This shouldn't possibly work.
add_witness_commitment(block, nonce=1)
block.vtx[0].wit = CTxWitness() # drop the nonce
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
# Now redo commitment with the standard nonce, but let bitcoind fill it in.
add_witness_commitment(block, nonce=0)
block.vtx[0].wit = CTxWitness()
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# This time, add a tx with non-empty witness, but don't supply
# the commitment.
block_2 = self.build_next_block()
add_witness_commitment(block_2)
block_2.solve()
# Drop commitment and nonce -- submitblock should not fill in.
block_2.vtx[0].vout.pop()
block_2.vtx[0].wit = CTxWitness()
self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True)))
# Tip should not advance!
assert(self.nodes[0].getbestblockhash() != block_2.hash)
# Consensus tests of extra witness data in a transaction.
def test_extra_witness_data(self):
print("\tTesting extra witness data in tx")
assert(len(self.utxo) > 0)
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-2000, scriptPubKey))
tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
tx.rehash()
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
self.test_node.test_witness_block(block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
block.vtx[1].vin[0].scriptSig = CScript([OP_0])
block.vtx[1].rehash()
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
tx2.wit.vtxinwit[0].scriptWitness.stack = [ CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program ]
tx2.wit.vtxinwit[1].scriptWitness.stack = [ CScript([OP_TRUE]) ]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
self.test_node.test_witness_block(block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
tx2.vin[1].scriptSig = CScript([OP_TRUE])
tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
tx2.wit.vtxinwit[1].scriptWitness.stack = []
tx2.rehash()
add_witness_commitment(block)
block.solve()
# This has extra signature data for a witness input, so it should fail.
self.test_node.test_witness_block(block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
tx2.vin[0].scriptSig = b""
tx2.rehash()
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_max_witness_push_length(self):
''' Should only allow up to 520 byte pushes in witness stack '''
print("\tTesting maximum witness push size")
MAX_SCRIPT_ELEMENT_SIZE = 520
assert(len(self.utxo))
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
# First try a 521-byte stack element
tx2.wit.vtxinwit[0].scriptWitness.stack = [ b'a'*(MAX_SCRIPT_ELEMENT_SIZE+1), witness_program ]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_max_witness_program_length(self):
# Witness programs can be arbitrarily long when committed to in an output,
# but a program greater than 10k bytes cannot be successfully spent.
print("\tTesting maximum witness program length")
assert(len(self.utxo))
MAX_PROGRAM_LENGTH = 10000
# This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
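# (Each 520-byte push costs 3 opcode bytes -- OP_PUSHDATA2 plus a 2-byte
# length -- so 19*(520+3) = 9937 bytes, and 63 OP_DROPs plus OP_TRUE add 64
# opcode bytes, for a total of 10001 = MAX_PROGRAM_LENGTH + 1.)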
long_witness_program = CScript([b'a'*520]*19 + [OP_DROP]*63 + [OP_TRUE])
assert(len(long_witness_program) == MAX_PROGRAM_LENGTH+1)
long_witness_hash = sha256(long_witness_program)
long_scriptPubKey = CScript([OP_0, long_witness_hash])
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, long_scriptPubKey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*44 + [long_witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a'*520]*19 + [OP_DROP]*62 + [OP_TRUE])
assert(len(witness_program) == MAX_PROGRAM_LENGTH)
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx.vout[0] = CTxOut(tx.vout[0].nValue, scriptPubKey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*43 + [witness_program]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_witness_input_length(self):
''' Ensure that vin length must match vtxinwit length '''
print("\tTesting witness input length")
assert(len(self.utxo))
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
nValue = self.utxo[0].nValue
for i in range(10):
tx.vout.append(CTxOut(int(nValue/10), scriptPubKey))
tx.vout[0].nValue -= 1000
assert(tx.vout[0].nValue >= 0)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
# Try various ways to spend tx that should all break.
# This "broken" transaction serializer will not normalize
# the length of vtxinwit.
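# BIP144 layout: nVersion | marker (an empty vector, i.e. 0x00) | flag (0x01)
# | vin | vout | witness data | nLockTime. Unlike the normal serializer, this
# one never pads or truncates vtxinwit to len(vin), so the witness count it
# writes can disagree with the number of inputs.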
class BrokenCTransaction(CTransaction):
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
tx2 = BrokenCTransaction()
for i in range(10):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.vout.append(CTxOut(nValue-3000, CScript([OP_TRUE])))
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
tx2.wit.vtxinwit.pop()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
tx2.wit.vtxinwit[5].scriptWitness.stack = [ witness_program ]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_witness_tx_relay_before_segwit_activation(self):
print("\tTesting relay of witness transactions")
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected for premature-witness, but should
# not be added to recently rejected list.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
assert(self.old_node.last_getdata.inv[0].type == 1)
# Since we haven't delivered the tx yet, inv'ing the same tx from
# the witness-enabled peer ought not result in a getdata.
try:
self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2)
print("Error: duplicate tx getdata!")
assert(False)
except AssertionError as e:
pass
# Delivering this transaction with witness should fail (no matter who
# it's from)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.old_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx_hash, 0, tx_value))
# After segwit activates, verify that mempool:
# - rejects transactions with unnecessary/extra witnesses
# - accepts transactions with valid witnesses
# and that witness transactions are relayed to non-upgraded peers.
def test_tx_relay_after_segwit_activation(self):
print("\tTesting relay of witness transactions")
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected because we can't use a witness
# when spending a non-witness output.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, scriptPubKey))
tx2.rehash()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
# Add too-large for IsStandard witness and check that it does not enter reject filter
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
witness_program2 = CScript([b'a'*400000])
tx3.vout.append(CTxOut(tx2.vout[0].nValue-100000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
# The node should not be "blinded" to the transaction (it must not enter the
# reject filter), so a second announcement still results in a getdata.
self.std_node.announce_tx_and_wait_for_getdata(tx3)
self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue-100000, CScript([OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program ]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv(CInv(1, tx2.sha256)) # wait until tx2 was inv'ed
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv(CInv(1, tx3.sha256))
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
vsize = (len(tx3.serialize_with_witness()) + 3*len(tx3.serialize_without_witness()) + 3) / 4
assert_equal(raw_tx["vsize"], vsize)
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii'))
assert(vsize != raw_tx["size"])
# Cleanup: mine the transactions and update utxo for next test
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
# Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG
# This is true regardless of segwit activation.
# Also test that we don't ask for blocks from unupgraded peers
def test_block_relay(self, segwit_activated):
print("\tTesting block relay")
blocktype = 2|MSG_WITNESS_FLAG
# test_node has set NODE_WITNESS, so all getdata requests should be for
# witness blocks.
# Test that announcing a block via inv results in a getdata, and that
# announcing a version 4 or random VB block with a header results in a getdata
block1 = self.build_next_block()
block1.solve()
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert(self.test_node.last_getdata.inv[0].type == blocktype)
self.test_node.test_witness_block(block1, True)
block2 = self.build_next_block(nVersion=4)
block2.solve()
self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
assert(self.test_node.last_getdata.inv[0].type == blocktype)
self.test_node.test_witness_block(block2, True)
block3 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert(self.test_node.last_getdata.inv[0].type == blocktype)
self.test_node.test_witness_block(block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
if not segwit_activated:
# Before activation, we should be able to request old blocks with
# or without witness, and they should be the same.
chain_height = self.nodes[0].getblockcount()
# Pick 10 random blocks on main chain, and verify that getdata's
# for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
all_heights = list(range(chain_height+1))
random.shuffle(all_heights)
all_heights = all_heights[0:10]
for height in all_heights:
block_hash = self.nodes[0].getblockhash(height)
rpc_block = self.nodes[0].getblock(block_hash, False)
block_hash = int(block_hash, 16)
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2|MSG_WITNESS_FLAG)
assert_equal(block.serialize(True), wit_block.serialize(True))
assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
# getdata respects the requested type.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [])
# This gives us a witness commitment.
assert(len(block.vtx[0].wit.vtxinwit) == 1)
assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1)
self.test_node.test_witness_block(block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2|MSG_WITNESS_FLAG)
assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(True), block.serialize(True))
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize(True)))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
weight = 3*len(block.serialize(False)) + len(block.serialize(True))
assert_equal(rpc_details["weight"], weight)
# Upgraded node should not ask for blocks from unupgraded
block4 = self.build_next_block(nVersion=4)
block4.solve()
self.old_node.getdataset = set()
# Blocks can be requested via direct-fetch (immediately upon processing the announcement)
# or via parallel download (with an indeterminate delay from processing the announcement)
# so to test that a block is NOT requested, we could guess a time period to sleep for,
# and then check. We can avoid the sleep() by taking advantage of transaction getdata's
# being processed after block getdata's, and announce a transaction as well,
# and then check to see if that particular getdata has been received.
self.old_node.announce_block(block4, use_header=False)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert(block4.sha256 not in self.old_node.getdataset)
# V0 segwit outputs should be standard after activation, but not before.
def test_standardness_v0(self, segwit_activated):
print("\tTesting standardness of v0 outputs (%s activation)" % ("after" if segwit_activated else "before"))
assert(len(self.utxo))
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(witness_program)
p2sh_scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue-100000, p2sh_scriptPubKey)]
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
self.test_node.test_transaction_acceptance(p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000000, scriptPubKey)]
tx.vout.append(CTxOut(800000, scriptPubKey)) # Might burn this later
tx.rehash()
self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=segwit_activated)
# Now create something that looks like a P2PKH output. This won't be spendable.
scriptPubKey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
if segwit_activated:
# if tx was accepted, then we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(700000, scriptPubKey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
else:
# if tx wasn't accepted, we just re-spend the p2sh output we started with.
tx2.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx2.vout = [CTxOut(p2sh_tx.vout[0].nValue-100000, scriptPubKey)]
tx2.rehash()
self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=segwit_activated)
# Now update self.utxo for later tests.
tx3 = CTransaction()
if segwit_activated:
# tx and tx2 were both accepted. Don't bother trying to reclaim the
# P2PKH output; just send tx's first output back to an anyone-can-spend.
sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
else:
# tx and tx2 didn't go anywhere; just clean up the p2sh_tx output.
tx3.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx3.vout = [CTxOut(p2sh_tx.vout[0].nValue-100000, witness_program)]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Verify that future segwit upgraded transactions are non-standard,
# but valid in blocks. Can run this before and after segwit activation.
def test_segwit_versions(self):
print("\tTesting standardness/consensus for segwit versions (0-16)")
assert(len(self.utxo))
NUM_TESTS = 17 # will test OP_0, OP_1, ..., OP_16
if (len(self.utxo) < NUM_TESTS):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 4000) // NUM_TESTS
for i in range(NUM_TESTS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_TESTS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
sync_blocks(self.nodes)
temp_utxo = []
tx = CTransaction()
count = 0
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
for version in list(range(OP_1, OP_16+1)) + [OP_0]:
count += 1
# First try to spend to a future version segwit scriptPubKey.
scriptPubKey = CScript([CScriptOp(version), witness_hash])
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue-100000, scriptPubKey)]
tx.rehash()
self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
self.nodes[0].generate(1) # Mine all the transactions
sync_blocks(self.nodes)
assert(len(self.nodes[0].getrawmempool()) == 0)
# Finally, verify that version 0 -> version 1 transactions
# are non-standard
scriptPubKey = CScript([CScriptOp(OP_1), witness_hash])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue-100000, scriptPubKey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
tx2.rehash()
# Gets accepted by test_node, because node0 does not enforce standardness
# (fRequireStandard is false there), so output type isn't checked
self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=False)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
# Spend everything in temp_utxo back to an OP_TRUE output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
tx3.vout.append(CTxOut(total_value - 100000, CScript([OP_TRUE])))
tx3.rehash()
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
self.test_node.sync_with_ping()
with mininode_lock:
assert(b"reserved for soft-fork upgrades" in self.test_node.last_reject.reason)
# Building a block with the transaction must be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
self.test_node.test_witness_block(block, accepted=True)
sync_blocks(self.nodes)
# Add utxo to our list
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
def test_premature_coinbase_witness_spend(self):
print("\tTesting premature coinbase witness spend")
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = scriptPubKey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
spend_tx.rehash()
# Now test a premature spend.
self.nodes[0].generate(98)
sync_blocks(self.nodes)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
self.test_node.test_witness_block(block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
self.test_node.test_witness_block(block2, accepted=True)
sync_blocks(self.nodes)
def test_signature_version_1(self):
print("\tTesting segwit signature hash version 1")
key = CECKey()
key.set_secretbytes(b"9")
pubkey = CPubKey(key.get_pubkey())
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# First create a witness output for use in the tests.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
sync_blocks(self.nodes)
self.utxo.pop(0)
# Test each hashtype
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
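# The BIP143 digest commits to the value of the output being spent, so a
# signature made over the wrong amount (checked below with value+1 and
# value-1) must fail validation, while the correct amount must pass.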
for sigflag in [ 0, SIGHASH_ANYONECANPAY ]:
for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
hashtype |= sigflag
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
tx.vout.append(CTxOut(prev_utxo.nValue - 1000, scriptPubKey))
tx.wit.vtxinwit.append(CTxInWitness())
# Too-large input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue+1, key)
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=False)
# Too-small input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue-1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=False)
# Now try correct value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
# Test combinations of signature hashes.
# Split the utxo into a lot of outputs.
# Randomly choose up to 10 to spend, sign with different hashtypes, and
# output to a random number of outputs. Repeat NUM_TESTS times.
# Ensure that we've tested a situation where we use SIGHASH_SINGLE with
# an input index > number of outputs.
NUM_TESTS = 500
temp_utxos = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_TESTS
for i in range(NUM_TESTS):
tx.vout.append(CTxOut(split_value, scriptPubKey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_TESTS):
# Ping regularly to keep the connection alive
if (not i % 100):
self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
num_outputs = random.randint(1, 11)
random.shuffle(temp_utxos)
assert(len(temp_utxos) > num_inputs)
tx = CTransaction()
total_value = 0
for i in range(num_inputs):
tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
for i in range(num_outputs):
tx.vout.append(CTxOut(split_value, scriptPubKey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
anyonecanpay = 0
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
sign_P2PK_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
used_sighash_single_out_of_bounds = True
tx.rehash()
for i in range(num_outputs):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
temp_utxos = temp_utxos[num_inputs:]
block.vtx.append(tx)
# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) > MAX_BLOCK_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
block = self.build_next_block()
if (not used_sighash_single_out_of_bounds):
print("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
scriptPKH = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, scriptPKH))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
script = GetP2PKHScript(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
# Check that we can't have a scriptSig
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.vin[0].scriptSig = b""
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
temp_utxos.pop(0)
# Update self.utxos for later tests. Just spend everything in
# temp_utxos to a corresponding entry in self.utxos
tx = CTransaction()
index = 0
for i in temp_utxos:
# Just spend to our usual anyone-can-spend output
# Use SIGHASH_SINGLE|SIGHASH_ANYONECANPAY so we can build up
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.vout.append(CTxOut(i.nValue, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, index, SIGHASH_SINGLE|SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
# Test P2SH wrapped witness programs.
def test_p2sh_witness(self, segwit_activated):
print("\tTesting P2SH witness transactions")
assert(len(self.utxo))
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
scriptPubKey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([p2wsh_pubkey]) # a push of the redeem script
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
# Verify mempool acceptance and block validity
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True, with_witness=segwit_activated)
sync_blocks(self.nodes)
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), scriptSig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
# segwit activation. Note that older bitcoind versions that are not
# segwit-aware would also reject this for failing CLEANSTACK.
self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the scriptSig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = scriptSig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a', witness_program ]
# Verify mempool acceptance
self.test_node.test_transaction_acceptance(spend_tx, with_witness=True, accepted=segwit_activated)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're before activation, then sending this without witnesses
# should be valid. If we're after activation, then sending this with
# witnesses should be valid.
if segwit_activated:
self.test_node.test_witness_block(block, accepted=True)
else:
self.test_node.test_witness_block(block, accepted=True, with_witness=False)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
# Test the behavior of starting up a segwit-aware node after the softfork
# has activated. As segwit requires different block data than pre-segwit
# nodes would have stored, this requires special handling.
# To enable this test, pass --oldbinary=<path-to-pre-segwit-bitcoind> to
# the test.
def test_upgrade_after_activation(self, node, node_id):
print("\tTesting software upgrade after softfork activation")
assert(node_id != 0) # node0 is assumed to be a segwit-active bitcoind
# Make sure the nodes are all up
sync_blocks(self.nodes)
# Restart with the new binary
stop_node(node, node_id)
self.nodes[node_id] = start_node(node_id, self.options.tmpdir, ["-debug"])
connect_nodes(self.nodes[0], node_id)
sync_blocks(self.nodes)
# Make sure that this peer thinks segwit has activated.
assert(get_bip9_status(node, 'segwit')['status'] == "active")
# Make sure this peer's blocks match those of node0.
height = node.getblockcount()
while height >= 0:
block_hash = node.getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
assert_equal(self.nodes[0].getblock(block_hash), node.getblock(block_hash))
height -= 1
def test_witness_sigops(self):
'''Ensure sigop counting is correct inside witnesses.'''
print("\tTesting sigops limit")
assert(len(self.utxo))
# Keep this under MAX_OPS_PER_SCRIPT (201)
witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG]*5 + [OP_CHECKSIG]*193 + [OP_ENDIF])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
sigops_per_script = 20*5 + 193*1
# We'll produce 2 extra outputs, one with a program that would take us
# over max sig ops, and one with a program that would exactly reach max
# sig ops
outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
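# For reference (assuming MAX_SIGOP_COST is 80000 as defined by BIP141):
# sigops_per_script = 100 + 193 = 293, so outputs = 80000//293 + 2 = 275
# and extra_sigops_available = 80000 % 293 = 11.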
# We chose the number of checkmultisigs/checksigs to make this work:
assert(extra_sigops_available < 100) # steer clear of MAX_OPS_PER_SCRIPT
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available + 1) + [OP_ENDIF])
witness_hash_toomany = sha256(witness_program_toomany)
scriptPubKey_toomany = CScript([OP_0, witness_hash_toomany])
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available) + [OP_ENDIF])
witness_hash_justright = sha256(witness_program_justright)
scriptPubKey_justright = CScript([OP_0, witness_hash_justright])
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
for i in range(outputs):
tx.vout.append(CTxOut(split_value, scriptPubKey))
tx.vout[-2].scriptPubKey = scriptPubKey_toomany
tx.vout[-1].scriptPubKey = scriptPubKey_justright
tx.rehash()
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
self.test_node.test_witness_block(block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
# too many sigops.
total_value = 0
for i in range(outputs-1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program ]
total_value += tx.vout[i].nValue
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_toomany ]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
self.test_node.test_witness_block(block_2, accepted=False)
# Try dropping the last input in tx2, and add an output that has
# too many sigops (contributing to legacy sigop count).
checksig_count = (extra_sigops_available // 4) + 1
scriptPubKey_checksigs = CScript([OP_CHECKSIG]*checksig_count)
tx2.vout.append(CTxOut(0, scriptPubKey_checksigs))
tx2.vin.pop()
tx2.wit.vtxinwit.pop()
tx2.vout[0].nValue -= tx.vout[-2].nValue
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
self.test_node.test_witness_block(block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG]*(checksig_count-1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
self.test_node.test_witness_block(block_4, accepted=True)
# Reset the tip back down for the next test
sync_blocks(self.nodes)
for x in self.nodes:
x.invalidateblock(block_4.hash)
# Try replacing the last input of tx2 to be spending the last
# output of tx
block_5 = self.build_next_block()
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs-1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_justright ]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
self.test_node.test_witness_block(block_5, accepted=True)
# TODO: test p2sh sigop counting
def test_getblocktemplate_before_lockin(self):
print("\tTesting getblocktemplate setting of segwit versionbit (before lockin)")
# Node0 is segwit aware, node2 is not.
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate()
block_version = gbt_results['version']
# If we're not indicating segwit support, we should not be signalling
# for segwit activation, nor should we get a witness commitment.
assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
assert('default_witness_commitment' not in gbt_results)
# Workaround:
# Can either change the tip, or change the mempool and wait 5 seconds
# to trigger a recomputation of getblocktemplate.
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
# Using mocktime lets us avoid sleep()
sync_mempools(self.nodes)
self.nodes[0].setmocktime(int(time.time())+10)
self.nodes[2].setmocktime(int(time.time())+10)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules" : ["segwit"]})
block_version = gbt_results['version']
if node == self.nodes[2]:
# If this is a non-segwit node, we should still not get a witness
# commitment, nor a version bit signalling segwit.
assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
assert('default_witness_commitment' not in gbt_results)
else:
# For segwit-aware nodes, check the version bit and the witness
# commitment are correct.
assert(block_version & (1 << VB_WITNESS_BIT) != 0)
assert('default_witness_commitment' in gbt_results)
witness_commitment = gbt_results['default_witness_commitment']
# TODO: this duplicates some code from blocktools.py, would be nice
# to refactor.
# Check that default_witness_commitment matches the commitment computed below.
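# Per BIP141 the commitment is hash256(witness merkle root || witness nonce),
# where the coinbase's wtxid is defined to be 0 and the default nonce is 0;
# with a single mempool tx the witness merkle root covers [0, txid].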
block = CBlock()
witness_root = block.get_merkle_root([ser_uint256(0), ser_uint256(txid)])
check_commitment = uint256_from_str(hash256(ser_uint256(witness_root)+ser_uint256(0)))
from test_framework.blocktools import WITNESS_COMMITMENT_HEADER
output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(check_commitment)
script = CScript([OP_RETURN, output_data])
assert_equal(witness_commitment, bytes_to_hex_str(script))
# undo mocktime
self.nodes[0].setmocktime(0)
self.nodes[2].setmocktime(0)
# Uncompressed pubkeys are no longer supported in default relay policy,
# but (for now) are still valid in blocks.
def test_uncompressed_pubkey(self):
print("\tTesting uncompressed pubkeys")
# Segwit transactions using uncompressed pubkeys are not accepted
# under default policy, but should still pass consensus.
key = CECKey()
key.set_secretbytes(b"9")
key.set_compressed(False)
pubkey = CPubKey(key.get_pubkey())
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
assert(len(self.utxo) > 0)
utxo = self.utxo.pop(0)
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
scriptPKH = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue-100000, scriptPKH))
tx.rehash()
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
scriptWSH = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, scriptWSH))
script = GetP2PKHScript(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [ signature, pubkey ]
tx2.rehash()
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
p2sh_witness_hash = hash160(scriptWSH)
scriptP2SH = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([scriptWSH])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-100000, scriptP2SH))
tx3.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
self.test_node.test_witness_block(block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
scriptPubKey = GetP2PKHScript(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), scriptSig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-100000, scriptPubKey))
tx4.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
self.test_node.test_witness_block(block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
tx5 = CTransaction()
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
tx5.vout.append(CTxOut(tx4.vout[0].nValue-100000, CScript([OP_TRUE])))
(sig_hash, err) = SignatureHash(scriptPubKey, tx5, 0, SIGHASH_ALL)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
self.test_node.test_transaction_acceptance(tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
def test_non_standard_witness(self):
print("\tTesting detection of non-standard P2WSH witness")
pad = chr(1).encode('latin-1')
# Create scripts for tests
scripts = []
scripts.append(CScript([OP_DROP] * 100))
scripts.append(CScript([OP_DROP] * 99))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
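# Script sizes: each 59-byte push costs 60 bytes (1-byte opcode + data), so
# scripts[2] is 59*60 + 60 = 3600 bytes and scripts[3] is 59*60 + 61 = 3601,
# straddling the 3600-byte witnessScript standardness limit exercised below.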
p2wsh_scripts = []
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
outputvalue = (self.utxo[0].nValue - 100000) // (len(scripts) * 2)
for i in scripts:
p2wsh = CScript([OP_0, sha256(i)])
p2sh = hash160(p2wsh)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Creating transactions for tests
p2wsh_txs = []
p2sh_txs = []
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid,i*2)))
p2wsh_tx.vout.append(CTxOut(outputvalue - 500000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid,i*2+1), CScript([p2wsh_scripts[i]])))
p2sh_tx.vout.append(CTxOut(outputvalue - 500000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
# Testing native P2WSH
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
self.std_node.test_transaction_acceptance(p2wsh_txs[0], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
self.test_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
self.std_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
self.std_node.test_transaction_acceptance(p2wsh_txs[3], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
self.std_node.test_transaction_acceptance(p2sh_txs[0], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2sh_txs[1], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
self.test_node.test_transaction_acceptance(p2sh_txs[2], True, True)
self.std_node.test_transaction_acceptance(p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
self.std_node.test_transaction_acceptance(p2sh_txs[3], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by the standard node
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.utxo.pop(0)
def run_test(self):
# Setup the p2p connections and start up the network thread.
self.test_node = TestNode() # sets NODE_WITNESS|NODE_NETWORK
self.old_node = TestNode() # only NODE_NETWORK
self.std_node = TestNode() # for testing node1 (fRequireStandard=true)
self.p2p_connections = [self.test_node, self.old_node]
self.connections = []
self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node, services=NODE_NETWORK|NODE_WITNESS))
self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.old_node, services=NODE_NETWORK))
self.connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], self.std_node, services=NODE_NETWORK|NODE_WITNESS))
self.test_node.add_connection(self.connections[0])
self.old_node.add_connection(self.connections[1])
self.std_node.add_connection(self.connections[2])
NetworkThread().start() # Start up network handling in another thread
# Keep a place to store utxo's that can be used in later tests
self.utxo = []
# Test logic begins here
self.test_node.wait_for_verack()
print("\nStarting tests before segwit lock in:")
self.test_witness_services() # Verifies NODE_WITNESS
self.test_non_witness_transaction() # non-witness tx's are accepted
self.test_unnecessary_witness_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
# Advance to segwit being 'started'
self.advance_to_segwit_started()
sync_blocks(self.nodes)
self.test_getblocktemplate_before_lockin()
sync_blocks(self.nodes)
# At lockin, nothing should change.
print("\nTesting behavior post lockin, pre-activation")
self.advance_to_segwit_lockin()
# Retest unnecessary witnesses
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
self.test_p2sh_witness(segwit_activated=False)
self.test_standardness_v0(segwit_activated=False)
sync_blocks(self.nodes)
# Now activate segwit
print("\nTesting behavior after segwit activation")
self.advance_to_segwit_active()
sync_blocks(self.nodes)
# Test P2SH witness handling again
self.test_p2sh_witness(segwit_activated=True)
self.test_witness_commitments()
self.test_block_malleability()
self.test_witness_block_size()
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
self.test_max_witness_program_length()
self.test_witness_input_length()
self.test_block_relay(segwit_activated=True)
self.test_tx_relay_after_segwit_activation()
self.test_standardness_v0(segwit_activated=True)
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
self.test_non_standard_witness()
sync_blocks(self.nodes)
self.test_upgrade_after_activation(self.nodes[2], 2)
self.test_witness_sigops()
if __name__ == '__main__':
SegWitTest().main()
|
mit
|
FrancescAlted/blaze
|
blaze/io/server/crossdomain.py
|
6
|
1834
|
from __future__ import absolute_import, division, print_function
from datetime import timedelta
from flask import make_response, request, current_app
from functools import update_wrapper
from ... import py2help
def crossdomain(origin=None, methods=None, headers=None,
automatic_headers=True,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, py2help.basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, py2help.basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
    if methods is not None:
        return methods
    options_resp = current_app.make_default_options_response()
    return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if automatic_headers and h.get('Access-Control-Request-Headers'):
h['Access-Control-Allow-Headers'] = h['Access-Control-Request-Headers']
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
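# Hedged usage sketch (not part of the original module): the Flask app, the
# '/meta' route and the _demo_usage helper below are hypothetical names added
# only to illustrate how the crossdomain decorator above is typically attached
# to a view so its responses gain the Access-Control-* headers assembled in
# wrapped_function.
def _demo_usage():
    from flask import Flask, jsonify
    app = Flask(__name__)
    @app.route('/meta')
    @crossdomain(origin='*', methods=['GET', 'POST'], max_age=3600)
    def meta():
        # Carries Access-Control-Allow-Origin/Methods/Max-Age when served.
        return jsonify(status='ok')
    return app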
|
bsd-3-clause
|
eduNEXT/edx-platform
|
cms/djangoapps/contentstore/management/commands/sync_courses.py
|
3
|
2362
|
"""
Sync courses from the catalog service. This is used to set up a master's
integration environment.
"""
import logging
from textwrap import dedent
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.core.management.base import BaseCommand, CommandError
from opaque_keys.edx.keys import CourseKey
from cms.djangoapps.contentstore.management.commands.utils import user_from_str
from cms.djangoapps.contentstore.views.course import create_new_course_in_store
from openedx.core.djangoapps.catalog.utils import get_course_runs
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.exceptions import DuplicateCourseError
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Command to populate modulestore with courses from the discovery service.
Example: ./manage.py cms sync_courses [email protected]
"""
help = dedent(__doc__).strip()
def add_arguments(self, parser):
parser.add_argument('instructor')
def get_user(self, user):
"""
Return a User object.
"""
try:
user_object = user_from_str(user)
except User.DoesNotExist:
raise CommandError(f"No user {user} found.") # lint-amnesty, pylint: disable=raise-missing-from
return user_object
def handle(self, *args, **options):
"""Execute the command"""
instructor = self.get_user(options['instructor'])
course_runs = get_course_runs()
for course_run in course_runs:
course_key = CourseKey.from_string(course_run.get('key'))
fields = {
"display_name": course_run.get('title')
}
try:
new_course = create_new_course_in_store(
ModuleStoreEnum.Type.split,
instructor,
course_key.org,
course_key.course,
course_key.run,
fields,
)
logger.info(f"Created {str(new_course.id)}")
except DuplicateCourseError:
logger.warning(
"Course already exists for %s, %s, %s. Skipping",
course_key.org,
course_key.course,
course_key.run,
)
|
agpl-3.0
|
doordash/realms-wiki
|
realms/modules/github/views.py
|
1
|
1935
|
from flask import (
current_app, render_template, redirect, Blueprint, flash, url_for, g
)
from flask.ext.github import GitHubError
from flask.ext.login import login_user, logout_user
from realms import github
from realms.modules.github.models import User
blueprint = Blueprint('github', __name__)
@blueprint.route("/login")
def login():
return render_template("github/login.html")
@blueprint.route("/github/authorize")
def authorize():
"""
Redirect user to Github to authorize
"""
BASE_URL = current_app.config['BASE_URL']
redirect_uri = '{}{}'.format(BASE_URL, url_for('github.authorized'))
return github.authorize(
scope='user:email,read:org',
redirect_uri=redirect_uri
)
@blueprint.route('/github/callback')
@github.authorized_handler
def authorized(oauth_token):
"""
Callback from Github; called with an oauth_token
"""
if oauth_token is None:
flash("Authorization failed.")
return redirect(url_for(current_app.config['ROOT_ENDPOINT']))
g.github_access_token = oauth_token
github_user = github.get('user')
emails = github.get('user/emails')
# Get the primary email
email = None
for e in emails:
if e['primary']:
email = e['email']
break
username = github_user['login']
# Check membership of organization
# TODO add GITHUB_AUTHORIZED_USERNAMES or something
org = current_app.config['GITHUB_AUTHORIZED_ORG']
membership_url = 'orgs/{}/members/{}'.format(org, username)
try:
github.get(membership_url)
except GitHubError:
flash('Not org member')
else:
user = User(username, email=email)
login_user(user, remember=True)
return redirect(url_for(current_app.config['ROOT_ENDPOINT']))
@blueprint.route("/logout")
def logout():
logout_user()
return redirect(url_for(current_app.config['ROOT_ENDPOINT']))
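# Hedged wiring sketch (not part of the original file): the values below are
# placeholders that only enumerate the config keys this blueprint reads
# (BASE_URL, GITHUB_AUTHORIZED_ORG, ROOT_ENDPOINT) and show the blueprint
# being registered on a Flask app; _demo_wiring is a hypothetical helper.
def _demo_wiring(app):
    app.config.setdefault('BASE_URL', 'https://wiki.example.com')
    app.config.setdefault('GITHUB_AUTHORIZED_ORG', 'example-org')
    app.config.setdefault('ROOT_ENDPOINT', 'wiki.index')
    app.register_blueprint(blueprint)
    return app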
|
gpl-2.0
|
gsehub/edx-platform
|
lms/djangoapps/discussion_api/tests/test_permissions.py
|
9
|
5119
|
"""
Tests for discussion API permission logic
"""
import itertools
import ddt
from nose.plugins.attrib import attr
from discussion_api.permissions import (
can_delete,
get_editable_fields,
get_initializable_comment_fields,
get_initializable_thread_fields
)
from lms.lib.comment_client.comment import Comment
from lms.lib.comment_client.thread import Thread
from lms.lib.comment_client.user import User
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
def _get_context(requester_id, is_requester_privileged, is_cohorted=False, thread=None):
"""Return a context suitable for testing the permissions module"""
return {
"cc_requester": User(id=requester_id),
"is_requester_privileged": is_requester_privileged,
"course": CourseFactory(cohort_config={"cohorted": is_cohorted}),
"discussion_division_enabled": is_cohorted,
"thread": thread,
}
@attr(shard=8)
@ddt.ddt
class GetInitializableFieldsTest(ModuleStoreTestCase):
"""Tests for get_*_initializable_fields"""
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_thread(self, is_privileged, is_cohorted):
context = _get_context(
requester_id="5",
is_requester_privileged=is_privileged,
is_cohorted=is_cohorted
)
actual = get_initializable_thread_fields(context)
expected = {
"abuse_flagged", "course_id", "following", "raw_body", "read", "title", "topic_id", "type", "voted"
}
if is_privileged and is_cohorted:
expected |= {"group_id"}
self.assertEqual(actual, expected)
@ddt.data(*itertools.product([True, False], ["question", "discussion"], [True, False]))
@ddt.unpack
def test_comment(self, is_thread_author, thread_type, is_privileged):
context = _get_context(
requester_id="5",
is_requester_privileged=is_privileged,
thread=Thread(user_id="5" if is_thread_author else "6", thread_type=thread_type)
)
actual = get_initializable_comment_fields(context)
expected = {
"abuse_flagged", "parent_id", "raw_body", "thread_id", "voted"
}
if (is_thread_author and thread_type == "question") or is_privileged:
expected |= {"endorsed"}
self.assertEqual(actual, expected)
@attr(shard=8)
@ddt.ddt
class GetEditableFieldsTest(ModuleStoreTestCase):
"""Tests for get_editable_fields"""
@ddt.data(*itertools.product([True, False], [True, False], [True, False]))
@ddt.unpack
def test_thread(self, is_author, is_privileged, is_cohorted):
thread = Thread(user_id="5" if is_author else "6", type="thread")
context = _get_context(
requester_id="5",
is_requester_privileged=is_privileged,
is_cohorted=is_cohorted
)
actual = get_editable_fields(thread, context)
expected = {"abuse_flagged", "following", "read", "voted"}
if is_author or is_privileged:
expected |= {"topic_id", "type", "title", "raw_body"}
if is_privileged and is_cohorted:
expected |= {"group_id"}
self.assertEqual(actual, expected)
@ddt.data(*itertools.product([True, False], [True, False], ["question", "discussion"], [True, False]))
@ddt.unpack
def test_comment(self, is_author, is_thread_author, thread_type, is_privileged):
comment = Comment(user_id="5" if is_author else "6", type="comment")
context = _get_context(
requester_id="5",
is_requester_privileged=is_privileged,
thread=Thread(user_id="5" if is_thread_author else "6", thread_type=thread_type)
)
actual = get_editable_fields(comment, context)
expected = {"abuse_flagged", "voted"}
if is_author or is_privileged:
expected |= {"raw_body"}
if (is_thread_author and thread_type == "question") or is_privileged:
expected |= {"endorsed"}
self.assertEqual(actual, expected)
@attr(shard=8)
@ddt.ddt
class CanDeleteTest(ModuleStoreTestCase):
"""Tests for can_delete"""
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_thread(self, is_author, is_privileged):
thread = Thread(user_id="5" if is_author else "6")
context = _get_context(requester_id="5", is_requester_privileged=is_privileged)
self.assertEqual(can_delete(thread, context), is_author or is_privileged)
@ddt.data(*itertools.product([True, False], [True, False], [True, False]))
@ddt.unpack
def test_comment(self, is_author, is_thread_author, is_privileged):
comment = Comment(user_id="5" if is_author else "6")
context = _get_context(
requester_id="5",
is_requester_privileged=is_privileged,
thread=Thread(user_id="5" if is_thread_author else "6")
)
self.assertEqual(can_delete(comment, context), is_author or is_privileged)
|
agpl-3.0
|
jsoref/django
|
tests/template_backends/test_jinja2.py
|
315
|
3048
|
# Since this package contains a "jinja2" directory, this is required to
# silence an ImportWarning warning on Python 2.
from __future__ import absolute_import
from unittest import skipIf
from django.template import TemplateSyntaxError
from .test_dummy import TemplateStringsTests
try:
import jinja2
except ImportError:
jinja2 = None
Jinja2 = None
else:
from django.template.backends.jinja2 import Jinja2
@skipIf(jinja2 is None, "this test requires jinja2")
class Jinja2Tests(TemplateStringsTests):
engine_class = Jinja2
backend_name = 'jinja2'
options = {'keep_trailing_newline': True}
def test_origin(self):
template = self.engine.get_template('template_backends/hello.html')
self.assertTrue(template.origin.name.endswith('hello.html'))
self.assertEqual(template.origin.template_name, 'template_backends/hello.html')
def test_origin_from_string(self):
template = self.engine.from_string('Hello!\n')
self.assertEqual(template.origin.name, '<template>')
self.assertEqual(template.origin.template_name, None)
def test_self_context(self):
"""
Using 'self' in the context should not throw errors (#24538).
"""
# self will be overridden to be a TemplateReference, so the self
# variable will not come through. Attempting to use one though should
# not throw an error.
template = self.engine.from_string('hello {{ foo }}!')
content = template.render(context={'self': 'self', 'foo': 'world'})
self.assertEqual(content, 'hello world!')
def test_exception_debug_info_min_context(self):
with self.assertRaises(TemplateSyntaxError) as e:
self.engine.get_template('template_backends/syntax_error.html')
debug = e.exception.template_debug
self.assertEqual(debug['after'], '')
self.assertEqual(debug['before'], '')
self.assertEqual(debug['during'], '{% block %}')
self.assertEqual(debug['bottom'], 1)
self.assertEqual(debug['top'], 0)
self.assertEqual(debug['line'], 1)
self.assertEqual(debug['total'], 1)
self.assertEqual(len(debug['source_lines']), 1)
self.assertTrue(debug['name'].endswith('syntax_error.html'))
self.assertTrue('message' in debug)
def test_exception_debug_info_max_context(self):
with self.assertRaises(TemplateSyntaxError) as e:
self.engine.get_template('template_backends/syntax_error2.html')
debug = e.exception.template_debug
self.assertEqual(debug['after'], '')
self.assertEqual(debug['before'], '')
self.assertEqual(debug['during'], '{% block %}')
self.assertEqual(debug['bottom'], 26)
self.assertEqual(debug['top'], 5)
self.assertEqual(debug['line'], 16)
self.assertEqual(debug['total'], 31)
self.assertEqual(len(debug['source_lines']), 21)
self.assertTrue(debug['name'].endswith('syntax_error2.html'))
self.assertTrue('message' in debug)
|
bsd-3-clause
|
juharris/tensorflow
|
tensorflow/contrib/metrics/python/ops/confusion_matrix_ops.py
|
5
|
4520
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Confusion matrix related metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework import tensor_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
def confusion_matrix(predictions, labels, num_classes=None, dtype=dtypes.int32,
name=None, weights=None):
"""Computes the confusion matrix from predictions and labels.
Calculate the Confusion Matrix for a pair of prediction and
label 1-D int arrays.
The matrix rows represent the prediction labels and the columns
represent the real labels. The confusion matrix is always a 2-D array
of shape `[n, n]`, where `n` is the number of valid labels for a given
classification task. Both prediction and labels must be 1-D arrays of
the same shape in order for this function to work.
If `num_classes` is None, then `num_classes` will be set to one plus
the maximum value in either predictions or labels.
Class labels are expected to start at 0. E.g., if `num_classes` was
three, then the possible labels would be `[0, 1, 2]`.
If `weights` is not `None`, then each prediction contributes its
corresponding weight to the total value of the confusion matrix cell.
For example:
```python
tf.contrib.metrics.confusion_matrix([1, 2, 4], [2, 2, 4]) ==>
[[0 0 0 0 0]
[0 0 1 0 0]
[0 0 1 0 0]
[0 0 0 0 0]
[0 0 0 0 1]]
```
Note that the possible labels are assumed to be `[0, 1, 2, 3, 4]`,
resulting in a 5x5 confusion matrix.
Args:
predictions: A 1-D array representing the predictions for a given
classification.
labels: A 1-D array representing the real labels for the classification task.
num_classes: The possible number of labels the classification task can
have. If this value is not provided, it will be calculated
using both predictions and labels array.
dtype: Data type of the confusion matrix.
name: Scope name.
weights: An optional `Tensor` whose shape matches `predictions`.
Returns:
A k X k matrix representing the confusion matrix, where k is the number of
possible labels in the classification task.
Raises:
ValueError: If both predictions and labels are not 1-D vectors and have
mismatched shapes, or if `weights` is not `None` and its shape doesn't
match `predictions`.
"""
with ops.name_scope(name, 'confusion_matrix',
[predictions, labels, num_classes]) as name:
predictions, labels = tensor_util.remove_squeezable_dimensions(
ops.convert_to_tensor(
predictions, name='predictions'),
ops.convert_to_tensor(labels, name='labels'))
predictions = math_ops.cast(predictions, dtypes.int64)
labels = math_ops.cast(labels, dtypes.int64)
if num_classes is None:
num_classes = math_ops.maximum(math_ops.reduce_max(predictions),
math_ops.reduce_max(labels)) + 1
if weights is not None:
predictions.get_shape().assert_is_compatible_with(weights.get_shape())
weights = math_ops.cast(weights, dtype)
shape = array_ops.pack([num_classes, num_classes])
indices = array_ops.transpose(array_ops.pack([predictions, labels]))
values = (array_ops.ones_like(predictions, dtype)
if weights is None else weights)
cm_sparse = ops.SparseTensor(
indices=indices, values=values, shape=math_ops.to_int64(shape))
zero_matrix = array_ops.zeros(math_ops.to_int32(shape), dtype)
return sparse_ops.sparse_add(zero_matrix, cm_sparse)
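# Hedged usage sketch (not part of the original file): demonstrates the
# `weights` argument described in the docstring above; the session-style run
# matches the contrib-era API this module targets, but exact entry points can
# differ between TensorFlow releases, so treat this as illustrative only.
def _demo_weighted_confusion_matrix():
    import tensorflow as tf
    predictions = tf.constant([1, 2, 4])
    labels = tf.constant([2, 2, 4])
    weights = tf.constant([1, 2, 3])  # each match adds its weight instead of 1
    cm = confusion_matrix(predictions, labels, num_classes=5, weights=weights)
    with tf.Session() as sess:
        # Rows are predictions, columns are labels, so expect
        # cell [1, 2] == 1, [2, 2] == 2 and [4, 4] == 3 in a 5x5 matrix.
        return sess.run(cm)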
|
apache-2.0
|
simonmikkelsen/facebook-downloader
|
fbdownload/groupdownloader.py
|
1
|
2371
|
from fbdownload.downloader import FacebookDownloader
class FacebookGroupDownloader(FacebookDownloader):
'''
Downloads data from a Facebook group, including events.
Images are not downloaded.
In this package of classes, that is done by the
FacebookHtmlExporter, even though a stand-alone download
function would be nice.
'''
def __init__(self, groupId, access_token):
'''
Creates a new instance.
:param groupId: the ID of the group to download. Use the
FacebookGroupLister to get it.
:param access_token: The Facebook access token.
'''
FacebookDownloader.__init__(self, access_token)
self.groupId = groupId
self.lightEvents = False
def download(self, downloadEvents = True):
'''
Download everything.
'''
if downloadEvents:
self.downloadEvents()
elif self.verbose > 0:
print "Do not download events."
self.downloadGroup()
return self.data
def downloadGroup(self):
'''
Download the group's feed.
'''
url = "https://graph.facebook.com/%s/feed" % self.groupId
return self.downloadData(url, 'group.posts')
def downloadEvents(self):
'''
Download the group's events.
'''
url = "https://graph.facebook.com/%s/events" % self.groupId
objects = []
events = self.downloadData(url, 'group.events')
for event in events:
eventContents = {}
url = "https://graph.facebook.com/%s/" % event['id']
eventContents['event'] = self.downloadData(url, multipleElements = False)
if self.lightEvents:
pagesToGet = ['feed', 'attending', 'photos']
else:
pagesToGet = ['feed', 'attending', 'declined', 'invited', 'maybe', 'noreply', 'photos', 'videos']
for action in pagesToGet:
url = "https://graph.facebook.com/%s/%s/" % (event['id'], action)
eventContents[action] = self.downloadData(url, multipleElements = True)
objects.append(eventContents)
# TODO Download photos and videos here and not in the exporter.
# url = "https://graph.facebook.com/%s/picture/" % event['id']
self.data['events'] = objects
if self.jsonFile != None:
self.saveDatasets()
return objects
def setLightEvents(self, lightEvents):
'''
When True, only the most important attributes of an event are downloaded.
'''
self.lightEvents = lightEvents
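# Hedged usage sketch (not part of the original file): the group id and access
# token are placeholders; this only shows the call sequence documented above
# (optionally restricting per-event pages, then downloading events and the
# group feed in one pass).
def _demo_download():
    downloader = FacebookGroupDownloader('123456789', 'ACCESS-TOKEN-PLACEHOLDER')
    downloader.setLightEvents(True)  # only feed/attending/photos per event
    return downloader.download(downloadEvents=True)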
|
mit
|
mpeuster/estate
|
experiments/scaleability-fixed/pox/pox/info/switch_info.py
|
46
|
2469
|
# Copyright 2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Dumps info about switches when they first connect
"""
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.util import dpid_to_str
log = core.getLogger()
# Formatted switch descriptions we've logged
# (We remember them so that we only print them once)
_switches = set()
# .. unless always is True in which case we always print them
_always = False
def _format_entry (desc):
def fmt (v):
if not v: return "<Empty>"
return str(v)
dpid = dpid_to_str(desc.connection.dpid)
ofp = desc.ofp.body
s = []
ports = [(p.port_no,p.name) for p in desc.connection.ports.values()]
ports.sort()
ports = " ".join(p[1] for p in ports)
#if len(ports) > len(dpid)+12:
# ports = "%s ports" % (len(desc.connection.ports),)
s.append("New Switch: " + dpid)
s.append("Hardware: " + fmt(ofp.hw_desc))
s.append("Software: " + fmt(ofp.sw_desc))
s.append("SerialNum: " + fmt(ofp.serial_num))
s.append("Desc: " + fmt(ofp.dp_desc))
s.append("Ports: " + fmt(ports))
# Let's get fancy
width = max(len(line) for line in s)
s.insert(0, "=" * width)
s.insert(2, "-" * width)
s.append( "=" * width)
return "\n".join(s)
def _handle_ConnectionUp (event):
msg = of.ofp_stats_request(body=of.ofp_desc_stats_request())
msg.type = 0 # For betta bug, can be removed
event.connection.send(msg)
def _handle_SwitchDescReceived (event):
s = _format_entry(event)
if not _always and s in _switches:
# We've already logged it.
return
_switches.add(s)
ss = s.split("\n")
logger = core.getLogger("info." + dpid_to_str(event.connection.dpid))
for s in ss:
logger.info(s)
def launch (always = False):
global _always
_always = always
core.openflow.addListenerByName("ConnectionUp",
_handle_ConnectionUp)
core.openflow.addListenerByName("SwitchDescReceived",
_handle_SwitchDescReceived)
|
apache-2.0
|
samtx/whatsmyrankine
|
venv/lib/python2.7/site-packages/setuptools/command/bdist_egg.py
|
286
|
18718
|
"""setuptools.command.bdist_egg
Build .egg distributions"""
# This module should be kept compatible with Python 2.3
import sys, os, marshal
from setuptools import Command
from distutils.dir_util import remove_tree, mkpath
try:
# Python 2.7 or >=3.2
from sysconfig import get_path, get_python_version
def _get_purelib():
return get_path("purelib")
except ImportError:
from distutils.sysconfig import get_python_lib, get_python_version
def _get_purelib():
return get_python_lib(False)
from distutils import log
from distutils.errors import DistutilsSetupError
from pkg_resources import get_build_platform, Distribution, ensure_directory
from pkg_resources import EntryPoint
from types import CodeType
from setuptools.compat import basestring, next
from setuptools.extension import Library
def strip_module(filename):
if '.' in filename:
filename = os.path.splitext(filename)[0]
if filename.endswith('module'):
filename = filename[:-6]
return filename
def write_stub(resource, pyfile):
f = open(pyfile,'w')
f.write('\n'.join([
"def __bootstrap__():",
" global __bootstrap__, __loader__, __file__",
" import sys, pkg_resources, imp",
" __file__ = pkg_resources.resource_filename(__name__,%r)"
% resource,
" __loader__ = None; del __bootstrap__, __loader__",
" imp.load_dynamic(__name__,__file__)",
"__bootstrap__()",
"" # terminal \n
]))
f.close()
# stub __init__.py for packages distributed without one
NS_PKG_STUB = '__import__("pkg_resources").declare_namespace(__name__)'
class bdist_egg(Command):
description = "create an \"egg\" distribution"
user_options = [
('bdist-dir=', 'b',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_build_platform()),
('exclude-source-files', None,
"remove all .py files from the generated egg"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
]
boolean_options = [
'keep-temp', 'skip-build', 'exclude-source-files'
]
def initialize_options (self):
self.bdist_dir = None
self.plat_name = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = 0
self.egg_output = None
self.exclude_source_files = None
def finalize_options(self):
ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
self.egg_info = ei_cmd.egg_info
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'egg')
if self.plat_name is None:
self.plat_name = get_build_platform()
self.set_undefined_options('bdist',('dist_dir', 'dist_dir'))
if self.egg_output is None:
# Compute filename of the output egg
basename = Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version,
get_python_version(),
self.distribution.has_ext_modules() and self.plat_name
).egg_name()
self.egg_output = os.path.join(self.dist_dir, basename+'.egg')
def do_install_data(self):
# Hack for packages that install data to install's --install-lib
self.get_finalized_command('install').install_lib = self.bdist_dir
site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
old, self.distribution.data_files = self.distribution.data_files,[]
for item in old:
if isinstance(item,tuple) and len(item)==2:
if os.path.isabs(item[0]):
realpath = os.path.realpath(item[0])
normalized = os.path.normcase(realpath)
if normalized==site_packages or normalized.startswith(
site_packages+os.sep
):
item = realpath[len(site_packages)+1:], item[1]
# XXX else: raise ???
self.distribution.data_files.append(item)
try:
log.info("installing package data to %s" % self.bdist_dir)
self.call_command('install_data', force=0, root=None)
finally:
self.distribution.data_files = old
def get_outputs(self):
return [self.egg_output]
def call_command(self,cmdname,**kw):
"""Invoke reinitialized command `cmdname` with keyword args"""
for dirname in INSTALL_DIRECTORY_ATTRS:
kw.setdefault(dirname,self.bdist_dir)
kw.setdefault('skip_build',self.skip_build)
kw.setdefault('dry_run', self.dry_run)
cmd = self.reinitialize_command(cmdname, **kw)
self.run_command(cmdname)
return cmd
def run(self):
# Generate metadata first
self.run_command("egg_info")
# We run install_lib before install_data, because some data hacks
# pull their data path from the install_lib command.
log.info("installing library code to %s" % self.bdist_dir)
instcmd = self.get_finalized_command('install')
old_root = instcmd.root; instcmd.root = None
if self.distribution.has_c_libraries() and not self.skip_build:
self.run_command('build_clib')
cmd = self.call_command('install_lib', warn_dir=0)
instcmd.root = old_root
all_outputs, ext_outputs = self.get_ext_outputs()
self.stubs = []
to_compile = []
for (p,ext_name) in enumerate(ext_outputs):
filename,ext = os.path.splitext(ext_name)
pyfile = os.path.join(self.bdist_dir, strip_module(filename)+'.py')
self.stubs.append(pyfile)
log.info("creating stub loader for %s" % ext_name)
if not self.dry_run:
write_stub(os.path.basename(ext_name), pyfile)
to_compile.append(pyfile)
ext_outputs[p] = ext_name.replace(os.sep,'/')
to_compile.extend(self.make_init_files())
if to_compile:
cmd.byte_compile(to_compile)
if self.distribution.data_files:
self.do_install_data()
# Make the EGG-INFO directory
archive_root = self.bdist_dir
egg_info = os.path.join(archive_root,'EGG-INFO')
self.mkpath(egg_info)
if self.distribution.scripts:
script_dir = os.path.join(egg_info, 'scripts')
log.info("installing scripts to %s" % script_dir)
self.call_command('install_scripts',install_dir=script_dir,no_ep=1)
self.copy_metadata_to(egg_info)
native_libs = os.path.join(egg_info, "native_libs.txt")
if all_outputs:
log.info("writing %s" % native_libs)
if not self.dry_run:
ensure_directory(native_libs)
libs_file = open(native_libs, 'wt')
libs_file.write('\n'.join(all_outputs))
libs_file.write('\n')
libs_file.close()
elif os.path.isfile(native_libs):
log.info("removing %s" % native_libs)
if not self.dry_run:
os.unlink(native_libs)
write_safety_flag(
os.path.join(archive_root,'EGG-INFO'), self.zip_safe()
)
if os.path.exists(os.path.join(self.egg_info,'depends.txt')):
log.warn(
"WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
if self.exclude_source_files:
self.zap_pyfiles()
# Make the archive
make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
dry_run=self.dry_run, mode=self.gen_header())
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
# Add to 'Distribution.dist_files' so that the "upload" command works
getattr(self.distribution,'dist_files',[]).append(
('bdist_egg',get_python_version(),self.egg_output))
def zap_pyfiles(self):
log.info("Removing .py files from temporary directory")
for base,dirs,files in walk_egg(self.bdist_dir):
for name in files:
if name.endswith('.py'):
path = os.path.join(base,name)
log.debug("Deleting %s", path)
os.unlink(path)
def zip_safe(self):
safe = getattr(self.distribution,'zip_safe',None)
if safe is not None:
return safe
log.warn("zip_safe flag not set; analyzing archive contents...")
return analyze_egg(self.bdist_dir, self.stubs)
def make_init_files(self):
"""Create missing package __init__ files"""
init_files = []
for base,dirs,files in walk_egg(self.bdist_dir):
if base==self.bdist_dir:
# don't put an __init__ in the root
continue
for name in files:
if name.endswith('.py'):
if '__init__.py' not in files:
pkg = base[len(self.bdist_dir)+1:].replace(os.sep,'.')
if self.distribution.has_contents_for(pkg):
log.warn("Creating missing __init__.py for %s",pkg)
filename = os.path.join(base,'__init__.py')
if not self.dry_run:
f = open(filename,'w'); f.write(NS_PKG_STUB)
f.close()
init_files.append(filename)
break
else:
# not a package, don't traverse to subdirectories
dirs[:] = []
return init_files
def gen_header(self):
epm = EntryPoint.parse_map(self.distribution.entry_points or '')
ep = epm.get('setuptools.installation',{}).get('eggsecutable')
if ep is None:
return 'w' # not an eggsecutable, do it the usual way.
if not ep.attrs or ep.extras:
raise DistutilsSetupError(
"eggsecutable entry point (%r) cannot have 'extras' "
"or refer to a module" % (ep,)
)
pyver = sys.version[:3]
pkg = ep.module_name
full = '.'.join(ep.attrs)
base = ep.attrs[0]
basename = os.path.basename(self.egg_output)
header = (
"#!/bin/sh\n"
'if [ `basename $0` = "%(basename)s" ]\n'
'then exec python%(pyver)s -c "'
"import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
"from %(pkg)s import %(base)s; sys.exit(%(full)s())"
'" "$@"\n'
'else\n'
' echo $0 is not the correct name for this egg file.\n'
' echo Please rename it back to %(basename)s and try again.\n'
' exec false\n'
'fi\n'
) % locals()
if not self.dry_run:
mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
f = open(self.egg_output, 'w')
f.write(header)
f.close()
return 'a'
def copy_metadata_to(self, target_dir):
"Copy metadata (egg info) to the target_dir"
# normalize the path (so that a forward-slash in egg_info will
# match using startswith below)
norm_egg_info = os.path.normpath(self.egg_info)
prefix = os.path.join(norm_egg_info,'')
for path in self.ei_cmd.filelist.files:
if path.startswith(prefix):
target = os.path.join(target_dir, path[len(prefix):])
ensure_directory(target)
self.copy_file(path, target)
def get_ext_outputs(self):
"""Get a list of relative paths to C extensions in the output distro"""
all_outputs = []
ext_outputs = []
paths = {self.bdist_dir:''}
for base, dirs, files in os.walk(self.bdist_dir):
for filename in files:
if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
all_outputs.append(paths[base]+filename)
for filename in dirs:
paths[os.path.join(base,filename)] = paths[base]+filename+'/'
if self.distribution.has_ext_modules():
build_cmd = self.get_finalized_command('build_ext')
for ext in build_cmd.extensions:
if isinstance(ext,Library):
continue
fullname = build_cmd.get_ext_fullname(ext.name)
filename = build_cmd.get_ext_filename(fullname)
if not os.path.basename(filename).startswith('dl-'):
if os.path.exists(os.path.join(self.bdist_dir,filename)):
ext_outputs.append(filename)
return all_outputs, ext_outputs
NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
def walk_egg(egg_dir):
"""Walk an unpacked egg's contents, skipping the metadata directory"""
walker = os.walk(egg_dir)
base,dirs,files = next(walker)
if 'EGG-INFO' in dirs:
dirs.remove('EGG-INFO')
yield base,dirs,files
for bdf in walker:
yield bdf
def analyze_egg(egg_dir, stubs):
# check for existing flag in EGG-INFO
for flag,fn in safety_flags.items():
if os.path.exists(os.path.join(egg_dir,'EGG-INFO',fn)):
return flag
if not can_scan(): return False
safe = True
for base, dirs, files in walk_egg(egg_dir):
for name in files:
if name.endswith('.py') or name.endswith('.pyw'):
continue
elif name.endswith('.pyc') or name.endswith('.pyo'):
# always scan, even if we already know we're not safe
safe = scan_module(egg_dir, base, name, stubs) and safe
return safe
def write_safety_flag(egg_dir, safe):
# Write or remove zip safety flag file(s)
for flag,fn in safety_flags.items():
fn = os.path.join(egg_dir, fn)
if os.path.exists(fn):
if safe is None or bool(safe) != flag:
os.unlink(fn)
elif safe is not None and bool(safe)==flag:
f=open(fn,'wt'); f.write('\n'); f.close()
safety_flags = {
True: 'zip-safe',
False: 'not-zip-safe',
}
def scan_module(egg_dir, base, name, stubs):
"""Check whether module possibly uses unsafe-for-zipfile stuff"""
filename = os.path.join(base,name)
if filename[:-1] in stubs:
return True # Extension module
pkg = base[len(egg_dir)+1:].replace(os.sep,'.')
module = pkg+(pkg and '.' or '')+os.path.splitext(name)[0]
if sys.version_info < (3, 3):
skip = 8 # skip magic & date
else:
skip = 12 # skip magic & date & file size
f = open(filename,'rb'); f.read(skip)
code = marshal.load(f); f.close()
safe = True
symbols = dict.fromkeys(iter_symbols(code))
for bad in ['__file__', '__path__']:
if bad in symbols:
log.warn("%s: module references %s", module, bad)
safe = False
if 'inspect' in symbols:
for bad in [
'getsource', 'getabsfile', 'getsourcefile', 'getfile',
'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
'getinnerframes', 'getouterframes', 'stack', 'trace'
]:
if bad in symbols:
log.warn("%s: module MAY be using inspect.%s", module, bad)
safe = False
if '__name__' in symbols and '__main__' in symbols and '.' not in module:
if sys.version[:3]=="2.4": # -m works w/zipfiles in 2.5
log.warn("%s: top-level module may be 'python -m' script", module)
safe = False
return safe
def iter_symbols(code):
"""Yield names and strings used by `code` and its nested code objects"""
for name in code.co_names: yield name
for const in code.co_consts:
if isinstance(const,basestring):
yield const
elif isinstance(const,CodeType):
for name in iter_symbols(const):
yield name
def can_scan():
if not sys.platform.startswith('java') and sys.platform != 'cli':
# CPython, PyPy, etc.
return True
log.warn("Unable to analyze compiled code on this platform.")
log.warn("Please ask the author to include a 'zip_safe'"
" setting (either True or False) in the package's setup.py")
# Attribute names of options for commands that might need to be convinced to
# install to the egg build directory
INSTALL_DIRECTORY_ATTRS = [
'install_lib', 'install_dir', 'install_data', 'install_base'
]
def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=None,
mode='w'
):
"""Create a zip file from all the files under 'base_dir'. The output
zip file will be named 'base_dir' + ".zip". Uses either the "zipfile"
Python module (if available) or the InfoZIP "zip" utility (if installed
and found on the default search path). If neither tool is available,
raises DistutilsExecError. Returns the name of the output zip file.
"""
import zipfile
mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
def visit(z, dirname, names):
for name in names:
path = os.path.normpath(os.path.join(dirname, name))
if os.path.isfile(path):
p = path[len(base_dir)+1:]
if not dry_run:
z.write(path, p)
log.debug("adding '%s'" % p)
if compress is None:
compress = (sys.version>="2.4") # avoid 2.3 zipimport bug when 64 bits
compression = [zipfile.ZIP_STORED, zipfile.ZIP_DEFLATED][bool(compress)]
if not dry_run:
z = zipfile.ZipFile(zip_filename, mode, compression=compression)
for dirname, dirs, files in os.walk(base_dir):
visit(z, dirname, files)
z.close()
else:
for dirname, dirs, files in os.walk(base_dir):
visit(None, dirname, files)
return zip_filename
#
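# Hedged usage sketch (not part of the original file): the paths below are
# hypothetical; this simply calls make_zipfile directly to pack a staged egg
# build tree into a .egg archive, the same step run() performs above with the
# mode returned by gen_header().
def _demo_make_zipfile():
    return make_zipfile('dist/example-0.1-py2.7.egg',
                        'build/bdist.linux-x86_64/egg',
                        verbose=1, dry_run=0, compress=True, mode='w')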
|
mit
|
liu602348184/django
|
docs/conf.py
|
188
|
11919
|
# -*- coding: utf-8 -*-
#
# Django documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 27 09:06:53 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import unicode_literals
import sys
from os.path import abspath, dirname, join
# Make sure we get the version of this copy of Django
sys.path.insert(1, dirname(dirname(abspath(__file__))))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(abspath(join(dirname(__file__), "_ext")))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"djangodocs",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"ticket_role",
]
# Spelling check needs an additional module that is not installed by default.
# Add it only if spelling check is requested so docs can be generated without it.
if 'spelling' in sys.argv:
extensions.append("sphinxcontrib.spelling")
# Spelling language.
spelling_lang = 'en_US'
# Location of word list.
spelling_word_list_filename = 'spelling_wordlist'
# Add any paths that contain templates here, relative to this directory.
# templates_path = []
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General substitutions.
project = 'Django'
copyright = 'Django Software Foundation and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.9'
# The full version, including alpha/beta/rc tags.
try:
from django import VERSION, get_version
except ImportError:
release = version
else:
def django_release():
pep386ver = get_version()
if VERSION[3:5] == ('alpha', 0) and 'dev' not in pep386ver:
return pep386ver + '.dev'
return pep386ver
release = django_release()
# The "development version" of Django
django_next_version = '1.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# Location for .po/.mo translation files used when language is set
locale_dirs = ['locale/']
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'
# Links to Python's docs should reference the most recent version of the 3.x
# branch, which is located at this URL.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'sphinx': ('http://sphinx-doc.org/', None),
'six': ('http://pythonhosted.org/six/', None),
'formtools': ('http://django-formtools.readthedocs.org/en/latest/', None),
'psycopg2': ('http://initd.org/psycopg/docs/', None),
}
# Python's docs don't change every week.
intersphinx_cache_limit = 90 # days
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "djangodocs"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_theme"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# HTML translator class for the builder
html_translator_class = "djangodocs.DjangoHTMLTranslator"
# Content template for the index page.
# html_index = ''
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Djangodoc'
modindex_common_prefix = ["django."]
# Appended to every page
rst_epilog = """
.. |django-users| replace:: :ref:`django-users <django-users-mailing-list>`
.. |django-core-mentorship| replace:: :ref:`django-core-mentorship <django-core-mentorship-mailing-list>`
.. |django-developers| replace:: :ref:`django-developers <django-developers-mailing-list>`
.. |django-announce| replace:: :ref:`django-announce <django-announce-mailing-list>`
.. |django-updates| replace:: :ref:`django-updates <django-updates-mailing-list>`
"""
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
'preamble': ('\\DeclareUnicodeCharacter{2264}{\\ensuremath{\\le}}'
'\\DeclareUnicodeCharacter{2265}{\\ensuremath{\\ge}}')
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
# latex_documents = []
latex_documents = [
('contents', 'django.tex', 'Django Documentation',
'Django Software Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('ref/django-admin', 'django-admin', 'Utility script for the Django Web framework', ['Django Software Foundation'], 1),
]
# -- Options for Texinfo output ------------------------------------------------
# List of tuples (startdocname, targetname, title, author, dir_entry,
# description, category, toctree_only)
texinfo_documents = [(
master_doc, "django", "", "", "Django",
"Documentation of the Django framework", "Web development", False
)]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = 'Django Software Foundation'
epub_publisher = 'Django Software Foundation'
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = 'Django'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
epub_theme = 'djangodocs-epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
epub_cover = ('', 'epub-cover.html')
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
# -- ticket options ------------------------------------------------------------
ticket_url = 'https://code.djangoproject.com/ticket/%s'
|
bsd-3-clause
|
EclecticIQ/OpenTAXII
|
opentaxii/taxii/services/collection_management.py
|
2
|
2914
|
from libtaxii.constants import (
SVC_COLLECTION_MANAGEMENT,
MSG_COLLECTION_INFORMATION_REQUEST, MSG_FEED_INFORMATION_REQUEST,
MSG_MANAGE_COLLECTION_SUBSCRIPTION_REQUEST,
MSG_MANAGE_FEED_SUBSCRIPTION_REQUEST,
)
from .abstract import TAXIIService
from .handlers import (
CollectionInformationRequestHandler,
SubscriptionRequestHandler
)
class CollectionManagementService(TAXIIService):
handlers = {
MSG_COLLECTION_INFORMATION_REQUEST:
CollectionInformationRequestHandler,
MSG_FEED_INFORMATION_REQUEST:
CollectionInformationRequestHandler,
}
subscription_handlers = {
MSG_MANAGE_COLLECTION_SUBSCRIPTION_REQUEST:
SubscriptionRequestHandler,
MSG_MANAGE_FEED_SUBSCRIPTION_REQUEST:
SubscriptionRequestHandler
}
service_type = SVC_COLLECTION_MANAGEMENT
subscription_message = "Default subscription message"
subscription_supported = True
def __init__(self, subscription_supported=True, subscription_message=None,
**kwargs):
super(CollectionManagementService, self).__init__(**kwargs)
self.subscription_message = subscription_message
self.subscription_supported = subscription_supported
if self.subscription_supported:
self.handlers = dict(CollectionManagementService.handlers)
self.handlers.update(
CollectionManagementService.subscription_handlers)
@property
def advertised_collections(self):
return self.server.persistence.get_collections(self.id)
def get_collection(self, name):
return self.server.persistence.get_collection(name, self.id)
def get_push_methods(self, collection):
# Push delivery is not implemented
pass
def get_polling_services(self, collection):
return self.server.get_services_for_collection(collection, 'poll')
def get_subscription_services(self, collection):
services = []
all_services = self.server.get_services_for_collection(
collection, 'collection_management')
for s in all_services:
if s.subscription_supported:
services.append(s)
return services
def create_subscription(self, subscription):
subscription.subscription_id = self.generate_id()
return self.server.persistence.create_subscription(subscription)
def get_subscription(self, subscription_id):
return self.server.persistence.get_subscription(subscription_id)
def get_subscriptions(self):
return self.server.persistence.get_subscriptions(service_id=self.id)
def update_subscription(self, subscription):
return self.server.persistence.update_subscription(subscription)
def get_receiving_inbox_services(self, collection):
return self.server.get_services_for_collection(collection, 'inbox')
|
bsd-3-clause
|
bmanojlovic/ansible
|
lib/ansible/modules/cloud/vmware/vmware_dvs_portgroup.py
|
32
|
6998
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: vmware_dvs_portgroup
short_description: Create or remove a Distributed vSwitch portgroup
description:
- Create or remove a Distributed vSwitch portgroup
version_added: 2.0
author: "Joseph Callen (@jcpowermac)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
portgroup_name:
description:
- The name of the portgroup that is to be created or deleted
required: True
switch_name:
description:
- The name of the distributed vSwitch the port group should be created on.
required: True
vlan_id:
description:
- The VLAN ID that should be configured with the portgroup
required: True
num_ports:
description:
- The number of ports the portgroup should contain
required: True
portgroup_type:
description:
- See VMware KB 1022312 regarding portgroup types
required: True
choices:
- 'earlyBinding'
- 'lateBinding'
- 'ephemeral'
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Create Management portgroup
local_action:
module: vmware_dvs_portgroup
hostname: vcenter_ip_or_hostname
username: vcenter_username
password: vcenter_password
portgroup_name: Management
switch_name: dvSwitch
vlan_id: 123
num_ports: 120
portgroup_type: earlyBinding
state: present
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
class VMwareDvsPortgroup(object):
def __init__(self, module):
self.module = module
self.dvs_portgroup = None
self.switch_name = self.module.params['switch_name']
self.portgroup_name = self.module.params['portgroup_name']
self.vlan_id = self.module.params['vlan_id']
self.num_ports = self.module.params['num_ports']
self.portgroup_type = self.module.params['portgroup_type']
self.dv_switch = None
self.state = self.module.params['state']
self.content = connect_to_api(module)
def process_state(self):
try:
dvspg_states = {
'absent': {
'present': self.state_destroy_dvspg,
'absent': self.state_exit_unchanged,
},
'present': {
'update': self.state_update_dvspg,
'present': self.state_exit_unchanged,
'absent': self.state_create_dvspg,
}
}
dvspg_states[self.state][self.check_dvspg_state()]()
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
except Exception as e:
self.module.fail_json(msg=str(e))
def create_port_group(self):
config = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
config.name = self.portgroup_name
config.numPorts = self.num_ports
# vim.VMwareDVSPortSetting() does not exist in the pyvmomi documentation
# but this is the correct managed object type.
config.defaultPortConfig = vim.VMwareDVSPortSetting()
# vim.VmwareDistributedVirtualSwitchVlanIdSpec() does not exist in the
# pyvmomi documentation but this is the correct managed object type
config.defaultPortConfig.vlan = vim.VmwareDistributedVirtualSwitchVlanIdSpec()
config.defaultPortConfig.vlan.inherited = False
config.defaultPortConfig.vlan.vlanId = self.vlan_id
config.type = self.portgroup_type
spec = [config]
task = self.dv_switch.AddDVPortgroup_Task(spec)
changed, result = wait_for_task(task)
return changed, result
def state_destroy_dvspg(self):
changed = True
result = None
if not self.module.check_mode:
task = self.dvs_portgroup.Destroy_Task()
changed, result = wait_for_task(task)
self.module.exit_json(changed=changed, result=str(result))
def state_exit_unchanged(self):
self.module.exit_json(changed=False)
def state_update_dvspg(self):
self.module.exit_json(changed=False, msg="Currently not implemented.")
def state_create_dvspg(self):
changed = True
result = None
if not self.module.check_mode:
changed, result = self.create_port_group()
self.module.exit_json(changed=changed, result=str(result))
def check_dvspg_state(self):
self.dv_switch = find_dvs_by_name(self.content, self.switch_name)
if self.dv_switch is None:
raise Exception("A distributed virtual switch with name %s does not exist" % self.switch_name)
self.dvs_portgroup = find_dvspg_by_name(self.dv_switch, self.portgroup_name)
if self.dvs_portgroup is None:
return 'absent'
else:
return 'present'
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(portgroup_name=dict(required=True, type='str'),
switch_name=dict(required=True, type='str'),
vlan_id=dict(required=True, type='int'),
num_ports=dict(required=True, type='int'),
portgroup_type=dict(required=True, choices=['earlyBinding', 'lateBinding', 'ephemeral'], type='str'),
state=dict(default='present', choices=['present', 'absent'], type='str')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
vmware_dvs_portgroup = VMwareDvsPortgroup(module)
vmware_dvs_portgroup.process_state()
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
chand3040/cloud_that
|
lms/djangoapps/shoppingcart/models.py
|
16
|
89388
|
""" Models for the shopping cart and assorted purchase types """
from collections import namedtuple
from datetime import datetime
from datetime import timedelta
from decimal import Decimal
import json
import analytics
from io import BytesIO
from django.db.models import Q
import pytz
import logging
import smtplib
import StringIO
import csv
from boto.exception import BotoServerError # this is a super-class of SESError and catches connection errors
from django.dispatch import receiver
from django.db import models
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import send_mail
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _, ugettext_lazy
from django.db import transaction
from django.db.models import Sum, Count
from django.db.models.signals import post_save, post_delete
from django.core.urlresolvers import reverse
from model_utils.managers import InheritanceManager
from model_utils.models import TimeStampedModel
from django.core.mail.message import EmailMessage
from xmodule.modulestore.django import modulestore
from eventtracking import tracker
from courseware.courses import get_course_by_id
from config_models.models import ConfigurationModel
from course_modes.models import CourseMode
from edxmako.shortcuts import render_to_string
from student.models import CourseEnrollment, UNENROLL_DONE
from util.query import use_read_replica_if_available
from xmodule_django.models import CourseKeyField
from .exceptions import (
InvalidCartItem,
PurchasedCallbackException,
ItemAlreadyInCartException,
AlreadyEnrolledInCourseException,
CourseDoesNotExistException,
MultipleCouponsNotAllowedException,
InvalidStatusToRetire,
UnexpectedOrderItemStatus,
ItemNotFoundInCartException
)
from microsite_configuration import microsite
from shoppingcart.pdf import PDFInvoice
log = logging.getLogger("shoppingcart")
ORDER_STATUSES = (
# The user is selecting what he/she wants to purchase.
('cart', 'cart'),
# The user has been sent to the external payment processor.
# At this point, the order should NOT be modified.
# If the user returns to the payment flow, he/she will start a new order.
('paying', 'paying'),
# The user has successfully purchased the items in the order.
('purchased', 'purchased'),
# The user's order has been refunded.
('refunded', 'refunded'),
# The user's order went through, but the order was erroneously left
# in 'cart'.
('defunct-cart', 'defunct-cart'),
# The user's order went through, but the order was erroneously left
# in 'paying'.
('defunct-paying', 'defunct-paying'),
)
# maps order statuses to their defunct states
ORDER_STATUS_MAP = {
'cart': 'defunct-cart',
'paying': 'defunct-paying',
}
# we need a tuple to represent the primary key of various OrderItem subclasses
OrderItemSubclassPK = namedtuple('OrderItemSubclassPK', ['cls', 'pk']) # pylint: disable=invalid-name
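# Illustrative use (not from the original source): OrderItemSubclassPK(PaidCourseRegistration, 42)
# identifies primary key 42 on the PaidCourseRegistration subclass table.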
class OrderTypes(object):
"""
    This class specifies the purchase OrderTypes.
"""
PERSONAL = 'personal'
BUSINESS = 'business'
ORDER_TYPES = (
(PERSONAL, 'personal'),
(BUSINESS, 'business'),
)
class Order(models.Model):
"""
This is the model for an order. Before purchase, an Order and its related OrderItems are used
as the shopping cart.
FOR ANY USER, THERE SHOULD ONLY EVER BE ZERO OR ONE ORDER WITH STATUS='cart'.
"""
user = models.ForeignKey(User, db_index=True)
currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES)
purchase_time = models.DateTimeField(null=True, blank=True)
refunded_time = models.DateTimeField(null=True, blank=True)
# Now we store data needed to generate a reasonable receipt
# These fields only make sense after the purchase
bill_to_first = models.CharField(max_length=64, blank=True)
bill_to_last = models.CharField(max_length=64, blank=True)
bill_to_street1 = models.CharField(max_length=128, blank=True)
bill_to_street2 = models.CharField(max_length=128, blank=True)
bill_to_city = models.CharField(max_length=64, blank=True)
bill_to_state = models.CharField(max_length=8, blank=True)
bill_to_postalcode = models.CharField(max_length=16, blank=True)
bill_to_country = models.CharField(max_length=64, blank=True)
bill_to_ccnum = models.CharField(max_length=8, blank=True) # last 4 digits
bill_to_cardtype = models.CharField(max_length=32, blank=True)
# a JSON dump of the CC processor response, for completeness
processor_reply_dump = models.TextField(blank=True)
# bulk purchase registration code workflow billing details
company_name = models.CharField(max_length=255, null=True, blank=True)
company_contact_name = models.CharField(max_length=255, null=True, blank=True)
company_contact_email = models.CharField(max_length=255, null=True, blank=True)
recipient_name = models.CharField(max_length=255, null=True, blank=True)
recipient_email = models.CharField(max_length=255, null=True, blank=True)
customer_reference_number = models.CharField(max_length=63, null=True, blank=True)
order_type = models.CharField(max_length=32, default='personal', choices=OrderTypes.ORDER_TYPES)
@classmethod
def get_cart_for_user(cls, user):
"""
Always use this to preserve the property that at most 1 order per user has status = 'cart'
"""
# find the newest element in the db
try:
cart_order = cls.objects.filter(user=user, status='cart').order_by('-id')[:1].get()
except ObjectDoesNotExist:
# if nothing exists in the database, create a new cart
cart_order, _created = cls.objects.get_or_create(user=user, status='cart')
return cart_order
@classmethod
def does_user_have_cart(cls, user):
"""
Returns a boolean whether a shopping cart (Order) exists for the specified user
"""
return cls.objects.filter(user=user, status='cart').exists()
@classmethod
def user_cart_has_items(cls, user, item_types=None):
"""
        Returns True if the user (anonymous user ok) has
        a cart with items in it (which means it should be displayed).
        If item_types is passed in, then we check to see if the cart has at least one
        OrderItem of those types.
"""
if not user.is_authenticated():
return False
cart = cls.get_cart_for_user(user)
if not item_types:
# check to see if the cart has at least some item in it
return cart.has_items()
else:
# if the caller is explicitly asking to check for particular types
for item_type in item_types:
if cart.has_items(item_type):
return True
return False
@classmethod
def remove_cart_item_from_order(cls, item, user):
"""
Removes the item from the cart if the item.order.status == 'cart'.
Also removes any code redemption associated with the order_item
"""
if item.order.status == 'cart':
log.info("order item %s removed for user %s", str(item.id), user)
item.delete()
# remove any redemption entry associated with the item
CouponRedemption.remove_code_redemption_from_item(item, user)
@property
def total_cost(self):
"""
Return the total cost of the cart. If the order has been purchased, returns total of
all purchased and not refunded items.
"""
return sum(i.line_cost for i in self.orderitem_set.filter(status=self.status)) # pylint: disable=no-member
def has_items(self, item_type=None):
"""
Does the cart have any items in it?
If an item_type is passed in then we check to see if there are any items of that class type
"""
if not item_type:
return self.orderitem_set.exists() # pylint: disable=no-member
else:
items = self.orderitem_set.all().select_subclasses() # pylint: disable=no-member
for item in items:
if isinstance(item, item_type):
return True
return False
def reset_cart_items_prices(self):
"""
Reset the items price state in the user cart
"""
for item in self.orderitem_set.all(): # pylint: disable=no-member
if item.is_discounted:
item.unit_cost = item.list_price
item.save()
def clear(self):
"""
Clear out all the items in the cart
"""
self.orderitem_set.all().delete() # pylint: disable=no-member
@transaction.commit_on_success
def start_purchase(self):
"""
Start the purchase process. This will set the order status to "paying",
at which point it should no longer be modified.
Future calls to `Order.get_cart_for_user()` will filter out orders with
status "paying", effectively creating a new (empty) cart.
"""
if self.status == 'cart':
self.status = 'paying'
self.save()
for item in OrderItem.objects.filter(order=self).select_subclasses():
item.start_purchase()
def update_order_type(self):
"""
        Update the order type. This method will inspect the quantity associated with each OrderItem.
In the application, it is implied that when qty > 1, then the user is to purchase
'RegistrationCodes' which are randomly generated strings that users can distribute to
others in order for them to enroll in paywalled courses.
The UI/UX may change in the future to make the switching between PaidCourseRegistration
and CourseRegCodeItems a more explicit UI gesture from the purchaser
"""
cart_items = self.orderitem_set.all() # pylint: disable=no-member
is_order_type_business = False
for cart_item in cart_items:
if cart_item.qty > 1:
is_order_type_business = True
items_to_delete = []
old_to_new_id_map = []
if is_order_type_business:
for cart_item in cart_items:
if hasattr(cart_item, 'paidcourseregistration'):
course_reg_code_item = CourseRegCodeItem.add_to_order(self, cart_item.paidcourseregistration.course_id, cart_item.qty)
# update the discounted prices if coupon redemption applied
course_reg_code_item.list_price = cart_item.list_price
course_reg_code_item.unit_cost = cart_item.unit_cost
course_reg_code_item.save()
items_to_delete.append(cart_item)
old_to_new_id_map.append({"oldId": cart_item.id, "newId": course_reg_code_item.id})
else:
for cart_item in cart_items:
if hasattr(cart_item, 'courseregcodeitem'):
paid_course_registration = PaidCourseRegistration.add_to_order(self, cart_item.courseregcodeitem.course_id)
# update the discounted prices if coupon redemption applied
paid_course_registration.list_price = cart_item.list_price
paid_course_registration.unit_cost = cart_item.unit_cost
paid_course_registration.save()
items_to_delete.append(cart_item)
old_to_new_id_map.append({"oldId": cart_item.id, "newId": paid_course_registration.id})
for item in items_to_delete:
item.delete()
self.order_type = OrderTypes.BUSINESS if is_order_type_business else OrderTypes.PERSONAL
self.save()
return old_to_new_id_map
def generate_pdf_receipt(self, order_items):
"""
Generates the pdf receipt for the given order_items
and returns the pdf_buffer.
"""
items_data = []
for item in order_items:
item_total = item.qty * item.unit_cost
items_data.append({
'item_description': item.pdf_receipt_display_name,
'quantity': item.qty,
'list_price': item.get_list_price(),
'discount': item.get_list_price() - item.unit_cost,
'item_total': item_total
})
pdf_buffer = BytesIO()
PDFInvoice(
items_data=items_data,
item_id=str(self.id), # pylint: disable=no-member
date=self.purchase_time,
is_invoice=False,
total_cost=self.total_cost,
payment_received=self.total_cost,
balance=0
).generate_pdf(pdf_buffer)
return pdf_buffer
def generate_registration_codes_csv(self, orderitems, site_name):
"""
        This function generates the CSV file of registration codes and their redemption URLs.
"""
course_info = []
csv_file = StringIO.StringIO()
csv_writer = csv.writer(csv_file)
csv_writer.writerow(['Course Name', 'Registration Code', 'URL'])
for item in orderitems:
course_id = item.course_id
course = get_course_by_id(getattr(item, 'course_id'), depth=0)
registration_codes = CourseRegistrationCode.objects.filter(course_id=course_id, order=self)
course_info.append((course.display_name, ' (' + course.start_datetime_text() + '-' + course.end_datetime_text() + ')'))
for registration_code in registration_codes:
redemption_url = reverse('register_code_redemption', args=[registration_code.code])
url = '{base_url}{redemption_url}'.format(base_url=site_name, redemption_url=redemption_url)
csv_writer.writerow([unicode(course.display_name).encode("utf-8"), registration_code.code, url])
return csv_file, course_info
def send_confirmation_emails(self, orderitems, is_order_type_business, csv_file, pdf_file, site_name, courses_info):
"""
send confirmation e-mail
"""
recipient_list = [(self.user.username, getattr(self.user, 'email'), 'user')] # pylint: disable=no-member
if self.company_contact_email:
recipient_list.append((self.company_contact_name, self.company_contact_email, 'company_contact'))
joined_course_names = ""
if self.recipient_email:
recipient_list.append((self.recipient_name, self.recipient_email, 'email_recipient'))
courses_names_with_dates = [course_info[0] + course_info[1] for course_info in courses_info]
joined_course_names = " " + ", ".join(courses_names_with_dates)
if not is_order_type_business:
subject = _("Order Payment Confirmation")
else:
subject = _('Confirmation and Registration Codes for the following courses: {course_name_list}').format(
course_name_list=joined_course_names
)
dashboard_url = '{base_url}{dashboard}'.format(
base_url=site_name,
dashboard=reverse('dashboard')
)
try:
from_address = microsite.get_value(
'email_from_address',
settings.PAYMENT_SUPPORT_EMAIL
)
# Send a unique email for each recipient. Don't put all email addresses in a single email.
for recipient in recipient_list:
message = render_to_string(
'emails/business_order_confirmation_email.txt' if is_order_type_business else 'emails/order_confirmation_email.txt',
{
'order': self,
'recipient_name': recipient[0],
'recipient_type': recipient[2],
'site_name': site_name,
'order_items': orderitems,
'course_names': ", ".join([course_info[0] for course_info in courses_info]),
'dashboard_url': dashboard_url,
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'order_placed_by': '{username} ({email})'.format(username=self.user.username, email=getattr(self.user, 'email')), # pylint: disable=no-member
'has_billing_info': settings.FEATURES['STORE_BILLING_INFO'],
'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
'payment_support_email': microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL),
'payment_email_signature': microsite.get_value('payment_email_signature'),
}
)
email = EmailMessage(
subject=subject,
body=message,
from_email=from_address,
to=[recipient[1]]
)
# Only the business order is HTML formatted. A single seat order confirmation is plain text.
if is_order_type_business:
email.content_subtype = "html"
if csv_file:
email.attach(u'RegistrationCodesRedemptionUrls.csv', csv_file.getvalue(), 'text/csv')
if pdf_file is not None:
email.attach(u'Receipt.pdf', pdf_file.getvalue(), 'application/pdf')
else:
file_buffer = StringIO.StringIO(_('pdf download unavailable right now, please contact support.'))
email.attach(u'pdf_not_available.txt', file_buffer.getvalue(), 'text/plain')
email.send()
except (smtplib.SMTPException, BotoServerError): # sadly need to handle diff. mail backends individually
log.error('Failed sending confirmation e-mail for order %d', self.id) # pylint: disable=no-member
def purchase(self, first='', last='', street1='', street2='', city='', state='', postalcode='',
country='', ccnum='', cardtype='', processor_reply_dump=''):
"""
Call to mark this order as purchased. Iterates through its OrderItems and calls
their purchased_callback
`first` - first name of person billed (e.g. John)
`last` - last name of person billed (e.g. Smith)
`street1` - first line of a street address of the billing address (e.g. 11 Cambridge Center)
`street2` - second line of a street address of the billing address (e.g. Suite 101)
`city` - city of the billing address (e.g. Cambridge)
`state` - code of the state, province, or territory of the billing address (e.g. MA)
`postalcode` - postal code of the billing address (e.g. 02142)
`country` - country code of the billing address (e.g. US)
`ccnum` - last 4 digits of the credit card number of the credit card billed (e.g. 1111)
`cardtype` - 3-digit code representing the card type used (e.g. 001)
`processor_reply_dump` - all the parameters returned by the processor
"""
if self.status == 'purchased':
log.error(
u"`purchase` method called on order {}, but order is already purchased.".format(self.id) # pylint: disable=no-member
)
return
self.status = 'purchased'
self.purchase_time = datetime.now(pytz.utc)
self.bill_to_first = first
self.bill_to_last = last
self.bill_to_city = city
self.bill_to_state = state
self.bill_to_country = country
self.bill_to_postalcode = postalcode
if settings.FEATURES['STORE_BILLING_INFO']:
self.bill_to_street1 = street1
self.bill_to_street2 = street2
self.bill_to_ccnum = ccnum
self.bill_to_cardtype = cardtype
self.processor_reply_dump = processor_reply_dump
# save these changes on the order, then we can tell when we are in an
# inconsistent state
self.save()
# this should return all of the objects with the correct types of the
# subclasses
orderitems = OrderItem.objects.filter(order=self).select_subclasses()
site_name = microsite.get_value('SITE_NAME', settings.SITE_NAME)
if self.order_type == OrderTypes.BUSINESS:
self.update_order_type()
for item in orderitems:
item.purchase_item()
csv_file = None
courses_info = []
if self.order_type == OrderTypes.BUSINESS:
#
# Generate the CSV file that contains all of the RegistrationCodes that have already been
# generated when the purchase has transacted
#
csv_file, courses_info = self.generate_registration_codes_csv(orderitems, site_name)
try:
pdf_file = self.generate_pdf_receipt(orderitems)
except Exception: # pylint: disable=broad-except
log.exception('Exception at creating pdf file.')
pdf_file = None
try:
self.send_confirmation_emails(
orderitems, self.order_type == OrderTypes.BUSINESS,
csv_file, pdf_file, site_name, courses_info
)
except Exception: # pylint: disable=broad-except
# Catch all exceptions here, since the Django view implicitly
# wraps this in a transaction. If the order completes successfully,
# we don't want to roll back just because we couldn't send
# the confirmation email.
log.exception('Error occurred while sending payment confirmation email')
self._emit_order_event('Completed Order', orderitems)
def refund(self):
"""
Refund the given order. As of right now, this just marks the order as refunded.
"""
self.status = 'refunded'
self.save()
orderitems = OrderItem.objects.filter(order=self).select_subclasses()
self._emit_order_event('Refunded Order', orderitems)
def _emit_order_event(self, event_name, orderitems):
"""
Emit an analytics event with the given name for this Order. Will iterate over all associated
OrderItems and add them as products in the event as well.
"""
try:
if settings.FEATURES.get('SEGMENT_IO_LMS') and settings.SEGMENT_IO_LMS_KEY:
tracking_context = tracker.get_tracker().resolve_context()
analytics.track(self.user.id, event_name, { # pylint: disable=no-member
'orderId': self.id, # pylint: disable=no-member
'total': str(self.total_cost),
'currency': self.currency,
'products': [item.analytics_data() for item in orderitems]
}, context={
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
})
except Exception: # pylint: disable=broad-except
# Capturing all exceptions thrown while tracking analytics events. We do not want
# an operation to fail because of an analytics event, so we will capture these
# errors in the logs.
log.exception(
u'Unable to emit {event} event for user {user} and order {order}'.format(
event=event_name, user=self.user.id, order=self.id) # pylint: disable=no-member
)
def add_billing_details(self, company_name='', company_contact_name='', company_contact_email='', recipient_name='',
recipient_email='', customer_reference_number=''):
"""
This function is called after the user selects a purchase type of "Business" and
is asked to enter the optional billing details. The billing details are updated
for that order.
company_name - Name of purchasing organization
company_contact_name - Name of the key contact at the company the sale was made to
company_contact_email - Email of the key contact at the company the sale was made to
recipient_name - Name of the company should the invoice be sent to
recipient_email - Email of the company should the invoice be sent to
customer_reference_number - purchase order number of the organization associated with this Order
"""
self.company_name = company_name
self.company_contact_name = company_contact_name
self.company_contact_email = company_contact_email
self.recipient_name = recipient_name
self.recipient_email = recipient_email
self.customer_reference_number = customer_reference_number
self.save()
def generate_receipt_instructions(self):
"""
Call to generate specific instructions for each item in the order. This gets displayed on the receipt
page, typically. Instructions are something like "visit your dashboard to see your new courses".
This will return two things in a pair. The first will be a dict with keys=OrderItemSubclassPK corresponding
to an OrderItem and values=a set of html instructions they generate. The second will be a set of de-duped
html instructions
"""
instruction_set = set([]) # heh. not ia32 or alpha or sparc
instruction_dict = {}
order_items = OrderItem.objects.filter(order=self).select_subclasses()
for item in order_items:
item_pk_with_subclass, set_of_html = item.generate_receipt_instructions()
instruction_dict[item_pk_with_subclass] = set_of_html
instruction_set.update(set_of_html)
return instruction_dict, instruction_set
def retire(self):
"""
Method to "retire" orders that have gone through to the payment service
but have (erroneously) not had their statuses updated.
This method only works on orders that satisfy the following conditions:
1) the order status is either "cart" or "paying" (otherwise we raise
an InvalidStatusToRetire error)
2) the order's order item's statuses match the order's status (otherwise
we throw an UnexpectedOrderItemStatus error)
"""
# if an order is already retired, no-op:
if self.status in ORDER_STATUS_MAP.values():
return
if self.status not in ORDER_STATUS_MAP.keys():
raise InvalidStatusToRetire(
"order status {order_status} is not 'paying' or 'cart'".format(
order_status=self.status
)
)
for item in self.orderitem_set.all(): # pylint: disable=no-member
if item.status != self.status:
raise UnexpectedOrderItemStatus(
"order_item status is different from order status"
)
self.status = ORDER_STATUS_MAP[self.status]
self.save()
for item in self.orderitem_set.all(): # pylint: disable=no-member
item.retire()
def find_item_by_course_id(self, course_id):
"""
course_id: Course id of the item to find
Returns OrderItem from the Order given a course_id
Raises exception ItemNotFoundException when the item
having the given course_id is not present in the cart
"""
cart_items = OrderItem.objects.filter(order=self).select_subclasses()
found_items = []
for item in cart_items:
if getattr(item, 'course_id', None):
if item.course_id == course_id:
found_items.append(item)
if not found_items:
raise ItemNotFoundInCartException
return found_items
class OrderItem(TimeStampedModel):
"""
This is the basic interface for order items.
Order items are line items that fill up the shopping carts and orders.
Each implementation of OrderItem should provide its own purchased_callback as
a method.
"""
objects = InheritanceManager()
order = models.ForeignKey(Order, db_index=True)
# this is denormalized, but convenient for SQL queries for reports, etc. user should always be = order.user
user = models.ForeignKey(User, db_index=True)
# this is denormalized, but convenient for SQL queries for reports, etc. status should always be = order.status
status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES, db_index=True)
qty = models.IntegerField(default=1)
unit_cost = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
list_price = models.DecimalField(decimal_places=2, max_digits=30, null=True)
line_desc = models.CharField(default="Misc. Item", max_length=1024)
currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
fulfilled_time = models.DateTimeField(null=True, db_index=True)
refund_requested_time = models.DateTimeField(null=True, db_index=True)
service_fee = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
# general purpose field, not user-visible. Used for reporting
report_comments = models.TextField(default="")
@property
def line_cost(self):
""" Return the total cost of this OrderItem """
return self.qty * self.unit_cost
@classmethod
def add_to_order(cls, order, *args, **kwargs):
"""
A suggested convenience function for subclasses.
NOTE: This does not add anything to the cart. That is left up to the
subclasses to implement for themselves
"""
# this is a validation step to verify that the currency of the item we
# are adding is the same as the currency of the order we are adding it
# to
currency = kwargs.get('currency', 'usd')
if order.currency != currency and order.orderitem_set.exists():
raise InvalidCartItem(_("Trying to add a different currency into the cart"))
@transaction.commit_on_success
def purchase_item(self):
"""
This is basically a wrapper around purchased_callback that handles
modifying the OrderItem itself
"""
self.purchased_callback()
self.status = 'purchased'
self.fulfilled_time = datetime.now(pytz.utc)
self.save()
def start_purchase(self):
"""
Start the purchase process. This will set the order item status to "paying",
at which point it should no longer be modified.
"""
self.status = 'paying'
self.save()
def purchased_callback(self):
"""
This is called on each inventory item in the shopping cart when the
purchase goes through.
"""
raise NotImplementedError
def generate_receipt_instructions(self):
"""
This is called on each item in a purchased order to generate receipt instructions.
This should return a list of `ReceiptInstruction`s in HTML string
Default implementation is to return an empty set
"""
return self.pk_with_subclass, set([])
@property
def pk_with_subclass(self):
"""
Returns a named tuple that annotates the pk of this instance with its class, to fully represent
a pk of a subclass (inclusive) of OrderItem
"""
return OrderItemSubclassPK(type(self), self.pk)
@property
def is_discounted(self):
"""
        Returns True if a discount coupon has been applied to the OrderItem, and False otherwise.
        Earlier, the OrderItems were stored with an empty list_price if a discount had not been applied.
        Now we consider the item to be non-discounted if list_price is None or list_price == unit_cost;
        conversely, an item is discounted if list_price is non-None and differs from unit_cost.
This should work with both new and old records.
"""
return self.list_price and self.list_price != self.unit_cost
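    # Illustrative values for is_discounted (not from the original source): list_price=Decimal('50.00'),
    # unit_cost=Decimal('40.00') -> truthy; list_price=None or list_price == unit_cost -> falsy.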
def get_list_price(self):
"""
        Returns the list_price if it is defined, or the unit_cost if no discount has been applied.
"""
return self.list_price if self.list_price else self.unit_cost
@property
def single_item_receipt_template(self):
"""
The template that should be used when there's only one item in the order
"""
return 'shoppingcart/receipt.html'
@property
def single_item_receipt_context(self):
"""
Extra variables needed to render the template specified in
`single_item_receipt_template`
"""
return {}
def additional_instruction_text(self, **kwargs): # pylint: disable=unused-argument
"""
Individual instructions for this order item.
Currently, only used for emails.
"""
return ''
@property
def pdf_receipt_display_name(self):
"""
How to display this item on a PDF printed receipt file.
This can be overridden by the subclasses of OrderItem
"""
course_key = getattr(self, 'course_id', None)
if course_key:
course = get_course_by_id(course_key, depth=0)
return course.display_name
else:
raise Exception(
"Not Implemented. OrderItems that are not Course specific should have"
" a overridden pdf_receipt_display_name property"
)
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
The default implementation returns defaults for most attributes. When no name or
category is specified by the implementation, the string 'N/A' is placed for the
name and category. This should be handled appropriately by all implementations.
Returns
A dictionary containing analytics data for this OrderItem.
"""
return {
'id': self.id, # pylint: disable=no-member
'sku': type(self).__name__,
'name': 'N/A',
'price': str(self.unit_cost),
'quantity': self.qty,
'category': 'N/A',
}
def retire(self):
"""
Called by the `retire` method defined in the `Order` class. Retires
an order item if its (and its order's) status was erroneously not
updated to "purchased" after the order was processed.
"""
self.status = ORDER_STATUS_MAP[self.status]
self.save()
class Invoice(TimeStampedModel):
"""
This table capture all the information needed to support "invoicing"
which is when a user wants to purchase Registration Codes,
but will not do so via a Credit Card transaction.
"""
company_name = models.CharField(max_length=255, db_index=True)
company_contact_name = models.CharField(max_length=255)
company_contact_email = models.CharField(max_length=255)
recipient_name = models.CharField(max_length=255)
recipient_email = models.CharField(max_length=255)
address_line_1 = models.CharField(max_length=255)
address_line_2 = models.CharField(max_length=255, null=True, blank=True)
address_line_3 = models.CharField(max_length=255, null=True, blank=True)
city = models.CharField(max_length=255, null=True)
state = models.CharField(max_length=255, null=True)
zip = models.CharField(max_length=15, null=True)
country = models.CharField(max_length=64, null=True)
# This field has been deprecated.
# The total amount can now be calculated as the sum
# of each invoice item associated with the invoice.
# For backwards compatibility, this field is maintained
# and written to during invoice creation.
total_amount = models.FloatField()
# This field has been deprecated in order to support
# invoices for items that are not course-related.
# Although this field is still maintained for backwards
# compatibility, you should use CourseRegistrationCodeInvoiceItem
# to look up the course ID for purchased redeem codes.
course_id = CourseKeyField(max_length=255, db_index=True)
internal_reference = models.CharField(
max_length=255,
null=True,
blank=True,
help_text=ugettext_lazy("Internal reference code for this invoice.")
)
customer_reference_number = models.CharField(
max_length=63,
null=True,
blank=True,
help_text=ugettext_lazy("Customer's reference code for this invoice.")
)
is_valid = models.BooleanField(default=True)
@classmethod
def get_invoice_total_amount_for_course(cls, course_key):
"""
        Returns the total invoice amount generated for the given course.
"""
result = cls.objects.filter(course_id=course_key, is_valid=True).aggregate(total=Sum('total_amount')) # pylint: disable=no-member
total = result.get('total', 0)
return total if total else 0
def generate_pdf_invoice(self, course, course_price, quantity, sale_price):
"""
Generates the pdf invoice for the given course
and returns the pdf_buffer.
"""
discount_per_item = float(course_price) - sale_price / quantity
list_price = course_price - discount_per_item
items_data = [{
'item_description': course.display_name,
'quantity': quantity,
'list_price': list_price,
'discount': discount_per_item,
'item_total': quantity * list_price
}]
pdf_buffer = BytesIO()
PDFInvoice(
items_data=items_data,
item_id=str(self.id), # pylint: disable=no-member
date=datetime.now(pytz.utc),
is_invoice=True,
total_cost=float(self.total_amount),
payment_received=0,
balance=float(self.total_amount)
).generate_pdf(pdf_buffer)
return pdf_buffer
def snapshot(self):
"""Create a snapshot of the invoice.
A snapshot is a JSON-serializable representation
of the invoice's state, including its line items
and associated transactions (payments/refunds).
This is useful for saving the history of changes
to the invoice.
Returns:
dict
"""
return {
'internal_reference': self.internal_reference,
'customer_reference': self.customer_reference_number,
'is_valid': self.is_valid,
'contact_info': {
'company_name': self.company_name,
'company_contact_name': self.company_contact_name,
'company_contact_email': self.company_contact_email,
'recipient_name': self.recipient_name,
'recipient_email': self.recipient_email,
'address_line_1': self.address_line_1,
'address_line_2': self.address_line_2,
'address_line_3': self.address_line_3,
'city': self.city,
'state': self.state,
'zip': self.zip,
'country': self.country,
},
'items': [
item.snapshot()
for item in InvoiceItem.objects.filter(invoice=self).select_subclasses()
],
'transactions': [
trans.snapshot()
for trans in InvoiceTransaction.objects.filter(invoice=self)
],
}
def __unicode__(self):
label = (
unicode(self.internal_reference)
if self.internal_reference
else u"No label"
)
created = (
self.created.strftime("%Y-%m-%d") # pylint: disable=no-member
if self.created
else u"No date"
)
return u"{label} ({date_created})".format(
label=label, date_created=created
)
INVOICE_TRANSACTION_STATUSES = (
# A payment/refund is in process, but money has not yet been transferred
('started', 'started'),
# A payment/refund has completed successfully
# This should be set ONLY once money has been successfully exchanged.
('completed', 'completed'),
# A payment/refund was promised, but was cancelled before
# money had been transferred. An example would be
# cancelling a refund check before the recipient has
# a chance to deposit it.
('cancelled', 'cancelled')
)
class InvoiceTransaction(TimeStampedModel):
"""Record payment and refund information for invoices.
There are two expected use cases:
1) We send an invoice to someone, and they send us a check.
We then manually create an invoice transaction to represent
the payment.
2) We send an invoice to someone, and they pay us. Later, we
need to issue a refund for the payment. We manually
create a transaction with a negative amount to represent
the refund.
"""
invoice = models.ForeignKey(Invoice)
amount = models.DecimalField(
default=0.0, decimal_places=2, max_digits=30,
help_text=ugettext_lazy(
"The amount of the transaction. Use positive amounts for payments"
" and negative amounts for refunds."
)
)
currency = models.CharField(
default="usd",
max_length=8,
help_text=ugettext_lazy("Lower-case ISO currency codes")
)
comments = models.TextField(
null=True,
blank=True,
help_text=ugettext_lazy("Optional: provide additional information for this transaction")
)
status = models.CharField(
max_length=32,
default='started',
choices=INVOICE_TRANSACTION_STATUSES,
help_text=ugettext_lazy(
"The status of the payment or refund. "
"'started' means that payment is expected, but money has not yet been transferred. "
"'completed' means that the payment or refund was received. "
"'cancelled' means that payment or refund was expected, but was cancelled before money was transferred. "
)
)
created_by = models.ForeignKey(User)
last_modified_by = models.ForeignKey(User, related_name='last_modified_by_user')
@classmethod
def get_invoice_transaction(cls, invoice_id):
"""
        Returns the InvoiceTransaction object for the given invoice_id if found,
        else returns None.
"""
try:
return cls.objects.get(Q(invoice_id=invoice_id), Q(status='completed') | Q(status='refunded'))
except InvoiceTransaction.DoesNotExist:
return None
@classmethod
def get_total_amount_of_paid_course_invoices(cls, course_key):
"""
returns the total amount of the paid invoices.
"""
result = cls.objects.filter(amount__gt=0, invoice__course_id=course_key, status='completed').aggregate(
total=Sum('amount')
) # pylint: disable=no-member
total = result.get('total', 0)
return total if total else 0
def snapshot(self):
"""Create a snapshot of the invoice transaction.
The returned dictionary is JSON-serializable.
Returns:
dict
"""
return {
'amount': unicode(self.amount),
'currency': self.currency,
'comments': self.comments,
'status': self.status,
'created_by': self.created_by.username, # pylint: disable=no-member
'last_modified_by': self.last_modified_by.username # pylint: disable=no-member
}
class InvoiceItem(TimeStampedModel):
"""
This is the basic interface for invoice items.
Each invoice item represents a "line" in the invoice.
For example, in an invoice for course registration codes,
there might be an invoice item representing 10 registration
codes for the DemoX course.
"""
objects = InheritanceManager()
invoice = models.ForeignKey(Invoice, db_index=True)
qty = models.IntegerField(
default=1,
help_text=ugettext_lazy("The number of items sold.")
)
unit_price = models.DecimalField(
default=0.0,
decimal_places=2,
max_digits=30,
help_text=ugettext_lazy("The price per item sold, including discounts.")
)
currency = models.CharField(
default="usd",
max_length=8,
help_text=ugettext_lazy("Lower-case ISO currency codes")
)
def snapshot(self):
"""Create a snapshot of the invoice item.
The returned dictionary is JSON-serializable.
Returns:
dict
"""
return {
'qty': self.qty,
'unit_price': unicode(self.unit_price),
'currency': self.currency
}
class CourseRegistrationCodeInvoiceItem(InvoiceItem):
"""
This is an invoice item that represents a payment for
a course registration.
"""
course_id = CourseKeyField(max_length=128, db_index=True)
def snapshot(self):
"""Create a snapshot of the invoice item.
This is the same as a snapshot for other invoice items,
with the addition of a `course_id` field.
Returns:
dict
"""
snapshot = super(CourseRegistrationCodeInvoiceItem, self).snapshot()
snapshot['course_id'] = unicode(self.course_id)
return snapshot
class InvoiceHistory(models.Model):
"""History of changes to invoices.
This table stores snapshots of invoice state,
including the associated line items and transactions
(payments/refunds).
Entries in the table are created, but never deleted
or modified.
We use Django signals to save history entries on change
events. These signals are fired within a database
transaction, so the history record is created only
if the invoice change is successfully persisted.
"""
timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
invoice = models.ForeignKey(Invoice)
# JSON-serialized representation of the current state
# of the invoice, including its line items and
# transactions (payments/refunds).
snapshot = models.TextField(blank=True)
@classmethod
def save_invoice_snapshot(cls, invoice):
"""Save a snapshot of the invoice's current state.
Arguments:
invoice (Invoice): The invoice to save.
"""
cls.objects.create(
invoice=invoice,
snapshot=json.dumps(invoice.snapshot())
)
@staticmethod
def snapshot_receiver(sender, instance, **kwargs): # pylint: disable=unused-argument
"""Signal receiver that saves a snapshot of an invoice.
Arguments:
sender: Not used, but required by Django signals.
instance (Invoice, InvoiceItem, or InvoiceTransaction)
"""
if isinstance(instance, Invoice):
InvoiceHistory.save_invoice_snapshot(instance)
elif hasattr(instance, 'invoice'):
InvoiceHistory.save_invoice_snapshot(instance.invoice)
class Meta: # pylint: disable=missing-docstring,old-style-class
get_latest_by = "timestamp"
# Hook up Django signals to record changes in the history table.
# We record any change to an invoice, invoice item, or transaction.
# We also record any deletion of a transaction, since users can delete
# transactions via Django admin.
# Note that we need to include *each* InvoiceItem subclass
# here, since Django signals do not fire automatically for subclasses
# of the "sender" class.
post_save.connect(InvoiceHistory.snapshot_receiver, sender=Invoice)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceItem)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=CourseRegistrationCodeInvoiceItem)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceTransaction)
post_delete.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceTransaction)
class CourseRegistrationCode(models.Model):
"""
This table contains registration codes
    With a registration code, a user can register for a course for free
"""
code = models.CharField(max_length=32, db_index=True, unique=True)
course_id = CourseKeyField(max_length=255, db_index=True)
created_by = models.ForeignKey(User, related_name='created_by_user')
created_at = models.DateTimeField(default=datetime.now(pytz.utc))
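    # Note: datetime.now(pytz.utc) is evaluated once at import time, not per row; a callable
    # default would be needed to get a per-row creation timestamp.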
order = models.ForeignKey(Order, db_index=True, null=True, related_name="purchase_order")
mode_slug = models.CharField(max_length=100, null=True)
is_valid = models.BooleanField(default=True)
# For backwards compatibility, we maintain the FK to "invoice"
# In the future, we will remove this in favor of the FK
# to "invoice_item" (which can be used to look up the invoice).
invoice = models.ForeignKey(Invoice, null=True)
invoice_item = models.ForeignKey(CourseRegistrationCodeInvoiceItem, null=True)
@classmethod
def order_generated_registration_codes(cls, course_id):
"""
Returns the registration codes that were generated
via bulk purchase scenario.
"""
return cls.objects.filter(order__isnull=False, course_id=course_id)
@classmethod
def invoice_generated_registration_codes(cls, course_id):
"""
Returns the registration codes that were generated
via invoice.
"""
return cls.objects.filter(invoice__isnull=False, course_id=course_id)
class RegistrationCodeRedemption(models.Model):
"""
This model contains the registration-code redemption info
"""
order = models.ForeignKey(Order, db_index=True, null=True)
registration_code = models.ForeignKey(CourseRegistrationCode, db_index=True)
redeemed_by = models.ForeignKey(User, db_index=True)
redeemed_at = models.DateTimeField(default=datetime.now(pytz.utc), null=True)
course_enrollment = models.ForeignKey(CourseEnrollment, null=True)
@classmethod
def registration_code_used_for_enrollment(cls, course_enrollment):
"""
        Returns the RegistrationCodeRedemption object if a registration code
        was used for the course enrollment, else returns None.
"""
# theoretically there could be more than one (e.g. someone self-unenrolls
# then re-enrolls with a different regcode)
reg_codes = cls.objects.filter(course_enrollment=course_enrollment).order_by('-redeemed_at')
if reg_codes:
# return the first one. In all normal use cases of registration codes
# the user will only have one
return reg_codes[0]
return None
@classmethod
def is_registration_code_redeemed(cls, course_reg_code):
"""
Checks the existence of the registration code
in the RegistrationCodeRedemption
"""
return cls.objects.filter(registration_code__code=course_reg_code).exists()
@classmethod
def get_registration_code_redemption(cls, code, course_id):
"""
Returns the registration code redemption object if found else returns None.
"""
try:
code_redemption = cls.objects.get(registration_code__code=code, registration_code__course_id=course_id)
except cls.DoesNotExist:
code_redemption = None
return code_redemption
@classmethod
def create_invoice_generated_registration_redemption(cls, course_reg_code, user): # pylint: disable=invalid-name
"""
This function creates a RegistrationCodeRedemption entry in case the registration codes were invoice generated
and thus the order_id is missing.
"""
code_redemption = RegistrationCodeRedemption(registration_code=course_reg_code, redeemed_by=user)
code_redemption.save()
return code_redemption
class SoftDeleteCouponManager(models.Manager):
""" Use this manager to get objects that have a is_active=True """
def get_active_coupons_query_set(self):
"""
filter the is_active = True Coupons only
"""
return super(SoftDeleteCouponManager, self).get_query_set().filter(is_active=True)
def get_query_set(self):
"""
get all the coupon objects
"""
return super(SoftDeleteCouponManager, self).get_query_set()
class Coupon(models.Model):
"""
This table contains coupon codes
    A user can get a discount on a course by providing a coupon code
"""
code = models.CharField(max_length=32, db_index=True)
description = models.CharField(max_length=255, null=True, blank=True)
course_id = CourseKeyField(max_length=255)
percentage_discount = models.IntegerField(default=0)
created_by = models.ForeignKey(User)
created_at = models.DateTimeField(default=datetime.now(pytz.utc))
is_active = models.BooleanField(default=True)
expiration_date = models.DateTimeField(null=True, blank=True)
def __unicode__(self):
return "[Coupon] code: {} course: {}".format(self.code, self.course_id)
objects = SoftDeleteCouponManager()
@property
def display_expiry_date(self):
"""
return the coupon expiration date in the readable format
"""
return (self.expiration_date - timedelta(days=1)).strftime("%B %d, %Y") if self.expiration_date else None
class CouponRedemption(models.Model):
"""
    This table contains coupon redemption info
"""
order = models.ForeignKey(Order, db_index=True)
user = models.ForeignKey(User, db_index=True)
coupon = models.ForeignKey(Coupon, db_index=True)
@classmethod
def remove_code_redemption_from_item(cls, item, user):
"""
        If an item is removed from the shopping cart, then we also remove
        the corresponding coupon code redemption info.
"""
order_item_course_id = getattr(item, 'course_id')
try:
# Try to remove redemption information of coupon code, If exist.
coupon_redemption = cls.objects.get(
user=user,
coupon__course_id=order_item_course_id if order_item_course_id else CourseKeyField.Empty,
order=item.order_id
)
coupon_redemption.delete()
log.info(
u'Coupon "%s" redemption entry removed for user "%s" for order item "%s"',
coupon_redemption.coupon.code,
user,
str(item.id),
)
except CouponRedemption.DoesNotExist:
log.debug(u'Code redemption does not exist for order item id=%s.', str(item.id))
@classmethod
def remove_coupon_redemption_from_cart(cls, user, cart):
"""
        This method deletes the coupon redemption for the given user and cart.
"""
coupon_redemption = cls.objects.filter(user=user, order=cart)
if coupon_redemption:
coupon_redemption.delete()
log.info(u'Coupon redemption entry removed for user %s for order %s', user, cart.id)
@classmethod
def get_discount_price(cls, percentage_discount, value):
"""
        Return the discounted price after applying the coupon's percentage discount to the given value.
"""
discount = Decimal("{0:.2f}".format(Decimal(percentage_discount / 100.00) * value))
return value - discount
@classmethod
def add_coupon_redemption(cls, coupon, order, cart_items):
"""
add coupon info into coupon_redemption model
"""
is_redemption_applied = False
coupon_redemptions = cls.objects.filter(order=order, user=order.user)
for coupon_redemption in coupon_redemptions:
if coupon_redemption.coupon.code != coupon.code or coupon_redemption.coupon.id == coupon.id:
log.exception(
u"Coupon redemption already exist for user '%s' against order id '%s'",
order.user.username,
order.id,
)
raise MultipleCouponsNotAllowedException
for item in cart_items:
if getattr(item, 'course_id'):
if item.course_id == coupon.course_id:
coupon_redemption = cls(order=order, user=order.user, coupon=coupon)
coupon_redemption.save()
discount_price = cls.get_discount_price(coupon.percentage_discount, item.unit_cost)
item.list_price = item.unit_cost
item.unit_cost = discount_price
item.save()
log.info(
u"Discount generated for user %s against order id '%s'",
order.user.username,
order.id,
)
is_redemption_applied = True
return is_redemption_applied
return is_redemption_applied
@classmethod
def get_top_discount_codes_used(cls, course_id):
"""
Returns the top discount codes used.
QuerySet = [
{
'coupon__percentage_discount': 22,
'coupon__code': '12',
'coupon__used_count': '2',
},
{
...
}
]
"""
return cls.objects.filter(order__status='purchased', coupon__course_id=course_id).values(
'coupon__code', 'coupon__percentage_discount'
).annotate(coupon__used_count=Count('coupon__code')).order_by('-coupon__used_count')
@classmethod
def get_total_coupon_code_purchases(cls, course_id):
"""
        Returns the total number of seat purchases made using coupon codes.
"""
return cls.objects.filter(order__status='purchased', coupon__course_id=course_id).aggregate(Count('coupon'))
class PaidCourseRegistration(OrderItem):
"""
This is an inventory item for paying for a course registration
"""
course_id = CourseKeyField(max_length=128, db_index=True)
mode = models.SlugField(default=CourseMode.DEFAULT_MODE_SLUG)
course_enrollment = models.ForeignKey(CourseEnrollment, null=True)
@classmethod
def get_self_purchased_seat_count(cls, course_key, status='purchased'):
"""
        Returns the count of PaidCourseRegistration items filtered by course_id and status.
"""
return cls.objects.filter(course_id=course_key, status=status).count()
@classmethod
def get_course_item_for_user_enrollment(cls, user, course_id, course_enrollment):
"""
        Returns the PaidCourseRegistration object if the user has paid for
        the course enrollment, else returns None.
"""
try:
return cls.objects.filter(course_id=course_id, user=user, course_enrollment=course_enrollment,
status='purchased').latest('id')
except PaidCourseRegistration.DoesNotExist:
return None
@classmethod
def contained_in_order(cls, order, course_id):
"""
Is the course defined by course_id contained in the order?
"""
return course_id in [
item.course_id
for item in order.orderitem_set.all().select_subclasses("paidcourseregistration")
if isinstance(item, cls)
]
@classmethod
def get_total_amount_of_purchased_item(cls, course_key, status='purchased'):
"""
This will return the total amount of money that a purchased course generated
"""
total_cost = 0
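        # The `field` argument overrides the summed SQL expression, so the database totals
        # qty * unit_cost per row rather than unit_cost alone (an older Django aggregation idiom).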
result = cls.objects.filter(course_id=course_key, status=status).aggregate(
total=Sum('unit_cost', field='qty * unit_cost')
) # pylint: disable=no-member
if result['total'] is not None:
total_cost = result['total']
return total_cost
@classmethod
@transaction.commit_on_success
def add_to_order(cls, order, course_id, mode_slug=CourseMode.DEFAULT_MODE_SLUG, cost=None, currency=None):
"""
A standardized way to create these objects, with sensible defaults filled in.
Will update the cost if called on an order that already carries the course.
Returns the order item
"""
# First a bunch of sanity checks:
# actually fetch the course to make sure it exists, use this to
# throw errors if it doesn't.
course = modulestore().get_course(course_id)
if not course:
log.error("User {} tried to add non-existent course {} to cart id {}"
.format(order.user.email, course_id, order.id))
raise CourseDoesNotExistException
if cls.contained_in_order(order, course_id):
log.warning(
u"User %s tried to add PaidCourseRegistration for course %s, already in cart id %s",
order.user.email,
course_id,
order.id,
)
raise ItemAlreadyInCartException
if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
log.warning("User {} trying to add course {} to cart id {}, already registered"
.format(order.user.email, course_id, order.id))
raise AlreadyEnrolledInCourseException
### Validations done, now proceed
### handle default arguments for mode_slug, cost, currency
course_mode = CourseMode.mode_for_course(course_id, mode_slug)
if not course_mode:
# user could have specified a mode that's not set, in that case return the DEFAULT_MODE
course_mode = CourseMode.DEFAULT_MODE
if not cost:
cost = course_mode.min_price
if not currency:
currency = course_mode.currency
super(PaidCourseRegistration, cls).add_to_order(order, course_id, cost, currency=currency)
item, created = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id)
item.status = order.status
item.mode = course_mode.slug
item.qty = 1
item.unit_cost = cost
item.list_price = cost
item.line_desc = _(u'Registration for Course: {course_name}').format(
course_name=course.display_name_with_default)
item.currency = currency
order.currency = currency
item.report_comments = item.csv_report_comments
order.save()
item.save()
log.info("User {} added course registration {} to cart: order {}"
.format(order.user.email, course_id, order.id))
return item
def purchased_callback(self):
"""
When purchased, this should enroll the user in the course. We are assuming that
course settings for enrollment date are configured such that only if the (user.email, course_id) pair is found
in CourseEnrollmentAllowed will the user be allowed to enroll. Otherwise requiring payment
would in fact be quite silly since there's a clear back door.
"""
if not modulestore().has_course(self.course_id):
msg = u"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id)
log.error(msg)
raise PurchasedCallbackException(msg)
# enroll in course and link to the enrollment_id
self.course_enrollment = CourseEnrollment.enroll(user=self.user, course_key=self.course_id, mode=self.mode)
self.save()
log.info("Enrolled {0} in paid course {1}, paid ${2}"
.format(self.user.email, self.course_id, self.line_cost)) # pylint: disable=no-member
def generate_receipt_instructions(self):
"""
Generates instructions when the user has purchased a PaidCourseRegistration.
Basically tells the user to visit the dashboard to see their new classes
"""
notification = _(
u"Please visit your {link_start}dashboard{link_end} "
u"to see your new course."
).format(
link_start=u'<a href="{url}">'.format(url=reverse('dashboard')),
link_end=u'</a>',
)
return self.pk_with_subclass, set([notification])
@property
def csv_report_comments(self):
"""
Tries to fetch an annotation associated with the course_id from the database. If not found, returns u"".
Otherwise returns the annotation
"""
try:
return PaidCourseRegistrationAnnotation.objects.get(course_id=self.course_id).annotation
except PaidCourseRegistrationAnnotation.DoesNotExist:
return u""
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the Order Item is associated with a course, additional fields will be populated with
course information. If there is a mode associated, the mode data is included in the SKU.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(PaidCourseRegistration, self).analytics_data()
sku = data['sku']
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org) # pylint: disable=no-member
if self.mode:
data['sku'] = sku + u'.' + unicode(self.mode)
return data
class CourseRegCodeItem(OrderItem):
"""
This is an inventory item for paying for
generating course registration codes
"""
course_id = CourseKeyField(max_length=128, db_index=True)
mode = models.SlugField(default=CourseMode.DEFAULT_MODE_SLUG)
@classmethod
def get_bulk_purchased_seat_count(cls, course_key, status='purchased'):
"""
        Returns the sum of bulk-purchased seats.
"""
total = 0
result = cls.objects.filter(course_id=course_key, status=status).aggregate(total=Sum('qty'))
if result['total'] is not None:
total = result['total']
return total
@classmethod
def contained_in_order(cls, order, course_id):
"""
Is the course defined by course_id contained in the order?
"""
return course_id in [
item.course_id
for item in order.orderitem_set.all().select_subclasses("courseregcodeitem")
if isinstance(item, cls)
]
@classmethod
def get_total_amount_of_purchased_item(cls, course_key, status='purchased'):
"""
This will return the total amount of money that a purchased course generated
"""
total_cost = 0
result = cls.objects.filter(course_id=course_key, status=status).aggregate(
total=Sum('unit_cost', field='qty * unit_cost')
) # pylint: disable=no-member
if result['total'] is not None:
total_cost = result['total']
return total_cost
@classmethod
@transaction.commit_on_success
def add_to_order(cls, order, course_id, qty, mode_slug=CourseMode.DEFAULT_MODE_SLUG, cost=None, currency=None): # pylint: disable=arguments-differ
"""
A standardized way to create these objects, with sensible defaults filled in.
Will update the cost if called on an order that already carries the course.
Returns the order item
"""
# First a bunch of sanity checks:
# actually fetch the course to make sure it exists, use this to
# throw errors if it doesn't.
course = modulestore().get_course(course_id)
if not course:
log.error("User {} tried to add non-existent course {} to cart id {}"
.format(order.user.email, course_id, order.id))
raise CourseDoesNotExistException
if cls.contained_in_order(order, course_id):
log.warning("User {} tried to add PaidCourseRegistration for course {}, already in cart id {}"
.format(order.user.email, course_id, order.id))
raise ItemAlreadyInCartException
if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
log.warning("User {} trying to add course {} to cart id {}, already registered"
.format(order.user.email, course_id, order.id))
raise AlreadyEnrolledInCourseException
### Validations done, now proceed
### handle default arguments for mode_slug, cost, currency
course_mode = CourseMode.mode_for_course(course_id, mode_slug)
if not course_mode:
# user could have specified a mode that's not set, in that case return the DEFAULT_MODE
course_mode = CourseMode.DEFAULT_MODE
if not cost:
cost = course_mode.min_price
if not currency:
currency = course_mode.currency
super(CourseRegCodeItem, cls).add_to_order(order, course_id, cost, currency=currency)
item, created = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id) # pylint: disable=unused-variable
item.status = order.status
item.mode = course_mode.slug
item.unit_cost = cost
item.list_price = cost
item.qty = qty
item.line_desc = _(u'Enrollment codes for Course: {course_name}').format(
course_name=course.display_name_with_default)
item.currency = currency
order.currency = currency
item.report_comments = item.csv_report_comments
order.save()
item.save()
log.info("User {} added course registration {} to cart: order {}"
.format(order.user.email, course_id, order.id))
return item
def purchased_callback(self):
"""
The purchase is completed, this OrderItem type will generate Registration Codes that will
be redeemed by users
"""
if not modulestore().has_course(self.course_id):
msg = u"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id)
log.error(msg)
raise PurchasedCallbackException(msg)
total_registration_codes = int(self.qty)
# we need to import here because of a circular dependency
# we should ultimately refactor code to have save_registration_code in this models.py
# file, but there's also a shared dependency on a random string generator which
# is in another PR (for another feature)
from instructor.views.api import save_registration_code
for i in range(total_registration_codes): # pylint: disable=unused-variable
save_registration_code(self.user, self.course_id, self.mode, order=self.order)
log.info("Enrolled {0} in paid course {1}, paid ${2}"
.format(self.user.email, self.course_id, self.line_cost)) # pylint: disable=no-member
@property
def csv_report_comments(self):
"""
Tries to fetch an annotation associated with the course_id from the database. If not found, returns u"".
Otherwise returns the annotation
"""
try:
return CourseRegCodeItemAnnotation.objects.get(course_id=self.course_id).annotation
except CourseRegCodeItemAnnotation.DoesNotExist:
return u""
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the OrderItem is associated with a course, additional fields will be populated with
course information. If a mode is available, it will be included in the SKU.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(CourseRegCodeItem, self).analytics_data()
sku = data['sku']
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org) # pylint: disable=no-member
if self.mode:
data['sku'] = sku + u'.' + unicode(self.mode)
return data
class CourseRegCodeItemAnnotation(models.Model):
"""
A model that maps course_id to an additional annotation. This is specifically needed because when Stanford
generates reports for the paid courses, each report item must contain the payment account associated with a course.
And unfortunately we didn't have the concept of a "SKU" or stock item where we could keep this association,
so this is to retrofit it.
"""
course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
annotation = models.TextField(null=True)
def __unicode__(self):
# pylint: disable=no-member
return u"{} : {}".format(self.course_id.to_deprecated_string(), self.annotation)
class PaidCourseRegistrationAnnotation(models.Model):
"""
A model that maps course_id to an additional annotation. This is specifically needed because when Stanford
generates reports for the paid courses, each report item must contain the payment account associated with a course.
And unfortunately we didn't have the concept of a "SKU" or stock item where we could keep this association,
so this is to retrofit it.
"""
course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
annotation = models.TextField(null=True)
def __unicode__(self):
# pylint: disable=no-member
return u"{} : {}".format(self.course_id.to_deprecated_string(), self.annotation)
class CertificateItem(OrderItem):
"""
This is an inventory item for purchasing certificates
"""
course_id = CourseKeyField(max_length=128, db_index=True)
course_enrollment = models.ForeignKey(CourseEnrollment)
mode = models.SlugField()
@receiver(UNENROLL_DONE)
def refund_cert_callback(sender, course_enrollment=None, skip_refund=False, **kwargs): # pylint: disable=no-self-argument,unused-argument
"""
When a CourseEnrollment object calls its unenroll method, this function checks to see if that unenrollment
occurred in a verified certificate that was within the refund deadline. If so, it actually performs the
refund.
Returns the refunded certificate on a successful refund; else, it returns nothing.
"""
# Only refund verified cert unenrollments that are within bounds of the expiration date
if (not course_enrollment.refundable()) or skip_refund:
return
target_certs = CertificateItem.objects.filter(course_id=course_enrollment.course_id, user_id=course_enrollment.user, status='purchased', mode='verified')
try:
target_cert = target_certs[0]
except IndexError:
log.error(
u"Matching CertificateItem not found while trying to refund. User %s, Course %s",
course_enrollment.user,
course_enrollment.course_id,
)
return
target_cert.status = 'refunded'
target_cert.refund_requested_time = datetime.now(pytz.utc)
target_cert.save()
target_cert.order.refund()
order_number = target_cert.order_id
# send billing an email so they can handle refunding
subject = _("[Refund] User-Requested Refund")
message = "User {user} ({user_email}) has requested a refund on Order #{order_number}.".format(user=course_enrollment.user,
user_email=course_enrollment.user.email,
order_number=order_number)
to_email = [settings.PAYMENT_SUPPORT_EMAIL]
from_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
try:
send_mail(subject, message, from_email, to_email, fail_silently=False)
except Exception as exception: # pylint: disable=broad-except
err_str = ('Failed sending email to billing to request a refund for verified certificate'
' (User {user}, Course {course}, CourseEnrollmentID {ce_id}, Order #{order})\n{exception}')
log.error(err_str.format(
user=course_enrollment.user,
course=course_enrollment.course_id,
ce_id=course_enrollment.id,
order=order_number,
exception=exception,
))
return target_cert
@classmethod
@transaction.commit_on_success
def add_to_order(cls, order, course_id, cost, mode, currency='usd'):
"""
Add a CertificateItem to an order
Returns the CertificateItem object after saving
`order` - an order that this item should be added to, generally the cart order
`course_id` - the course that we would like to purchase as a CertificateItem
`cost` - the amount the user will be paying for this CertificateItem
`mode` - the course mode that this certificate is going to be issued for
This item also creates a new enrollment if none exists for this user and this course.
Example Usage:
cart = Order.get_cart_for_user(user)
CertificateItem.add_to_order(cart, 'edX/Test101/2013_Fall', 30, 'verified')
"""
super(CertificateItem, cls).add_to_order(order, course_id, cost, currency=currency)
course_enrollment = CourseEnrollment.get_or_create_enrollment(order.user, course_id)
# do some validation on the enrollment mode
valid_modes = CourseMode.modes_for_course_dict(course_id)
if mode in valid_modes:
mode_info = valid_modes[mode]
else:
msg = u"Mode {mode} does not exist for {course_id}".format(mode=mode, course_id=course_id)
log.error(msg)
raise InvalidCartItem(
_(u"Mode {mode} does not exist for {course_id}").format(mode=mode, course_id=course_id)
)
item, _created = cls.objects.get_or_create(
order=order,
user=order.user,
course_id=course_id,
course_enrollment=course_enrollment,
mode=mode,
)
item.status = order.status
item.qty = 1
item.unit_cost = cost
item.list_price = cost
course_name = modulestore().get_course(course_id).display_name
# Translators: In this particular case, mode_name refers to a
# particular mode (i.e. Honor Code Certificate, Verified Certificate, etc)
# by which a user could enroll in the given course.
item.line_desc = _("{mode_name} for course {course}").format(
mode_name=mode_info.name,
course=course_name
)
item.currency = currency
order.currency = currency
order.save()
item.save()
return item
def purchased_callback(self):
"""
When purchase goes through, activate and update the course enrollment for the correct mode
"""
self.course_enrollment.change_mode(self.mode)
self.course_enrollment.activate()
def additional_instruction_text(self):
verification_reminder = ""
is_enrollment_mode_verified = self.course_enrollment.is_verified_enrollment() # pylint: disable=E1101
if is_enrollment_mode_verified:
domain = microsite.get_value('SITE_NAME', settings.SITE_NAME)
path = reverse('verify_student_verify_now', kwargs={'course_id': unicode(self.course_id)})
verification_url = "http://{domain}{path}".format(domain=domain, path=path)
verification_reminder = _(
"If you haven't verified your identity yet, please start the verification process ({verification_url})."
).format(verification_url=verification_url)
refund_reminder = _(
"You have up to two weeks into the course to unenroll and receive a full refund."
"To receive your refund, contact {billing_email}. "
"Please include your order number in your email. "
"Please do NOT include your credit card information."
).format(
billing_email=settings.PAYMENT_SUPPORT_EMAIL
)
# Need this to be unicode in case the reminder strings
# have been translated and contain non-ASCII unicode
return u"{verification_reminder} {refund_reminder}".format(
verification_reminder=verification_reminder,
refund_reminder=refund_reminder
)
@classmethod
def verified_certificates_count(cls, course_id, status):
"""Return a queryset of CertificateItem for every verified enrollment in course_id with the given status."""
return use_read_replica_if_available(
CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status).count())
# TODO combine these three methods into one
@classmethod
def verified_certificates_monetary_field_sum(cls, course_id, status, field_to_aggregate):
"""
Returns a Decimal indicating the total sum of field_to_aggregate for all verified certificates with a particular status.
Sample usages:
- status 'refunded' and field_to_aggregate 'unit_cost' will give the total amount of money refunded for course_id
- status 'purchased' and field_to_aggregate 'service_fees' gives the sum of all service fees for purchased certificates
etc
"""
query = use_read_replica_if_available(
CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status)).aggregate(Sum(field_to_aggregate))[field_to_aggregate + '__sum']
if query is None:
return Decimal(0.00)
else:
return query
@classmethod
def verified_certificates_contributing_more_than_minimum(cls, course_id):
return use_read_replica_if_available(
CertificateItem.objects.filter(
course_id=course_id,
mode='verified',
status='purchased',
unit_cost__gt=(CourseMode.min_course_price_for_verified_for_currency(course_id, 'usd')))).count()
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the CertificateItem is associated with a course, additional fields will be populated with
course information. If there is a mode associated with the certificate, it is included in the SKU.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(CertificateItem, self).analytics_data()
sku = data['sku']
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org) # pylint: disable=no-member
if self.mode:
data['sku'] = sku + u'.' + unicode(self.mode)
return data
class DonationConfiguration(ConfigurationModel):
"""Configure whether donations are enabled on the site."""
pass
class Donation(OrderItem):
"""A donation made by a user.
Donations can be made for a specific course or to the organization as a whole.
Users can choose the donation amount.
"""
# Types of donations
DONATION_TYPES = (
("general", "A general donation"),
("course", "A donation to a particular course")
)
# The type of donation
donation_type = models.CharField(max_length=32, default="general", choices=DONATION_TYPES)
# If a donation is made for a specific course, then store the course ID here.
# If the donation is made to the organization as a whole,
# set this field to CourseKeyField.Empty
course_id = CourseKeyField(max_length=255, db_index=True)
@classmethod
@transaction.commit_on_success
def add_to_order(cls, order, donation_amount, course_id=None, currency='usd'):
"""Add a donation to an order.
Args:
order (Order): The order to add this donation to.
donation_amount (Decimal): The amount the user is donating.
Keyword Args:
course_id (CourseKey): If provided, associate this donation with a particular course.
currency (str): The currency used for the donation.
Raises:
InvalidCartItem: The provided course ID is not valid.
Returns:
Donation
"""
# This will validate the currency but won't actually add the item to the order.
super(Donation, cls).add_to_order(order, currency=currency)
# Create a line item description, including the name of the course
# if this is a per-course donation.
# This will raise an exception if the course can't be found.
description = cls._line_item_description(course_id=course_id)
params = {
"order": order,
"user": order.user,
"status": order.status,
"qty": 1,
"unit_cost": donation_amount,
"currency": currency,
"line_desc": description
}
if course_id is not None:
params["course_id"] = course_id
params["donation_type"] = "course"
else:
params["donation_type"] = "general"
return cls.objects.create(**params)
def purchased_callback(self):
"""Donations do not need to be fulfilled, so this method does nothing."""
pass
def generate_receipt_instructions(self):
"""Provide information about tax-deductible donations in the receipt.
Returns:
tuple of (Donation, unicode)
"""
return self.pk_with_subclass, set([self._tax_deduction_msg()])
def additional_instruction_text(self, **kwargs): # pylint: disable=unused-argument
"""Provide information about tax-deductible donations in the confirmation email.
Returns:
unicode
"""
return self._tax_deduction_msg()
def _tax_deduction_msg(self):
"""Return the translated version of the tax deduction message.
Returns:
unicode
"""
return _(
u"We greatly appreciate this generous contribution and your support of the {platform_name} mission. "
u"This receipt was prepared to support charitable contributions for tax purposes. "
u"We confirm that neither goods nor services were provided in exchange for this gift."
).format(platform_name=settings.PLATFORM_NAME)
@classmethod
def _line_item_description(cls, course_id=None):
"""Create a line-item description for the donation.
Includes the course display name if provided.
Keyword Arguments:
course_id (CourseKey)
Raises:
CourseDoesNotExistException: The course ID is not valid.
Returns:
unicode
"""
# If a course ID is provided, include the display name of the course
# in the line item description.
if course_id is not None:
course = modulestore().get_course(course_id)
if course is None:
msg = u"Could not find a course with the ID '{course_id}'".format(course_id=course_id)
log.error(msg)
raise CourseDoesNotExistException(
_(u"Could not find a course with the ID '{course_id}'").format(course_id=course_id)
)
return _(u"Donation for {course}").format(course=course.display_name)
# The donation is for the organization as a whole, not a specific course
else:
return _(u"Donation for {platform_name}").format(platform_name=settings.PLATFORM_NAME)
@property
def single_item_receipt_context(self):
return {
'receipt_has_donation_item': True,
}
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the donation is associated with a course, additional fields will be populated with
course information. When no name or category is specified by the implementation, the
platform name is used as a default value for required event fields, to declare that
the Order is specific to the platform, rather than a specific product name or category.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(Donation, self).analytics_data()
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org) # pylint: disable=no-member
else:
data['name'] = settings.PLATFORM_NAME
data['category'] = settings.PLATFORM_NAME
return data
@property
def pdf_receipt_display_name(self):
"""
How to display this item on a PDF printed receipt file.
"""
return self._line_item_description(course_id=self.course_id)
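# ---------------------------------------------------------------------------
# Illustrative, self-contained sketch -- not part of the original edX module.
# It mirrors the analytics_data() convention shared by the item classes above:
# start from the base item's data and, when a course and mode are known,
# override name/category and append the mode to the SKU. All values below are
# simplified stand-ins, not real platform data.
def _example_analytics_data(base_data, course_id=None, org=None, mode=None):
    """Mimic the course-aware analytics_data() pattern used by the items above."""
    data = dict(base_data)
    if course_id:
        data['name'] = course_id                   # e.g. u'edX/Test101/2013_Fall'
        data['category'] = org                     # e.g. u'edX'
    if mode:
        data['sku'] = data['sku'] + u'.' + mode    # e.g. u'CertificateItem.verified'
    return data
# _example_analytics_data({'sku': u'CertificateItem'},
#                         course_id=u'edX/Test101/2013_Fall', org=u'edX',
#                         mode=u'verified')
# -> {'sku': u'CertificateItem.verified', 'name': u'edX/Test101/2013_Fall', 'category': u'edX'}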
|
agpl-3.0
|
djeo94/CouchPotatoServer
|
couchpotato/core/notifications/nmj.py
|
75
|
4379
|
import re
import telnetlib
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
log = CPLog(__name__)
autoload = 'NMJ'
class NMJ(Notification):
# noinspection PyMissingConstructor
def __init__(self):
addApiView(self.testNotifyName(), self.test)
addApiView('notify.nmj.auto_config', self.autoConfig)
addEvent('renamer.after', self.addToLibrary)
def autoConfig(self, host = 'localhost', **kwargs):
mount = ''
try:
terminal = telnetlib.Telnet(host)
except Exception:
log.error('Warning: unable to get a telnet session to %s', host)
return self.failed()
log.debug('Connected to %s via telnet', host)
terminal.read_until('sh-3.00# ')
terminal.write('cat /tmp/source\n')
terminal.write('cat /tmp/netshare\n')
terminal.write('exit\n')
tnoutput = terminal.read_all()
match = re.search(r'(.+\.db)\r\n?(.+)(?=sh-3.00# cat /tmp/netshare)', tnoutput)
if match:
database = match.group(1)
device = match.group(2)
log.info('Found NMJ database %s on device %s', (database, device))
else:
log.error('Could not get current NMJ database on %s, NMJ is probably not running!', host)
return self.failed()
if device.startswith('NETWORK_SHARE/'):
match = re.search('.*(?=\r\n?%s)' % (re.escape(device[14:])), tnoutput)
if match:
mount = match.group().replace('127.0.0.1', host)
log.info('Found mounting url on the Popcorn Hour in configuration: %s', mount)
else:
log.error('Detected a network share on the Popcorn Hour, but could not get the mounting url')
return self.failed()
return {
'success': True,
'database': database,
'mount': mount,
}
def addToLibrary(self, message = None, group = None):
if self.isDisabled(): return
if not group: group = {}
host = self.conf('host')
mount = self.conf('mount')
database = self.conf('database')
if mount:
log.debug('Try to mount network drive via url: %s', mount)
try:
self.urlopen(mount)
except:
return False
params = {
'arg0': 'scanner_start',
'arg1': database,
'arg2': 'background',
'arg3': '',
}
params = tryUrlencode(params)
update_url = 'http://%(host)s:8008/metadata_database?%(params)s' % {'host': host, 'params': params}
try:
response = self.urlopen(update_url)
except:
return False
try:
et = etree.fromstring(response)
result = et.findtext('returnValue')
except SyntaxError as e:
log.error('Unable to parse XML returned from the Popcorn Hour: %s', e)
return False
if int(result) > 0:
log.error('Popcorn Hour returned an errorcode: %s', result)
return False
else:
log.info('NMJ started background scan')
return True
def failed(self):
return {
'success': False
}
def test(self, **kwargs):
return {
'success': self.addToLibrary()
}
config = [{
'name': 'nmj',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'nmj',
'label': 'NMJ',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'host',
'default': 'localhost',
},
{
'name': 'database',
},
{
'name': 'mount',
},
],
}
],
}]
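# ---------------------------------------------------------------------------
# Illustrative, self-contained sketch -- not part of the original plugin. It
# shows how the regular expression in autoConfig() extracts the NMJ database
# path and the storage device from the captured telnet output. The transcript
# below is made up for demonstration only.
if __name__ == '__main__':
    sample_output = (
        '/opt/sybhttpd/localhost.drives/HARD_DISK/nmj_database/media.db\r\n'
        'NETWORK_SHARE/MOVIES'
        'sh-3.00# cat /tmp/netshare\n'
        '...\n'
    )
    found = re.search(r'(.+\.db)\r\n?(.+)(?=sh-3.00# cat /tmp/netshare)', sample_output)
    if found:
        print(found.group(1))  # database path: .../nmj_database/media.db
        print(found.group(2))  # device: NETWORK_SHARE/MOVIES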
|
gpl-3.0
|
42cc/p2psafety
|
p2psafety_django/events/jabber/clients.py
|
1
|
9829
|
import logging
import traceback
import threading
from contextlib import contextmanager
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from sleekxmpp import ClientXMPP
from sleekxmpp.exceptions import IqError, IqTimeout
from sleekxmpp.xmlstream import ET
from lxml import etree
from users.utils import get_api_key
logger = logging.getLogger('events.jabber')
class ClientException(Exception):
pass
class BaseConfig(object):
"""
Base class for jabber clients configurations.
Extend and add new fields to __slots__ class variable, so the only these
fields can be set on an config object.
"""
__slots__ = ('jid', 'password')
@contextmanager
def _handle_missing_keys(self):
try:
yield
except KeyError, e:
message = 'Key %s not found within client config' % e.args[0]
raise ImproperlyConfigured(message)
def __init__(self, config_dict):
with self._handle_missing_keys():
self.jid = config_dict['JID']
self.password = config_dict['PASSWORD']
class BaseClient(object):
"""
Lightweight wrapper for :class:`sleekxmpp.ClientXMPP` client.
Example of usage::
with BaseClient({...}) as client:
client.my_method(...)
or::
client = BaseClient({...})
client.connect()
client.my_method(...)
client.disconnect()
"""
base_required_plugins = 30, # Service discovery
Config = BaseConfig
def __init__(self, config_dict):
self.config = self.Config(config_dict)
self._client = self._create_xmpp_client(self.config.jid, self.config.password)
required_plugins = set(self.base_required_plugins + self.required_plugins)
for plugin_num in required_plugins:
self._client.register_plugin(self._get_plugin_name(plugin_num))
self._client.add_event_handler('failed_auth', self._on_failed_auth, threaded=True)
self._client.add_event_handler('session_start', self._on_start, threaded=True)
self._on_auth_event = threading.Event()
self._on_start_event = threading.Event()
def _create_xmpp_client(self, *args, **kwargs):
return ClientXMPP(*args, **kwargs)
def __enter__(self):
self.connect()
return self
def __exit__(self, exc_type, exc_value, tb):
self.disconnect()
if exc_value:
traceback.print_exc()
raise exc_value
def _on_start(self, event):
logger.debug('session has been started')
self._authorized = True
self._on_auth_event.set()
self._on_start_event.set()
def _on_failed_auth(self, event):
self._authorized = False
self._on_auth_event.set()
def _get_plugin_name(self, plugin_num):
return 'xep_' + str(plugin_num).rjust(4, '0')
def get_plugin(self, plugin_num):
return self._client[self._get_plugin_name(plugin_num)]
@property
def discovery(self):
return self.get_plugin(30)
@property
def is_connected(self):
return self._client.authenticated
def connect(self):
logger.debug('connecting as %s', self.config.jid)
if self._client.connect():
logger.debug('connected')
self._client.process(block=False)
# Make sure we got authorized
self._on_auth_event.wait()
if self._authorized:
# Make sure session was started
self._on_start_event.wait()
else:
message = 'Server has rejected the provided login credentials'
raise ClientException(message)
else:
logger.error('failed to connect')
def disconnect(self):
self._client.disconnect()
logger.debug('disconnected')
class UsersClient(BaseClient):
required_plugins = 133, # Administration service
@property
def _admin(self):
return self.get_plugin(133)
@property
def _adhoc(self):
return self.get_plugin(50)
def synchronize_accounts(self):
"""
Creates jabber accounts for registered users.
"""
node, jid = 'all users', self._client.boundjid.server
try:
logger.debug('sending "get items" to %s node %s', jid, node)
items = self.discovery.get_items(jid=jid, node=node, block=True)
except IqError as e:
logging.error("Entity returned an error: %s" % e.iq['error']['condition'])
except IqTimeout:
logging.error("No response received.")
else:
registered_users = User.objects.only('id', 'username').order_by('id')
registered_jids = [item[0] for item in items['disco_items']['items']]
registered_jids_names = [j.split('@')[0] for j in registered_jids]
name_user_map = dict((u.username, u) for u in registered_users)
users_to_create = [name_user_map[name] for name in name_user_map
if name not in registered_jids_names]
if users_to_create:
logger.debug('%d jabber profiles are missing', len(users_to_create))
created_count = sum(map(self.create_account, users_to_create))
logger.info('created %d accounts', created_count)
else:
logger.debug('no need to create additional accounts')
logger.info('synchronize completed')
def create_account(self, user):
"""
Creates jabber account for given user.
Uses `user.username` as jid and user's api pkey as password.
Returns True if account was created successfully and False otherwise.
:type user: `django.contrib.auth.models.User`
:rtype: True or False
"""
jabber_username = "{}@{}".format(user.username, settings.XMPP_SERVER)
jabber_password = get_api_key(user).key
logger.debug('creating account for "%s" with jid=%s password=%s',
user.username, jabber_username, jabber_password)
# Both current and process loop threads have access to this variable
shared_result = dict(result=False)
# We use event to be able to return result within current thread
on_done_event = threading.Event()
def process_form(iq, session):
form = iq['command']['form']
answers = {
'accountjid': jabber_username,
'password': jabber_password,
'password-verify': jabber_password,
'FORM_TYPE': form['fields']['FORM_TYPE']['value']
}
form['type'] = 'submit'
form['values'] = answers
session['next'] = command_success
session['payload'] = form
self._adhoc.complete_command(session)
def command_success(iq, session):
logger.debug('success')
shared_result['result'] = True
on_done_event.set()
def command_error(iq, session):
code, text = iq['error']['code'], iq['error']['text']
logger.error('could not create account: %s %s', code, text)
self._adhoc.terminate_command(session)
on_done_event.set()
session = dict(next=process_form, error=command_error)
self._admin.add_user(session=session)
on_done_event.wait()
return shared_result['result']
class EventsNotifierClient(BaseClient):
required_plugins = (
59, # Result Set Management
60, # Publish-subscribe
)
class Config(BaseConfig):
__slots__ = ('pubsub_server', 'node_name')
def __init__(self, config_dict):
super(type(self), self).__init__(config_dict)
with self._handle_missing_keys():
self.pubsub_server = config_dict['PUBSUB_SERVER']
self.node_name = config_dict['NODE_NAME']
@property
def _pubsub(self):
return self.get_plugin(60)
def publish(self, event, radius):
from events.api.resources.jabber import EventResource
resource = EventResource()
event_dict = resource.full_dehydrate(resource.build_bundle(obj=event))
event_dict.data['radius'] = radius
str_payload = resource.serialize(None, event_dict, 'application/xml')
payload = ET.fromstring(str_payload)
if logger.level == logging.DEBUG:
lxml_payload = etree.fromstring(ET.tostring(payload))
str_payload = etree.tostring(lxml_payload, pretty_print=True)
logger.debug('sending publish message with payload:\n%s', str_payload)
self._pubsub.publish(self.config.pubsub_server,
self.config.node_name,
payload=payload)
def get_client(ClientClassOrName):
"""
Constructs client object using proper settings by given class object
or name.
:type ClientClassOrName: type or basestring
:rtype: BaseClient
"""
if isinstance(ClientClassOrName, basestring):
ClientClass = globals()[ClientClassOrName]
elif isinstance(ClientClassOrName, type):
ClientClass = ClientClassOrName
else:
raise TypeError(ClientClassOrName)
config_dict = {
'JID': settings.XMPP_ADMIN_JID,
'PASSWORD': settings.XMPP_ADMIN_PASSWORD,
}
if ClientClass is EventsNotifierClient:
config_dict.update({
'PUBSUB_SERVER': settings.XMPP_PUBSUB_SERVER,
'NODE_NAME': settings.XMPP_EVENTS_NOTIFICATION_NODE,
})
elif ClientClass is UsersClient:
pass
else:
raise Exception('No such client')
return ClientClass(config_dict)
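# ---------------------------------------------------------------------------
# Illustrative sketch -- not part of the original module. Typical use of the
# helpers above from a management command or a Django shell: build a client
# from settings with get_client(), use it as a context manager (connect and
# disconnect are handled by __enter__/__exit__), and call its high-level
# methods. Requires the XMPP_* settings referenced above; `some_event` is a
# placeholder. Nothing here runs on import.
if __name__ == '__main__':
    with get_client('UsersClient') as users_client:
        users_client.synchronize_accounts()
    # with get_client(EventsNotifierClient) as notifier:
    #     notifier.publish(some_event, radius=100)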
|
apache-2.0
|
maruen/yowsup.jlguardi
|
yowsup/layers/axolotl/store/sqlite/liteaxolotlstore.py
|
17
|
3414
|
from axolotl.state.axolotlstore import AxolotlStore
from .liteidentitykeystore import LiteIdentityKeyStore
from .liteprekeystore import LitePreKeyStore
from .litesessionstore import LiteSessionStore
from .litesignedprekeystore import LiteSignedPreKeyStore
from .litesenderkeystore import LiteSenderKeyStore
import sqlite3
class LiteAxolotlStore(AxolotlStore):
def __init__(self, db):
conn = sqlite3.connect(db, check_same_thread=False)
conn.text_factory = bytes
self.identityKeyStore = LiteIdentityKeyStore(conn)
self.preKeyStore = LitePreKeyStore(conn)
self.signedPreKeyStore = LiteSignedPreKeyStore(conn)
self.sessionStore = LiteSessionStore(conn)
self.senderKeyStore = LiteSenderKeyStore(conn)
def getIdentityKeyPair(self):
return self.identityKeyStore.getIdentityKeyPair()
def storeLocalData(self, registrationId, identityKeyPair):
self.identityKeyStore.storeLocalData(registrationId, identityKeyPair)
def getLocalRegistrationId(self):
return self.identityKeyStore.getLocalRegistrationId()
def saveIdentity(self, recepientId, identityKey):
self.identityKeyStore.saveIdentity(recepientId, identityKey)
def isTrustedIdentity(self, recepientId, identityKey):
return self.identityKeyStore.isTrustedIdentity(recepientId, identityKey)
def loadPreKey(self, preKeyId):
return self.preKeyStore.loadPreKey(preKeyId)
def loadPreKeys(self):
return self.preKeyStore.loadPendingPreKeys()
def storePreKey(self, preKeyId, preKeyRecord):
self.preKeyStore.storePreKey(preKeyId, preKeyRecord)
def containsPreKey(self, preKeyId):
return self.preKeyStore.containsPreKey(preKeyId)
def removePreKey(self, preKeyId):
self.preKeyStore.removePreKey(preKeyId)
def loadSession(self, recepientId, deviceId):
return self.sessionStore.loadSession(recepientId, deviceId)
def getSubDeviceSessions(self, recepientId):
return self.sessionStore.getSubDeviceSessions(recepientId)
def storeSession(self, recepientId, deviceId, sessionRecord):
self.sessionStore.storeSession(recepientId, deviceId, sessionRecord)
def containsSession(self, recepientId, deviceId):
return self.sessionStore.containsSession(recepientId, deviceId)
def deleteSession(self, recepientId, deviceId):
self.sessionStore.deleteSession(recepientId, deviceId)
def deleteAllSessions(self, recepientId):
self.sessionStore.deleteAllSessions(recepientId)
def loadSignedPreKey(self, signedPreKeyId):
return self.signedPreKeyStore.loadSignedPreKey(signedPreKeyId)
def loadSignedPreKeys(self):
return self.signedPreKeyStore.loadSignedPreKeys()
def storeSignedPreKey(self, signedPreKeyId, signedPreKeyRecord):
self.signedPreKeyStore.storeSignedPreKey(signedPreKeyId, signedPreKeyRecord)
def containsSignedPreKey(self, signedPreKeyId):
return self.signedPreKeyStore.containsSignedPreKey(signedPreKeyId)
def removeSignedPreKey(self, signedPreKeyId):
self.signedPreKeyStore.removeSignedPreKey(signedPreKeyId)
def loadSenderKey(self, senderKeyName):
return self.senderKeyStore.loadSenderKey(senderKeyName)
def storeSenderKey(self, senderKeyName, senderKeyRecord):
self.senderKeyStore.storeSenderKey(senderKeyName, senderKeyRecord)
|
gpl-3.0
|
keakon/gunicorn
|
setup.py
|
12
|
2916
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
from gunicorn import __version__
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet',
'Topic :: Utilities',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content']
# read long description
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:
long_description = f.read()
# read dev requirements
fname = os.path.join(os.path.dirname(__file__), 'requirements_test.txt')
with open(fname) as f:
tests_require = [l.strip() for l in f.readlines()]
if sys.version_info[:2] < (3, 3):
tests_require.append('mock')
if sys.version_info[:2] < (2, 7):
tests_require.append('unittest2')
class PyTestCommand(TestCommand):
user_options = [
("cov", None, "measure coverage")
]
def initialize_options(self):
TestCommand.initialize_options(self)
self.cov = None
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['tests']
if self.cov:
self.test_args += ['--cov', 'gunicorn']
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(
name='gunicorn',
version=__version__,
description='WSGI HTTP Server for UNIX',
long_description=long_description,
author='Benoit Chesneau',
author_email='[email protected]',
license='MIT',
url='http://gunicorn.org',
classifiers=CLASSIFIERS,
zip_safe=False,
packages=find_packages(exclude=['examples', 'tests']),
include_package_data=True,
tests_require=tests_require,
cmdclass={'test': PyTestCommand},
entry_points="""
[console_scripts]
gunicorn=gunicorn.app.wsgiapp:run
gunicorn_django=gunicorn.app.djangoapp:run
gunicorn_paster=gunicorn.app.pasterapp:run
[paste.server_runner]
main=gunicorn.app.pasterapp:paste_server
"""
)
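# ---------------------------------------------------------------------------
# Illustrative sketch -- not part of setup.py. The entry_points above install
# a `gunicorn` console script; what that script serves is any WSGI callable,
# for example the minimal application below (saved as, say, example_app.py and
# started with: gunicorn example_app:app).
def app(environ, start_response):
    """Smallest possible WSGI application."""
    body = b"Hello from gunicorn\n"
    start_response("200 OK", [("Content-Type", "text/plain"),
                              ("Content-Length", str(len(body)))])
    return [body]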
|
mit
|
QianBIG/odoo
|
addons/account_analytic_plans/account_analytic_plans.py
|
143
|
23352
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
class one2many_mod2(fields.one2many):
def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
if context is None:
context = {}
res = {}
for id in ids:
res[id] = []
ids2 = None
if 'journal_id' in context:
journal = obj.pool.get('account.journal').browse(cr, user, context['journal_id'], context=context)
pnum = int(name[7]) -1
plan = journal.plan_id
if plan and len(plan.plan_ids) > pnum:
acc_id = plan.plan_ids[pnum].root_analytic_id.id
ids2 = obj.pool[self._obj].search(cr, user, [(self._fields_id,'in',ids),('analytic_account_id','child_of',[acc_id])], limit=self._limit)
if ids2 is None:
ids2 = obj.pool[self._obj].search(cr, user, [(self._fields_id,'in',ids)], limit=self._limit)
for r in obj.pool[self._obj].read(cr, user, ids2, [self._fields_id], context=context, load='_classic_write'):
key = r[self._fields_id]
if isinstance(key, tuple):
# Read return a tuple in the case where the field is a many2one
# but we want to get the id of this field.
key = key[0]
res[key].append( r['id'] )
return res
class account_analytic_line(osv.osv):
_inherit = 'account.analytic.line'
_description = 'Analytic Line'
def _get_amount(self, cr, uid, ids, name, args, context=None):
res = {}
for id in ids:
res.setdefault(id, 0.0)
for line in self.browse(cr, uid, ids, context=context):
amount = line.move_id and line.move_id.amount_currency * (line.percentage / 100) or 0.0
res[line.id] = amount
return res
_columns = {
'amount_currency': fields.function(_get_amount, string="Amount Currency", type="float", store=True, help="The amount expressed in the related account currency if not equal to the company one.", readonly=True),
'percentage': fields.float('Percentage')
}
class account_analytic_plan(osv.osv):
_name = "account.analytic.plan"
_description = "Analytic Plan"
_columns = {
'name': fields.char('Analytic Plan', required=True, select=True),
'plan_ids': fields.one2many('account.analytic.plan.line', 'plan_id', 'Analytic Plans', copy=True),
}
class account_analytic_plan_line(osv.osv):
_name = "account.analytic.plan.line"
_description = "Analytic Plan Line"
_order = "sequence, id"
_columns = {
'plan_id': fields.many2one('account.analytic.plan','Analytic Plan',required=True),
'name': fields.char('Axis Name', required=True, select=True),
'sequence': fields.integer('Sequence'),
'root_analytic_id': fields.many2one('account.analytic.account', 'Root Account', help="Root account of this plan.", required=False),
'min_required': fields.float('Minimum Allowed (%)'),
'max_required': fields.float('Maximum Allowed (%)'),
}
_defaults = {
'min_required': 100.0,
'max_required': 100.0,
}
class account_analytic_plan_instance(osv.osv):
_name = "account.analytic.plan.instance"
_description = "Analytic Plan Instance"
_columns = {
'name': fields.char('Analytic Distribution'),
'code': fields.char('Distribution Code', size=16),
'journal_id': fields.many2one('account.analytic.journal', 'Analytic Journal' ),
'account_ids': fields.one2many('account.analytic.plan.instance.line', 'plan_id', 'Account Id', copy=True),
'account1_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account1 Id'),
'account2_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account2 Id'),
'account3_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account3 Id'),
'account4_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account4 Id'),
'account5_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account5 Id'),
'account6_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account6 Id'),
'plan_id': fields.many2one('account.analytic.plan', "Model's Plan"),
}
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
journal_obj = self.pool.get('account.journal')
if context.get('journal_id', False):
journal = journal_obj.browse(cr, user, [context['journal_id']], context=context)[0]
analytic_journal = journal.analytic_journal_id and journal.analytic_journal_id.id or False
args.append('|')
args.append(('journal_id', '=', analytic_journal))
args.append(('journal_id', '=', False))
res = super(account_analytic_plan_instance, self).search(cr, user, args, offset=offset, limit=limit, order=order,
context=context, count=count)
return res
def _default_journal(self, cr, uid, context=None):
if context is None:
context = {}
journal_obj = self.pool.get('account.journal')
if context.has_key('journal_id') and context['journal_id']:
journal = journal_obj.browse(cr, uid, context['journal_id'], context=context)
if journal.analytic_journal_id:
return journal.analytic_journal_id.id
return False
_defaults = {
'plan_id': False,
'journal_id': _default_journal,
}
def name_get(self, cr, uid, ids, context=None):
res = []
for inst in self.browse(cr, uid, ids, context=context):
name = inst.name or '/'
if name and inst.code:
name=name+' ('+inst.code+')'
res.append((inst.id, name))
return res
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
args = args or []
if name:
ids = self.search(cr, uid, [('code', '=', name)] + args, limit=limit, context=context or {})
if not ids:
ids = self.search(cr, uid, [('name', operator, name)] + args, limit=limit, context=context or {})
else:
ids = self.search(cr, uid, args, limit=limit, context=context or {})
return self.name_get(cr, uid, ids, context or {})
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
if context is None:
context = {}
wiz_id = self.pool.get('ir.actions.act_window').search(cr, uid, [("name","=","analytic.plan.create.model.action")], context=context)
res = super(account_analytic_plan_instance,self).fields_view_get(cr, uid, view_id, view_type, context=context, toolbar=toolbar, submenu=submenu)
journal_obj = self.pool.get('account.journal')
analytic_plan_obj = self.pool.get('account.analytic.plan')
if (res['type']=='form'):
plan_id = False
if context.get('journal_id', False):
plan_id = journal_obj.browse(cr, uid, int(context['journal_id']), context=context).plan_id
elif context.get('plan_id', False):
plan_id = analytic_plan_obj.browse(cr, uid, int(context['plan_id']), context=context)
if plan_id:
i=1
res['arch'] = """<form string="%s">
<field name="name"/>
<field name="code"/>
<field name="journal_id"/>
<button name="%d" string="Save This Distribution as a Model" type="action" colspan="2"/>
"""% (tools.to_xml(plan_id.name), wiz_id[0])
for line in plan_id.plan_ids:
res['arch']+="""
<field name="account%d_ids" string="%s" nolabel="1" colspan="4">
<tree string="%s" editable="bottom">
<field name="rate"/>
<field name="analytic_account_id" domain="[('parent_id','child_of',[%d])]" groups="analytic.group_analytic_accounting"/>
</tree>
</field>
<newline/>"""%(i,tools.to_xml(line.name),tools.to_xml(line.name),line.root_analytic_id and line.root_analytic_id.id or 0)
i+=1
res['arch'] += "</form>"
doc = etree.fromstring(res['arch'].encode('utf8'))
xarch, xfields = self._view_look_dom_arch(cr, uid, doc, view_id, context=context)
res['arch'] = xarch
res['fields'] = xfields
return res
else:
return res
def create(self, cr, uid, vals, context=None):
journal_obj = self.pool.get('account.journal')
ana_plan_instance_obj = self.pool.get('account.analytic.plan.instance')
acct_anal_acct = self.pool.get('account.analytic.account')
acct_anal_plan_line_obj = self.pool.get('account.analytic.plan.line')
if context and context.get('journal_id'):
journal = journal_obj.browse(cr, uid, context['journal_id'], context=context)
pids = ana_plan_instance_obj.search(cr, uid, [('name','=',vals['name']), ('code','=',vals['code']), ('plan_id','<>',False)], context=context)
if pids:
raise osv.except_osv(_('Error!'), _('A model with this name and code already exists.'))
res = acct_anal_plan_line_obj.search(cr, uid, [('plan_id','=',journal.plan_id.id)], context=context)
for i in res:
total_per_plan = 0
item = acct_anal_plan_line_obj.browse(cr, uid, i, context=context)
temp_list = ['account1_ids','account2_ids','account3_ids','account4_ids','account5_ids','account6_ids']
for l in temp_list:
if vals.has_key(l):
for tempo in vals[l]:
if acct_anal_acct.search(cr, uid, [('parent_id', 'child_of', [item.root_analytic_id.id]), ('id', '=', tempo[2]['analytic_account_id'])], context=context):
total_per_plan += tempo[2]['rate']
if total_per_plan < item.min_required or total_per_plan > item.max_required:
raise osv.except_osv(_('Error!'),_('The total should be between %s and %s.') % (str(item.min_required), str(item.max_required)))
return super(account_analytic_plan_instance, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None, check=True, update_check=True):
if context is None:
context = {}
this = self.browse(cr, uid, ids[0], context=context)
invoice_line_obj = self.pool.get('account.invoice.line')
if this.plan_id and not vals.has_key('plan_id'):
#this instance is a model, so we have to create a new plan instance instead of modifying it
#copy the existing model
temp_id = self.copy(cr, uid, this.id, None, context=context)
#get the list of the invoice line that were linked to the model
lists = invoice_line_obj.search(cr, uid, [('analytics_id','=',this.id)], context=context)
#make them link to the copy
invoice_line_obj.write(cr, uid, lists, {'analytics_id':temp_id}, context=context)
#and finally modify the old model to be not a model anymore
vals['plan_id'] = False
if not vals.has_key('name'):
vals['name'] = this.name and (str(this.name)+'*') or "*"
if not vals.has_key('code'):
vals['code'] = this.code and (str(this.code)+'*') or "*"
return super(account_analytic_plan_instance, self).write(cr, uid, ids, vals, context=context)
class account_analytic_plan_instance_line(osv.osv):
_name = "account.analytic.plan.instance.line"
_description = "Analytic Instance Line"
_rec_name = "analytic_account_id"
_columns = {
'plan_id': fields.many2one('account.analytic.plan.instance', 'Plan Id'),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account', required=True, domain=[('type','<>','view')]),
'rate': fields.float('Rate (%)', required=True),
}
_defaults = {
'rate': 100.0
}
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
reads = self.read(cr, uid, ids, ['analytic_account_id'], context=context)
res = []
for record in reads:
res.append((record['id'], record['analytic_account_id']))
return res
class account_journal(osv.osv):
_inherit = "account.journal"
_name = "account.journal"
_columns = {
'plan_id': fields.many2one('account.analytic.plan', 'Analytic Plans'),
}
class account_invoice_line(osv.osv):
_inherit = "account.invoice.line"
_name = "account.invoice.line"
_columns = {
'analytics_id': fields.many2one('account.analytic.plan.instance', 'Analytic Distribution'),
}
def create(self, cr, uid, vals, context=None):
if 'analytics_id' in vals and isinstance(vals['analytics_id'], tuple):
vals['analytics_id'] = vals['analytics_id'][0]
return super(account_invoice_line, self).create(cr, uid, vals, context=context)
def move_line_get_item(self, cr, uid, line, context=None):
res = super(account_invoice_line, self).move_line_get_item(cr, uid, line, context=context)
res ['analytics_id'] = line.analytics_id and line.analytics_id.id or False
return res
def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, company_id=None, context=None):
res_prod = super(account_invoice_line, self).product_id_change(cr, uid, ids, product, uom_id, qty, name, type, partner_id, fposition_id, price_unit, currency_id, company_id=company_id, context=context)
rec = self.pool.get('account.analytic.default').account_get(cr, uid, product, partner_id, uid, time.strftime('%Y-%m-%d'), context=context)
if rec and rec.analytics_id:
res_prod['value'].update({'analytics_id': rec.analytics_id.id})
return res_prod
class account_move_line(osv.osv):
_inherit = "account.move.line"
_name = "account.move.line"
_columns = {
'analytics_id':fields.many2one('account.analytic.plan.instance', 'Analytic Distribution'),
}
def _default_get_move_form_hook(self, cursor, user, data):
data = super(account_move_line, self)._default_get_move_form_hook(cursor, user, data)
if data.has_key('analytics_id'):
del(data['analytics_id'])
return data
def create_analytic_lines(self, cr, uid, ids, context=None):
if context is None:
context = {}
super(account_move_line, self).create_analytic_lines(cr, uid, ids, context=context)
analytic_line_obj = self.pool.get('account.analytic.line')
for line in self.browse(cr, uid, ids, context=context):
if line.analytics_id:
if not line.journal_id.analytic_journal_id:
raise osv.except_osv(_('No Analytic Journal!'),_("You have to define an analytic journal on the '%s' journal.") % (line.journal_id.name,))
toremove = analytic_line_obj.search(cr, uid, [('move_id','=',line.id)], context=context)
if toremove:
analytic_line_obj.unlink(cr, uid, toremove, context=context)
for line2 in line.analytics_id.account_ids:
val = (line.credit or 0.0) - (line.debit or 0.0)
amt=val * (line2.rate/100)
al_vals={
'name': line.name,
'date': line.date,
'account_id': line2.analytic_account_id.id,
'unit_amount': line.quantity,
'product_id': line.product_id and line.product_id.id or False,
'product_uom_id': line.product_uom_id and line.product_uom_id.id or False,
'amount': amt,
'general_account_id': line.account_id.id,
'move_id': line.id,
'journal_id': line.journal_id.analytic_journal_id.id,
'ref': line.ref,
'percentage': line2.rate
}
analytic_line_obj.create(cr, uid, al_vals, context=context)
return True
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
if context is None:
context = {}
result = super(account_move_line, self).fields_view_get(cr, uid, view_id, view_type, context=context, toolbar=toolbar, submenu=submenu)
return result
class account_invoice(osv.osv):
_name = "account.invoice"
_inherit = "account.invoice"
def line_get_convert(self, cr, uid, x, part, date, context=None):
res=super(account_invoice,self).line_get_convert(cr, uid, x, part, date, context=context)
res['analytics_id'] = x.get('analytics_id', False)
return res
def _get_analytic_lines(self, cr, uid, ids, context=None):
inv = self.browse(cr, uid, ids)[0]
cur_obj = self.pool.get('res.currency')
invoice_line_obj = self.pool.get('account.invoice.line')
acct_ins_obj = self.pool.get('account.analytic.plan.instance')
company_currency = inv.company_id.currency_id.id
if inv.type in ('out_invoice', 'in_refund'):
sign = 1
else:
sign = -1
iml = invoice_line_obj.move_line_get(cr, uid, inv.id, context=context)
for il in iml:
if il.get('analytics_id', False):
if inv.type in ('in_invoice', 'in_refund'):
ref = inv.reference
else:
ref = inv.number
obj_move_line = acct_ins_obj.browse(cr, uid, il['analytics_id'], context=context)
ctx = context.copy()
ctx.update({'date': inv.date_invoice})
amount_calc = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, il['price'], context=ctx) * sign
qty = il['quantity']
il['analytic_lines'] = []
for line2 in obj_move_line.account_ids:
amt = amount_calc * (line2.rate/100)
qtty = qty* (line2.rate/100)
al_vals = {
'name': il['name'],
'date': inv['date_invoice'],
'unit_amount': qtty,
'product_id': il['product_id'],
'account_id': line2.analytic_account_id.id,
'amount': amt,
'product_uom_id': il['uos_id'],
'general_account_id': il['account_id'],
'journal_id': self._get_journal_analytic(cr, uid, inv.type),
'ref': ref,
}
il['analytic_lines'].append((0, 0, al_vals))
return iml
class account_analytic_plan(osv.osv):
_inherit = "account.analytic.plan"
_columns = {
'default_instance_id': fields.many2one('account.analytic.plan.instance', 'Default Entries'),
}
class analytic_default(osv.osv):
_inherit = "account.analytic.default"
_columns = {
'analytics_id': fields.many2one('account.analytic.plan.instance', 'Analytic Distribution'),
}
class sale_order_line(osv.osv):
_inherit = "sale.order.line"
# Method overridden to set the analytic account by default on criterion match
def invoice_line_create(self, cr, uid, ids, context=None):
create_ids = super(sale_order_line,self).invoice_line_create(cr, uid, ids, context=context)
inv_line_obj = self.pool.get('account.invoice.line')
acct_anal_def_obj = self.pool.get('account.analytic.default')
if ids:
sale_line = self.browse(cr, uid, ids[0], context=context)
for line in inv_line_obj.browse(cr, uid, create_ids, context=context):
rec = acct_anal_def_obj.account_get(cr, uid, line.product_id.id,
sale_line.order_id.partner_id.id, uid, time.strftime('%Y-%m-%d'),
sale_line.order_id.company_id.id, context=context)
if rec:
inv_line_obj.write(cr, uid, [line.id], {'analytics_id': rec.analytics_id.id}, context=context)
return create_ids
class account_bank_statement(osv.osv):
_inherit = "account.bank.statement"
_name = "account.bank.statement"
def _prepare_bank_move_line(self, cr, uid, st_line, move_id, amount, company_currency_id, context=None):
result = super(account_bank_statement,self)._prepare_bank_move_line(cr, uid, st_line,
move_id, amount, company_currency_id, context=context)
result['analytics_id'] = st_line.analytics_id.id
return result
def button_confirm_bank(self, cr, uid, ids, context=None):
super(account_bank_statement,self).button_confirm_bank(cr, uid, ids, context=context)
for st in self.browse(cr, uid, ids, context=context):
for st_line in st.line_ids:
if st_line.analytics_id:
if not st.journal_id.analytic_journal_id:
raise osv.except_osv(_('No Analytic Journal!'),_("You have to define an analytic journal on the '%s' journal.") % (st.journal_id.name,))
if not st_line.amount:
continue
return True
class account_bank_statement_line(osv.osv):
_inherit = "account.bank.statement.line"
_name = "account.bank.statement.line"
_columns = {
'analytics_id': fields.many2one('account.analytic.plan.instance', 'Analytic Distribution'),
}
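# ---------------------------------------------------------------------------
# Illustrative, self-contained sketch -- not part of the original module. It
# isolates the distribution arithmetic used in create_analytic_lines() and
# _get_analytic_lines() above: each line of an analytic plan instance receives
# `rate` percent of the move amount (and of the quantity).
def _example_split(amount, rates):
    """Return one analytic amount per rate, where rates are percentages."""
    return [amount * (rate / 100.0) for rate in rates]
# _example_split(300.0, [50.0, 30.0, 20.0]) -> [150.0, 90.0, 60.0]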
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
plaes/pelican-toc
|
test_toc.py
|
1
|
3458
|
from io import open
import unittest
import re
import toc
from pelican.readers import MarkdownReader
from pelican.contents import Article
from pelican.tests.support import get_settings
class TestToCGeneration(unittest.TestCase):
@classmethod
def setUpClass(cls):
toc.init_default_config(None)
cls.settings = get_settings()
cls.md_reader = MarkdownReader(cls.settings)
def setUp(self):
# have to reset the default, because shallow copies
self.settings['TOC']['TOC_HEADERS'] = '^h[1-6]'
self.settings['TOC']['TOC_RUN'] = 'true'
def _handle_article_generation(self, path):
content, metadata = self.md_reader.read(path)
return Article(content=content, metadata=metadata)
def _generate_toc(self, article_path, expected_path):
result = self._handle_article_generation(article_path)
toc.generate_toc(result)
expected = ""
with open(expected_path, 'r') as f:
expected = f.read()
return result, expected
def test_toc_generation(self):
result, expected = self._generate_toc(
"test_data/article_with_headers.md",
"test_data/article_with_headers_toc.html"
)
self.assertEqual(result.toc, expected)
def test_toc_generation_nonascii(self):
result, expected = self._generate_toc(
"test_data/article_with_headers_nonascii.md",
"test_data/article_with_headers_toc_nonascii.html"
)
self.assertEqual(result.toc, expected)
def test_toc_generation_exclude_small_headers(self):
self.settings['TOC']['TOC_HEADERS'] = '^h[1-3]'
result, expected = self._generate_toc(
"test_data/article_with_headers_exclude_small_headers.md",
"test_data/article_with_headers_toc_exclude_small_headers.html"
)
self.assertEqual(result.toc, expected)
def test_toc_generation_exclude_small_headers_metadata(self):
result, expected = self._generate_toc(
"test_data/article_with_headers_exclude_small_headers_metadata.md",
"test_data/article_with_headers_toc_exclude_small_headers.html"
)
self.assertEqual(result.toc, expected)
def test_bad_TOC_HEADERS(self):
self.settings['TOC']['TOC_HEADERS'] = '^[1-'
with self.assertRaises(re.error):
self._generate_toc(
"test_data/article_with_headers_exclude_small_headers.md",
"test_data/article_with_headers_toc_exclude_small_headers.html"
)
def test_no_toc_generation(self):
article_without_headers_path = "test_data/article_without_headers.md"
article_without_headers = self._handle_article_generation(
article_without_headers_path)
toc.generate_toc(article_without_headers)
with self.assertRaises(AttributeError):
self.assertIsNone(article_without_headers.toc)
def test_no_toc_generation_metadata(self):
article_without_headers_path = "test_data/article_with_headers_metadata.md"
article_without_headers = self._handle_article_generation(
article_without_headers_path)
toc.generate_toc(article_without_headers)
with self.assertRaises(AttributeError):
self.assertIsNone(article_without_headers.toc)
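# ---------------------------------------------------------------------------
# Illustrative, self-contained sketch -- not part of the original tests. The
# TOC_HEADERS settings exercised above are regular expressions that the plugin
# is assumed to match against HTML header tag names, so '^h[1-3]' keeps h1-h3
# and drops h4-h6 from the generated table of contents.
def _toc_headers_demo(pattern='^h[1-3]'):
    header_re = re.compile(pattern)
    return [tag for tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6') if header_re.match(tag)]
# _toc_headers_demo() -> ['h1', 'h2', 'h3']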
if __name__ == "__main__":
unittest.main()
|
gpl-2.0
|
tastynoodle/django
|
tests/utils_tests/test_html.py
|
5
|
8266
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
import os
from unittest import TestCase
from django.utils import html
from django.utils._os import upath
from django.utils.encoding import force_text
class TestUtilsHtml(TestCase):
def check_output(self, function, value, output=None):
"""
Check that function(value) equals output. If output is None,
check that function(value) equals value.
"""
if output is None:
output = value
self.assertEqual(function(value), output)
def test_escape(self):
f = html.escape
items = (
            ('&', '&amp;'),
            ('<', '&lt;'),
            ('>', '&gt;'),
            ('"', '&quot;'),
            ("'", '&#39;'),
)
# Substitution patterns for testing the above items.
patterns = ("%s", "asdf%sfdsa", "%s1", "1%sb")
for value, output in items:
for pattern in patterns:
self.check_output(f, pattern % value, pattern % output)
# Check repeated values.
self.check_output(f, value * 2, output * 2)
# Verify it doesn't double replace &.
        self.check_output(f, '<&', '&lt;&amp;')
def test_format_html(self):
self.assertEqual(
html.format_html("{0} {1} {third} {fourth}",
"< Dangerous >",
html.mark_safe("<b>safe</b>"),
third="< dangerous again",
fourth=html.mark_safe("<i>safe again</i>")
),
"< Dangerous > <b>safe</b> < dangerous again <i>safe again</i>"
)
def test_linebreaks(self):
f = html.linebreaks
items = (
("para1\n\npara2\r\rpara3", "<p>para1</p>\n\n<p>para2</p>\n\n<p>para3</p>"),
("para1\nsub1\rsub2\n\npara2", "<p>para1<br />sub1<br />sub2</p>\n\n<p>para2</p>"),
("para1\r\n\r\npara2\rsub1\r\rpara4", "<p>para1</p>\n\n<p>para2<br />sub1</p>\n\n<p>para4</p>"),
("para1\tmore\n\npara2", "<p>para1\tmore</p>\n\n<p>para2</p>"),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_tags(self):
f = html.strip_tags
items = (
            ('<p>See: &#39;&eacute; is an apostrophe followed by e acute</p>',
             'See: &#39;&eacute; is an apostrophe followed by e acute'),
('<adf>a', 'a'),
('</adf>a', 'a'),
('<asdf><asdf>e', 'e'),
('hi, <f x', 'hi, <f x'),
('234<235, right?', '234<235, right?'),
('a4<a5 right?', 'a4<a5 right?'),
('b7>b2!', 'b7>b2!'),
('</fe', '</fe'),
('<x>b<y>', 'b'),
('a<p onclick="alert(\'<test>\')">b</p>c', 'abc'),
('a<p a >b</p>c', 'abc'),
('d<a:b c:d>e</p>f', 'def'),
('<strong>foo</strong><a href="http://example.com">bar</a>', 'foobar'),
)
for value, output in items:
self.check_output(f, value, output)
# Test with more lengthy content (also catching performance regressions)
for filename in ('strip_tags1.html', 'strip_tags2.txt'):
path = os.path.join(os.path.dirname(upath(__file__)), 'files', filename)
with open(path, 'r') as fp:
content = force_text(fp.read())
start = datetime.now()
stripped = html.strip_tags(content)
elapsed = datetime.now() - start
self.assertEqual(elapsed.seconds, 0)
self.assertIn("Please try again.", stripped)
self.assertNotIn('<', stripped)
def test_strip_spaces_between_tags(self):
f = html.strip_spaces_between_tags
# Strings that should come out untouched.
items = (' <adf>', '<adf> ', ' </adf> ', ' <f> x</f>')
for value in items:
self.check_output(f, value)
# Strings that have spaces to strip.
items = (
('<d> </d>', '<d></d>'),
('<p>hello </p>\n<p> world</p>', '<p>hello </p><p> world</p>'),
('\n<p>\t</p>\n<p> </p>\n', '\n<p></p><p></p>\n'),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_entities(self):
f = html.strip_entities
# Strings that should come out untouched.
values = ("&", "&a", "&a", "a&#a")
for value in values:
self.check_output(f, value)
# Valid entities that should be stripped from the patterns.
entities = ("", "", "&a;", "&fdasdfasdfasdf;")
patterns = (
("asdf %(entity)s ", "asdf "),
("%(entity)s%(entity)s", ""),
("&%(entity)s%(entity)s", "&"),
("%(entity)s3", "3"),
)
for entity in entities:
for in_pattern, output in patterns:
self.check_output(f, in_pattern % {'entity': entity}, output)
def test_fix_ampersands(self):
f = html.fix_ampersands
# Strings without ampersands or with ampersands already encoded.
values = ("a", "b", "&a;", "& &x; ", "asdf")
patterns = (
("%s", "%s"),
("&%s", "&%s"),
("&%s&", "&%s&"),
)
for value in values:
for in_pattern, out_pattern in patterns:
self.check_output(f, in_pattern % value, out_pattern % value)
# Strings with ampersands that need encoding.
items = (
("&#;", "&#;"),
("ͫ ;", "&#875 ;"),
("abc;", "&#4abc;"),
)
for value, output in items:
self.check_output(f, value, output)
def test_escapejs(self):
f = html.escapejs
items = (
('"double quotes" and \'single quotes\'', '\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027'),
(r'\ : backslashes, too', '\\u005C : backslashes, too'),
('and lots of whitespace: \r\n\t\v\f\b', 'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008'),
(r'<script>and this</script>', '\\u003Cscript\\u003Eand this\\u003C/script\\u003E'),
('paragraph separator:\u2029and line separator:\u2028', 'paragraph separator:\\u2029and line separator:\\u2028'),
)
for value, output in items:
self.check_output(f, value, output)
def test_clean_html(self):
f = html.clean_html
items = (
('<p>I <i>believe</i> in <b>semantic markup</b>!</p>', '<p>I <em>believe</em> in <strong>semantic markup</strong>!</p>'),
            ('I escape & I don\'t <a href="#" target="_blank">target</a>', 'I escape &amp; I don\'t <a href="#" >target</a>'),
            ('<p>I kill whitespace</p><br clear="all"><p>&nbsp;</p>', '<p>I kill whitespace</p>'),
# also a regression test for #7267: this used to raise an UnicodeDecodeError
('<p>* foo</p><p>* bar</p>', '<ul>\n<li> foo</li><li> bar</li>\n</ul>'),
)
for value, output in items:
self.check_output(f, value, output)
def test_remove_tags(self):
f = html.remove_tags
items = (
("<b><i>Yes</i></b>", "b i", "Yes"),
("<a>x</a> <p><b>y</b></p>", "a b", "x <p>y</p>"),
)
for value, tags, output in items:
self.assertEqual(f(value, tags), output)
def test_smart_urlquote(self):
quote = html.smart_urlquote
# Ensure that IDNs are properly quoted
self.assertEqual(quote('http://öäü.com/'), 'http://xn--4ca9at.com/')
self.assertEqual(quote('http://öäü.com/öäü/'), 'http://xn--4ca9at.com/%C3%B6%C3%A4%C3%BC/')
# Ensure that everything unsafe is quoted, !*'();:@&=+$,/?#[]~ is considered safe as per RFC
self.assertEqual(quote('http://example.com/path/öäü/'), 'http://example.com/path/%C3%B6%C3%A4%C3%BC/')
self.assertEqual(quote('http://example.com/%C3%B6/ä/'), 'http://example.com/%C3%B6/%C3%A4/')
self.assertEqual(quote('http://example.com/?x=1&y=2'), 'http://example.com/?x=1&y=2')
|
bsd-3-clause
|
ramitsurana/boto
|
tests/unit/cloudtrail/test_layer1.py
|
91
|
2731
|
#!/usr/bin/env python
import json
from boto.cloudtrail.layer1 import CloudTrailConnection
from tests.unit import AWSMockServiceTestCase
class TestDescribeTrails(AWSMockServiceTestCase):
connection_class = CloudTrailConnection
def default_body(self):
return b'''
{"trailList":
[
{
"IncludeGlobalServiceEvents": false,
"Name": "test",
"SnsTopicName": "cloudtrail-1",
"S3BucketName": "cloudtrail-1"
}
]
}'''
def test_describe(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.describe_trails()
self.assertEqual(1, len(api_response['trailList']))
self.assertEqual('test', api_response['trailList'][0]['Name'])
self.assert_request_parameters({})
target = self.actual_request.headers['X-Amz-Target']
self.assertTrue('DescribeTrails' in target)
def test_describe_name_list(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.describe_trails(
trail_name_list=['test'])
self.assertEqual(1, len(api_response['trailList']))
self.assertEqual('test', api_response['trailList'][0]['Name'])
self.assertEqual(json.dumps({
'trailNameList': ['test']
}), self.actual_request.body.decode('utf-8'))
target = self.actual_request.headers['X-Amz-Target']
self.assertTrue('DescribeTrails' in target)
class TestCreateTrail(AWSMockServiceTestCase):
connection_class = CloudTrailConnection
def default_body(self):
return b'''
{"trail":
{
"IncludeGlobalServiceEvents": false,
"Name": "test",
"SnsTopicName": "cloudtrail-1",
"S3BucketName": "cloudtrail-1"
}
}'''
def test_create(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_trail(
'test', 'cloudtrail-1', sns_topic_name='cloudtrail-1',
include_global_service_events=False)
self.assertEqual('test', api_response['trail']['Name'])
self.assertEqual('cloudtrail-1', api_response['trail']['S3BucketName'])
self.assertEqual('cloudtrail-1', api_response['trail']['SnsTopicName'])
self.assertEqual(False,
api_response['trail']['IncludeGlobalServiceEvents'])
target = self.actual_request.headers['X-Amz-Target']
self.assertTrue('CreateTrail' in target)
|
mit
|
Scarygami/mirror-api-examples
|
colours-of-the-world/lib/oauth2client/tools.py
|
46
|
6819
|
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line tools for authenticating via OAuth 2.0
Do the OAuth 2.0 Web Server dance for a command line application. Stores the
generated credentials in a common file that is used by other example apps in
the same directory.
"""
__author__ = '[email protected] (Joe Gregorio)'
__all__ = ['run']
import BaseHTTPServer
import gflags
import socket
import sys
import webbrowser
from oauth2client.client import FlowExchangeError
from oauth2client.client import OOB_CALLBACK_URN
from oauth2client import util
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
FLAGS = gflags.FLAGS
gflags.DEFINE_boolean('auth_local_webserver', True,
('Run a local web server to handle redirects during '
'OAuth authorization.'))
gflags.DEFINE_string('auth_host_name', 'localhost',
('Host name to use when running a local web server to '
'handle redirects during OAuth authorization.'))
gflags.DEFINE_multi_int('auth_host_port', [8080, 8090],
('Port to use when running a local web server to '
'handle redirects during OAuth authorization.'))
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
"""A server to handle OAuth 2.0 redirects back to localhost.
Waits for a single request and parses the query parameters
into query_params and then stops serving.
"""
query_params = {}
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""A handler for OAuth 2.0 redirects back to localhost.
Waits for a single request and parses the query parameters
into the servers query_params and then stops serving.
"""
def do_GET(s):
"""Handle a GET request.
Parses the query parameters and prints a message
if the flow has completed. Note that we can't detect
if an error occurred.
"""
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
query = s.path.split('?', 1)[-1]
query = dict(parse_qsl(query))
s.server.query_params = query
s.wfile.write("<html><head><title>Authentication Status</title></head>")
s.wfile.write("<body><p>The authentication flow has completed.</p>")
s.wfile.write("</body></html>")
def log_message(self, format, *args):
"""Do not log messages to stdout while running as command line program."""
pass
@util.positional(2)
def run(flow, storage, http=None):
"""Core code for a command-line application.
The run() function is called from your application and runs through all the
steps to obtain credentials. It takes a Flow argument and attempts to open an
authorization server page in the user's default web browser. The server asks
the user to grant your application access to the user's data. If the user
grants access, the run() function returns new credentials. The new credentials
are also stored in the Storage argument, which updates the file associated
with the Storage object.
It presumes it is run from a command-line application and supports the
following flags:
--auth_host_name: Host name to use when running a local web server
to handle redirects during OAuth authorization.
(default: 'localhost')
--auth_host_port: Port to use when running a local web server to handle
redirects during OAuth authorization.;
repeat this option to specify a list of values
(default: '[8080, 8090]')
(an integer)
--[no]auth_local_webserver: Run a local web server to handle redirects
during OAuth authorization.
(default: 'true')
Since it uses flags make sure to initialize the gflags module before calling
run().
Args:
flow: Flow, an OAuth 2.0 Flow to step through.
storage: Storage, a Storage to store the credential in.
http: An instance of httplib2.Http.request
or something that acts like it.
Returns:
Credentials, the obtained credential.
"""
if FLAGS.auth_local_webserver:
success = False
port_number = 0
for port in FLAGS.auth_host_port:
port_number = port
try:
httpd = ClientRedirectServer((FLAGS.auth_host_name, port),
ClientRedirectHandler)
except socket.error, e:
pass
else:
success = True
break
FLAGS.auth_local_webserver = success
if not success:
print 'Failed to start a local webserver listening on either port 8080'
print 'or port 9090. Please check your firewall settings and locally'
print 'running programs that may be blocking or using those ports.'
print
print 'Falling back to --noauth_local_webserver and continuing with',
print 'authorization.'
print
if FLAGS.auth_local_webserver:
oauth_callback = 'http://%s:%s/' % (FLAGS.auth_host_name, port_number)
else:
oauth_callback = OOB_CALLBACK_URN
flow.redirect_uri = oauth_callback
authorize_url = flow.step1_get_authorize_url()
if FLAGS.auth_local_webserver:
webbrowser.open(authorize_url, new=1, autoraise=True)
print 'Your browser has been opened to visit:'
print
print ' ' + authorize_url
print
print 'If your browser is on a different machine then exit and re-run this'
print 'application with the command-line parameter '
print
print ' --noauth_local_webserver'
print
else:
print 'Go to the following link in your browser:'
print
print ' ' + authorize_url
print
code = None
if FLAGS.auth_local_webserver:
httpd.handle_request()
if 'error' in httpd.query_params:
sys.exit('Authentication request was rejected.')
if 'code' in httpd.query_params:
code = httpd.query_params['code']
else:
print 'Failed to find "code" in the query parameters of the redirect.'
sys.exit('Try running with --noauth_local_webserver.')
else:
code = raw_input('Enter verification code: ').strip()
try:
credential = flow.step2_exchange(code, http=http)
except FlowExchangeError, e:
sys.exit('Authentication has failed: %s' % e)
storage.put(credential)
credential.set_store(storage)
print 'Authentication successful.'
return credential
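# Illustrative usage sketch (not part of the original module), assuming a
# Google OAuth client id/secret; the values below are placeholders.  A small
# command-line script would typically build a flow and a storage object and
# hand them to run():
#
#   import sys
#   from oauth2client.client import OAuth2WebServerFlow
#   from oauth2client.file import Storage
#
#   FLAGS(sys.argv)  # let gflags parse --auth_local_webserver and friends
#   flow = OAuth2WebServerFlow(client_id='YOUR_CLIENT_ID',
#                              client_secret='YOUR_CLIENT_SECRET',
#                              scope='https://www.googleapis.com/auth/userinfo.email')
#   credentials = run(flow, Storage('credentials.dat'))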
|
apache-2.0
|
Johnetordoff/osf.io
|
api_tests/registrations/views/test_registration_linked_nodes.py
|
11
|
15618
|
import pytest
from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from api_tests.utils import disconnected_from_listeners
from website.project.signals import contributor_removed
from osf_tests.factories import (
NodeFactory,
AuthUserFactory,
RegistrationFactory
)
@pytest.fixture()
def user():
return AuthUserFactory()
@pytest.mark.django_db
class TestNodeRelationshipNodeLinks:
@pytest.fixture()
def contributor(self):
return AuthUserFactory()
@pytest.fixture()
def auth(self, user):
return Auth(user)
@pytest.fixture()
def private_node(self, user):
return NodeFactory(creator=user)
@pytest.fixture()
def admin_node(self, user):
return NodeFactory(creator=user)
@pytest.fixture()
def other_node(self):
return NodeFactory()
@pytest.fixture()
def public_node(self):
return NodeFactory(is_public=True)
@pytest.fixture()
def linking_node_source(self, user, auth, private_node, admin_node):
linking_node_source = NodeFactory(creator=user)
linking_node_source.add_pointer(private_node, auth=auth)
linking_node_source.add_pointer(admin_node, auth=auth)
return linking_node_source
@pytest.fixture()
def contributor_node(self, user, contributor):
contributor_node = NodeFactory(creator=contributor)
contributor_node.add_contributor(user, auth=Auth(contributor))
contributor_node.save()
return contributor_node
@pytest.fixture()
def public_linking_node_source(
self, contributor, private_node, public_node):
public_linking_node_source = NodeFactory(
is_public=True, creator=contributor)
public_linking_node_source.add_pointer(
private_node, auth=Auth(contributor))
public_linking_node_source.add_pointer(
public_node, auth=Auth(contributor))
public_linking_node_source.save()
return public_linking_node_source
@pytest.fixture()
def public_linking_node(self, public_linking_node_source, contributor):
return RegistrationFactory(
project=public_linking_node_source,
is_public=True,
creator=contributor)
@pytest.fixture()
def linking_node(self, user, linking_node_source):
return RegistrationFactory(project=linking_node_source, creator=user)
@pytest.fixture()
def url(self, linking_node):
return '/{}registrations/{}/relationships/linked_nodes/'.format(
API_BASE, linking_node._id)
@pytest.fixture()
def public_url(self, public_linking_node):
return '/{}registrations/{}/relationships/linked_nodes/'.format(
API_BASE, public_linking_node._id)
@pytest.fixture()
def payload(self, admin_node):
def payload(node_ids=None):
node_ids = node_ids or [admin_node._id]
return {'data': [{'type': 'linked_nodes', 'id': node_id}
for node_id in node_ids]}
return payload
def test_node_relationship_node_links(
self, app, user, url, public_url, linking_node,
private_node, admin_node, public_node,
contributor_node, other_node, payload):
# get_relationship_linked_nodes
res = app.get(url, auth=user.auth)
assert res.status_code == 200
assert linking_node.linked_nodes_self_url in res.json['links']['self']
assert linking_node.linked_nodes_related_url in res.json['links']['html']
assert private_node._id in [e['id'] for e in res.json['data']]
assert res.json['data'][0]['type'] == 'linked_nodes'
# get_relationship_linked_nodes_2_13
res = app.get('{}?version=2.13'.format(url), auth=user.auth)
assert res.status_code == 200
assert linking_node.linked_nodes_self_url in res.json['links']['self']
assert linking_node.linked_nodes_related_url in res.json['links']['html']
assert private_node._id in [e['id'] for e in res.json['data']]
assert res.json['data'][0]['type'] == 'nodes'
# get_linked_nodes_related_counts
res = app.get(
'/{}registrations/{}/?related_counts=linked_nodes'.format(API_BASE, linking_node._id),
auth=user.auth
)
assert res.json['data']['relationships']['linked_nodes']['links']['related']['meta']['count'] == 2
# get_public_relationship_linked_nodes_logged_out
res = app.get(public_url)
assert res.status_code == 200
assert len(res.json['data']) == 1
assert public_node._id in [e['id'] for e in res.json['data']]
# get_public_relationship_linked_nodes_logged_in
res = app.get(public_url, auth=user.auth)
assert res.status_code == 200
assert len(res.json['data']) == 2
# get_private_relationship_linked_nodes_logged_out
res = app.get(url, expect_errors=True)
assert res.status_code == 401
# post_contributing_node
res = app.post_json_api(
url, payload([contributor_node._id]),
auth=user.auth, expect_errors=True
)
assert res.status_code == 405
# post_public_node
res = app.post_json_api(
url, payload([public_node._id]),
auth=user.auth, expect_errors=True
)
assert res.status_code == 405
# post_private_node
res = app.post_json_api(
url, payload([other_node._id]),
auth=user.auth,
expect_errors=True
)
assert res.status_code == 405
res = app.get(
url, auth=user.auth
)
ids = [data['id'] for data in res.json['data']]
assert other_node._id not in ids
assert private_node._id in ids
# post_mixed_nodes
res = app.post_json_api(
url, payload([other_node._id, contributor_node._id]),
auth=user.auth,
expect_errors=True
)
assert res.status_code == 405
res = app.get(
url, auth=user.auth
)
ids = [data['id'] for data in res.json['data']]
assert other_node._id not in ids
assert contributor_node._id not in ids
assert private_node._id in ids
# post_node_already_linked
res = app.post_json_api(
url, payload([private_node._id]),
auth=user.auth, expect_errors=True
)
assert res.status_code == 405
# put_contributing_node
res = app.put_json_api(
url, payload([contributor_node._id]),
auth=user.auth, expect_errors=True
)
assert res.status_code == 405
# put_private_node
res = app.put_json_api(
url, payload([other_node._id]),
auth=user.auth,
expect_errors=True
)
assert res.status_code == 405
res = app.get(
url, auth=user.auth
)
ids = [data['id'] for data in res.json['data']]
assert other_node._id not in ids
assert private_node._id in ids
# put_mixed_nodes
res = app.put_json_api(
url, payload([other_node._id, contributor_node._id]),
auth=user.auth, expect_errors=True
)
assert res.status_code == 405
res = app.get(
url, auth=user.auth
)
ids = [data['id'] for data in res.json['data']]
assert other_node._id not in ids
assert contributor_node._id not in ids
assert private_node._id in ids
# delete_with_put_empty_array
new_payload = payload()
new_payload['data'].pop()
res = app.put_json_api(
url, new_payload, auth=user.auth, expect_errors=True
)
assert res.status_code == 405
# delete_one
res = app.delete_json_api(
url, payload([private_node._id]),
auth=user.auth, expect_errors=True
)
assert res.status_code == 405
res = app.get(url, auth=user.auth)
ids = [data['id'] for data in res.json['data']]
assert admin_node._id in ids
assert private_node._id in ids
# delete_multiple
res = app.delete_json_api(
url, payload([private_node._id, admin_node._id]),
auth=user.auth, expect_errors=True
)
assert res.status_code == 405
res = app.get(url, auth=user.auth)
assert len(res.json['data']) == 2
# delete_not_present
number_of_links = linking_node.linked_nodes.count()
res = app.delete_json_api(
url, payload([other_node._id]),
auth=user.auth, expect_errors=True
)
assert res.status_code == 405
res = app.get(
url, auth=user.auth
)
assert len(res.json['data']) == number_of_links
# node_doesnt_exist
res = app.post_json_api(
url, payload(['aquarela']),
auth=user.auth,
expect_errors=True
)
assert res.status_code == 405
# type_mistyped
res = app.post_json_api(
url,
{'data': [{
'type': 'not_linked_nodes',
'id': contributor_node._id}]},
auth=user.auth,
expect_errors=True)
assert res.status_code == 405
# creates_public_linked_node_relationship_logged_out
res = app.post_json_api(
public_url, payload([public_node._id]),
expect_errors=True
)
assert res.status_code == 401
# creates_public_linked_node_relationship_logged_in
res = app.post_json_api(
public_url, payload([public_node._id]),
auth=user.auth, expect_errors=True
)
assert res.status_code == 405
# creates_private_linked_node_relationship_logged_out
res = app.post_json_api(
url, payload([other_node._id]),
expect_errors=True
)
assert res.status_code == 401
# put_public_nodes_relationships_logged_out
res = app.put_json_api(
public_url, payload([public_node._id]),
expect_errors=True
)
assert res.status_code == 401
# put_public_nodes_relationships_logged_in
res = app.put_json_api(
public_url, payload([private_node._id]),
auth=user.auth, expect_errors=True
)
assert res.status_code == 405
# delete_public_nodes_relationships_logged_out
res = app.delete_json_api(
public_url, payload([public_node._id]),
expect_errors=True
)
assert res.status_code == 401
# delete_public_nodes_relationships_logged_in
res = app.delete_json_api(
public_url, payload([private_node._id]),
auth=user.auth, expect_errors=True
)
assert res.status_code == 405
@pytest.mark.django_db
class TestNodeLinkedNodes:
@pytest.fixture()
def auth(self, user):
return Auth(user)
@pytest.fixture()
def private_node_one(self, user):
return NodeFactory(creator=user)
@pytest.fixture()
def private_node_two(self, user):
return NodeFactory(creator=user)
@pytest.fixture()
def node_source(
self, user, auth, private_node_one, private_node_two,
public_node):
node_source = NodeFactory(creator=user)
node_source.add_pointer(private_node_one, auth=auth)
node_source.add_pointer(private_node_two, auth=auth)
node_source.add_pointer(public_node, auth=auth)
node_source.save()
return node_source
@pytest.fixture()
def public_node(self, user):
return NodeFactory(is_public=True, creator=user)
@pytest.fixture()
def linking_node(self, user, node_source):
return RegistrationFactory(project=node_source, creator=user)
@pytest.fixture()
def url(self, linking_node):
return '/{}registrations/{}/linked_nodes/'.format(
API_BASE, linking_node._id)
@pytest.fixture()
def node_ids(self, linking_node):
return list(
linking_node.nodes_pointer.values_list(
'guids___id', flat=True))
def test_linked_nodes_returns_everything(self, app, user, url, node_ids):
res = app.get(url, auth=user.auth)
assert res.status_code == 200
nodes_returned = [
linked_node['id']for linked_node in res.json['data']
]
assert len(nodes_returned) == len(node_ids)
for node_id in node_ids:
assert node_id in nodes_returned
def test_linked_nodes_only_return_viewable_nodes(
self, app, auth, private_node_one, private_node_two,
public_node, node_ids):
user = AuthUserFactory()
new_linking_node = NodeFactory(creator=user)
private_node_one.add_contributor(user, auth=auth, save=True)
private_node_two.add_contributor(user, auth=auth, save=True)
public_node.add_contributor(user, auth=auth, save=True)
new_linking_node.add_pointer(private_node_one, auth=Auth(user))
new_linking_node.add_pointer(private_node_two, auth=Auth(user))
new_linking_node.add_pointer(public_node, auth=Auth(user))
new_linking_node.save()
new_linking_registration = RegistrationFactory(
project=new_linking_node, creator=user)
res = app.get(
'/{}registrations/{}/linked_nodes/'.format(API_BASE, new_linking_registration._id),
auth=user.auth
)
assert res.status_code == 200
nodes_returned = [linked_node['id']
for linked_node in res.json['data']]
assert len(nodes_returned) == len(node_ids)
for node_id in node_ids:
assert node_id in nodes_returned
# Disconnect contributor_removed so that we don't check in files
# We can remove this when StoredFileNode is implemented in osf-models
with disconnected_from_listeners(contributor_removed):
private_node_two.remove_contributor(user, auth=auth)
public_node.remove_contributor(user, auth=auth)
res = app.get(
'/{}registrations/{}/linked_nodes/'.format(API_BASE, new_linking_registration._id),
auth=user.auth
)
nodes_returned = [
linked_node['id'] for linked_node in res.json['data']
]
assert len(nodes_returned) == len(node_ids) - 1
assert private_node_one._id in nodes_returned
assert public_node._id in nodes_returned
assert private_node_two._id not in nodes_returned
def test_linked_nodes_doesnt_return_deleted_nodes(
self, app, user, url, private_node_one,
private_node_two, public_node, node_ids):
private_node_one.is_deleted = True
private_node_one.save()
res = app.get(url, auth=user.auth)
assert res.status_code == 200
nodes_returned = [
linked_node['id'] for linked_node in res.json['data']
]
assert len(nodes_returned) == len(node_ids) - 1
assert private_node_one._id not in nodes_returned
assert private_node_two._id in nodes_returned
assert public_node._id in nodes_returned
def test_attempt_to_return_linked_nodes_logged_out(self, app, url):
res = app.get(url, auth=None, expect_errors=True)
assert res.status_code == 401
|
apache-2.0
|
rameshthoomu/fabric
|
bddtests/peer/peer_pb2_grpc.py
|
17
|
1504
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
import peer.proposal_pb2 as peer_dot_proposal__pb2
import peer.proposal_response_pb2 as peer_dot_proposal__response__pb2
class EndorserStub(object):
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ProcessProposal = channel.unary_unary(
'/protos.Endorser/ProcessProposal',
request_serializer=peer_dot_proposal__pb2.SignedProposal.SerializeToString,
response_deserializer=peer_dot_proposal__response__pb2.ProposalResponse.FromString,
)
class EndorserServicer(object):
def ProcessProposal(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_EndorserServicer_to_server(servicer, server):
rpc_method_handlers = {
'ProcessProposal': grpc.unary_unary_rpc_method_handler(
servicer.ProcessProposal,
request_deserializer=peer_dot_proposal__pb2.SignedProposal.FromString,
response_serializer=peer_dot_proposal__response__pb2.ProposalResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'protos.Endorser', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
|
apache-2.0
|
yuezh/azure-linux-extensions
|
OSPatching/azure/servicemanagement/__init__.py
|
46
|
65202
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from xml.dom import minidom
from azure import (
WindowsAzureData,
_Base64String,
_create_entry,
_dict_of,
_encode_base64,
_general_error_handler,
_get_children_from_path,
_get_first_child_node_value,
_list_of,
_scalar_list_of,
_str,
_xml_attribute,
)
#-----------------------------------------------------------------------------
# Constants for Azure app environment settings.
AZURE_MANAGEMENT_CERTFILE = 'AZURE_MANAGEMENT_CERTFILE'
AZURE_MANAGEMENT_SUBSCRIPTIONID = 'AZURE_MANAGEMENT_SUBSCRIPTIONID'
# x-ms-version for service management.
X_MS_VERSION = '2013-06-01'
#-----------------------------------------------------------------------------
# Data classes
class StorageServices(WindowsAzureData):
def __init__(self):
self.storage_services = _list_of(StorageService)
def __iter__(self):
return iter(self.storage_services)
def __len__(self):
return len(self.storage_services)
def __getitem__(self, index):
return self.storage_services[index]
class StorageService(WindowsAzureData):
def __init__(self):
self.url = ''
self.service_name = ''
self.storage_service_properties = StorageAccountProperties()
self.storage_service_keys = StorageServiceKeys()
self.extended_properties = _dict_of(
'ExtendedProperty', 'Name', 'Value')
self.capabilities = _scalar_list_of(str, 'Capability')
class StorageAccountProperties(WindowsAzureData):
def __init__(self):
self.description = u''
self.affinity_group = u''
self.location = u''
self.label = _Base64String()
self.status = u''
self.endpoints = _scalar_list_of(str, 'Endpoint')
self.geo_replication_enabled = False
self.geo_primary_region = u''
self.status_of_primary = u''
self.geo_secondary_region = u''
self.status_of_secondary = u''
self.last_geo_failover_time = u''
self.creation_time = u''
class StorageServiceKeys(WindowsAzureData):
def __init__(self):
self.primary = u''
self.secondary = u''
class Locations(WindowsAzureData):
def __init__(self):
self.locations = _list_of(Location)
def __iter__(self):
return iter(self.locations)
def __len__(self):
return len(self.locations)
def __getitem__(self, index):
return self.locations[index]
class Location(WindowsAzureData):
def __init__(self):
self.name = u''
self.display_name = u''
self.available_services = _scalar_list_of(str, 'AvailableService')
class AffinityGroup(WindowsAzureData):
def __init__(self):
self.name = ''
self.label = _Base64String()
self.description = u''
self.location = u''
self.hosted_services = HostedServices()
self.storage_services = StorageServices()
self.capabilities = _scalar_list_of(str, 'Capability')
class AffinityGroups(WindowsAzureData):
def __init__(self):
self.affinity_groups = _list_of(AffinityGroup)
def __iter__(self):
return iter(self.affinity_groups)
def __len__(self):
return len(self.affinity_groups)
def __getitem__(self, index):
return self.affinity_groups[index]
class HostedServices(WindowsAzureData):
def __init__(self):
self.hosted_services = _list_of(HostedService)
def __iter__(self):
return iter(self.hosted_services)
def __len__(self):
return len(self.hosted_services)
def __getitem__(self, index):
return self.hosted_services[index]
class HostedService(WindowsAzureData):
def __init__(self):
self.url = u''
self.service_name = u''
self.hosted_service_properties = HostedServiceProperties()
self.deployments = Deployments()
class HostedServiceProperties(WindowsAzureData):
def __init__(self):
self.description = u''
self.location = u''
self.affinity_group = u''
self.label = _Base64String()
self.status = u''
self.date_created = u''
self.date_last_modified = u''
self.extended_properties = _dict_of(
'ExtendedProperty', 'Name', 'Value')
class VirtualNetworkSites(WindowsAzureData):
def __init__(self):
self.virtual_network_sites = _list_of(VirtualNetworkSite)
def __iter__(self):
return iter(self.virtual_network_sites)
def __len__(self):
return len(self.virtual_network_sites)
def __getitem__(self, index):
return self.virtual_network_sites[index]
class VirtualNetworkSite(WindowsAzureData):
def __init__(self):
self.name = u''
self.id = u''
self.affinity_group = u''
self.subnets = Subnets()
class Subnets(WindowsAzureData):
def __init__(self):
self.subnets = _list_of(Subnet)
def __iter__(self):
return iter(self.subnets)
def __len__(self):
return len(self.subnets)
def __getitem__(self, index):
return self.subnets[index]
class Subnet(WindowsAzureData):
def __init__(self):
self.name = u''
self.address_prefix = u''
class Deployments(WindowsAzureData):
def __init__(self):
self.deployments = _list_of(Deployment)
def __iter__(self):
return iter(self.deployments)
def __len__(self):
return len(self.deployments)
def __getitem__(self, index):
return self.deployments[index]
class Deployment(WindowsAzureData):
def __init__(self):
self.name = u''
self.deployment_slot = u''
self.private_id = u''
self.status = u''
self.label = _Base64String()
self.url = u''
self.configuration = _Base64String()
self.role_instance_list = RoleInstanceList()
self.upgrade_status = UpgradeStatus()
self.upgrade_domain_count = u''
self.role_list = RoleList()
self.sdk_version = u''
self.input_endpoint_list = InputEndpoints()
self.locked = False
self.rollback_allowed = False
self.persistent_vm_downtime_info = PersistentVMDowntimeInfo()
self.created_time = u''
self.virtual_network_name = u''
self.last_modified_time = u''
self.extended_properties = _dict_of(
'ExtendedProperty', 'Name', 'Value')
class RoleInstanceList(WindowsAzureData):
def __init__(self):
self.role_instances = _list_of(RoleInstance)
def __iter__(self):
return iter(self.role_instances)
def __len__(self):
return len(self.role_instances)
def __getitem__(self, index):
return self.role_instances[index]
class RoleInstance(WindowsAzureData):
def __init__(self):
self.role_name = u''
self.instance_name = u''
self.instance_status = u''
self.instance_upgrade_domain = 0
self.instance_fault_domain = 0
self.instance_size = u''
self.instance_state_details = u''
self.instance_error_code = u''
self.ip_address = u''
self.instance_endpoints = InstanceEndpoints()
self.power_state = u''
self.fqdn = u''
self.host_name = u''
class InstanceEndpoints(WindowsAzureData):
def __init__(self):
self.instance_endpoints = _list_of(InstanceEndpoint)
def __iter__(self):
return iter(self.instance_endpoints)
def __len__(self):
return len(self.instance_endpoints)
def __getitem__(self, index):
return self.instance_endpoints[index]
class InstanceEndpoint(WindowsAzureData):
def __init__(self):
self.name = u''
self.vip = u''
self.public_port = u''
self.local_port = u''
self.protocol = u''
class UpgradeStatus(WindowsAzureData):
def __init__(self):
self.upgrade_type = u''
self.current_upgrade_domain_state = u''
self.current_upgrade_domain = u''
class InputEndpoints(WindowsAzureData):
def __init__(self):
self.input_endpoints = _list_of(InputEndpoint)
def __iter__(self):
return iter(self.input_endpoints)
def __len__(self):
return len(self.input_endpoints)
def __getitem__(self, index):
return self.input_endpoints[index]
class InputEndpoint(WindowsAzureData):
def __init__(self):
self.role_name = u''
self.vip = u''
self.port = u''
class RoleList(WindowsAzureData):
def __init__(self):
self.roles = _list_of(Role)
def __iter__(self):
return iter(self.roles)
def __len__(self):
return len(self.roles)
def __getitem__(self, index):
return self.roles[index]
class Role(WindowsAzureData):
def __init__(self):
self.role_name = u''
self.role_type = u''
self.os_version = u''
self.configuration_sets = ConfigurationSets()
self.availability_set_name = u''
self.data_virtual_hard_disks = DataVirtualHardDisks()
self.os_virtual_hard_disk = OSVirtualHardDisk()
self.role_size = u''
self.default_win_rm_certificate_thumbprint = u''
class PersistentVMDowntimeInfo(WindowsAzureData):
def __init__(self):
self.start_time = u''
self.end_time = u''
self.status = u''
class Certificates(WindowsAzureData):
def __init__(self):
self.certificates = _list_of(Certificate)
def __iter__(self):
return iter(self.certificates)
def __len__(self):
return len(self.certificates)
def __getitem__(self, index):
return self.certificates[index]
class Certificate(WindowsAzureData):
def __init__(self):
self.certificate_url = u''
self.thumbprint = u''
self.thumbprint_algorithm = u''
self.data = u''
class OperationError(WindowsAzureData):
def __init__(self):
self.code = u''
self.message = u''
class Operation(WindowsAzureData):
def __init__(self):
self.id = u''
self.status = u''
self.http_status_code = u''
self.error = OperationError()
class OperatingSystem(WindowsAzureData):
def __init__(self):
self.version = u''
self.label = _Base64String()
self.is_default = True
self.is_active = True
self.family = 0
self.family_label = _Base64String()
class OperatingSystems(WindowsAzureData):
def __init__(self):
self.operating_systems = _list_of(OperatingSystem)
def __iter__(self):
return iter(self.operating_systems)
def __len__(self):
return len(self.operating_systems)
def __getitem__(self, index):
return self.operating_systems[index]
class OperatingSystemFamily(WindowsAzureData):
def __init__(self):
self.name = u''
self.label = _Base64String()
self.operating_systems = OperatingSystems()
class OperatingSystemFamilies(WindowsAzureData):
def __init__(self):
self.operating_system_families = _list_of(OperatingSystemFamily)
def __iter__(self):
return iter(self.operating_system_families)
def __len__(self):
return len(self.operating_system_families)
def __getitem__(self, index):
return self.operating_system_families[index]
class Subscription(WindowsAzureData):
def __init__(self):
self.subscription_id = u''
self.subscription_name = u''
self.subscription_status = u''
self.account_admin_live_email_id = u''
self.service_admin_live_email_id = u''
self.max_core_count = 0
self.max_storage_accounts = 0
self.max_hosted_services = 0
self.current_core_count = 0
self.current_hosted_services = 0
self.current_storage_accounts = 0
self.max_virtual_network_sites = 0
self.max_local_network_sites = 0
self.max_dns_servers = 0
class AvailabilityResponse(WindowsAzureData):
def __init__(self):
self.result = False
class SubscriptionCertificates(WindowsAzureData):
def __init__(self):
self.subscription_certificates = _list_of(SubscriptionCertificate)
def __iter__(self):
return iter(self.subscription_certificates)
def __len__(self):
return len(self.subscription_certificates)
def __getitem__(self, index):
return self.subscription_certificates[index]
class SubscriptionCertificate(WindowsAzureData):
def __init__(self):
self.subscription_certificate_public_key = u''
self.subscription_certificate_thumbprint = u''
self.subscription_certificate_data = u''
self.created = u''
class Images(WindowsAzureData):
def __init__(self):
self.images = _list_of(OSImage)
def __iter__(self):
return iter(self.images)
def __len__(self):
return len(self.images)
def __getitem__(self, index):
return self.images[index]
class OSImage(WindowsAzureData):
def __init__(self):
self.affinity_group = u''
self.category = u''
self.location = u''
self.logical_size_in_gb = 0
self.label = u''
self.media_link = u''
self.name = u''
self.os = u''
self.eula = u''
self.description = u''
class Disks(WindowsAzureData):
def __init__(self):
self.disks = _list_of(Disk)
def __iter__(self):
return iter(self.disks)
def __len__(self):
return len(self.disks)
def __getitem__(self, index):
return self.disks[index]
class Disk(WindowsAzureData):
def __init__(self):
self.affinity_group = u''
self.attached_to = AttachedTo()
self.has_operating_system = u''
self.is_corrupted = u''
self.location = u''
self.logical_disk_size_in_gb = 0
self.label = u''
self.media_link = u''
self.name = u''
self.os = u''
self.source_image_name = u''
class AttachedTo(WindowsAzureData):
def __init__(self):
self.hosted_service_name = u''
self.deployment_name = u''
self.role_name = u''
class PersistentVMRole(WindowsAzureData):
def __init__(self):
self.role_name = u''
self.role_type = u''
self.os_version = u'' # undocumented
self.configuration_sets = ConfigurationSets()
self.availability_set_name = u''
self.data_virtual_hard_disks = DataVirtualHardDisks()
self.os_virtual_hard_disk = OSVirtualHardDisk()
self.role_size = u''
self.default_win_rm_certificate_thumbprint = u''
class ConfigurationSets(WindowsAzureData):
def __init__(self):
self.configuration_sets = _list_of(ConfigurationSet)
def __iter__(self):
return iter(self.configuration_sets)
def __len__(self):
return len(self.configuration_sets)
def __getitem__(self, index):
return self.configuration_sets[index]
class ConfigurationSet(WindowsAzureData):
def __init__(self):
self.configuration_set_type = u'NetworkConfiguration'
self.role_type = u''
self.input_endpoints = ConfigurationSetInputEndpoints()
self.subnet_names = _scalar_list_of(str, 'SubnetName')
class ConfigurationSetInputEndpoints(WindowsAzureData):
def __init__(self):
self.input_endpoints = _list_of(
ConfigurationSetInputEndpoint, 'InputEndpoint')
def __iter__(self):
return iter(self.input_endpoints)
def __len__(self):
return len(self.input_endpoints)
def __getitem__(self, index):
return self.input_endpoints[index]
class ConfigurationSetInputEndpoint(WindowsAzureData):
'''
Initializes a network configuration input endpoint.
name: Specifies the name for the external endpoint.
protocol:
Specifies the protocol to use to inspect the virtual machine
availability status. Possible values are: HTTP, TCP.
port: Specifies the external port to use for the endpoint.
local_port:
Specifies the internal port on which the virtual machine is listening
to serve the endpoint.
load_balanced_endpoint_set_name:
Specifies a name for a set of load-balanced endpoints. Specifying this
element for a given endpoint adds it to the set. If you are setting an
endpoint to use to connect to the virtual machine via the Remote
Desktop, do not set this property.
enable_direct_server_return:
Specifies whether direct server return load balancing is enabled.
'''
def __init__(self, name=u'', protocol=u'', port=u'', local_port=u'',
load_balanced_endpoint_set_name=u'',
enable_direct_server_return=False):
self.enable_direct_server_return = enable_direct_server_return
self.load_balanced_endpoint_set_name = load_balanced_endpoint_set_name
self.local_port = local_port
self.name = name
self.port = port
self.load_balancer_probe = LoadBalancerProbe()
self.protocol = protocol
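# Illustrative sketch (not part of the original module): following the
# docstring above, a load-balanced HTTP endpoint could be described roughly
# like this; the names and ports are placeholder values.
#
#   web_endpoint = ConfigurationSetInputEndpoint(
#       name='web', protocol='tcp', port='80', local_port='80',
#       load_balanced_endpoint_set_name='web-lb')
#   network_config = ConfigurationSet()
#   network_config.input_endpoints.input_endpoints.append(web_endpoint)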
class WindowsConfigurationSet(WindowsAzureData):
def __init__(self, computer_name=None, admin_password=None,
reset_password_on_first_logon=None,
enable_automatic_updates=None, time_zone=None,
admin_username=None):
self.configuration_set_type = u'WindowsProvisioningConfiguration'
self.computer_name = computer_name
self.admin_password = admin_password
self.admin_username = admin_username
self.reset_password_on_first_logon = reset_password_on_first_logon
self.enable_automatic_updates = enable_automatic_updates
self.time_zone = time_zone
self.domain_join = DomainJoin()
self.stored_certificate_settings = StoredCertificateSettings()
self.win_rm = WinRM()
class DomainJoin(WindowsAzureData):
def __init__(self):
self.credentials = Credentials()
self.join_domain = u''
self.machine_object_ou = u''
class Credentials(WindowsAzureData):
def __init__(self):
self.domain = u''
self.username = u''
self.password = u''
class StoredCertificateSettings(WindowsAzureData):
def __init__(self):
self.stored_certificate_settings = _list_of(CertificateSetting)
def __iter__(self):
return iter(self.stored_certificate_settings)
def __len__(self):
return len(self.stored_certificate_settings)
def __getitem__(self, index):
return self.stored_certificate_settings[index]
class CertificateSetting(WindowsAzureData):
'''
Initializes a certificate setting.
thumbprint:
Specifies the thumbprint of the certificate to be provisioned. The
thumbprint must specify an existing service certificate.
store_name:
Specifies the name of the certificate store from which retrieve
certificate.
store_location:
Specifies the target certificate store location on the virtual machine.
The only supported value is LocalMachine.
'''
def __init__(self, thumbprint=u'', store_name=u'', store_location=u''):
self.thumbprint = thumbprint
self.store_name = store_name
self.store_location = store_location
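# Illustrative sketch (not part of the original module): per the docstring
# above, provisioning an existing service certificate into the virtual
# machine's LocalMachine store might look roughly like this, given a
# WindowsConfigurationSet instance named windows_config (placeholder values).
#
#   cert = CertificateSetting(thumbprint='THUMBPRINT-PLACEHOLDER',
#                             store_name='My',
#                             store_location='LocalMachine')
#   windows_config.stored_certificate_settings.stored_certificate_settings.append(cert)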
class WinRM(WindowsAzureData):
'''
Contains configuration settings for the Windows Remote Management service on
the Virtual Machine.
'''
def __init__(self):
self.listeners = Listeners()
class Listeners(WindowsAzureData):
def __init__(self):
self.listeners = _list_of(Listener)
def __iter__(self):
return iter(self.listeners)
def __len__(self):
return len(self.listeners)
def __getitem__(self, index):
return self.listeners[index]
class Listener(WindowsAzureData):
'''
Specifies the protocol and certificate information for the listener.
protocol:
Specifies the protocol of listener. Possible values are: Http, Https.
The value is case sensitive.
certificate_thumbprint:
Optional. Specifies the certificate thumbprint for the secure
connection. If this value is not specified, a self-signed certificate is
generated and used for the Virtual Machine.
'''
def __init__(self, protocol=u'', certificate_thumbprint=u''):
self.protocol = protocol
self.certificate_thumbprint = certificate_thumbprint
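# Illustrative sketch (not part of the original module): per the WinRM and
# Listener docstrings above, HTTP and HTTPS listeners for a Windows role could
# be assembled roughly like this; the credentials and thumbprint are placeholders.
#
#   windows_config = WindowsConfigurationSet(computer_name='myvm',
#                                            admin_username='azureuser',
#                                            admin_password='PLACEHOLDER')
#   windows_config.win_rm.listeners.listeners.append(Listener('Http'))
#   windows_config.win_rm.listeners.listeners.append(
#       Listener('Https', certificate_thumbprint='THUMBPRINT-PLACEHOLDER'))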
class LinuxConfigurationSet(WindowsAzureData):
def __init__(self, host_name=None, user_name=None, user_password=None,
disable_ssh_password_authentication=None):
self.configuration_set_type = u'LinuxProvisioningConfiguration'
self.host_name = host_name
self.user_name = user_name
self.user_password = user_password
self.disable_ssh_password_authentication =\
disable_ssh_password_authentication
self.ssh = SSH()
class SSH(WindowsAzureData):
def __init__(self):
self.public_keys = PublicKeys()
self.key_pairs = KeyPairs()
class PublicKeys(WindowsAzureData):
def __init__(self):
self.public_keys = _list_of(PublicKey)
def __iter__(self):
return iter(self.public_keys)
def __len__(self):
return len(self.public_keys)
def __getitem__(self, index):
return self.public_keys[index]
class PublicKey(WindowsAzureData):
def __init__(self, fingerprint=u'', path=u''):
self.fingerprint = fingerprint
self.path = path
class KeyPairs(WindowsAzureData):
def __init__(self):
self.key_pairs = _list_of(KeyPair)
def __iter__(self):
return iter(self.key_pairs)
def __len__(self):
return len(self.key_pairs)
def __getitem__(self, index):
return self.key_pairs[index]
class KeyPair(WindowsAzureData):
def __init__(self, fingerprint=u'', path=u''):
self.fingerprint = fingerprint
self.path = path
class LoadBalancerProbe(WindowsAzureData):
def __init__(self):
self.path = u''
self.port = u''
self.protocol = u''
class DataVirtualHardDisks(WindowsAzureData):
def __init__(self):
self.data_virtual_hard_disks = _list_of(DataVirtualHardDisk)
def __iter__(self):
return iter(self.data_virtual_hard_disks)
def __len__(self):
return len(self.data_virtual_hard_disks)
def __getitem__(self, index):
return self.data_virtual_hard_disks[index]
class DataVirtualHardDisk(WindowsAzureData):
def __init__(self):
self.host_caching = u''
self.disk_label = u''
self.disk_name = u''
self.lun = 0
self.logical_disk_size_in_gb = 0
self.media_link = u''
class OSVirtualHardDisk(WindowsAzureData):
def __init__(self, source_image_name=None, media_link=None,
host_caching=None, disk_label=None, disk_name=None):
self.source_image_name = source_image_name
self.media_link = media_link
self.host_caching = host_caching
self.disk_label = disk_label
self.disk_name = disk_name
self.os = u'' # undocumented, not used when adding a role
class AsynchronousOperationResult(WindowsAzureData):
def __init__(self, request_id=None):
self.request_id = request_id
class ServiceBusRegion(WindowsAzureData):
def __init__(self):
self.code = u''
self.fullname = u''
class ServiceBusNamespace(WindowsAzureData):
def __init__(self):
self.name = u''
self.region = u''
self.default_key = u''
self.status = u''
self.created_at = u''
self.acs_management_endpoint = u''
self.servicebus_endpoint = u''
self.connection_string = u''
self.subscription_id = u''
self.enabled = False
class WebSpaces(WindowsAzureData):
def __init__(self):
self.web_space = _list_of(WebSpace)
def __iter__(self):
return iter(self.web_space)
def __len__(self):
return len(self.web_space)
def __getitem__(self, index):
return self.web_space[index]
class WebSpace(WindowsAzureData):
def __init__(self):
self.availability_state = u''
self.geo_location = u''
self.geo_region = u''
self.name = u''
self.plan = u''
self.status = u''
self.subscription = u''
class Sites(WindowsAzureData):
def __init__(self):
self.site = _list_of(Site)
def __iter__(self):
return iter(self.site)
def __len__(self):
return len(self.site)
def __getitem__(self, index):
return self.site[index]
class Site(WindowsAzureData):
def __init__(self):
self.admin_enabled = False
self.availability_state = ''
self.compute_mode = ''
self.enabled = False
self.enabled_host_names = _scalar_list_of(str, 'a:string')
self.host_name_ssl_states = HostNameSslStates()
self.host_names = _scalar_list_of(str, 'a:string')
self.last_modified_time_utc = ''
self.name = ''
self.repository_site_name = ''
self.self_link = ''
self.server_farm = ''
self.site_mode = ''
self.state = ''
self.storage_recovery_default_state = ''
self.usage_state = ''
self.web_space = ''
class HostNameSslStates(WindowsAzureData):
def __init__(self):
self.host_name_ssl_state = _list_of(HostNameSslState)
def __iter__(self):
return iter(self.host_name_ssl_state)
def __len__(self):
return len(self.host_name_ssl_state)
def __getitem__(self, index):
return self.host_name_ssl_state[index]
class HostNameSslState(WindowsAzureData):
def __init__(self):
self.name = u''
self.ssl_state = u''
class PublishData(WindowsAzureData):
_xml_name = 'publishData'
def __init__(self):
self.publish_profiles = _list_of(PublishProfile, 'publishProfile')
class PublishProfile(WindowsAzureData):
def __init__(self):
self.profile_name = _xml_attribute('profileName')
self.publish_method = _xml_attribute('publishMethod')
self.publish_url = _xml_attribute('publishUrl')
self.msdeploysite = _xml_attribute('msdeploySite')
self.user_name = _xml_attribute('userName')
self.user_pwd = _xml_attribute('userPWD')
self.destination_app_url = _xml_attribute('destinationAppUrl')
self.sql_server_db_connection_string = _xml_attribute('SQLServerDBConnectionString')
self.my_sqldb_connection_string = _xml_attribute('mySQLDBConnectionString')
self.hosting_provider_forum_link = _xml_attribute('hostingProviderForumLink')
self.control_panel_link = _xml_attribute('controlPanelLink')
class QueueDescription(WindowsAzureData):
def __init__(self):
self.lock_duration = u''
self.max_size_in_megabytes = 0
self.requires_duplicate_detection = False
self.requires_session = False
self.default_message_time_to_live = u''
self.dead_lettering_on_message_expiration = False
self.duplicate_detection_history_time_window = u''
self.max_delivery_count = 0
self.enable_batched_operations = False
self.size_in_bytes = 0
self.message_count = 0
self.is_anonymous_accessible = False
self.authorization_rules = AuthorizationRules()
self.status = u''
self.created_at = u''
self.updated_at = u''
self.accessed_at = u''
self.support_ordering = False
self.auto_delete_on_idle = u''
self.count_details = CountDetails()
self.entity_availability_status = u''
class TopicDescription(WindowsAzureData):
def __init__(self):
self.default_message_time_to_live = u''
self.max_size_in_megabytes = 0
self.requires_duplicate_detection = False
self.duplicate_detection_history_time_window = u''
self.enable_batched_operations = False
self.size_in_bytes = 0
self.filtering_messages_before_publishing = False
self.is_anonymous_accessible = False
self.authorization_rules = AuthorizationRules()
self.status = u''
self.created_at = u''
self.updated_at = u''
self.accessed_at = u''
self.support_ordering = False
self.count_details = CountDetails()
self.subscription_count = 0
class CountDetails(WindowsAzureData):
def __init__(self):
self.active_message_count = 0
self.dead_letter_message_count = 0
self.scheduled_message_count = 0
self.transfer_message_count = 0
self.transfer_dead_letter_message_count = 0
class NotificationHubDescription(WindowsAzureData):
def __init__(self):
self.registration_ttl = u''
self.authorization_rules = AuthorizationRules()
class AuthorizationRules(WindowsAzureData):
def __init__(self):
self.authorization_rule = _list_of(AuthorizationRule)
def __iter__(self):
return iter(self.authorization_rule)
def __len__(self):
return len(self.authorization_rule)
def __getitem__(self, index):
return self.authorization_rule[index]
class AuthorizationRule(WindowsAzureData):
def __init__(self):
self.claim_type = u''
self.claim_value = u''
self.rights = _scalar_list_of(str, 'AccessRights')
self.created_time = u''
self.modified_time = u''
self.key_name = u''
self.primary_key = u''
        self.secondary_key = u''
class RelayDescription(WindowsAzureData):
def __init__(self):
self.path = u''
self.listener_type = u''
self.listener_count = 0
self.created_at = u''
self.updated_at = u''
class MetricResponses(WindowsAzureData):
def __init__(self):
self.metric_response = _list_of(MetricResponse)
def __iter__(self):
return iter(self.metric_response)
def __len__(self):
return len(self.metric_response)
def __getitem__(self, index):
return self.metric_response[index]
class MetricResponse(WindowsAzureData):
def __init__(self):
self.code = u''
self.data = Data()
self.message = u''
class Data(WindowsAzureData):
def __init__(self):
self.display_name = u''
self.end_time = u''
self.name = u''
self.primary_aggregation_type = u''
self.start_time = u''
self.time_grain = u''
self.unit = u''
self.values = Values()
class Values(WindowsAzureData):
def __init__(self):
self.metric_sample = _list_of(MetricSample)
def __iter__(self):
return iter(self.metric_sample)
def __len__(self):
return len(self.metric_sample)
def __getitem__(self, index):
return self.metric_sample[index]
class MetricSample(WindowsAzureData):
def __init__(self):
self.count = 0
self.time_created = u''
self.total = 0
class MetricDefinitions(WindowsAzureData):
def __init__(self):
self.metric_definition = _list_of(MetricDefinition)
def __iter__(self):
return iter(self.metric_definition)
def __len__(self):
return len(self.metric_definition)
def __getitem__(self, index):
return self.metric_definition[index]
class MetricDefinition(WindowsAzureData):
def __init__(self):
self.display_name = u''
self.metric_availabilities = MetricAvailabilities()
self.name = u''
self.primary_aggregation_type = u''
self.unit = u''
class MetricAvailabilities(WindowsAzureData):
def __init__(self):
self.metric_availability = _list_of(MetricAvailability, 'MetricAvailabilily')
def __iter__(self):
return iter(self.metric_availability)
def __len__(self):
return len(self.metric_availability)
def __getitem__(self, index):
return self.metric_availability[index]
class MetricAvailability(WindowsAzureData):
def __init__(self):
self.retention = u''
self.time_grain = u''
class Servers(WindowsAzureData):
def __init__(self):
self.server = _list_of(Server)
def __iter__(self):
return iter(self.server)
def __len__(self):
return len(self.server)
def __getitem__(self, index):
return self.server[index]
class Server(WindowsAzureData):
def __init__(self):
self.name = u''
self.administrator_login = u''
self.location = u''
self.fully_qualified_domain_name = u''
self.version = u''
class Database(WindowsAzureData):
def __init__(self):
self.name = u''
self.type = u''
self.state = u''
self.self_link = u''
self.parent_link = u''
self.id = 0
self.edition = u''
self.collation_name = u''
self.creation_date = u''
self.is_federation_root = False
self.is_system_object = False
self.max_size_bytes = 0
def _update_management_header(request):
''' Add additional headers for management. '''
if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
request.headers.append(('Content-Length', str(len(request.body))))
    # append additional headers based on the service
    request.headers.append(('x-ms-version', X_MS_VERSION))
    # if it is not a GET or HEAD request, the content type must be set.
    if request.method not in ['GET', 'HEAD']:
for name, _ in request.headers:
if 'content-type' == name.lower():
break
else:
request.headers.append(
('Content-Type',
'application/atom+xml;type=entry;charset=utf-8'))
return request.headers
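# Illustrative note (not part of the original source): for a PUT, POST, MERGE
# or DELETE request this helper appends a Content-Length header, it always
# appends the x-ms-version header, and it falls back to
# 'application/atom+xml;type=entry;charset=utf-8' when no Content-Type header
# is already present on a non-GET/HEAD request.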
def _parse_response_for_async_op(response):
''' Extracts request id from response header. '''
if response is None:
return None
result = AsynchronousOperationResult()
if response.headers:
for name, value in response.headers:
if name.lower() == 'x-ms-request-id':
result.request_id = value
return result
def _management_error_handler(http_error):
''' Simple error handler for management service. '''
return _general_error_handler(http_error)
def _lower(text):
return text.lower()
class _XmlSerializer(object):
@staticmethod
def create_storage_service_input_to_xml(service_name, description, label,
affinity_group, location,
geo_replication_enabled,
extended_properties):
return _XmlSerializer.doc_from_data(
'CreateStorageServiceInput',
[('ServiceName', service_name),
('Description', description),
('Label', label, _encode_base64),
('AffinityGroup', affinity_group),
('Location', location),
('GeoReplicationEnabled', geo_replication_enabled, _lower)],
extended_properties)
@staticmethod
def update_storage_service_input_to_xml(description, label,
geo_replication_enabled,
extended_properties):
return _XmlSerializer.doc_from_data(
'UpdateStorageServiceInput',
[('Description', description),
('Label', label, _encode_base64),
('GeoReplicationEnabled', geo_replication_enabled, _lower)],
extended_properties)
@staticmethod
def regenerate_keys_to_xml(key_type):
return _XmlSerializer.doc_from_data('RegenerateKeys',
[('KeyType', key_type)])
@staticmethod
def update_hosted_service_to_xml(label, description, extended_properties):
return _XmlSerializer.doc_from_data('UpdateHostedService',
[('Label', label, _encode_base64),
('Description', description)],
extended_properties)
@staticmethod
def create_hosted_service_to_xml(service_name, label, description,
location, affinity_group,
extended_properties):
return _XmlSerializer.doc_from_data(
'CreateHostedService',
[('ServiceName', service_name),
('Label', label, _encode_base64),
('Description', description),
('Location', location),
('AffinityGroup', affinity_group)],
extended_properties)
@staticmethod
def create_deployment_to_xml(name, package_url, label, configuration,
start_deployment, treat_warnings_as_error,
extended_properties):
return _XmlSerializer.doc_from_data(
'CreateDeployment',
[('Name', name),
('PackageUrl', package_url),
('Label', label, _encode_base64),
('Configuration', configuration),
('StartDeployment',
start_deployment, _lower),
('TreatWarningsAsError', treat_warnings_as_error, _lower)],
extended_properties)
@staticmethod
def swap_deployment_to_xml(production, source_deployment):
return _XmlSerializer.doc_from_data(
'Swap',
[('Production', production),
('SourceDeployment', source_deployment)])
@staticmethod
def update_deployment_status_to_xml(status):
return _XmlSerializer.doc_from_data(
'UpdateDeploymentStatus',
[('Status', status)])
@staticmethod
def change_deployment_to_xml(configuration, treat_warnings_as_error, mode,
extended_properties):
return _XmlSerializer.doc_from_data(
'ChangeConfiguration',
[('Configuration', configuration),
('TreatWarningsAsError', treat_warnings_as_error, _lower),
('Mode', mode)],
extended_properties)
@staticmethod
def upgrade_deployment_to_xml(mode, package_url, configuration, label,
role_to_upgrade, force, extended_properties):
return _XmlSerializer.doc_from_data(
'UpgradeDeployment',
[('Mode', mode),
('PackageUrl', package_url),
('Configuration', configuration),
('Label', label, _encode_base64),
('RoleToUpgrade', role_to_upgrade),
('Force', force, _lower)],
extended_properties)
@staticmethod
def rollback_upgrade_to_xml(mode, force):
return _XmlSerializer.doc_from_data(
'RollbackUpdateOrUpgrade',
[('Mode', mode),
('Force', force, _lower)])
@staticmethod
def walk_upgrade_domain_to_xml(upgrade_domain):
return _XmlSerializer.doc_from_data(
'WalkUpgradeDomain',
[('UpgradeDomain', upgrade_domain)])
@staticmethod
def certificate_file_to_xml(data, certificate_format, password):
return _XmlSerializer.doc_from_data(
'CertificateFile',
[('Data', data),
('CertificateFormat', certificate_format),
('Password', password)])
@staticmethod
def create_affinity_group_to_xml(name, label, description, location):
return _XmlSerializer.doc_from_data(
'CreateAffinityGroup',
[('Name', name),
('Label', label, _encode_base64),
('Description', description),
('Location', location)])
@staticmethod
def update_affinity_group_to_xml(label, description):
return _XmlSerializer.doc_from_data(
'UpdateAffinityGroup',
[('Label', label, _encode_base64),
('Description', description)])
@staticmethod
def subscription_certificate_to_xml(public_key, thumbprint, data):
return _XmlSerializer.doc_from_data(
'SubscriptionCertificate',
[('SubscriptionCertificatePublicKey', public_key),
('SubscriptionCertificateThumbprint', thumbprint),
('SubscriptionCertificateData', data)])
@staticmethod
def os_image_to_xml(label, media_link, name, os):
return _XmlSerializer.doc_from_data(
'OSImage',
[('Label', label),
('MediaLink', media_link),
('Name', name),
('OS', os)])
@staticmethod
def data_virtual_hard_disk_to_xml(host_caching, disk_label, disk_name, lun,
logical_disk_size_in_gb, media_link,
source_media_link):
return _XmlSerializer.doc_from_data(
'DataVirtualHardDisk',
[('HostCaching', host_caching),
('DiskLabel', disk_label),
('DiskName', disk_name),
('Lun', lun),
('LogicalDiskSizeInGB', logical_disk_size_in_gb),
('MediaLink', media_link),
('SourceMediaLink', source_media_link)])
@staticmethod
def disk_to_xml(has_operating_system, label, media_link, name, os):
return _XmlSerializer.doc_from_data(
'Disk',
[('HasOperatingSystem', has_operating_system, _lower),
('Label', label),
('MediaLink', media_link),
('Name', name),
('OS', os)])
@staticmethod
def restart_role_operation_to_xml():
return _XmlSerializer.doc_from_xml(
'RestartRoleOperation',
'<OperationType>RestartRoleOperation</OperationType>')
@staticmethod
def shutdown_role_operation_to_xml(post_shutdown_action):
xml = _XmlSerializer.data_to_xml(
[('OperationType', 'ShutdownRoleOperation'),
('PostShutdownAction', post_shutdown_action)])
return _XmlSerializer.doc_from_xml('ShutdownRoleOperation', xml)
@staticmethod
def shutdown_roles_operation_to_xml(role_names, post_shutdown_action):
xml = _XmlSerializer.data_to_xml(
[('OperationType', 'ShutdownRolesOperation')])
xml += '<Roles>'
for role_name in role_names:
xml += _XmlSerializer.data_to_xml([('Name', role_name)])
xml += '</Roles>'
xml += _XmlSerializer.data_to_xml(
[('PostShutdownAction', post_shutdown_action)])
return _XmlSerializer.doc_from_xml('ShutdownRolesOperation', xml)
@staticmethod
def start_role_operation_to_xml():
return _XmlSerializer.doc_from_xml(
'StartRoleOperation',
'<OperationType>StartRoleOperation</OperationType>')
@staticmethod
def start_roles_operation_to_xml(role_names):
xml = _XmlSerializer.data_to_xml(
[('OperationType', 'StartRolesOperation')])
xml += '<Roles>'
for role_name in role_names:
xml += _XmlSerializer.data_to_xml([('Name', role_name)])
xml += '</Roles>'
return _XmlSerializer.doc_from_xml('StartRolesOperation', xml)
@staticmethod
def windows_configuration_to_xml(configuration):
xml = _XmlSerializer.data_to_xml(
[('ConfigurationSetType', configuration.configuration_set_type),
('ComputerName', configuration.computer_name),
('AdminPassword', configuration.admin_password),
('ResetPasswordOnFirstLogon',
configuration.reset_password_on_first_logon,
_lower),
('EnableAutomaticUpdates',
configuration.enable_automatic_updates,
_lower),
('TimeZone', configuration.time_zone)])
if configuration.domain_join is not None:
xml += '<DomainJoin>'
xml += '<Credentials>'
xml += _XmlSerializer.data_to_xml(
[('Domain', configuration.domain_join.credentials.domain),
('Username', configuration.domain_join.credentials.username),
('Password', configuration.domain_join.credentials.password)])
xml += '</Credentials>'
xml += _XmlSerializer.data_to_xml(
[('JoinDomain', configuration.domain_join.join_domain),
('MachineObjectOU',
configuration.domain_join.machine_object_ou)])
xml += '</DomainJoin>'
if configuration.stored_certificate_settings is not None:
xml += '<StoredCertificateSettings>'
for cert in configuration.stored_certificate_settings:
xml += '<CertificateSetting>'
xml += _XmlSerializer.data_to_xml(
[('StoreLocation', cert.store_location),
('StoreName', cert.store_name),
('Thumbprint', cert.thumbprint)])
xml += '</CertificateSetting>'
xml += '</StoredCertificateSettings>'
if configuration.win_rm is not None:
xml += '<WinRM><Listeners>'
for listener in configuration.win_rm.listeners:
xml += '<Listener>'
xml += _XmlSerializer.data_to_xml(
[('Protocol', listener.protocol),
('CertificateThumbprint', listener.certificate_thumbprint)])
xml += '</Listener>'
xml += '</Listeners></WinRM>'
xml += _XmlSerializer.data_to_xml(
[('AdminUsername', configuration.admin_username)])
return xml
@staticmethod
def linux_configuration_to_xml(configuration):
xml = _XmlSerializer.data_to_xml(
[('ConfigurationSetType', configuration.configuration_set_type),
('HostName', configuration.host_name),
('UserName', configuration.user_name),
('UserPassword', configuration.user_password),
('DisableSshPasswordAuthentication',
configuration.disable_ssh_password_authentication,
_lower)])
if configuration.ssh is not None:
xml += '<SSH>'
xml += '<PublicKeys>'
for key in configuration.ssh.public_keys:
xml += '<PublicKey>'
xml += _XmlSerializer.data_to_xml(
[('Fingerprint', key.fingerprint),
('Path', key.path)])
xml += '</PublicKey>'
xml += '</PublicKeys>'
xml += '<KeyPairs>'
for key in configuration.ssh.key_pairs:
xml += '<KeyPair>'
xml += _XmlSerializer.data_to_xml(
[('Fingerprint', key.fingerprint),
('Path', key.path)])
xml += '</KeyPair>'
xml += '</KeyPairs>'
xml += '</SSH>'
return xml
@staticmethod
def network_configuration_to_xml(configuration):
xml = _XmlSerializer.data_to_xml(
[('ConfigurationSetType', configuration.configuration_set_type)])
xml += '<InputEndpoints>'
for endpoint in configuration.input_endpoints:
xml += '<InputEndpoint>'
xml += _XmlSerializer.data_to_xml(
[('LoadBalancedEndpointSetName',
endpoint.load_balanced_endpoint_set_name),
('LocalPort', endpoint.local_port),
('Name', endpoint.name),
('Port', endpoint.port)])
if endpoint.load_balancer_probe.path or\
endpoint.load_balancer_probe.port or\
endpoint.load_balancer_probe.protocol:
xml += '<LoadBalancerProbe>'
xml += _XmlSerializer.data_to_xml(
[('Path', endpoint.load_balancer_probe.path),
('Port', endpoint.load_balancer_probe.port),
('Protocol', endpoint.load_balancer_probe.protocol)])
xml += '</LoadBalancerProbe>'
xml += _XmlSerializer.data_to_xml(
[('Protocol', endpoint.protocol),
('EnableDirectServerReturn',
endpoint.enable_direct_server_return,
_lower)])
xml += '</InputEndpoint>'
xml += '</InputEndpoints>'
xml += '<SubnetNames>'
for name in configuration.subnet_names:
xml += _XmlSerializer.data_to_xml([('SubnetName', name)])
xml += '</SubnetNames>'
return xml
@staticmethod
def role_to_xml(availability_set_name, data_virtual_hard_disks,
network_configuration_set, os_virtual_hard_disk, role_name,
role_size, role_type, system_configuration_set):
xml = _XmlSerializer.data_to_xml([('RoleName', role_name),
('RoleType', role_type)])
xml += '<ConfigurationSets>'
if system_configuration_set is not None:
xml += '<ConfigurationSet>'
if isinstance(system_configuration_set, WindowsConfigurationSet):
xml += _XmlSerializer.windows_configuration_to_xml(
system_configuration_set)
elif isinstance(system_configuration_set, LinuxConfigurationSet):
xml += _XmlSerializer.linux_configuration_to_xml(
system_configuration_set)
xml += '</ConfigurationSet>'
if network_configuration_set is not None:
xml += '<ConfigurationSet>'
xml += _XmlSerializer.network_configuration_to_xml(
network_configuration_set)
xml += '</ConfigurationSet>'
xml += '</ConfigurationSets>'
if availability_set_name is not None:
xml += _XmlSerializer.data_to_xml(
[('AvailabilitySetName', availability_set_name)])
if data_virtual_hard_disks is not None:
xml += '<DataVirtualHardDisks>'
for hd in data_virtual_hard_disks:
xml += '<DataVirtualHardDisk>'
xml += _XmlSerializer.data_to_xml(
[('HostCaching', hd.host_caching),
('DiskLabel', hd.disk_label),
('DiskName', hd.disk_name),
('Lun', hd.lun),
('LogicalDiskSizeInGB', hd.logical_disk_size_in_gb),
('MediaLink', hd.media_link)])
xml += '</DataVirtualHardDisk>'
xml += '</DataVirtualHardDisks>'
if os_virtual_hard_disk is not None:
xml += '<OSVirtualHardDisk>'
xml += _XmlSerializer.data_to_xml(
[('HostCaching', os_virtual_hard_disk.host_caching),
('DiskLabel', os_virtual_hard_disk.disk_label),
('DiskName', os_virtual_hard_disk.disk_name),
('MediaLink', os_virtual_hard_disk.media_link),
('SourceImageName', os_virtual_hard_disk.source_image_name)])
xml += '</OSVirtualHardDisk>'
if role_size is not None:
xml += _XmlSerializer.data_to_xml([('RoleSize', role_size)])
return xml
@staticmethod
def add_role_to_xml(role_name, system_configuration_set,
os_virtual_hard_disk, role_type,
network_configuration_set, availability_set_name,
data_virtual_hard_disks, role_size):
xml = _XmlSerializer.role_to_xml(
availability_set_name,
data_virtual_hard_disks,
network_configuration_set,
os_virtual_hard_disk,
role_name,
role_size,
role_type,
system_configuration_set)
return _XmlSerializer.doc_from_xml('PersistentVMRole', xml)
@staticmethod
def update_role_to_xml(role_name, os_virtual_hard_disk, role_type,
network_configuration_set, availability_set_name,
data_virtual_hard_disks, role_size):
xml = _XmlSerializer.role_to_xml(
availability_set_name,
data_virtual_hard_disks,
network_configuration_set,
os_virtual_hard_disk,
role_name,
role_size,
role_type,
None)
return _XmlSerializer.doc_from_xml('PersistentVMRole', xml)
@staticmethod
def capture_role_to_xml(post_capture_action, target_image_name,
target_image_label, provisioning_configuration):
xml = _XmlSerializer.data_to_xml(
[('OperationType', 'CaptureRoleOperation'),
('PostCaptureAction', post_capture_action)])
if provisioning_configuration is not None:
xml += '<ProvisioningConfiguration>'
if isinstance(provisioning_configuration, WindowsConfigurationSet):
xml += _XmlSerializer.windows_configuration_to_xml(
provisioning_configuration)
elif isinstance(provisioning_configuration, LinuxConfigurationSet):
xml += _XmlSerializer.linux_configuration_to_xml(
provisioning_configuration)
xml += '</ProvisioningConfiguration>'
xml += _XmlSerializer.data_to_xml(
[('TargetImageLabel', target_image_label),
('TargetImageName', target_image_name)])
return _XmlSerializer.doc_from_xml('CaptureRoleOperation', xml)
@staticmethod
def virtual_machine_deployment_to_xml(deployment_name, deployment_slot,
label, role_name,
system_configuration_set,
os_virtual_hard_disk, role_type,
network_configuration_set,
availability_set_name,
data_virtual_hard_disks, role_size,
virtual_network_name):
xml = _XmlSerializer.data_to_xml([('Name', deployment_name),
('DeploymentSlot', deployment_slot),
('Label', label)])
xml += '<RoleList>'
xml += '<Role>'
xml += _XmlSerializer.role_to_xml(
availability_set_name,
data_virtual_hard_disks,
network_configuration_set,
os_virtual_hard_disk,
role_name,
role_size,
role_type,
system_configuration_set)
xml += '</Role>'
xml += '</RoleList>'
if virtual_network_name is not None:
xml += _XmlSerializer.data_to_xml(
[('VirtualNetworkName', virtual_network_name)])
return _XmlSerializer.doc_from_xml('Deployment', xml)
@staticmethod
def create_website_to_xml(webspace_name, website_name, geo_region, plan,
host_names, compute_mode, server_farm, site_mode):
xml = '<HostNames xmlns:a="http://schemas.microsoft.com/2003/10/Serialization/Arrays">'
for host_name in host_names:
xml += '<a:string>{0}</a:string>'.format(host_name)
xml += '</HostNames>'
xml += _XmlSerializer.data_to_xml(
[('Name', website_name),
('ComputeMode', compute_mode),
('ServerFarm', server_farm),
('SiteMode', site_mode)])
xml += '<WebSpaceToCreate>'
xml += _XmlSerializer.data_to_xml(
[('GeoRegion', geo_region),
('Name', webspace_name),
('Plan', plan)])
xml += '</WebSpaceToCreate>'
return _XmlSerializer.doc_from_xml('Site', xml)
@staticmethod
def data_to_xml(data):
'''Creates an xml fragment from the specified data.
data: Array of tuples, where first: xml element name
second: xml element text
third: conversion function
'''
xml = ''
for element in data:
name = element[0]
val = element[1]
if len(element) > 2:
converter = element[2]
else:
converter = None
if val is not None:
if converter is not None:
text = _str(converter(_str(val)))
else:
text = _str(val)
xml += ''.join(['<', name, '>', text, '</', name, '>'])
return xml
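    # Illustrative sketch (not part of the original source): given the tuple
    # format described in the docstring, a call such as
    #     _XmlSerializer.data_to_xml([('Name', 'svc1'), ('Port', 80), ('Skip', None)])
    # would return '<Name>svc1</Name><Port>80</Port>'; entries whose value is
    # None are omitted, and an optional third tuple item is applied as a
    # conversion function (for example _lower or _encode_base64) before the
    # value is serialized.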
@staticmethod
def doc_from_xml(document_element_name, inner_xml):
'''Wraps the specified xml in an xml root element with default azure
namespaces'''
xml = ''.join(['<', document_element_name,
' xmlns:i="http://www.w3.org/2001/XMLSchema-instance"',
' xmlns="http://schemas.microsoft.com/windowsazure">'])
xml += inner_xml
xml += ''.join(['</', document_element_name, '>'])
return xml
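    # Illustrative sketch (not part of the original source): for example,
    #     _XmlSerializer.doc_from_xml('RegenerateKeys', '<KeyType>Primary</KeyType>')
    # would return the inner fragment wrapped in a <RegenerateKeys> root element
    # carrying the default i/windowsazure namespace declarations shown above.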
@staticmethod
def doc_from_data(document_element_name, data, extended_properties=None):
xml = _XmlSerializer.data_to_xml(data)
if extended_properties is not None:
xml += _XmlSerializer.extended_properties_dict_to_xml_fragment(
extended_properties)
return _XmlSerializer.doc_from_xml(document_element_name, xml)
@staticmethod
def extended_properties_dict_to_xml_fragment(extended_properties):
xml = ''
if extended_properties is not None and len(extended_properties) > 0:
xml += '<ExtendedProperties>'
for key, val in extended_properties.items():
xml += ''.join(['<ExtendedProperty>',
'<Name>',
_str(key),
'</Name>',
'<Value>',
_str(val),
'</Value>',
'</ExtendedProperty>'])
xml += '</ExtendedProperties>'
return xml
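    # Illustrative sketch (not part of the original source): passing
    # {'env': 'test'} would yield a single unbroken string of the form
    #     <ExtendedProperties><ExtendedProperty><Name>env</Name>
    #     <Value>test</Value></ExtendedProperty></ExtendedProperties>
    # (wrapped across two comment lines here for readability only).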
def _parse_bool(value):
if value.lower() == 'true':
return True
return False
class _ServiceBusManagementXmlSerializer(object):
@staticmethod
def namespace_to_xml(region):
'''Converts a service bus namespace description to xml
The xml format:
<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<entry xmlns="http://www.w3.org/2005/Atom">
<content type="application/xml">
<NamespaceDescription
xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">
<Region>West US</Region>
</NamespaceDescription>
</content>
</entry>
'''
body = '<NamespaceDescription xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">'
body += ''.join(['<Region>', region, '</Region>'])
body += '</NamespaceDescription>'
return _create_entry(body)
@staticmethod
def xml_to_namespace(xmlstr):
'''Converts xml response to service bus namespace
The xml format for namespace:
<entry>
<id>uuid:00000000-0000-0000-0000-000000000000;id=0000000</id>
<title type="text">myunittests</title>
<updated>2012-08-22T16:48:10Z</updated>
<content type="application/xml">
<NamespaceDescription
xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect"
xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
<Name>myunittests</Name>
<Region>West US</Region>
<DefaultKey>0000000000000000000000000000000000000000000=</DefaultKey>
<Status>Active</Status>
<CreatedAt>2012-08-22T16:48:10.217Z</CreatedAt>
<AcsManagementEndpoint>https://myunittests-sb.accesscontrol.windows.net/</AcsManagementEndpoint>
<ServiceBusEndpoint>https://myunittests.servicebus.windows.net/</ServiceBusEndpoint>
<ConnectionString>Endpoint=sb://myunittests.servicebus.windows.net/;SharedSecretIssuer=owner;SharedSecretValue=0000000000000000000000000000000000000000000=</ConnectionString>
<SubscriptionId>00000000000000000000000000000000</SubscriptionId>
<Enabled>true</Enabled>
</NamespaceDescription>
</content>
</entry>
'''
xmldoc = minidom.parseString(xmlstr)
namespace = ServiceBusNamespace()
mappings = (
('Name', 'name', None),
('Region', 'region', None),
('DefaultKey', 'default_key', None),
('Status', 'status', None),
('CreatedAt', 'created_at', None),
('AcsManagementEndpoint', 'acs_management_endpoint', None),
('ServiceBusEndpoint', 'servicebus_endpoint', None),
('ConnectionString', 'connection_string', None),
('SubscriptionId', 'subscription_id', None),
('Enabled', 'enabled', _parse_bool),
)
for desc in _get_children_from_path(xmldoc,
'entry',
'content',
'NamespaceDescription'):
for xml_name, field_name, conversion_func in mappings:
node_value = _get_first_child_node_value(desc, xml_name)
if node_value is not None:
if conversion_func is not None:
node_value = conversion_func(node_value)
setattr(namespace, field_name, node_value)
return namespace
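    # Illustrative note (not part of the original source): for the sample
    # response shown in the docstring, the returned ServiceBusNamespace would
    # have .name == 'myunittests', .region == 'West US' and .enabled == True
    # (the Enabled element is converted to a bool by _parse_bool).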
@staticmethod
def xml_to_region(xmlstr):
'''Converts xml response to service bus region
The xml format for region:
<entry>
<id>uuid:157c311f-081f-4b4a-a0ba-a8f990ffd2a3;id=1756759</id>
<title type="text"></title>
<updated>2013-04-10T18:25:29Z</updated>
<content type="application/xml">
<RegionCodeDescription
xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect"
xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
<Code>East Asia</Code>
<FullName>East Asia</FullName>
</RegionCodeDescription>
</content>
</entry>
'''
xmldoc = minidom.parseString(xmlstr)
region = ServiceBusRegion()
for desc in _get_children_from_path(xmldoc, 'entry', 'content',
'RegionCodeDescription'):
node_value = _get_first_child_node_value(desc, 'Code')
if node_value is not None:
region.code = node_value
node_value = _get_first_child_node_value(desc, 'FullName')
if node_value is not None:
region.fullname = node_value
return region
@staticmethod
def xml_to_namespace_availability(xmlstr):
'''Converts xml response to service bus namespace availability
The xml format:
<?xml version="1.0" encoding="utf-8"?>
<entry xmlns="http://www.w3.org/2005/Atom">
<id>uuid:9fc7c652-1856-47ab-8d74-cd31502ea8e6;id=3683292</id>
<title type="text"></title>
<updated>2013-04-16T03:03:37Z</updated>
<content type="application/xml">
<NamespaceAvailability
xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect"
xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
<Result>false</Result>
</NamespaceAvailability>
</content>
</entry>
'''
xmldoc = minidom.parseString(xmlstr)
availability = AvailabilityResponse()
for desc in _get_children_from_path(xmldoc, 'entry', 'content',
'NamespaceAvailability'):
node_value = _get_first_child_node_value(desc, 'Result')
if node_value is not None:
availability.result = _parse_bool(node_value)
return availability
from azure.servicemanagement.servicemanagementservice import (
ServiceManagementService)
from azure.servicemanagement.servicebusmanagementservice import (
ServiceBusManagementService)
from azure.servicemanagement.websitemanagementservice import (
WebsiteManagementService)
|
apache-2.0
|
GeographicaGS/Medinageoportal
|
data_tools/config.py
|
1
|
34005
|
# -*- coding: utf-8 -*-
import os
dataPath = "/Users/alasarr/dev/Medinageoportal/data_tools/data/nador"
config = {
"path_input": os.path.join(dataPath,"input"),
"path_output": os.path.join(dataPath,"output"),
"shapepath" : "data/nador",
"mapfile_output_prefix":"nador_",
"date":"01/01/2006",
"layers":
[
{
"name":"bacteria_biomass",
"title":"Bacteria Biomass (mg C/m3)",
"abs":"Bacteria Biomass in Nador",
"class": [
{
"name":"< 3.06",
"expr":"([pixel] < 3.06)",
"color":"235 235 235"
},
{
"name":"3.06-3.25",
"expr":"([pixel] >= 3.06 AND [pixel] < 3.25)",
"color":"234 233 235"
},
{
"name":"3.25-3.55",
"expr":"([pixel] >= 3.25 AND [pixel] < 3.55)",
"color":"231 229 237"
},
{
"name":"3.55-3.88",
"expr":"([pixel] >= 3.55 AND [pixel] < 3.88)",
"color":"223 218 240"
},
{
"name":"3.88-4.18",
"expr":"([pixel] >= 3.88 AND [pixel] < 4.18)",
"color":"218 213 241"
},
{
"name":"4.18-4.51",
"expr":"([pixel] >= 4.18 AND [pixel] < 4.51)",
"color":"213 207 242"
},
{
"name":"4.51-5.10",
"expr":"([pixel] >= 4.51 AND [pixel] < 5.10)",
"color":"204 198 243"
},
{
"name":"5.10-5.43",
"expr":"([pixel] >= 5.10 AND [pixel] < 5.43)",
"color":"198 194 243"
},
{
"name":"5.43-5.73",
"expr":"([pixel] >= 5.43 AND [pixel] < 5.73)",
"color":"193 190 242"
},
{
"name":"5.73-6.36",
"expr":"([pixel] >= 5.73 AND [pixel] < 6.36)",
"color":"182 181 240"
},
{
"name":"6.36-6.65",
"expr":"([pixel] >= 6.36 AND [pixel] < 6.65)",
"color":"177 178 238"
},
{
"name":"6.65-6.98",
"expr":"([pixel] >= 6.65 AND [pixel] < 6.98)",
"color":"171 174 236"
},
{
"name":"6.98-7.57",
"expr":"([pixel] >= 6.98 AND [pixel] < 7.57)",
"color":"160 167 231"
},
{
"name":"7.57-7.91",
"expr":"([pixel] >= 7.57 AND [pixel] < 7.91)",
"color":"154 163 228"
},
{
"name":"7.91-8.20",
"expr":"([pixel] >= 7.91 AND [pixel] < 8.20)",
"color":"149 160 225"
},
{
"name":"8.20-8.49",
"expr":"([pixel] >= 8.20 AND [pixel] < 8.49)",
"color":"144 157 221"
},
{
"name":"8.49-8.83",
"expr":"([pixel] >= 8.49 AND [pixel] < 8.83)",
"color":"138 153 217"
},
{
"name":"8.83-9.41",
"expr":"([pixel] >= 8.83 AND [pixel] < 9.41)",
"color":"128 147 208"
},
{
"name":"9.41-9.75",
"expr":"([pixel] >= 9.41 AND [pixel] < 9.75)",
"color":"123 144 203"
},
{
"name":"9.75-10.04",
"expr":"([pixel] >= 9.75 AND [pixel] < 10.04)",
"color":"118 141 198"
},
{
"name":"10.04-10.33",
"expr":"([pixel] >= 10.04 AND [pixel] < 10.33)",
"color":"114 138 193"
},
{
"name":"10.33-10.67",
"expr":"([pixel] >= 10.33 AND [pixel] < 10.67)",
"color":"109 134 186"
},
{
"name":"10.67-11.30",
"expr":"([pixel] >= 10.67 AND [pixel] < 11.30)",
"color":"100 127 174"
},
{
"name":"11.30-11.59",
"expr":"([pixel] >= 11.30 AND [pixel] < 11.59)",
"color":"97 124 167"
},
{
"name":"11.59-11.93",
"expr":"([pixel] >= 11.59 AND [pixel] < 11.93)",
"color":"93 120 160"
},
{
"name":"11.93-12.51",
"expr":"([pixel] >= 11.93 AND [pixel] < 12.51)",
"color":"87 113 146"
},
{
"name":"12.51-12.85",
"expr":"([pixel] >= 12.51 AND [pixel] < 12.85)",
"color":"84 109 138"
},
{
"name":"12.85-13.14",
"expr":"([pixel] >= 12.85 AND [pixel] < 13.14)",
"color":"82 105 131"
},
{
"name":"13.14-13.77",
"expr":"([pixel] >= 13.14 AND [pixel] < 13.77)",
"color":"78 97 115"
},
{
"name":"13.77-14.35",
"expr":"([pixel] >= 13.77 AND [pixel] < 14.35)",
"color":"75 89 100"
},
{
"name":"14.35-14.69",
"expr":"([pixel] >= 14.35 AND [pixel] < 14.69)",
"color":"74 84 91"
},
{
"name":">14.69",
"expr":"([pixel] >= 14.69)",
"color":"74 76 78"
}
]
},
{
"name":"depth_mean_temperature",
"title":"Depth Mean Temperature (grades centigrade)",
"abs":"Depth Mean Temperature in Nador",
"class": [
{
"name":"< 11.78",
"expr":"([pixel] < 11.78)",
"color":"0 0 255"
},
{
"name":"11.78-11.98",
"expr":"([pixel] >= 11.78 AND [pixel] < 11.98)",
"color":"0 3 255"
},
{
"name":"11.98-12.29",
"expr":"([pixel] >= 11.98 AND [pixel] < 12.29)",
"color":"0 15 255"
},
{
"name":"12.29-12.65",
"expr":"([pixel] >= 12.29 AND [pixel] < 12.65)",
"color":"0 54 255"
},
{
"name":"12.65-12.96",
"expr":"([pixel] >= 12.65 AND [pixel] < 12.96)",
"color":"0 72 255"
},
{
"name":"12.96-13.32",
"expr":"([pixel] >= 12.96 AND [pixel] < 13.32)",
"color":"0 93 255"
},
{
"name":"13.32-13.93",
"expr":"([pixel] >= 13.32 AND [pixel] < 13.93)",
"color":"0 129 255"
},
{
"name":"13.93-14.29",
"expr":"([pixel] >= 13.93 AND [pixel] < 14.29)",
"color":"0 150 255"
},
{
"name":"14.29-14.60",
"expr":"([pixel] >= 14.29 AND [pixel] < 14.60)",
"color":"0 168 255"
},
{
"name":"14.60-15.27",
"expr":"([pixel] >= 14.60 AND [pixel] < 15.27)",
"color":"0 207 255"
},
{
"name":"15.27-15.58",
"expr":"([pixel] >= 15.27 AND [pixel] < 15.58)",
"color":"0 225 255"
},
{
"name":"15.58-15.94",
"expr":"([pixel] >= 15.58 AND [pixel] < 15.94)",
"color":"0 246 255"
},
{
"name":"15.94-16.55",
"expr":"([pixel] >= 15.94 AND [pixel] < 16.55)",
"color":"27 255 228"
},
{
"name":"16.55-16.91",
"expr":"([pixel] >= 16.55 AND [pixel] < 16.91)",
"color":"48 255 207"
},
{
"name":"16.91-17.22",
"expr":"([pixel] >= 16.91 AND [pixel] < 17.22)",
"color":"66 255 189"
},
{
"name":"17.22-17.53",
"expr":"([pixel] >= 17.22 AND [pixel] < 17.53)",
"color":"84 255 171"
},
{
"name":"17.53-17.89",
"expr":"([pixel] >= 17.53 AND [pixel] < 17.89)",
"color":"105 255 150"
},
{
"name":"17.89-18.50",
"expr":"([pixel] >= 17.89 AND [pixel] < 18.50)",
"color":"141 255 114"
},
{
"name":"18.50-18.86",
"expr":"([pixel] >= 18.50 AND [pixel] < 18.86)",
"color":"162 255 93"
},
{
"name":"18.86-19.17",
"expr":"([pixel] >= 18.86 AND [pixel] < 19.17)",
"color":"180 255 75"
},
{
"name":"19.17-19.48",
"expr":"([pixel] >= 19.17 AND [pixel] < 19.48)",
"color":"198 255 57"
},
{
"name":"19.48-19.84",
"expr":"([pixel] >= 19.48 AND [pixel] < 19.84)",
"color":"219 255 36"
},
{
"name":"19.84-20.50",
"expr":"([pixel] >= 19.84 AND [pixel] < 20.50)",
"color":"255 252 0"
},
{
"name":"20.50-20.81",
"expr":"([pixel] >= 20.50 AND [pixel] < 20.81)",
"color":"255 234 0"
},
{
"name":"20.81-21.17",
"expr":"([pixel] >= 20.81 AND [pixel] < 21.17)",
"color":"255 213 0"
},
{
"name":"21.17-21.79",
"expr":"([pixel] >= 21.17 AND [pixel] < 21.79)",
"color":"255 213 0"
},
{
"name":"21.79-22.15",
"expr":"([pixel] >= 21.79 AND [pixel] < 22.15)",
"color":"255 156 0"
},
{
"name":"22.15-22.46",
"expr":"([pixel] >= 22.15 AND [pixel] < 22.46)",
"color":"255 138 0"
},
{
"name":"22.46-23.12",
"expr":"([pixel] >= 22.46 AND [pixel] < 23.12)",
"color":"255 99 0"
},
{
"name":"23.12-23.74",
"expr":"([pixel] >= 23.12 AND [pixel] < 23.74)",
"color":"255 63 0"
},
{
"name":"23.74-24.41",
"expr":"([pixel] >= 23.74 AND [pixel] < 24.41)",
"color":"255 42 0"
},
{
"name":">24.41",
"expr":"([pixel] >= 24.41)",
"color":"255 12 0"
}
]
},
{
"name":"gridded_FVCOM_file_oxygen",
"title":"Oxygen (mmol/m3)",
"abs":"Oxygen in Nador",
"class": [
{
"name":"< 150.79",
"expr":"([pixel] < 150.79)",
"color":"235 235 235"
},
{
"name":"150.79-153.94",
"expr":"([pixel] >= 150.79 AND [pixel] < 153.94)",
"color":"235 233 233"
},
{
"name":"153.94-158.66",
"expr":"([pixel] >= 153.94 AND [pixel] < 158.66)",
"color":"236 228 229"
},
{
"name":"158.66-164.17",
"expr":"([pixel] >= 158.66 AND [pixel] < 164.17)",
"color":"238 214 217"
},
{
"name":"164.17-168.90",
"expr":"([pixel] >= 164.17 AND [pixel] < 168.90)",
"color":"239 208 212"
},
{
"name":"168.90-174.41",
"expr":"([pixel] >= 168.90 AND [pixel] < 174.41)",
"color":"239 200 207"
},
{
"name":"174.41-183.86",
"expr":"([pixel] >= 174.41 AND [pixel] < 183.86)",
"color":"239 189 199"
},
{
"name":"183.86-189.37",
"expr":"([pixel] >= 183.86 AND [pixel] < 189.37)",
"color":"239 182 195"
},
{
"name":"189.37-194.09",
"expr":"([pixel] >= 189.37 AND [pixel] < 194.09)",
"color":"238 176 192"
},
{
"name":"194.09-204.33",
"expr":"([pixel] >= 194.09 AND [pixel] < 204.33)",
"color":"235 165 185"
},
{
"name":"204.33-209.06",
"expr":"([pixel] >= 204.33 AND [pixel] < 209.06)",
"color":"233 160 182"
},
{
"name":"209.06-214.57",
"expr":"([pixel] >= 209.06 AND [pixel] < 214.57)",
"color":"231 154 179"
},
{
"name":"214.57-224.02",
"expr":"([pixel] >= 214.57 AND [pixel] < 224.02)",
"color":"226 144 174"
},
{
"name":"224.02-229.53",
"expr":"([pixel] >= 224.02 AND [pixel] < 229.53)",
"color":"223 139 171"
},
{
"name":"229.53-234.25",
"expr":"([pixel] >= 229.53 AND [pixel] < 234.25)",
"color":"219 135 169"
},
{
"name":"234.25-238.98",
"expr":"([pixel] >= 234.25 AND [pixel] < 238.98)",
"color":"216 131 167"
},
{
"name":"238.98-244.49",
"expr":"([pixel] >= 238.98 AND [pixel] < 244.49)",
"color":"212 126 164"
},
{
"name":"244.49-253.94",
"expr":"([pixel] >= 244.49 AND [pixel] < 253.94)",
"color":"203 118 159"
},
{
"name":"253.94-259.45",
"expr":"([pixel] >= 253.94 AND [pixel] < 259.45)",
"color":"198 114 156"
},
{
"name":"259.45-264.17",
"expr":"([pixel] >= 259.45 AND [pixel] < 264.17)",
"color":"193 111 154"
},
{
"name":"264.17-268.90",
"expr":"([pixel] >= 264.17 AND [pixel] < 268.90)",
"color":"188 108 151"
},
{
"name":"268.90-274.41",
"expr":"([pixel] >= 268.90 AND [pixel] < 274.41)",
"color":"182 104 148"
},
{
"name":"274.41-284.65",
"expr":"([pixel] >= 274.41 AND [pixel] < 284.65)",
"color":"169 98 141"
},
{
"name":"284.65-289.37",
"expr":"([pixel] >= 284.65 AND [pixel] < 289.37)",
"color":"163 96 138"
},
{
"name":"289.37-294.88",
"expr":"([pixel] >= 289.37 AND [pixel] < 294.88)",
"color":"156 93 134"
},
{
"name":"294.88-304.33",
"expr":"([pixel] >= 294.88 AND [pixel] < 304.33)",
"color":"143 88 126"
},
{
"name":"304.33-309.84",
"expr":"([pixel] >= 304.33 AND [pixel] < 309.84)",
"color":"135 86 121"
},
{
"name":"309.84-314.57",
"expr":"([pixel] >= 309.84 AND [pixel] < 314.57)",
"color":"129 84 117"
},
{
"name":"314.57-324.80",
"expr":"([pixel] >= 314.57 AND [pixel] < 324.80)",
"color":"113 81 106"
},
{
"name":"324.80-334.25",
"expr":"([pixel] >= 324.80 AND [pixel] < 334.25)",
"color":"99 78 95"
},
{
"name":"334.25-344.49",
"expr":"([pixel] >= 334.25 AND [pixel] < 344.49)",
"color":"90 76 88"
},
{
"name":">344.49",
"expr":"([pixel] >= 344.49)",
"color":"78 74 77"
}
]
},
{
"name":"nitrate",
"title":"Nitrate (mmol/m3)",
"abs":"Nitrate in Nador",
"class": [
{
"name":"< 0.54",
"expr":"([pixel] < 0.54)",
"color":"235 235 235"
},
{
"name":"0.54-0.59",
"expr":"([pixel] >= 0.54 AND [pixel] < 0.59)",
"color":"233 234 235"
},
{
"name":"0.59-0.66",
"expr":"([pixel] >= 0.59 AND [pixel] < 0.66)",
"color":"229 231 236"
},
{
"name":"0.66-0.74",
"expr":"([pixel] >= 0.66 AND [pixel] < 0.74)",
"color":"215 222 239"
},
{
"name":"0.74-0.81",
"expr":"([pixel] >= 0.74 AND [pixel] < 0.81)",
"color":"208 218 240"
},
{
"name":"0.81-0.89",
"expr":"([pixel] >= 0.81 AND [pixel] < 0.89)",
"color":"200 214 240"
},
{
"name":"0.89-1.04",
"expr":"([pixel] >= 0.89 AND [pixel] < 1.04)",
"color":"187 208 239"
},
{
"name":"1.04-1.12",
"expr":"([pixel] >= 1.04 AND [pixel] < 1.12)",
"color":"179 204 237"
},
{
"name":"1.12-1.19",
"expr":"([pixel] >= 1.12 AND [pixel] < 1.19)",
"color":"172 202 235"
},
{
"name":"1.19-1.34",
"expr":"([pixel] >= 1.19 AND [pixel] < 1.34)",
"color":"157 196 230"
},
{
"name":"1.34-1.41",
"expr":"([pixel] >= 1.34 AND [pixel] < 1.41)",
"color":"150 193 227"
},
{
"name":"1.41-1.50",
"expr":"([pixel] >= 1.41 AND [pixel] < 1.50)",
"color":"143 190 222"
},
{
"name":"1.50-1.64",
"expr":"([pixel] >= 1.50 AND [pixel] < 1.64)",
"color":"131 185 214"
},
{
"name":"1.64-1.72",
"expr":"([pixel] >= 1.64 AND [pixel] < 1.72)",
"color":"124 182 209"
},
{
"name":"1.72-1.79",
"expr":"([pixel] >= 1.72 AND [pixel] < 1.79)",
"color":"118 180 204"
},
{
"name":"1.79-1.86",
"expr":"([pixel] >= 1.79 AND [pixel] < 1.86)",
"color":"113 177 199"
},
{
"name":"1.86-1.95",
"expr":"([pixel] >= 1.86 AND [pixel] < 1.95)",
"color":"107 174 192"
},
{
"name":"1.95-2.09",
"expr":"([pixel] >= 1.95 AND [pixel] < 2.09)",
"color":"98 168 181"
},
{
"name":"2.09-2.17",
"expr":"([pixel] >= 2.09 AND [pixel] < 2.17)",
"color":"93 165 174"
},
{
"name":"2.17-2.24",
"expr":"([pixel] >= 2.17 AND [pixel] < 2.24)",
"color":"89 161 168"
},
{
"name":"2.24-2.31",
"expr":"([pixel] >= 2.24 AND [pixel] < 2.31)",
"color":"86 158 161"
},
{
"name":"2.31-2.39",
"expr":"([pixel] >= 2.31 AND [pixel] < 2.39)",
"color":"82 154 154"
},
{
"name":"2.39-2.55",
"expr":"([pixel] >= 2.39 AND [pixel] < 2.55)",
"color":"76 146 140"
},
{
"name":"2.55-2.62",
"expr":"([pixel] >= 2.55 AND [pixel] < 2.62)",
"color":"74 142 134"
},
{
"name":"2.62-2.70",
"expr":"([pixel] >= 2.62 AND [pixel] < 2.70)",
"color":"72 137 127"
},
{
"name":"2.70-2.84",
"expr":"([pixel] >= 2.70 AND [pixel] < 2.84)",
"color":"70 128 115"
},
{
"name":"2.84-2.93",
"expr":"([pixel] >= 2.84 AND [pixel] < 2.93)",
"color":"69 122 109"
},
{
"name":"2.93-3.00",
"expr":"([pixel] >= 2.93 AND [pixel] < 3.00)",
"color":"69 117 104"
},
{
"name":"3.00-3.15",
"expr":"([pixel] >= 3.00 AND [pixel] < 3.15)",
"color":"69 105 93"
},
{
"name":"3.15-3.29",
"expr":"([pixel] >= 3.15 AND [pixel] < 3.29)",
"color":"70 94 84"
},
{
"name":"3.29-3.49",
"expr":"([pixel] >= 3.29 AND [pixel] < 3.49)",
"color":"71 87 80"
},
{
"name":">3.49",
"expr":"([pixel] >= 3.49)",
"color":"73 77 75"
}
]
},
{
"name":"phosphate",
"title":"Phosphate (mmol/m3)",
"abs":"Phosphate in Nador",
"class": [
{
"name":"< 0.03",
"expr":"([pixel] < 0.03)",
"color":"235 235 235"
},
{
"name":"0.03-0.03",
"expr":"([pixel] >= 0.03 AND [pixel] < 0.03)",
"color":"233 234 233"
},
{
"name":"0.03-0.04",
"expr":"([pixel] >= 0.03 AND [pixel] < 0.04)",
"color":"227 233 227"
},
{
"name":"0.04-0.05",
"expr":"([pixel] >= 0.04 AND [pixel] < 0.05)",
"color":"212 229 207"
},
{
"name":"0.05-0.05",
"expr":"([pixel] >= 0.05 AND [pixel] < 0.05)",
"color":"207 227 198"
},
{
"name":"0.05-0.06",
"expr":"([pixel] >= 0.05 AND [pixel] < 0.06)",
"color":"203 223 187"
},
{
"name":"0.06-0.07",
"expr":"([pixel] >= 0.06 AND [pixel] < 0.07)",
"color":"199 215 168"
},
{
"name":"0.07-0.07",
"expr":"([pixel] >= 0.07 AND [pixel] < 0.07)",
"color":"199 209 158"
},
{
"name":"0.07-0.08",
"expr":"([pixel] >= 0.07 AND [pixel] < 0.08)",
"color":"199 203 151"
},
{
"name":"0.08-0.09",
"expr":"([pixel] >= 0.08 AND [pixel] < 0.09)",
"color":"203 190 138"
},
{
"name":"0.09-0.09",
"expr":"([pixel] >= 0.09 AND [pixel] < 0.09)",
"color":"205 183 133"
},
{
"name":"0.09-0.10",
"expr":"([pixel] >= 0.09 AND [pixel] < 0.10)",
"color":"207 175 129"
},
{
"name":"0.10-0.11",
"expr":"([pixel] >= 0.10 AND [pixel] < 0.11)",
"color":"210 161 126"
},
{
"name":"0.11-0.11",
"expr":"([pixel] >= 0.11 AND [pixel] < 0.11)",
"color":"211 153 126"
},
{
"name":"0.11-0.12",
"expr":"([pixel] >= 0.11 AND [pixel] < 0.12)",
"color":"212 146 127"
},
{
"name":"0.12-0.12",
"expr":"([pixel] >= 0.12 AND [pixel] < 0.12)",
"color":"212 140 129"
},
{
"name":"0.12-0.13",
"expr":"([pixel] >= 0.12 AND [pixel] < 0.13)",
"color":"210 133 132"
},
{
"name":"0.13-0.14",
"expr":"([pixel] >= 0.13 AND [pixel] < 0.14)",
"color":"205 122 138"
},
{
"name":"0.14-0.15",
"expr":"([pixel] >= 0.14 AND [pixel] < 0.15)",
"color":"200 116 142"
},
{
"name":"0.15-0.15",
"expr":"([pixel] >= 0.14 AND [pixel] < 0.15)",
"color":"195 112 145"
},
{
"name":"0.15-0.16",
"expr":"([pixel] >= 0.15 AND [pixel] < 0.16)",
"color":"189 108 147"
},
{
"name":"0.16-0.16",
"expr":"([pixel] >= 0.16 AND [pixel] < 0.16)",
"color":"181 104 150"
},
{
"name":"0.16-0.17",
"expr":"([pixel] >= 0.16 AND [pixel] < 0.17)",
"color":"164 99 153"
},
{
"name":"0.17-0.18",
"expr":"([pixel] >= 0.17 AND [pixel] < 0.18)",
"color":"155 97 153"
},
{
"name":"0.18-0.18",
"expr":"([pixel] >= 0.18 AND [pixel] < 0.18)",
"color":"145 95 151"
},
{
"name":"0.18-0.19",
"expr":"([pixel] >= 0.18 AND [pixel] < 0.19)",
"color":"127 93 146"
},
{
"name":"0.19-0.20",
"expr":"([pixel] >= 0.19 AND [pixel] < 0.20)",
"color":"117 92 140"
},
{
"name":"0.20-0.20",
"expr":"([pixel] >= 0.20 AND [pixel] < 0.20)",
"color":"108 91 135"
},
{
"name":"0.20-0.21",
"expr":"([pixel] >= 0.20 AND [pixel] < 0.21)",
"color":"93 88 120"
},
{
"name":"0.21-0.22",
"expr":"([pixel] >= 0.21 AND [pixel] < 0.22)",
"color":"82 85 103"
},
{
"name":"0.22-0.24",
"expr":"([pixel] >= 0.22 AND [pixel] < 0.24)",
"color":"78 82 93"
},
{
"name":">0.24",
"expr":"([pixel] >= 0.24)",
"color":"74 76 78"
}
]
},
{
"name":"phytoplankton_biomass_carbon",
"title":"Phytoplankton biomass carbon (mg C/m3)",
"abs":"Phytoplankton biomass carbon in Nador",
"class": [
{
"name":"< 7.67",
"expr":"([pixel] < 7.67)",
"color":"235 235 235"
},
{
"name":"7.67-8.80",
"expr":"([pixel] >= 7.67 AND [pixel] < 8.80)",
"color":"233 234 234"
},
{
"name":"8.80-10.49",
"expr":"([pixel] >= 8.80 AND [pixel] < 10.49)",
"color":"225 233 231"
},
{
"name":"10.49-12.46",
"expr":"([pixel] >= 10.49 AND [pixel] < 12.46)",
"color":"205 230 221"
},
{
"name":"12.46-14.16",
"expr":"([pixel] >= 12.46 AND [pixel] < 14.16)",
"color":"196 229 216"
},
{
"name":"14.16-16.13",
"expr":"([pixel] >= 14.16 AND [pixel] < 16.13)",
"color":"186 227 210"
},
{
"name":"16.13-19.52",
"expr":"([pixel] >= 16.13 AND [pixel] < 19.52)",
"color":"172 223 199"
},
{
"name":"19.52-21.49",
"expr":"([pixel] >= 19.52 AND [pixel] < 21.49)",
"color":"164 221 192"
},
{
"name":"21.49-23.19",
"expr":"([pixel] >= 21.49 AND [pixel] < 23.19)",
"color":"157 218 185"
},
{
"name":"23.19-26.86",
"expr":"([pixel] >= 23.19 AND [pixel] < 26.86)",
"color":"145 213 172"
},
{
"name":"26.86-28.55",
"expr":"([pixel] >= 26.86 AND [pixel] < 28.55)",
"color":"140 210 165"
},
{
"name":"28.55-30.52",
"expr":"([pixel] >= 28.55 AND [pixel] < 30.52)",
"color":"134 207 158"
},
{
"name":"30.52-33.91",
"expr":"([pixel] >= 30.52 AND [pixel] < 33.91)",
"color":"126 201 145"
},
{
"name":"33.91-35.89",
"expr":"([pixel] >= 33.91 AND [pixel] < 35.89)",
"color":"122 197 137"
},
{
"name":"35.89-37.58",
"expr":"([pixel] >= 35.89 AND [pixel] < 37.58)",
"color":"119 193 131"
},
{
"name":"37.58-39.27",
"expr":"([pixel] >= 37.58 AND [pixel] < 39.27)",
"color":"116 189 125"
},
{
"name":"39.27-41.25",
"expr":"([pixel] >= 39.27 AND [pixel] < 41.25)",
"color":"113 185 118"
},
{
"name":"41.25-44.63",
"expr":"([pixel] >= 41.25 AND [pixel] < 44.63)",
"color":"109 176 107"
},
{
"name":"44.63-46.61",
"expr":"([pixel] >= 44.63 AND [pixel] < 46.61)",
"color":"106 171 102"
},
{
"name":"46.61-48.30",
"expr":"([pixel] >= 46.61 AND [pixel] < 48.30)",
"color":"105 166 97"
},
{
"name":"48.30-50.00",
"expr":"([pixel] >= 48.30 AND [pixel] < 50.00)",
"color":"103 162 92"
},
{
"name":"50.00-51.97",
"expr":"([pixel] >= 50.00 AND [pixel] < 51.97)",
"color":"102 156 88"
},
{
"name":"51.97-55.64",
"expr":"([pixel] >= 51.97 AND [pixel] < 55.64)",
"color":"99 145 80"
},
{
"name":"55.64-57.33",
"expr":"([pixel] >= 55.64 AND [pixel] < 57.33)",
"color":"98 140 77"
},
{
"name":"57.33-59.31",
"expr":"([pixel] >= 57.33 AND [pixel] < 59.31)",
"color":"97 134 74"
},
{
"name":"59.31-62.69",
"expr":"([pixel] >= 59.31 AND [pixel] < 62.69)",
"color":"95 124 71"
},
{
"name":"62.69-64.67",
"expr":"([pixel] >= 62.69 AND [pixel] < 64.67)",
"color":"93 117 69"
},
{
"name":"64.67-66.36",
"expr":"([pixel] >= 64.67 AND [pixel] < 66.36)",
"color":"92 112 68"
},
{
"name":"66.36-70.03",
"expr":"([pixel] >= 66.36 AND [pixel] < 70.03)",
"color":"88 101 68"
},
{
"name":"70.03-73.42",
"expr":"([pixel] >= 70.03 AND [pixel] < 73.42)",
"color":"84 90 69"
},
{
"name":"73.42-78.21",
"expr":"([pixel] >= 73.42 AND [pixel] < 78.21)",
"color":"81 84 70"
},
{
"name":">78.21",
"expr":"([pixel] >= 78.21)",
"color":"76 76 73"
}
]
},
{
"name":"zooplankton_carbon_biomass",
"title":"Zooplankton carbon biomass (mg C/m3)",
"abs":"Zooplankton carbon biomass in Nador",
"class": [
{
"name":"< 0.58",
"expr":"([pixel] < 0.58)",
"color":"235 235 235"
},
{
"name":"0.58-1.50",
"expr":"([pixel] >= 0.58 AND [pixel] < 1.50)",
"color":"233 234 234"
},
{
"name":"1.50-2.89",
"expr":"([pixel] >= 1.50 AND [pixel] < 2.89)",
"color":"225 233 231"
},
{
"name":"2.89-4.50",
"expr":"([pixel] >= 2.89 AND [pixel] < 4.50)",
"color":"205 230 221"
},
{
"name":"4.50-5.89",
"expr":"([pixel] >= 4.50 AND [pixel] < 5.89)",
"color":"196 229 216"
},
{
"name":"5.89-7.50",
"expr":"([pixel] >= 5.89 AND [pixel] < 7.50)",
"color":"186 227 210"
},
{
"name":"7.50-10.27",
"expr":"([pixel] >= 7.50 AND [pixel] < 10.27)",
"color":"172 223 199"
},
{
"name":"10.27-11.88",
"expr":"([pixel] >= 10.27 AND [pixel] < 11.88)",
"color":"164 221 192"
},
{
"name":"11.88-13.27",
"expr":"([pixel] >= 11.88 AND [pixel] < 13.27)",
"color":"157 218 185"
},
{
"name":"13.27-16.27",
"expr":"([pixel] >= 13.27 AND [pixel] < 16.27)",
"color":"145 213 172"
},
{
"name":"16.27-17.65",
"expr":"([pixel] >= 16.27 AND [pixel] < 17.65)",
"color":"140 210 165"
},
{
"name":"17.65-19.27",
"expr":"([pixel] >= 17.65 AND [pixel] < 19.27)",
"color":"134 207 158"
},
{
"name":"19.27-22.04",
"expr":"([pixel] >= 19.27 AND [pixel] < 22.04)",
"color":"126 201 145"
},
{
"name":"22.04-23.65",
"expr":"([pixel] >= 22.04 AND [pixel] < 23.65)",
"color":"122 197 137"
},
{
"name":"23.65-25.03",
"expr":"([pixel] >= 23.65 AND [pixel] < 25.03)",
"color":"119 193 131"
},
{
"name":"25.03-26.42",
"expr":"([pixel] >= 25.03 AND [pixel] < 26.42)",
"color":"116 189 125"
},
{
"name":"26.42-28.03",
"expr":"([pixel] >= 26.42 AND [pixel] < 28.03)",
"color":"113 185 118"
},
{
"name":"28.03-30.80",
"expr":"([pixel] >= 28.03 AND [pixel] < 30.80)",
"color":"109 176 107"
},
{
"name":"30.80-32.42",
"expr":"([pixel] >= 30.80 AND [pixel] < 32.42)",
"color":"106 171 102"
},
{
"name":"32.42-33.80",
"expr":"([pixel] >= 32.42 AND [pixel] < 33.80)",
"color":"105 166 97"
},
{
"name":"33.80-35.18",
"expr":"([pixel] >= 33.80 AND [pixel] < 35.18)",
"color":"103 162 92"
},
{
"name":"35.18-36.80",
"expr":"([pixel] >= 35.18 AND [pixel] < 36.80)",
"color":"102 156 88"
},
{
"name":"36.80-39.80",
"expr":"([pixel] >= 36.80 AND [pixel] < 39.80)",
"color":"99 145 80"
},
{
"name":"39.80-41.18",
"expr":"([pixel] >= 39.80 AND [pixel] < 41.18)",
"color":"98 140 77"
},
{
"name":"41.18-42.80",
"expr":"([pixel] >= 41.18 AND [pixel] < 42.80)",
"color":"97 134 74"
},
{
"name":"42.80-45.57",
"expr":"([pixel] >= 42.80 AND [pixel] < 45.57)",
"color":"95 124 71"
},
{
"name":"45.57-47.18",
"expr":"([pixel] >= 45.57 AND [pixel] < 47.18)",
"color":"93 117 69"
},
{
"name":"47.18-48.56",
"expr":"([pixel] >= 47.18 AND [pixel] < 48.56)",
"color":"92 112 68"
},
{
"name":"48.56-51.56",
"expr":"([pixel] >= 48.56 AND [pixel] < 51.56)",
"color":"88 101 68"
},
{
"name":"51.56-54.33",
"expr":"([pixel] >= 51.56 AND [pixel] < 54.33)",
"color":"84 90 69"
},
{
"name":"54.33-58.25",
"expr":"([pixel] >= 54.33 AND [pixel] < 58.25)",
"color":"81 84 70"
},
{
"name":">58.25",
"expr":"([pixel] >= 58.25)",
"color":"76 76 73"
}
]
},
{
"name":"chlorophyl",
"title":"Chlorophyl (mg C/m3)",
"abs":"Chlorophyl in Nador",
"class": [
{
"name":"< 0.21",
"expr":"([pixel] < 0.21)",
"color":"235 235 235"
},
{
"name":"0.21-0.24",
"expr":"([pixel] >= 0.21 AND [pixel] < 0.24)",
"color":"233 234 234"
},
{
"name":"0.24-0.28",
"expr":"([pixel] >= 0.24 AND [pixel] < 0.28)",
"color":"225 233 231"
},
{
"name":"0.28-0.32",
"expr":"([pixel] >= 0.28 AND [pixel] < 0.32)",
"color":"205 230 221"
},
{
"name":"0.32-0.36",
"expr":"([pixel] >= 0.32 AND [pixel] < 0.36)",
"color":"196 229 216"
},
{
"name":"0.36-0.40",
"expr":"([pixel] >= 0.36 AND [pixel] < 0.40)",
"color":"186 227 210"
},
{
"name":"0.40-0.48",
"expr":"([pixel] >= 0.40 AND [pixel] < 0.48)",
"color":"172 223 199"
},
{
"name":"0.48-0.52",
"expr":"([pixel] >= 0.48 AND [pixel] < 0.52)",
"color":"164 221 192"
},
{
"name":"0.52-0.56",
"expr":"([pixel] >= 0.52 AND [pixel] < 0.56)",
"color":"157 218 185"
},
{
"name":"0.56-0.64",
"expr":"([pixel] >= 0.56 AND [pixel] < 0.64)",
"color":"145 213 172"
},
{
"name":"0.64-0.68",
"expr":"([pixel] >= 0.64 AND [pixel] < 0.68)",
"color":"140 210 165"
},
{
"name":"0.68-0.72",
"expr":"([pixel] >= 0.68 AND [pixel] < 0.72)",
"color":"134 207 158"
},
{
"name":"0.72-0.80",
"expr":"([pixel] >= 0.72 AND [pixel] < 0.80)",
"color":"126 201 145"
},
{
"name":"0.80-0.84",
"expr":"([pixel] >= 0.80 AND [pixel] < 0.84)",
"color":"122 197 137"
},
{
"name":"0.84-0.88",
"expr":"([pixel] >= 0.84 AND [pixel] < 0.88)",
"color":"119 193 131"
},
{
"name":"0.88-0.92",
"expr":"([pixel] >= 0.88 AND [pixel] < 0.92)",
"color":"116 189 125"
},
{
"name":"0.92-0.96",
"expr":"([pixel] >= 0.92 AND [pixel] < 0.96)",
"color":"113 185 118"
},
{
"name":"0.96-1.04",
"expr":"([pixel] >= 0.96 AND [pixel] < 1.04)",
"color":"109 176 107"
},
{
"name":"1.04-1.08",
"expr":"([pixel] >= 1.04 AND [pixel] < 1.08)",
"color":"106 171 102"
},
{
"name":"1.08-1.12",
"expr":"([pixel] >= 1.08 AND [pixel] < 1.12)",
"color":"105 166 97"
},
{
"name":"1.12-1.16",
"expr":"([pixel] >= 1.12 AND [pixel] < 1.16)",
"color":"103 162 92"
},
{
"name":"1.16-1.20",
"expr":"([pixel] >= 1.16 AND [pixel] < 1.20)",
"color":"102 156 88"
},
{
"name":"1.20-1.28",
"expr":"([pixel] >= 1.20 AND [pixel] < 1.28)",
"color":"99 145 80"
},
{
"name":"1.28-1.32",
"expr":"([pixel] >= 1.28 AND [pixel] < 1.32)",
"color":"98 140 77"
},
{
"name":"1.32-1.36",
"expr":"([pixel] >= 1.32 AND [pixel] < 1.36)",
"color":"97 134 74"
},
{
"name":"1.36-1.44",
"expr":"([pixel] >= 1.36 AND [pixel] < 1.44)",
"color":"95 124 71"
},
{
"name":"1.44-1.48",
"expr":"([pixel] >= 1.44 AND [pixel] < 1.48)",
"color":"93 117 69"
},
{
"name":"1.48-1.52",
"expr":"([pixel] >= 1.48 AND [pixel] < 1.52)",
"color":"92 112 68"
},
{
"name":"1.52-1.60",
"expr":"([pixel] >= 1.52 AND [pixel] < 1.60)",
"color":"88 101 68"
},
{
"name":"1.60-1.68",
"expr":"([pixel] >= 1.60 AND [pixel] < 1.68)",
"color":"84 90 69"
},
{
"name":"1.68-1.72",
"expr":"([pixel] >= 1.68 AND [pixel] < 1.72)",
"color":"81 84 70"
},
{
"name":">1.72",
"expr":"([pixel] >= 1.72)",
"color":"76 76 73"
}
]
}
]
}
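# Illustrative note (not part of the original source; the downstream tooling is
# assumed, not shown here): given the "mapfile_output_prefix" setting, each
# entry of a layer's "class" list presumably maps onto a MapServer raster CLASS
# block, e.g. the first bacteria_biomass class might render roughly as:
#   CLASS
#     NAME "< 3.06"
#     EXPRESSION ([pixel] < 3.06)
#     STYLE
#       COLOR 235 235 235
#     END
#   END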
|
gpl-3.0
|
home-assistant/home-assistant
|
tests/helpers/test_discovery.py
|
6
|
6983
|
"""Test discovery helpers."""
from unittest.mock import patch
from homeassistant import setup
from homeassistant.core import callback
from homeassistant.helpers import discovery
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.util.async_ import run_callback_threadsafe
from tests.common import (
MockModule,
MockPlatform,
get_test_home_assistant,
mock_coro,
mock_entity_platform,
mock_integration,
)
class TestHelpersDiscovery:
"""Tests for discovery helper methods."""
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
@patch("homeassistant.setup.async_setup_component", return_value=mock_coro())
def test_listen(self, mock_setup_component):
"""Test discovery listen/discover combo."""
helpers = self.hass.helpers
calls_single = []
@callback
def callback_single(service, info):
"""Service discovered callback."""
calls_single.append((service, info))
self.hass.add_job(
helpers.discovery.async_listen, "test service", callback_single
)
self.hass.add_job(
helpers.discovery.async_discover,
"test service",
"discovery info",
"test_component",
{},
)
self.hass.block_till_done()
assert mock_setup_component.called
assert mock_setup_component.call_args[0] == (self.hass, "test_component", {})
assert len(calls_single) == 1
assert calls_single[0] == ("test service", "discovery info")
@patch("homeassistant.setup.async_setup_component", return_value=mock_coro(True))
def test_platform(self, mock_setup_component):
"""Test discover platform method."""
calls = []
@callback
def platform_callback(platform, info):
"""Platform callback method."""
calls.append((platform, info))
run_callback_threadsafe(
self.hass.loop,
discovery.async_listen_platform,
self.hass,
"test_component",
platform_callback,
).result()
discovery.load_platform(
self.hass,
"test_component",
"test_platform",
"discovery info",
{"test_component": {}},
)
self.hass.block_till_done()
assert mock_setup_component.called
assert mock_setup_component.call_args[0] == (
self.hass,
"test_component",
{"test_component": {}},
)
self.hass.block_till_done()
discovery.load_platform(
self.hass,
"test_component_2",
"test_platform",
"discovery info",
{"test_component": {}},
)
self.hass.block_till_done()
assert len(calls) == 1
assert calls[0] == ("test_platform", "discovery info")
dispatcher_send(
self.hass,
discovery.SIGNAL_PLATFORM_DISCOVERED,
{"service": discovery.EVENT_LOAD_PLATFORM.format("test_component")},
)
self.hass.block_till_done()
assert len(calls) == 1
def test_circular_import(self):
"""Test we don't break doing circular import.
This test will have test_component discover the switch.test_circular
component while setting up.
The supplied config will load test_component and will load
switch.test_circular.
That means that after startup, we will have test_component and switch
setup. The test_circular platform has been loaded twice.
"""
component_calls = []
platform_calls = []
def component_setup(hass, config):
"""Set up mock component."""
discovery.load_platform(hass, "switch", "test_circular", "disc", config)
component_calls.append(1)
return True
def setup_platform(hass, config, add_entities_callback, discovery_info=None):
"""Set up mock platform."""
platform_calls.append("disc" if discovery_info else "component")
mock_integration(self.hass, MockModule("test_component", setup=component_setup))
# dependencies are only set in component level
# since we are using manifest to hold them
mock_integration(
self.hass, MockModule("test_circular", dependencies=["test_component"])
)
mock_entity_platform(
self.hass, "switch.test_circular", MockPlatform(setup_platform)
)
setup.setup_component(
self.hass,
"test_component",
{"test_component": None, "switch": [{"platform": "test_circular"}]},
)
self.hass.block_till_done()
        # test_component will only be set up once
assert len(component_calls) == 1
# The platform will be setup once via the config in `setup_component`
# and once via the discovery inside test_component.
assert len(platform_calls) == 2
assert "test_component" in self.hass.config.components
assert "switch" in self.hass.config.components
@patch("homeassistant.helpers.signal.async_register_signal_handling")
def test_1st_discovers_2nd_component(self, mock_signal):
"""Test that we don't break if one component discovers the other.
If the first component fires a discovery event to set up the
second component while the second component is about to be set up,
it should not set up the second component twice.
"""
component_calls = []
async def component1_setup(hass, config):
"""Set up mock component."""
print("component1 setup")
await discovery.async_discover(
hass, "test_component2", {}, "test_component2", {}
)
return True
def component2_setup(hass, config):
"""Set up mock component."""
component_calls.append(1)
return True
mock_integration(
self.hass, MockModule("test_component1", async_setup=component1_setup)
)
mock_integration(
self.hass, MockModule("test_component2", setup=component2_setup)
)
@callback
def do_setup():
"""Set up 2 components."""
self.hass.async_add_job(
setup.async_setup_component(self.hass, "test_component1", {})
)
self.hass.async_add_job(
setup.async_setup_component(self.hass, "test_component2", {})
)
self.hass.add_job(do_setup)
self.hass.block_till_done()
        # test_component2 will only be set up once
assert len(component_calls) == 1
|
apache-2.0
|
joergdietrich/astropy
|
astropy/vo/client/async.py
|
3
|
2515
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Asynchronous VO service requests."""
from __future__ import absolute_import, division, print_function, unicode_literals
# LOCAL
from ...utils.compat.futures import ThreadPoolExecutor
from ...utils.decorators import deprecated
__all__ = ['AsyncBase']
@deprecated(
'2.0', alternative='astroquery.vo_conesearch.async.AsyncBase')
class AsyncBase(object):
"""Base class for asynchronous VO service requests
using :py:class:`concurrent.futures.ThreadPoolExecutor`.
Service request will be forced to run in silent
mode by setting ``verbose=False``. Warnings are controlled
by :py:mod:`warnings` module.
.. note::
Methods of the attributes can be accessed directly,
with priority given to ``executor``.
Parameters
----------
func : function
The function to run.
args, kwargs
Arguments and keywords accepted by the service request
function to be called asynchronously.
Attributes
----------
executor : :py:class:`concurrent.futures.ThreadPoolExecutor`
Executor running the function on single thread.
future : :py:class:`concurrent.futures.Future`
Asynchronous execution created by ``executor``.
"""
def __init__(self, func, *args, **kwargs):
kwargs['verbose'] = False
self.executor = ThreadPoolExecutor(1)
self.future = self.executor.submit(func, *args, **kwargs)
def __getattr__(self, what):
"""Expose ``executor`` and ``future`` methods."""
try:
return getattr(self.executor, what)
except AttributeError:
return getattr(self.future, what)
def get(self, timeout=None):
"""Get result, if available, then shut down thread.
Parameters
----------
timeout : int or float
Wait the given amount of time in seconds before
obtaining result. If not given, wait indefinitely
until function is done.
Returns
-------
result
Result returned by the function.
Raises
------
Exception
Errors raised by :py:class:`concurrent.futures.Future`.
"""
try:
result = self.future.result(timeout=timeout)
except Exception as e: # pragma: no cover
result = None
raise e
finally:
self.executor.shutdown(wait=False)
return result
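# Illustrative sketch (not part of the original source; ``do_search`` is a
# hypothetical blocking service-request function):
#
#     async_job = AsyncBase(do_search, 'M31', pedantic=False)
#     # ... do other work while the request runs in its worker thread ...
#     result = async_job.get(timeout=30)
#
# ``get`` blocks for at most ``timeout`` seconds, re-raises any exception from
# the worker thread, and always shuts the executor down before returning.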
|
bsd-3-clause
|
fangxingli/hue
|
desktop/core/ext-py/cryptography-1.3.1/src/_cffi_src/openssl/bio.py
|
7
|
5009
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/bio.h>
"""
TYPES = """
typedef struct bio_st BIO;
typedef void bio_info_cb(BIO *, int, const char *, int, long, long);
struct bio_method_st {
int type;
const char *name;
int (*bwrite)(BIO *, const char *, int);
int (*bread)(BIO *, char *, int);
int (*bputs)(BIO *, const char *);
int (*bgets)(BIO *, char *, int);
long (*ctrl)(BIO *, int, long, void *);
int (*create)(BIO *);
int (*destroy)(BIO *);
long (*callback_ctrl)(BIO *, int, bio_info_cb *);
...;
};
typedef struct bio_method_st BIO_METHOD;
struct bio_st {
BIO_METHOD *method;
long (*callback)(struct bio_st *, int, const char *, int, long, long);
char *cb_arg;
int init;
int shutdown;
int flags;
int retry_reason;
int num;
void *ptr;
struct bio_st *next_bio;
struct bio_st *prev_bio;
int references;
unsigned long num_read;
unsigned long num_write;
...;
};
typedef ... BUF_MEM;
static const int BIO_TYPE_MEM;
static const int BIO_TYPE_FILE;
static const int BIO_TYPE_FD;
static const int BIO_TYPE_SOCKET;
static const int BIO_TYPE_CONNECT;
static const int BIO_TYPE_ACCEPT;
static const int BIO_TYPE_NULL;
static const int BIO_CLOSE;
static const int BIO_NOCLOSE;
static const int BIO_TYPE_SOURCE_SINK;
static const int BIO_CTRL_RESET;
static const int BIO_CTRL_EOF;
static const int BIO_CTRL_SET;
static const int BIO_CTRL_SET_CLOSE;
static const int BIO_CTRL_FLUSH;
static const int BIO_CTRL_DUP;
static const int BIO_CTRL_GET_CLOSE;
static const int BIO_CTRL_INFO;
static const int BIO_CTRL_GET;
static const int BIO_CTRL_PENDING;
static const int BIO_CTRL_WPENDING;
static const int BIO_C_FILE_SEEK;
static const int BIO_C_FILE_TELL;
static const int BIO_TYPE_NONE;
static const int BIO_TYPE_NBIO_TEST;
static const int BIO_TYPE_BER;
static const int BIO_TYPE_BIO;
static const int BIO_TYPE_DESCRIPTOR;
static const int BIO_FLAGS_READ;
static const int BIO_FLAGS_WRITE;
static const int BIO_FLAGS_IO_SPECIAL;
static const int BIO_FLAGS_RWS;
static const int BIO_FLAGS_SHOULD_RETRY;
static const int BIO_TYPE_NULL_FILTER;
static const int BIO_TYPE_SSL;
static const int BIO_TYPE_MD;
static const int BIO_TYPE_BUFFER;
static const int BIO_TYPE_CIPHER;
static const int BIO_TYPE_BASE64;
static const int BIO_TYPE_FILTER;
"""
FUNCTIONS = """
BIO *BIO_new(BIO_METHOD *);
int BIO_set(BIO *, BIO_METHOD *);
int BIO_free(BIO *);
void BIO_vfree(BIO *);
void BIO_free_all(BIO *);
BIO *BIO_push(BIO *, BIO *);
BIO *BIO_pop(BIO *);
BIO *BIO_next(BIO *);
BIO *BIO_find_type(BIO *, int);
BIO_METHOD *BIO_s_mem(void);
BIO_METHOD *BIO_s_file(void);
BIO *BIO_new_file(const char *, const char *);
BIO *BIO_new_fp(FILE *, int);
BIO_METHOD *BIO_s_fd(void);
BIO *BIO_new_fd(int, int);
BIO_METHOD *BIO_s_socket(void);
BIO *BIO_new_socket(int, int);
BIO_METHOD *BIO_s_null(void);
long BIO_ctrl(BIO *, int, long, void *);
long BIO_callback_ctrl(
BIO *,
int,
void (*)(struct bio_st *, int, const char *, int, long, long)
);
long BIO_int_ctrl(BIO *, int, long, int);
size_t BIO_ctrl_pending(BIO *);
size_t BIO_ctrl_wpending(BIO *);
int BIO_read(BIO *, void *, int);
int BIO_gets(BIO *, char *, int);
int BIO_write(BIO *, const void *, int);
int BIO_puts(BIO *, const char *);
BIO_METHOD *BIO_f_null(void);
BIO_METHOD *BIO_f_buffer(void);
"""
MACROS = """
/* BIO_new_mem_buf became const void * in 1.0.2g */
BIO *BIO_new_mem_buf(void *, int);
long BIO_set_fd(BIO *, long, int);
long BIO_get_fd(BIO *, char *);
long BIO_set_mem_eof_return(BIO *, int);
long BIO_get_mem_data(BIO *, char **);
long BIO_set_mem_buf(BIO *, BUF_MEM *, int);
long BIO_get_mem_ptr(BIO *, BUF_MEM **);
long BIO_set_fp(BIO *, FILE *, int);
long BIO_get_fp(BIO *, FILE **);
long BIO_read_filename(BIO *, char *);
long BIO_write_filename(BIO *, char *);
long BIO_append_filename(BIO *, char *);
long BIO_rw_filename(BIO *, char *);
int BIO_should_read(BIO *);
int BIO_should_write(BIO *);
int BIO_should_io_special(BIO *);
int BIO_retry_type(BIO *);
int BIO_should_retry(BIO *);
int BIO_reset(BIO *);
int BIO_seek(BIO *, int);
int BIO_tell(BIO *);
int BIO_flush(BIO *);
int BIO_eof(BIO *);
int BIO_set_close(BIO *,long);
int BIO_get_close(BIO *);
int BIO_pending(BIO *);
int BIO_wpending(BIO *);
int BIO_get_info_callback(BIO *, bio_info_cb **);
int BIO_set_info_callback(BIO *, bio_info_cb *);
long BIO_get_buffer_num_lines(BIO *);
long BIO_set_read_buffer_size(BIO *, long);
long BIO_set_write_buffer_size(BIO *, long);
long BIO_set_buffer_size(BIO *, long);
long BIO_set_buffer_read_data(BIO *, void *, long);
/* The following was a macro in 0.9.8e. Once we drop support for RHEL/CentOS 5
we should move this back to FUNCTIONS. */
int BIO_method_type(const BIO *);
"""
CUSTOMIZATIONS = """
"""
|
apache-2.0
|
Bernardo-MG/CWR-WebClient
|
cwr_webclient/report/mera.py
|
2
|
8673
|
# -*- encoding: utf-8 -*-
import StringIO
import xlsxwriter
"""
Match report generation module.
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
def generate_match_report_excel(matches, filename):
output = StringIO.StringIO()
workbook = xlsxwriter.Workbook(output, {'in_memory': True})
generate_match_report_excel_summary(workbook, filename)
generate_match_report_excel_queries_list(workbook, matches)
generate_match_report_excel_queries_results_all(workbook, matches)
generate_match_report_excel_queries_results_matched(workbook, matches)
generate_match_report_excel_queries_results_not_matched(workbook, matches)
workbook.close()
output.seek(0)
return output.read()
def generate_match_report_excel_queries_results_all(workbook, matches):
results_sheet = workbook.add_worksheet('All queries results')
bold = workbook.add_format({'bold': 1})
row = 1
col = 0
for match in (matches):
results_sheet.write(row, col, 'Query', bold)
results_sheet.write(row, col + 1, match['query'])
row += 1
results_sheet.write(row, col, 'ISWC', bold)
results_sheet.write(row, col + 1, match['iswc'])
row += 1
results_sheet.write(row, col, 'Type', bold)
results_sheet.write(row, col + 1, match['type_of_query'])
row += 1
results = match['results']
if len(results) > 0:
results_sheet.write(row, col, 'Results', bold)
row += 1
for result in results:
results_sheet.write(row, col + 1, 'Entity', bold)
results_sheet.write(row, col + 2, result['entity'])
row += 1
results_sheet.write(row, col + 1, 'ISRC', bold)
results_sheet.write(row, col + 2, result['isrc'])
row += 1
results_sheet.write(row, col + 1, 'USO Transaction ID', bold)
results_sheet.write(row, col + 2, result['usos_transaction_id'])
row += 1
results_sheet.write(row, col + 1, 'Refined score', bold)
results_sheet.write(row, col + 2, result['refined_score'])
row += 1
results_sheet.write(row, col + 1, 'Raw score', bold)
results_sheet.write(row, col + 2, result['raw_score'])
row += 1
results_sheet.write(row, col + 1, 'Matched forms:', bold)
for key, value in result['matched_forms'].iteritems():
row += 1
results_sheet.write(row, col + 2, 'Form', bold)
results_sheet.write(row, col + 3, key)
row += 1
results_sheet.write(row, col + 2, 'Rating', bold)
results_sheet.write(row, col + 3, value)
row += 1
results_sheet.write(row, col + 1, 'Refinements:', bold)
for refinement in (result['refinements']):
row += 1
results_sheet.write(row, col + 2, 'Content', bold)
results_sheet.write(row, col + 3, refinement['content'])
row += 1
results_sheet.write(row, col + 2, 'Type', bold)
results_sheet.write(row, col + 3, refinement['type'])
row += 1
results_sheet.write(row, col + 2, 'Relevance', bold)
results_sheet.write(row, col + 3, refinement['relevance'])
row += 1
results_sheet.write(row, col + 2, 'Matched forms', bold)
for form in (result['matched_forms']):
row += 1
results_sheet.write(row, col + 2, form)
row += 1
row += 1
def generate_match_report_excel_queries_results_matched(workbook, matches):
results_sheet = workbook.add_worksheet('Queries results (matched)')
bold = workbook.add_format({'bold': 1})
row = 1
col = 0
for match in (matches):
results = match['results']
if len(results) > 0:
results_sheet.write(row, col, 'Query', bold)
results_sheet.write(row, col + 1, match['query'])
row += 1
results_sheet.write(row, col, 'ISWC', bold)
results_sheet.write(row, col + 1, match['iswc'])
row += 1
results_sheet.write(row, col, 'Type', bold)
results_sheet.write(row, col + 1, match['type_of_query'])
row += 1
results_sheet.write(row, col, 'Results', bold)
row += 1
for result in results:
results_sheet.write(row, col + 1, 'Entity', bold)
results_sheet.write(row, col + 2, result['entity'])
row += 1
results_sheet.write(row, col + 1, 'ISRC', bold)
results_sheet.write(row, col + 2, result['isrc'])
row += 1
results_sheet.write(row, col + 1, 'USO Transaction ID', bold)
results_sheet.write(row, col + 2, result['usos_transaction_id'])
row += 1
results_sheet.write(row, col + 1, 'Refined score', bold)
results_sheet.write(row, col + 2, result['refined_score'])
row += 1
results_sheet.write(row, col + 1, 'Raw score', bold)
results_sheet.write(row, col + 2, result['raw_score'])
row += 1
results_sheet.write(row, col + 1, 'Matched forms:', bold)
for key, value in result['matched_forms'].iteritems():
row += 1
results_sheet.write(row, col + 2, 'Form', bold)
results_sheet.write(row, col + 3, key)
row += 1
results_sheet.write(row, col + 2, 'Rating', bold)
results_sheet.write(row, col + 3, value)
row += 1
results_sheet.write(row, col + 1, 'Refinements:', bold)
for refinement in (result['refinements']):
row += 1
results_sheet.write(row, col + 2, 'Content', bold)
results_sheet.write(row, col + 3, refinement['content'])
row += 1
results_sheet.write(row, col + 2, 'Type', bold)
results_sheet.write(row, col + 3, refinement['type'])
row += 1
results_sheet.write(row, col + 2, 'Relevance', bold)
results_sheet.write(row, col + 3, refinement['relevance'])
row += 1
results_sheet.write(row, col + 2, 'Matched forms', bold)
for form in (result['matched_forms']):
row += 1
results_sheet.write(row, col + 2, form)
row += 1
row += 1
def generate_match_report_excel_queries_results_not_matched(workbook, matches):
results_sheet = workbook.add_worksheet('Queries results (not matched)')
bold = workbook.add_format({'bold': 1})
row = 1
col = 0
for match in (matches):
results = match['results']
if len(results) == 0:
results_sheet.write(row, col, 'Query', bold)
results_sheet.write(row, col + 1, match['query'])
row += 1
results_sheet.write(row, col, 'ISWC', bold)
results_sheet.write(row, col + 1, match['iswc'])
row += 1
results_sheet.write(row, col, 'Type', bold)
results_sheet.write(row, col + 1, match['type_of_query'])
row += 1
row += 1
def generate_match_report_excel_queries_list(workbook, matches):
queries_sheet = workbook.add_worksheet('Queries list')
bold = workbook.add_format({'bold': 1})
queries_sheet.write('A1', 'Query', bold)
queries_sheet.write('B1', 'ISWC', bold)
queries_sheet.write('C1', 'Type of Query', bold)
row = 1
col = 0
for match in (matches):
queries_sheet.write(row, col, match['query'])
queries_sheet.write(row, col + 1, match['iswc'])
queries_sheet.write(row, col + 2, match['type_of_query'])
row += 1
queries_sheet.write(row, 0, 'Count', bold)
queries_sheet.write(row, 1, '=ROWS(B2:B%s)' % row)
def generate_match_report_excel_summary(workbook, filename):
queries_sheet = workbook.add_worksheet('Summary')
bold = workbook.add_format({'bold': 1})
queries_sheet.write('A1', 'Filename', bold)
queries_sheet.write('B1', filename)
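# --- Usage sketch (illustrative addition, not part of the original module) ---
# The dictionary below mirrors the structure the report functions read from
# each match: a query with its ISWC and type of query, plus zero or more
# results carrying scores, matched forms and refinements. All values are
# hypothetical sample data.
if __name__ == '__main__':
    _sample_matches = [{
        'query': 'Some Work Title',
        'iswc': 'T-000000001-0',
        'type_of_query': 'title',
        'results': [{
            'entity': 'Some Work Title (Live)',
            'isrc': 'USXXX0000001',
            'usos_transaction_id': '12345',
            'refined_score': 0.95,
            'raw_score': 0.80,
            'matched_forms': {'SOME WORK TITLE': 0.95},
            'refinements': [
                {'content': 'Live', 'type': 'version', 'relevance': 0.5},
            ],
        }],
    }]
    _report = generate_match_report_excel(_sample_matches, 'sample_queries.csv')
    with open('match_report.xlsx', 'wb') as _report_file:
        _report_file.write(_report)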
|
mit
|
Jorge-Rodriguez/ansible
|
lib/ansible/module_utils/facts/virtual/freebsd.py
|
31
|
2052
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
from ansible.module_utils.facts.virtual.sysctl import VirtualSysctlDetectionMixin
class FreeBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
"""
This is a FreeBSD-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'FreeBSD'
def get_virtual_facts(self):
virtual_facts = {}
# Set empty values as default
virtual_facts['virtualization_type'] = ''
virtual_facts['virtualization_role'] = ''
if os.path.exists('/dev/xen/xenstore'):
virtual_facts['virtualization_type'] = 'xen'
virtual_facts['virtualization_role'] = 'guest'
if virtual_facts['virtualization_type'] == '':
virtual_product_facts = self.detect_virt_product('kern.vm_guest') or self.detect_virt_product('hw.hv_vendor')
virtual_facts.update(virtual_product_facts)
if virtual_facts['virtualization_type'] == '':
virtual_vendor_facts = self.detect_virt_vendor('hw.model')
virtual_facts.update(virtual_vendor_facts)
return virtual_facts
class FreeBSDVirtualCollector(VirtualCollector):
_fact_class = FreeBSDVirtual
_platform = 'FreeBSD'
|
gpl-3.0
|
proxysh/Safejumper-for-Desktop
|
buildmac/Resources/env/lib/python2.7/site-packages/twisted/trial/test/test_keyboard.py
|
16
|
4036
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for interrupting tests with Control-C.
"""
from __future__ import absolute_import, division
from twisted.python.compat import NativeStringIO
from twisted.trial import unittest
from twisted.trial import reporter, runner
class TrialTest(unittest.SynchronousTestCase):
def setUp(self):
self.output = NativeStringIO()
self.reporter = reporter.TestResult()
self.loader = runner.TestLoader()
class InterruptInTestTests(TrialTest):
class InterruptedTest(unittest.TestCase):
def test_02_raiseInterrupt(self):
raise KeyboardInterrupt
def test_01_doNothing(self):
pass
def test_03_doNothing(self):
InterruptInTestTests.test_03_doNothing_run = True
def setUp(self):
super(InterruptInTestTests, self).setUp()
self.suite = self.loader.loadClass(InterruptInTestTests.InterruptedTest)
InterruptInTestTests.test_03_doNothing_run = None
def test_setUpOK(self):
self.assertEqual(3, self.suite.countTestCases())
self.assertEqual(0, self.reporter.testsRun)
self.assertFalse(self.reporter.shouldStop)
def test_interruptInTest(self):
runner.TrialSuite([self.suite]).run(self.reporter)
self.assertTrue(self.reporter.shouldStop)
self.assertEqual(2, self.reporter.testsRun)
self.assertFalse(InterruptInTestTests.test_03_doNothing_run,
"test_03_doNothing ran.")
class InterruptInSetUpTests(TrialTest):
testsRun = 0
class InterruptedTest(unittest.TestCase):
def setUp(self):
if InterruptInSetUpTests.testsRun > 0:
raise KeyboardInterrupt
def test_01(self):
InterruptInSetUpTests.testsRun += 1
def test_02(self):
InterruptInSetUpTests.testsRun += 1
InterruptInSetUpTests.test_02_run = True
def setUp(self):
super(InterruptInSetUpTests, self).setUp()
self.suite = self.loader.loadClass(
InterruptInSetUpTests.InterruptedTest)
InterruptInSetUpTests.test_02_run = False
InterruptInSetUpTests.testsRun = 0
def test_setUpOK(self):
self.assertEqual(0, InterruptInSetUpTests.testsRun)
self.assertEqual(2, self.suite.countTestCases())
self.assertEqual(0, self.reporter.testsRun)
self.assertFalse(self.reporter.shouldStop)
def test_interruptInSetUp(self):
runner.TrialSuite([self.suite]).run(self.reporter)
self.assertTrue(self.reporter.shouldStop)
self.assertEqual(2, self.reporter.testsRun)
self.assertFalse(InterruptInSetUpTests.test_02_run,
"test_02 ran")
class InterruptInTearDownTests(TrialTest):
testsRun = 0
class InterruptedTest(unittest.TestCase):
def tearDown(self):
if InterruptInTearDownTests.testsRun > 0:
raise KeyboardInterrupt
def test_01(self):
InterruptInTearDownTests.testsRun += 1
def test_02(self):
InterruptInTearDownTests.testsRun += 1
InterruptInTearDownTests.test_02_run = True
def setUp(self):
super(InterruptInTearDownTests, self).setUp()
self.suite = self.loader.loadClass(
InterruptInTearDownTests.InterruptedTest)
InterruptInTearDownTests.testsRun = 0
InterruptInTearDownTests.test_02_run = False
def test_setUpOK(self):
self.assertEqual(0, InterruptInTearDownTests.testsRun)
self.assertEqual(2, self.suite.countTestCases())
self.assertEqual(0, self.reporter.testsRun)
self.assertFalse(self.reporter.shouldStop)
def test_interruptInTearDown(self):
runner.TrialSuite([self.suite]).run(self.reporter)
self.assertEqual(1, self.reporter.testsRun)
self.assertTrue(self.reporter.shouldStop)
self.assertFalse(InterruptInTearDownTests.test_02_run,
"test_02 ran")
|
gpl-2.0
|
gengliangwang/spark
|
python/pyspark/tests/test_serializers.py
|
15
|
9192
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import sys
import unittest
from pyspark import serializers
from pyspark.serializers import CloudPickleSerializer, CompressedSerializer, \
AutoBatchedSerializer, BatchedSerializer, AutoSerializer, NoOpSerializer, PairDeserializer, \
FlattenedValuesSerializer, CartesianDeserializer, PickleSerializer, UTF8Deserializer, \
MarshalSerializer
from pyspark.testing.utils import PySparkTestCase, read_int, write_int, ByteArrayOutput, \
have_numpy, have_scipy
class SerializationTestCase(unittest.TestCase):
def test_namedtuple(self):
from collections import namedtuple
from pickle import dumps, loads
P = namedtuple("P", "x y")
p1 = P(1, 3)
p2 = loads(dumps(p1, 2))
self.assertEqual(p1, p2)
from pyspark.cloudpickle import dumps
P2 = loads(dumps(P))
p3 = P2(1, 3)
self.assertEqual(p1, p3)
def test_itemgetter(self):
from operator import itemgetter
ser = CloudPickleSerializer()
d = range(10)
getter = itemgetter(1)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = itemgetter(0, 3)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
def test_function_module_name(self):
ser = CloudPickleSerializer()
func = lambda x: x
func2 = ser.loads(ser.dumps(func))
self.assertEqual(func.__module__, func2.__module__)
def test_attrgetter(self):
from operator import attrgetter
ser = CloudPickleSerializer()
class C(object):
def __getattr__(self, item):
return item
d = C()
getter = attrgetter("a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("a", "b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
d.e = C()
getter = attrgetter("e.a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("e.a", "e.b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
# Regression test for SPARK-3415
def test_pickling_file_handles(self):
# to be corrected with SPARK-11160
try:
import xmlrunner # type: ignore[import] # noqa: F401
except ImportError:
ser = CloudPickleSerializer()
out1 = sys.stderr
out2 = ser.loads(ser.dumps(out1))
self.assertEqual(out1, out2)
def test_func_globals(self):
class Unpicklable(object):
def __reduce__(self):
raise RuntimeError("not picklable")
global exit
exit = Unpicklable()
ser = CloudPickleSerializer()
self.assertRaises(Exception, lambda: ser.dumps(exit))
def foo():
sys.exit(0)
self.assertTrue("exit" in foo.__code__.co_names)
ser.dumps(foo)
def test_compressed_serializer(self):
ser = CompressedSerializer(PickleSerializer())
from io import BytesIO as StringIO
io = StringIO()
ser.dump_stream(["abc", u"123", range(5)], io)
io.seek(0)
self.assertEqual(["abc", u"123", range(5)], list(ser.load_stream(io)))
ser.dump_stream(range(1000), io)
io.seek(0)
self.assertEqual(["abc", u"123", range(5)] + list(range(1000)), list(ser.load_stream(io)))
io.close()
def test_hash_serializer(self):
hash(NoOpSerializer())
hash(UTF8Deserializer())
hash(PickleSerializer())
hash(MarshalSerializer())
hash(AutoSerializer())
hash(BatchedSerializer(PickleSerializer()))
hash(AutoBatchedSerializer(MarshalSerializer()))
hash(PairDeserializer(NoOpSerializer(), UTF8Deserializer()))
hash(CartesianDeserializer(NoOpSerializer(), UTF8Deserializer()))
hash(CompressedSerializer(PickleSerializer()))
hash(FlattenedValuesSerializer(PickleSerializer()))
@unittest.skipIf(not have_scipy, "SciPy not installed")
class SciPyTests(PySparkTestCase):
"""General PySpark tests that depend on scipy """
def test_serialize(self):
from scipy.special import gammaln
x = range(1, 5)
expected = list(map(gammaln, x))
observed = self.sc.parallelize(x).map(gammaln).collect()
self.assertEqual(expected, observed)
@unittest.skipIf(not have_numpy, "NumPy not installed")
class NumPyTests(PySparkTestCase):
"""General PySpark tests that depend on numpy """
def test_statcounter_array(self):
import numpy as np
x = self.sc.parallelize([np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([3.0, 3.0])])
s = x.stats()
self.assertSequenceEqual([2.0, 2.0], s.mean().tolist())
self.assertSequenceEqual([1.0, 1.0], s.min().tolist())
self.assertSequenceEqual([3.0, 3.0], s.max().tolist())
self.assertSequenceEqual([1.0, 1.0], s.sampleStdev().tolist())
stats_dict = s.asDict()
self.assertEqual(3, stats_dict['count'])
self.assertSequenceEqual([2.0, 2.0], stats_dict['mean'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['min'].tolist())
self.assertSequenceEqual([3.0, 3.0], stats_dict['max'].tolist())
self.assertSequenceEqual([6.0, 6.0], stats_dict['sum'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['stdev'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['variance'].tolist())
stats_sample_dict = s.asDict(sample=True)
self.assertEqual(3, stats_dict['count'])
self.assertSequenceEqual([2.0, 2.0], stats_sample_dict['mean'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_sample_dict['min'].tolist())
self.assertSequenceEqual([3.0, 3.0], stats_sample_dict['max'].tolist())
self.assertSequenceEqual([6.0, 6.0], stats_sample_dict['sum'].tolist())
self.assertSequenceEqual(
[0.816496580927726, 0.816496580927726], stats_sample_dict['stdev'].tolist())
self.assertSequenceEqual(
[0.6666666666666666, 0.6666666666666666], stats_sample_dict['variance'].tolist())
class SerializersTest(unittest.TestCase):
def test_chunked_stream(self):
original_bytes = bytearray(range(100))
for data_length in [1, 10, 100]:
for buffer_length in [1, 2, 3, 5, 20, 99, 100, 101, 500]:
dest = ByteArrayOutput()
stream_out = serializers.ChunkedStream(dest, buffer_length)
stream_out.write(original_bytes[:data_length])
stream_out.close()
num_chunks = int(math.ceil(float(data_length) / buffer_length))
# length for each chunk, and a final -1 at the very end
exp_size = (num_chunks + 1) * 4 + data_length
self.assertEqual(len(dest.buffer), exp_size)
dest_pos = 0
data_pos = 0
for chunk_idx in range(num_chunks):
chunk_length = read_int(dest.buffer[dest_pos:(dest_pos + 4)])
if chunk_idx == num_chunks - 1:
exp_length = data_length % buffer_length
if exp_length == 0:
exp_length = buffer_length
else:
exp_length = buffer_length
self.assertEqual(chunk_length, exp_length)
dest_pos += 4
dest_chunk = dest.buffer[dest_pos:dest_pos + chunk_length]
orig_chunk = original_bytes[data_pos:data_pos + chunk_length]
self.assertEqual(dest_chunk, orig_chunk)
dest_pos += chunk_length
data_pos += chunk_length
# ends with a -1
self.assertEqual(dest.buffer[-4:], write_int(-1))
if __name__ == "__main__":
from pyspark.tests.test_serializers import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
apache-2.0
|
grlee77/numpy
|
setup.py
|
1
|
18343
|
#!/usr/bin/env python3
""" NumPy is the fundamental package for array computing with Python.
It provides:
- a powerful N-dimensional array object
- sophisticated (broadcasting) functions
- tools for integrating C/C++ and Fortran code
- useful linear algebra, Fourier transform, and random number capabilities
- and much more
Besides its obvious scientific uses, NumPy can also be used as an efficient
multi-dimensional container of generic data. Arbitrary data-types can be
defined. This allows NumPy to seamlessly and speedily integrate with a wide
variety of databases.
All NumPy wheels distributed on PyPI are BSD licensed.
"""
DOCLINES = (__doc__ or '').split("\n")
import os
import sys
import subprocess
import textwrap
import warnings
if sys.version_info[:2] < (3, 6):
raise RuntimeError("Python version >= 3.6 required.")
import builtins
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved :: BSD License
Programming Language :: C
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: Implementation :: CPython
Topic :: Software Development
Topic :: Scientific/Engineering
Typing :: Typed
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
MAJOR = 1
MINOR = 20
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
# The first version not in the `Programming Language :: Python :: ...` classifiers above
if sys.version_info >= (3, 10):
warnings.warn(
f"NumPy {VERSION} may not yet support Python "
f"{sys.version_info.major}.{sys.version_info.minor}.",
RuntimeWarning,
)
# Return the git revision as a string
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH', 'HOME']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except (subprocess.SubprocessError, OSError):
GIT_REVISION = "Unknown"
if not GIT_REVISION:
# this shouldn't happen but apparently can (see gh-8512)
GIT_REVISION = "Unknown"
return GIT_REVISION
# BEFORE importing setuptools, remove MANIFEST. Otherwise it may not be
# properly updated when the contents of directories change (true for distutils,
# not sure about setuptools).
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
# This is a bit hackish: we are setting a global variable so that the main
# numpy __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet. While ugly, it's
# a lot more robust than what was previously being used.
builtins.__NUMPY_SETUP__ = True
def get_version_info():
# Adding the git rev number needs to be done inside write_version_py(),
# otherwise the import of numpy.version messes up the build under Python 3.
FULLVERSION = VERSION
if os.path.exists('.git'):
GIT_REVISION = git_version()
elif os.path.exists('numpy/version.py'):
# must be a source distribution, use existing version file
try:
from numpy.version import git_revision as GIT_REVISION
except ImportError:
raise ImportError("Unable to import git_revision. Try removing "
"numpy/version.py and the build directory "
"before building.")
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
return FULLVERSION, GIT_REVISION
def write_version_py(filename='numpy/version.py'):
cnt = """
# THIS FILE IS GENERATED FROM NUMPY SETUP.PY
#
# To compare versions robustly, use `numpy.lib.NumpyVersion`
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
version = full_version
"""
FULLVERSION, GIT_REVISION = get_version_info()
a = open(filename, 'w')
try:
a.write(cnt % {'version': VERSION,
'full_version': FULLVERSION,
'git_revision': GIT_REVISION,
'isrelease': str(ISRELEASED)})
finally:
a.close()
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('numpy')
config.add_data_files(('numpy', 'LICENSE.txt'))
config.add_data_files(('numpy', 'numpy/*.pxd'))
config.get_version('numpy/version.py') # sets config.version
return config
def check_submodules():
""" verify that the submodules are checked out and clean
use `git submodule update --init`; on failure
"""
if not os.path.exists('.git'):
return
with open('.gitmodules') as f:
for line in f:
if 'path' in line:
p = line.split('=')[-1].strip()
if not os.path.exists(p):
raise ValueError('Submodule {} missing'.format(p))
proc = subprocess.Popen(['git', 'submodule', 'status'],
stdout=subprocess.PIPE)
status, _ = proc.communicate()
status = status.decode("ascii", "replace")
for line in status.splitlines():
if line.startswith('-') or line.startswith('+'):
raise ValueError('Submodule not clean: {}'.format(line))
class concat_license_files():
"""Merge LICENSE.txt and LICENSES_bundled.txt for sdist creation
Done this way to keep LICENSE.txt in repo as exact BSD 3-clause (see
gh-13447). This makes GitHub state correctly how NumPy is licensed.
"""
def __init__(self):
self.f1 = 'LICENSE.txt'
self.f2 = 'LICENSES_bundled.txt'
def __enter__(self):
"""Concatenate files and remove LICENSES_bundled.txt"""
with open(self.f1, 'r') as f1:
self.bsd_text = f1.read()
with open(self.f1, 'a') as f1:
with open(self.f2, 'r') as f2:
self.bundled_text = f2.read()
f1.write('\n\n')
f1.write(self.bundled_text)
def __exit__(self, exception_type, exception_value, traceback):
"""Restore content of both files"""
with open(self.f1, 'w') as f:
f.write(self.bsd_text)
from distutils.command.sdist import sdist
class sdist_checked(sdist):
""" check submodules on sdist to prevent incomplete tarballs """
def run(self):
check_submodules()
with concat_license_files():
sdist.run(self)
def get_build_overrides():
"""
Custom build commands to add `-std=c99` to compilation
"""
from numpy.distutils.command.build_clib import build_clib
from numpy.distutils.command.build_ext import build_ext
from distutils.version import LooseVersion
def _needs_gcc_c99_flag(obj):
if obj.compiler.compiler_type != 'unix':
return False
cc = obj.compiler.compiler[0]
if "gcc" not in cc:
return False
# will print something like '4.2.1\n'
out = subprocess.run([cc, '-dumpversion'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=True)
# -std=c99 is default from this version on
if LooseVersion(out.stdout) >= LooseVersion('5.0'):
return False
return True
class new_build_clib(build_clib):
def build_a_library(self, build_info, lib_name, libraries):
if _needs_gcc_c99_flag(self):
args = build_info.get('extra_compiler_args') or []
args.append('-std=c99')
build_info['extra_compiler_args'] = args
build_clib.build_a_library(self, build_info, lib_name, libraries)
class new_build_ext(build_ext):
def build_extension(self, ext):
if _needs_gcc_c99_flag(self):
if '-std=c99' not in ext.extra_compile_args:
ext.extra_compile_args.append('-std=c99')
build_ext.build_extension(self, ext)
return new_build_clib, new_build_ext
def generate_cython():
cwd = os.path.abspath(os.path.dirname(__file__))
print("Cythonizing sources")
for d in ('random',):
p = subprocess.call([sys.executable,
os.path.join(cwd, 'tools', 'cythonize.py'),
'numpy/{0}'.format(d)],
cwd=cwd)
if p != 0:
raise RuntimeError("Running cythonize failed!")
def parse_setuppy_commands():
"""Check the commands and respond appropriately. Disable broken commands.
Return a boolean value for whether or not to run the build (avoid
parsing Cython and template files if False).
"""
args = sys.argv[1:]
if not args:
# User forgot to give an argument probably, let setuptools handle that.
return True
info_commands = ['--help-commands', '--name', '--version', '-V',
'--fullname', '--author', '--author-email',
'--maintainer', '--maintainer-email', '--contact',
'--contact-email', '--url', '--license', '--description',
'--long-description', '--platforms', '--classifiers',
'--keywords', '--provides', '--requires', '--obsoletes']
for command in info_commands:
if command in args:
return False
# Note that 'alias', 'saveopts' and 'setopt' commands also seem to work
# fine as they are, but are usually used together with one of the commands
# below and not standalone. Hence they're not added to good_commands.
good_commands = ('develop', 'sdist', 'build', 'build_ext', 'build_py',
'build_clib', 'build_scripts', 'bdist_wheel', 'bdist_rpm',
'bdist_wininst', 'bdist_msi', 'bdist_mpkg', 'build_src')
for command in good_commands:
if command in args:
return True
# The following commands are supported, but we need to show more
# useful messages to the user
if 'install' in args:
print(textwrap.dedent("""
Note: if you need reliable uninstall behavior, then install
with pip instead of using `setup.py install`:
- `pip install .` (from a git repo or downloaded source
release)
- `pip install numpy` (last NumPy release on PyPi)
"""))
return True
if '--help' in args or '-h' in sys.argv[1]:
print(textwrap.dedent("""
NumPy-specific help
-------------------
To install NumPy from here with reliable uninstall, we recommend
that you use `pip install .`. To install the latest NumPy release
from PyPi, use `pip install numpy`.
For help with build/installation issues, please ask on the
numpy-discussion mailing list. If you are sure that you have run
into a bug, please report it at https://github.com/numpy/numpy/issues.
Setuptools commands help
------------------------
"""))
return False
# The following commands aren't supported. They can only be executed when
# the user explicitly adds a --force command-line argument.
bad_commands = dict(
test="""
`setup.py test` is not supported. Use one of the following
instead:
- `python runtests.py` (to build and test)
- `python runtests.py --no-build` (to test installed numpy)
- `>>> numpy.test()` (run tests for installed numpy
from within an interpreter)
""",
upload="""
`setup.py upload` is not supported, because it's insecure.
Instead, build what you want to upload and upload those files
with `twine upload -s <filenames>` instead.
""",
upload_docs="`setup.py upload_docs` is not supported",
easy_install="`setup.py easy_install` is not supported",
clean="""
`setup.py clean` is not supported, use one of the following instead:
- `git clean -xdf` (cleans all files)
- `git clean -Xdf` (cleans all versioned files, doesn't touch
files that aren't checked into the git repo)
""",
check="`setup.py check` is not supported",
register="`setup.py register` is not supported",
bdist_dumb="`setup.py bdist_dumb` is not supported",
bdist="`setup.py bdist` is not supported",
build_sphinx="""
`setup.py build_sphinx` is not supported, use the
Makefile under doc/""",
flake8="`setup.py flake8` is not supported, use flake8 standalone",
)
bad_commands['nosetests'] = bad_commands['test']
for command in ('upload_docs', 'easy_install', 'bdist', 'bdist_dumb',
'register', 'check', 'install_data', 'install_headers',
'install_lib', 'install_scripts', ):
bad_commands[command] = "`setup.py %s` is not supported" % command
for command in bad_commands.keys():
if command in args:
print(textwrap.dedent(bad_commands[command]) +
"\nAdd `--force` to your command to use it anyway if you "
"must (unsupported).\n")
sys.exit(1)
# Commands that do more than print info, but also don't need Cython and
# template parsing.
other_commands = ['egg_info', 'install_egg_info', 'rotate']
for command in other_commands:
if command in args:
return False
# If we got here, we didn't detect what setup.py command was given
import warnings
warnings.warn("Unrecognized setuptools command, proceeding with "
"generating Cython sources and expanding templates",
stacklevel=2)
return True
def get_docs_url():
if not ISRELEASED:
return "https://numpy.org/devdocs"
else:
# For releases, this URL ends up on pypi.
# By pinning the version, users looking at old PyPI releases can get
# to the associated docs easily.
return "https://numpy.org/doc/{}.{}".format(MAJOR, MINOR)
def setup_package():
src_path = os.path.dirname(os.path.abspath(__file__))
old_path = os.getcwd()
os.chdir(src_path)
sys.path.insert(0, src_path)
# Rewrite the version file every time
write_version_py()
# The f2py scripts that will be installed
if sys.platform == 'win32':
f2py_cmds = [
'f2py = numpy.f2py.f2py2e:main',
]
else:
f2py_cmds = [
'f2py = numpy.f2py.f2py2e:main',
'f2py%s = numpy.f2py.f2py2e:main' % sys.version_info[:1],
'f2py%s.%s = numpy.f2py.f2py2e:main' % sys.version_info[:2],
]
cmdclass = {"sdist": sdist_checked, }
metadata = dict(
name='numpy',
maintainer="NumPy Developers",
maintainer_email="[email protected]",
description=DOCLINES[0],
long_description="\n".join(DOCLINES[2:]),
url="https://www.numpy.org",
author="Travis E. Oliphant et al.",
download_url="https://pypi.python.org/pypi/numpy",
project_urls={
"Bug Tracker": "https://github.com/numpy/numpy/issues",
"Documentation": get_docs_url(),
"Source Code": "https://github.com/numpy/numpy",
},
license='BSD',
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
test_suite='pytest',
cmdclass=cmdclass,
python_requires='>=3.6',
zip_safe=False,
entry_points={
'console_scripts': f2py_cmds
},
)
if "--force" in sys.argv:
run_build = True
sys.argv.remove('--force')
else:
# Raise errors for unsupported commands, improve help output, etc.
run_build = parse_setuppy_commands()
if run_build:
# patches distutils, even though we don't use it
import setuptools # noqa: F401
from numpy.distutils.core import setup
if 'sdist' not in sys.argv:
# Generate Cython sources, unless we're generating an sdist
generate_cython()
metadata['configuration'] = configuration
# Customize extension building
cmdclass['build_clib'], cmdclass['build_ext'] = get_build_overrides()
else:
from setuptools import setup
# Version number is added to metadata inside configuration() if build
# is run.
metadata['version'] = get_version_info()[0]
try:
setup(**metadata)
finally:
del sys.path[0]
os.chdir(old_path)
return
if __name__ == '__main__':
setup_package()
# This may avoid problems where numpy is installed via ``*_requires`` by
# setuptools, the global namespace isn't reset properly, and then numpy is
# imported later (which will then fail to load numpy extension modules).
# See gh-7956 for details
del builtins.__NUMPY_SETUP__
|
bsd-3-clause
|
anbasile/flask_sample
|
flask/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langhungarianmodel.py
|
2763
|
12536
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to a word
# 252: 0 - 9
# Character Mapping Table:
Latin2_HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 71, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,
175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 75,198,199,200,201,202,203,204,205,
79,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 81,222, 78,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 69, 63,239,240,241,
82, 14, 74,242, 70, 80,243, 72,244, 15, 83, 77, 84, 30, 76, 85,
245,246,247, 25, 73, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
win1250HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 72, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,
177,178,179,180, 78,181, 69,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 76,198,199,200,201,202,203,204,205,
81,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 83,222, 80,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 70, 63,239,240,241,
84, 14, 75,242, 71, 82,243, 73,244, 15, 85, 79, 86, 30, 77, 87,
245,246,247, 25, 74, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 94.7368%
# first 1024 sequences: 5.2623%
# rest sequences: 0.8894%
# negative sequences: 0.0009%
HungarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,2,3,3,1,1,2,2,2,2,2,1,2,
3,2,2,3,3,3,3,3,2,3,3,3,3,3,3,1,2,3,3,3,3,2,3,3,1,1,3,3,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
3,2,1,3,3,3,3,3,2,3,3,3,3,3,1,1,2,3,3,3,3,3,3,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,2,3,3,3,1,3,3,3,3,3,1,3,3,2,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,3,3,2,3,3,2,2,3,2,3,2,0,3,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,3,3,3,1,2,3,2,2,3,1,2,3,3,2,2,0,3,3,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,3,2,3,3,3,3,2,3,3,3,3,0,2,3,2,
0,0,0,1,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,3,3,2,1,3,2,2,3,2,1,3,2,2,1,0,3,3,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,2,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,3,2,2,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,1,3,3,3,3,3,2,2,1,3,3,3,0,1,1,2,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,1,3,2,2,2,3,1,1,3,3,1,1,0,3,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,2,3,3,3,3,3,1,2,3,2,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,1,3,3,2,2,1,3,3,3,1,1,3,1,2,3,2,3,2,2,2,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,2,2,3,2,1,0,3,2,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,1,0,3,3,3,3,0,2,3,0,0,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,2,3,3,0,1,2,3,2,3,2,2,3,2,1,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,1,2,3,3,3,2,1,2,3,3,2,2,2,3,2,3,3,1,3,3,1,1,0,2,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,2,2,2,2,3,3,3,1,1,1,3,3,1,1,3,1,1,3,2,1,2,3,1,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,1,2,1,1,3,3,1,1,1,1,3,3,1,1,2,2,1,2,1,1,2,2,1,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,1,2,1,1,3,3,1,0,1,1,3,3,2,0,1,1,2,3,1,0,2,2,1,0,0,1,3,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,1,3,3,3,3,3,1,2,3,2,3,3,2,1,1,3,2,3,2,1,2,2,0,1,2,1,0,0,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,2,2,2,3,1,2,2,1,1,3,3,0,3,2,1,2,3,2,1,3,3,1,1,0,2,1,3,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,2,3,3,3,2,1,1,3,3,1,1,1,2,2,3,2,3,2,2,2,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,0,3,3,3,3,3,0,0,3,3,2,3,0,0,0,2,3,3,1,0,1,2,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,2,3,3,3,3,3,1,2,3,3,2,2,1,1,0,3,3,2,2,1,2,2,1,0,2,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,2,1,3,1,2,3,3,2,2,1,1,2,2,1,1,1,1,3,2,1,1,1,1,2,1,0,1,2,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
2,3,3,1,1,1,1,1,3,3,3,0,1,1,3,3,1,1,1,1,1,2,2,0,3,1,1,2,0,2,1,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,1,0,1,2,1,2,2,0,1,2,3,1,2,0,0,0,2,1,1,1,1,1,2,0,0,1,1,0,0,0,0,
1,2,1,2,2,2,1,2,1,2,0,2,0,2,2,1,1,2,1,1,2,1,1,1,0,1,0,0,0,1,1,0,
1,1,1,2,3,2,3,3,0,1,2,2,3,1,0,1,0,2,1,2,2,0,1,1,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,3,3,2,2,1,0,0,3,2,3,2,0,0,0,1,1,3,0,0,1,1,0,0,2,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,1,0,1,3,2,3,1,1,1,0,1,1,1,1,1,3,1,0,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,2,2,2,1,0,1,2,3,3,2,0,0,0,2,1,1,1,2,1,1,1,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,2,1,1,1,1,1,1,0,1,1,1,0,0,1,1,
3,2,2,1,0,0,1,1,2,2,0,3,0,1,2,1,1,0,0,1,1,1,0,1,1,1,1,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,1,1,1,1,1,2,1,1,1,2,3,1,1,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,3,3,1,0,0,1,2,2,1,0,0,0,0,2,0,0,1,1,1,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,1,0,1,1,0,1,1,1,0,1,2,1,1,0,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,2,2,0,0,0,0,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,1,0,
2,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
3,2,2,0,1,0,1,0,2,3,2,0,0,1,2,2,1,0,0,1,1,1,0,0,2,1,0,1,2,2,1,1,
2,1,1,1,1,1,1,2,1,1,1,1,1,1,0,2,1,0,1,1,0,1,1,1,0,1,1,2,1,1,0,1,
2,2,2,0,0,1,0,0,2,2,1,1,0,0,2,1,1,0,0,0,1,2,0,0,2,1,0,0,2,1,1,1,
2,1,1,1,1,2,1,2,1,1,1,2,2,1,1,2,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,
1,2,3,0,0,0,1,0,3,2,1,0,0,1,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,2,1,
1,1,0,0,0,1,0,1,1,1,1,1,2,0,0,1,0,0,0,2,0,0,1,1,1,1,1,1,1,1,0,1,
3,0,0,2,1,2,2,1,0,0,2,1,2,2,0,0,0,2,1,1,1,0,1,1,0,0,1,1,2,0,0,0,
1,2,1,2,2,1,1,2,1,2,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,0,0,1,
1,3,2,0,0,0,1,0,2,2,2,0,0,0,2,2,1,0,0,0,0,3,1,1,1,1,0,0,2,1,1,1,
2,1,0,1,1,1,0,1,1,1,1,1,1,1,0,2,1,0,0,1,0,1,1,0,1,1,1,1,1,1,0,1,
2,3,2,0,0,0,1,0,2,2,0,0,0,0,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,1,0,
2,1,1,1,1,2,1,2,1,2,0,1,1,1,0,2,1,1,1,2,1,1,1,1,0,1,1,1,1,1,0,1,
3,1,1,2,2,2,3,2,1,1,2,2,1,1,0,1,0,2,2,1,1,1,1,1,0,0,1,1,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,0,0,0,0,0,2,2,0,0,0,0,2,2,1,0,0,0,1,1,0,0,1,2,0,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,1,1,0,1,2,1,1,1,0,1,
1,0,0,1,2,3,2,1,0,0,2,0,1,1,0,0,0,1,1,1,1,0,1,1,0,0,1,0,0,0,0,0,
1,2,1,2,1,2,1,1,1,2,0,2,1,1,1,0,1,2,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,3,2,0,0,0,0,0,1,1,2,1,0,0,1,1,1,0,0,0,0,2,0,0,1,1,0,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,0,1,1,1,1,0,2,1,1,1,1,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,1,1,1,0,2,2,2,0,0,0,3,2,1,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0,
1,1,0,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,0,0,1,1,1,0,1,0,1,
2,1,0,2,1,1,2,2,1,1,2,1,1,1,0,0,0,1,1,0,1,1,1,1,0,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,1,0,
1,2,3,0,0,0,1,0,2,2,0,0,0,0,2,2,0,0,0,0,0,1,0,0,1,0,0,0,2,0,1,0,
2,1,1,1,1,1,0,2,0,0,0,1,2,1,1,1,1,0,1,2,0,1,0,1,0,1,1,1,0,1,0,1,
2,2,2,0,0,0,1,0,2,1,2,0,0,0,1,1,2,0,0,0,0,1,0,0,1,1,0,0,2,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,1,0,2,2,2,0,0,0,1,1,0,0,0,0,0,1,1,0,2,0,0,1,1,1,0,1,
1,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,0,1,0,1,1,1,1,1,0,0,0,1,
1,0,0,1,0,1,2,1,0,0,1,1,1,2,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,0,0,0,
0,2,1,2,1,1,1,1,1,2,0,2,0,1,1,0,1,2,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,0,1,2,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,2,1,0,1,
2,2,1,1,1,1,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,0,0,1,1,0,0,0,0,2,1,0,0,0,0,0,2,0,0,2,2,0,0,2,0,0,1,
2,1,1,1,1,1,1,1,0,1,1,0,1,1,0,1,0,0,0,1,1,1,1,0,0,1,1,1,1,0,0,1,
1,1,2,0,0,3,1,0,2,1,1,1,0,0,1,1,1,0,0,0,1,1,0,0,0,1,0,0,1,0,1,0,
1,2,1,0,1,1,1,2,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,1,0,0,0,1,0,0,
2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,2,0,0,0,
2,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,1,0,1,
2,1,1,1,2,1,1,1,0,1,1,2,1,0,0,0,0,1,1,1,1,0,1,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,1,1,1,1,0,0,1,1,2,1,0,0,0,1,1,0,0,0,1,1,0,0,1,0,1,0,0,0,
1,2,1,1,1,1,1,1,1,1,0,1,0,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,0,0,0,1,1,1,1,0,0,1,1,0,0,0,0,0,1,1,1,2,0,0,1,0,0,1,0,1,0,0,0,
0,1,1,1,1,1,1,1,1,2,0,1,1,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,0,0,2,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,1,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,0,1,0,0,1,1,0,1,0,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
)
Latin2HungarianModel = {
'charToOrderMap': Latin2_HungarianCharToOrderMap,
'precedenceMatrix': HungarianLangModel,
'mTypicalPositiveRatio': 0.947368,
'keepEnglishLetter': True,
'charsetName': "ISO-8859-2"
}
Win1250HungarianModel = {
'charToOrderMap': win1250HungarianCharToOrderMap,
'precedenceMatrix': HungarianLangModel,
'mTypicalPositiveRatio': 0.947368,
'keepEnglishLetter': True,
'charsetName': "windows-1250"
}
# flake8: noqa
|
mit
|
witgo/spark
|
python/pyspark/mllib/random.py
|
22
|
19517
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Python package for random data generation.
"""
import sys
from functools import wraps
from pyspark.mllib.common import callMLlibFunc
__all__ = ['RandomRDDs', ]
def toArray(f):
@wraps(f)
def func(sc, *a, **kw):
rdd = f(sc, *a, **kw)
return rdd.map(lambda vec: vec.toArray())
return func
class RandomRDDs(object):
"""
Generator methods for creating RDDs comprised of i.i.d samples from
some distribution.
.. versionadded:: 1.1.0
"""
@staticmethod
def uniformRDD(sc, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the
uniform distribution U(0.0, 1.0).
To transform the distribution in the generated RDD from U(0.0, 1.0)
to U(a, b), use
``RandomRDDs.uniformRDD(sc, n, p, seed).map(lambda v: a + (b - a) * v)``
.. versionadded:: 1.1.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
used to create the RDD.
size : int
Size of the RDD.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of float comprised of i.i.d. samples ~ `U(0.0, 1.0)`.
Examples
--------
>>> x = RandomRDDs.uniformRDD(sc, 100).collect()
>>> len(x)
100
>>> max(x) <= 1.0 and min(x) >= 0.0
True
>>> RandomRDDs.uniformRDD(sc, 100, 4).getNumPartitions()
4
>>> parts = RandomRDDs.uniformRDD(sc, 100, seed=4).getNumPartitions()
>>> parts == sc.defaultParallelism
True
"""
return callMLlibFunc("uniformRDD", sc._jsc, size, numPartitions, seed)
@staticmethod
def normalRDD(sc, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the standard normal
distribution.
To transform the distribution in the generated RDD from standard normal
to some other normal N(mean, sigma^2), use
``RandomRDDs.normal(sc, n, p, seed).map(lambda v: mean + sigma * v)``
.. versionadded:: 1.1.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
used to create the RDD.
size : int
Size of the RDD.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of float comprised of i.i.d. samples ~ N(0.0, 1.0).
Examples
--------
>>> x = RandomRDDs.normalRDD(sc, 1000, seed=1)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - 0.0) < 0.1
True
>>> abs(stats.stdev() - 1.0) < 0.1
True
"""
return callMLlibFunc("normalRDD", sc._jsc, size, numPartitions, seed)
@staticmethod
def logNormalRDD(sc, mean, std, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the log normal
distribution with the input mean and standard deviation.
.. versionadded:: 1.3.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
used to create the RDD.
mean : float
mean for the log Normal distribution
std : float
std for the log Normal distribution
size : int
Size of the RDD.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
RDD of float comprised of i.i.d. samples ~ log N(mean, std).
Examples
--------
>>> from math import sqrt, exp
>>> mean = 0.0
>>> std = 1.0
>>> expMean = exp(mean + 0.5 * std * std)
>>> expStd = sqrt((exp(std * std) - 1.0) * exp(2.0 * mean + std * std))
>>> x = RandomRDDs.logNormalRDD(sc, mean, std, 1000, seed=2)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - expMean) < 0.5
True
>>> from math import sqrt
>>> abs(stats.stdev() - expStd) < 0.5
True
"""
return callMLlibFunc("logNormalRDD", sc._jsc, float(mean), float(std),
size, numPartitions, seed)
@staticmethod
def poissonRDD(sc, mean, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the Poisson
distribution with the input mean.
.. versionadded:: 1.1.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
mean : float
Mean, or lambda, for the Poisson distribution.
size : int
Size of the RDD.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of float comprised of i.i.d. samples ~ Pois(mean).
Examples
--------
>>> mean = 100.0
>>> x = RandomRDDs.poissonRDD(sc, mean, 1000, seed=2)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - mean) < 0.5
True
>>> from math import sqrt
>>> abs(stats.stdev() - sqrt(mean)) < 0.5
True
"""
return callMLlibFunc("poissonRDD", sc._jsc, float(mean), size, numPartitions, seed)
@staticmethod
def exponentialRDD(sc, mean, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the Exponential
distribution with the input mean.
.. versionadded:: 1.3.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
mean : float
Mean, or 1 / lambda, for the Exponential distribution.
size : int
Size of the RDD.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of float comprised of i.i.d. samples ~ Exp(mean).
Examples
--------
>>> mean = 2.0
>>> x = RandomRDDs.exponentialRDD(sc, mean, 1000, seed=2)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - mean) < 0.5
True
>>> from math import sqrt
>>> abs(stats.stdev() - sqrt(mean)) < 0.5
True
"""
return callMLlibFunc("exponentialRDD", sc._jsc, float(mean), size, numPartitions, seed)
@staticmethod
def gammaRDD(sc, shape, scale, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the Gamma
distribution with the input shape and scale.
.. versionadded:: 1.3.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
shape : float
shape (> 0) parameter for the Gamma distribution
scale : float
scale (> 0) parameter for the Gamma distribution
size : int
Size of the RDD.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of float comprised of i.i.d. samples ~ Gamma(shape, scale).
Examples
--------
>>> from math import sqrt
>>> shape = 1.0
>>> scale = 2.0
>>> expMean = shape * scale
>>> expStd = sqrt(shape * scale * scale)
>>> x = RandomRDDs.gammaRDD(sc, shape, scale, 1000, seed=2)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - expMean) < 0.5
True
>>> abs(stats.stdev() - expStd) < 0.5
True
"""
return callMLlibFunc("gammaRDD", sc._jsc, float(shape),
float(scale), size, numPartitions, seed)
@staticmethod
@toArray
def uniformVectorRDD(sc, numRows, numCols, numPartitions=None, seed=None):
"""
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the uniform distribution U(0.0, 1.0).
.. versionadded:: 1.1.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
numRows : int
Number of Vectors in the RDD.
numCols : int
Number of elements in each Vector.
numPartitions : int, optional
Number of partitions in the RDD.
seed : int, optional
Seed for the RNG that generates the seed for the generator in each partition.
Returns
-------
:py:class:`pyspark.RDD`
RDD of Vector with vectors containing i.i.d samples ~ `U(0.0, 1.0)`.
Examples
--------
>>> import numpy as np
>>> mat = np.matrix(RandomRDDs.uniformVectorRDD(sc, 10, 10).collect())
>>> mat.shape
(10, 10)
>>> mat.max() <= 1.0 and mat.min() >= 0.0
True
>>> RandomRDDs.uniformVectorRDD(sc, 10, 10, 4).getNumPartitions()
4
"""
return callMLlibFunc("uniformVectorRDD", sc._jsc, numRows, numCols, numPartitions, seed)
@staticmethod
@toArray
def normalVectorRDD(sc, numRows, numCols, numPartitions=None, seed=None):
"""
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the standard normal distribution.
.. versionadded:: 1.1.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
numRows : int
Number of Vectors in the RDD.
numCols : int
Number of elements in each Vector.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of Vector with vectors containing i.i.d. samples ~ `N(0.0, 1.0)`.
Examples
--------
>>> import numpy as np
>>> mat = np.matrix(RandomRDDs.normalVectorRDD(sc, 100, 100, seed=1).collect())
>>> mat.shape
(100, 100)
>>> abs(mat.mean() - 0.0) < 0.1
True
>>> abs(mat.std() - 1.0) < 0.1
True
"""
return callMLlibFunc("normalVectorRDD", sc._jsc, numRows, numCols, numPartitions, seed)
@staticmethod
@toArray
def logNormalVectorRDD(sc, mean, std, numRows, numCols, numPartitions=None, seed=None):
"""
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the log normal distribution.
.. versionadded:: 1.3.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
mean : float
Mean of the log normal distribution
std : float
Standard Deviation of the log normal distribution
numRows : int
Number of Vectors in the RDD.
numCols : int
Number of elements in each Vector.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of Vector with vectors containing i.i.d. samples ~ log `N(mean, std)`.
Examples
--------
>>> import numpy as np
>>> from math import sqrt, exp
>>> mean = 0.0
>>> std = 1.0
>>> expMean = exp(mean + 0.5 * std * std)
>>> expStd = sqrt((exp(std * std) - 1.0) * exp(2.0 * mean + std * std))
>>> m = RandomRDDs.logNormalVectorRDD(sc, mean, std, 100, 100, seed=1).collect()
>>> mat = np.matrix(m)
>>> mat.shape
(100, 100)
>>> abs(mat.mean() - expMean) < 0.1
True
>>> abs(mat.std() - expStd) < 0.1
True
"""
return callMLlibFunc("logNormalVectorRDD", sc._jsc, float(mean), float(std),
numRows, numCols, numPartitions, seed)
@staticmethod
@toArray
def poissonVectorRDD(sc, mean, numRows, numCols, numPartitions=None, seed=None):
"""
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the Poisson distribution with the input mean.
.. versionadded:: 1.1.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
mean : float
Mean, or lambda, for the Poisson distribution.
        numRows : int
Number of Vectors in the RDD.
numCols : int
Number of elements in each Vector.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`)
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of Vector with vectors containing i.i.d. samples ~ Pois(mean).
Examples
--------
>>> import numpy as np
>>> mean = 100.0
>>> rdd = RandomRDDs.poissonVectorRDD(sc, mean, 100, 100, seed=1)
>>> mat = np.mat(rdd.collect())
>>> mat.shape
(100, 100)
>>> abs(mat.mean() - mean) < 0.5
True
>>> from math import sqrt
>>> abs(mat.std() - sqrt(mean)) < 0.5
True
"""
return callMLlibFunc("poissonVectorRDD", sc._jsc, float(mean), numRows, numCols,
numPartitions, seed)
@staticmethod
@toArray
def exponentialVectorRDD(sc, mean, numRows, numCols, numPartitions=None, seed=None):
"""
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the Exponential distribution with the input mean.
.. versionadded:: 1.3.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
mean : float
Mean, or 1 / lambda, for the Exponential distribution.
numRows : int
Number of Vectors in the RDD.
numCols : int
Number of elements in each Vector.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`)
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of Vector with vectors containing i.i.d. samples ~ Exp(mean).
Examples
--------
>>> import numpy as np
>>> mean = 0.5
>>> rdd = RandomRDDs.exponentialVectorRDD(sc, mean, 100, 100, seed=1)
>>> mat = np.mat(rdd.collect())
>>> mat.shape
(100, 100)
>>> abs(mat.mean() - mean) < 0.5
True
>>> from math import sqrt
>>> abs(mat.std() - sqrt(mean)) < 0.5
True
"""
return callMLlibFunc("exponentialVectorRDD", sc._jsc, float(mean), numRows, numCols,
numPartitions, seed)
@staticmethod
@toArray
def gammaVectorRDD(sc, shape, scale, numRows, numCols, numPartitions=None, seed=None):
"""
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the Gamma distribution.
.. versionadded:: 1.3.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
shape : float
Shape (> 0) of the Gamma distribution
scale : float
Scale (> 0) of the Gamma distribution
numRows : int
Number of Vectors in the RDD.
numCols : int
Number of elements in each Vector.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
        seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of Vector with vectors containing i.i.d. samples ~ Gamma(shape, scale).
Examples
--------
>>> import numpy as np
>>> from math import sqrt
>>> shape = 1.0
>>> scale = 2.0
>>> expMean = shape * scale
>>> expStd = sqrt(shape * scale * scale)
>>> mat = np.matrix(RandomRDDs.gammaVectorRDD(sc, shape, scale, 100, 100, seed=1).collect())
>>> mat.shape
(100, 100)
>>> abs(mat.mean() - expMean) < 0.1
True
>>> abs(mat.std() - expStd) < 0.1
True
"""
return callMLlibFunc("gammaVectorRDD", sc._jsc, float(shape), float(scale),
numRows, numCols, numPartitions, seed)
def _test():
import doctest
from pyspark.sql import SparkSession
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("mllib.random tests")\
.getOrCreate()
globs['sc'] = spark.sparkContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
apache-2.0
|
WillieMaddox/numpy
|
numpy/distutils/command/build_src.py
|
141
|
32258
|
""" Build swig, f2py, pyrex sources.
"""
from __future__ import division, absolute_import, print_function
import os
import re
import sys
import shlex
import copy
from distutils.command import build_ext
from distutils.dep_util import newer_group, newer
from distutils.util import get_platform
from distutils.errors import DistutilsError, DistutilsSetupError
def have_pyrex():
try:
import Pyrex.Compiler.Main
return True
except ImportError:
return False
# this import can't be done here, as it uses numpy stuff only available
# after it's installed
#import numpy.f2py
from numpy.distutils import log
from numpy.distutils.misc_util import fortran_ext_match, \
appendpath, is_string, is_sequence, get_cmd
from numpy.distutils.from_template import process_file as process_f_file
from numpy.distutils.conv_template import process_file as process_c_file
def subst_vars(target, source, d):
"""Substitute any occurence of @foo@ by d['foo'] from source file into
target."""
var = re.compile('@([a-zA-Z_]+)@')
fs = open(source, 'r')
try:
ft = open(target, 'w')
try:
for l in fs:
m = var.search(l)
if m:
ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)]))
else:
ft.write(l)
finally:
ft.close()
finally:
fs.close()
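# Usage sketch for subst_vars() (file names and key are illustrative only):
# given a template containing the token "@version@", the call below writes a
# copy of it with that token replaced by the supplied value.
#
#   subst_vars('npymath.ini', 'npymath.ini.in', {'version': '1.0'})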
class build_src(build_ext.build_ext):
description = "build sources from SWIG, F2PY files or a function"
user_options = [
('build-src=', 'd', "directory to \"build\" sources to"),
('f2py-opts=', None, "list of f2py command line options"),
('swig=', None, "path to the SWIG executable"),
('swig-opts=', None, "list of SWIG command line options"),
('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"),
('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete
('swigflags=', None, "additional flags to swig (use --swig-opts= instead)"), # obsolete
('force', 'f', "forcibly build everything (ignore file timestamps)"),
('inplace', 'i',
"ignore build-lib and put compiled extensions into the source " +
"directory alongside your pure Python modules"),
]
boolean_options = ['force', 'inplace']
help_options = []
def initialize_options(self):
self.extensions = None
self.package = None
self.py_modules = None
self.py_modules_dict = None
self.build_src = None
self.build_lib = None
self.build_base = None
self.force = None
self.inplace = None
self.package_dir = None
self.f2pyflags = None # obsolete
self.f2py_opts = None
self.swigflags = None # obsolete
self.swig_opts = None
self.swig_cpp = None
self.swig = None
def finalize_options(self):
self.set_undefined_options('build',
('build_base', 'build_base'),
('build_lib', 'build_lib'),
('force', 'force'))
if self.package is None:
self.package = self.distribution.ext_package
self.extensions = self.distribution.ext_modules
self.libraries = self.distribution.libraries or []
self.py_modules = self.distribution.py_modules or []
self.data_files = self.distribution.data_files or []
if self.build_src is None:
plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3])
self.build_src = os.path.join(self.build_base, 'src'+plat_specifier)
# py_modules_dict is used in build_py.find_package_modules
self.py_modules_dict = {}
if self.f2pyflags:
if self.f2py_opts:
log.warn('ignoring --f2pyflags as --f2py-opts already used')
else:
self.f2py_opts = self.f2pyflags
self.f2pyflags = None
if self.f2py_opts is None:
self.f2py_opts = []
else:
self.f2py_opts = shlex.split(self.f2py_opts)
if self.swigflags:
if self.swig_opts:
log.warn('ignoring --swigflags as --swig-opts already used')
else:
self.swig_opts = self.swigflags
self.swigflags = None
if self.swig_opts is None:
self.swig_opts = []
else:
self.swig_opts = shlex.split(self.swig_opts)
# use options from build_ext command
build_ext = self.get_finalized_command('build_ext')
if self.inplace is None:
self.inplace = build_ext.inplace
if self.swig_cpp is None:
self.swig_cpp = build_ext.swig_cpp
for c in ['swig', 'swig_opt']:
o = '--'+c.replace('_', '-')
v = getattr(build_ext, c, None)
if v:
if getattr(self, c):
log.warn('both build_src and build_ext define %s option' % (o))
else:
log.info('using "%s=%s" option from build_ext command' % (o, v))
setattr(self, c, v)
def run(self):
log.info("build_src")
if not (self.extensions or self.libraries):
return
self.build_sources()
def build_sources(self):
if self.inplace:
self.get_package_dir = \
self.get_finalized_command('build_py').get_package_dir
self.build_py_modules_sources()
for libname_info in self.libraries:
self.build_library_sources(*libname_info)
if self.extensions:
self.check_extensions_list(self.extensions)
for ext in self.extensions:
self.build_extension_sources(ext)
self.build_data_files_sources()
self.build_npy_pkg_config()
def build_data_files_sources(self):
if not self.data_files:
return
log.info('building data_files sources')
from numpy.distutils.misc_util import get_data_files
new_data_files = []
for data in self.data_files:
if isinstance(data, str):
new_data_files.append(data)
elif isinstance(data, tuple):
d, files = data
if self.inplace:
build_dir = self.get_package_dir('.'.join(d.split(os.sep)))
else:
build_dir = os.path.join(self.build_src, d)
funcs = [f for f in files if hasattr(f, '__call__')]
files = [f for f in files if not hasattr(f, '__call__')]
for f in funcs:
if f.__code__.co_argcount==1:
s = f(build_dir)
else:
s = f()
if s is not None:
if isinstance(s, list):
files.extend(s)
elif isinstance(s, str):
files.append(s)
else:
raise TypeError(repr(s))
filenames = get_data_files((d, files))
new_data_files.append((d, filenames))
else:
raise TypeError(repr(data))
self.data_files[:] = new_data_files
def _build_npy_pkg_config(self, info, gd):
import shutil
template, install_dir, subst_dict = info
template_dir = os.path.dirname(template)
for k, v in gd.items():
subst_dict[k] = v
if self.inplace == 1:
generated_dir = os.path.join(template_dir, install_dir)
else:
generated_dir = os.path.join(self.build_src, template_dir,
install_dir)
generated = os.path.basename(os.path.splitext(template)[0])
generated_path = os.path.join(generated_dir, generated)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
subst_vars(generated_path, template, subst_dict)
# Where to install relatively to install prefix
full_install_dir = os.path.join(template_dir, install_dir)
return full_install_dir, generated_path
def build_npy_pkg_config(self):
log.info('build_src: building npy-pkg config files')
# XXX: another ugly workaround to circumvent distutils brain damage. We
# need the install prefix here, but finalizing the options of the
        # install command when only building sources causes an error. Instead, we
        # copy the install command instance, and finalize the copy so that it
        # does not disrupt how distutils wants to do things with the
        # original install command instance.
install_cmd = copy.copy(get_cmd('install'))
if not install_cmd.finalized == 1:
install_cmd.finalize_options()
build_npkg = False
gd = {}
if self.inplace == 1:
top_prefix = '.'
build_npkg = True
elif hasattr(install_cmd, 'install_libbase'):
top_prefix = install_cmd.install_libbase
build_npkg = True
if build_npkg:
for pkg, infos in self.distribution.installed_pkg_config.items():
pkg_path = self.distribution.package_dir[pkg]
prefix = os.path.join(os.path.abspath(top_prefix), pkg_path)
d = {'prefix': prefix}
for info in infos:
install_dir, generated = self._build_npy_pkg_config(info, d)
self.distribution.data_files.append((install_dir,
[generated]))
def build_py_modules_sources(self):
if not self.py_modules:
return
log.info('building py_modules sources')
new_py_modules = []
for source in self.py_modules:
if is_sequence(source) and len(source)==3:
package, module_base, source = source
if self.inplace:
build_dir = self.get_package_dir(package)
else:
build_dir = os.path.join(self.build_src,
os.path.join(*package.split('.')))
if hasattr(source, '__call__'):
target = os.path.join(build_dir, module_base + '.py')
source = source(target)
if source is None:
continue
modules = [(package, module_base, source)]
if package not in self.py_modules_dict:
self.py_modules_dict[package] = []
self.py_modules_dict[package] += modules
else:
new_py_modules.append(source)
self.py_modules[:] = new_py_modules
def build_library_sources(self, lib_name, build_info):
sources = list(build_info.get('sources', []))
if not sources:
return
log.info('building library "%s" sources' % (lib_name))
sources = self.generate_sources(sources, (lib_name, build_info))
sources = self.template_sources(sources, (lib_name, build_info))
sources, h_files = self.filter_h_files(sources)
if h_files:
log.info('%s - nothing done with h_files = %s',
self.package, h_files)
#for f in h_files:
# self.distribution.headers.append((lib_name,f))
build_info['sources'] = sources
return
def build_extension_sources(self, ext):
sources = list(ext.sources)
log.info('building extension "%s" sources' % (ext.name))
fullname = self.get_ext_fullname(ext.name)
modpath = fullname.split('.')
package = '.'.join(modpath[0:-1])
if self.inplace:
self.ext_target_dir = self.get_package_dir(package)
sources = self.generate_sources(sources, ext)
sources = self.template_sources(sources, ext)
sources = self.swig_sources(sources, ext)
sources = self.f2py_sources(sources, ext)
sources = self.pyrex_sources(sources, ext)
sources, py_files = self.filter_py_files(sources)
if package not in self.py_modules_dict:
self.py_modules_dict[package] = []
modules = []
for f in py_files:
module = os.path.splitext(os.path.basename(f))[0]
modules.append((package, module, f))
self.py_modules_dict[package] += modules
sources, h_files = self.filter_h_files(sources)
if h_files:
log.info('%s - nothing done with h_files = %s',
package, h_files)
#for f in h_files:
# self.distribution.headers.append((package,f))
ext.sources = sources
def generate_sources(self, sources, extension):
new_sources = []
func_sources = []
for source in sources:
if is_string(source):
new_sources.append(source)
else:
func_sources.append(source)
if not func_sources:
return new_sources
if self.inplace and not is_sequence(extension):
build_dir = self.ext_target_dir
else:
if is_sequence(extension):
name = extension[0]
# if 'include_dirs' not in extension[1]:
# extension[1]['include_dirs'] = []
# incl_dirs = extension[1]['include_dirs']
else:
name = extension.name
# incl_dirs = extension.include_dirs
#if self.build_src not in incl_dirs:
# incl_dirs.append(self.build_src)
build_dir = os.path.join(*([self.build_src]\
+name.split('.')[:-1]))
self.mkpath(build_dir)
for func in func_sources:
source = func(extension, build_dir)
if not source:
continue
if is_sequence(source):
[log.info(" adding '%s' to sources." % (s,)) for s in source]
new_sources.extend(source)
else:
log.info(" adding '%s' to sources." % (source,))
new_sources.append(source)
return new_sources
def filter_py_files(self, sources):
return self.filter_files(sources, ['.py'])
def filter_h_files(self, sources):
return self.filter_files(sources, ['.h', '.hpp', '.inc'])
def filter_files(self, sources, exts = []):
new_sources = []
files = []
for source in sources:
(base, ext) = os.path.splitext(source)
if ext in exts:
files.append(source)
else:
new_sources.append(source)
return new_sources, files
def template_sources(self, sources, extension):
new_sources = []
if is_sequence(extension):
depends = extension[1].get('depends')
include_dirs = extension[1].get('include_dirs')
else:
depends = extension.depends
include_dirs = extension.include_dirs
for source in sources:
(base, ext) = os.path.splitext(source)
if ext == '.src': # Template file
if self.inplace:
target_dir = os.path.dirname(base)
else:
target_dir = appendpath(self.build_src, os.path.dirname(base))
self.mkpath(target_dir)
target_file = os.path.join(target_dir, os.path.basename(base))
if (self.force or newer_group([source] + depends, target_file)):
if _f_pyf_ext_match(base):
log.info("from_template:> %s" % (target_file))
outstr = process_f_file(source)
else:
log.info("conv_template:> %s" % (target_file))
outstr = process_c_file(source)
fid = open(target_file, 'w')
fid.write(outstr)
fid.close()
if _header_ext_match(target_file):
d = os.path.dirname(target_file)
if d not in include_dirs:
log.info(" adding '%s' to include_dirs." % (d))
include_dirs.append(d)
new_sources.append(target_file)
else:
new_sources.append(source)
return new_sources
def pyrex_sources(self, sources, extension):
new_sources = []
ext_name = extension.name.split('.')[-1]
for source in sources:
(base, ext) = os.path.splitext(source)
if ext == '.pyx':
target_file = self.generate_a_pyrex_source(base, ext_name,
source,
extension)
new_sources.append(target_file)
else:
new_sources.append(source)
return new_sources
def generate_a_pyrex_source(self, base, ext_name, source, extension):
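        # Compile the .pyx file with Pyrex when it is available and the target .c
        # is out of date; otherwise fall back to a previously generated .c file,
        # or raise if neither Pyrex nor an existing target is present.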
if self.inplace or not have_pyrex():
target_dir = os.path.dirname(base)
else:
target_dir = appendpath(self.build_src, os.path.dirname(base))
target_file = os.path.join(target_dir, ext_name + '.c')
depends = [source] + extension.depends
if self.force or newer_group(depends, target_file, 'newer'):
if have_pyrex():
import Pyrex.Compiler.Main
log.info("pyrexc:> %s" % (target_file))
self.mkpath(target_dir)
options = Pyrex.Compiler.Main.CompilationOptions(
defaults=Pyrex.Compiler.Main.default_options,
include_path=extension.include_dirs,
output_file=target_file)
pyrex_result = Pyrex.Compiler.Main.compile(source,
options=options)
if pyrex_result.num_errors != 0:
raise DistutilsError("%d errors while compiling %r with Pyrex" \
% (pyrex_result.num_errors, source))
elif os.path.isfile(target_file):
log.warn("Pyrex required for compiling %r but not available,"\
" using old target %r"\
% (source, target_file))
else:
raise DistutilsError("Pyrex required for compiling %r"\
" but notavailable" % (source,))
return target_file
def f2py_sources(self, sources, extension):
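        # Split sources into .pyf interface files (at most one is allowed), plain
        # Fortran sources, and everything else; run f2py to (re)generate the
        # extension module wrapper when it is out of date, and make sure
        # fortranobject.c/.h are copied and added to sources/include_dirs.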
new_sources = []
f2py_sources = []
f_sources = []
f2py_targets = {}
target_dirs = []
ext_name = extension.name.split('.')[-1]
skip_f2py = 0
for source in sources:
(base, ext) = os.path.splitext(source)
if ext == '.pyf': # F2PY interface file
if self.inplace:
target_dir = os.path.dirname(base)
else:
target_dir = appendpath(self.build_src, os.path.dirname(base))
if os.path.isfile(source):
name = get_f2py_modulename(source)
if name != ext_name:
raise DistutilsSetupError('mismatch of extension names: %s '
'provides %r but expected %r' % (
source, name, ext_name))
target_file = os.path.join(target_dir, name+'module.c')
else:
log.debug(' source %s does not exist: skipping f2py\'ing.' \
% (source))
name = ext_name
skip_f2py = 1
target_file = os.path.join(target_dir, name+'module.c')
if not os.path.isfile(target_file):
log.warn(' target %s does not exist:\n '\
'Assuming %smodule.c was generated with '\
'"build_src --inplace" command.' \
% (target_file, name))
target_dir = os.path.dirname(base)
target_file = os.path.join(target_dir, name+'module.c')
if not os.path.isfile(target_file):
raise DistutilsSetupError("%r missing" % (target_file,))
log.info(' Yes! Using %r as up-to-date target.' \
% (target_file))
target_dirs.append(target_dir)
f2py_sources.append(source)
f2py_targets[source] = target_file
new_sources.append(target_file)
elif fortran_ext_match(ext):
f_sources.append(source)
else:
new_sources.append(source)
if not (f2py_sources or f_sources):
return new_sources
for d in target_dirs:
self.mkpath(d)
f2py_options = extension.f2py_options + self.f2py_opts
if self.distribution.libraries:
for name, build_info in self.distribution.libraries:
if name in extension.libraries:
f2py_options.extend(build_info.get('f2py_options', []))
log.info("f2py options: %s" % (f2py_options))
if f2py_sources:
if len(f2py_sources) != 1:
raise DistutilsSetupError(
'only one .pyf file is allowed per extension module but got'\
' more: %r' % (f2py_sources,))
source = f2py_sources[0]
target_file = f2py_targets[source]
target_dir = os.path.dirname(target_file) or '.'
depends = [source] + extension.depends
if (self.force or newer_group(depends, target_file, 'newer')) \
and not skip_f2py:
log.info("f2py: %s" % (source))
import numpy.f2py
numpy.f2py.run_main(f2py_options
+ ['--build-dir', target_dir, source])
else:
log.debug(" skipping '%s' f2py interface (up-to-date)" % (source))
else:
#XXX TODO: --inplace support for sdist command
if is_sequence(extension):
name = extension[0]
else: name = extension.name
target_dir = os.path.join(*([self.build_src]\
+name.split('.')[:-1]))
target_file = os.path.join(target_dir, ext_name + 'module.c')
new_sources.append(target_file)
depends = f_sources + extension.depends
if (self.force or newer_group(depends, target_file, 'newer')) \
and not skip_f2py:
log.info("f2py:> %s" % (target_file))
self.mkpath(target_dir)
import numpy.f2py
numpy.f2py.run_main(f2py_options + ['--lower',
'--build-dir', target_dir]+\
['-m', ext_name]+f_sources)
else:
log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\
% (target_file))
if not os.path.isfile(target_file):
raise DistutilsError("f2py target file %r not generated" % (target_file,))
target_c = os.path.join(self.build_src, 'fortranobject.c')
target_h = os.path.join(self.build_src, 'fortranobject.h')
log.info(" adding '%s' to sources." % (target_c))
new_sources.append(target_c)
if self.build_src not in extension.include_dirs:
log.info(" adding '%s' to include_dirs." \
% (self.build_src))
extension.include_dirs.append(self.build_src)
if not skip_f2py:
import numpy.f2py
d = os.path.dirname(numpy.f2py.__file__)
source_c = os.path.join(d, 'src', 'fortranobject.c')
source_h = os.path.join(d, 'src', 'fortranobject.h')
if newer(source_c, target_c) or newer(source_h, target_h):
self.mkpath(os.path.dirname(target_c))
self.copy_file(source_c, target_c)
self.copy_file(source_h, target_h)
else:
if not os.path.isfile(target_c):
raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,))
if not os.path.isfile(target_h):
raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,))
for name_ext in ['-f2pywrappers.f', '-f2pywrappers2.f90']:
filename = os.path.join(target_dir, ext_name + name_ext)
if os.path.isfile(filename):
log.info(" adding '%s' to sources." % (filename))
f_sources.append(filename)
return new_sources + f_sources
def swig_sources(self, sources, extension):
# Assuming SWIG 1.3.14 or later. See compatibility note in
# http://www.swig.org/Doc1.3/Python.html#Python_nn6
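        # Split sources into .i interface files and everything else; decide C vs.
        # C++ wrapping per interface, compute the <name>_wrap.c/.cpp target (or
        # reuse an existing one when the .i file is missing), and run swig only
        # for out-of-date targets.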
new_sources = []
swig_sources = []
swig_targets = {}
target_dirs = []
py_files = [] # swig generated .py files
target_ext = '.c'
if '-c++' in extension.swig_opts:
typ = 'c++'
is_cpp = True
extension.swig_opts.remove('-c++')
elif self.swig_cpp:
typ = 'c++'
is_cpp = True
else:
typ = None
is_cpp = False
skip_swig = 0
ext_name = extension.name.split('.')[-1]
for source in sources:
(base, ext) = os.path.splitext(source)
if ext == '.i': # SWIG interface file
# the code below assumes that the sources list
                # contains no more than one .i SWIG interface file
if self.inplace:
target_dir = os.path.dirname(base)
py_target_dir = self.ext_target_dir
else:
target_dir = appendpath(self.build_src, os.path.dirname(base))
py_target_dir = target_dir
if os.path.isfile(source):
name = get_swig_modulename(source)
if name != ext_name[1:]:
raise DistutilsSetupError(
'mismatch of extension names: %s provides %r'
' but expected %r' % (source, name, ext_name[1:]))
if typ is None:
typ = get_swig_target(source)
is_cpp = typ=='c++'
else:
typ2 = get_swig_target(source)
if typ2 is None:
log.warn('source %r does not define swig target, assuming %s swig target' \
% (source, typ))
elif typ!=typ2:
log.warn('expected %r but source %r defines %r swig target' \
% (typ, source, typ2))
if typ2=='c++':
log.warn('resetting swig target to c++ (some targets may have .c extension)')
is_cpp = True
else:
log.warn('assuming that %r has c++ swig target' % (source))
if is_cpp:
target_ext = '.cpp'
target_file = os.path.join(target_dir, '%s_wrap%s' \
% (name, target_ext))
else:
log.warn(' source %s does not exist: skipping swig\'ing.' \
% (source))
name = ext_name[1:]
skip_swig = 1
target_file = _find_swig_target(target_dir, name)
if not os.path.isfile(target_file):
log.warn(' target %s does not exist:\n '\
'Assuming %s_wrap.{c,cpp} was generated with '\
'"build_src --inplace" command.' \
% (target_file, name))
target_dir = os.path.dirname(base)
target_file = _find_swig_target(target_dir, name)
if not os.path.isfile(target_file):
raise DistutilsSetupError("%r missing" % (target_file,))
log.warn(' Yes! Using %r as up-to-date target.' \
% (target_file))
target_dirs.append(target_dir)
new_sources.append(target_file)
py_files.append(os.path.join(py_target_dir, name+'.py'))
swig_sources.append(source)
swig_targets[source] = new_sources[-1]
else:
new_sources.append(source)
if not swig_sources:
return new_sources
if skip_swig:
return new_sources + py_files
for d in target_dirs:
self.mkpath(d)
swig = self.swig or self.find_swig()
swig_cmd = [swig, "-python"] + extension.swig_opts
if is_cpp:
swig_cmd.append('-c++')
for d in extension.include_dirs:
swig_cmd.append('-I'+d)
for source in swig_sources:
target = swig_targets[source]
depends = [source] + extension.depends
if self.force or newer_group(depends, target, 'newer'):
log.info("%s: %s" % (os.path.basename(swig) \
+ (is_cpp and '++' or ''), source))
self.spawn(swig_cmd + self.swig_opts \
+ ["-o", target, '-outdir', py_target_dir, source])
else:
log.debug(" skipping '%s' swig interface (up-to-date)" \
% (source))
return new_sources + py_files
_f_pyf_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match
_header_ext_match = re.compile(r'.*[.](inc|h|hpp)\Z', re.I).match
#### SWIG related auxiliary functions ####
_swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P<package>[\w_]+)".*\)|)\s*(?P<name>[\w_]+)',
re.I).match
_has_c_header = re.compile(r'-[*]-\s*c\s*-[*]-', re.I).search
_has_cpp_header = re.compile(r'-[*]-\s*c[+][+]\s*-[*]-', re.I).search
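# get_swig_target() inspects only the first line of a .i file for an emacs-style
# "-*- c -*-" or "-*- c++ -*-" marker to decide which wrapper language to assume.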
def get_swig_target(source):
f = open(source, 'r')
result = None
line = f.readline()
if _has_cpp_header(line):
result = 'c++'
if _has_c_header(line):
result = 'c'
f.close()
return result
def get_swig_modulename(source):
f = open(source, 'r')
name = None
for line in f:
m = _swig_module_name_match(line)
if m:
name = m.group('name')
break
f.close()
return name
def _find_swig_target(target_dir, name):
for ext in ['.cpp', '.c']:
target = os.path.join(target_dir, '%s_wrap%s' % (name, ext))
if os.path.isfile(target):
break
return target
#### F2PY related auxiliary functions ####
_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]+)',
re.I).match
_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]*?'\
                                           r'__user__[\w_]*)', re.I).match
def get_f2py_modulename(source):
name = None
f = open(source)
for line in f:
m = _f2py_module_name_match(line)
if m:
if _f2py_user_module_name_match(line): # skip *__user__* names
continue
name = m.group('name')
break
f.close()
return name
##########################################
|
bsd-3-clause
|
BonexGu/Blik2D-SDK
|
Blik2D/addon/opencv-3.1.0_for_blik/samples/python/color_histogram.py
|
5
|
1373
|
#!/usr/bin/env python
'''
Video histogram sample to show live histogram of video
Keys:
ESC - exit
'''
import numpy as np
import cv2
# built-in modules
import sys
# local modules
import video
if __name__ == '__main__':
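    # Build a 180x256 hue/saturation legend: hue varies down the rows, saturation
    # across the columns, value fixed at 255; converted to BGR for display and
    # later used to tint the 2-D H-S histogram.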
hsv_map = np.zeros((180, 256, 3), np.uint8)
h, s = np.indices(hsv_map.shape[:2])
hsv_map[:,:,0] = h
hsv_map[:,:,1] = s
hsv_map[:,:,2] = 255
hsv_map = cv2.cvtColor(hsv_map, cv2.COLOR_HSV2BGR)
cv2.imshow('hsv_map', hsv_map)
cv2.namedWindow('hist', 0)
hist_scale = 10
def set_scale(val):
global hist_scale
hist_scale = val
cv2.createTrackbar('scale', 'hist', hist_scale, 32, set_scale)
try:
fn = sys.argv[1]
except:
fn = 0
cam = video.create_capture(fn, fallback='synth:bg=../data/baboon.jpg:class=chess:noise=0.05')
while True:
flag, frame = cam.read()
cv2.imshow('camera', frame)
small = cv2.pyrDown(frame)
hsv = cv2.cvtColor(small, cv2.COLOR_BGR2HSV)
dark = hsv[...,2] < 32
hsv[dark] = 0
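        # 2-D histogram over hue (180 bins) and saturation (256 bins), scaled by the
        # trackbar value, clipped to [0, 1], and used to modulate the legend image.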
h = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
h = np.clip(h*0.005*hist_scale, 0, 1)
vis = hsv_map*h[:,:,np.newaxis] / 255.0
cv2.imshow('hist', vis)
ch = 0xFF & cv2.waitKey(1)
if ch == 27:
break
cv2.destroyAllWindows()
|
mit
|
github-borat/cinder
|
cinder/volume/rpcapi.py
|
4
|
7290
|
# Copyright 2012, Intel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the volume RPC API.
"""
from oslo.config import cfg
from oslo import messaging
from cinder.openstack.common import jsonutils
from cinder import rpc
CONF = cfg.CONF
class VolumeAPI(object):
'''Client side of the volume rpc API.
API version history:
1.0 - Initial version.
1.1 - Adds clone volume option to create_volume.
1.2 - Add publish_service_capabilities() method.
1.3 - Pass all image metadata (not just ID) in copy_volume_to_image.
1.4 - Add request_spec, filter_properties and
allow_reschedule arguments to create_volume().
1.5 - Add accept_transfer.
1.6 - Add extend_volume.
1.7 - Adds host_name parameter to attach_volume()
to allow attaching to host rather than instance.
1.8 - Add migrate_volume, rename_volume.
1.9 - Add new_user and new_project to accept_transfer.
1.10 - Add migrate_volume_completion, remove rename_volume.
1.11 - Adds mode parameter to attach_volume()
to support volume read-only attaching.
1.12 - Adds retype.
1.13 - Adds create_export.
1.14 - Adds reservation parameter to extend_volume().
1.15 - Adds manage_existing and unmanage_only flag to delete_volume.
1.16 - Removes create_export.
'''
BASE_RPC_API_VERSION = '1.0'
def __init__(self, topic=None):
super(VolumeAPI, self).__init__()
target = messaging.Target(topic=CONF.volume_topic,
version=self.BASE_RPC_API_VERSION)
self.client = rpc.get_client(target, '1.15')
def create_volume(self, ctxt, volume, host,
request_spec, filter_properties,
allow_reschedule=True,
snapshot_id=None, image_id=None,
source_volid=None):
cctxt = self.client.prepare(server=host, version='1.4')
request_spec_p = jsonutils.to_primitive(request_spec)
cctxt.cast(ctxt, 'create_volume',
volume_id=volume['id'],
request_spec=request_spec_p,
filter_properties=filter_properties,
allow_reschedule=allow_reschedule,
snapshot_id=snapshot_id,
image_id=image_id,
                   source_volid=source_volid)
def delete_volume(self, ctxt, volume, unmanage_only=False):
cctxt = self.client.prepare(server=volume['host'], version='1.15')
cctxt.cast(ctxt, 'delete_volume',
volume_id=volume['id'],
unmanage_only=unmanage_only)
def create_snapshot(self, ctxt, volume, snapshot):
cctxt = self.client.prepare(server=volume['host'])
cctxt.cast(ctxt, 'create_snapshot', volume_id=volume['id'],
snapshot_id=snapshot['id'])
def delete_snapshot(self, ctxt, snapshot, host):
cctxt = self.client.prepare(server=host)
cctxt.cast(ctxt, 'delete_snapshot', snapshot_id=snapshot['id'])
def attach_volume(self, ctxt, volume, instance_uuid, host_name,
mountpoint, mode):
cctxt = self.client.prepare(server=volume['host'], version='1.11')
return cctxt.call(ctxt, 'attach_volume',
volume_id=volume['id'],
instance_uuid=instance_uuid,
host_name=host_name,
mountpoint=mountpoint,
mode=mode)
def detach_volume(self, ctxt, volume):
cctxt = self.client.prepare(server=volume['host'])
return cctxt.call(ctxt, 'detach_volume', volume_id=volume['id'])
def copy_volume_to_image(self, ctxt, volume, image_meta):
cctxt = self.client.prepare(server=volume['host'], version='1.3')
cctxt.cast(ctxt, 'copy_volume_to_image', volume_id=volume['id'],
image_meta=image_meta)
def initialize_connection(self, ctxt, volume, connector):
cctxt = self.client.prepare(server=volume['host'])
return cctxt.call(ctxt, 'initialize_connection',
volume_id=volume['id'],
connector=connector)
def terminate_connection(self, ctxt, volume, connector, force=False):
cctxt = self.client.prepare(server=volume['host'])
return cctxt.call(ctxt, 'terminate_connection', volume_id=volume['id'],
connector=connector, force=force)
def publish_service_capabilities(self, ctxt):
cctxt = self.client.prepare(fanout=True, version='1.2')
cctxt.cast(ctxt, 'publish_service_capabilities')
def accept_transfer(self, ctxt, volume, new_user, new_project):
cctxt = self.client.prepare(server=volume['host'], version='1.9')
cctxt.cast(ctxt, 'accept_transfer', volume_id=volume['id'],
new_user=new_user, new_project=new_project)
def extend_volume(self, ctxt, volume, new_size, reservations):
cctxt = self.client.prepare(server=volume['host'], version='1.14')
cctxt.cast(ctxt, 'extend_volume', volume_id=volume['id'],
new_size=new_size, reservations=reservations)
def migrate_volume(self, ctxt, volume, dest_host, force_host_copy):
cctxt = self.client.prepare(server=volume['host'], version='1.8')
host_p = {'host': dest_host.host,
'capabilities': dest_host.capabilities}
cctxt.cast(ctxt, 'migrate_volume', volume_id=volume['id'],
host=host_p, force_host_copy=force_host_copy)
def migrate_volume_completion(self, ctxt, volume, new_volume, error):
cctxt = self.client.prepare(server=volume['host'], version='1.10')
return cctxt.call(ctxt, 'migrate_volume_completion',
volume_id=volume['id'],
new_volume_id=new_volume['id'],
error=error)
def retype(self, ctxt, volume, new_type_id, dest_host,
migration_policy='never', reservations=None):
cctxt = self.client.prepare(server=volume['host'], version='1.12')
host_p = {'host': dest_host.host,
'capabilities': dest_host.capabilities}
cctxt.cast(ctxt, 'retype', volume_id=volume['id'],
new_type_id=new_type_id, host=host_p,
migration_policy=migration_policy,
reservations=reservations)
def manage_existing(self, ctxt, volume, ref):
cctxt = self.client.prepare(server=volume['host'], version='1.15')
cctxt.cast(ctxt, 'manage_existing', volume_id=volume['id'], ref=ref)
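# Usage sketch (the context and volume objects below are placeholders for a
# request context and a volume dict obtained elsewhere): callers construct the
# client once and invoke the versioned methods on it, e.g.
#
#   volume_api = VolumeAPI()
#   volume_api.delete_volume(ctxt, volume, unmanage_only=False)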
|
apache-2.0
|
tengyifei/grpc
|
examples/python/helloworld/run_codegen.py
|
10
|
1765
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Runs protoc with the gRPC plugin to generate messages and gRPC stubs."""
from grpc.tools import protoc
protoc.main(
(
'',
'-I../../protos',
'--python_out=.',
'--grpc_python_out=.',
'../../protos/helloworld.proto',
)
)
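# For reference, the protoc.main() invocation above corresponds roughly to the
# following command line (a sketch; the exact module path depends on the
# installed grpcio-tools package):
#   python -m grpc.tools.protoc -I../../protos --python_out=. --grpc_python_out=. \
#       ../../protos/helloworld.proto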
|
bsd-3-clause
|
coxmediagroup/googleads-python-lib
|
examples/dfp/v201411/company_service/get_all_companies.py
|
4
|
2020
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all companies.
To create companies, run create_companies.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CompanyService.getCompaniesByStatement
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
company_service = client.GetService('CompanyService', version='v201411')
# Create statement object to select all companies.
statement = dfp.FilterStatement()
# Get companies by statement.
while True:
response = company_service.getCompaniesByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for company in response['results']:
print ('Company with ID \'%s\', name \'%s\', and type \'%s\' was found.'
% (company['id'], company['name'], company['type']))
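      # Advance the statement offset to request the next page of results.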
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
|
apache-2.0
|
businesstep/blogger
|
vendor/doctrine/orm/docs/en/conf.py
|
2448
|
6497
|
# -*- coding: utf-8 -*-
#
# Doctrine 2 ORM documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 3 18:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_exts'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configurationblock']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Doctrine 2 ORM'
copyright = u'2010-12, Doctrine Project Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'doctrine'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Doctrine2ORMdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation',
u'Doctrine Project Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
primary_domain = "dcorm"
def linkcode_resolve(domain, info):
if domain == 'dcorm':
return 'http://'
return None
|
mit
|
mruffalo/sysv_ipc
|
tests/test_message_queues.py
|
1
|
14661
|
# Python imports
# Don't add any from __future__ imports here. This code should execute
# against standard Python.
import unittest
import time
import os
# Project imports
import sysv_ipc
# Hack -- add tests directory to sys.path so Python 3 can find base.py.
import sys
sys.path.insert(0, os.path.join(os.getcwd(), 'tests')) # noqa - tell flake8 to chill
import base as tests_base
# Not tested --
# - mode seems to be settable and readable, but ignored by the OS
# - max_message_size of init
class MessageQueueTestBase(tests_base.Base):
"""base class for MessageQueue test classes"""
def setUp(self):
self.mq = sysv_ipc.MessageQueue(None, sysv_ipc.IPC_CREX)
def tearDown(self):
if self.mq:
self.mq.remove()
def assertWriteToReadOnlyPropertyFails(self, property_name, value):
"""test that writing to a readonly property raises TypeError"""
tests_base.Base.assertWriteToReadOnlyPropertyFails(self, self.mq,
property_name, value)
class TestMessageQueueCreation(MessageQueueTestBase):
"""Exercise stuff related to creating MessageQueue"""
def test_no_flags(self):
"""tests that opening a MessageQueue with no flags opens the existing
MessageQueue and doesn't create a new MessageQueue"""
mem_copy = sysv_ipc.MessageQueue(self.mq.key)
self.assertEqual(self.mq.key, mem_copy.key)
def test_IPC_CREAT_existing(self):
"""tests sysv_ipc.IPC_CREAT to open an existing MessageQueue without IPC_EXCL"""
mem_copy = sysv_ipc.MessageQueue(self.mq.key, sysv_ipc.IPC_CREAT)
self.assertEqual(self.mq.key, mem_copy.key)
def test_IPC_CREAT_new(self):
"""tests sysv_ipc.IPC_CREAT to create a new MessageQueue without IPC_EXCL"""
# I can't pass None for the name unless I also pass IPC_EXCL.
key = tests_base.make_key()
# Note: this method of finding an unused key is vulnerable to a race
# condition. It's good enough for test, but don't copy it for use in
# production code!
key_is_available = False
while not key_is_available:
try:
mem = sysv_ipc.MessageQueue(key)
mem.detach()
mem.remove()
except sysv_ipc.ExistentialError:
key_is_available = True
else:
key = tests_base.make_key()
mq = sysv_ipc.MessageQueue(key, sysv_ipc.IPC_CREAT)
self.assertIsNotNone(mq)
mq.remove()
def test_IPC_EXCL(self):
"""tests IPC_CREAT | IPC_EXCL prevents opening an existing MessageQueue"""
with self.assertRaises(sysv_ipc.ExistentialError):
sysv_ipc.MessageQueue(self.mq.key, sysv_ipc.IPC_CREX)
def test_randomly_generated_key(self):
"""tests that the randomly-generated key works"""
# This is tested implicitly elsewhere but I want to test it explicitly
mq = sysv_ipc.MessageQueue(None, sysv_ipc.IPC_CREX)
self.assertIsNotNone(mq.key)
self.assertGreaterEqual(mq.key, sysv_ipc.KEY_MIN)
self.assertLessEqual(mq.key, sysv_ipc.KEY_MAX)
mq.remove()
# don't bother testing mode, it's ignored by the OS?
def test_default_flags(self):
"""tests that the flag is 0 by default (==> open existing)"""
mq = sysv_ipc.MessageQueue(self.mq.key)
self.assertEqual(self.mq.id, mq.id)
def test_kwargs(self):
"""ensure init accepts keyword args as advertised"""
# mode 0x180 = 0600. Octal is difficult to express in Python 2/3 compatible code.
mq = sysv_ipc.MessageQueue(None, flags=sysv_ipc.IPC_CREX, mode=0x180,
max_message_size=256)
mq.remove()
class TestMessageQueueSendReceive(MessageQueueTestBase):
"""Exercise send() and receive()"""
def test_simple_send_receive(self):
test_string = b'abcdefg'
self.mq.send(test_string)
self.assertEqual(self.mq.receive(), (test_string, 1))
def test_message_type_send(self):
"""test the msg type param of send()"""
test_string = b'abcdefg'
self.mq.send(test_string, type=2)
self.assertEqual(self.mq.receive(), (test_string, 2))
with self.assertRaises(ValueError):
self.mq.send(test_string, type=-1)
def test_message_type_receive_default_order(self):
"""test that receive() doesn't filter by type by default"""
for i in range(1, 4):
self.mq.send('type' + str(i), type=i)
# default order is FIFO.
self.assertEqual(self.mq.receive(), (b'type1', 1))
self.assertEqual(self.mq.receive(), (b'type2', 2))
self.assertEqual(self.mq.receive(), (b'type3', 3))
self.assertEqual(self.mq.current_messages, 0)
    # The bug referenced below affects use of a negative type. Supposedly it's only on 32-bit binaries
    # running on 64-bit systems, but I see it using 64-bit Python under 64-bit Linux.
# A less demanding version of this test follows so Linux doesn't go entirely untested.
@unittest.skipIf(sys.platform.startswith('linux'),
'msgrcv() buggy on Linux: https://bugzilla.kernel.org/show_bug.cgi?id=94181')
def test_message_type_receive_specific_order(self):
        # Place messages in Q w/highest type first
for i in range(4, 0, -1):
self.mq.send('type' + str(i), type=i)
# receive(type=-2) should get "the first message of the lowest type that is <= the absolute
# value of type."
self.assertEqual(self.mq.receive(type=-2), (b'type2', 2))
# receive(type=3) should get "the first message of that type."
self.assertEqual(self.mq.receive(type=3), (b'type3', 3))
# Ensure the others are still there.
self.assertEqual(self.mq.receive(), (b'type4', 4))
self.assertEqual(self.mq.receive(), (b'type1', 1))
self.assertEqual(self.mq.current_messages, 0)
def test_message_type_receive_specific_order_no_negative_type(self):
"""test that receive() filters appropriately on positive msg type (softer test for Linux)"""
        # Place messages in Q w/highest type first
for i in range(4, 0, -1):
self.mq.send('type' + str(i), type=i)
# receive(type=3) should get "the first message of that type."
self.assertEqual(self.mq.receive(type=3), (b'type3', 3))
# Ensure the others are still there.
self.assertEqual(self.mq.receive(), (b'type4', 4))
self.assertEqual(self.mq.receive(), (b'type2', 2))
self.assertEqual(self.mq.receive(), (b'type1', 1))
self.assertEqual(self.mq.current_messages, 0)
def test_send_non_blocking(self):
"""Test that send(block=False) raises BusyError as appropriate"""
# This is a bit tricky since the OS has its own ideas about when the queue is full.
# I would like to fill it precisely to the brim and then make one last call to send(),
# but I don't know exactly when the OS will decide the Q is full. Instead, I just keep
# stuffing messages in until I get some kind of an error. If it's a BusyError, all is well.
done = False
while not done:
try:
self.mq.send('x', block=False)
except sysv_ipc.BusyError:
done = True
def test_receive_non_blocking(self):
"""Test that receive(block=False) raises BusyError as appropriate"""
with self.assertRaises(sysv_ipc.BusyError):
self.mq.receive(block=False)
self.mq.send('x', type=3)
with self.assertRaises(sysv_ipc.BusyError):
self.mq.receive(type=2, block=False)
def test_ascii_null(self):
"""ensure I can send & receive 0x00"""
# Use an explicit null byte; bytes(0) would produce an empty bytes object rather than 0x00.
test_string = b'abc\x00def'
self.mq.send(test_string)
self.assertEqual(self.mq.receive(), (test_string, 1))
def test_utf8(self):
"""Test writing encoded Unicode"""
test_string = 'G' + '\u00F6' + 'teborg'
test_string = test_string.encode('utf-8')
self.mq.send(test_string)
self.assertEqual(self.mq.receive(), (test_string, 1))
def test_send_kwargs(self):
"""ensure send() accepts keyword args as advertised"""
self.mq.send(b'x', block=True, type=1)
def test_receive_kwargs(self):
"""ensure receive() accepts keyword args as advertised"""
self.mq.send(b'x', block=True, type=1)
self.mq.receive(block=False, type=0)
class TestMessageQueueRemove(MessageQueueTestBase):
"""Exercise mq.remove()"""
def test_remove(self):
"""tests that mq.remove() works"""
self.mq.remove()
with self.assertRaises(sysv_ipc.ExistentialError):
sysv_ipc.MessageQueue(self.mq.key)
# Wipe this out so that self.tearDown() doesn't crash.
self.mq = None
class TestMessageQueuePropertiesAndAttributes(MessageQueueTestBase):
"""Exercise props and attrs"""
def test_property_key(self):
"""exercise MessageQueue.key"""
self.assertGreaterEqual(self.mq.key, sysv_ipc.KEY_MIN)
self.assertLessEqual(self.mq.key, sysv_ipc.KEY_MAX)
self.assertWriteToReadOnlyPropertyFails('key', 42)
# The POSIX spec says "msgget() shall return a non-negative integer", but OS X sometimes
# returns a negative number like -1765146624. My guess is that they're using a UINT somewhere
# which exceeds INT_MAX and hence looks negative, or they just don't care about the spec.
# msgget() ref: http://pubs.opengroup.org/onlinepubs/009695399/functions/msgget.html
@unittest.skipIf(sys.platform.startswith('darwin'),
'OS X message queues sometimes return negative ids')
def test_property_id(self):
"""exercise MessageQueue.id"""
self.assertGreaterEqual(self.mq.id, 0)
self.assertWriteToReadOnlyPropertyFails('id', 42)
def test_property_id_weak_for_darwin(self):
"""exercise MessageQueue.id with the Darwin-failing test removed"""
self.assertWriteToReadOnlyPropertyFails('id', 42)
def test_attribute_max_size(self):
"""exercise MessageQueue.max_size"""
self.assertGreaterEqual(self.mq.max_size, 0)
# writing to max_size should not fail, and I test that here. However, as documented,
# setting it is no guarantee that the OS will respect it. Caveat emptor.
self.mq.max_size = 2048
def test_property_last_send_time(self):
"""exercise MessageQueue.last_send_time"""
self.assertEqual(self.mq.last_send_time, 0)
# I can't record exactly when this send() happens, but as long as it is within 5 seconds
# of the assertion happening, this test will pass.
self.mq.send('x')
self.assertLess(self.mq.last_send_time - time.time(), 5)
self.assertWriteToReadOnlyPropertyFails('last_send_time', 42)
def test_property_last_receive_time(self):
"""exercise MessageQueue.last_receive_time"""
self.assertEqual(self.mq.last_receive_time, 0)
self.mq.send('x')
self.mq.receive()
# I can't record exactly when this receive() happens, but as long as it is within 5 seconds
# of the assertion happening, this test will pass.
self.assertLess(self.mq.last_receive_time - time.time(), 5)
self.assertWriteToReadOnlyPropertyFails('last_receive_time', 42)
# Changing the mode uses IPC_SET, and that should change last_change_time (msg_ctime in C)
# according to the docs that bother to explain what msg_ctime is at all. I don't think
# OS X ever sets msg_ctime. (Setting mq.uid and mq.mode didn't do the trick.)
@unittest.skipIf(sys.platform.startswith('darwin'),
'last_change_time not implemented properly by OS X')
def test_property_last_change_time(self):
"""exercise MessageQueue.last_change_time"""
# Note that last_change_time doesn't start out as 0 (unlike e.g. last_receive_time), so
# I don't test that here.
original_last_change_time = self.mq.last_change_time
# Sleep to ensure the following statement actually happens at a different "time"
time.sleep(1)
# This might seem like a no-op, but setting the UID to any value triggers a call that
# should set last_change_time.
self.mq.uid = self.mq.uid
self.assertLess(self.mq.last_change_time - time.time(), 5)
# Ensure the time actually changed.
self.assertNotEqual(self.mq.last_change_time, original_last_change_time)
self.assertWriteToReadOnlyPropertyFails('last_change_time', 42)
def test_property_last_send_pid(self):
"""exercise MessageQueue.last_send_pid"""
self.assertEqual(self.mq.last_send_pid, 0)
self.mq.send('x')
self.assertEqual(self.mq.last_send_pid, os.getpid())
self.assertWriteToReadOnlyPropertyFails('last_send_pid', 42)
def test_property_last_receive_pid(self):
"""exercise MessageQueue.last_receive_pid"""
self.assertEqual(self.mq.last_receive_pid, 0)
self.mq.send('x')
self.mq.receive()
self.assertEqual(self.mq.last_receive_pid, os.getpid())
self.assertWriteToReadOnlyPropertyFails('last_receive_pid', 42)
def test_property_current_messages(self):
"""exercise MessageQueue.current_messages"""
self.assertEqual(self.mq.current_messages, 0)
self.mq.send('x')
self.mq.send('x')
self.mq.send('x')
self.assertEqual(self.mq.current_messages, 3)
self.mq.receive()
self.assertEqual(self.mq.current_messages, 2)
self.mq.receive()
self.assertEqual(self.mq.current_messages, 1)
self.mq.receive()
self.assertEqual(self.mq.current_messages, 0)
self.assertWriteToReadOnlyPropertyFails('current_messages', 42)
def test_attribute_uid(self):
"""exercise MessageQueue.uid"""
self.assertEqual(self.mq.uid, os.geteuid())
def test_attribute_gid(self):
"""exercise MessageQueue.gid"""
self.assertEqual(self.mq.gid, os.getgid())
def test_attribute_cuid(self):
"""exercise MessageQueue.cuid"""
self.assertEqual(self.mq.cuid, os.geteuid())
self.assertWriteToReadOnlyPropertyFails('cuid', 42)
def test_attribute_cgid(self):
"""exercise MessageQueue.cgid"""
self.assertEqual(self.mq.cgid, os.getgid())
self.assertWriteToReadOnlyPropertyFails('cgid', 42)
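# Illustrative usage sketch (not part of the test suite; the key and flags below are assumptions).
# The classes above boil down to exercising this API:
#
#     mq = sysv_ipc.MessageQueue(None, flags=sysv_ipc.IPC_CREX)
#     mq.send(b'hello', type=2)             # tag the message with type 2
#     body, msg_type = mq.receive(type=2)   # fetch only messages of type 2 -> (b'hello', 2)
#     mq.remove()                           # destroy the queue when done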
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
jamilatta/scielo-manager
|
scielomanager/journalmanager/context_processors.py
|
2
|
2205
|
# coding: utf-8
from django.conf import settings
from journalmanager import models
from maintenancewindow import models as maintenance_models
def dynamic_template_inheritance(request):
"""
Switches between base_lv0.html and base_lv1.html
"""
if request.GET.get('popup', None):
return {'dynamic_tpl': 'base_lv0.html'}
else:
return {'dynamic_tpl': 'base_lv1.html'}
def access_to_settings(request):
return {'SETTINGS': settings}
def show_system_notes(request):
"""
Add system notes (maintenance events, notes, etc.) to the context
"""
def wrap():
return maintenance_models.Event.objects.scheduled_events()
return {'system_notes': wrap}
def show_system_notes_blocking_users(request):
"""
Add system notes that are blocking user access
(maintenance events, notes, etc.) to the context
"""
def wrap():
return maintenance_models.Event.objects.blocking_users_scheduled_event()
return {'blocking_users_system_note': wrap}
def on_maintenance(request):
"""
Add the on_maintenance item to the context, indicating whether or not there
are active maintenance events.
"""
def wrap():
return maintenance_models.Event.on_maintenance()
return {'on_maintenance': wrap}
def show_user_collections(request):
"""
Adds `user_collections` item to the context, which is a
queryset of collections the user relates to.
"""
def wrap():
return models.Collection.objects.all_by_user(request.user)
if request.user.is_authenticated():
return {'user_collections': wrap}
else:
return {}
def add_default_collection(request):
if request.user.is_authenticated():
try:
collection = models.Collection.objects.get_default_by_user(request.user)
except models.Collection.DoesNotExist:
return {}
else:
def wrap_is_managed_by_user():
return collection.is_managed_by_user(request.user)
return {
'default_collection': collection,
'is_manager_of_default_collection': wrap_is_managed_by_user
}
else:
return {}
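# Illustrative sketch (not part of the original module): context processors like these are
# enabled via Django's settings; the exact dotted paths below are assumptions.
#
#     TEMPLATE_CONTEXT_PROCESSORS += (
#         'journalmanager.context_processors.dynamic_template_inheritance',
#         'journalmanager.context_processors.show_user_collections',
#         'journalmanager.context_processors.on_maintenance',
#     )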
|
bsd-2-clause
|
tadebayo/myedge
|
myvenv/Lib/encodings/cp863.py
|
272
|
34252
|
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP863.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp863',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
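# Illustrative round trip (assumes this module is registered under the 'cp863' name by the
# encodings package). Per the maps below, U+00E9 (e with acute) corresponds to byte 0x82:
#
#     'Québec'.encode('cp863')      # -> b'Qu\x82bec'
#     b'Qu\x82bec'.decode('cp863')  # -> 'Québec'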
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00b6, # PILCROW SIGN
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x2017, # DOUBLE LOW LINE
0x008e: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x008f: 0x00a7, # SECTION SIGN
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x0092: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x0095: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00a4, # CURRENCY SIGN
0x0099: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x009e: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00a6, # BROKEN BAR
0x00a1: 0x00b4, # ACUTE ACCENT
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00a8, # DIAERESIS
0x00a5: 0x00b8, # CEDILLA
0x00a6: 0x00b3, # SUPERSCRIPT THREE
0x00a7: 0x00af, # MACRON
0x00a8: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xc2' # 0x0084 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
'\xb6' # 0x0086 -> PILCROW SIGN
'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\u2017' # 0x008d -> DOUBLE LOW LINE
'\xc0' # 0x008e -> LATIN CAPITAL LETTER A WITH GRAVE
'\xa7' # 0x008f -> SECTION SIGN
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xc8' # 0x0091 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xca' # 0x0092 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xcb' # 0x0094 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xcf' # 0x0095 -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
'\xa4' # 0x0098 -> CURRENCY SIGN
'\xd4' # 0x0099 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xa2' # 0x009b -> CENT SIGN
'\xa3' # 0x009c -> POUND SIGN
'\xd9' # 0x009d -> LATIN CAPITAL LETTER U WITH GRAVE
'\xdb' # 0x009e -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
'\xa6' # 0x00a0 -> BROKEN BAR
'\xb4' # 0x00a1 -> ACUTE ACCENT
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\xa8' # 0x00a4 -> DIAERESIS
'\xb8' # 0x00a5 -> CEDILLA
'\xb3' # 0x00a6 -> SUPERSCRIPT THREE
'\xaf' # 0x00a7 -> MACRON
'\xce' # 0x00a8 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\u2310' # 0x00a9 -> REVERSED NOT SIGN
'\xac' # 0x00aa -> NOT SIGN
'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
'\xbe' # 0x00ad -> VULGAR FRACTION THREE QUARTERS
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
'\xb5' # 0x00e6 -> MICRO SIGN
'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
'\u221e' # 0x00ec -> INFINITY
'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
'\u2229' # 0x00ef -> INTERSECTION
'\u2261' # 0x00f0 -> IDENTICAL TO
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
'\xf7' # 0x00f6 -> DIVISION SIGN
'\u2248' # 0x00f7 -> ALMOST EQUAL TO
'\xb0' # 0x00f8 -> DEGREE SIGN
'\u2219' # 0x00f9 -> BULLET OPERATOR
'\xb7' # 0x00fa -> MIDDLE DOT
'\u221a' # 0x00fb -> SQUARE ROOT
'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
'\xb2' # 0x00fd -> SUPERSCRIPT TWO
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a2: 0x009b, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x0098, # CURRENCY SIGN
0x00a6: 0x00a0, # BROKEN BAR
0x00a7: 0x008f, # SECTION SIGN
0x00a8: 0x00a4, # DIAERESIS
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00af: 0x00a7, # MACRON
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b3: 0x00a6, # SUPERSCRIPT THREE
0x00b4: 0x00a1, # ACUTE ACCENT
0x00b5: 0x00e6, # MICRO SIGN
0x00b6: 0x0086, # PILCROW SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00b8: 0x00a5, # CEDILLA
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00be: 0x00ad, # VULGAR FRACTION THREE QUARTERS
0x00c0: 0x008e, # LATIN CAPITAL LETTER A WITH GRAVE
0x00c2: 0x0084, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c8: 0x0091, # LATIN CAPITAL LETTER E WITH GRAVE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00ca: 0x0092, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00cb: 0x0094, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00ce: 0x00a8, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00cf: 0x0095, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d4: 0x0099, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d9: 0x009d, # LATIN CAPITAL LETTER U WITH GRAVE
0x00db: 0x009e, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f7: 0x00f6, # DIVISION SIGN
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x2017: 0x008d, # DOUBLE LOW LINE
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
|
mit
|
throwable-one/lettuce
|
tests/integration/lib/Django-1.3/django/contrib/localflavor/pe/forms.py
|
309
|
2272
|
# -*- coding: utf-8 -*-
"""
PE-specific Form helpers.
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import RegexField, CharField, Select
from django.utils.translation import ugettext_lazy as _
class PERegionSelect(Select):
"""
A Select widget that uses a list of Peruvian Regions as its choices.
"""
def __init__(self, attrs=None):
from pe_region import REGION_CHOICES
super(PERegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class PEDNIField(CharField):
"""
A field that validates `Documento Nacional de Identidad` (DNI) numbers.
"""
default_error_messages = {
'invalid': _("This field requires only numbers."),
'max_digits': _("This field requires 8 digits."),
}
def __init__(self, *args, **kwargs):
super(PEDNIField, self).__init__(max_length=8, min_length=8, *args,
**kwargs)
def clean(self, value):
"""
Value must be a string in the XXXXXXXX format.
"""
value = super(PEDNIField, self).clean(value)
if value in EMPTY_VALUES:
return u''
if not value.isdigit():
raise ValidationError(self.error_messages['invalid'])
if len(value) != 8:
raise ValidationError(self.error_messages['max_digits'])
return value
class PERUCField(RegexField):
"""
This field validates a RUC (Registro Unico de Contribuyentes). A RUC is of
the form XXXXXXXXXXX.
"""
default_error_messages = {
'invalid': _("This field requires only numbers."),
'max_digits': _("This field requires 11 digits."),
}
def __init__(self, *args, **kwargs):
super(PERUCField, self).__init__(max_length=11, min_length=11, *args,
**kwargs)
def clean(self, value):
"""
Value must be an 11-digit number.
"""
value = super(PERUCField, self).clean(value)
if value in EMPTY_VALUES:
return u''
if not value.isdigit():
raise ValidationError(self.error_messages['invalid'])
if len(value) != 11:
raise ValidationError(self.error_messages['max_digits'])
return value
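# Illustrative usage sketch (hypothetical form, not part of this module):
#
#     from django import forms
#
#     class PersonForm(forms.Form):
#         dni = PEDNIField()      # validates exactly 8 digits
#         ruc = PERUCField()      # validates exactly 11 digits
#         region = forms.CharField(widget=PERegionSelect())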
|
gpl-3.0
|
inspyration/odoo
|
addons/website_forum/tests/test_forum.py
|
87
|
7632
|
# -*- coding: utf-8 -*-
from openerp.addons.website_forum.tests.common import KARMA, TestForumCommon
from openerp.addons.website_forum.models.forum import KarmaError
from openerp.exceptions import Warning, AccessError
from openerp.tools import mute_logger
class TestForum(TestForumCommon):
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
def test_ask(self):
Post = self.env['forum.post']
# Public user asks a question: not allowed
with self.assertRaises(AccessError):
Post.sudo(self.user_public).create({
'name': " Question ?",
'forum_id': self.forum.id,
})
# Portal user asks a question with tags: not allowed, insufficient karma
with self.assertRaises(KarmaError):
Post.sudo(self.user_portal).create({
'name': " Q_0",
'forum_id': self.forum.id,
'tag_ids': [(0, 0, {'name': 'Tag0', 'forum_id': self.forum.id})]
})
# Portal user asks a question with tags: ok if enough karma
self.user_portal.karma = KARMA['ask']
Post.sudo(self.user_portal).create({
'name': " Q0",
'forum_id': self.forum.id,
'tag_ids': [(0, 0, {'name': 'Tag0', 'forum_id': self.forum.id})]
})
self.assertEqual(self.user_portal.karma, KARMA['ask'] + KARMA['gen_que_new'], 'website_forum: wrong karma generation when asking question')
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
def test_answer(self):
Post = self.env['forum.post']
# Answers its own question: not allowed, insufficient karma
with self.assertRaises(KarmaError):
Post.sudo(self.user_employee).create({
'name': " A0",
'forum_id': self.forum.id,
'parent_id': self.post.id,
})
# Answers on question: ok if enough karma
self.user_employee.karma = KARMA['ans']
Post.sudo(self.user_employee).create({
'name': " A0",
'forum_id': self.forum.id,
'parent_id': self.post.id,
})
self.assertEqual(self.user_employee.karma, KARMA['ans'], 'website_forum: wrong karma generation when answering question')
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
def test_vote_crash(self):
Post = self.env['forum.post']
self.user_employee.karma = KARMA['ans']
emp_answer = Post.sudo(self.user_employee).create({
'name': 'TestAnswer',
'forum_id': self.forum.id,
'parent_id': self.post.id})
# upvote its own post
with self.assertRaises(Warning):
emp_answer.vote(upvote=True)
# not enough karma
with self.assertRaises(KarmaError):
self.post.sudo(self.user_portal).vote(upvote=True)
def test_vote(self):
self.post.create_uid.karma = KARMA['ask']
self.user_portal.karma = KARMA['upv']
self.post.sudo(self.user_portal).vote(upvote=True)
self.assertEqual(self.post.create_uid.karma, KARMA['ask'] + KARMA['gen_que_upv'], 'website_forum: wrong karma generation of upvoted question author')
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
def test_downvote_crash(self):
Post = self.env['forum.post']
self.user_employee.karma = KARMA['ans']
emp_answer = Post.sudo(self.user_employee).create({
'name': 'TestAnswer',
'forum_id': self.forum.id,
'parent_id': self.post.id})
# downvote its own post
with self.assertRaises(Warning):
emp_answer.vote(upvote=False)
# not enough karma
with self.assertRaises(KarmaError):
self.post.sudo(self.user_portal).vote(upvote=False)
def test_downvote(self):
self.post.create_uid.karma = 50
self.user_portal.karma = KARMA['dwv']
self.post.sudo(self.user_portal).vote(upvote=False)
self.assertEqual(self.post.create_uid.karma, 50 + KARMA['gen_que_dwv'], 'website_forum: wrong karma generation of downvoted question author')
def test_comment_crash(self):
with self.assertRaises(KarmaError):
self.post.sudo(self.user_portal).message_post(body='Should crash', type='comment')
def test_comment(self):
self.post.sudo(self.user_employee).message_post(body='Test0', type='notification')
self.user_employee.karma = KARMA['com_all']
self.post.sudo(self.user_employee).message_post(body='Test1', type='comment')
self.assertEqual(len(self.post.message_ids), 4, 'website_forum: wrong behavior of message_post')
def test_convert_answer_to_comment_crash(self):
Post = self.env['forum.post']
# converting a question does nothing
msg_ids = self.post.sudo(self.user_portal).convert_answer_to_comment()
self.assertEqual(msg_ids[0], False, 'website_forum: question to comment conversion failed')
self.assertEqual(Post.search([('name', '=', 'TestQuestion')])[0].forum_id.name, 'TestForum', 'website_forum: question to comment conversion failed')
with self.assertRaises(KarmaError):
self.answer.sudo(self.user_portal).convert_answer_to_comment()
def test_convert_answer_to_comment(self):
self.user_portal.karma = KARMA['com_conv_all']
post_author = self.answer.create_uid.partner_id
msg_ids = self.answer.sudo(self.user_portal).convert_answer_to_comment()
self.assertEqual(len(msg_ids), 1, 'website_forum: wrong answer to comment conversion')
msg = self.env['mail.message'].browse(msg_ids[0])
self.assertEqual(msg.author_id, post_author, 'website_forum: wrong answer to comment conversion')
self.assertIn('I am an anteater', msg.body, 'website_forum: wrong answer to comment conversion')
def test_edit_post_crash(self):
with self.assertRaises(KarmaError):
self.post.sudo(self.user_portal).write({'name': 'I am not your father.'})
def test_edit_post(self):
self.post.create_uid.karma = KARMA['edit_own']
self.post.write({'name': 'Actually I am your dog.'})
self.user_portal.karma = KARMA['edit_all']
self.post.sudo(self.user_portal).write({'name': 'Actually I am your cat.'})
def test_close_post_crash(self):
with self.assertRaises(KarmaError):
self.post.sudo(self.user_portal).close(None)
def test_close_post_own(self):
self.post.create_uid.karma = KARMA['close_own']
self.post.close(None)
def test_close_post_all(self):
self.user_portal.karma = KARMA['close_all']
self.post.sudo(self.user_portal).close(None)
def test_deactivate_post_crash(self):
with self.assertRaises(KarmaError):
self.post.sudo(self.user_portal).write({'active': False})
def test_deactivate_post_own(self):
self.post.create_uid.karma = KARMA['unlink_own']
self.post.write({'active': False})
def test_deactivate_post_all(self):
self.user_portal.karma = KARMA['unlink_all']
self.post.sudo(self.user_portal).write({'active': False})
def test_unlink_post_crash(self):
with self.assertRaises(KarmaError):
self.post.sudo(self.user_portal).unlink()
def test_unlink_post_own(self):
self.post.create_uid.karma = KARMA['unlink_own']
self.post.unlink()
def test_unlink_post_all(self):
self.user_portal.karma = KARMA['unlink_all']
self.post.sudo(self.user_portal).unlink()
|
agpl-3.0
|
mvaled/OpenUpgrade
|
addons/product_margin/wizard/product_margin.py
|
338
|
3457
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class product_margin(osv.osv_memory):
_name = 'product.margin'
_description = 'Product Margin'
_columns = {
'from_date': fields.date('From'),
'to_date': fields.date('To'),
'invoice_state': fields.selection([
('paid', 'Paid'),
('open_paid', 'Open and Paid'),
('draft_open_paid', 'Draft, Open and Paid'),
], 'Invoice State', select=True, required=True),
}
_defaults = {
'from_date': time.strftime('%Y-01-01'),
'to_date': time.strftime('%Y-12-31'),
'invoice_state': "open_paid",
}
def action_open_window(self, cr, uid, ids, context=None):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: the ID or list of IDs if we want more than one
@return:
"""
context = dict(context or {})
def ref(module, xml_id):
proxy = self.pool.get('ir.model.data')
return proxy.get_object_reference(cr, uid, module, xml_id)
model, search_view_id = ref('product', 'product_search_form_view')
model, graph_view_id = ref('product_margin', 'view_product_margin_graph')
model, form_view_id = ref('product_margin', 'view_product_margin_form')
model, tree_view_id = ref('product_margin', 'view_product_margin_tree')
#get the current product.margin object to obtain the values from it
records = self.browse(cr, uid, ids, context=context)
record = records[0]
context.update(invoice_state=record.invoice_state)
if record.from_date:
context.update(date_from=record.from_date)
if record.to_date:
context.update(date_to=record.to_date)
views = [
(tree_view_id, 'tree'),
(form_view_id, 'form'),
(graph_view_id, 'graph')
]
return {
'name': _('Product Margins'),
'context': context,
'view_type': 'form',
"view_mode": 'tree,form,graph',
'res_model': 'product.product',
'type': 'ir.actions.act_window',
'views': views,
'view_id': False,
'search_view_id': search_view_id,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
nypl-spacetime/oldnyc
|
viewer/simplejson/tests/test_pass1.py
|
259
|
1903
|
from unittest import TestCase
import simplejson as json
# from http://json.org/JSON_checker/test/pass1.json
JSON = r'''
[
"JSON Test Pattern pass1",
{"object with 1 member":["array with 1 element"]},
{},
[],
-42,
true,
false,
null,
{
"integer": 1234567890,
"real": -9876.543210,
"e": 0.123456789e-12,
"E": 1.234567890E+34,
"": 23456789012E666,
"zero": 0,
"one": 1,
"space": " ",
"quote": "\"",
"backslash": "\\",
"controls": "\b\f\n\r\t",
"slash": "/ & \/",
"alpha": "abcdefghijklmnopqrstuvwyz",
"ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ",
"digit": "0123456789",
"special": "`1~!@#$%^&*()_+-={':[,]}|;.</>?",
"hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A",
"true": true,
"false": false,
"null": null,
"array":[ ],
"object":{ },
"address": "50 St. James Street",
"url": "http://www.JSON.org/",
"comment": "// /* <!-- --",
"# -- --> */": " ",
" s p a c e d " :[1,2 , 3
,
4 , 5 , 6 ,7 ],
"compact": [1,2,3,4,5,6,7],
"jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}",
"quotes": "" \u0022 %22 0x22 034 "",
"\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?"
: "A key can be any string"
},
0.5 ,98.6
,
99.44
,
1066
,"rosebud"]
'''
class TestPass1(TestCase):
def test_parse(self):
# test in/out equivalence and parsing
res = json.loads(JSON)
out = json.dumps(res)
self.assertEquals(res, json.loads(out))
try:
json.dumps(res, allow_nan=False)
except ValueError:
pass
else:
self.fail("23456789012E666 should be out of range")
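# Illustrative note mirroring the check above (sketch, not part of the suite): serializing an
# out-of-range or non-finite float with allow_nan=False is expected to raise ValueError, e.g.
#
#     json.dumps(float('inf'), allow_nan=False)   # raises ValueError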
|
apache-2.0
|
newsteinking/docker
|
tests/lib/test_lib.py
|
48
|
1899
|
"""Test the test support."""
from __future__ import absolute_import
import filecmp
import re
from os.path import join, isdir
from tests.lib import SRC_DIR
def test_tmp_dir_exists_in_env(script):
"""
Test that $TMPDIR == env.temp_path and path exists and env.assert_no_temp()
passes (in fast env)
"""
# need these tests to ensure the assert_no_temp feature of scripttest is
# working
script.assert_no_temp() # this fails if env.tmp_path doesn't exist
assert script.environ['TMPDIR'] == script.temp_path
assert isdir(script.temp_path)
def test_correct_pip_version(script):
"""
Check we are running proper version of pip in run_pip.
"""
# output is like:
# pip PIPVERSION from PIPDIRECTORY (python PYVERSION)
result = script.pip('--version')
# compare the directory tree of the invoked pip with that of this source
# distribution
dir = re.match(
r'pip \d(\.[\d])+(\.?(rc|dev|pre|post)\d+)? from (.*) '
r'\(python \d(.[\d])+\)$',
result.stdout
).group(4)
pip_folder = join(SRC_DIR, 'pip')
pip_folder_outputed = join(dir, 'pip')
diffs = filecmp.dircmp(pip_folder, pip_folder_outputed)
# If any non-matching .py files exist, we have a problem: run_pip
# is picking up some other version! N.B. if this project acquires
# primary resources other than .py files, this code will need
# maintenance
mismatch_py = [
x for x in diffs.left_only + diffs.right_only + diffs.diff_files
if x.endswith('.py')
]
assert not mismatch_py, (
'mismatched source files in %r and %r: %r' %
(pip_folder, pip_folder_outputed, mismatch_py)
)
def test_as_import(script):
""" test that pip.__init__.py does not shadow
the command submodule with a dictionary
"""
import pip.commands.install as inst
assert inst is not None
|
mit
|
onceuponatimeforever/oh-mainline
|
vendor/packages/django-tastypie/tastypie/fields.py
|
20
|
32548
|
from __future__ import unicode_literals
import datetime
from dateutil.parser import parse
from decimal import Decimal
import re
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.utils import datetime_safe, importlib
from django.utils import six
from tastypie.bundle import Bundle
from tastypie.exceptions import ApiFieldError, NotFound
from tastypie.utils import dict_strip_unicode_keys, make_aware
class NOT_PROVIDED:
def __str__(self):
return 'No default provided.'
DATE_REGEX = re.compile('^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2}).*?$')
DATETIME_REGEX = re.compile('^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})(T|\s+)(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2}).*?$')
# All the ApiField variants.
class ApiField(object):
"""The base implementation of a field used by the resources."""
dehydrated_type = 'string'
help_text = ''
def __init__(self, attribute=None, default=NOT_PROVIDED, null=False, blank=False, readonly=False, unique=False, help_text=None, use_in='all'):
"""
Sets up the field. This is generally called when the containing
``Resource`` is initialized.
Optionally accepts an ``attribute``, which should be the string name of
either an instance attribute or a callable on the object, used to pull
data during ``dehydrate`` or to push data onto the object during ``hydrate``.
Defaults to ``None``, meaning data will be manually accessed.
Optionally accepts a ``default``, which provides default data when the
object being ``dehydrated``/``hydrated`` has no data on the field.
Defaults to ``NOT_PROVIDED``.
Optionally accepts a ``null``, which indicates whether or not a
``None`` is allowable data on the field. Defaults to ``False``.
Optionally accepts a ``blank``, which indicates whether or not
data may be omitted on the field. Defaults to ``False``.
Optionally accepts a ``readonly``, which indicates whether the field
is used during the ``hydrate`` or not. Defaults to ``False``.
Optionally accepts a ``unique``, which indicates if the field is a
unique identifier for the object.
Optionally accepts ``help_text``, which lets you provide a
human-readable description of the field exposed at the schema level.
Defaults to the per-Field definition.
Optionally accepts ``use_in``. This may be one of ``list``, ``detail``
``all`` or a callable which accepts a ``bundle`` and returns
``True`` or ``False``. Indicates whether this field will be included
during dehydration of a list of objects or a single object. If ``use_in``
is a callable, and returns ``True``, the field will be included during
dehydration.
Defaults to ``all``.
"""
# Track what the index thinks this field is called.
self.instance_name = None
self._resource = None
self.attribute = attribute
self._default = default
self.null = null
self.blank = blank
self.readonly = readonly
self.value = None
self.unique = unique
self.use_in = 'all'
if use_in in ['all', 'detail', 'list'] or callable(use_in):
self.use_in = use_in
if help_text:
self.help_text = help_text
def contribute_to_class(self, cls, name):
# Do the least we can here so that we don't hate ourselves in the
# morning.
self.instance_name = name
self._resource = cls
def has_default(self):
"""Returns a boolean of whether this field has a default value."""
return self._default is not NOT_PROVIDED
@property
def default(self):
"""Returns the default value for the field."""
if callable(self._default):
return self._default()
return self._default
def dehydrate(self, bundle, for_list=True):
"""
Takes data from the provided object and prepares it for the
resource.
"""
if self.attribute is not None:
# Check for `__` in the field for looking through the relation.
attrs = self.attribute.split('__')
current_object = bundle.obj
for attr in attrs:
previous_object = current_object
current_object = getattr(current_object, attr, None)
if current_object is None:
if self.has_default():
current_object = self._default
# Fall out of the loop, given any further attempts at
# accesses will fail miserably.
break
elif self.null:
current_object = None
# Fall out of the loop, given any further attempts at
# accesses will fail miserably.
break
else:
raise ApiFieldError("The object '%r' has an empty attribute '%s' and doesn't allow a default or null value." % (previous_object, attr))
if callable(current_object):
current_object = current_object()
return self.convert(current_object)
if self.has_default():
return self.convert(self.default)
else:
return None
def convert(self, value):
"""
Handles conversion between the data found and the type of the field.
Extending classes should override this method and provide correct
data coercion.
"""
return value
def hydrate(self, bundle):
"""
Takes data stored in the bundle for the field and returns it. Used for
taking simple data and building an instance object.
"""
if self.readonly:
return None
if self.instance_name not in bundle.data:
if getattr(self, 'is_related', False) and not getattr(self, 'is_m2m', False):
# We've got an FK (or alike field) & a possible parent object.
# Check for it.
if bundle.related_obj and bundle.related_name in (self.attribute, self.instance_name):
return bundle.related_obj
if self.blank:
return None
elif self.attribute and getattr(bundle.obj, self.attribute, None):
return getattr(bundle.obj, self.attribute)
elif self.instance_name and hasattr(bundle.obj, self.instance_name):
return getattr(bundle.obj, self.instance_name)
elif self.has_default():
if callable(self._default):
return self._default()
return self._default
elif self.null:
return None
else:
raise ApiFieldError("The '%s' field has no data and doesn't allow a default or null value." % self.instance_name)
return bundle.data[self.instance_name]
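# Illustrative sketch (not part of the original module): a custom field only
# needs to override ``convert`` to coerce dehydrated data. ``UpperCaseField``
# below is a hypothetical example, not a field shipped with tastypie.
#
#     class UpperCaseField(ApiField):
#         dehydrated_type = 'string'
#         help_text = 'Upper-cased string data. Ex: "HELLO WORLD"'
#
#         def convert(self, value):
#             if value is None:
#                 return None
#             return six.text_type(value).upper()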
class CharField(ApiField):
"""
A text field of arbitrary length.
Covers both ``models.CharField`` and ``models.TextField``.
"""
dehydrated_type = 'string'
help_text = 'Unicode string data. Ex: "Hello World"'
def convert(self, value):
if value is None:
return None
return six.text_type(value)
class FileField(ApiField):
"""
A file-related field.
Covers both ``models.FileField`` and ``models.ImageField``.
"""
dehydrated_type = 'string'
help_text = 'A file URL as a string. Ex: "http://media.example.com/media/photos/my_photo.jpg"'
def convert(self, value):
if value is None:
return None
try:
# Try to return the URL if it's a ``File``, falling back to the string
# itself if it's been overridden or is a default.
return getattr(value, 'url', value)
except ValueError:
return None
class IntegerField(ApiField):
"""
An integer field.
Covers ``models.IntegerField``, ``models.PositiveIntegerField``,
``models.PositiveSmallIntegerField`` and ``models.SmallIntegerField``.
"""
dehydrated_type = 'integer'
help_text = 'Integer data. Ex: 2673'
def convert(self, value):
if value is None:
return None
return int(value)
class FloatField(ApiField):
"""
A floating point field.
"""
dehydrated_type = 'float'
help_text = 'Floating point numeric data. Ex: 26.73'
def convert(self, value):
if value is None:
return None
return float(value)
class DecimalField(ApiField):
"""
A decimal field.
"""
dehydrated_type = 'decimal'
help_text = 'Fixed precision numeric data. Ex: 26.73'
def convert(self, value):
if value is None:
return None
return Decimal(value)
def hydrate(self, bundle):
value = super(DecimalField, self).hydrate(bundle)
if value and not isinstance(value, Decimal):
value = Decimal(value)
return value
class BooleanField(ApiField):
"""
A boolean field.
Covers both ``models.BooleanField`` and ``models.NullBooleanField``.
"""
dehydrated_type = 'boolean'
help_text = 'Boolean data. Ex: True'
def convert(self, value):
if value is None:
return None
return bool(value)
class ListField(ApiField):
"""
A list field.
"""
dehydrated_type = 'list'
help_text = "A list of data. Ex: ['abc', 26.73, 8]"
def convert(self, value):
if value is None:
return None
return list(value)
class DictField(ApiField):
"""
A dictionary field.
"""
dehydrated_type = 'dict'
help_text = "A dictionary of data. Ex: {'price': 26.73, 'name': 'Daniel'}"
def convert(self, value):
if value is None:
return None
return dict(value)
class DateField(ApiField):
"""
A date field.
"""
dehydrated_type = 'date'
help_text = 'A date as a string. Ex: "2010-11-10"'
def convert(self, value):
if value is None:
return None
if isinstance(value, six.string_types):
match = DATE_REGEX.search(value)
if match:
data = match.groupdict()
return datetime_safe.date(int(data['year']), int(data['month']), int(data['day']))
else:
raise ApiFieldError("Date provided to '%s' field doesn't appear to be a valid date string: '%s'" % (self.instance_name, value))
return value
def hydrate(self, bundle):
value = super(DateField, self).hydrate(bundle)
if value and not hasattr(value, 'year'):
try:
# Try to rip a date/datetime out of it.
value = make_aware(parse(value))
if hasattr(value, 'hour'):
value = value.date()
except ValueError:
pass
return value
class DateTimeField(ApiField):
"""
A datetime field.
"""
dehydrated_type = 'datetime'
help_text = 'A date & time as a string. Ex: "2010-11-10T03:07:43"'
def convert(self, value):
if value is None:
return None
if isinstance(value, six.string_types):
match = DATETIME_REGEX.search(value)
if match:
data = match.groupdict()
return make_aware(datetime_safe.datetime(int(data['year']), int(data['month']), int(data['day']), int(data['hour']), int(data['minute']), int(data['second'])))
else:
raise ApiFieldError("Datetime provided to '%s' field doesn't appear to be a valid datetime string: '%s'" % (self.instance_name, value))
return value
def hydrate(self, bundle):
value = super(DateTimeField, self).hydrate(bundle)
if value and not hasattr(value, 'year'):
if isinstance(value, six.string_types):
try:
# Try to rip a date/datetime out of it.
value = make_aware(parse(value))
except (ValueError, TypeError):
raise ApiFieldError("Datetime provided to '%s' field doesn't appear to be a valid datetime string: '%s'" % (self.instance_name, value))
else:
raise ApiFieldError("Datetime provided to '%s' field must be a string: %s" % (self.instance_name, value))
return value
class RelatedField(ApiField):
"""
Provides access to data that is related within the database.
The ``RelatedField`` base class is not intended for direct use but provides
functionality that ``ToOneField`` and ``ToManyField`` build upon.
The contents of this field actually point to another ``Resource``,
rather than the related object. This allows the field to represent its data
in different ways.
The abstractions based around this are "leaky" in that, unlike the other
fields provided by ``tastypie``, these fields don't handle arbitrary objects
very well. The subclasses use Django's ORM layer to make things go, though
there is no ORM-specific code at this level.
"""
dehydrated_type = 'related'
is_related = True
self_referential = False
help_text = 'A related resource. Can be either a URI or set of nested resource data.'
def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED, null=False, blank=False, readonly=False, full=False, unique=False, help_text=None, use_in='all', full_list=True, full_detail=True):
"""
Builds the field and prepares it to access to related data.
The ``to`` argument should point to a ``Resource`` class, NOT
to a ``Model``. Required.
The ``attribute`` argument should specify what field/callable points to
the related data on the instance object. Required.
Optionally accepts a ``related_name`` argument. Currently unused, as
unlike Django's ORM layer, reverse relations between ``Resource``
classes are not automatically created. Defaults to ``None``.
Optionally accepts a ``null``, which indicates whether or not a
``None`` is allowable data on the field. Defaults to ``False``.
Optionally accepts a ``blank``, which indicates whether or not
data may be omitted on the field. Defaults to ``False``.
Optionally accepts a ``readonly``, which indicates whether the field
is used during the ``hydrate`` or not. Defaults to ``False``.
Optionally accepts a ``full``, which indicates how the related
``Resource`` will appear post-``dehydrate``. If ``False``, the
related ``Resource`` will appear as a URL to the endpoint of that
resource. If ``True``, the result of the sub-resource's
``dehydrate`` will be included in full.
Optionally accepts a ``unique``, which indicates if the field is a
unique identifier for the object.
Optionally accepts ``help_text``, which lets you provide a
human-readable description of the field exposed at the schema level.
Defaults to the per-Field definition.
Optionally accepts ``use_in``. This may be one of ``list``, ``detail``,
``all`` or a callable which accepts a ``bundle`` and returns
``True`` or ``False``. Indicates whether this field will be included
during dehydration of a list of objects or a single object. If ``use_in``
is a callable, and returns ``True``, the field will be included during
dehydration.
Defaults to ``all``.
Optionally accepts a ``full_list``, which indicates whether or not
data should be fully dehydrated when the request is for a list of
resources. Accepts ``True``, ``False`` or a callable that accepts
a bundle and returns ``True`` or ``False``. Depends on ``full``
being ``True``. Defaults to ``True``.
Optionally accepts a ``full_detail``, which indicates whether or not
data should be fully dehydrated when the request is for a single
resource. Accepts ``True``, ``False`` or a callable that accepts a
bundle and returns ``True`` or ``False``. Depends on ``full``
being ``True``. Defaults to ``True``.
"""
self.instance_name = None
self._resource = None
self.to = to
self.attribute = attribute
self.related_name = related_name
self._default = default
self.null = null
self.blank = blank
self.readonly = readonly
self.full = full
self.api_name = None
self.resource_name = None
self.unique = unique
self._to_class = None
self.use_in = 'all'
self.full_list = full_list
self.full_detail = full_detail
if use_in in ['all', 'detail', 'list'] or callable(use_in):
self.use_in = use_in
if self.to == 'self':
self.self_referential = True
self._to_class = self.__class__
if help_text:
self.help_text = help_text
def contribute_to_class(self, cls, name):
super(RelatedField, self).contribute_to_class(cls, name)
# Check if we're self-referential and hook it up.
# We can't do this quite like Django because there's no ``AppCache``
# here (which I think we should avoid as long as possible).
if self.self_referential or self.to == 'self':
self._to_class = cls
def get_related_resource(self, related_instance):
"""
Instantiates the related resource.
"""
related_resource = self.to_class()
# Fix the ``api_name`` if it's not present.
if related_resource._meta.api_name is None:
if self._resource and not self._resource._meta.api_name is None:
related_resource._meta.api_name = self._resource._meta.api_name
# Try to be efficient about DB queries.
related_resource.instance = related_instance
return related_resource
@property
def to_class(self):
# We need to be lazy here, because when the metaclass constructs the
# Resources, other classes may not exist yet.
# That said, memoize this so we never have to relookup/reimport.
if self._to_class:
return self._to_class
if not isinstance(self.to, six.string_types):
self._to_class = self.to
return self._to_class
# It's a string. Let's figure it out.
if '.' in self.to:
# Try to import.
module_bits = self.to.split('.')
module_path, class_name = '.'.join(module_bits[:-1]), module_bits[-1]
module = importlib.import_module(module_path)
else:
# We've got a bare class name here, which won't work (No AppCache
# to rely on). Try to throw a useful error.
raise ImportError("Tastypie requires a Python-style path (<module.module.Class>) to lazy load related resources. Only given '%s'." % self.to)
self._to_class = getattr(module, class_name, None)
if self._to_class is None:
raise ImportError("Module '%s' does not appear to have a class called '%s'." % (module_path, class_name))
return self._to_class
def dehydrate_related(self, bundle, related_resource, for_list=True):
"""
Based on whether the related resource should be fully dehydrated (see
``should_full_dehydrate``), returns either the endpoint URI or the data
from ``full_dehydrate`` for the related resource.
"""
should_dehydrate_full_resource = self.should_full_dehydrate(bundle, for_list=for_list)
if not should_dehydrate_full_resource:
# Be a good netizen.
return related_resource.get_resource_uri(bundle)
else:
# ZOMG extra data and big payloads.
bundle = related_resource.build_bundle(
obj=related_resource.instance,
request=bundle.request,
objects_saved=bundle.objects_saved
)
return related_resource.full_dehydrate(bundle)
def resource_from_uri(self, fk_resource, uri, request=None, related_obj=None, related_name=None):
"""
Given a URI, attempts to load the related resource based on the
identifiers in the URI.
"""
try:
obj = fk_resource.get_via_uri(uri, request=request)
bundle = fk_resource.build_bundle(
obj=obj,
request=request
)
return fk_resource.full_dehydrate(bundle)
except ObjectDoesNotExist:
raise ApiFieldError("Could not find the provided object via resource URI '%s'." % uri)
def resource_from_data(self, fk_resource, data, request=None, related_obj=None, related_name=None):
"""
Given a dictionary-like structure, creates a fresh related
resource using that data.
"""
# Try to hydrate the data provided.
data = dict_strip_unicode_keys(data)
fk_bundle = fk_resource.build_bundle(
data=data,
request=request
)
if related_obj:
fk_bundle.related_obj = related_obj
fk_bundle.related_name = related_name
unique_keys = dict((k, v) for k, v in data.items() if k == 'pk' or (hasattr(fk_resource, k) and getattr(fk_resource, k).unique))
# If we have no unique keys, we shouldn't go look for some resource that
# happens to match other kwargs. In the case of a create, it might be the
# completely wrong resource.
# We also need to check to see if updates are allowed on the FK resource.
if unique_keys and fk_resource.can_update():
try:
return fk_resource.obj_update(fk_bundle, skip_errors=True, **data)
except (NotFound, TypeError):
try:
# Attempt lookup by primary key
return fk_resource.obj_update(fk_bundle, skip_errors=True, **unique_keys)
except NotFound:
pass
except MultipleObjectsReturned:
pass
# If we shouldn't update a resource, or we couldn't find a matching
# resource we'll just return a populated bundle instead
# of mistakenly updating something that should be read-only.
fk_bundle = fk_resource.full_hydrate(fk_bundle)
fk_resource.is_valid(fk_bundle)
return fk_bundle
def resource_from_pk(self, fk_resource, obj, request=None, related_obj=None, related_name=None):
"""
Given an object with a ``pk`` attribute, attempts to load the related
resource via that primary key.
"""
bundle = fk_resource.build_bundle(
obj=obj,
request=request
)
return fk_resource.full_dehydrate(bundle)
def build_related_resource(self, value, request=None, related_obj=None, related_name=None):
"""
Returns a bundle of data built by the related resource, usually via
``hydrate`` with the data provided.
Accepts either a URI, a data dictionary (or dictionary-like structure)
or an object with a ``pk``.
"""
self.fk_resource = self.to_class()
kwargs = {
'request': request,
'related_obj': related_obj,
'related_name': related_name,
}
if isinstance(value, Bundle):
# Already hydrated, probably nested bundles. Just return.
return value
elif isinstance(value, six.string_types):
# We got a URI. Load the object and assign it.
return self.resource_from_uri(self.fk_resource, value, **kwargs)
elif hasattr(value, 'items'):
# We've got a data dictionary.
# Since this leads to creation, this is the only one of these
# methods that might care about "parent" data.
return self.resource_from_data(self.fk_resource, value, **kwargs)
elif hasattr(value, 'pk'):
# We've got an object with a primary key.
return self.resource_from_pk(self.fk_resource, value, **kwargs)
else:
raise ApiFieldError("The '%s' field was given data that was not a URI, not a dictionary-alike and does not have a 'pk' attribute: %s." % (self.instance_name, value))
def should_full_dehydrate(self, bundle, for_list):
"""
Based on ``full``, ``full_list`` and ``full_detail``, returns ``True`` or ``False``
indicating whether the resource should be fully dehydrated.
"""
should_dehydrate_full_resource = False
if self.full:
is_details_view = not for_list
if is_details_view:
if (not callable(self.full_detail) and self.full_detail) or (callable(self.full_detail) and self.full_detail(bundle)):
should_dehydrate_full_resource = True
else:
if (not callable(self.full_list) and self.full_list) or (callable(self.full_list) and self.full_list(bundle)):
should_dehydrate_full_resource = True
return should_dehydrate_full_resource
class ToOneField(RelatedField):
"""
Provides access to related data via foreign key.
This subclass requires Django's ORM layer to work properly.
"""
help_text = 'A single related resource. Can be either a URI or set of nested resource data.'
def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED,
null=False, blank=False, readonly=False, full=False,
unique=False, help_text=None, use_in='all', full_list=True, full_detail=True):
super(ToOneField, self).__init__(
to, attribute, related_name=related_name, default=default,
null=null, blank=blank, readonly=readonly, full=full,
unique=unique, help_text=help_text, use_in=use_in,
full_list=full_list, full_detail=full_detail
)
self.fk_resource = None
def dehydrate(self, bundle, for_list=True):
foreign_obj = None
# Keep track of the starting object/attribute so the error message below
# is meaningful even when ``attribute`` is a callable (mirrors ToManyField).
previous_obj = bundle.obj
attr = self.attribute
if isinstance(self.attribute, six.string_types):
attrs = self.attribute.split('__')
foreign_obj = bundle.obj
for attr in attrs:
previous_obj = foreign_obj
try:
foreign_obj = getattr(foreign_obj, attr, None)
except ObjectDoesNotExist:
foreign_obj = None
elif callable(self.attribute):
foreign_obj = self.attribute(bundle)
if not foreign_obj:
if not self.null:
raise ApiFieldError("The model '%r' has an empty attribute '%s' and doesn't allow a null value." % (previous_obj, attr))
return None
self.fk_resource = self.get_related_resource(foreign_obj)
fk_bundle = Bundle(obj=foreign_obj, request=bundle.request)
return self.dehydrate_related(fk_bundle, self.fk_resource, for_list=for_list)
def hydrate(self, bundle):
value = super(ToOneField, self).hydrate(bundle)
if value is None:
return value
return self.build_related_resource(value, request=bundle.request)
class ForeignKey(ToOneField):
"""
A convenience subclass for those who prefer to mirror ``django.db.models``.
"""
pass
class OneToOneField(ToOneField):
"""
A convenience subclass for those who prefer to mirror ``django.db.models``.
"""
pass
class ToManyField(RelatedField):
"""
Provides access to related data via a join table.
This subclass requires Django's ORM layer to work properly.
Note that the ``hydrate`` portions of this field are quite different from
those of any other field. ``hydrate_m2m`` actually handles the data and relations.
This is due to the way Django implements M2M relationships.
"""
is_m2m = True
help_text = 'Many related resources. Can be either a list of URIs or list of individually nested resource data.'
def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED,
null=False, blank=False, readonly=False, full=False,
unique=False, help_text=None, use_in='all', full_list=True, full_detail=True):
super(ToManyField, self).__init__(
to, attribute, related_name=related_name, default=default,
null=null, blank=blank, readonly=readonly, full=full,
unique=unique, help_text=help_text, use_in=use_in,
full_list=full_list, full_detail=full_detail
)
self.m2m_bundles = []
def dehydrate(self, bundle, for_list=True):
if not bundle.obj or not bundle.obj.pk:
if not self.null:
raise ApiFieldError("The model '%r' does not have a primary key and can not be used in a ToMany context." % bundle.obj)
return []
the_m2ms = None
previous_obj = bundle.obj
attr = self.attribute
if isinstance(self.attribute, six.string_types):
attrs = self.attribute.split('__')
the_m2ms = bundle.obj
for attr in attrs:
previous_obj = the_m2ms
try:
the_m2ms = getattr(the_m2ms, attr, None)
except ObjectDoesNotExist:
the_m2ms = None
if not the_m2ms:
break
elif callable(self.attribute):
the_m2ms = self.attribute(bundle)
if not the_m2ms:
if not self.null:
raise ApiFieldError("The model '%r' has an empty attribute '%s' and doesn't allow a null value." % (previous_obj, attr))
return []
self.m2m_resources = []
m2m_dehydrated = []
# TODO: Also model-specific and leaky. Relies on there being a
# ``Manager`` there.
for m2m in the_m2ms.all():
m2m_resource = self.get_related_resource(m2m)
m2m_bundle = Bundle(obj=m2m, request=bundle.request)
self.m2m_resources.append(m2m_resource)
m2m_dehydrated.append(self.dehydrate_related(m2m_bundle, m2m_resource, for_list=for_list))
return m2m_dehydrated
def hydrate(self, bundle):
pass
def hydrate_m2m(self, bundle):
if self.readonly:
return None
if bundle.data.get(self.instance_name) is None:
if self.blank:
return []
elif self.null:
return []
else:
raise ApiFieldError("The '%s' field has no data and doesn't allow a null value." % self.instance_name)
m2m_hydrated = []
for value in bundle.data.get(self.instance_name):
if value is None:
continue
kwargs = {
'request': bundle.request,
}
if self.related_name:
kwargs['related_obj'] = bundle.obj
kwargs['related_name'] = self.related_name
m2m_hydrated.append(self.build_related_resource(value, **kwargs))
return m2m_hydrated
class ManyToManyField(ToManyField):
"""
A convenience subclass for those who prefer to mirror ``django.db.models``.
"""
pass
class OneToManyField(ToManyField):
"""
A convenience subclass for those who prefer to mirror ``django.db.models``.
"""
pass
class TimeField(ApiField):
dehydrated_type = 'time'
help_text = 'A time as string. Ex: "20:05:23"'
def dehydrate(self, obj, for_list=True):
return self.convert(super(TimeField, self).dehydrate(obj))
def convert(self, value):
if isinstance(value, six.string_types):
return self.to_time(value)
return value
def to_time(self, s):
try:
dt = parse(s)
except (ValueError, TypeError) as e:
raise ApiFieldError(str(e))
else:
return datetime.time(dt.hour, dt.minute, dt.second)
def hydrate(self, bundle):
value = super(TimeField, self).hydrate(bundle)
if value and not isinstance(value, datetime.time):
value = self.to_time(value)
return value
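# Illustrative sketch (not part of the original module): how these fields are
# typically declared on a resource. ``NoteResource``, ``UserResource``,
# ``TagResource``, the ``Note`` model and the ``myapp`` paths are hypothetical
# names used only for this example.
#
#     from tastypie import fields
#     from tastypie.resources import ModelResource
#
#     class NoteResource(ModelResource):
#         title = fields.CharField(attribute='title')
#         author = fields.ToOneField('myapp.api.UserResource', 'author', full=True)
#         tags = fields.ToManyField('myapp.api.TagResource', 'tags', null=True)
#
#         class Meta:
#             queryset = Note.objects.all()
#             resource_name = 'note'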
|
agpl-3.0
|
dodger487/MIST
|
data/baselines.py
|
1
|
1444
|
# Chris Riederer
# Google, Inc
# 2014-08-15
"""Record baselines and show improvements for working on magnet"""
import test_detect as t
import glob
cleanedList = t.GetRunDataFromArgs(glob.glob('cleaned/*.json'))
snipList = t.GetRunDataFromArgs(glob.glob('snips/*.json'))
decalCleanedList = t.GetRunDataFromArgs(glob.glob('cleaned/*.json'))
decalCleanedList = t.preprocessRunData(decalCleanedList)
decalSnipList = t.GetRunDataFromArgs(glob.glob('snips/*.json'))
decalSnipList = t.preprocessRunData(decalSnipList)
def tryDetector(detector, detectorLabel):
print detectorLabel
print "Running on snippets"
t.testDetector(detector, snipList, printFileData=False)
print
print detectorLabel
print "Running on full files"
t.testDetector(detector, cleanedList, printFileData=False)
print
print detectorLabel
print "Running on snippets, decalibrated"
t.testDetector(detector, decalSnipList, printFileData=False)
print
print detectorLabel
print "Running on full files, decalibrated"
t.testDetector(detector, decalCleanedList, printFileData=False)
print
def runAll():
"""Run every detector and print out the results"""
detector = t.OriginalDetector()
tryDetector(detector, "ORIGINAL DETECTOR")
detector = t.TimeWindowDetector()
tryDetector(detector, "TIME WINDOW DETECTOR")
detector = t.VectorChangeDetector()
tryDetector(detector, "VECTOR CHANGE DETECTOR")
if __name__ == '__main__':
runAll()
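# Illustrative sketch (not part of the original script): trying an additional
# detector follows the same pattern as runAll(). ``MyDetector`` is a
# hypothetical class assumed to live in test_detect.
#
# detector = t.MyDetector()
# tryDetector(detector, "MY DETECTOR")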
|
apache-2.0
|
aioue/ansible
|
lib/ansible/modules/packaging/language/pear.py
|
70
|
7380
|
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2012, Afterburn <http://github.com/afterburn>
# (c) 2013, Aaron Bull Schaefer <[email protected]>
# (c) 2015, Jonathan Lestrelin <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pear
short_description: Manage pear/pecl packages
description:
- Manage PHP packages with the pear package manager.
version_added: 2.0
author:
- "'jonathan.lestrelin' <[email protected]>"
options:
name:
description:
- Name of the package to install, upgrade, or remove.
required: true
state:
description:
- Desired state of the package.
required: false
default: "present"
choices: ["present", "absent", "latest"]
'''
EXAMPLES = '''
# Install pear package
- pear:
name: Net_URL2
state: present
# Install pecl package
- pear:
name: pecl/json_post
state: present
# Upgrade package
- pear:
name: Net_URL2
state: latest
# Remove packages
- pear:
name: Net_URL2,pecl/json_post
state: absent
'''
import os
def get_local_version(pear_output):
"""Take pear remoteinfo output and get the installed version"""
lines = pear_output.split('\n')
for line in lines:
if 'Installed ' in line:
installed = line.rsplit(None, 1)[-1].strip()
if installed == '-':
continue
return installed
return None
def get_repository_version(pear_output):
"""Take pear remote-info output and get the latest version"""
lines = pear_output.split('\n')
for line in lines:
if 'Latest ' in line:
return line.rsplit(None, 1)[-1].strip()
return None
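# Illustrative sketch (not part of the original module): the two parsers above
# pick the "Installed" and "Latest" lines out of `pear remote-info` output.
# The sample text below is a minimal, made-up excerpt of that output.
#
# sample = "Latest      1.0.5\nInstalled   1.0.3\n"
# get_repository_version(sample)  # -> '1.0.5'
# get_local_version(sample)       # -> '1.0.3'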
def query_package(module, name, state="present"):
"""Query the package status in both the local system and the repository.
Returns a boolean to indicate if the package is installed,
and a second boolean to indicate if the package is up-to-date."""
if state == "present":
lcmd = "pear info %s" % (name)
lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
if lrc != 0:
# package is not installed locally
return False, False
rcmd = "pear remote-info %s" % (name)
rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
# get the version installed locally (if any)
lversion = get_local_version(rstdout)
# get the version in the repository
rversion = get_repository_version(rstdout)
if rrc == 0:
# Return True to indicate that the package is installed locally,
# and the result of the version number comparison
# to determine if the package is up-to-date.
return True, (lversion == rversion)
return False, False
def remove_packages(module, packages):
remove_c = 0
# Use a for loop so that, in case of an error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
installed, updated = query_package(module, package)
if not installed:
continue
cmd = "pear uninstall %s" % (package)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="failed to remove %s" % (package))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, state, packages):
install_c = 0
for i, package in enumerate(packages):
# if the package is installed and state == present
# or state == latest and is up-to-date then skip
installed, updated = query_package(module, package)
if installed and (state == 'present' or (state == 'latest' and updated)):
continue
if state == 'present':
command = 'install'
if state == 'latest':
command = 'upgrade'
cmd = "pear %s %s" % (command, package)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="failed to install %s" % (package))
install_c += 1
if install_c > 0:
module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
module.exit_json(changed=False, msg="package(s) already installed")
def check_packages(module, packages, state):
would_be_changed = []
for package in packages:
installed, updated = query_package(module, package)
if ((state in ["present", "latest"] and not installed) or
(state == "absent" and installed) or
(state == "latest" and not updated)):
would_be_changed.append(package)
if would_be_changed:
if state == "absent":
state = "removed"
module.exit_json(changed=True, msg="%s package(s) would be %s" % (
len(would_be_changed), state))
else:
module.exit_json(changed=False, msg="package(s) already %s" % state)
def exe_exists(program):
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):
return True
return False
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(aliases=['pkg']),
state = dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed'])),
required_one_of = [['name']],
supports_check_mode = True)
if not exe_exists("pear"):
module.fail_json(msg="cannot find pear executable in PATH")
p = module.params
# normalize the state parameter
if p['state'] in ['present', 'installed']:
p['state'] = 'present'
elif p['state'] in ['absent', 'removed']:
p['state'] = 'absent'
if p['name']:
pkgs = p['name'].split(',')
pkg_files = []
for i, pkg in enumerate(pkgs):
pkg_files.append(None)
if module.check_mode:
check_packages(module, pkgs, p['state'])
if p['state'] in ['present', 'latest']:
install_packages(module, p['state'], pkgs)
elif p['state'] == 'absent':
remove_packages(module, pkgs)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
cloudfoundry/php-buildpack-legacy
|
builds/runtimes/python-2.7.6/lib/python2.7/lib2to3/patcomp.py
|
304
|
7091
|
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Pattern compiler.
The grammar is taken from PatternGrammar.txt.
The compiler compiles a pattern to a pytree.*Pattern instance.
"""
__author__ = "Guido van Rossum <[email protected]>"
# Python imports
import os
import StringIO
# Fairly local imports
from .pgen2 import driver, literals, token, tokenize, parse, grammar
# Really local imports
from . import pytree
from . import pygram
# The pattern grammar file
_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
"PatternGrammar.txt")
class PatternSyntaxError(Exception):
pass
def tokenize_wrapper(input):
"""Tokenizes a string suppressing significant whitespace."""
skip = set((token.NEWLINE, token.INDENT, token.DEDENT))
tokens = tokenize.generate_tokens(StringIO.StringIO(input).readline)
for quintuple in tokens:
type, value, start, end, line_text = quintuple
if type not in skip:
yield quintuple
class PatternCompiler(object):
def __init__(self, grammar_file=_PATTERN_GRAMMAR_FILE):
"""Initializer.
Takes an optional alternative filename for the pattern grammar.
"""
self.grammar = driver.load_grammar(grammar_file)
self.syms = pygram.Symbols(self.grammar)
self.pygrammar = pygram.python_grammar
self.pysyms = pygram.python_symbols
self.driver = driver.Driver(self.grammar, convert=pattern_convert)
def compile_pattern(self, input, debug=False, with_tree=False):
"""Compiles a pattern string to a nested pytree.*Pattern object."""
tokens = tokenize_wrapper(input)
try:
root = self.driver.parse_tokens(tokens, debug=debug)
except parse.ParseError as e:
raise PatternSyntaxError(str(e))
if with_tree:
return self.compile_node(root), root
else:
return self.compile_node(root)
def compile_node(self, node):
"""Compiles a node, recursively.
This is one big switch on the node type.
"""
# XXX Optimize certain Wildcard-containing-Wildcard patterns
# that can be merged
if node.type == self.syms.Matcher:
node = node.children[0] # Avoid unneeded recursion
if node.type == self.syms.Alternatives:
# Skip the odd children since they are just '|' tokens
alts = [self.compile_node(ch) for ch in node.children[::2]]
if len(alts) == 1:
return alts[0]
p = pytree.WildcardPattern([[a] for a in alts], min=1, max=1)
return p.optimize()
if node.type == self.syms.Alternative:
units = [self.compile_node(ch) for ch in node.children]
if len(units) == 1:
return units[0]
p = pytree.WildcardPattern([units], min=1, max=1)
return p.optimize()
if node.type == self.syms.NegatedUnit:
pattern = self.compile_basic(node.children[1:])
p = pytree.NegatedPattern(pattern)
return p.optimize()
assert node.type == self.syms.Unit
name = None
nodes = node.children
if len(nodes) >= 3 and nodes[1].type == token.EQUAL:
name = nodes[0].value
nodes = nodes[2:]
repeat = None
if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater:
repeat = nodes[-1]
nodes = nodes[:-1]
# Now we've reduced it to: STRING | NAME [Details] | (...) | [...]
pattern = self.compile_basic(nodes, repeat)
if repeat is not None:
assert repeat.type == self.syms.Repeater
children = repeat.children
child = children[0]
if child.type == token.STAR:
min = 0
max = pytree.HUGE
elif child.type == token.PLUS:
min = 1
max = pytree.HUGE
elif child.type == token.LBRACE:
assert children[-1].type == token.RBRACE
assert len(children) in (3, 5)
min = max = self.get_int(children[1])
if len(children) == 5:
max = self.get_int(children[3])
else:
assert False
if min != 1 or max != 1:
pattern = pattern.optimize()
pattern = pytree.WildcardPattern([[pattern]], min=min, max=max)
if name is not None:
pattern.name = name
return pattern.optimize()
def compile_basic(self, nodes, repeat=None):
# Compile STRING | NAME [Details] | (...) | [...]
assert len(nodes) >= 1
node = nodes[0]
if node.type == token.STRING:
value = unicode(literals.evalString(node.value))
return pytree.LeafPattern(_type_of_literal(value), value)
elif node.type == token.NAME:
value = node.value
if value.isupper():
if value not in TOKEN_MAP:
raise PatternSyntaxError("Invalid token: %r" % value)
if nodes[1:]:
raise PatternSyntaxError("Can't have details for token")
return pytree.LeafPattern(TOKEN_MAP[value])
else:
if value == "any":
type = None
elif not value.startswith("_"):
type = getattr(self.pysyms, value, None)
if type is None:
raise PatternSyntaxError("Invalid symbol: %r" % value)
if nodes[1:]: # Details present
content = [self.compile_node(nodes[1].children[1])]
else:
content = None
return pytree.NodePattern(type, content)
elif node.value == "(":
return self.compile_node(nodes[1])
elif node.value == "[":
assert repeat is None
subpattern = self.compile_node(nodes[1])
return pytree.WildcardPattern([[subpattern]], min=0, max=1)
assert False, node
def get_int(self, node):
assert node.type == token.NUMBER
return int(node.value)
# Map named tokens to the type value for a LeafPattern
TOKEN_MAP = {"NAME": token.NAME,
"STRING": token.STRING,
"NUMBER": token.NUMBER,
"TOKEN": None}
def _type_of_literal(value):
if value[0].isalpha():
return token.NAME
elif value in grammar.opmap:
return grammar.opmap[value]
else:
return None
def pattern_convert(grammar, raw_node_info):
"""Converts raw node information to a Node or Leaf instance."""
type, value, context, children = raw_node_info
if children or type in grammar.number2symbol:
return pytree.Node(type, children, context=context)
else:
return pytree.Leaf(type, value, context=context)
def compile_pattern(pattern):
return PatternCompiler().compile_pattern(pattern)
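# Illustrative sketch (not part of the original module): compiling a pattern
# and matching it against a parsed tree. The pattern string is just an
# example; any pattern written in PatternGrammar.txt syntax works the same way.
#
# pattern = compile_pattern("power< 'print' trailer< '(' args=any* ')' > >")
# results = {}
# if pattern.match(node, results):  # ``node`` is a pytree node from a parsed file
#     print results['args']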
|
mit
|
marcuskelly/recover
|
Lib/site-packages/werkzeug/contrib/limiter.py
|
365
|
1334
|
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.limiter
~~~~~~~~~~~~~~~~~~~~~~~~
A middleware that limits incoming data. This works around problems with
Trac_ or Django_ because those stream request data directly into memory.
.. _Trac: http://trac.edgewall.org/
.. _Django: http://www.djangoproject.com/
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from warnings import warn
from werkzeug.wsgi import LimitedStream
class StreamLimitMiddleware(object):
"""Limits the input stream to a given number of bytes. This is useful if
you have a WSGI application that reads form data into memory (django for
example) and you don't want users to harm the server by uploading tons of
data.
The default limit is 10 MB.
.. versionchanged:: 0.9
Deprecated middleware.
"""
def __init__(self, app, maximum_size=1024 * 1024 * 10):
warn(DeprecationWarning('This middleware is deprecated'))
self.app = app
self.maximum_size = maximum_size
def __call__(self, environ, start_response):
limit = min(self.maximum_size, int(environ.get('CONTENT_LENGTH') or 0))
environ['wsgi.input'] = LimitedStream(environ['wsgi.input'], limit)
return self.app(environ, start_response)
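# Illustrative sketch (not part of the original module): wrapping a WSGI app.
# ``application`` stands in for any WSGI callable; the 5 MB cap is arbitrary.
#
# from werkzeug.contrib.limiter import StreamLimitMiddleware
# application = StreamLimitMiddleware(application, maximum_size=1024 * 1024 * 5)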
|
bsd-2-clause
|
sasukeh/neutron
|
neutron/tests/unit/agent/linux/test_keepalived.py
|
15
|
11362
|
# Copyright (C) 2014 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from neutron.agent.linux import keepalived
from neutron.common import constants as n_consts
from neutron.tests import base
# Keepalived user guide:
# http://www.keepalived.org/pdf/UserGuide.pdf
class KeepalivedGetFreeRangeTestCase(base.BaseTestCase):
def test_get_free_range(self):
free_range = keepalived.get_free_range(
parent_range='169.254.0.0/16',
excluded_ranges=['169.254.0.0/24',
'169.254.1.0/24',
'169.254.2.0/24'],
size=24)
self.assertEqual('169.254.3.0/24', free_range)
def test_get_free_range_without_excluded(self):
free_range = keepalived.get_free_range(
parent_range='169.254.0.0/16',
excluded_ranges=[],
size=20)
self.assertEqual('169.254.0.0/20', free_range)
def test_get_free_range_excluded_out_of_parent(self):
free_range = keepalived.get_free_range(
parent_range='169.254.0.0/16',
excluded_ranges=['255.255.255.0/24'],
size=24)
self.assertEqual('169.254.0.0/24', free_range)
def test_get_free_range_not_found(self):
tiny_parent_range = '192.168.1.0/24'
huge_size = 8
with testtools.ExpectedException(ValueError):
keepalived.get_free_range(
parent_range=tiny_parent_range,
excluded_ranges=[],
size=huge_size)
class KeepalivedConfBaseMixin(object):
def _get_config(self):
config = keepalived.KeepalivedConf()
instance1 = keepalived.KeepalivedInstance('MASTER', 'eth0', 1,
['169.254.192.0/18'],
advert_int=5)
instance1.set_authentication('AH', 'pass123')
instance1.track_interfaces.append("eth0")
vip_address1 = keepalived.KeepalivedVipAddress('192.168.1.0/24',
'eth1')
vip_address2 = keepalived.KeepalivedVipAddress('192.168.2.0/24',
'eth2')
vip_address3 = keepalived.KeepalivedVipAddress('192.168.3.0/24',
'eth2')
vip_address_ex = keepalived.KeepalivedVipAddress('192.168.55.0/24',
'eth10')
instance1.vips.append(vip_address1)
instance1.vips.append(vip_address2)
instance1.vips.append(vip_address3)
instance1.vips.append(vip_address_ex)
virtual_route = keepalived.KeepalivedVirtualRoute(n_consts.IPv4_ANY,
"192.168.1.1",
"eth1")
instance1.virtual_routes.gateway_routes = [virtual_route]
instance2 = keepalived.KeepalivedInstance('MASTER', 'eth4', 2,
['169.254.192.0/18'],
mcast_src_ip='224.0.0.1')
instance2.track_interfaces.append("eth4")
vip_address1 = keepalived.KeepalivedVipAddress('192.168.3.0/24',
'eth6')
instance2.vips.append(vip_address1)
instance2.vips.append(vip_address2)
instance2.vips.append(vip_address_ex)
config.add_instance(instance1)
config.add_instance(instance2)
return config
class KeepalivedConfTestCase(base.BaseTestCase,
KeepalivedConfBaseMixin):
expected = """vrrp_instance VR_1 {
state MASTER
interface eth0
virtual_router_id 1
priority 50
garp_master_repeat 5
garp_master_refresh 10
advert_int 5
authentication {
auth_type AH
auth_pass pass123
}
track_interface {
eth0
}
virtual_ipaddress {
169.254.0.1/24 dev eth0
}
virtual_ipaddress_excluded {
192.168.1.0/24 dev eth1
192.168.2.0/24 dev eth2
192.168.3.0/24 dev eth2
192.168.55.0/24 dev eth10
}
virtual_routes {
0.0.0.0/0 via 192.168.1.1 dev eth1
}
}
vrrp_instance VR_2 {
state MASTER
interface eth4
virtual_router_id 2
priority 50
garp_master_repeat 5
garp_master_refresh 10
mcast_src_ip 224.0.0.1
track_interface {
eth4
}
virtual_ipaddress {
169.254.0.2/24 dev eth4
}
virtual_ipaddress_excluded {
192.168.2.0/24 dev eth2
192.168.3.0/24 dev eth6
192.168.55.0/24 dev eth10
}
}"""
def test_config_generation(self):
config = self._get_config()
self.assertEqual(self.expected, config.get_config_str())
def test_config_with_reset(self):
config = self._get_config()
self.assertEqual(self.expected, config.get_config_str())
config.reset()
self.assertEqual('', config.get_config_str())
def test_get_existing_vip_ip_addresses_returns_list(self):
config = self._get_config()
instance = config.get_instance(1)
current_vips = sorted(instance.get_existing_vip_ip_addresses('eth2'))
self.assertEqual(['192.168.2.0/24', '192.168.3.0/24'], current_vips)
class KeepalivedStateExceptionTestCase(base.BaseTestCase):
def test_state_exception(self):
invalid_vrrp_state = 'a seal walks'
self.assertRaises(keepalived.InvalidInstanceStateException,
keepalived.KeepalivedInstance,
invalid_vrrp_state, 'eth0', 33,
['169.254.192.0/18'])
invalid_auth_type = 'into a club'
instance = keepalived.KeepalivedInstance('MASTER', 'eth0', 1,
['169.254.192.0/18'])
self.assertRaises(keepalived.InvalidAuthenticationTypeException,
instance.set_authentication,
invalid_auth_type, 'some_password')
class KeepalivedInstanceRoutesTestCase(base.BaseTestCase):
@classmethod
def _get_instance_routes(cls):
routes = keepalived.KeepalivedInstanceRoutes()
default_gw_eth0 = keepalived.KeepalivedVirtualRoute(
'0.0.0.0/0', '1.0.0.254', 'eth0')
default_gw_eth1 = keepalived.KeepalivedVirtualRoute(
'::/0', 'fe80::3e97:eff:fe26:3bfa/64', 'eth1')
routes.gateway_routes = [default_gw_eth0, default_gw_eth1]
extra_routes = [
keepalived.KeepalivedVirtualRoute('10.0.0.0/8', '1.0.0.1'),
keepalived.KeepalivedVirtualRoute('20.0.0.0/8', '2.0.0.2')]
routes.extra_routes = extra_routes
extra_subnets = [
keepalived.KeepalivedVirtualRoute(
'30.0.0.0/8', None, 'eth0', scope='link')]
routes.extra_subnets = extra_subnets
return routes
def test_routes(self):
routes = self._get_instance_routes()
self.assertEqual(len(routes.routes), 5)
def test_remove_routes_on_interface(self):
routes = self._get_instance_routes()
routes.remove_routes_on_interface('eth0')
self.assertEqual(len(routes.routes), 3)
routes.remove_routes_on_interface('eth1')
self.assertEqual(len(routes.routes), 2)
def test_build_config(self):
expected = """ virtual_routes {
0.0.0.0/0 via 1.0.0.254 dev eth0
::/0 via fe80::3e97:eff:fe26:3bfa/64 dev eth1
10.0.0.0/8 via 1.0.0.1
20.0.0.0/8 via 2.0.0.2
30.0.0.0/8 dev eth0 scope link
}"""
routes = self._get_instance_routes()
self.assertEqual(expected, '\n'.join(routes.build_config()))
class KeepalivedInstanceTestCase(base.BaseTestCase,
KeepalivedConfBaseMixin):
def test_get_primary_vip(self):
instance = keepalived.KeepalivedInstance('MASTER', 'ha0', 42,
['169.254.192.0/18'])
self.assertEqual('169.254.0.42/24', instance.get_primary_vip())
def test_remove_addresses_by_interface(self):
config = self._get_config()
instance = config.get_instance(1)
instance.remove_vips_vroutes_by_interface('eth2')
instance.remove_vips_vroutes_by_interface('eth10')
expected = """vrrp_instance VR_1 {
state MASTER
interface eth0
virtual_router_id 1
priority 50
garp_master_repeat 5
garp_master_refresh 10
advert_int 5
authentication {
auth_type AH
auth_pass pass123
}
track_interface {
eth0
}
virtual_ipaddress {
169.254.0.1/24 dev eth0
}
virtual_ipaddress_excluded {
192.168.1.0/24 dev eth1
}
virtual_routes {
0.0.0.0/0 via 192.168.1.1 dev eth1
}
}
vrrp_instance VR_2 {
state MASTER
interface eth4
virtual_router_id 2
priority 50
garp_master_repeat 5
garp_master_refresh 10
mcast_src_ip 224.0.0.1
track_interface {
eth4
}
virtual_ipaddress {
169.254.0.2/24 dev eth4
}
virtual_ipaddress_excluded {
192.168.2.0/24 dev eth2
192.168.3.0/24 dev eth6
192.168.55.0/24 dev eth10
}
}"""
self.assertEqual(expected, config.get_config_str())
def test_build_config_no_vips(self):
expected = """vrrp_instance VR_1 {
state MASTER
interface eth0
virtual_router_id 1
priority 50
garp_master_repeat 5
garp_master_refresh 10
virtual_ipaddress {
169.254.0.1/24 dev eth0
}
}"""
instance = keepalived.KeepalivedInstance(
'MASTER', 'eth0', 1, ['169.254.192.0/18'])
self.assertEqual(expected, '\n'.join(instance.build_config()))
class KeepalivedVipAddressTestCase(base.BaseTestCase):
def test_vip_with_scope(self):
vip = keepalived.KeepalivedVipAddress('fe80::3e97:eff:fe26:3bfa/64',
'eth1',
'link')
self.assertEqual('fe80::3e97:eff:fe26:3bfa/64 dev eth1 scope link',
vip.build_config())
class KeepalivedVirtualRouteTestCase(base.BaseTestCase):
def test_virtual_route_with_dev(self):
route = keepalived.KeepalivedVirtualRoute(n_consts.IPv4_ANY, '1.2.3.4',
'eth0')
self.assertEqual('0.0.0.0/0 via 1.2.3.4 dev eth0',
route.build_config())
def test_virtual_route_without_dev(self):
route = keepalived.KeepalivedVirtualRoute('50.0.0.0/8', '1.2.3.4')
self.assertEqual('50.0.0.0/8 via 1.2.3.4', route.build_config())
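# Illustrative sketch (not part of the original tests): the minimal flow the
# mixin above exercises -- build a config object, add an instance and render it.
# The interface name and virtual router id used here are arbitrary.
#
# config = keepalived.KeepalivedConf()
# instance = keepalived.KeepalivedInstance('MASTER', 'eth0', 1,
#                                          ['169.254.192.0/18'])
# config.add_instance(instance)
# print(config.get_config_str())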
|
apache-2.0
|
bollu/vispy
|
examples/demo/gloo/shadertoy.py
|
18
|
11859
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vispy: gallery 2, testskip
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Shadertoy demo. You can copy-paste shader code from an example on
www.shadertoy.com and run the demo.
TODO: support cubes and videos as channel inputs (currently, only images
are supported).
"""
# NOTE: This example throws warnings about variables not being used;
# this is normal because only some shadertoy examples make use of all
# variables, and the GPU may compile some of them away.
import sys
from datetime import datetime, time
import numpy as np
from vispy import gloo
from vispy import app
vertex = """
#version 120
attribute vec2 position;
void main()
{
gl_Position = vec4(position, 0.0, 1.0);
}
"""
fragment = """
#version 120
uniform vec3 iResolution; // viewport resolution (in pixels)
uniform float iGlobalTime; // shader playback time (in seconds)
uniform vec4 iMouse; // mouse pixel coords
uniform vec4 iDate; // (year, month, day, time in seconds)
uniform float iSampleRate; // sound sample rate (i.e., 44100)
uniform sampler2D iChannel0; // input channel. XX = 2D/Cube
uniform sampler2D iChannel1; // input channel. XX = 2D/Cube
uniform sampler2D iChannel2; // input channel. XX = 2D/Cube
uniform sampler2D iChannel3; // input channel. XX = 2D/Cube
uniform vec3 iChannelResolution[4]; // channel resolution (in pixels)
uniform float iChannelTime[4]; // channel playback time (in sec)
%s
"""
def get_idate():
now = datetime.now()
utcnow = datetime.utcnow()
midnight_utc = datetime.combine(utcnow.date(), time(0))
delta = utcnow - midnight_utc
return (now.year, now.month, now.day, delta.seconds)
def noise(resolution=64, nchannels=1):
# Random texture.
return np.random.randint(low=0, high=256,
size=(resolution, resolution, nchannels)
).astype(np.uint8)
class Canvas(app.Canvas):
def __init__(self, shadertoy=None):
app.Canvas.__init__(self, keys='interactive')
if shadertoy is None:
shadertoy = """
void main(void)
{
vec2 uv = gl_FragCoord.xy / iResolution.xy;
gl_FragColor = vec4(uv,0.5+0.5*sin(iGlobalTime),1.0);
}"""
self.program = gloo.Program(vertex, fragment % shadertoy)
self.program["position"] = [(-1, -1), (-1, 1), (1, 1),
(-1, -1), (1, 1), (1, -1)]
self.program['iMouse'] = 0, 0, 0, 0
self.program['iSampleRate'] = 44100.
for i in range(4):
self.program['iChannelTime[%d]' % i] = 0.
self.program['iGlobalTime'] = 0.
self.activate_zoom()
self._timer = app.Timer('auto', connect=self.on_timer, start=True)
self.show()
def set_channel_input(self, img, i=0):
tex = gloo.Texture2D(img)
tex.interpolation = 'linear'
tex.wrapping = 'repeat'
self.program['iChannel%d' % i] = tex
self.program['iChannelResolution[%d]' % i] = img.shape
def on_draw(self, event):
self.program.draw()
def on_mouse_click(self, event):
# BUG: DOES NOT WORK YET, NO CLICK EVENT IN VISPY FOR NOW...
imouse = event.pos + event.pos
self.program['iMouse'] = imouse
def on_mouse_move(self, event):
if event.is_dragging:
x, y = event.pos
px, py = event.press_event.pos
imouse = (x, self.size[1] - y, px, self.size[1] - py)
self.program['iMouse'] = imouse
def on_timer(self, event):
self.program['iGlobalTime'] = event.elapsed
self.program['iDate'] = get_idate() # used in some shadertoy exs
self.update()
def on_resize(self, event):
self.activate_zoom()
def activate_zoom(self):
gloo.set_viewport(0, 0, *self.physical_size)
self.program['iResolution'] = (self.physical_size[0],
self.physical_size[1], 0.)
# -------------------------------------------------------------------------
# COPY-PASTE SHADERTOY CODE BELOW
# -------------------------------------------------------------------------
SHADERTOY = """
// From: https://www.shadertoy.com/view/MdX3Rr
// Created by inigo quilez - iq/2013
// License Creative Commons Attribution-NonCommercial-ShareAlike 3.0
// Unported License.
//stereo thanks to Croqueteer
//#define STEREO
// value noise, and its analytical derivatives
vec3 noised( in vec2 x )
{
vec2 p = floor(x);
vec2 f = fract(x);
vec2 u = f*f*(3.0-2.0*f);
float a = texture2D(iChannel0,(p+vec2(0.5,0.5))/256.0,-100.0).x;
float b = texture2D(iChannel0,(p+vec2(1.5,0.5))/256.0,-100.0).x;
float c = texture2D(iChannel0,(p+vec2(0.5,1.5))/256.0,-100.0).x;
float d = texture2D(iChannel0,(p+vec2(1.5,1.5))/256.0,-100.0).x;
return vec3(a+(b-a)*u.x+(c-a)*u.y+(a-b-c+d)*u.x*u.y,
6.0*f*(1.0-f)*(vec2(b-a,c-a)+(a-b-c+d)*u.yx));
}
const mat2 m2 = mat2(0.8,-0.6,0.6,0.8);
float terrain( in vec2 x )
{
vec2 p = x*0.003;
float a = 0.0;
float b = 1.0;
vec2 d = vec2(0.0);
for( int i=0; i<6; i++ )
{
vec3 n = noised(p);
d += n.yz;
a += b*n.x/(1.0+dot(d,d));
b *= 0.5;
p = m2*p*2.0;
}
return 140.0*a;
}
float terrain2( in vec2 x )
{
vec2 p = x*0.003;
float a = 0.0;
float b = 1.0;
vec2 d = vec2(0.0);
for( int i=0; i<14; i++ )
{
vec3 n = noised(p);
d += n.yz;
a += b*n.x/(1.0+dot(d,d));
b *= 0.5;
p=m2*p*2.0;
}
return 140.0*a;
}
float terrain3( in vec2 x )
{
vec2 p = x*0.003;
float a = 0.0;
float b = 1.0;
vec2 d = vec2(0.0);
for( int i=0; i<4; i++ )
{
vec3 n = noised(p);
d += n.yz;
a += b*n.x/(1.0+dot(d,d));
b *= 0.5;
p = m2*p*2.0;
}
return 140.0*a;
}
float map( in vec3 p )
{
float h = terrain(p.xz);
return p.y - h;
}
float map2( in vec3 p )
{
float h = terrain2(p.xz);
return p.y - h;
}
float interesct( in vec3 ro, in vec3 rd )
{
float h = 1.0;
float t = 1.0;
for( int i=0; i<120; i++ )
{
if( h<0.01 || t>2000.0 ) break;
t += 0.5*h;
h = map( ro + t*rd );
}
if( t>2000.0 ) t = -1.0;
return t;
}
float sinteresct(in vec3 ro, in vec3 rd )
{
#if 0
// no shadows
return 1.0;
#endif
#if 0
// fake shadows
vec3 nor;
vec3 eps = vec3(20.0,0.0,0.0);
nor.x = terrain3(ro.xz-eps.xy) - terrain3(ro.xz+eps.xy);
nor.y = 1.0*eps.x;
nor.z = terrain3(ro.xz-eps.yx) - terrain3(ro.xz+eps.yx);
nor = normalize(nor);
return clamp( 4.0*dot(nor,rd), 0.0, 1.0 );
#endif
#if 1
// real shadows
float res = 1.0;
float t = 0.0;
for( int j=0; j<48; j++ )
{
vec3 p = ro + t*rd;
float h = map( p );
res = min( res, 16.0*h/t );
t += h;
if( res<0.001 ||p.y>300.0 ) break;
}
return clamp( res, 0.0, 1.0 );
#endif
}
vec3 calcNormal( in vec3 pos, float t )
{
float e = 0.001;
e = 0.001*t;
vec3 eps = vec3(e,0.0,0.0);
vec3 nor;
#if 0
nor.x = map2(pos+eps.xyy) - map2(pos-eps.xyy);
nor.y = map2(pos+eps.yxy) - map2(pos-eps.yxy);
nor.z = map2(pos+eps.yyx) - map2(pos-eps.yyx);
#else
nor.x = terrain2(pos.xz-eps.xy) - terrain2(pos.xz+eps.xy);
nor.y = 2.0*e;
nor.z = terrain2(pos.xz-eps.yx) - terrain2(pos.xz+eps.yx);
#endif
return normalize(nor);
}
vec3 camPath( float time )
{
vec2 p = 1100.0*vec2( cos(0.0+0.23*time), cos(1.5+0.21*time) );
return vec3( p.x, 0.0, p.y );
}
float fbm( vec2 p )
{
float f = 0.0;
f += 0.5000*texture2D( iChannel0, p/256.0 ).x; p = m2*p*2.02;
f += 0.2500*texture2D( iChannel0, p/256.0 ).x; p = m2*p*2.03;
f += 0.1250*texture2D( iChannel0, p/256.0 ).x; p = m2*p*2.01;
f += 0.0625*texture2D( iChannel0, p/256.0 ).x;
return f/0.9375;
}
void main(void)
{
vec2 xy = -1.0 + 2.0*gl_FragCoord.xy / iResolution.xy;
vec2 s = xy*vec2(iResolution.x/iResolution.y,1.0);
#ifdef STEREO
float isCyan = mod(gl_FragCoord.x + mod(gl_FragCoord.y,2.0),2.0);
#endif
float time = iGlobalTime*0.15 + 0.3 + 4.0*iMouse.x/iResolution.x;
vec3 light1 = normalize( vec3(-0.8,0.4,-0.3) );
vec3 ro = camPath( time );
vec3 ta = camPath( time + 3.0 );
ro.y = terrain3( ro.xz ) + 11.0;
ta.y = ro.y - 20.0;
float cr = 0.2*cos(0.1*time);
vec3 cw = normalize(ta-ro);
vec3 cp = vec3(sin(cr), cos(cr),0.0);
vec3 cu = normalize( cross(cw,cp) );
vec3 cv = normalize( cross(cu,cw) );
vec3 rd = normalize( s.x*cu + s.y*cv + 2.0*cw );
#ifdef STEREO
ro += 2.0*cu*isCyan;
#endif
float sundot = clamp(dot(rd,light1),0.0,1.0);
vec3 col;
float t = interesct( ro, rd );
if( t<0.0 )
{
// sky
col = vec3(0.3,.55,0.8)*(1.0-0.8*rd.y);
col += 0.25*vec3(1.0,0.7,0.4)*pow( sundot,5.0 );
col += 0.25*vec3(1.0,0.8,0.6)*pow( sundot,64.0 );
col += 0.2*vec3(1.0,0.8,0.6)*pow( sundot,512.0 );
vec2 sc = ro.xz + rd.xz*(1000.0-ro.y)/rd.y;
col = mix( col, vec3(1.0,0.95,1.0),
0.5*smoothstep(0.5,0.8,fbm(0.0005*sc)) );
}
else
{
// mountains
vec3 pos = ro + t*rd;
vec3 nor = calcNormal( pos, t );
float r = texture2D( iChannel0, 7.0*pos.xz/256.0 ).x;
col = (r*0.25+0.75)*0.9*mix( vec3(0.08,0.05,0.03),
vec3(0.10,0.09,0.08), texture2D(iChannel0,0.00007*vec2(
pos.x,pos.y*48.0)).x );
col = mix( col, 0.20*vec3(0.45,.30,0.15)*(0.50+0.50*r),
smoothstep(0.70,0.9,nor.y) );
col = mix( col, 0.15*vec3(0.30,.30,0.10)*(0.25+0.75*r),
smoothstep(0.95,1.0,nor.y) );
// snow
float h = smoothstep(55.0,80.0,pos.y + 25.0*fbm(0.01*pos.xz) );
float e = smoothstep(1.0-0.5*h,1.0-0.1*h,nor.y);
float o = 0.3 + 0.7*smoothstep(0.0,0.1,nor.x+h*h);
float s = h*e*o;
col = mix( col, 0.29*vec3(0.62,0.65,0.7), smoothstep(
0.1, 0.9, s ) );
// lighting
float amb = clamp(0.5+0.5*nor.y,0.0,1.0);
float dif = clamp( dot( light1, nor ), 0.0, 1.0 );
float bac = clamp( 0.2 + 0.8*dot( normalize(
vec3(-light1.x, 0.0, light1.z ) ), nor ), 0.0, 1.0 );
float sh = 1.0; if( dif>=0.0001 ) sh = sinteresct(
pos+light1*20.0,light1);
vec3 lin = vec3(0.0);
lin += dif*vec3(7.00,5.00,3.00)*vec3( sh, sh*sh*0.5+0.5*sh,
sh*sh*0.8+0.2*sh );
lin += amb*vec3(0.40,0.60,0.80)*1.5;
lin += bac*vec3(0.40,0.50,0.60);
col *= lin;
float fo = 1.0-exp(-0.0005*t);
vec3 fco = 0.55*vec3(0.55,0.65,0.75) + 0.1*vec3(1.0,0.8,0.5)*pow(
sundot, 4.0 );
col = mix( col, fco, fo );
col += 0.3*vec3(1.0,0.8,0.4)*pow( sundot,
8.0 )*(1.0-exp(-0.002*t));
}
col = pow(col,vec3(0.4545));
// vignetting
col *= 0.5 + 0.5*pow( (xy.x+1.0)*(xy.y+1.0)*(xy.x-1.0)*(xy.y-1.0),
0.1 );
#ifdef STEREO
col *= vec3( isCyan, 1.0-isCyan, 1.0-isCyan );
#endif
// col *= smoothstep( 0.0, 2.0, iGlobalTime );
gl_FragColor=vec4(col,1.0);
}
"""
# -------------------------------------------------------------------------
canvas = Canvas(SHADERTOY)
# Input data.
canvas.set_channel_input(noise(resolution=256, nchannels=1), i=0)
if __name__ == '__main__':
canvas.show()
if sys.flags.interactive == 0:
canvas.app.run()
|
bsd-3-clause
|
eternalthinker/flask-server-rq-example
|
venv/lib/python2.7/site-packages/pip/_vendor/requests/auth.py
|
120
|
6669
|
# -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
from base64 import b64encode
from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header, to_native_string
from .status_codes import codes
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
authstr = 'Basic ' + to_native_string(
b64encode(('%s:%s' % (username, password)).encode('latin1')).strip()
)
return authstr
class AuthBase(object):
"""Base class that all auth implementations derive from"""
def __call__(self, r):
raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
"""Attaches HTTP Basic Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
def __call__(self, r):
r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPProxyAuth(HTTPBasicAuth):
"""Attaches HTTP Proxy Authentication to a given Request object."""
def __call__(self, r):
r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPDigestAuth(AuthBase):
"""Attaches HTTP Digest Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
self.last_nonce = ''
self.nonce_count = 0
self.chal = {}
self.pos = None
self.num_401_calls = 1
def build_digest_header(self, method, url):
realm = self.chal['realm']
nonce = self.chal['nonce']
qop = self.chal.get('qop')
algorithm = self.chal.get('algorithm')
opaque = self.chal.get('opaque')
if algorithm is None:
_algorithm = 'MD5'
else:
_algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
elif _algorithm == 'SHA':
def sha_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
return None
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
path = p_parsed.path
if p_parsed.query:
path += '?' + p_parsed.query
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (method, path)
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
ncvalue = '%08x' % self.nonce_count
s = str(self.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2)
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if qop is None:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
self.last_nonce = nonce
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (self.username, realm, nonce, path, respdig)
if opaque:
base += ', opaque="%s"' % opaque
if algorithm:
base += ', algorithm="%s"' % algorithm
if entdig:
base += ', digest="%s"' % entdig
if qop:
base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return 'Digest %s' % (base)
def handle_redirect(self, r, **kwargs):
"""Reset num_401_calls counter on redirects."""
if r.is_redirect:
self.num_401_calls = 1
def handle_401(self, r, **kwargs):
"""Takes the given response and tries digest-auth, if needed."""
if self.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self.pos)
num_401_calls = getattr(self, 'num_401_calls', 1)
s_auth = r.headers.get('www-authenticate', '')
if 'digest' in s_auth.lower() and num_401_calls < 2:
self.num_401_calls += 1
pat = re.compile(r'digest ', flags=re.IGNORECASE)
self.chal = parse_dict_header(pat.sub('', s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.raw.release_conn()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers['Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
self.num_401_calls = 1
return r
def __call__(self, r):
# If we have a saved nonce, skip the 401
if self.last_nonce:
r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
try:
self.pos = r.body.tell()
except AttributeError:
# In the case of HTTPDigestAuth being reused and the body of
# the previous request was a file-like object, pos has the
# file position of the previous body. Ensure it's set to
# None.
self.pos = None
r.register_hook('response', self.handle_401)
r.register_hook('response', self.handle_redirect)
return r
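# Illustrative usage sketch (editor's addition, not part of the original module); the
# URLs below are placeholders:
#     import requests
#     requests.get('https://example.com/basic', auth=HTTPBasicAuth('user', 'pass'))
#     requests.get('https://example.com/digest', auth=HTTPDigestAuth('user', 'pass'))
# A plain ('user', 'pass') tuple passed as ``auth`` is shorthand for HTTPBasicAuth.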
|
apache-2.0
|
argivaitv/argivaitv
|
plugin.video.salts/scrapers/furk_scraper.py
|
1
|
10706
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import scraper
import urllib
import urlparse
import json
import re
import xml.etree.ElementTree as ET
from salts_lib import kodi
from salts_lib import log_utils
from salts_lib.trans_utils import i18n
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import QUALITIES
BASE_URL = 'http://www.furk.net'
SEARCH_URL = '/api/plugins/metasearch'
LOGIN_URL = '/api/login/login'
class Furk_Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
self.username = kodi.get_setting('%s-username' % (self.get_name()))
self.password = kodi.get_setting('%s-password' % (self.get_name()))
self.max_results = int(kodi.get_setting('%s-result_limit' % (self.get_name())))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE, VIDEO_TYPES.EPISODE])
@classmethod
def get_name(cls):
return 'Furk.net'
def resolve_link(self, link):
playlist = super(Furk_Scraper, self)._http_get(link, cache_limit=.5)
root = ET.fromstring(playlist)
location = root.find('.//{http://xspf.org/ns/0/}location')
if location is not None:
return location.text
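    # Editor's note (assumption about the upstream response, not from the original source):
    # the playlist fetched above is expected to be an XSPF document roughly of the form
    #     <playlist xmlns="http://xspf.org/ns/0/">
    #       <trackList><track><location>http://.../stream</location></track></trackList>
    #     </playlist>
    # so resolve_link() returns the text of the first namespaced <location> element, if any.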
def format_source_label(self, item):
label = '[%s] %s' % (item['quality'], item['host'])
if 'size' in item:
label += ' (%s)' % (item['size'])
if 'extra' in item:
label += ' [%s]' % (item['extra'])
return label
def get_sources(self, video):
hosters = []
source_url = self.get_url(video)
if source_url and source_url != FORCE_NO_MATCH:
params = urlparse.parse_qs(urlparse.urlparse(source_url).query)
if 'title' in params:
query = params['title'][0]
if video.video_type == VIDEO_TYPES.MOVIE:
if 'year' in params: query += ' %s' % (params['year'][0])
else:
sxe = ''
if 'season' in params:
sxe = 'S%02d' % (int(params['season'][0]))
if 'episode' in params:
sxe += 'E%02d' % (int(params['episode'][0]))
if sxe: query = '%s %s' % (query, sxe)
query = urllib.quote_plus(query)
query_url = '/search?query=%s' % (query)
hosters = self.__get_links(query_url, video)
if not hosters and video.video_type == VIDEO_TYPES.EPISODE and params['air_date'][0]:
query = urllib.quote_plus('%s %s' % (params['title'][0], params['air_date'][0].replace('-', '.')))
query_url = '/search?query=%s' % (query)
hosters = self.__get_links(query_url, video)
return hosters
def __get_links(self, url, video):
hosters = []
search_url = urlparse.urljoin(self.base_url, SEARCH_URL)
query = self.__translate_search(url)
result = self._http_get(search_url, data=query, allow_redirect=False, cache_limit=.5)
if 'files' in result:
for item in result['files']:
checks = [False] * 6
if 'type' not in item or item['type'].upper() != 'VIDEO': checks[0] = True
if 'is_ready' in item and item['is_ready'] != '1': checks[1] = True
if 'av_result' in item and item['av_result'] in ['warning', 'infected']: checks[2] = True
if 'video_info' not in item: checks[3] = True
if 'video_info' in item and item['video_info'] and not re.search('#0:(?:0|1)(?:\(eng\)|\(und\))?:\s*Audio:', item['video_info']): checks[4] = True
if video.video_type == VIDEO_TYPES.EPISODE:
sxe = '[. ][Ss]%02d[Ee]%02d[. ]' % (int(video.season), int(video.episode))
if not re.search(sxe, item['name']):
if video.ep_airdate:
airdate_pattern = '[. ]%s[. ]%02d[. ]%02d[. ]' % (video.ep_airdate.year, video.ep_airdate.month, video.ep_airdate.day)
if not re.search(airdate_pattern, item['name']): checks[5] = True
if any(checks):
log_utils.log('Furk.net result excluded: %s - |%s|' % (checks, item['name']), log_utils.LOGDEBUG)
continue
match = re.search('(\d{3,})\s?x\s?(\d{3,})', item['video_info'])
if match:
width, _ = match.groups()
quality = self._width_get_quality(width)
else:
if video.video_type == VIDEO_TYPES.MOVIE:
_, _, height, _ = self._parse_movie_link(item['name'])
quality = self._height_get_quality(height)
elif video.video_type == VIDEO_TYPES.EPISODE:
_, _, _, height, _ = self._parse_episode_link(item['name'])
if int(height) > -1:
quality = self._height_get_quality(height)
else:
quality = QUALITIES.HIGH
else:
quality = QUALITIES.HIGH
stream_url = item['url_pls']
host = self._get_direct_hostname(stream_url)
hoster = {'multi-part': False, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'host': host, 'quality': quality, 'direct': True}
hoster['size'] = self.__format_size(int(item['size']), 'B')
hoster['extra'] = item['name']
hosters.append(hoster)
return hosters
def get_url(self, video):
url = None
self.create_db_connection()
result = self.db_connection.get_related_url(video.video_type, video.title, video.year, self.get_name(), video.season, video.episode)
if result:
url = result[0][0]
log_utils.log('Got local related url: |%s|%s|%s|%s|%s|' % (video.video_type, video.title, video.year, self.get_name(), url))
else:
if video.video_type == VIDEO_TYPES.MOVIE:
query = 'title=%s&year=%s' % (urllib.quote_plus(video.title), video.year)
else:
query = 'title=%s&season=%s&episode=%s&air_date=%s' % (urllib.quote_plus(video.title), video.season, video.episode, video.ep_airdate)
url = '/search?%s' % (query)
self.db_connection.set_related_url(video.video_type, video.title, video.year, self.get_name(), url)
return url
def search(self, video_type, title, year):
return []
@classmethod
def get_settings(cls):
settings = super(Furk_Scraper, cls).get_settings()
settings = cls._disable_sub_check(settings)
name = cls.get_name()
settings.append(' <setting id="%s-username" type="text" label=" %s" default="" visible="eq(-4,true)"/>' % (name, i18n('username')))
settings.append(' <setting id="%s-password" type="text" label=" %s" option="hidden" default="" visible="eq(-5,true)"/>' % (name, i18n('password')))
settings.append(' <setting id="%s-result_limit" label=" %s" type="slider" default="10" range="10,100" option="int" visible="eq(-6,true)"/>' % (name, i18n('result_limit')))
return settings
def _http_get(self, url, data=None, retry=True, allow_redirect=True, cache_limit=8):
if not self.username or not self.password:
return {}
result = super(Furk_Scraper, self)._http_get(url, data=data, allow_redirect=allow_redirect, cache_limit=cache_limit)
if result:
try:
js_result = json.loads(result)
except ValueError:
if 'msg_key=session_invalid' in result:
log_utils.log('Logging in for url (%s) (Session Expired)' % (url), log_utils.LOGDEBUG)
self.__login()
js_result = self._http_get(url, data=data, retry=False, allow_redirect=allow_redirect, cache_limit=0)
else:
log_utils.log('Invalid JSON returned: %s: %s' % (url, result), log_utils.LOGWARNING)
js_result = {}
else:
if js_result['status'] == 'error':
if retry and js_result['error'] == 'access denied':
log_utils.log('Logging in for url (%s)' % (url), log_utils.LOGDEBUG)
self.__login()
js_result = self._http_get(url, data=data, retry=False, allow_redirect=allow_redirect, cache_limit=0)
else:
log_utils.log('Error received from furk.net (%s)' % (js_result['error']), log_utils.LOGWARNING)
return js_result
def __login(self):
url = urlparse.urljoin(self.base_url, LOGIN_URL)
data = {'login': self.username, 'pwd': self.password}
result = self._http_get(url, data=data, cache_limit=0)
if result['status'] != 'ok':
raise Exception('furk.net login failed: %s' % (result.get('error', 'Unknown Error')))
def __translate_search(self, url):
query = {'sort': 'relevance', 'filter': 'all', 'moderated': 'yes', 'offset': 0, 'limit': self.max_results, 'match': 'all'}
query['q'] = urllib.quote_plus(urlparse.parse_qs(urlparse.urlparse(url).query)['query'][0])
return query
def __format_size(self, num, suffix='B'):
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Y', suffix)
|
gpl-2.0
|
cgwalters/gnome-ostree
|
src/ostbuild/pyostbuild/builtin_deploy_qemu.py
|
3
|
2370
|
# Copyright (C) 2012 Colin Walters <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os,sys,subprocess,tempfile,re,shutil
import argparse
import time
import urlparse
import json
from StringIO import StringIO
from . import builtins
from .ostbuildlog import log, fatal
from . import ostbuildrc
from . import privileged_subproc
class OstbuildDeployQemu(builtins.Builtin):
name = "deploy-qemu"
short_description = "Extract data from shadow repository to qemu"
def __init__(self):
builtins.Builtin.__init__(self)
def execute(self, argv):
parser = argparse.ArgumentParser(description=self.short_description)
parser.add_argument('--prefix')
parser.add_argument('--snapshot')
parser.add_argument('targets', nargs='*')
args = parser.parse_args(argv)
self.args = args
self.parse_config()
self.parse_snapshot(args.prefix, args.snapshot)
if len(args.targets) > 0:
targets = args.targets
else:
targets = []
prefix = self.snapshot['prefix']
for target_component_type in ['runtime', 'devel']:
for architecture in self.snapshot['architectures']:
name = '%s-%s-%s' % (prefix, architecture, target_component_type)
targets.append(name)
helper = privileged_subproc.PrivilegedSubprocess()
shadow_path = os.path.join(self.workdir, 'shadow-repo')
child_args = ['ostbuild', 'privhelper-deploy-qemu', shadow_path]
child_args.extend(targets)
helper.spawn_sync(child_args)
builtins.register(OstbuildDeployQemu)
|
lgpl-2.1
|
KrzysztofStachanczyk/Sensors-WWW-website
|
www/env/lib/python2.7/site-packages/django/conf/urls/__init__.py
|
133
|
3546
|
import warnings
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from django.urls import (
LocaleRegexURLResolver, RegexURLPattern, RegexURLResolver,
)
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
__all__ = ['handler400', 'handler403', 'handler404', 'handler500', 'include', 'url']
handler400 = 'django.views.defaults.bad_request'
handler403 = 'django.views.defaults.permission_denied'
handler404 = 'django.views.defaults.page_not_found'
handler500 = 'django.views.defaults.server_error'
def include(arg, namespace=None, app_name=None):
if app_name and not namespace:
raise ValueError('Must specify a namespace if specifying app_name.')
if app_name:
warnings.warn(
'The app_name argument to django.conf.urls.include() is deprecated. '
'Set the app_name in the included URLconf instead.',
RemovedInDjango20Warning, stacklevel=2
)
if isinstance(arg, tuple):
# callable returning a namespace hint
try:
urlconf_module, app_name = arg
except ValueError:
if namespace:
raise ImproperlyConfigured(
'Cannot override the namespace for a dynamic module that provides a namespace'
)
warnings.warn(
'Passing a 3-tuple to django.conf.urls.include() is deprecated. '
'Pass a 2-tuple containing the list of patterns and app_name, '
'and provide the namespace argument to include() instead.',
RemovedInDjango20Warning, stacklevel=2
)
urlconf_module, app_name, namespace = arg
else:
# No namespace hint - use manually provided namespace
urlconf_module = arg
if isinstance(urlconf_module, six.string_types):
urlconf_module = import_module(urlconf_module)
patterns = getattr(urlconf_module, 'urlpatterns', urlconf_module)
app_name = getattr(urlconf_module, 'app_name', app_name)
if namespace and not app_name:
warnings.warn(
'Specifying a namespace in django.conf.urls.include() without '
'providing an app_name is deprecated. Set the app_name attribute '
'in the included module, or pass a 2-tuple containing the list of '
'patterns and app_name instead.',
RemovedInDjango20Warning, stacklevel=2
)
namespace = namespace or app_name
# Make sure we can iterate through the patterns (without this, some
# testcases will break).
if isinstance(patterns, (list, tuple)):
for url_pattern in patterns:
# Test if the LocaleRegexURLResolver is used within the include;
# this should throw an error since this is not allowed!
if isinstance(url_pattern, LocaleRegexURLResolver):
raise ImproperlyConfigured(
'Using i18n_patterns in an included URLconf is not allowed.')
return (urlconf_module, app_name, namespace)
def url(regex, view, kwargs=None, name=None):
if isinstance(view, (list, tuple)):
# For include(...) processing.
urlconf_module, app_name, namespace = view
return RegexURLResolver(regex, urlconf_module, kwargs, app_name=app_name, namespace=namespace)
elif callable(view):
return RegexURLPattern(regex, view, kwargs, name)
else:
raise TypeError('view must be a callable or a list/tuple in the case of include().')
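# Illustrative usage sketch (editor's addition, not part of Django itself); 'blog.urls'
# and views.year_archive are hypothetical:
#     from django.conf.urls import include, url
#     urlpatterns = [
#         url(r'^articles/(?P<year>[0-9]{4})/$', views.year_archive, name='year-archive'),
#         url(r'^blog/', include('blog.urls')),
#     ]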
|
gpl-3.0
|
gylian/sickbeard
|
bs4/__init__.py
|
82
|
12936
|
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup uses a pluggable XML or HTML parser to parse a
(possibly invalid) document into a tree representation. Beautiful Soup
provides methods and Pythonic idioms that make it easy to
navigate, search, and modify the parse tree.
Beautiful Soup works with Python 2.6 and up. It works better if lxml
and/or html5lib is installed.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/bs4/doc/
"""
__author__ = "Leonard Richardson ([email protected])"
__version__ = "4.1.3"
__copyright__ = "Copyright (c) 2004-2012 Leonard Richardson"
__license__ = "MIT"
__all__ = ['BeautifulSoup']
import re
import warnings
from .builder import builder_registry
from .dammit import UnicodeDammit
from .element import (
CData,
Comment,
DEFAULT_OUTPUT_ENCODING,
Declaration,
Doctype,
NavigableString,
PageElement,
ProcessingInstruction,
ResultSet,
SoupStrainer,
Tag,
)
# The very first thing we do is give a useful error if someone is
# running this code under Python 3 without converting it.
syntax_error = u'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work. You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).'
class BeautifulSoup(Tag):
"""
This class defines the basic interface called by the tree builders.
These methods will be called by the parser:
reset()
feed(markup)
The tree builder may call these methods from its feed() implementation:
handle_starttag(name, attrs) # See note about return value
handle_endtag(name)
handle_data(data) # Appends to the current data node
endData(containerClass=NavigableString) # Ends the current data node
No matter how complicated the underlying parser is, you should be
able to build a tree using 'start tag' events, 'end tag' events,
'data' events, and "done with data" events.
If you encounter an empty-element tag (aka a self-closing tag,
like HTML's <br> tag), call handle_starttag and then
handle_endtag.
"""
ROOT_TAG_NAME = u'[document]'
# If the end-user gives no indication which tree builder they
# want, look for one with these features.
DEFAULT_BUILDER_FEATURES = ['html', 'fast']
# Used when determining whether a text node is all whitespace and
# can be replaced with a single space. A text node that contains
# fancy Unicode spaces (usually non-breaking) should be left
# alone.
STRIP_ASCII_SPACES = {9: None, 10: None, 12: None, 13: None, 32: None, }
def __init__(self, markup="", features=None, builder=None,
parse_only=None, from_encoding=None, **kwargs):
"""The Soup object is initialized as the 'root tag', and the
provided markup (which can be a string or a file-like object)
is fed into the underlying parser."""
if 'convertEntities' in kwargs:
warnings.warn(
"BS4 does not respect the convertEntities argument to the "
"BeautifulSoup constructor. Entities are always converted "
"to Unicode characters.")
if 'markupMassage' in kwargs:
del kwargs['markupMassage']
warnings.warn(
"BS4 does not respect the markupMassage argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for any necessary markup massage.")
if 'smartQuotesTo' in kwargs:
del kwargs['smartQuotesTo']
warnings.warn(
"BS4 does not respect the smartQuotesTo argument to the "
"BeautifulSoup constructor. Smart quotes are always converted "
"to Unicode characters.")
if 'selfClosingTags' in kwargs:
del kwargs['selfClosingTags']
warnings.warn(
"BS4 does not respect the selfClosingTags argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for understanding self-closing tags.")
if 'isHTML' in kwargs:
del kwargs['isHTML']
warnings.warn(
"BS4 does not respect the isHTML argument to the "
"BeautifulSoup constructor. You can pass in features='html' "
"or features='xml' to get a builder capable of handling "
"one or the other.")
def deprecated_argument(old_name, new_name):
if old_name in kwargs:
warnings.warn(
'The "%s" argument to the BeautifulSoup constructor '
'has been renamed to "%s."' % (old_name, new_name))
value = kwargs[old_name]
del kwargs[old_name]
return value
return None
parse_only = parse_only or deprecated_argument(
"parseOnlyThese", "parse_only")
from_encoding = from_encoding or deprecated_argument(
"fromEncoding", "from_encoding")
if len(kwargs) > 0:
arg = kwargs.keys().pop()
raise TypeError(
"__init__() got an unexpected keyword argument '%s'" % arg)
if builder is None:
if isinstance(features, basestring):
features = [features]
if features is None or len(features) == 0:
features = self.DEFAULT_BUILDER_FEATURES
builder_class = builder_registry.lookup(*features)
if builder_class is None:
raise FeatureNotFound(
"Couldn't find a tree builder with the features you "
"requested: %s. Do you need to install a parser library?"
% ",".join(features))
builder = builder_class()
self.builder = builder
self.is_xml = builder.is_xml
self.builder.soup = self
self.parse_only = parse_only
self.reset()
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
(self.markup, self.original_encoding, self.declared_html_encoding,
self.contains_replacement_characters) = (
self.builder.prepare_markup(markup, from_encoding))
try:
self._feed()
except StopParsing:
pass
# Clear out the markup and remove the builder's circular
# reference to this object.
self.markup = None
self.builder.soup = None
def _feed(self):
# Convert the document to Unicode.
self.builder.reset()
self.builder.feed(self.markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def reset(self):
Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
self.hidden = 1
self.builder.reset()
self.currentData = []
self.currentTag = None
self.tagStack = []
self.pushTag(self)
def new_tag(self, name, namespace=None, nsprefix=None, **attrs):
"""Create a new tag associated with this soup."""
return Tag(None, self.builder, name, namespace, nsprefix, attrs)
def new_string(self, s):
"""Create a new NavigableString associated with this soup."""
navigable = NavigableString(s)
navigable.setup()
return navigable
def insert_before(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_before().")
def insert_after(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_after().")
def popTag(self):
tag = self.tagStack.pop()
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
def endData(self, containerClass=NavigableString):
if self.currentData:
currentData = u''.join(self.currentData)
if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
not set([tag.name for tag in self.tagStack]).intersection(
self.builder.preserve_whitespace_tags)):
if '\n' in currentData:
currentData = '\n'
else:
currentData = ' '
self.currentData = []
if self.parse_only and len(self.tagStack) <= 1 and \
(not self.parse_only.text or \
not self.parse_only.search(currentData)):
return
o = containerClass(currentData)
self.object_was_parsed(o)
def object_was_parsed(self, o):
"""Add an object to the parse tree."""
o.setup(self.currentTag, self.previous_element)
if self.previous_element:
self.previous_element.next_element = o
self.previous_element = o
self.currentTag.contents.append(o)
def _popToTag(self, name, nsprefix=None, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
return
numPops = 0
mostRecentTag = None
for i in range(len(self.tagStack) - 1, 0, -1):
if (name == self.tagStack[i].name
and nsprefix == self.tagStack[i].prefix):
numPops = len(self.tagStack) - i
break
if not inclusivePop:
numPops = numPops - 1
for i in range(0, numPops):
mostRecentTag = self.popTag()
return mostRecentTag
def handle_starttag(self, name, namespace, nsprefix, attrs):
"""Push a start tag on to the stack.
If this method returns None, the tag was rejected by the
        SoupStrainer. You should proceed as if the tag had not occurred
in the document. For instance, if this was a self-closing tag,
don't call handle_endtag.
"""
# print "Start tag %s: %s" % (name, attrs)
self.endData()
if (self.parse_only and len(self.tagStack) <= 1
and (self.parse_only.text
or not self.parse_only.search_tag(name, attrs))):
return None
tag = Tag(self, self.builder, name, namespace, nsprefix, attrs,
self.currentTag, self.previous_element)
if tag is None:
return tag
if self.previous_element:
self.previous_element.next_element = tag
self.previous_element = tag
self.pushTag(tag)
return tag
def handle_endtag(self, name, nsprefix=None):
#print "End tag: " + name
self.endData()
self._popToTag(name, nsprefix)
def handle_data(self, data):
self.currentData.append(data)
def decode(self, pretty_print=False,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a string or Unicode representation of this document.
To get Unicode, pass None for encoding."""
if self.is_xml:
# Print the XML declaration
encoding_part = ''
if eventual_encoding != None:
encoding_part = ' encoding="%s"' % eventual_encoding
prefix = u'<?xml version="1.0"%s?>\n' % encoding_part
else:
prefix = u''
if not pretty_print:
indent_level = None
else:
indent_level = 0
return prefix + super(BeautifulSoup, self).decode(
indent_level, eventual_encoding, formatter)
class BeautifulStoneSoup(BeautifulSoup):
"""Deprecated interface to an XML parser."""
def __init__(self, *args, **kwargs):
kwargs['features'] = 'xml'
warnings.warn(
'The BeautifulStoneSoup class is deprecated. Instead of using '
'it, pass features="xml" into the BeautifulSoup constructor.')
super(BeautifulStoneSoup, self).__init__(*args, **kwargs)
class StopParsing(Exception):
pass
class FeatureNotFound(ValueError):
pass
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
import sys
soup = BeautifulSoup(sys.stdin)
print soup.prettify()
|
gpl-3.0
|
alfonsokim/nupic
|
tests/external/testfixture_test.py
|
15
|
1744
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Unit tests for our dependencies in the pytest package; at the time of this
writing, we were using an unreleased version of pytest that added support for
the unittest setUpModule fixture and friends. Some of our tests rely on
setUpModule. Once, there was a conflict with pytest installation in our build
system, and an older version of pytest was installed that didn't support
setUpModule, which resulted in subtle side-effects in some of these tests.
"""
import unittest2 as unittest
g_setUpModuleCalled = False
def setUpModule():
global g_setUpModuleCalled
g_setUpModuleCalled = True
class TestPytest(unittest.TestCase):
def testSetUpModuleCalled(self):
self.assertTrue(g_setUpModuleCalled)
if __name__ == "__main__":
unittest.main()
|
agpl-3.0
|
thefinn93/CouchPotatoServer
|
libs/requests/packages/chardet/mbcharsetprober.py
|
215
|
3182
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants, sys
from constants import eStart, eError, eItsMe
from charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mDistributionAnalyzer = None
self._mCodingSM = None
self._mLastChar = ['\x00', '\x00']
def reset(self):
CharSetProber.reset(self)
if self._mCodingSM:
self._mCodingSM.reset()
if self._mDistributionAnalyzer:
self._mDistributionAnalyzer.reset()
self._mLastChar = ['\x00', '\x00']
def get_charset_name(self):
pass
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == eError:
if constants._debug:
sys.stderr.write(self.get_charset_name() + ' prober hit error at byte ' + str(i) + '\n')
self._mState = constants.eNotMe
break
elif codingState == eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mDistributionAnalyzer.feed(aBuf[i-1:i+1], charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if self._mDistributionAnalyzer.got_enough_data() and \
(self.get_confidence() > constants.SHORTCUT_THRESHOLD):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
return self._mDistributionAnalyzer.get_confidence()
|
gpl-3.0
|
BeyondTheClouds/nova
|
nova/tests/unit/virt/vmwareapi/test_vif.py
|
6
|
14654
|
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_vmware import exceptions as vexc
from oslo_vmware import vim_util
from nova import exception
from nova.network import model as network_model
from nova import test
from nova.tests.unit import matchers
from nova.tests.unit import utils
from nova.tests.unit.virt.vmwareapi import fake
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import network_util
from nova.virt.vmwareapi import vif
from nova.virt.vmwareapi import vm_util
class VMwareVifTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVifTestCase, self).setUp()
self.flags(vlan_interface='vmnet0', group='vmware')
network = network_model.Network(id=0,
bridge='fa0',
label='fake',
vlan=3,
bridge_interface='eth0',
injected=True)
self._network = network
self.vif = network_model.NetworkInfo([
network_model.VIF(id=None,
address='DE:AD:BE:EF:00:00',
network=network,
type=None,
devname=None,
ovs_interfaceid=None,
rxtx_cap=3)
])[0]
self.session = fake.FakeSession()
self.cluster = None
def tearDown(self):
super(VMwareVifTestCase, self).tearDown()
def test_ensure_vlan_bridge(self):
self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
self.mox.StubOutWithMock(network_util,
'get_vswitch_for_vlan_interface')
self.mox.StubOutWithMock(network_util,
'check_if_vlan_interface_exists')
self.mox.StubOutWithMock(network_util, 'create_port_group')
network_util.get_network_with_the_name(self.session, 'fa0',
self.cluster).AndReturn(None)
network_util.get_vswitch_for_vlan_interface(self.session, 'vmnet0',
self.cluster).AndReturn('vmnet0')
network_util.check_if_vlan_interface_exists(self.session, 'vmnet0',
self.cluster).AndReturn(True)
network_util.create_port_group(self.session, 'fa0', 'vmnet0', 3,
self.cluster)
network_util.get_network_with_the_name(self.session, 'fa0', None)
self.mox.ReplayAll()
vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=True)
# FlatDHCP network mode without vlan - network doesn't exist with the host
def test_ensure_vlan_bridge_without_vlan(self):
self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
self.mox.StubOutWithMock(network_util,
'get_vswitch_for_vlan_interface')
self.mox.StubOutWithMock(network_util,
'check_if_vlan_interface_exists')
self.mox.StubOutWithMock(network_util, 'create_port_group')
network_util.get_network_with_the_name(self.session, 'fa0',
self.cluster).AndReturn(None)
network_util.get_vswitch_for_vlan_interface(self.session, 'vmnet0',
self.cluster).AndReturn('vmnet0')
network_util.check_if_vlan_interface_exists(self.session, 'vmnet0',
self.cluster).AndReturn(True)
network_util.create_port_group(self.session, 'fa0', 'vmnet0', 0,
self.cluster)
network_util.get_network_with_the_name(self.session, 'fa0', None)
self.mox.ReplayAll()
vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False)
# FlatDHCP network mode without vlan - network exists with the host
# Get vswitch and check vlan interface should not be called
def test_ensure_vlan_bridge_with_network(self):
self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
self.mox.StubOutWithMock(network_util,
'get_vswitch_for_vlan_interface')
self.mox.StubOutWithMock(network_util,
'check_if_vlan_interface_exists')
self.mox.StubOutWithMock(network_util, 'create_port_group')
vm_network = {'name': 'VM Network', 'type': 'Network'}
network_util.get_network_with_the_name(self.session, 'fa0',
self.cluster).AndReturn(vm_network)
self.mox.ReplayAll()
vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False)
# Flat network mode with DVS
def test_ensure_vlan_bridge_with_existing_dvs(self):
network_ref = {'dvpg': 'dvportgroup-2062',
'type': 'DistributedVirtualPortgroup'}
self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
self.mox.StubOutWithMock(network_util,
'get_vswitch_for_vlan_interface')
self.mox.StubOutWithMock(network_util,
'check_if_vlan_interface_exists')
self.mox.StubOutWithMock(network_util, 'create_port_group')
network_util.get_network_with_the_name(self.session, 'fa0',
self.cluster).AndReturn(network_ref)
self.mox.ReplayAll()
ref = vif.ensure_vlan_bridge(self.session,
self.vif,
create_vlan=False)
self.assertThat(ref, matchers.DictMatches(network_ref))
def test_get_network_ref_flat_dhcp(self):
self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge')
vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster,
create_vlan=False)
self.mox.ReplayAll()
vif.get_network_ref(self.session, self.cluster, self.vif, False)
def test_get_network_ref_bridge(self):
self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge')
vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster,
create_vlan=True)
self.mox.ReplayAll()
network = network_model.Network(id=0,
bridge='fa0',
label='fake',
vlan=3,
bridge_interface='eth0',
injected=True,
should_create_vlan=True)
self.vif = network_model.NetworkInfo([
network_model.VIF(id=None,
address='DE:AD:BE:EF:00:00',
network=network,
type=None,
devname=None,
ovs_interfaceid=None,
rxtx_cap=3)
])[0]
vif.get_network_ref(self.session, self.cluster, self.vif, False)
def test_create_port_group_already_exists(self):
def fake_call_method(module, method, *args, **kwargs):
if method == 'AddPortGroup':
raise vexc.AlreadyExistsException()
with test.nested(
mock.patch.object(vm_util, 'get_add_vswitch_port_group_spec'),
mock.patch.object(vm_util, 'get_host_ref'),
mock.patch.object(self.session, '_call_method',
fake_call_method)
) as (_add_vswitch, _get_host, _call_method):
network_util.create_port_group(self.session, 'pg_name',
'vswitch_name', vlan_id=0,
cluster=None)
def test_create_port_group_exception(self):
def fake_call_method(module, method, *args, **kwargs):
if method == 'AddPortGroup':
raise vexc.VMwareDriverException()
with test.nested(
mock.patch.object(vm_util, 'get_add_vswitch_port_group_spec'),
mock.patch.object(vm_util, 'get_host_ref'),
mock.patch.object(self.session, '_call_method',
fake_call_method)
) as (_add_vswitch, _get_host, _call_method):
self.assertRaises(vexc.VMwareDriverException,
network_util.create_port_group,
self.session, 'pg_name',
'vswitch_name', vlan_id=0,
cluster=None)
def test_get_vif_info_none(self):
vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
'is_neutron', 'fake_model', None)
self.assertEqual([], vif_info)
def test_get_vif_info_empty_list(self):
vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
'is_neutron', 'fake_model', [])
self.assertEqual([], vif_info)
@mock.patch.object(vif, 'get_network_ref', return_value='fake_ref')
def test_get_vif_info(self, mock_get_network_ref):
network_info = utils.get_test_network_info()
vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
'is_neutron', 'fake_model', network_info)
expected = [{'iface_id': 'vif-xxx-yyy-zzz',
'mac_address': 'fake',
'network_name': 'fake',
'network_ref': 'fake_ref',
'vif_model': 'fake_model'}]
self.assertEqual(expected, vif_info)
@mock.patch.object(vif, '_check_ovs_supported_version')
def test_get_neutron_network_ovs_integration_bridge(self,
mock_check):
self.flags(integration_bridge='fake-bridge-id', group='vmware')
vif_info = network_model.NetworkInfo([
network_model.VIF(type=network_model.VIF_TYPE_OVS,
address='DE:AD:BE:EF:00:00',
network=self._network)]
)[0]
network_ref = vif._get_neutron_network('fake-session',
'fake-cluster',
vif_info)
expected_ref = {'type': 'OpaqueNetwork',
'network-id': 'fake-bridge-id',
'network-type': 'opaque',
'use-external-id': False}
self.assertEqual(expected_ref, network_ref)
mock_check.assert_called_once_with('fake-session')
@mock.patch.object(vif, '_check_ovs_supported_version')
def test_get_neutron_network_ovs(self, mock_check):
vif_info = network_model.NetworkInfo([
network_model.VIF(type=network_model.VIF_TYPE_OVS,
address='DE:AD:BE:EF:00:00',
network=self._network)]
)[0]
network_ref = vif._get_neutron_network('fake-session',
'fake-cluster',
vif_info)
expected_ref = {'type': 'OpaqueNetwork',
'network-id': 0,
'network-type': 'nsx.LogicalSwitch',
'use-external-id': True}
self.assertEqual(expected_ref, network_ref)
mock_check.assert_called_once_with('fake-session')
@mock.patch.object(network_util, 'get_network_with_the_name')
def test_get_neutron_network_dvs(self, mock_network_name):
fake_network_obj = {'type': 'DistributedVirtualPortgroup',
'dvpg': 'fake-key',
'dvsw': 'fake-props'}
mock_network_name.return_value = fake_network_obj
vif_info = network_model.NetworkInfo([
network_model.VIF(type=network_model.VIF_TYPE_DVS,
address='DE:AD:BE:EF:00:00',
network=self._network)]
)[0]
network_ref = vif._get_neutron_network('fake-session',
'fake-cluster',
vif_info)
mock_network_name.assert_called_once_with('fake-session',
'fa0',
'fake-cluster')
self.assertEqual(fake_network_obj, network_ref)
@mock.patch.object(network_util, 'get_network_with_the_name',
return_value=None)
def test_get_neutron_network_dvs_no_match(self, mock_network_name):
vif_info = network_model.NetworkInfo([
network_model.VIF(type=network_model.VIF_TYPE_DVS,
address='DE:AD:BE:EF:00:00',
network=self._network)]
)[0]
self.assertRaises(exception.NetworkNotFoundForBridge,
vif._get_neutron_network,
'fake-session',
'fake-cluster',
vif_info)
def test_get_neutron_network_invalid_type(self):
vif_info = network_model.NetworkInfo([
network_model.VIF(address='DE:AD:BE:EF:00:00',
network=self._network)]
)[0]
self.assertRaises(exception.InvalidInput,
vif._get_neutron_network,
'fake-session',
'fake-cluster',
vif_info)
@mock.patch.object(vif.LOG, 'warning')
@mock.patch.object(vim_util, 'get_vc_version',
return_value='5.0.0')
def test_check_invalid_ovs_version(self, mock_version, mock_warning):
vif._check_ovs_supported_version('fake_session')
# assert that the min version is in a warning message
expected_arg = {'version': constants.MIN_VC_OVS_VERSION}
version_arg_found = False
for call in mock_warning.call_args_list:
if call[0][1] == expected_arg:
version_arg_found = True
break
self.assertTrue(version_arg_found)
|
apache-2.0
|
ff94315/hiwifi-openwrt-HC5661-HC5761
|
staging_dir/host/lib/scons-2.1.0/SCons/Tool/javac.py
|
21
|
8571
|
"""SCons.Tool.javac
Tool-specific initialization for javac.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/javac.py 5357 2011/09/09 21:31:03 bdeegan"
import os
import os.path
import SCons.Action
import SCons.Builder
from SCons.Node.FS import _my_normcase
from SCons.Tool.JavaCommon import parse_java_file
import SCons.Util
def classname(path):
"""Turn a string (path name) into a Java class name."""
return os.path.normpath(path).replace(os.sep, '.')
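# Editor's note (illustrative, not in the original tool): classname() maps a file-system
# path to dotted Java form, e.g. classname('com/example/Foo') -> 'com.example.Foo'.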
def emit_java_classes(target, source, env):
"""Create and return lists of source java files
and their corresponding target class files.
"""
java_suffix = env.get('JAVASUFFIX', '.java')
class_suffix = env.get('JAVACLASSSUFFIX', '.class')
target[0].must_be_same(SCons.Node.FS.Dir)
classdir = target[0]
s = source[0].rentry().disambiguate()
if isinstance(s, SCons.Node.FS.File):
sourcedir = s.dir.rdir()
elif isinstance(s, SCons.Node.FS.Dir):
sourcedir = s.rdir()
else:
raise SCons.Errors.UserError("Java source must be File or Dir, not '%s'" % s.__class__)
slist = []
js = _my_normcase(java_suffix)
for entry in source:
entry = entry.rentry().disambiguate()
if isinstance(entry, SCons.Node.FS.File):
slist.append(entry)
elif isinstance(entry, SCons.Node.FS.Dir):
result = SCons.Util.OrderedDict()
dirnode = entry.rdir()
def find_java_files(arg, dirpath, filenames):
java_files = sorted([n for n in filenames
if _my_normcase(n).endswith(js)])
mydir = dirnode.Dir(dirpath)
java_paths = [mydir.File(f) for f in java_files]
for jp in java_paths:
arg[jp] = True
for dirpath, dirnames, filenames in os.walk(dirnode.get_abspath()):
find_java_files(result, dirpath, filenames)
entry.walk(find_java_files, result)
slist.extend(list(result.keys()))
else:
raise SCons.Errors.UserError("Java source must be File or Dir, not '%s'" % entry.__class__)
version = env.get('JAVAVERSION', '1.4')
full_tlist = []
for f in slist:
tlist = []
source_file_based = True
pkg_dir = None
if not f.is_derived():
pkg_dir, classes = parse_java_file(f.rfile().get_abspath(), version)
if classes:
source_file_based = False
if pkg_dir:
d = target[0].Dir(pkg_dir)
p = pkg_dir + os.sep
else:
d = target[0]
p = ''
for c in classes:
t = d.File(c + class_suffix)
t.attributes.java_classdir = classdir
t.attributes.java_sourcedir = sourcedir
t.attributes.java_classname = classname(p + c)
tlist.append(t)
if source_file_based:
base = f.name[:-len(java_suffix)]
if pkg_dir:
t = target[0].Dir(pkg_dir).File(base + class_suffix)
else:
t = target[0].File(base + class_suffix)
t.attributes.java_classdir = classdir
t.attributes.java_sourcedir = f.dir
t.attributes.java_classname = classname(base)
tlist.append(t)
for t in tlist:
t.set_specific_source([f])
full_tlist.extend(tlist)
return full_tlist, slist
JavaAction = SCons.Action.Action('$JAVACCOM', '$JAVACCOMSTR')
JavaBuilder = SCons.Builder.Builder(action = JavaAction,
emitter = emit_java_classes,
target_factory = SCons.Node.FS.Entry,
source_factory = SCons.Node.FS.Entry)
class pathopt(object):
"""
Callable object for generating javac-style path options from
a construction variable (e.g. -classpath, -sourcepath).
"""
def __init__(self, opt, var, default=None):
self.opt = opt
self.var = var
self.default = default
def __call__(self, target, source, env, for_signature):
path = env[self.var]
if path and not SCons.Util.is_List(path):
path = [path]
if self.default:
path = path + [ env[self.default] ]
if path:
return [self.opt, os.pathsep.join(path)]
#return self.opt + " " + os.pathsep.join(path)
else:
return []
#return ""
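# Editor's note (illustrative, hypothetical values, not part of the original tool): with
# env['JAVACLASSPATH'] = ['lib/a.jar', 'lib/b.jar'], the generator
# pathopt('-classpath', 'JAVACLASSPATH')(None, None, env, None) expands to
# ['-classpath', 'lib/a.jar:lib/b.jar'] (with ';' as the separator on Windows).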
def Java(env, target, source, *args, **kw):
"""
A pseudo-Builder wrapper around the separate JavaClass{File,Dir}
Builders.
"""
if not SCons.Util.is_List(target):
target = [target]
if not SCons.Util.is_List(source):
source = [source]
# Pad the target list with repetitions of the last element in the
# list so we have a target for every source element.
target = target + ([target[-1]] * (len(source) - len(target)))
java_suffix = env.subst('$JAVASUFFIX')
result = []
for t, s in zip(target, source):
if isinstance(s, SCons.Node.FS.Base):
if isinstance(s, SCons.Node.FS.File):
b = env.JavaClassFile
else:
b = env.JavaClassDir
else:
if os.path.isfile(s):
b = env.JavaClassFile
elif os.path.isdir(s):
b = env.JavaClassDir
elif s[-len(java_suffix):] == java_suffix:
b = env.JavaClassFile
else:
b = env.JavaClassDir
result.extend(b(t, s, *args, **kw))
return result
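# Illustrative SConstruct usage sketch (editor's addition, not part of the tool); the
# directory names are hypothetical:
#     env = Environment(tools=['javac'])
#     env.Java(target='build/classes', source='src')   # a directory source selects JavaClassDir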
def generate(env):
"""Add Builders and construction variables for javac to an Environment."""
java_file = SCons.Tool.CreateJavaFileBuilder(env)
java_class = SCons.Tool.CreateJavaClassFileBuilder(env)
java_class_dir = SCons.Tool.CreateJavaClassDirBuilder(env)
java_class.add_emitter(None, emit_java_classes)
java_class.add_emitter(env.subst('$JAVASUFFIX'), emit_java_classes)
java_class_dir.emitter = emit_java_classes
env.AddMethod(Java)
env['JAVAC'] = 'javac'
env['JAVACFLAGS'] = SCons.Util.CLVar('')
env['JAVABOOTCLASSPATH'] = []
env['JAVACLASSPATH'] = []
env['JAVASOURCEPATH'] = []
env['_javapathopt'] = pathopt
env['_JAVABOOTCLASSPATH'] = '${_javapathopt("-bootclasspath", "JAVABOOTCLASSPATH")} '
env['_JAVACLASSPATH'] = '${_javapathopt("-classpath", "JAVACLASSPATH")} '
env['_JAVASOURCEPATH'] = '${_javapathopt("-sourcepath", "JAVASOURCEPATH", "_JAVASOURCEPATHDEFAULT")} '
env['_JAVASOURCEPATHDEFAULT'] = '${TARGET.attributes.java_sourcedir}'
env['_JAVACCOM'] = '$JAVAC $JAVACFLAGS $_JAVABOOTCLASSPATH $_JAVACLASSPATH -d ${TARGET.attributes.java_classdir} $_JAVASOURCEPATH $SOURCES'
env['JAVACCOM'] = "${TEMPFILE('$_JAVACCOM')}"
env['JAVACLASSSUFFIX'] = '.class'
env['JAVASUFFIX'] = '.java'
def exists(env):
return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
gpl-2.0
|
Codefans-fan/odoo
|
addons/marketing_campaign/marketing_campaign.py
|
286
|
41776
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import base64
import itertools
from datetime import datetime
from dateutil.relativedelta import relativedelta
from operator import itemgetter
from traceback import format_exception
from sys import exc_info
from openerp.tools.safe_eval import safe_eval as eval
import re
from openerp.addons.decimal_precision import decimal_precision as dp
from openerp import api
from openerp.osv import fields, osv
from openerp.report import render_report
from openerp.tools.translate import _
_intervalTypes = {
'hours': lambda interval: relativedelta(hours=interval),
'days': lambda interval: relativedelta(days=interval),
'months': lambda interval: relativedelta(months=interval),
'years': lambda interval: relativedelta(years=interval),
}
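# Editor's note (illustrative, not in the original module): each entry maps an integer
# interval to a relativedelta, e.g. _intervalTypes['days'](3) == relativedelta(days=3);
# code elsewhere in this module (not shown in this excerpt) uses these to offset dates.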
DT_FMT = '%Y-%m-%d %H:%M:%S'
class marketing_campaign(osv.osv):
_name = "marketing.campaign"
_description = "Marketing Campaign"
def _count_segments(self, cr, uid, ids, field_name, arg, context=None):
res = {}
try:
for segments in self.browse(cr, uid, ids, context=context):
res[segments.id] = len(segments.segment_ids)
except:
pass
return res
_columns = {
'name': fields.char('Name', required=True),
'object_id': fields.many2one('ir.model', 'Resource', required=True,
help="Choose the resource on which you want \
this campaign to be run"),
'partner_field_id': fields.many2one('ir.model.fields', 'Partner Field',
domain="[('model_id', '=', object_id), ('ttype', '=', 'many2one'), ('relation', '=', 'res.partner')]",
help="The generated workitems will be linked to the partner related to the record. "\
"If the record is the partner itself leave this field empty. "\
"This is useful for reporting purposes, via the Campaign Analysis or Campaign Follow-up views."),
'unique_field_id': fields.many2one('ir.model.fields', 'Unique Field',
domain="[('model_id', '=', object_id), ('ttype', 'in', ['char','int','many2one','text','selection'])]",
help='If set, this field will help segments that work in "no duplicates" mode to avoid '\
'selecting similar records twice. Similar records are records that have the same value for '\
'this unique field. For example by choosing the "email_from" field for CRM Leads you would prevent '\
'sending the same campaign to the same email address again. If not set, the "no duplicates" segments '\
"will only avoid selecting the same record again if it entered the campaign previously. "\
"Only easily comparable fields like textfields, integers, selections or single relationships may be used."),
'mode': fields.selection([('test', 'Test Directly'),
('test_realtime', 'Test in Realtime'),
('manual', 'With Manual Confirmation'),
('active', 'Normal')],
'Mode', required=True, help= \
"""Test - It creates and process all the activities directly (without waiting for the delay on transitions) but does not send emails or produce reports.
Test in Realtime - It creates and processes all the activities directly but does not send emails or produce reports.
With Manual Confirmation - the campaigns runs normally, but the user has to validate all workitem manually.
Normal - the campaign runs normally and automatically sends all emails and reports (be very careful with this mode, you're live!)"""),
'state': fields.selection([('draft', 'New'),
('running', 'Running'),
('cancelled', 'Cancelled'),
('done', 'Done')],
'Status', copy=False),
'activity_ids': fields.one2many('marketing.campaign.activity',
'campaign_id', 'Activities'),
'fixed_cost': fields.float('Fixed Cost', help="Fixed cost for running this campaign. You may also specify variable cost and revenue on each campaign activity. Cost and Revenue statistics are included in Campaign Reporting.", digits_compute=dp.get_precision('Product Price')),
'segment_ids': fields.one2many('marketing.campaign.segment', 'campaign_id', 'Segments', readonly=False),
'segments_count': fields.function(_count_segments, type='integer', string='Segments')
}
_defaults = {
'state': lambda *a: 'draft',
'mode': lambda *a: 'test',
}
def state_running_set(self, cr, uid, ids, *args):
# TODO check that all subcampaigns are running
campaign = self.browse(cr, uid, ids[0])
if not campaign.activity_ids:
raise osv.except_osv(_("Error"), _("The campaign cannot be started. There are no activities in it."))
has_start = False
has_signal_without_from = False
for activity in campaign.activity_ids:
if activity.start:
has_start = True
if activity.signal and len(activity.from_ids) == 0:
has_signal_without_from = True
if not has_start and not has_signal_without_from:
raise osv.except_osv(_("Error"), _("The campaign cannot be started. It does not have any starting activity. Modify campaign's activities to mark one as the starting point."))
return self.write(cr, uid, ids, {'state': 'running'})
def state_done_set(self, cr, uid, ids, *args):
# TODO check that this campaign is not a subcampaign in running mode.
segment_ids = self.pool.get('marketing.campaign.segment').search(cr, uid,
[('campaign_id', 'in', ids),
('state', '=', 'running')])
if segment_ids :
raise osv.except_osv(_("Error"), _("The campaign cannot be marked as done before all segments are closed."))
self.write(cr, uid, ids, {'state': 'done'})
return True
def state_cancel_set(self, cr, uid, ids, *args):
# TODO check that this campaign is not a subcampaign in running mode.
self.write(cr, uid, ids, {'state': 'cancelled'})
return True
# dead code
def signal(self, cr, uid, model, res_id, signal, run_existing=True, context=None):
record = self.pool[model].browse(cr, uid, res_id, context)
return self._signal(cr, uid, record, signal, run_existing, context)
#dead code
def _signal(self, cr, uid, record, signal, run_existing=True, context=None):
if not signal:
raise ValueError('Signal cannot be False.')
Workitems = self.pool.get('marketing.campaign.workitem')
domain = [('object_id.model', '=', record._name),
('state', '=', 'running')]
campaign_ids = self.search(cr, uid, domain, context=context)
for campaign in self.browse(cr, uid, campaign_ids, context=context):
for activity in campaign.activity_ids:
if activity.signal != signal:
continue
data = dict(activity_id=activity.id,
res_id=record.id,
state='todo')
wi_domain = [(k, '=', v) for k, v in data.items()]
wi_ids = Workitems.search(cr, uid, wi_domain, context=context)
if wi_ids:
if not run_existing:
continue
else:
partner = self._get_partner_for(campaign, record)
if partner:
data['partner_id'] = partner.id
wi_id = Workitems.create(cr, uid, data, context=context)
wi_ids = [wi_id]
Workitems.process(cr, uid, wi_ids, context=context)
return True
def _get_partner_for(self, campaign, record):
partner_field = campaign.partner_field_id.name
if partner_field:
return getattr(record, partner_field)
elif campaign.object_id.model == 'res.partner':
return record
return None
# prevent duplication until the server properly duplicates several levels of nested o2m
def copy(self, cr, uid, id, default=None, context=None):
raise osv.except_osv(_("Operation not supported"), _("You cannot duplicate a campaign, Not supported yet."))
def _find_duplicate_workitems(self, cr, uid, record, campaign_rec, context=None):
"""Finds possible duplicates workitems for a record in this campaign, based on a uniqueness
field.
:param record: browse_record to find duplicates workitems for.
:param campaign_rec: browse_record of campaign
"""
Workitems = self.pool.get('marketing.campaign.workitem')
duplicate_workitem_domain = [('res_id','=', record.id),
('campaign_id','=', campaign_rec.id)]
unique_field = campaign_rec.unique_field_id
if unique_field:
unique_value = getattr(record, unique_field.name, None)
if unique_value:
if unique_field.ttype == 'many2one':
unique_value = unique_value.id
similar_res_ids = self.pool[campaign_rec.object_id.model].search(cr, uid,
[(unique_field.name, '=', unique_value)], context=context)
if similar_res_ids:
duplicate_workitem_domain = [('res_id','in', similar_res_ids),
('campaign_id','=', campaign_rec.id)]
return Workitems.search(cr, uid, duplicate_workitem_domain, context=context)
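# Illustrative sketch (not part of the original module): assuming a campaign is
# defined on a hypothetical model such as crm.lead with unique_field_id pointing
# to its email_from field, two leads sharing the same email would make the
# method above widen the duplicate domain roughly like this:
#
#   similar_res_ids = [lead1_id, lead2_id]   # both share "foo@example.com"
#   duplicate_workitem_domain = [('res_id', 'in', similar_res_ids),
#                                ('campaign_id', '=', campaign_rec.id)]
#
# so process_segment() skips the second lead when the sync mode excludes
# duplicates.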
class marketing_campaign_segment(osv.osv):
_name = "marketing.campaign.segment"
_description = "Campaign Segment"
_order = "name"
def _get_next_sync(self, cr, uid, ids, fn, args, context=None):
# next auto sync date is same for all segments
sync_job = self.pool.get('ir.model.data').get_object(cr, uid, 'marketing_campaign', 'ir_cron_marketing_campaign_every_day', context=context)
next_sync = sync_job and sync_job.nextcall or False
return dict.fromkeys(ids, next_sync)
_columns = {
'name': fields.char('Name', required=True),
'campaign_id': fields.many2one('marketing.campaign', 'Campaign', required=True, select=1, ondelete="cascade"),
'object_id': fields.related('campaign_id','object_id', type='many2one', relation='ir.model', string='Resource'),
'ir_filter_id': fields.many2one('ir.filters', 'Filter', ondelete="restrict",
help="Filter to select the matching resource records that belong to this segment. "\
"New filters can be created and saved using the advanced search on the list view of the Resource. "\
"If no filter is set, all records are selected without filtering. "\
"The synchronization mode may also add a criterion to the filter."),
'sync_last_date': fields.datetime('Last Synchronization', help="Date on which this segment was synchronized last time (automatically or manually)"),
'sync_mode': fields.selection([('create_date', 'Only records created after last sync'),
('write_date', 'Only records modified after last sync (no duplicates)'),
('all', 'All records (no duplicates)')],
'Synchronization mode',
help="Determines an additional criterion to add to the filter when selecting new records to inject in the campaign. "\
'"No duplicates" prevents selecting records which have already entered the campaign previously.'\
'If the campaign has a "unique field" set, "no duplicates" will also prevent selecting records which have '\
'the same value for the unique field as other records that already entered the campaign.'),
'state': fields.selection([('draft', 'New'),
('cancelled', 'Cancelled'),
('running', 'Running'),
('done', 'Done')],
'Status', copy=False),
'date_run': fields.datetime('Launch Date', help="Initial start date of this segment."),
'date_done': fields.datetime('End Date', help="Date this segment was last closed or cancelled."),
'date_next_sync': fields.function(_get_next_sync, string='Next Synchronization', type='datetime', help="Next time the synchronization job is scheduled to run automatically"),
}
_defaults = {
'state': lambda *a: 'draft',
'sync_mode': lambda *a: 'create_date',
}
def _check_model(self, cr, uid, ids, context=None):
for obj in self.browse(cr, uid, ids, context=context):
if not obj.ir_filter_id:
return True
if obj.campaign_id.object_id.model != obj.ir_filter_id.model_id:
return False
return True
_constraints = [
(_check_model, 'The model of the filter must be the same as the resource model of the campaign.', ['ir_filter_id', 'campaign_id']),
]
def onchange_campaign_id(self, cr, uid, ids, campaign_id):
res = {'domain':{'ir_filter_id':[]}}
campaign_pool = self.pool.get('marketing.campaign')
if campaign_id:
campaign = campaign_pool.browse(cr, uid, campaign_id)
model_name = self.pool.get('ir.model').read(cr, uid, [campaign.object_id.id], ['model'])
if model_name:
mod_name = model_name[0]['model']
res['domain'] = {'ir_filter_id': [('model_id', '=', mod_name)]}
else:
res['value'] = {'ir_filter_id': False}
return res
def state_running_set(self, cr, uid, ids, *args):
segment = self.browse(cr, uid, ids[0])
vals = {'state': 'running'}
if not segment.date_run:
vals['date_run'] = time.strftime('%Y-%m-%d %H:%M:%S')
self.write(cr, uid, ids, vals)
return True
def state_done_set(self, cr, uid, ids, *args):
wi_ids = self.pool.get("marketing.campaign.workitem").search(cr, uid,
[('state', '=', 'todo'), ('segment_id', 'in', ids)])
self.pool.get("marketing.campaign.workitem").write(cr, uid, wi_ids, {'state':'cancelled'})
self.write(cr, uid, ids, {'state': 'done','date_done': time.strftime('%Y-%m-%d %H:%M:%S')})
return True
def state_cancel_set(self, cr, uid, ids, *args):
wi_ids = self.pool.get("marketing.campaign.workitem").search(cr, uid,
[('state', '=', 'todo'), ('segment_id', 'in', ids)])
self.pool.get("marketing.campaign.workitem").write(cr, uid, wi_ids, {'state':'cancelled'})
self.write(cr, uid, ids, {'state': 'cancelled','date_done': time.strftime('%Y-%m-%d %H:%M:%S')})
return True
def synchroniz(self, cr, uid, ids, *args):
self.process_segment(cr, uid, ids)
return True
@api.cr_uid_ids_context
def process_segment(self, cr, uid, segment_ids=None, context=None):
Workitems = self.pool.get('marketing.campaign.workitem')
Campaigns = self.pool.get('marketing.campaign')
if not segment_ids:
segment_ids = self.search(cr, uid, [('state', '=', 'running')], context=context)
action_date = time.strftime('%Y-%m-%d %H:%M:%S')
campaigns = set()
for segment in self.browse(cr, uid, segment_ids, context=context):
if segment.campaign_id.state != 'running':
continue
campaigns.add(segment.campaign_id.id)
act_ids = self.pool.get('marketing.campaign.activity').search(cr,
uid, [('start', '=', True), ('campaign_id', '=', segment.campaign_id.id)], context=context)
model_obj = self.pool[segment.object_id.model]
criteria = []
if segment.sync_last_date and segment.sync_mode != 'all':
criteria += [(segment.sync_mode, '>', segment.sync_last_date)]
if segment.ir_filter_id:
criteria += eval(segment.ir_filter_id.domain)
object_ids = model_obj.search(cr, uid, criteria, context=context)
# XXX TODO: rewrite this loop more efficiently without doing 1 search per record!
for record in model_obj.browse(cr, uid, object_ids, context=context):
# avoid duplicate workitem for the same resource
if segment.sync_mode in ('write_date','all'):
if Campaigns._find_duplicate_workitems(cr, uid, record, segment.campaign_id, context=context):
continue
wi_vals = {
'segment_id': segment.id,
'date': action_date,
'state': 'todo',
'res_id': record.id
}
partner = self.pool.get('marketing.campaign')._get_partner_for(segment.campaign_id, record)
if partner:
wi_vals['partner_id'] = partner.id
for act_id in act_ids:
wi_vals['activity_id'] = act_id
Workitems.create(cr, uid, wi_vals, context=context)
self.write(cr, uid, segment.id, {'sync_last_date':action_date}, context=context)
Workitems.process_all(cr, uid, list(campaigns), context=context)
return True
class marketing_campaign_activity(osv.osv):
_name = "marketing.campaign.activity"
_order = "name"
_description = "Campaign Activity"
_action_types = [
('email', 'Email'),
('report', 'Report'),
('action', 'Custom Action'),
# TODO implement the subcampaigns.
# TODO implement the subcampaign out. disallow out transitions from
# subcampaign activities ?
#('subcampaign', 'Sub-Campaign'),
]
_columns = {
'name': fields.char('Name', required=True),
'campaign_id': fields.many2one('marketing.campaign', 'Campaign',
required = True, ondelete='cascade', select=1),
'object_id': fields.related('campaign_id','object_id',
type='many2one', relation='ir.model',
string='Object', readonly=True),
'start': fields.boolean('Start', help= "This activity is launched when the campaign starts.", select=True),
'condition': fields.text('Condition', size=256, required=True,
help="Python expression to decide whether the activity can be executed, otherwise it will be deleted or cancelled."
"The expression may use the following [browsable] variables:\n"
" - activity: the campaign activity\n"
" - workitem: the campaign workitem\n"
" - resource: the resource object this campaign item represents\n"
" - transitions: list of campaign transitions outgoing from this activity\n"
"...- re: Python regular expression module"),
'type': fields.selection(_action_types, 'Type', required=True,
help="""The type of action to execute when an item enters this activity, such as:
- Email: send an email using a predefined email template
- Report: print an existing Report defined on the resource item and save it into a specific directory
- Custom Action: execute a predefined action, e.g. to modify the fields of the resource record
"""),
'email_template_id': fields.many2one('email.template', "Email Template", help='The email to send when this activity is activated'),
'report_id': fields.many2one('ir.actions.report.xml', "Report", help='The report to generate when this activity is activated', ),
'report_directory_id': fields.many2one('document.directory','Directory',
help="This folder is used to store the generated reports"),
'server_action_id': fields.many2one('ir.actions.server', string='Action',
help= "The action to perform when this activity is activated"),
'to_ids': fields.one2many('marketing.campaign.transition',
'activity_from_id',
'Next Activities'),
'from_ids': fields.one2many('marketing.campaign.transition',
'activity_to_id',
'Previous Activities'),
'variable_cost': fields.float('Variable Cost', help="Set a variable cost if you consider that every campaign item that has reached this point has entailed a certain cost. You can get cost statistics in the Reporting section", digits_compute=dp.get_precision('Product Price')),
'revenue': fields.float('Revenue', help="Set an expected revenue if you consider that every campaign item that has reached this point has generated a certain revenue. You can get revenue statistics in the Reporting section", digits_compute=dp.get_precision('Account')),
'signal': fields.char('Signal',
help='An activity with a signal can be called programmatically. Be careful, the workitem is always created when a signal is sent'),
'keep_if_condition_not_met': fields.boolean("Don't Delete Workitems",
help="By activating this option, workitems that aren't executed because the condition is not met are marked as cancelled instead of being deleted.")
}
_defaults = {
'type': lambda *a: 'email',
'condition': lambda *a: 'True',
}
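# Illustrative sketch (not part of the original module): a condition expression
# for an activity can use the variables listed in the 'condition' help above,
# e.g. (assuming the campaign resource has a hypothetical partner_id field
# with an email):
#
#   bool(resource.partner_id) and re.match('(?i).*@example\\.com$',
#                                          resource.partner_id.email or '')
#
# The expression is evaluated by marketing.campaign.workitem._process_one();
# when it is false the workitem is deleted, or cancelled if
# keep_if_condition_not_met is set.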
def search(self, cr, uid, args, offset=0, limit=None, order=None,
context=None, count=False):
if context is None:
context = {}
if 'segment_id' in context and context['segment_id']:
segment_obj = self.pool.get('marketing.campaign.segment').browse(cr,
uid, context['segment_id'])
act_ids = []
for activity in segment_obj.campaign_id.activity_ids:
act_ids.append(activity.id)
return act_ids
return super(marketing_campaign_activity, self).search(cr, uid, args,
offset, limit, order, context, count)
#dead code
def _process_wi_report(self, cr, uid, activity, workitem, context=None):
report_data, format = render_report(cr, uid, [], activity.report_id.report_name, {}, context=context)
attach_vals = {
'name': '%s_%s_%s'%(activity.report_id.report_name,
activity.name,workitem.partner_id.name),
'datas_fname': '%s.%s'%(activity.report_id.report_name,
activity.report_id.report_type),
'parent_id': activity.report_directory_id.id,
'datas': base64.encodestring(report_data),
'file_type': format
}
self.pool.get('ir.attachment').create(cr, uid, attach_vals)
return True
def _process_wi_email(self, cr, uid, activity, workitem, context=None):
return self.pool.get('email.template').send_mail(cr, uid,
activity.email_template_id.id,
workitem.res_id, context=context)
#dead code
def _process_wi_action(self, cr, uid, activity, workitem, context=None):
if context is None:
context = {}
server_obj = self.pool.get('ir.actions.server')
action_context = dict(context,
active_id=workitem.res_id,
active_ids=[workitem.res_id],
active_model=workitem.object_id.model,
workitem=workitem)
server_obj.run(cr, uid, [activity.server_action_id.id],
context=action_context)
return True
def process(self, cr, uid, act_id, wi_id, context=None):
activity = self.browse(cr, uid, act_id, context=context)
method = '_process_wi_%s' % (activity.type,)
action = getattr(self, method, None)
if not action:
raise NotImplementedError('Method %r is not implemented on %r object.' % (method, self))
workitem_obj = self.pool.get('marketing.campaign.workitem')
workitem = workitem_obj.browse(cr, uid, wi_id, context=context)
return action(cr, uid, activity, workitem, context=context)
class marketing_campaign_transition(osv.osv):
_name = "marketing.campaign.transition"
_description = "Campaign Transition"
_interval_units = [
('hours', 'Hour(s)'),
('days', 'Day(s)'),
('months', 'Month(s)'),
('years', 'Year(s)'),
]
def _get_name(self, cr, uid, ids, fn, args, context=None):
# name formatters that depend on trigger
formatters = {
'auto': _('Automatic transition'),
'time': _('After %(interval_nbr)d %(interval_type)s'),
'cosmetic': _('Cosmetic'),
}
# get the translations of the values of selection field 'interval_type'
fields = self.fields_get(cr, uid, ['interval_type'], context=context)
interval_type_selection = dict(fields['interval_type']['selection'])
result = dict.fromkeys(ids, False)
for trans in self.browse(cr, uid, ids, context=context):
values = {
'interval_nbr': trans.interval_nbr,
'interval_type': interval_type_selection.get(trans.interval_type, ''),
}
result[trans.id] = formatters[trans.trigger] % values
return result
def _delta(self, cr, uid, ids, context=None):
assert len(ids) == 1
transition = self.browse(cr, uid, ids[0], context=context)
if transition.trigger != 'time':
raise ValueError('Delta is only relevant for timed transition.')
return relativedelta(**{str(transition.interval_type): transition.interval_nbr})
_columns = {
'name': fields.function(_get_name, string='Name',
type='char', size=128),
'activity_from_id': fields.many2one('marketing.campaign.activity',
'Previous Activity', select=1,
required=True, ondelete="cascade"),
'activity_to_id': fields.many2one('marketing.campaign.activity',
'Next Activity',
required=True, ondelete="cascade"),
'interval_nbr': fields.integer('Interval Value', required=True),
'interval_type': fields.selection(_interval_units, 'Interval Unit',
required=True),
'trigger': fields.selection([('auto', 'Automatic'),
('time', 'Time'),
('cosmetic', 'Cosmetic'), # fake plastic transition
],
'Trigger', required=True,
help="How is the destination workitem triggered"),
}
_defaults = {
'interval_nbr': 1,
'interval_type': 'days',
'trigger': 'time',
}
def _check_campaign(self, cr, uid, ids, context=None):
for obj in self.browse(cr, uid, ids, context=context):
if obj.activity_from_id.campaign_id != obj.activity_to_id.campaign_id:
return False
return True
_constraints = [
(_check_campaign, 'The source and destination activities of a transition must belong to the same campaign.', ['activity_from_id', 'activity_to_id']),
]
_sql_constraints = [
('interval_positive', 'CHECK(interval_nbr >= 0)', 'The interval must be positive or zero')
]
class marketing_campaign_workitem(osv.osv):
_name = "marketing.campaign.workitem"
_description = "Campaign Workitem"
def _res_name_get(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, '/')
for wi in self.browse(cr, uid, ids, context=context):
if not wi.res_id:
continue
proxy = self.pool[wi.object_id.model]
if not proxy.exists(cr, uid, [wi.res_id]):
continue
ng = proxy.name_get(cr, uid, [wi.res_id], context=context)
if ng:
res[wi.id] = ng[0][1]
return res
def _resource_search(self, cr, uid, obj, name, args, domain=None, context=None):
"""Returns id of workitem whose resource_name matches with the given name"""
if not len(args):
return []
condition_name = None
for domain_item in args:
# we only use the first domain criterion and ignore all the rest including operators
if isinstance(domain_item, (list,tuple)) and len(domain_item) == 3 and domain_item[0] == 'res_name':
condition_name = [None, domain_item[1], domain_item[2]]
break
assert condition_name, "Invalid search domain for marketing_campaign_workitem.res_name. It should use 'res_name'"
cr.execute("""select w.id, w.res_id, m.model \
from marketing_campaign_workitem w \
left join marketing_campaign_activity a on (a.id=w.activity_id)\
left join marketing_campaign c on (c.id=a.campaign_id)\
left join ir_model m on (m.id=c.object_id)
""")
res = cr.fetchall()
workitem_map = {}
matching_workitems = []
for id, res_id, model in res:
workitem_map.setdefault(model,{}).setdefault(res_id,set()).add(id)
for model, id_map in workitem_map.iteritems():
model_pool = self.pool[model]
condition_name[0] = model_pool._rec_name
condition = [('id', 'in', id_map.keys()), condition_name]
for res_id in model_pool.search(cr, uid, condition, context=context):
matching_workitems.extend(id_map[res_id])
return [('id', 'in', list(set(matching_workitems)))]
_columns = {
'segment_id': fields.many2one('marketing.campaign.segment', 'Segment', readonly=True),
'activity_id': fields.many2one('marketing.campaign.activity','Activity',
required=True, readonly=True),
'campaign_id': fields.related('activity_id', 'campaign_id',
type='many2one', relation='marketing.campaign', string='Campaign', readonly=True, store=True),
'object_id': fields.related('activity_id', 'campaign_id', 'object_id',
type='many2one', relation='ir.model', string='Resource', select=1, readonly=True, store=True),
'res_id': fields.integer('Resource ID', select=1, readonly=True),
'res_name': fields.function(_res_name_get, string='Resource Name', fnct_search=_resource_search, type="char", size=64),
'date': fields.datetime('Execution Date', help='If date is not set, this workitem has to be run manually', readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner', select=1, readonly=True),
'state': fields.selection([ ('todo', 'To Do'),
('cancelled', 'Cancelled'),
('exception', 'Exception'),
('done', 'Done'),
], 'Status', readonly=True, copy=False),
'error_msg' : fields.text('Error Message', readonly=True)
}
_defaults = {
'state': lambda *a: 'todo',
'date': False,
}
@api.cr_uid_ids_context
def button_draft(self, cr, uid, workitem_ids, context=None):
for wi in self.browse(cr, uid, workitem_ids, context=context):
if wi.state in ('exception', 'cancelled'):
self.write(cr, uid, [wi.id], {'state':'todo'}, context=context)
return True
@api.cr_uid_ids_context
def button_cancel(self, cr, uid, workitem_ids, context=None):
for wi in self.browse(cr, uid, workitem_ids, context=context):
if wi.state in ('todo','exception'):
self.write(cr, uid, [wi.id], {'state':'cancelled'}, context=context)
return True
def _process_one(self, cr, uid, workitem, context=None):
if workitem.state != 'todo':
return False
activity = workitem.activity_id
proxy = self.pool[workitem.object_id.model]
object_id = proxy.browse(cr, uid, workitem.res_id, context=context)
eval_context = {
'activity': activity,
'workitem': workitem,
'object': object_id,
'resource': object_id,
'transitions': activity.to_ids,
're': re,
}
try:
condition = activity.condition
campaign_mode = workitem.campaign_id.mode
if condition:
if not eval(condition, eval_context):
if activity.keep_if_condition_not_met:
workitem.write({'state': 'cancelled'})
else:
workitem.unlink()
return
result = True
if campaign_mode in ('manual', 'active'):
Activities = self.pool.get('marketing.campaign.activity')
result = Activities.process(cr, uid, activity.id, workitem.id,
context=context)
values = dict(state='done')
if not workitem.date:
values['date'] = datetime.now().strftime(DT_FMT)
workitem.write(values)
if result:
# process _chain
workitem.refresh() # reload
date = datetime.strptime(workitem.date, DT_FMT)
for transition in activity.to_ids:
if transition.trigger == 'cosmetic':
continue
launch_date = False
if transition.trigger == 'auto':
launch_date = date
elif transition.trigger == 'time':
launch_date = date + transition._delta()
if launch_date:
launch_date = launch_date.strftime(DT_FMT)
values = {
'date': launch_date,
'segment_id': workitem.segment_id.id,
'activity_id': transition.activity_to_id.id,
'partner_id': workitem.partner_id.id,
'res_id': workitem.res_id,
'state': 'todo',
}
wi_id = self.create(cr, uid, values, context=context)
# Now, depending on the trigger and the campaign mode
# we know whether we must run the newly created workitem.
#
# rows = transition trigger \ columns = campaign mode
#
# test test_realtime manual normal (active)
# time Y N N N
# cosmetic N N N N
# auto Y Y N Y
#
run = (transition.trigger == 'auto' \
and campaign_mode != 'manual') \
or (transition.trigger == 'time' \
and campaign_mode == 'test')
if run:
new_wi = self.browse(cr, uid, wi_id, context)
self._process_one(cr, uid, new_wi, context)
except Exception:
tb = "".join(format_exception(*exc_info()))
workitem.write({'state': 'exception', 'error_msg': tb})
@api.cr_uid_ids_context
def process(self, cr, uid, workitem_ids, context=None):
for wi in self.browse(cr, uid, workitem_ids, context=context):
self._process_one(cr, uid, wi, context=context)
return True
def process_all(self, cr, uid, camp_ids=None, context=None):
camp_obj = self.pool.get('marketing.campaign')
if camp_ids is None:
camp_ids = camp_obj.search(cr, uid, [('state','=','running')], context=context)
for camp in camp_obj.browse(cr, uid, camp_ids, context=context):
if camp.mode == 'manual':
# campaigns in manual mode are not processed automatically
continue
while True:
domain = [('campaign_id', '=', camp.id), ('state', '=', 'todo'), ('date', '!=', False)]
if camp.mode in ('test_realtime', 'active'):
domain += [('date','<=', time.strftime('%Y-%m-%d %H:%M:%S'))]
workitem_ids = self.search(cr, uid, domain, context=context)
if not workitem_ids:
break
self.process(cr, uid, workitem_ids, context=context)
return True
def preview(self, cr, uid, ids, context=None):
res = {}
wi_obj = self.browse(cr, uid, ids[0], context=context)
if wi_obj.activity_id.type == 'email':
view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'email_template', 'email_template_preview_form')
res = {
'name': _('Email Preview'),
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'email_template.preview',
'view_id': False,
'context': context,
'views': [(view_id and view_id[1] or 0, 'form')],
'type': 'ir.actions.act_window',
'target': 'new',
'nodestroy':True,
'context': "{'template_id':%d,'default_res_id':%d}"%
(wi_obj.activity_id.email_template_id.id,
wi_obj.res_id)
}
elif wi_obj.activity_id.type == 'report':
datas = {
'ids': [wi_obj.res_id],
'model': wi_obj.object_id.model
}
res = {
'type' : 'ir.actions.report.xml',
'report_name': wi_obj.activity_id.report_id.report_name,
'datas' : datas,
}
else:
raise osv.except_osv(_('No preview'), _('The current step for this item has no email or report to preview.'))
return res
class email_template(osv.osv):
_inherit = "email.template"
_defaults = {
'model_id': lambda obj, cr, uid, context: context.get('object_id',False),
}
# TODO: add constraint to prevent disabling / disapproving an email account used in a running campaign
class report_xml(osv.osv):
_inherit = 'ir.actions.report.xml'
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
object_id = context.get('object_id')
if object_id:
model = self.pool.get('ir.model').browse(cr, uid, object_id, context=context).model
args.append(('model', '=', model))
return super(report_xml, self).search(cr, uid, args, offset, limit, order, context, count)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
nickmain/pyswip
|
pyswip/core.py
|
8
|
32565
|
# -*- coding: utf-8 -*-
# pyswip -- Python SWI-Prolog bridge
# Copyright (c) 2007-2012 Yüce Tekol
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import re
import sys
import glob
import warnings
from subprocess import Popen, PIPE
from ctypes import *
from ctypes.util import find_library
# To initialize the SWI-Prolog environment, two things need to be done: the
# first is to find where the SO/DLL is located and the second is to find the
# SWI-Prolog home, to get the saved state.
#
# The goal of the (somewhat entangled) process below is to make finding the
# library independent of how SWI-Prolog was installed.
def _findSwiplPathFromFindLib():
"""
This function resorts to ctypes' find_library to find the path to the
DLL. The biggest problem is that find_library does not give the path to the
resource file.
:returns:
A path to the swipl SO/DLL or None if it is not found.
:returns type:
{str, None}
"""
path = (find_library('swipl') or
find_library('pl') or
find_library('libswipl')) # This last one is for Windows
return path
def _findSwiplFromExec():
"""
This function tries to use an executable on the path to find SWI-Prolog
SO/DLL and the resource file.
:returns:
A tuple of (path to the swipl DLL, path to the resource file)
:returns type:
({str, None}, {str, None})
"""
platform = sys.platform[:3]
fullName = None
swiHome = None
try: # try to get library path from swipl executable.
# We may have pl or swipl as the executable
try:
cmd = Popen(['swipl', '-dump-runtime-variables'], stdout=PIPE)
except OSError:
cmd = Popen(['pl', '-dump-runtime-variables'], stdout=PIPE)
ret = cmd.communicate()
# Parse the output into a dictionary
ret = ret[0].replace(';', '').splitlines()
ret = [line.split('=', 1) for line in ret]
rtvars = dict((name, value[1:-1]) for name, value in ret) # [1:-1] gets
# rid of the
# quotes
if rtvars['PLSHARED'] == 'no':
raise ImportError('SWI-Prolog is not installed as a shared '
'library.')
else: # PLSHARED == 'yes'
swiHome = rtvars['PLBASE'] # The environment is in PLBASE
if not os.path.exists(swiHome):
swiHome = None
# determine platform specific path
if platform == "win":
dllName = rtvars['PLLIB'][:-4] + '.' + rtvars['PLSOEXT']
path = os.path.join(rtvars['PLBASE'], 'bin')
fullName = os.path.join(path, dllName)
if not os.path.exists(fullName):
fullName = None
elif platform == "cyg":
# e.g. /usr/lib/pl-5.6.36/bin/i686-cygwin/cygpl.dll
dllName = 'cygpl.dll'
path = os.path.join(rtvars['PLBASE'], 'bin', rtvars['PLARCH'])
fullName = os.path.join(path, dllName)
if not os.path.exists(fullName):
fullName = None
elif platform == "dar":
dllName = 'lib' + rtvars['PLLIB'][2:] + '.' + rtvars['PLSOEXT']
path = os.path.join(rtvars['PLBASE'], 'lib', rtvars['PLARCH'])
baseName = os.path.join(path, dllName)
if os.path.exists(baseName):
fullName = baseName
else: # We will search for versions
fullName = None
else: # assume UNIX-like
# The SO name in some linuxes is of the form libswipl.so.5.10.2,
# so we have to use glob to find the correct one
dllName = 'lib' + rtvars['PLLIB'][2:] + '.' + rtvars['PLSOEXT']
path = os.path.join(rtvars['PLBASE'], 'lib', rtvars['PLARCH'])
baseName = os.path.join(path, dllName)
if os.path.exists(baseName):
fullName = baseName
else: # We will search for versions
pattern = baseName + '.*'
files = glob.glob(pattern)
if len(files) == 0:
fullName = None
elif len(files) == 1:
fullName = files[0]
else: # Will this ever happen?
fullName = None
except (OSError, KeyError): # KeyError from accessing rtvars
pass
return (fullName, swiHome)
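# For reference (illustrative only; not produced or required by this module):
# the output parsed above, from `swipl -dump-runtime-variables`, typically
# looks like
#
#   PLBASE="/usr/lib/swi-prolog";
#   PLARCH="x86_64-linux";
#   PLSOEXT="so";
#   PLLIB="-lswipl";
#   PLSHARED="yes";
#
# which the parsing above turns into a dict such as
#   {'PLBASE': '/usr/lib/swi-prolog', 'PLARCH': 'x86_64-linux', ...}
# before assembling the full path to the shared library.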
def _findSwiplWin():
"""
This function uses several heuristics to guess where SWI-Prolog is installed
on Windows. It always returns None as the path of the resource file because,
on Windows, the SWI-Prolog DLL is able to locate the resource file reliably
on its own.
:returns:
A tuple of (path to the swipl DLL, path to the resource file)
:returns type:
({str, None}, {str, None})
"""
dllNames = ('swipl.dll', 'libswipl.dll')
# First try: check the usual installation path (this is faster but
# hardcoded)
programFiles = os.getenv('ProgramFiles')
paths = [os.path.join(programFiles, r'pl\bin', dllName)
for dllName in dllNames]
for path in paths:
if os.path.exists(path):
return (path, None)
# Second try: use the find_library
path = _findSwiplPathFromFindLib()
if path is not None and os.path.exists(path):
return (path, None)
# Third try: use reg.exe to find the installation path in the registry
# (reg should be installed in all Windows XPs)
try:
cmd = Popen(['reg', 'query',
r'HKEY_LOCAL_MACHINE\Software\SWI\Prolog',
'/v', 'home'], stdout=PIPE)
ret = cmd.communicate()
# Result is like:
# ! REG.EXE VERSION 3.0
#
# HKEY_LOCAL_MACHINE\Software\SWI\Prolog
# home REG_SZ C:\Program Files\pl
# (Note: spaces may be \t or spaces in the output)
ret = ret[0].splitlines()
ret = [line for line in ret if len(line) > 0]
pattern = re.compile('[^h]*home[^R]*REG_SZ( |\t)*(.*)$')
match = pattern.match(ret[-1])
if match is not None:
path = match.group(2)
paths = [os.path.join(path, 'bin', dllName)
for dllName in dllNames]
for path in paths:
if os.path.exists(path):
return (path, None)
except OSError:
# reg.exe not found? Weird...
pass
# Maybe the executable is on the path?
(path, swiHome) = _findSwiplFromExec()
if path is not None:
return (path, swiHome)
# Last try: maybe it is in the current dir
for dllName in dllNames:
if os.path.exists(dllName):
return (dllName, None)
return (None, None)
def _findSwiplLin():
"""
This function uses several heuristics to guess where SWI-Prolog is
installed on Linux.
:returns:
A tuple of (path to the swipl so, path to the resource file)
:returns type:
({str, None}, {str, None})
"""
# Maybe the exec is on path?
(path, swiHome) = _findSwiplFromExec()
if path is not None:
return (path, swiHome)
# If it is not, use find_library
path = _findSwiplPathFromFindLib()
if path is not None:
return (path, swiHome)
# Our last try: some hardcoded paths.
paths = ['/lib', '/usr/lib', '/usr/local/lib', '.', './lib']
names = ['libswipl.so', 'libpl.so']
path = None
for name in names:
for try_ in paths:
try_ = os.path.join(try_, name)
if os.path.exists(try_):
path = try_
break
if path is not None:
return (path, swiHome)
return (None, None)
def _findSwiplDar():
"""
This function uses several heuristics to guess where SWI-Prolog is
installed on MacOS.
:returns:
A tuple of (path to the swipl so, path to the resource file)
:returns type:
({str, None}, {str, None})
"""
# If the exec is in path
(path, swiHome) = _findSwiplFromExec()
if path is not None:
return (path, swiHome)
# If it is not, use find_library
path = _findSwiplPathFromFindLib()
if path is not None:
return (path, swiHome)
# Last guess, searching for the file
paths = ['.', './lib', '/usr/lib/', '/usr/local/lib', '/opt/local/lib']
names = ['libswipl.dylib', 'libpl.dylib']
for name in names:
for path in paths:
path = os.path.join(path, name)
if os.path.exists(path):
return (path, None)
return (None, None)
def _findSwipl():
"""
This function makes a big effort to find the path to the SWI-Prolog shared
library. Since this is both OS dependent and installation dependent, we may
not always succeed. If we do, we return a name/path that can be used by
CDLL(). Otherwise we raise an exception.
:return: Tuple. First element is the name or path to the library that can be
used by CDLL. Second element is the path where the SWI-Prolog resource
file may be found (this is needed on some Linux systems).
:rtype: Tuple of strings
:raises ImportError: If we cannot guess the name of the library
"""
# Now begins the guesswork
platform = sys.platform[:3]
if platform == "win": # In Windows, we have the default installer
# path and the registry to look
(path, swiHome) = _findSwiplWin()
elif platform in ("lin", "cyg"):
(path, swiHome) = _findSwiplLin()
elif platform == "dar": # Help with MacOS is welcome!!
(path, swiHome) = _findSwiplDar()
else:
raise EnvironmentError('The platform %s is not supported by this '
'library. If you want it to be supported, '
'please open an issue.' % platform)
# This is a catch all raise
if path is None:
raise ImportError('Could not find the SWI-Prolog library in this '
'platform. If you are sure it is installed, please '
'open an issue.')
else:
return (path, swiHome)
def _fixWindowsPath(dll):
"""
When the path to the DLL is not in Windows search path, Windows will not be
able to find other DLLs in the same directory, so we have to add it to the
path. This function takes care of it.
:parameters:
- `dll` (str) - File name of the DLL
"""
if sys.platform[:3] != 'win':
return # Nothing to do here
pathToDll = os.path.dirname(dll)
currentWindowsPath = os.getenv('PATH')
if pathToDll not in currentWindowsPath:
# We will prepend the path, to avoid conflicts between DLLs
newPath = pathToDll + ';' + currentWindowsPath
os.putenv('PATH', newPath)
# Find the path and resource file. SWI_HOME_DIR shall be treated as a constant
# by users of this module
(_path, SWI_HOME_DIR) = _findSwipl()
_fixWindowsPath(_path)
# Load the library
_lib = CDLL(_path)
# PySWIP constants
PYSWIP_MAXSTR = 1024
c_int_p = c_void_p
c_long_p = c_void_p
c_double_p = c_void_p
c_uint_p = c_void_p
# constants (from SWI-Prolog.h)
# PL_unify_term() arguments
PL_VARIABLE = 1 # nothing
PL_ATOM = 2 # const char
PL_INTEGER = 3 # int
PL_FLOAT = 4 # double
PL_STRING = 5 # const char *
PL_TERM = 6 #
# PL_unify_term()
PL_FUNCTOR = 10 # functor_t, arg ...
PL_LIST = 11 # length, arg ...
PL_CHARS = 12 # const char *
PL_POINTER = 13 # void *
# /* PlArg::PlArg(text, type) */
#define PL_CODE_LIST (14) /* [ascii...] */
#define PL_CHAR_LIST (15) /* [h,e,l,l,o] */
#define PL_BOOL (16) /* PL_set_feature() */
#define PL_FUNCTOR_CHARS (17) /* PL_unify_term() */
#define _PL_PREDICATE_INDICATOR (18) /* predicate_t (Procedure) */
#define PL_SHORT (19) /* short */
#define PL_INT (20) /* int */
#define PL_LONG (21) /* long */
#define PL_DOUBLE (22) /* double */
#define PL_NCHARS (23) /* unsigned, const char * */
#define PL_UTF8_CHARS (24) /* const char * */
#define PL_UTF8_STRING (25) /* const char * */
#define PL_INT64 (26) /* int64_t */
#define PL_NUTF8_CHARS (27) /* unsigned, const char * */
#define PL_NUTF8_CODES (29) /* unsigned, const char * */
#define PL_NUTF8_STRING (30) /* unsigned, const char * */
#define PL_NWCHARS (31) /* unsigned, const wchar_t * */
#define PL_NWCODES (32) /* unsigned, const wchar_t * */
#define PL_NWSTRING (33) /* unsigned, const wchar_t * */
#define PL_MBCHARS (34) /* const char * */
#define PL_MBCODES (35) /* const char * */
#define PL_MBSTRING (36) /* const char * */
# /********************************
# * NON-DETERMINISTIC CALL/RETURN *
# *********************************/
#
# Note 1: Non-deterministic foreign functions may also use the deterministic
# return methods PL_succeed and PL_fail.
#
# Note 2: The argument to PL_retry is a 30-bit signed integer (long).
PL_FIRST_CALL = 0
PL_CUTTED = 1
PL_REDO = 2
PL_FA_NOTRACE = 0x01 # foreign cannot be traced
PL_FA_TRANSPARENT = 0x02 # foreign is module transparent
PL_FA_NONDETERMINISTIC = 0x04 # foreign is non-deterministic
PL_FA_VARARGS = 0x08 # call using t0, ac, ctx
PL_FA_CREF = 0x10 # Internal: has clause-reference */
# /*******************************
# * CALL-BACK *
# *******************************/
PL_Q_DEBUG = 0x01 # = TRUE for backward compatibility
PL_Q_NORMAL = 0x02 # normal usage
PL_Q_NODEBUG = 0x04 # use this one
PL_Q_CATCH_EXCEPTION = 0x08 # handle exceptions in C
PL_Q_PASS_EXCEPTION = 0x10 # pass to parent environment
PL_Q_DETERMINISTIC = 0x20 # call was deterministic
# /*******************************
# * BLOBS *
# *******************************/
#define PL_BLOB_MAGIC_B 0x75293a00 /* Magic to validate a blob-type */
#define PL_BLOB_VERSION 1 /* Current version */
#define PL_BLOB_MAGIC (PL_BLOB_MAGIC_B|PL_BLOB_VERSION)
#define PL_BLOB_UNIQUE 0x01 /* Blob content is unique */
#define PL_BLOB_TEXT 0x02 /* blob contains text */
#define PL_BLOB_NOCOPY 0x04 /* do not copy the data */
#define PL_BLOB_WCHAR 0x08 /* wide character string */
# /*******************************
# * CHAR BUFFERS *
# *******************************/
CVT_ATOM = 0x0001
CVT_STRING = 0x0002
CVT_LIST = 0x0004
CVT_INTEGER = 0x0008
CVT_FLOAT = 0x0010
CVT_VARIABLE = 0x0020
CVT_NUMBER = CVT_INTEGER | CVT_FLOAT
CVT_ATOMIC = CVT_NUMBER | CVT_ATOM | CVT_STRING
CVT_WRITE = 0x0040 # as of version 3.2.10
CVT_ALL = CVT_ATOMIC | CVT_LIST
CVT_MASK = 0x00ff
BUF_DISCARDABLE = 0x0000
BUF_RING = 0x0100
BUF_MALLOC = 0x0200
CVT_EXCEPTION = 0x10000 # throw exception on error
argv = (c_char_p*(len(sys.argv) + 1))()
for i, arg in enumerate(sys.argv):
argv[i] = arg
argv[-1] = None
argc = len(sys.argv)
# /*******************************
# * TYPES *
# *******************************/
#
# typedef uintptr_t atom_t; /* Prolog atom */
# typedef uintptr_t functor_t; /* Name/arity pair */
# typedef void * module_t; /* Prolog module */
# typedef void * predicate_t; /* Prolog procedure */
# typedef void * record_t; /* Prolog recorded term */
# typedef uintptr_t term_t; /* opaque term handle */
# typedef uintptr_t qid_t; /* opaque query handle */
# typedef uintptr_t PL_fid_t; /* opaque foreign context handle */
# typedef void * control_t; /* non-deterministic control arg */
# typedef void * PL_engine_t; /* opaque engine handle */
# typedef uintptr_t PL_atomic_t; /* same a word */
# typedef uintptr_t foreign_t; /* return type of foreign functions */
# typedef wchar_t pl_wchar_t; /* Prolog wide character */
# typedef foreign_t (*pl_function_t)(); /* foreign language functions */
atom_t = c_uint_p
functor_t = c_uint_p
module_t = c_void_p
predicate_t = c_void_p
record_t = c_void_p
term_t = c_uint_p
qid_t = c_uint_p
PL_fid_t = c_uint_p
fid_t = c_uint_p
control_t = c_void_p
PL_engine_t = c_void_p
PL_atomic_t = c_uint_p
foreign_t = c_uint_p
pl_wchar_t = c_wchar
PL_initialise = _lib.PL_initialise
#PL_initialise.argtypes = [c_int, c_c??
PL_open_foreign_frame = _lib.PL_open_foreign_frame
PL_open_foreign_frame.restype = fid_t
PL_new_term_ref = _lib.PL_new_term_ref
PL_new_term_ref.restype = term_t
PL_new_term_refs = _lib.PL_new_term_refs
PL_new_term_refs.argtypes = [c_int]
PL_new_term_refs.restype = term_t
PL_chars_to_term = _lib.PL_chars_to_term
PL_chars_to_term.argtypes = [c_char_p, term_t]
PL_chars_to_term.restype = c_int
PL_call = _lib.PL_call
PL_call.argtypes = [term_t, module_t]
PL_call.restype = c_int
PL_call_predicate = _lib.PL_call_predicate
PL_call_predicate.argtypes = [module_t, c_int, predicate_t, term_t]
PL_call_predicate.restype = c_int
PL_discard_foreign_frame = _lib.PL_discard_foreign_frame
PL_discard_foreign_frame.argtypes = [fid_t]
PL_discard_foreign_frame.restype = None
PL_put_list_chars = _lib.PL_put_list_chars
PL_put_list_chars.argtypes = [term_t, c_char_p]
PL_put_list_chars.restype = c_int
#PL_EXPORT(void) PL_register_atom(atom_t a);
PL_register_atom = _lib.PL_register_atom
PL_register_atom.argtypes = [atom_t]
PL_register_atom.restype = None
#PL_EXPORT(void) PL_unregister_atom(atom_t a);
PL_unregister_atom = _lib.PL_unregister_atom
PL_unregister_atom.argtypes = [atom_t]
PL_unregister_atom.restype = None
#PL_EXPORT(atom_t) PL_functor_name(functor_t f);
PL_functor_name = _lib.PL_functor_name
PL_functor_name.argtypes = [functor_t]
PL_functor_name.restype = atom_t
#PL_EXPORT(int) PL_functor_arity(functor_t f);
PL_functor_arity = _lib.PL_functor_arity
PL_functor_arity.argtypes = [functor_t]
PL_functor_arity.restype = c_int
# /* Get C-values from Prolog terms */
#PL_EXPORT(int) PL_get_atom(term_t t, atom_t *a);
PL_get_atom = _lib.PL_get_atom
PL_get_atom.argtypes = [term_t, POINTER(atom_t)]
PL_get_atom.restype = c_int
#PL_EXPORT(int) PL_get_bool(term_t t, int *value);
PL_get_bool = _lib.PL_get_bool
PL_get_bool.argtypes = [term_t, POINTER(c_int)]
PL_get_bool.restype = c_int
#PL_EXPORT(int) PL_get_atom_chars(term_t t, char **a);
PL_get_atom_chars = _lib.PL_get_atom_chars # FIXME
PL_get_atom_chars.argtypes = [term_t, POINTER(c_char_p)]
PL_get_atom_chars.restype = c_int
##define PL_get_string_chars(t, s, l) PL_get_string(t,s,l)
# /* PL_get_string() is deprecated */
#PL_EXPORT(int) PL_get_string(term_t t, char **s, size_t *len);
PL_get_string = _lib.PL_get_string
PL_get_string_chars = PL_get_string
#PL_get_string_chars.argtypes = [term_t, POINTER(c_char_p), c_int_p]
#PL_EXPORT(int) PL_get_chars(term_t t, char **s, unsigned int flags);
PL_get_chars = _lib.PL_get_chars # FIXME:
#PL_EXPORT(int) PL_get_list_chars(term_t l, char **s,
# unsigned int flags);
#PL_EXPORT(int) PL_get_atom_nchars(term_t t, size_t *len, char **a);
#PL_EXPORT(int) PL_get_list_nchars(term_t l,
# size_t *len, char **s,
# unsigned int flags);
#PL_EXPORT(int) PL_get_nchars(term_t t,
# size_t *len, char **s,
# unsigned int flags);
#PL_EXPORT(int) PL_get_integer(term_t t, int *i);
PL_get_integer = _lib.PL_get_integer
PL_get_integer.argtypes = [term_t, POINTER(c_int)]
PL_get_integer.restype = c_int
#PL_EXPORT(int) PL_get_long(term_t t, long *i);
PL_get_long = _lib.PL_get_long
PL_get_long.argtypes = [term_t, POINTER(c_long)]
PL_get_long.restype = c_int
#PL_EXPORT(int) PL_get_pointer(term_t t, void **ptr);
#PL_EXPORT(int) PL_get_float(term_t t, double *f);
PL_get_float = _lib.PL_get_float
PL_get_float.argtypes = [term_t, c_double_p]
PL_get_float.restype = c_int
#PL_EXPORT(int) PL_get_functor(term_t t, functor_t *f);
PL_get_functor = _lib.PL_get_functor
PL_get_functor.argtypes = [term_t, POINTER(functor_t)]
PL_get_functor.restype = c_int
#PL_EXPORT(int) PL_get_name_arity(term_t t, atom_t *name, int *arity);
PL_get_name_arity = _lib.PL_get_name_arity
PL_get_name_arity.argtypes = [term_t, POINTER(atom_t), POINTER(c_int)]
PL_get_name_arity.restype = c_int
#PL_EXPORT(int) PL_get_module(term_t t, module_t *module);
#PL_EXPORT(int) PL_get_arg(int index, term_t t, term_t a);
PL_get_arg = _lib.PL_get_arg
PL_get_arg.argtypes = [c_int, term_t, term_t]
PL_get_arg.restype = c_int
#PL_EXPORT(int) PL_get_list(term_t l, term_t h, term_t t);
#PL_EXPORT(int) PL_get_head(term_t l, term_t h);
PL_get_head = _lib.PL_get_head
PL_get_head.argtypes = [term_t, term_t]
PL_get_head.restype = c_int
#PL_EXPORT(int) PL_get_tail(term_t l, term_t t);
PL_get_tail = _lib.PL_get_tail
PL_get_tail.argtypes = [term_t, term_t]
PL_get_tail.restype = c_int
#PL_EXPORT(int) PL_get_nil(term_t l);
PL_get_nil = _lib.PL_get_nil
PL_get_nil.argtypes = [term_t]
PL_get_nil.restype = c_int
#PL_EXPORT(int) PL_get_term_value(term_t t, term_value_t *v);
#PL_EXPORT(char *) PL_quote(int chr, const char *data);
PL_put_atom_chars = _lib.PL_put_atom_chars
PL_put_atom_chars.argtypes = [term_t, c_char_p]
PL_put_atom_chars.restype = c_int
PL_atom_chars = _lib.PL_atom_chars
PL_atom_chars.argtypes = [atom_t]
PL_atom_chars.restype = c_char_p
PL_predicate = _lib.PL_predicate
PL_predicate.argtypes = [c_char_p, c_int, c_char_p]
PL_predicate.restype = predicate_t
PL_pred = _lib.PL_pred
PL_pred.argtypes = [functor_t, module_t]
PL_pred.restype = predicate_t
PL_open_query = _lib.PL_open_query
PL_open_query.argtypes = [module_t, c_int, predicate_t, term_t]
PL_open_query.restype = qid_t
PL_next_solution = _lib.PL_next_solution
PL_next_solution.argtypes = [qid_t]
PL_next_solution.restype = c_int
PL_copy_term_ref = _lib.PL_copy_term_ref
PL_copy_term_ref.argtypes = [term_t]
PL_copy_term_ref.restype = term_t
PL_get_list = _lib.PL_get_list
PL_get_list.argtypes = [term_t, term_t, term_t]
PL_get_list.restype = c_int
PL_get_chars = _lib.PL_get_chars # FIXME
PL_close_query = _lib.PL_close_query
PL_close_query.argtypes = [qid_t]
PL_close_query.restype = None
#void PL_cut_query(qid)
PL_cut_query = _lib.PL_cut_query
PL_cut_query.argtypes = [qid_t]
PL_cut_query.restype = None
PL_halt = _lib.PL_halt
PL_halt.argtypes = [c_int]
PL_halt.restype = None
# PL_EXPORT(int) PL_cleanup(int status);
PL_cleanup = _lib.PL_cleanup
PL_cleanup.restype = c_int
PL_unify_integer = _lib.PL_unify_integer
PL_unify = _lib.PL_unify
PL_unify.restype = c_int
#PL_EXPORT(int) PL_unify_arg(int index, term_t t, term_t a) WUNUSED;
PL_unify_arg = _lib.PL_unify_arg
PL_unify_arg.argtypes = [c_int, term_t, term_t]
PL_unify_arg.restype = c_int
# Verify types
PL_term_type = _lib.PL_term_type
PL_term_type.argtypes = [term_t]
PL_term_type.restype = c_int
PL_is_variable = _lib.PL_is_variable
PL_is_variable.argtypes = [term_t]
PL_is_variable.restype = c_int
PL_is_ground = _lib.PL_is_ground
PL_is_ground.argtypes = [term_t]
PL_is_ground.restype = c_int
PL_is_atom = _lib.PL_is_atom
PL_is_atom.argtypes = [term_t]
PL_is_atom.restype = c_int
PL_is_integer = _lib.PL_is_integer
PL_is_integer.argtypes = [term_t]
PL_is_integer.restype = c_int
PL_is_string = _lib.PL_is_string
PL_is_string.argtypes = [term_t]
PL_is_string.restype = c_int
PL_is_float = _lib.PL_is_float
PL_is_float.argtypes = [term_t]
PL_is_float.restype = c_int
#PL_is_rational = _lib.PL_is_rational
#PL_is_rational.argtypes = [term_t]
#PL_is_rational.restype = c_int
PL_is_compound = _lib.PL_is_compound
PL_is_compound.argtypes = [term_t]
PL_is_compound.restype = c_int
PL_is_functor = _lib.PL_is_functor
PL_is_functor.argtypes = [term_t, functor_t]
PL_is_functor.restype = c_int
PL_is_list = _lib.PL_is_list
PL_is_list.argtypes = [term_t]
PL_is_list.restype = c_int
PL_is_atomic = _lib.PL_is_atomic
PL_is_atomic.argtypes = [term_t]
PL_is_atomic.restype = c_int
PL_is_number = _lib.PL_is_number
PL_is_number.argtypes = [term_t]
PL_is_number.restype = c_int
# /* Assign to term-references */
#PL_EXPORT(void) PL_put_variable(term_t t);
PL_put_variable = _lib.PL_put_variable
PL_put_variable.argtypes = [term_t]
PL_put_variable.restype = None
#PL_EXPORT(void) PL_put_atom(term_t t, atom_t a);
#PL_EXPORT(void) PL_put_atom_chars(term_t t, const char *chars);
#PL_EXPORT(void) PL_put_string_chars(term_t t, const char *chars);
#PL_EXPORT(void) PL_put_list_chars(term_t t, const char *chars);
#PL_EXPORT(void) PL_put_list_codes(term_t t, const char *chars);
#PL_EXPORT(void) PL_put_atom_nchars(term_t t, size_t l, const char *chars);
#PL_EXPORT(void) PL_put_string_nchars(term_t t, size_t len, const char *chars);
#PL_EXPORT(void) PL_put_list_nchars(term_t t, size_t l, const char *chars);
#PL_EXPORT(void) PL_put_list_ncodes(term_t t, size_t l, const char *chars);
#PL_EXPORT(void) PL_put_integer(term_t t, long i);
PL_put_integer = _lib.PL_put_integer
PL_put_integer.argtypes = [term_t, c_long]
PL_put_integer.restype = None
#PL_EXPORT(void) PL_put_pointer(term_t t, void *ptr);
#PL_EXPORT(void) PL_put_float(term_t t, double f);
#PL_EXPORT(void) PL_put_functor(term_t t, functor_t functor);
PL_put_functor = _lib.PL_put_functor
PL_put_functor.argtypes = [term_t, functor_t]
PL_put_functor.restype = None
#PL_EXPORT(void) PL_put_list(term_t l);
PL_put_list = _lib.PL_put_list
PL_put_list.argtypes = [term_t]
PL_put_list.restype = None
#PL_EXPORT(void) PL_put_nil(term_t l);
PL_put_nil = _lib.PL_put_nil
PL_put_nil.argtypes = [term_t]
PL_put_nil.restype = None
#PL_EXPORT(void) PL_put_term(term_t t1, term_t t2);
PL_put_term = _lib.PL_put_term
PL_put_term.argtypes = [term_t, term_t]
PL_put_term.restype = None
# /* construct a functor or list-cell */
#PL_EXPORT(void) PL_cons_functor(term_t h, functor_t f, ...);
#class _PL_cons_functor(object):
PL_cons_functor = _lib.PL_cons_functor # FIXME:
#PL_EXPORT(void) PL_cons_functor_v(term_t h, functor_t fd, term_t a0);
PL_cons_functor_v = _lib.PL_cons_functor_v
PL_cons_functor_v.argtypes = [term_t, functor_t, term_t]
PL_cons_functor_v.restype = None
#PL_EXPORT(void) PL_cons_list(term_t l, term_t h, term_t t);
PL_cons_list = _lib.PL_cons_list
PL_cons_list.argtypes = [term_t, term_t, term_t]
PL_cons_list.restype = None
#
# term_t PL_exception(qid_t qid)
PL_exception = _lib.PL_exception
PL_exception.argtypes = [qid_t]
PL_exception.restype = term_t
#
PL_register_foreign = _lib.PL_register_foreign
#
#PL_EXPORT(atom_t) PL_new_atom(const char *s);
PL_new_atom = _lib.PL_new_atom
PL_new_atom.argtypes = [c_char_p]
PL_new_atom.restype = atom_t
#PL_EXPORT(functor_t) PL_new_functor(atom_t f, int a);
PL_new_functor = _lib.PL_new_functor
PL_new_functor.argtypes = [atom_t, c_int]
PL_new_functor.restype = functor_t
# /*******************************
# * COMPARE *
# *******************************/
#
#PL_EXPORT(int) PL_compare(term_t t1, term_t t2);
#PL_EXPORT(int) PL_same_compound(term_t t1, term_t t2);
PL_compare = _lib.PL_compare
PL_compare.argtypes = [term_t, term_t]
PL_compare.restype = c_int
PL_same_compound = _lib.PL_same_compound
PL_same_compound.argtypes = [term_t, term_t]
PL_same_compound.restype = c_int
# /*******************************
# * RECORDED DATABASE *
# *******************************/
#
#PL_EXPORT(record_t) PL_record(term_t term);
PL_record = _lib.PL_record
PL_record.argtypes = [term_t]
PL_record.restype = record_t
#PL_EXPORT(void) PL_recorded(record_t record, term_t term);
PL_recorded = _lib.PL_recorded
PL_recorded.argtypes = [record_t, term_t]
PL_recorded.restype = None
#PL_EXPORT(void) PL_erase(record_t record);
PL_erase = _lib.PL_erase
PL_erase.argtypes = [record_t]
PL_erase.restype = None
#
#PL_EXPORT(char *) PL_record_external(term_t t, size_t *size);
#PL_EXPORT(int) PL_recorded_external(const char *rec, term_t term);
#PL_EXPORT(int) PL_erase_external(char *rec);
PL_new_module = _lib.PL_new_module
PL_new_module.argtypes = [atom_t]
PL_new_module.restype = module_t
intptr_t = c_long
ssize_t = intptr_t
wint_t = c_uint
#typedef struct
#{
# int __count;
# union
# {
# wint_t __wch;
# char __wchb[4];
# } __value; /* Value so far. */
#} __mbstate_t;
class _mbstate_t_value(Union):
_fields_ = [("__wch",wint_t),
("__wchb",c_char*4)]
class mbstate_t(Structure):
_fields_ = [("__count",c_int),
("__value",_mbstate_t_value)]
# stream related funcs
Sread_function = CFUNCTYPE(ssize_t, c_void_p, c_char_p, c_size_t)
Swrite_function = CFUNCTYPE(ssize_t, c_void_p, c_char_p, c_size_t)
Sseek_function = CFUNCTYPE(c_long, c_void_p, c_long, c_int)
Sseek64_function = CFUNCTYPE(c_int64, c_void_p, c_int64, c_int)
Sclose_function = CFUNCTYPE(c_int, c_void_p)
Scontrol_function = CFUNCTYPE(c_int, c_void_p, c_int, c_void_p)
# IOLOCK
IOLOCK = c_void_p
# IOFUNCTIONS
class IOFUNCTIONS(Structure):
_fields_ = [("read",Sread_function),
("write",Swrite_function),
("seek",Sseek_function),
("close",Sclose_function),
("seek64",Sseek64_function),
("reserved",intptr_t*2)]
# IOENC
ENC_UNKNOWN,ENC_OCTET,ENC_ASCII,ENC_ISO_LATIN_1,ENC_ANSI,ENC_UTF8,ENC_UNICODE_BE,ENC_UNICODE_LE,ENC_WCHAR = range(9)
IOENC = c_int
# IOPOS
class IOPOS(Structure):
_fields_ = [("byteno",c_int64),
("charno",c_int64),
("lineno",c_int),
("linepos",c_int),
("reserved", intptr_t*2)]
# IOSTREAM
class IOSTREAM(Structure):
_fields_ = [("bufp",c_char_p),
("limitp",c_char_p),
("buffer",c_char_p),
("unbuffer",c_char_p),
("lastc",c_int),
("magic",c_int),
("bufsize",c_int),
("flags",c_int),
("posbuf",IOPOS),
("position",POINTER(IOPOS)),
("handle",c_void_p),
("functions",IOFUNCTIONS),
("locks",c_int),
("mutex",IOLOCK),
("closure_hook",CFUNCTYPE(None, c_void_p)),
("closure",c_void_p),
("timeout",c_int),
("message",c_char_p),
("encoding",IOENC)]
IOSTREAM._fields_.extend([("tee",IOSTREAM),
("mbstate",POINTER(mbstate_t)),
("reserved",intptr_t*6)])
#PL_EXPORT(IOSTREAM *) Sopen_string(IOSTREAM *s, char *buf, size_t sz, const char *m);
Sopen_string = _lib.Sopen_string
Sopen_string.argtypes = [POINTER(IOSTREAM), c_char_p, c_size_t, c_char_p]
Sopen_string.restype = POINTER(IOSTREAM)
#PL_EXPORT(int) Sclose(IOSTREAM *s);
Sclose = _lib.Sclose
Sclose.argtypes = [POINTER(IOSTREAM)]
#PL_EXPORT(int) PL_unify_stream(term_t t, IOSTREAM *s);
PL_unify_stream = _lib.PL_unify_stream
PL_unify_stream.argtypes = [term_t, POINTER(IOSTREAM)]
|
mit
|
cod3monk/kerncraft
|
kerncraft/prefixedunit.py
|
2
|
6289
|
#!/usr/bin/env python3
import re
from ruamel import yaml
class PrefixedUnit(yaml.YAMLObject):
PREFIXES = {'k': 1e3, 'M': 1e6, 'G': 1e9, 'T': 1e12, 'P': 1e15, 'E': 1e18, 'Z': 1e21, 'Y': 1e24,
'': 1}
yaml_loader = yaml.Loader
yaml_dumper = yaml.Dumper
yaml_tag = u'!prefixed'
yaml_implicit_pattern = re.compile(
r'^(?P<value>[0-9]+(?:\.[0-9]+)?) (?P<prefix>[kMGTP])?(?P<unit>.*)$')
@classmethod
def from_yaml(cls, loader, node):
return PrefixedUnit(node.value)
@classmethod
def to_yaml(cls, dumper, data):
return dumper.represent_scalar(cls.yaml_tag, str(data))
def __init__(self, *args):
if len(args) == 1:
if isinstance(args[0], str):
m = re.match(
r'^(?P<value>(?:[0-9]+(?:\.[0-9]+)?|inf)) (?P<prefix>[kMGTP])?(?P<unit>.*)$',
args[0])
assert m, "Could not parse unit parameter "+repr(args[0])
g = m.groups()
args = [float(g[0]), g[1], g[2]]
else:
args = [float(args[0]), '', '']
if len(args) == 2:
m = re.match(r'^(?P<prefix>[kMGTP])?(?P<unit>.+)$', args[1])
assert m, "Could not parse unit parameter"+repr(args)
gd = m.groupdict()
args = [float(args[0]), gd['prefix'], gd['unit']]
if args[1] is None:
args[1] = ''
assert args[1] in self.PREFIXES, "Unknown prefix: "+repr(args[1])
self.value, self.prefix, self.unit = args
def base_value(self):
"""gives value without prefix"""
return self.value*self.PREFIXES[self.prefix]
__float__ = base_value
def __int__(self):
return int(self.base_value())
def good_prefix(self, max_error=0.01, round_length=2, min_prefix='', max_prefix=None):
"""
Return the largest prefix for which the relative error stays below *max_error*, even
after rounding to *round_length* digits.
If *max_prefix* is found in PrefixedUnit.PREFIXES, the returned prefix will not exceed
this prefix.
If *min_prefix* is given, the returned prefix will be at least that prefix (regardless
of the error).
"""
good_prefix = min_prefix
base_value = self.base_value()
for k, v in list(self.PREFIXES.items()):
# Ignore prefixes that are too large
if max_prefix is not None and v > self.PREFIXES[max_prefix]:
continue
# Check that the difference is below the relative error *max_error*
if abs(round(base_value/v, round_length)*v - base_value) > base_value*max_error:
continue
# Check that resulting number is >= 0.9
if abs(round(base_value/v, round_length)) < 0.9:
continue
# Check if prefix is larger than the one already chosen
if v < self.PREFIXES[good_prefix]:
continue
# seems to be okay
good_prefix = k
return good_prefix
def with_prefix(self, prefix):
return self.__class__(
self.base_value()/self.PREFIXES[prefix], prefix, self.unit)
def reduced(self):
return self.with_prefix(self.good_prefix(max_error=0.0))
def __str__(self):
good_prefix = self.good_prefix()
if self.prefix == good_prefix:
return '{0:.2f} {1}{2}'.format(self.value, self.prefix, self.unit).strip()
else:
return self.with_prefix(good_prefix).__str__()
def __repr__(self):
return '{0}({1!r}, {2!r}, {3!r})'.format(
self.__class__.__name__, self.value, self.prefix, self.unit)
def __mul__(self, other):
if isinstance(other, self.__class__):
unit = self.unit+other.unit
else:
unit = self.unit
v = self.__class__(float(self)*float(other), unit)
return v.reduced()
def __truediv__(self, other):
if isinstance(other, self.__class__):
unit = self.unit+'/'+other.unit
else:
unit = self.unit
v = self.__class__(float(self)/float(other), unit)
return v.reduced()
__div__ = __truediv__
def __floordiv__(self, other):
if isinstance(other, self.__class__):
unit = self.unit+'/'+other.unit
else:
unit = self.unit
v = self.__class__(float(self)//float(other), unit)
return v.reduced()
def __sub__(self, other):
if isinstance(other, self.__class__):
unit = other.unit
else:
unit = self.unit
if self.unit != unit:
raise ValueError("Units need to match for subtraction.")
v = self.__class__(float(self)-float(other), unit)
return v.reduced()
def __abs__(self):
v = self.__class__(abs(float(self)), self.unit)
return v.reduced()
def __add__(self, other):
if isinstance(other, self.__class__):
unit = other.unit
else:
unit = self.unit
if self.unit != unit:
raise ValueError("Units need to match for addition.")
v = self.__class__(float(self) + float(other), self.unit)
return v.reduced()
def __mod__(self, other):
if isinstance(other, self.__class__):
unit = other.unit
else:
unit = self.unit
if self.unit != unit:
raise ValueError("Units need to match for modulo.")
v = self.__class__(float(self) % float(other), self.unit)
return v.reduced()
def __lt__(self, other):
return float(self) < float(other)
def __gt__(self, other):
return float(self) > float(other)
def __eq__(self, other):
try:
return float(self) == float(other)
except TypeError:
return False
def __le__(self, other):
return float(self) <= float(other)
def __ge__(self, other):
return float(self) >= float(other)
def __ne__(self, other):
try:
return float(self) != float(other)
except TypeError:
return True
# Make this tag automatic
yaml.add_implicit_resolver(PrefixedUnit.yaml_tag, PrefixedUnit.yaml_implicit_pattern)
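# Hedged usage sketch (added for illustration; assumes PREFIXES maps '' to 1 and
# 'k'/'G' to 1e3/1e9 as the regexes above suggest -- not part of the original module):
#   >>> size = PrefixedUnit(2048.0, '', 'B')      # 2048 bytes, no prefix
#   >>> str(size)                                  # picks a readable prefix
#   '2.05 kB'
#   >>> float(PrefixedUnit(1.5, 'G', 'FLOP/s'))    # base value without prefix
#   1500000000.0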
|
agpl-3.0
|
yupasik/AT
|
Tests/NPKT/manual_upd2.py
|
1
|
1536
|
import os
import sys
modules_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
sys.path.append(modules_path)
from Devices.application import Application
test = os.path.split(os.path.dirname(os.path.abspath(__file__)))[-1]
print(test)
testintex = 2
testscript = os.path.basename(os.path.abspath(__file__)).split(".")[0]
print(testscript)
stream = "036E_LCN-25_20140418a.ts"
app = Application(test=test,
testintex=testintex,
testscript=testscript)
"""
app.modulator1.set_options(dvb="DVBS",
fec="3/4",
frequency=1476,
symbolrate=27500,
modulation="QPSK")
"""
# app.modulator1.open(stream)
app.capture.start()
# ------------------------------------------------------------------------
app.start_testcase(1)
# app.modulator1.play()
app.stb.default_settings()
app.stb.push(["exit"])
app.grabber.check_result(app.testcase)
# ------------------------------------------------------------------------
app.start_testcase(2)
app.stb.push(["menu 1 3000"])
app.grabber.check_result(app.testcase)
# ------------------------------------------------------------------------
app.start_testcase(3)
# app.modulator1.pause()
app.stb.reboot()
app.stb.launch("settings")
app.stb.push(["exit 1 2000", "menu 1 3000", "ok 1 2000", "ok 1 3000"])
app.grabber.check_result(app.testcase)
# ------------------------------------------------------------------------
app.capture.stop()
app.fin()
|
apache-2.0
|
blockstack/packaging
|
imported/future/src/future/backports/xmlrpc/server.py
|
82
|
37285
|
r"""
Ported using Python-Future from the Python 3.3 standard library.
XML-RPC Servers.
This module can be used to create simple XML-RPC servers
by creating a server and either installing functions, a
class instance, or by extending the SimpleXMLRPCServer
class.
It can also be used to handle XML-RPC requests in a CGI
environment using CGIXMLRPCRequestHandler.
The Doc* classes can be used to create XML-RPC servers that
serve pydoc-style documentation in response to HTTP
GET requests. This documentation is dynamically generated
based on the functions and methods registered with the
server.
A list of possible usage patterns follows:
1. Install functions:
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.serve_forever()
2. Install an instance:
class MyFuncs:
def __init__(self):
# make all of the sys functions available through sys.func_name
import sys
self.sys = sys
def _listMethods(self):
# implement this method so that system.listMethods
# knows to advertise the sys methods
return list_public_methods(self) + \
['sys.' + method for method in list_public_methods(self.sys)]
def pow(self, x, y): return pow(x, y)
def add(self, x, y) : return x + y
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(MyFuncs())
server.serve_forever()
3. Install an instance with custom dispatch method:
class Math:
def _listMethods(self):
# this method must be present for system.listMethods
# to work
return ['add', 'pow']
def _methodHelp(self, method):
# this method must be present for system.methodHelp
# to work
if method == 'add':
return "add(2,3) => 5"
elif method == 'pow':
return "pow(x, y[, z]) => number"
else:
# By convention, return empty
# string if no help is available
return ""
def _dispatch(self, method, params):
if method == 'pow':
return pow(*params)
elif method == 'add':
return params[0] + params[1]
else:
raise ValueError('bad method')
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(Math())
server.serve_forever()
4. Subclass SimpleXMLRPCServer:
class MathServer(SimpleXMLRPCServer):
def _dispatch(self, method, params):
try:
# We are forcing the 'export_' prefix on methods that are
# callable through XML-RPC to prevent potential security
# problems
func = getattr(self, 'export_' + method)
except AttributeError:
raise Exception('method "%s" is not supported' % method)
else:
return func(*params)
def export_add(self, x, y):
return x + y
server = MathServer(("localhost", 8000))
server.serve_forever()
5. CGI script:
server = CGIXMLRPCRequestHandler()
server.register_function(pow)
server.handle_request()
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import int, str
# Written by Brian Quinlan ([email protected]).
# Based on code written by Fredrik Lundh.
from future.backports.xmlrpc.client import Fault, dumps, loads, gzip_encode, gzip_decode
from future.backports.http.server import BaseHTTPRequestHandler
import future.backports.http.server as http_server
from future.backports import socketserver
import sys
import os
import re
import pydoc
import inspect
import traceback
try:
import fcntl
except ImportError:
fcntl = None
def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
"""resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d
Resolves a dotted attribute name to an object. Raises
an AttributeError if any attribute in the chain starts with a '_'.
If the optional allow_dotted_names argument is false, dots are not
supported and this function operates similar to getattr(obj, attr).
"""
if allow_dotted_names:
attrs = attr.split('.')
else:
attrs = [attr]
for i in attrs:
if i.startswith('_'):
raise AttributeError(
'attempt to access private attribute "%s"' % i
)
else:
obj = getattr(obj,i)
return obj
def list_public_methods(obj):
"""Returns a list of attribute strings, found in the specified
object, which represent callable attributes"""
return [member for member in dir(obj)
if not member.startswith('_') and
callable(getattr(obj, member))]
class SimpleXMLRPCDispatcher(object):
"""Mix-in class that dispatches XML-RPC requests.
This class is used to register XML-RPC method handlers
and then to dispatch them. This class doesn't need to be
instanced directly when used by SimpleXMLRPCServer but it
can be instanced when used by the MultiPathXMLRPCServer
"""
def __init__(self, allow_none=False, encoding=None,
use_builtin_types=False):
self.funcs = {}
self.instance = None
self.allow_none = allow_none
self.encoding = encoding or 'utf-8'
self.use_builtin_types = use_builtin_types
def register_instance(self, instance, allow_dotted_names=False):
"""Registers an instance to respond to XML-RPC requests.
Only one instance can be installed at a time.
If the registered instance has a _dispatch method then that
method will be called with the name of the XML-RPC method and
its parameters as a tuple
e.g. instance._dispatch('add',(2,3))
If the registered instance does not have a _dispatch method
then the instance will be searched to find a matching method
and, if found, will be called. Methods beginning with an '_'
are considered private and will not be called by
SimpleXMLRPCServer.
If a registered function matches a XML-RPC request, then it
will be called instead of the registered instance.
If the optional allow_dotted_names argument is true and the
instance does not have a _dispatch method, method names
containing dots are supported and resolved, as long as none of
the name segments start with an '_'.
*** SECURITY WARNING: ***
Enabling the allow_dotted_names options allows intruders
to access your module's global variables and may allow
intruders to execute arbitrary code on your machine. Only
use this option on a secure, closed network.
"""
self.instance = instance
self.allow_dotted_names = allow_dotted_names
def register_function(self, function, name=None):
"""Registers a function to respond to XML-RPC requests.
The optional name argument can be used to set a Unicode name
for the function.
"""
if name is None:
name = function.__name__
self.funcs[name] = function
def register_introspection_functions(self):
"""Registers the XML-RPC introspection methods in the system
namespace.
see http://xmlrpc.usefulinc.com/doc/reserved.html
"""
self.funcs.update({'system.listMethods' : self.system_listMethods,
'system.methodSignature' : self.system_methodSignature,
'system.methodHelp' : self.system_methodHelp})
def register_multicall_functions(self):
"""Registers the XML-RPC multicall method in the system
namespace.
see http://www.xmlrpc.com/discuss/msgReader$1208"""
self.funcs.update({'system.multicall' : self.system_multicall})
def _marshaled_dispatch(self, data, dispatch_method = None, path = None):
"""Dispatches an XML-RPC method from marshalled (XML) data.
XML-RPC methods are dispatched from the marshalled (XML) data
using the _dispatch method and the result is returned as
marshalled data. For backwards compatibility, a dispatch
function can be provided as an argument (see comment in
SimpleXMLRPCRequestHandler.do_POST) but overriding the
existing method through subclassing is the preferred means
of changing method dispatch behavior.
"""
try:
params, method = loads(data, use_builtin_types=self.use_builtin_types)
# generate response
if dispatch_method is not None:
response = dispatch_method(method, params)
else:
response = self._dispatch(method, params)
# wrap response in a singleton tuple
response = (response,)
response = dumps(response, methodresponse=1,
allow_none=self.allow_none, encoding=self.encoding)
except Fault as fault:
response = dumps(fault, allow_none=self.allow_none,
encoding=self.encoding)
except:
# report exception back to server
exc_type, exc_value, exc_tb = sys.exc_info()
response = dumps(
Fault(1, "%s:%s" % (exc_type, exc_value)),
encoding=self.encoding, allow_none=self.allow_none,
)
return response.encode(self.encoding)
def system_listMethods(self):
"""system.listMethods() => ['add', 'subtract', 'multiple']
Returns a list of the methods supported by the server."""
methods = set(self.funcs.keys())
if self.instance is not None:
# Instance can implement _listMethod to return a list of
# methods
if hasattr(self.instance, '_listMethods'):
methods |= set(self.instance._listMethods())
# if the instance has a _dispatch method then we
# don't have enough information to provide a list
# of methods
elif not hasattr(self.instance, '_dispatch'):
methods |= set(list_public_methods(self.instance))
return sorted(methods)
def system_methodSignature(self, method_name):
"""system.methodSignature('add') => [double, int, int]
Returns a list describing the signature of the method. In the
above example, the add method takes two integers as arguments
and returns a double result.
This server does NOT support system.methodSignature."""
# See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html
return 'signatures not supported'
def system_methodHelp(self, method_name):
"""system.methodHelp('add') => "Adds two integers together"
Returns a string containing documentation for the specified method."""
method = None
if method_name in self.funcs:
method = self.funcs[method_name]
elif self.instance is not None:
# Instance can implement _methodHelp to return help for a method
if hasattr(self.instance, '_methodHelp'):
return self.instance._methodHelp(method_name)
# if the instance has a _dispatch method then we
# don't have enough information to provide help
elif not hasattr(self.instance, '_dispatch'):
try:
method = resolve_dotted_attribute(
self.instance,
method_name,
self.allow_dotted_names
)
except AttributeError:
pass
# Note that we aren't checking that the method actually
# be a callable object of some kind
if method is None:
return ""
else:
return pydoc.getdoc(method)
def system_multicall(self, call_list):
"""system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \
[[4], ...]
Allows the caller to package multiple XML-RPC calls into a single
request.
See http://www.xmlrpc.com/discuss/msgReader$1208
"""
results = []
for call in call_list:
method_name = call['methodName']
params = call['params']
try:
# XXX A marshalling error in any response will fail the entire
# multicall. If someone cares they should fix this.
results.append([self._dispatch(method_name, params)])
except Fault as fault:
results.append(
{'faultCode' : fault.faultCode,
'faultString' : fault.faultString}
)
except:
exc_type, exc_value, exc_tb = sys.exc_info()
results.append(
{'faultCode' : 1,
'faultString' : "%s:%s" % (exc_type, exc_value)}
)
return results
def _dispatch(self, method, params):
"""Dispatches the XML-RPC method.
XML-RPC calls are forwarded to a registered function that
matches the called XML-RPC method name. If no such function
exists then the call is forwarded to the registered instance,
if available.
If the registered instance has a _dispatch method then that
method will be called with the name of the XML-RPC method and
its parameters as a tuple
e.g. instance._dispatch('add',(2,3))
If the registered instance does not have a _dispatch method
then the instance will be searched to find a matching method
and, if found, will be called.
Methods beginning with an '_' are considered private and will
not be called.
"""
func = None
try:
# check to see if a matching function has been registered
func = self.funcs[method]
except KeyError:
if self.instance is not None:
# check for a _dispatch method
if hasattr(self.instance, '_dispatch'):
return self.instance._dispatch(method, params)
else:
# call instance method directly
try:
func = resolve_dotted_attribute(
self.instance,
method,
self.allow_dotted_names
)
except AttributeError:
pass
if func is not None:
return func(*params)
else:
raise Exception('method "%s" is not supported' % method)
class SimpleXMLRPCRequestHandler(BaseHTTPRequestHandler):
"""Simple XML-RPC request handler class.
Handles all HTTP POST requests and attempts to decode them as
XML-RPC requests.
"""
# Class attribute listing the accessible path components;
# paths not on this list will result in a 404 error.
rpc_paths = ('/', '/RPC2')
#if not None, encode responses larger than this, if possible
encode_threshold = 1400 #a common MTU
#Override from StreamRequestHandler: full buffering of output
#and no Nagle.
wbufsize = -1
disable_nagle_algorithm = True
# a re to match a gzip Accept-Encoding
aepattern = re.compile(r"""
\s* ([^\s;]+) \s* #content-coding
(;\s* q \s*=\s* ([0-9\.]+))? #q
""", re.VERBOSE | re.IGNORECASE)
def accept_encodings(self):
r = {}
ae = self.headers.get("Accept-Encoding", "")
for e in ae.split(","):
match = self.aepattern.match(e)
if match:
v = match.group(3)
v = float(v) if v else 1.0
r[match.group(1)] = v
return r
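# Illustrative example (assumed header value, not from the original source):
# a request header "Accept-Encoding: gzip;q=0.5, deflate" would yield
# {'gzip': 0.5, 'deflate': 1.0}, since a missing q-value defaults to 1.0.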
def is_rpc_path_valid(self):
if self.rpc_paths:
return self.path in self.rpc_paths
else:
# If .rpc_paths is empty, just assume all paths are legal
return True
def do_POST(self):
"""Handles the HTTP POST request.
Attempts to interpret all HTTP POST requests as XML-RPC calls,
which are forwarded to the server's _dispatch method for handling.
"""
# Check that the path is legal
if not self.is_rpc_path_valid():
self.report_404()
return
try:
# Get arguments by reading body of request.
# We read this in chunks to avoid straining
# socket.read(); around the 10 or 15Mb mark, some platforms
# begin to have problems (bug #792570).
max_chunk_size = 10*1024*1024
size_remaining = int(self.headers["content-length"])
L = []
while size_remaining:
chunk_size = min(size_remaining, max_chunk_size)
chunk = self.rfile.read(chunk_size)
if not chunk:
break
L.append(chunk)
size_remaining -= len(L[-1])
data = b''.join(L)
data = self.decode_request_content(data)
if data is None:
return #response has been sent
# In previous versions of SimpleXMLRPCServer, _dispatch
# could be overridden in this class, instead of in
# SimpleXMLRPCDispatcher. To maintain backwards compatibility,
# check to see if a subclass implements _dispatch and dispatch
# using that method if present.
response = self.server._marshaled_dispatch(
data, getattr(self, '_dispatch', None), self.path
)
except Exception as e: # This should only happen if the module is buggy
# internal error, report as HTTP server error
self.send_response(500)
# Send information about the exception if requested
if hasattr(self.server, '_send_traceback_header') and \
self.server._send_traceback_header:
self.send_header("X-exception", str(e))
trace = traceback.format_exc()
trace = str(trace.encode('ASCII', 'backslashreplace'), 'ASCII')
self.send_header("X-traceback", trace)
self.send_header("Content-length", "0")
self.end_headers()
else:
self.send_response(200)
self.send_header("Content-type", "text/xml")
if self.encode_threshold is not None:
if len(response) > self.encode_threshold:
q = self.accept_encodings().get("gzip", 0)
if q:
try:
response = gzip_encode(response)
self.send_header("Content-Encoding", "gzip")
except NotImplementedError:
pass
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
def decode_request_content(self, data):
#support gzip encoding of request
encoding = self.headers.get("content-encoding", "identity").lower()
if encoding == "identity":
return data
if encoding == "gzip":
try:
return gzip_decode(data)
except NotImplementedError:
self.send_response(501, "encoding %r not supported" % encoding)
except ValueError:
self.send_response(400, "error decoding gzip content")
else:
self.send_response(501, "encoding %r not supported" % encoding)
self.send_header("Content-length", "0")
self.end_headers()
def report_404 (self):
# Report a 404 error
self.send_response(404)
response = b'No such page'
self.send_header("Content-type", "text/plain")
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
def log_request(self, code='-', size='-'):
"""Selectively log an accepted request."""
if self.server.logRequests:
BaseHTTPRequestHandler.log_request(self, code, size)
class SimpleXMLRPCServer(socketserver.TCPServer,
SimpleXMLRPCDispatcher):
"""Simple XML-RPC server.
Simple XML-RPC server that allows functions and a single instance
to be installed to handle requests. The default implementation
attempts to dispatch XML-RPC calls to the functions or instance
installed in the server. Override the _dispatch method inherited
from SimpleXMLRPCDispatcher to change this behavior.
"""
allow_reuse_address = True
# Warning: this is for debugging purposes only! Never set this to True in
# production code, as it will send out sensitive information (exception
# and stack trace details) when exceptions are raised inside
# SimpleXMLRPCRequestHandler.do_POST
_send_traceback_header = False
def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
logRequests=True, allow_none=False, encoding=None,
bind_and_activate=True, use_builtin_types=False):
self.logRequests = logRequests
SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding, use_builtin_types)
socketserver.TCPServer.__init__(self, addr, requestHandler, bind_and_activate)
# [Bug #1222790] If possible, set close-on-exec flag; if a
# method spawns a subprocess, the subprocess shouldn't have
# the listening socket open.
if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
class MultiPathXMLRPCServer(SimpleXMLRPCServer):
"""Multipath XML-RPC Server
This specialization of SimpleXMLRPCServer allows the user to create
multiple Dispatcher instances and assign them to different
HTTP request paths. This makes it possible to run two or more
'virtual XML-RPC servers' at the same port.
Make sure that the requestHandler accepts the paths in question.
"""
def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
logRequests=True, allow_none=False, encoding=None,
bind_and_activate=True, use_builtin_types=False):
SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests, allow_none,
encoding, bind_and_activate, use_builtin_types)
self.dispatchers = {}
self.allow_none = allow_none
self.encoding = encoding or 'utf-8'
def add_dispatcher(self, path, dispatcher):
self.dispatchers[path] = dispatcher
return dispatcher
def get_dispatcher(self, path):
return self.dispatchers[path]
def _marshaled_dispatch(self, data, dispatch_method = None, path = None):
try:
response = self.dispatchers[path]._marshaled_dispatch(
data, dispatch_method, path)
except:
# report low level exception back to server
# (each dispatcher should have handled their own
# exceptions)
exc_type, exc_value = sys.exc_info()[:2]
response = dumps(
Fault(1, "%s:%s" % (exc_type, exc_value)),
encoding=self.encoding, allow_none=self.allow_none)
response = response.encode(self.encoding)
return response
class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher):
"""Simple handler for XML-RPC data passed through CGI."""
def __init__(self, allow_none=False, encoding=None, use_builtin_types=False):
SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding, use_builtin_types)
def handle_xmlrpc(self, request_text):
"""Handle a single XML-RPC request"""
response = self._marshaled_dispatch(request_text)
print('Content-Type: text/xml')
print('Content-Length: %d' % len(response))
print()
sys.stdout.flush()
sys.stdout.buffer.write(response)
sys.stdout.buffer.flush()
def handle_get(self):
"""Handle a single HTTP GET request.
Default implementation indicates an error because
XML-RPC uses the POST method.
"""
code = 400
message, explain = BaseHTTPRequestHandler.responses[code]
response = http_server.DEFAULT_ERROR_MESSAGE % \
{
'code' : code,
'message' : message,
'explain' : explain
}
response = response.encode('utf-8')
print('Status: %d %s' % (code, message))
print('Content-Type: %s' % http_server.DEFAULT_ERROR_CONTENT_TYPE)
print('Content-Length: %d' % len(response))
print()
sys.stdout.flush()
sys.stdout.buffer.write(response)
sys.stdout.buffer.flush()
def handle_request(self, request_text=None):
"""Handle a single XML-RPC request passed through a CGI post method.
If no XML data is given then it is read from stdin. The resulting
XML-RPC response is printed to stdout along with the correct HTTP
headers.
"""
if request_text is None and \
os.environ.get('REQUEST_METHOD', None) == 'GET':
self.handle_get()
else:
# POST data is normally available through stdin
try:
length = int(os.environ.get('CONTENT_LENGTH', None))
except (ValueError, TypeError):
length = -1
if request_text is None:
request_text = sys.stdin.read(length)
self.handle_xmlrpc(request_text)
# -----------------------------------------------------------------------------
# Self documenting XML-RPC Server.
class ServerHTMLDoc(pydoc.HTMLDoc):
"""Class used to generate pydoc HTML document for a server"""
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
# XXX Note that this regular expression does not allow for the
# hyperlinking of arbitrary strings being used as method
# names. Only methods with names consisting of word characters
# and '.'s are hyperlinked.
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?((?:\w|\.)+))\b')
while 1:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
url = escape(all).replace('"', '"')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return ''.join(results)
def docroutine(self, object, name, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
title = '<a name="%s"><strong>%s</strong></a>' % (
self.escape(anchor), self.escape(name))
if inspect.ismethod(object):
args = inspect.getfullargspec(object)
# exclude the argument bound to the instance, it will be
# confusing to the non-Python user
argspec = inspect.formatargspec (
args.args[1:],
args.varargs,
args.varkw,
args.defaults,
annotations=args.annotations,
formatvalue=self.formatvalue
)
elif inspect.isfunction(object):
args = inspect.getfullargspec(object)
argspec = inspect.formatargspec(
args.args, args.varargs, args.varkw, args.defaults,
annotations=args.annotations,
formatvalue=self.formatvalue)
else:
argspec = '(...)'
if isinstance(object, tuple):
argspec = object[0] or argspec
docstring = object[1] or ""
else:
docstring = pydoc.getdoc(object)
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
doc = self.markup(
docstring, self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def docserver(self, server_name, package_documentation, methods):
"""Produce HTML documentation for an XML-RPC server."""
fdict = {}
for key, value in methods.items():
fdict[key] = '#-' + key
fdict[value] = fdict[key]
server_name = self.escape(server_name)
head = '<big><big><strong>%s</strong></big></big>' % server_name
result = self.heading(head, '#ffffff', '#7799ee')
doc = self.markup(package_documentation, self.preformat, fdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
contents = []
method_items = sorted(methods.items())
for key, value in method_items:
contents.append(self.docroutine(value, key, funcs=fdict))
result = result + self.bigsection(
'Methods', '#ffffff', '#eeaa77', ''.join(contents))
return result
class XMLRPCDocGenerator(object):
"""Generates documentation for an XML-RPC server.
This class is designed as mix-in and should not
be constructed directly.
"""
def __init__(self):
# setup variables used for HTML documentation
self.server_name = 'XML-RPC Server Documentation'
self.server_documentation = \
"This server exports the following methods through the XML-RPC "\
"protocol."
self.server_title = 'XML-RPC Server Documentation'
def set_server_title(self, server_title):
"""Set the HTML title of the generated server documentation"""
self.server_title = server_title
def set_server_name(self, server_name):
"""Set the name of the generated HTML server documentation"""
self.server_name = server_name
def set_server_documentation(self, server_documentation):
"""Set the documentation string for the entire server."""
self.server_documentation = server_documentation
def generate_html_documentation(self):
"""generate_html_documentation() => html documentation for the server
Generates HTML documentation for the server using introspection for
installed functions and instances that do not implement the
_dispatch method. Alternatively, instances can choose to implement
the _get_method_argstring(method_name) method to provide the
argument string used in the documentation and the
_methodHelp(method_name) method to provide the help text used
in the documentation."""
methods = {}
for method_name in self.system_listMethods():
if method_name in self.funcs:
method = self.funcs[method_name]
elif self.instance is not None:
method_info = [None, None] # argspec, documentation
if hasattr(self.instance, '_get_method_argstring'):
method_info[0] = self.instance._get_method_argstring(method_name)
if hasattr(self.instance, '_methodHelp'):
method_info[1] = self.instance._methodHelp(method_name)
method_info = tuple(method_info)
if method_info != (None, None):
method = method_info
elif not hasattr(self.instance, '_dispatch'):
try:
method = resolve_dotted_attribute(
self.instance,
method_name
)
except AttributeError:
method = method_info
else:
method = method_info
else:
assert 0, "Could not find method in self.functions and no "\
"instance installed"
methods[method_name] = method
documenter = ServerHTMLDoc()
documentation = documenter.docserver(
self.server_name,
self.server_documentation,
methods
)
return documenter.page(self.server_title, documentation)
class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
"""XML-RPC and documentation request handler class.
Handles all HTTP POST requests and attempts to decode them as
XML-RPC requests.
Handles all HTTP GET requests and interprets them as requests
for documentation.
"""
def do_GET(self):
"""Handles the HTTP GET request.
Interpret all HTTP GET requests as requests for server
documentation.
"""
# Check that the path is legal
if not self.is_rpc_path_valid():
self.report_404()
return
response = self.server.generate_html_documentation().encode('utf-8')
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
class DocXMLRPCServer( SimpleXMLRPCServer,
XMLRPCDocGenerator):
"""XML-RPC and HTML documentation server.
Adds the ability to serve server documentation to the capabilities
of SimpleXMLRPCServer.
"""
def __init__(self, addr, requestHandler=DocXMLRPCRequestHandler,
logRequests=True, allow_none=False, encoding=None,
bind_and_activate=True, use_builtin_types=False):
SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests,
allow_none, encoding, bind_and_activate,
use_builtin_types)
XMLRPCDocGenerator.__init__(self)
class DocCGIXMLRPCRequestHandler( CGIXMLRPCRequestHandler,
XMLRPCDocGenerator):
"""Handler for XML-RPC data and documentation requests passed through
CGI"""
def handle_get(self):
"""Handles the HTTP GET request.
Interpret all HTTP GET requests as requests for server
documentation.
"""
response = self.generate_html_documentation().encode('utf-8')
print('Content-Type: text/html')
print('Content-Length: %d' % len(response))
print()
sys.stdout.flush()
sys.stdout.buffer.write(response)
sys.stdout.buffer.flush()
def __init__(self):
CGIXMLRPCRequestHandler.__init__(self)
XMLRPCDocGenerator.__init__(self)
if __name__ == '__main__':
import datetime
class ExampleService:
def getData(self):
return '42'
class currentTime:
@staticmethod
def getCurrentTime():
return datetime.datetime.now()
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.register_instance(ExampleService(), allow_dotted_names=True)
server.register_multicall_functions()
print('Serving XML-RPC on localhost port 8000')
print('It is advisable to run this example server within a secure, closed network.')
try:
server.serve_forever()
except KeyboardInterrupt:
print("\nKeyboard interrupt received, exiting.")
server.server_close()
sys.exit(0)
|
gpl-3.0
|
40223201/40223201
|
static/Brython3.1.1-20150328-091302/Lib/_random.py
|
518
|
2451
|
from browser import window
def _randint(a, b):
return int(window.Math.random()*(b-a+1)+a)
def _urandom(n):
"""urandom(n) -> str
Return n random bytes suitable for cryptographic use."""
randbytes = [_randint(0, 255) for i in range(n)]
return bytes(randbytes)
class Random:
"""Random number generator base class used by bound module functions.
Used to instantiate instances of Random to get generators that don't
share state.
Class Random can also be subclassed if you want to use a different basic
generator of your own devising: in that case, override the following
methods: random(), seed(), getstate(), and setstate().
Optionally, implement a getrandbits() method so that randrange()
can cover arbitrarily large ranges.
"""
#random
#seed
#getstate
#setstate
VERSION = 3 # used by getstate/setstate
def __init__(self, x=None):
"""Initialize an instance.
Optional argument x controls seeding, as for Random.seed().
"""
self._state=x
def seed(self, a=None, version=2):
"""Initialize internal state from hashable object.
None or no argument seeds from current time or from an operating
system specific randomness source if available.
For version 2 (the default), all of the bits are used if *a* is a str,
bytes, or bytearray. For version 1, the hash() of *a* is used instead.
If *a* is an int, all bits are used.
"""
self._state=a
self.gauss_next = None
def getstate(self):
"""Return internal state; can be passed to setstate() later."""
return self._state
def setstate(self, state):
"""Restore internal state from object returned by getstate()."""
self._state=state
def random(self):
"""Get the next random number in the range [0.0, 1.0)."""
return window.Math.random()
def getrandbits(self, k):
"""getrandbits(k) -> x. Generates a long int with k random bits."""
if k <= 0:
raise ValueError('number of bits must be greater than zero')
if k != int(k):
raise TypeError('number of bits should be an integer')
numbytes = (k + 7) // 8 # bits / 8 and rounded up
x = int.from_bytes(_urandom(numbytes), 'big')
return x >> (numbytes * 8 - k) # trim excess bits
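# Illustrative check (comment only, added for clarity): getrandbits(4) computes
# numbytes = (4 + 7) // 8 = 1, draws one random byte in [0, 255] and shifts it
# right by 8 - 4 = 4 bits, so the result is an int in [0, 15].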
|
gpl-3.0
|
Johnzero/erp
|
openerp/addons/hr_payroll/wizard/hr_payroll_contribution_register_report.py
|
9
|
2115
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil import relativedelta
from osv import osv, fields
class payslip_lines_contribution_register(osv.osv_memory):
_name = 'payslip.lines.contribution.register'
_description = 'PaySlip Lines by Contribution Registers'
_columns = {
'date_from': fields.date('Date From', required=True),
'date_to': fields.date('Date To', required=True),
}
_defaults = {
'date_from': lambda *a: time.strftime('%Y-%m-01'),
'date_to': lambda *a: str(datetime.now() + relativedelta.relativedelta(months=+1, day=1, days=-1))[:10],
}
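# Example of the defaults above (illustrative): run on 2011-03-15 they evaluate to
# date_from = '2011-03-01' and date_to = '2011-03-31', i.e. the current month, since
# months=+1, day=1 gives the first day of the next month and days=-1 steps back one day.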
def print_report(self, cr, uid, ids, context=None):
datas = {
'ids': context.get('active_ids', []),
'model': 'hr.contribution.register',
'form': self.read(cr, uid, ids, [], context=context)[0]
}
return {
'type': 'ir.actions.report.xml',
'report_name': 'contribution.register.lines',
'datas': datas,
}
payslip_lines_contribution_register()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
maxlit/pyEdgeworthBox
|
pyEdgeworthBox/pyEdgeworthBox.py
|
1
|
10602
|
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt, copysign
from scipy.optimize import brenth
from scipy.optimize import fsolve,fmin_l_bfgs_b,fmin_cg,fminbound
"""
sign of the number
"""
def sign(x):
if x==0:
return 0
else:
return copysign(1,x)
"""
if function f can't be computed, return None
"""
def f_None(f,x):
try:
return f(x)
except:
return None
"""
if the bound was touched returns None
L is the level of the function f
"""
def correct(x,y,f,L):
eps=10e-5
if abs(f(x,y)-L)>eps:
return None
else:
return y
"""
if the output can't be produced, return infinity; if there's division by zero, look for the limit and return it
"""
def _(f,*x):
try:
out=f(*x)
if out is None:
return float("inf")
else:
return out
except ZeroDivisionError:
l=len(x)
eps=abs(f(*[1e-02]*l)-f(*[1e-04]*l))
if abs(f(*[1e-04]*l)-f(*[1e-06]*l))<eps and abs(f(*[1e-06]*l)-f(*[1e-08]*l))<eps:
return f(*[1e-10]*l)
else:
return sign(f(*[1e-10]*l))*float("inf")
"""
produces the array of the first items of the element of the array
"""
def fst(X):
return list(map(lambda x: x[0],X))
"""
produces the array of the second items of the element of the array
"""
def snd(X):
return list(map(lambda x: x[1],X))
"""
unpacks [(X_1,Y_1),...,(X_k,Y_k),...,(X_n,Y_n)] into [(X_1,...,X_k,...,X_n),(Y_1,...,Y_k,...,Y_n)]
"""
def unpack(X):
return [fst(X),snd(X)]
"""
find a root of the function. If the ends of the interval have the same sign, shift the interval and retry
"""
def rootalt(f,a,b):
eps=(b-a)/64.0
turn=0
N_iter=10
while abs(a-b)>eps and N_iter > 0:
N_iter-=1
try:
#return fmin_cg(f,(a+b)/2.0)[0]
return brenth(f,a,b)
except ValueError:
if turn==0:
a=a+eps
turn=1
else:
b=b+eps
turn=0
#return root2(f,a,b)
return None
def root(f,a,b):
a_init=a
b_init=b
eps=(b-a)/16.0
turn=0
N_iter=12
while abs(a-b)>eps and N_iter > 0 and f(a)*f(b)>0:
N_iter-=1
if turn==0:
a=a+eps
turn=1
else:
b=b-eps
turn=0
try:
return brenth(f,a,b)
except ValueError:
return fminbound(f,a_init,b_init)
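# Illustrative example (assumed inputs): root(lambda x: x**2 - 2, 0.0, 2.0) finds
# sqrt(2) ~ 1.414 via brenth, since f(0) and f(2) already have opposite signs.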
def root2(f,a,b):
return fmin_cg(f,(a+b)/2.0,disp=False)[0]
def root3(f,a,b):
return fmin_l_bfgs_b(func=f,x0=(a+b)/2,bounds=[a,b])
"""
2-point numerical derivative
"""
def prime(f,dt=10e-3):
return lambda x: (f(x+dt)-f(x-dt))/(2*dt)
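# Illustrative check (comment only): prime(lambda x: x**2)(3.0) returns approximately 6.0,
# since the central difference is exact for quadratics up to floating-point error.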
"""
Marginal rate of substitution of a utility function u(.)
"""
def MRS(u):
u_x=lambda x,y: prime(lambda z: u(z,y))(x)
u_y=lambda x,y: prime(lambda z: u(x,z))(y)
return lambda x,y: u_x(x,y)/u_y(x,y)
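# Illustrative check (assumed utility): for Cobb-Douglas u(x, y) = x**0.5 * y**0.5
# the analytic MRS is y/x, so MRS(lambda x, y: x**0.5 * y**0.5)(2.0, 4.0) ~ 2.0.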
"""
Edgeworth Box parameter determine that to show on the plot
"""
class EdgeBoxParameter:
#def __init__(self,pareto,core,U1,U2,endow,walras,budget,N):
#boll_array=[pareto,core,U1,U2,endow,walras,budget]
def __init__(self,N,pareto=True,core=True,eq=True,budget=True):
self.N=N
self.pareto=pareto
self.core=core
self.eq=eq
self.budget=budget
defaultEBP=EdgeBoxParameter(100)
class EdgeBox():
def __init__(self,u1,u2,IE1,IE2,EBP=defaultEBP):
self.core=0
self.pareto=0
self.eq=0
self.p=[None,1]
self.p_weighted=[None,None]
self.u1=u1
self.u2=u2
self.u2_compl=lambda x,y: u2(self.IE[0]-x,self.IE[1]-y)
self.IE1=IE1
self.IE2=IE2
self.IE=[IE1[0]+IE2[0],IE1[1]+IE2[1]]
self.EBP=EBP
self.dt=min(self.IE)/float(EBP.N)
self.X=np.linspace(self.dt,self.IE[0]-self.dt,EBP.N)
self.Y=np.linspace(self.dt,self.IE[1]-self.dt,EBP.N)
self.calc_init()
self.calc()
def calc(self):
"""
calculate all solutions of the box
"""
self.calc_pareto()
self.calc_core()
self.calc_eq()
self.calc_budget()
def calc_init(self):
self.u1(*self.IE1)
self.UIE1=self.u1(*self.IE1) # utility of the 1-st player at her initial endowment
self.UIE2=self.u2(*self.IE2) # utility of the 2-nd player at her initial endowment
self.u_ie_1=lambda x: root(lambda y: self.u1(x,y)-self.UIE1,self.Y[0],self.Y[-1]) # utility function at initial endowment of the 1-st participant
self.u_ie_2=lambda x: root(lambda y: self.u2(x,y)-self.UIE2,self.Y[0],self.Y[-1]) # utility function at initial endowment of the 2-nd participant
self.u_ie_2_compl=lambda x: -self.u_ie_2(self.IE[0]-x)+self.IE[1] # utility function at initial endowment of the 2-nd participant in terms of the 1-st
U1 = list(map(lambda x: correct(x,f_None(self.u_ie_1,x),self.u1,self.UIE1),self.X))
U2 = list(map(lambda x: correct(x,f_None(self.u_ie_2_compl,x),self.u2_compl,self.UIE2),self.X))
self.U1 = list(filter(lambda x: x[0] is not None and x[1] is not None,zip(self.X,U1)))
self.U2 = list(filter(lambda x: x[0] is not None and x[1] is not None,zip(self.X,U2)))
U1_sort = sorted(self.U1,key=lambda x: x[1])
U2_sort = sorted(self.U2,key=lambda x: x[1])
if len(U1_sort)>0:
self.U1_min=U1_sort[0]
self.U1_max=U1_sort[-1]
else:
self.U1_min=None
self.U1_max=None
if len(U2_sort)>0:
self.U2_min=U2_sort[0]
self.U2_max=U2_sort[-1]
else:
self.U2_min=None
self.U2_max=None
self._B=lambda x,y,p: y-(p*(self.IE1[0]-x)+self.IE1[1]) # budget constraint
def calc_pareto(self):
self.MRS1=MRS(self.u1) # marginal rate of substitution of the 1st participant
self.MRS2=MRS(self.u2) # marginal rate of substitution of the 2nd participant
self._pareto=lambda x: root(lambda y: _(self.MRS1,x,y)-_(self.MRS2,self.IE[0]-x,self.IE[1]-y),self.Y[0],self.Y[-1]) # Pareto solutions in functional form
P = list(map(lambda x: f_None(self._pareto,x),self.X[1:-1]))
self.PARETO=list(zip(self.X[1:-1],P)) # set of some Pareto solution points (enough to draw it)
self._Bx=lambda x: root(lambda y: self._B(x,y,self.MRS1(x,y)),self.Y[0],self.Y[-1])
#plot_pareto,=plt.plot(X,P,linewidth=2)
PU1_X=root(lambda x: _(self._pareto,x)-_(self.u_ie_1,x),self.U1_min[0],self.U1_max[0])
PU2_X=root(lambda x: _(self._pareto,x)-_(self.u_ie_2_compl,x),self.U2_min[0],self.U2_max[0])
PU1_Y=self.u_ie_1(PU1_X)
PU2_Y=self.u_ie_2_compl(PU2_X)
self.PU1=[PU1_X,PU1_Y]
self.PU2=[PU2_X,PU2_Y]
self._Bx=lambda x: root(lambda y: _(self._B,x,y,_(self.MRS1,x,y)),self.Y[0],self.Y[-1])
def calc_core(self):
CORE_X = list(filter(lambda x: x>=self.PU1[0] and x<=self.PU2[0], self.X))
CORE_Y = list(map(lambda x: self._pareto(x), CORE_X))
self.CORE = list(zip(CORE_X,CORE_Y)) # set of some solutions in the core (could be one, could be many or none)
def calc_eq(self):
EQ_X1=root(lambda x: _(self._pareto,x)-_(self._Bx,x),self.PU1[0],self.PU2[0])
EQ_Y1=self._pareto(EQ_X1)
EQ_X2=self.IE[0]-EQ_X1
EQ_Y2=self.IE[1]-EQ_Y1
self.EQ1=[EQ_X1,EQ_Y1] # equilibrium solution for the 1st participant
self.EQ2=[EQ_X2,EQ_Y2] # equilibrium solution for the 2nd participant
self.p=self.MRS1(*self.EQ1) # price vector
self.p_weighted=[self.p/(self.p+1),1/(self.p+1)]
self.UEQ1=self.u1(*self.EQ1) # value of utility function of the 1st participant at her equilibrium point (functional form)
self.UEQ2=self.u2(*self.EQ2) # value of utility function of the 2nd participant at her equilibrium point (functional form)
self.u_eq_1=lambda x: root(lambda y: self.u1(x,y)-self.UEQ1,self.Y[0],self.Y[-1])
self.u_eq_2=lambda x: root(lambda y: self.u2(x,y)-self.UEQ2,self.Y[0],self.Y[-1])
self.u_eq_2_compl=lambda x: -self.u_eq_2(self.IE[0]-x)+self.IE[1]
U1_EQ = list(map(lambda x: correct(x,f_None(self.u_eq_1,x),self.u1,self.UEQ1),self.X))
U2_EQ = list(map(lambda x: correct(x,f_None(self.u_eq_2_compl,x),self.u2_compl,self.UEQ2),self.X))
self.U1_EQ = list(filter(lambda x: x[0] is not None and x[1] is not None,zip(self.X,U1_EQ)))
self.U2_EQ = list(filter(lambda x: x[0] is not None and x[1] is not None,zip(self.X,U2_EQ)))
def calc_budget(self,price=None):
if price is None:
price=self.p
self.Bp=lambda x: price*self.IE1[0]+self.IE1[1]-price*x # budget line (functional form)
Budget = list(map(self.Bp,self.X)) # set of some points from the budget line
self.BUDGET = list(zip(self.X,Budget))
def plot(self,fname=None):
plot_endow,=plt.plot(self.IE1[0],self.IE1[1],color="white",marker="o")
m=max(self.IE[0],self.IE[1])
plt.axis([0,m,0,m],autoscale=False)
plot_U1,=plt.plot(*unpack(self.U1),color="blue")
plot_U2,=plt.plot(*unpack(self.U2),color="brown")
plot_pareto,=plt.plot(*unpack(self.PARETO),linewidth=2,color="red")
plot_core,=plt.plot(*unpack(self.CORE),color="black",linewidth=4)
plot_U1_EQ,=plt.plot(*unpack(self.U1_EQ),ls='--',color="blue")
plot_U2_EQ,=plt.plot(*unpack(self.U2_EQ),ls='--',color="brown")
plot_budget,=plt.plot(*unpack(self.BUDGET),color="green")
plt.plot(self.PU1[0],self.PU1[1],color="blue",marker="o")
plt.plot(self.PU2[0],self.PU2[1],color="brown",marker="o")
plot_walras,=plt.plot(self.EQ1[0],self.EQ1[1],color="green",marker="o")
# annotation
plt.annotate("(%s;%s)"%(round(self.EQ1[0],2),round(self.EQ1[1],2)), xy=self.EQ1, xytext=(self.EQ1[0]+self.dt,self.EQ1[1]-self.dt))
plt.title("Edgeworth Box")
plt.legend([plot_pareto,plot_U1,plot_U2,plot_endow,plot_core,plot_walras,plot_budget,plot_U1_EQ,plot_U2_EQ]
,["Pareto","U1 before trade","U2 before trade","Init. endow.","Core","Equilibrium","Budget constraint","U1 at eq.","U2 at eq."])
#Axes Dscription
plt.xlabel("Units of 1-st good")
plt.ylabel("Units of 2-nd good")
if fname is not None:
plt.savefig(fname)
plt.close()
else:
plt.show(block=False)
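# Hedged usage sketch (illustrative only; the utility functions and endowments below
# are assumptions, not part of the original module):
#   box = EdgeBox(u1=lambda x, y: x**0.5 * y**0.5,   # Cobb-Douglas consumer 1
#                 u2=lambda x, y: x**0.3 * y**0.7,   # Cobb-Douglas consumer 2
#                 IE1=[1, 2], IE2=[3, 1])            # initial endowments
#   box.plot("edgeworth_box.png")                    # pass a filename to save instead of show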
|
mit
|
salilab/rmf
|
tools/dev_tools/test/test_cpp_format.py
|
3
|
9735
|
import unittest
import os
import sys
import utils
TOPDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, TOPDIR)
try:
from python_tools import cpp_format
finally:
del sys.path[0]
from pygments import token
# Tokens we'd expect to see in an OK header
ok_header_tokens = [
(token.Comment.Multiline, "/* Comment"),
(token.Text, "\n"),
(token.Text, "\n"),
(token.Comment.Preproc, "#"),
(token.Comment.Preproc, "ifndef IMPKERNEL_TEST_H"),
(token.Comment.Preproc, "\n"),
(token.Comment.Preproc, "#"),
(token.Comment.Preproc, "define IMPKERNEL_TEST_H"),
(token.Comment.Preproc, "\n"),
(token.Text, "\n"),
(token.Comment.Preproc, '#'),
(token.Comment.Preproc, 'endif '),
(token.Comment.Multiline, '/* IMPKERNEL_TEST_H */'),
(token.Comment.Preproc, '\n')
]
class Tests(unittest.TestCase):
def test_have_header_guard(self):
"""Test have_header_guard()"""
self.assertTrue(cpp_format.have_header_guard(ok_header_tokens))
bad_tokens = ok_header_tokens[:]
del bad_tokens[1]
self.assertFalse(cpp_format.have_header_guard(bad_tokens))
def test_get_header_guard(self):
"""Test get_header_guard()"""
self.assertEqual(cpp_format.get_header_guard("/foo/bar/FooHeader.h",
"IMP.mytest"),
("IMPMYTEST", "FOOHEADER_H"))
def check_header_start_end(self, tokens):
errors = []
cpp_format.check_header_start_end(tokens, "/foo/bar/Test.h",
"IMP.kernel", errors)
return errors
def assertMissingGuard(self, error):
self.assertTrue(":1: Missing or incomplete header guard." in error)
def test_check_header_start_end_ok(self):
"""Test check_header_start_end with an OK header"""
errors = self.check_header_start_end(ok_header_tokens)
self.assertEqual(len(errors), 0)
def test_check_header_start_end_too_short(self):
"""Test check_header_start_end with too few tokens"""
errors = self.check_header_start_end([])
self.assertEqual(len(errors), 1)
self.assertMissingGuard(errors[0])
def test_check_header_start_end_missing_ifndef(self):
"""Test check_header_start_end with missing ifndef"""
bad_tokens = ok_header_tokens[:]
bad_tokens[4] = (token.Comment.Preproc, "garbage IMPKERNEL_TEST_H")
errors = self.check_header_start_end(bad_tokens)
self.assertEqual(len(errors), 5)
self.assertTrue(":1: Header guard missing #ifndef." in errors[0])
self.assertMissingGuard(errors[4])
def test_check_header_start_end_missing_ifndef_preproc(self):
"""Test check_header_start_end with missing ifndef preproc"""
bad_tokens = ok_header_tokens[:]
bad_tokens[4] = (token.Text, "ifndef IMPKERNEL_TEST_H")
errors = self.check_header_start_end(bad_tokens)
self.assertEqual(len(errors), 1)
self.assertMissingGuard(errors[0])
def test_check_header_start_end_missing_define(self):
"""Test check_header_start_end with missing define"""
bad_tokens = ok_header_tokens[:]
bad_tokens[7] = (token.Comment.Preproc, "garbage IMPKERNEL_TEST_H")
errors = self.check_header_start_end(bad_tokens)
self.assertEqual(len(errors), 3)
self.assertTrue(":1: Header guard missing #define." in errors[0])
self.assertTrue('1: Header guard does not define "IMPKERNEL_TEST_H"'
in errors[1])
self.assertMissingGuard(errors[2])
def test_check_header_start_end_missing_define_preproc(self):
"""Test check_header_start_end with missing define preproc"""
bad_tokens = ok_header_tokens[:]
bad_tokens[7] = (token.Text, "define IMPKERNEL_TEST_H")
errors = self.check_header_start_end(bad_tokens)
self.assertEqual(len(errors), 1)
self.assertMissingGuard(errors[0])
def test_check_header_start_end_missing_endif(self):
"""Test check_header_start_end with missing endif"""
bad_tokens = ok_header_tokens[:]
bad_tokens[-3] = (token.Comment.Preproc, "garbage")
errors = self.check_header_start_end(bad_tokens)
self.assertEqual(len(errors), 2)
self.assertTrue(":1: Header guard missing #endif." in errors[0])
self.assertMissingGuard(errors[1])
def test_check_header_start_end_missing_endif_preproc(self):
"""Test check_header_start_end with missing endif preproc"""
bad_tokens = ok_header_tokens[:]
bad_tokens[-4] = (token.Text, "#")
bad_tokens[-3] = (token.Text, "endif")
errors = self.check_header_start_end(bad_tokens)
self.assertEqual(len(errors), 1)
self.assertMissingGuard(errors[0])
def test_check_header_start_end_missing_close_comment(self):
"""Test check_header_start_end with missing closing comment"""
bad_tokens = ok_header_tokens[:]
bad_tokens[-2] = (token.Text, '/* IMPKERNEL_TEST_H */')
errors = self.check_header_start_end(bad_tokens)
self.assertEqual(len(errors), 2)
self.assertTrue(":1: Header guard missing closing comment."
in errors[0])
self.assertMissingGuard(errors[1])
def test_check_header_start_end_wrong_close_comment(self):
"""Test check_header_start_end with wrong closing comment"""
bad_tokens = ok_header_tokens[:]
bad_tokens[-2] = (token.Comment.Multiline, '/* IMPKERNEL_FOO_H */')
errors = self.check_header_start_end(bad_tokens)
self.assertEqual(len(errors), 2)
self.assertTrue(':1: Header guard close does not have a comment '
'of "/* IMPKERNEL_TEST_H */".' in errors[0])
self.assertMissingGuard(errors[1])
def test_check_header_start_end_bad_guard_prefix(self):
"""Test check_header_start_end with bad header guard prefix"""
bad_tokens = ok_header_tokens[:]
bad_tokens[4] = (token.Comment.Preproc, "ifndef bad_FOO_H")
errors = self.check_header_start_end(bad_tokens)
self.assertEqual(len(errors), 5)
self.assertTrue(':1: Header guard does not start with "IMPKERNEL".'
in errors[0])
self.assertMissingGuard(errors[4])
def test_check_header_start_end_bad_guard_suffix(self):
"""Test check_header_start_end with bad header guard suffix"""
bad_tokens = ok_header_tokens[:]
bad_tokens[4] = (token.Comment.Preproc, "ifndef IMPKERNEL_bad")
errors = self.check_header_start_end(bad_tokens)
self.assertEqual(len(errors), 4)
self.assertTrue(':1: Header guard does not end with "TEST_H".'
in errors[0])
self.assertMissingGuard(errors[3])
def test_check_comment_header_ok(self):
"""Test check_comment_header with OK comment"""
errors = []
cpp_format.check_comment_header(ok_header_tokens, "/foo/bar/Test.h",
errors)
self.assertEqual(errors, [])
def test_check_comment_header_empty(self):
"""Test check_comment_header with empty file"""
errors = []
cpp_format.check_comment_header([], "/foo/bar/Test.h", errors)
self.assertEqual(errors, ['/foo/bar/Test.h:1: First line should be '
'a comment with a copyright notice and a '
'description of the file'])
def test_check_comment_header_not_comment(self):
"""Test check_comment_header with something not a comment"""
errors = []
cpp_format.check_comment_header([(token.Text, "\n")],
"/foo/bar/Test.h", errors)
self.assertEqual(errors, ['/foo/bar/Test.h:1: First line should be '
'a comment with a copyright notice and a '
'description of the file'])
def test_tokenize_file(self):
"""Test tokenize_file()"""
class DummyFile(object):
def read(self):
return None
fh = DummyFile()
self.assertEqual(cpp_format.tokenize_file(fh), [("tok1", "val1"),
("tok2", "val2")])
def test_check_header_file(self):
"""Test check_header_file()"""
def ok_tokenize(fh):
return ok_header_tokens
with utils.mocked_function(cpp_format, 'tokenize_file', ok_tokenize):
errors = []
cpp_format.check_header_file((None, "/foo/bar/test.h"),
"IMP.kernel", errors)
self.assertEqual(errors, [])
def test_check_cpp_file(self):
"""Test check_cpp_file()"""
def ok_tokenize(fh):
return ok_header_tokens
with utils.mocked_function(cpp_format, 'tokenize_file', ok_tokenize):
errors = []
cpp_format.check_cpp_file((None, "/foo/bar/test.cpp"),
"IMP.kernel", errors)
self.assertEqual(errors, [])
def test_check_cpp_file_test(self):
"""Test check_cpp_file() with a test file"""
def empty_tokenize(fh):
return []
with utils.mocked_function(cpp_format, 'tokenize_file',
empty_tokenize):
errors = []
cpp_format.check_cpp_file((None, "/foo/bar/test_foo.cpp"),
"IMP.kernel", errors)
self.assertEqual(errors, [])
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
lmprice/ansible
|
lib/ansible/modules/remote_management/ucs/ucs_wwn_pool.py
|
49
|
8651
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: ucs_wwn_pool
short_description: Configures WWNN or WWPN pools on Cisco UCS Manager
description:
- Configures WWNN or WWPN pools on Cisco UCS Manager.
- Examples can be used with the UCS Platform Emulator U(https://communities.cisco.com/ucspe).
extends_documentation_fragment: ucs
options:
state:
description:
- If C(present), will verify WWNNs/WWPNs are present and will create if needed.
- If C(absent), will verify WWNNs/WWPNs are absent and will delete if needed.
choices: [present, absent]
default: present
name:
description:
- The name of the World Wide Node Name (WWNN) or World Wide Port Name (WWPN) pool.
- This name can be between 1 and 32 alphanumeric characters.
- "You cannot use spaces or any special characters other than - (hyphen), \"_\" (underscore), : (colon), and . (period)."
- You cannot change this name after the WWNN or WWPN pool is created.
required: yes
purpose:
description:
- Specify whether this is a node (WWNN) or port (WWPN) pool.
- Optional if state is absent.
choices: [node, port]
required: yes
description:
description:
- A description of the WWNN or WWPN pool.
- Enter up to 256 characters.
- "You can use any characters or spaces except the following:"
- "` (accent mark), \ (backslash), ^ (carat), \" (double quote), = (equal sign), > (greater than), < (less than), or ' (single quote)."
aliases: [ descr ]
order:
description:
- The Assignment Order field.
- "This can be one of the following:"
- "default - Cisco UCS Manager selects a random identity from the pool."
- "sequential - Cisco UCS Manager selects the lowest available identity from the pool."
choices: [default, sequential]
default: default
first_addr:
description:
- The first initiator in the World Wide Name (WWN) block.
- This is the From field in the UCS Manager Add WWN Blocks menu.
last_addr:
description:
- The last initiator in the World Wide Name (WWN) block.
- This is the To field in the UCS Manager Add WWN Blocks menu.
- For WWxN pools, the pool size must be a multiple of ports-per-node + 1.
- For example, if there are 7 ports per node, the pool size must be a multiple of 8.
- If there are 63 ports per node, the pool size must be a multiple of 64.
org_dn:
description:
- Org dn (distinguished name)
default: org-root
requirements:
- ucsmsdk
author:
- David Soper (@dsoper2)
- CiscoUcs (@CiscoUcs)
version_added: '2.5'
'''
EXAMPLES = r'''
- name: Configure WWNN/WWPN pools
ucs_wwn_pool:
hostname: 172.16.143.150
username: admin
password: password
name: WWNN-Pool
purpose: node
first_addr: 20:00:00:25:B5:48:00:00
last_addr: 20:00:00:25:B5:48:00:0F
- ucs_wwn_pool:
hostname: 172.16.143.150
username: admin
password: password
name: WWPN-Pool-A
purpose: port
order: sequential
first_addr: 20:00:00:25:B5:48:0A:00
last_addr: 20:00:00:25:B5:48:0A:0F
- name: Remove WWNN/WWPN pools
ucs_wwn_pool:
hostname: 172.16.143.150
username: admin
password: password
name: WWNN-Pool
state: absent
- ucs_wwn_pool:
hostname: 172.16.143.150
username: admin
password: password
name: WWPN-Pool-A
state: absent
'''
RETURN = r'''
#
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.remote_management.ucs import UCSModule, ucs_argument_spec
def main():
argument_spec = ucs_argument_spec
argument_spec.update(
org_dn=dict(type='str', default='org-root'),
name=dict(type='str'),
purpose=dict(type='str', choices=['node', 'port']),
descr=dict(type='str'),
order=dict(type='str', default='default', choices=['default', 'sequential']),
first_addr=dict(type='str'),
last_addr=dict(type='str'),
state=dict(type='str', default='present', choices=['present', 'absent']),
wwn_list=dict(type='list'),
)
# Note that use of wwn_list is an experimental feature which allows multiple resource updates with a single UCSM connection.
# Support for wwn_list may change or be removed once persistent UCS connections are supported.
# Either wwn_list or name is required (user can specify either a list or single resource).
module = AnsibleModule(
argument_spec,
supports_check_mode=True,
required_one_of=[
['wwn_list', 'name']
],
mutually_exclusive=[
['wwn_list', 'name']
],
)
ucs = UCSModule(module)
err = False
from ucsmsdk.mometa.fcpool.FcpoolInitiators import FcpoolInitiators
from ucsmsdk.mometa.fcpool.FcpoolBlock import FcpoolBlock
changed = False
try:
# Only documented use is a single resource, but to also support the experimental
# feature allowing multiple updates, all params are converted to a wwn_list below.
if module.params['wwn_list']:
# directly use the list (single resource and list are mutually exclusive)
wwn_list = module.params['wwn_list']
else:
# single resource specified, create list from the current params
wwn_list = [module.params]
for wwn in wwn_list:
mo_exists = False
props_match = False
# set default params. Done here to set values for lists which can't be done in the argument_spec
if not wwn.get('descr'):
wwn['descr'] = ''
if not wwn.get('order'):
wwn['order'] = 'default'
# dn is <org_dn>/wwn-pool-<name> for WWNN or WWPN
dn = module.params['org_dn'] + '/wwn-pool-' + wwn['name']
mo = ucs.login_handle.query_dn(dn)
if mo:
mo_exists = True
if module.params['state'] == 'absent':
if mo_exists:
if not module.check_mode:
ucs.login_handle.remove_mo(mo)
ucs.login_handle.commit()
changed = True
else:
# append purpose param with suffix used by UCSM
purpose_param = wwn['purpose'] + '-wwn-assignment'
if mo_exists:
# check top-level mo props
kwargs = dict(assignment_order=wwn['order'])
kwargs['descr'] = wwn['descr']
kwargs['purpose'] = purpose_param
if (mo.check_prop_match(**kwargs)):
# top-level props match, check next level mo/props
if 'last_addr' in wwn and 'first_addr' in wwn:
block_dn = dn + '/block-' + wwn['first_addr'].upper() + '-' + wwn['last_addr'].upper()
mo_1 = ucs.login_handle.query_dn(block_dn)
if mo_1:
props_match = True
else:
props_match = True
if not props_match:
if not module.check_mode:
# create if mo does not already exist
mo = FcpoolInitiators(
parent_mo_or_dn=module.params['org_dn'],
name=wwn['name'],
descr=wwn['descr'],
assignment_order=wwn['order'],
purpose=purpose_param,
)
if 'last_addr' in wwn and 'first_addr' in wwn:
mo_1 = FcpoolBlock(
parent_mo_or_dn=mo,
to=wwn['last_addr'],
r_from=wwn['first_addr'],
)
ucs.login_handle.add_mo(mo, True)
ucs.login_handle.commit()
changed = True
except Exception as e:
err = True
ucs.result['msg'] = "setup error: %s " % str(e)
ucs.result['changed'] = changed
if err:
module.fail_json(**ucs.result)
module.exit_json(**ucs.result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
dmccloskey/SBaaS_LIMS
|
SBaaS_LIMS/lims_msMethod_query.py
|
1
|
27469
|
# sbaas
from .lims_msMethod_postgresql_models import *
from SBaaS_base.postgresql_dataType_converter import postgresql_dataType_converter
from SBaaS_base.sbaas_base_query_update import sbaas_base_query_update
from SBaaS_base.sbaas_base_query_drop import sbaas_base_query_drop
from SBaaS_base.sbaas_base_query_initialize import sbaas_base_query_initialize
from SBaaS_base.sbaas_base_query_insert import sbaas_base_query_insert
from SBaaS_base.sbaas_base_query_select import sbaas_base_query_select
from SBaaS_base.sbaas_base_query_delete import sbaas_base_query_delete
from SBaaS_base.sbaas_template_query import sbaas_template_query
class lims_msMethod_query(sbaas_template_query):
def initialize_supportedTables(self):
'''Set the supported tables dict for the lims_msMethod tables.'''
tables_supported = {'ms_component_list':MS_component_list,
'ms_components':MS_components,
'ms_information':MS_information,
'ms_method':MS_method,
'ms_sourceparameters':MS_sourceParameters,
};
self.set_supportedTables(tables_supported);
def update_componentNames(self):
'''update component names for quant and isotopomer methods'''
return
#table initializations:
def drop_lims_msMethod(self):
try:
MS_components.__table__.drop(self.engine,True);
MS_sourceParameters.__table__.drop(self.engine,True);
MS_information.__table__.drop(self.engine,True);
MS_method.__table__.drop(self.engine,True);
MS_component_list.__table__.drop(self.engine,True);
except SQLAlchemyError as e:
print(e);
def reset_lims_msMethod(self):
try:
reset = self.session.query(MS_components).delete(synchronize_session=False);
reset = self.session.query(MS_sourceParameters).delete(synchronize_session=False);
reset = self.session.query(MS_information).delete(synchronize_session=False);
reset = self.session.query(MS_method).delete(synchronize_session=False);
reset = self.session.query(MS_component_list).delete(synchronize_session=False);
self.session.commit();
except SQLAlchemyError as e:
print(e);
def initialize_lims_msMethod(self):
try:
MS_components.__table__.create(self.engine,True);
MS_sourceParameters.__table__.create(self.engine,True);
MS_information.__table__.create(self.engine,True);
MS_method.__table__.create(self.engine,True);
MS_component_list.__table__.create(self.engine,True);
except SQLAlchemyError as e:
print(e);
def update_MSComponents_precursorFormulaAndMass(self,data_I):
'''update rows of ms_components
for columns of precursor_formula and precursor_exact_mass'''
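# Sketch of the expected input, inferred from the filter/update below (keys only; values are placeholders):
# data_I = [{'met_id': 'met1', 'q1_mass': 123.4, 'q3_mass': 56.7,
#            'precursor_formula': 'C6H12O6', 'precursor_exactmass': 180.063}]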
if data_I:
for d in data_I:
try:
data_update = self.session.query(MS_components).filter(
MS_components.met_id.like(d['met_id']),
MS_components.q1_mass == d['q1_mass'],
MS_components.q3_mass == d['q3_mass']).update(
{'precursor_formula':d['precursor_formula'],
'precursor_exactmass':d['precursor_exactmass']},
synchronize_session=False);
except SQLAlchemyError as e:
print(e);
self.session.commit();
def add_MSComponents(self, data_I):
'''add rows of ms_components'''
pgdatatypeconverter = postgresql_dataType_converter();
if data_I:
for d in data_I:
try:
data_add = MS_components(d
#d['q1_mass'],
#d['q3_mass'],
#d['ms3_mass'],
#d['met_name'],
#d['dp'],
#d['ep'],
#d['ce'],
#d['cxp'],
#d['af'],
#d['quantifier'],
#d['ms_mode'],
#d['ion_intensity_rank'],
#d['ion_abundance'],
#d['precursor_formula'],
#d['product_ion_reference'],
#d['product_formula'],
#d['production_ion_notes'],
#d['met_id'],
#d['external_reference'],
#d['q1_mass_units'],
#d['q3_mass_units'],
#d['ms3_mass_units'],
#d['threshold_units'],
#d['dp_units'],
#d['ep_units'],
#d['ce_units'],
#d['cxp_units'],
#d['af_units'],
#d['ms_group'],
#d['threshold'],
#d['dwell_weight'],
#d['component_name'],
#pgdatatypeconverter.convert_text2PostgresqlDataType(d['ms_include']),
#pgdatatypeconverter.convert_text2PostgresqlDataType(d['ms_is']),
#pgdatatypeconverter.convert_text2List(d['precursor_fragment']),
#pgdatatypeconverter.convert_text2List(d['product_fragment']),
#d['precursor_exactmass'],
#d['product_exactmass'],
#d['ms_methodtype'],
#pgdatatypeconverter.convert_text2List(d['precursor_fragment_elements']),
#pgdatatypeconverter.convert_text2List(d['product_fragment_elements'])
);
self.session.add(data_add);
except SQLAlchemyError as e:
print(e);
self.session.commit();
def add_MSInformation(self, data_I):
'''add rows of ms_information'''
if data_I:
for d in data_I:
try:
data_add = MS_information(d
);
self.session.add(data_add);
except SQLAlchemyError as e:
print(e);
self.session.commit();
def add_MSSourceParameters(self, data_I):
'''add rows of ms_sourceparameters'''
if data_I:
for d in data_I:
try:
data_add = MS_sourceParameters(d
);
self.session.add(data_add);
except SQLAlchemyError as e:
print(e);
self.session.commit();
def add_MSComponentList(self, data_I):
'''add rows of ms_component_list'''
if data_I:
for d in data_I:
try:
data_add = MS_component_list(d
#d['ms_method_id'],
#d['q1_mass'],
#d['q3_mass'],
#d['met_id'],
#d['component_name'],
#d['ms_methodtype']
);
self.session.add(data_add);
except SQLAlchemyError as e:
print(e);
self.session.commit();
def add_MSMethod(self, data_I):
'''add rows of ms_method'''
if data_I:
for d in data_I:
try:
data_add = MS_method(d
#d['id'],
#d['ms_sourceparameters_id'],
#d['ms_information_id'],
#d['ms_experiment_id']
);
self.session.add(data_add);
except SQLAlchemyError as e:
print(e);
self.session.commit();
def get_Q1AndQ3MassAndMode_MSComponents(self,met_id_I):
'''Query q1 mass, q3 mass, and ms_mode from ms_components'''
try:
mscomponents = self.session.query(MS_components.q1_mass,
MS_components.q3_mass,
MS_components.ms_mode).filter(
MS_components.met_id.like(met_id_I)).order_by(
MS_components.ms_mode.asc(),
MS_components.q1_mass.asc(),
MS_components.q3_mass.asc()).all();
mscomponents_O = [];
for msc in mscomponents:
mscomponents_1 = {};
mscomponents_1['met_id'] = met_id_I;
mscomponents_1['q1_mass'] = msc.q1_mass;
mscomponents_1['q3_mass'] = msc.q3_mass;
mscomponents_1['ms_mode'] = msc.ms_mode;
mscomponents_O.append(mscomponents_1);
return mscomponents_O;
except SQLAlchemyError as e:
print(e);
def get_row_MSComponents(self,met_id_I,ms_mode_I,ms_methodtype_I):
'''Query row from ms_components by met_id, ms_mode, and ms_methodtype'''
try:
mscomponents = self.session.query(MS_components.q1_mass,MS_components.q3_mass,
MS_components.ms3_mass,MS_components.met_name,MS_components.dp,
MS_components.ep,MS_components.ce,MS_components.cxp,MS_components.af,
MS_components.quantifier,MS_components.ms_mode,MS_components.ion_intensity_rank,
MS_components.ion_abundance,MS_components.precursor_formula,
MS_components.product_ion_reference,MS_components.product_formula,
MS_components.production_ion_notes,MS_components.met_id,
MS_components.external_reference,MS_components.q1_mass_units,
MS_components.q3_mass_units,MS_components.ms3_mass_units,
MS_components.threshold_units,MS_components.dp_units,
MS_components.ep_units,MS_components.ce_units,
MS_components.cxp_units,MS_components.af_units,
MS_components.ms_group,MS_components.threshold,
MS_components.dwell_weight,MS_components.component_name,
MS_components.ms_include,MS_components.ms_is,MS_components.precursor_fragment,
MS_components.product_fragment,MS_components.precursor_exactmass,
MS_components.product_exactmass,MS_components.ms_methodtype).filter(
MS_components.met_id.like(met_id_I),
MS_components.ms_mode.like(ms_mode_I),
MS_components.ms_methodtype.like(ms_methodtype_I)).all();
mscomponents_O = [];
if not mscomponents:
print('bad query for row in ms_components: ')
print('met_id: ' + met_id_I + ', ms_mode_I: ' + ms_mode_I + ', ms_methodtype_I: ' + ms_methodtype_I);
exit(-1)
for msc in mscomponents:
mscomponents_1 = {};
mscomponents_1["q1_mass"] = msc.q1_mass;
mscomponents_1["q3_mass"] = msc.q3_mass;
mscomponents_1["ms3_mass"] = msc.ms3_mass;
mscomponents_1["met_name"] = msc.met_name;
mscomponents_1["dp"] = msc.dp;
mscomponents_1["ep"] = msc.ep;
mscomponents_1["ce"] = msc.ce;
mscomponents_1["cxp"] = msc.cxp;
mscomponents_1["af"] = msc.af;
mscomponents_1["quantifier"] = msc.quantifier;
mscomponents_1["ms_mode"] = msc.ms_mode;
mscomponents_1["ion_intensity_rank"] = msc.ion_intensity_rank;
mscomponents_1["ion_abundance"] = msc.ion_abundance;
mscomponents_1["precursor_formula"] = msc.precursor_formula;
mscomponents_1["product_ion_reference"] = msc.product_ion_reference;
mscomponents_1["product_formula"] = msc.product_formula;
mscomponents_1["production_ion_notes"] = msc.production_ion_notes;
mscomponents_1["met_id"] = msc.met_id;
mscomponents_1["external_reference"] = msc.external_reference;
mscomponents_1["q1_mass_units"] = msc.q1_mass_units;
mscomponents_1["q3_mass_units"] = msc.q3_mass_units;
mscomponents_1["ms3_mass_units"] = msc.ms3_mass_units;
mscomponents_1["threshold_units"] = msc.threshold_units;
mscomponents_1["dp_units"] = msc.dp_units;
mscomponents_1["ep_units"] = msc.ep_units;
mscomponents_1["ce_units"] = msc.ce_units;
mscomponents_1["cxp_units"] = msc.cxp_units;
mscomponents_1["af_units"] = msc.af_units;
mscomponents_1["ms_group"] = msc.ms_group;
mscomponents_1["threshold"] = msc.threshold;
mscomponents_1["dwell_weight"] = msc.dwell_weight;
mscomponents_1["component_name"] = msc.component_name;
mscomponents_1["ms_include"] = msc.ms_include;
mscomponents_1["ms_is"] = msc.ms_is;
mscomponents_1["precursor_fragment"] = msc.precursor_fragment;
mscomponents_1["product_fragment"] = msc.product_fragment;
mscomponents_1["precursor_exactmass"] = msc.precursor_exactmass;
mscomponents_1["product_exactmass"] = msc.product_exactmass;
mscomponents_1["ms_methodtype"] = msc.ms_methodtype;
mscomponents_O.append(mscomponents_1);
return mscomponents_O;
except SQLAlchemyError as e:
print(e);
def get_row_MSComponents_metIDAndFormula(self,met_id_I,precursor_formula_I,
product_formula_I,ms_methodtype_I):
'''Query row from ms_components by met_id, precursor_formula, product_formula'''
try:
mscomponents = self.session.query(MS_components.q1_mass,MS_components.q3_mass,
MS_components.ms3_mass,MS_components.met_name,MS_components.dp,
MS_components.ep,MS_components.ce,MS_components.cxp,MS_components.af,
MS_components.quantifier,MS_components.ms_mode,MS_components.ion_intensity_rank,
MS_components.ion_abundance,MS_components.precursor_formula,
MS_components.product_ion_reference,MS_components.product_formula,
MS_components.production_ion_notes,MS_components.met_id,
MS_components.external_reference,MS_components.q1_mass_units,
MS_components.q3_mass_units,MS_components.ms3_mass_units,
MS_components.threshold_units,MS_components.dp_units,
MS_components.ep_units,MS_components.ce_units,
MS_components.cxp_units,MS_components.af_units,
MS_components.ms_group,MS_components.threshold,
MS_components.dwell_weight,MS_components.component_name,
MS_components.ms_include,MS_components.ms_is,MS_components.precursor_fragment,
MS_components.product_fragment,MS_components.precursor_exactmass,
MS_components.product_exactmass,MS_components.ms_methodtype).filter(
MS_components.met_id.like(met_id_I),
MS_components.precursor_formula.like(precursor_formula_I),
MS_components.product_formula.like(product_formula_I),
MS_components.ms_methodtype.like(ms_methodtype_I)).all();
mscomponents_O = [];
if not mscomponents:
print('bad query for row in ms_components: ')
print('met_id: ' + met_id_I + ', precursor_formula_I: ' + precursor_formula_I + ', product_formula_I: ' + product_formula_I + ', ms_methodtype_I: ' + ms_methodtype_I);
exit(-1)
for msc in mscomponents:
mscomponents_1 = {};
mscomponents_1["q1_mass"] = msc.q1_mass;
mscomponents_1["q3_mass"] = msc.q3_mass;
mscomponents_1["ms3_mass"] = msc.ms3_mass;
mscomponents_1["met_name"] = msc.met_name;
mscomponents_1["dp"] = msc.dp;
mscomponents_1["ep"] = msc.ep;
mscomponents_1["ce"] = msc.ce;
mscomponents_1["cxp"] = msc.cxp;
mscomponents_1["af"] = msc.af;
mscomponents_1["quantifier"] = msc.quantifier;
mscomponents_1["ms_mode"] = msc.ms_mode;
mscomponents_1["ion_intensity_rank"] = msc.ion_intensity_rank;
mscomponents_1["ion_abundance"] = msc.ion_abundance;
mscomponents_1["precursor_formula"] = msc.precursor_formula;
mscomponents_1["product_ion_reference"] = msc.product_ion_reference;
mscomponents_1["product_formula"] = msc.product_formula;
mscomponents_1["production_ion_notes"] = msc.production_ion_notes;
mscomponents_1["met_id"] = msc.met_id;
mscomponents_1["external_reference"] = msc.external_reference;
mscomponents_1["q1_mass_units"] = msc.q1_mass_units;
mscomponents_1["q3_mass_units"] = msc.q3_mass_units;
mscomponents_1["ms3_mass_units"] = msc.ms3_mass_units;
mscomponents_1["threshold_units"] = msc.threshold_units;
mscomponents_1["dp_units"] = msc.dp_units;
mscomponents_1["ep_units"] = msc.ep_units;
mscomponents_1["ce_units"] = msc.ce_units;
mscomponents_1["cxp_units"] = msc.cxp_units;
mscomponents_1["af_units"] = msc.af_units;
mscomponents_1["ms_group"] = msc.ms_group;
mscomponents_1["threshold"] = msc.threshold;
mscomponents_1["dwell_weight"] = msc.dwell_weight;
mscomponents_1["component_name"] = msc.component_name;
mscomponents_1["ms_include"] = msc.ms_include;
mscomponents_1["ms_is"] = msc.ms_is;
mscomponents_1["precursor_fragment"] = msc.precursor_fragment;
mscomponents_1["product_fragment"] = msc.product_fragment;
mscomponents_1["precursor_exactmass"] = msc.precursor_exactmass;
mscomponents_1["product_exactmass"] = msc.product_exactmass;
mscomponents_1["ms_methodtype"] = msc.ms_methodtype;
mscomponents_O.append(mscomponents_1);
return mscomponents_O[0];
except SQLAlchemyError as e:
print(e);
def get_msGroup_componentName_MSComponents(self,component_name_I):
'''Query component group names from the component name
NOTE: intended to be used within a for loop'''
try:
component_group_name = self.session.query(MS_components.ms_group).filter(
MS_components.component_name.like(component_name_I)).group_by(
MS_components.ms_group).all();
if len(component_group_name)>1:
print('more than 1 component_group_name retrieved per component_name')
component_group_name_O = component_group_name[0].ms_group;
return component_group_name_O;
except SQLAlchemyError as e:
print(e);
# Query data from ms_components
def get_precursorFormulaAndProductFormulaAndCMapsAndPositions_metID(self,met_id_I,ms_mode_I,ms_methodtype_I):
'''Query precursor formulas for the ms_mode and ms_methodtype experiment'''
try:
component_names = self.session.query(MS_components.precursor_formula,
MS_components.product_formula,
MS_components.precursor_fragment,
MS_components.product_fragment,
MS_components.precursor_fragment_elements,
MS_components.product_fragment_elements).filter(
MS_components.met_id.like(met_id_I),
MS_components.ms_methodtype.like(ms_methodtype_I),
MS_components.ms_mode.like(ms_mode_I)).group_by(
MS_components.precursor_formula,
MS_components.product_formula,
MS_components.precursor_fragment,
MS_components.product_fragment,
MS_components.precursor_fragment_elements,
MS_components.product_fragment_elements).all();
data_O = {};
if not component_names: exit('bad query result: get_precursorFormulaAndProductFormulaAndCMaps_metID');
for cn in component_names:
data_O[cn.product_formula] = {'fragment':cn.product_fragment,
'fragment_elements':cn.product_fragment_elements};
data_O[cn.precursor_formula] = {'fragment':cn.precursor_fragment,
'fragment_elements':cn.precursor_fragment_elements};
return data_O;
except SQLAlchemyError as e:
print(e);
# query precursor and product formulas from MS_components
def get_precursorAndProductFormulas_metID(self,met_id_I,ms_mode_I,ms_methodtype_I):
'''Query precursor and product formulas for the ms_mode and ms_methodtype experiment'''
try:
component_names = self.session.query(MS_components.precursor_formula,
MS_components.product_formula).filter(
MS_components.met_id.like(met_id_I),
MS_components.ms_methodtype.like(ms_methodtype_I),
MS_components.ms_mode.like(ms_mode_I)).group_by(
MS_components.precursor_formula,
MS_components.product_formula).order_by(
MS_components.precursor_formula.asc(),
MS_components.product_formula.asc()).all();
precursor_formulas_O = [];
product_formulas_O = [];
if not component_names: exit('bad query result: get_productFormulas_metID');
for cn in component_names:
if cn.product_formula: # skip unknown fragments
precursor_formulas_O.append(cn.precursor_formula);
product_formulas_O.append(cn.product_formula);
return precursor_formulas_O, product_formulas_O;
except SQLAlchemyError as e:
print(e);
# query precursor formula from MS_components
def get_precursorFormula_metID(self,met_id_I,ms_mode_I,ms_methodtype_I):
'''Query precursor formulas for the ms_mode and ms_methodtype experiment'''
try:
component_names = self.session.query(MS_components.precursor_formula).filter(
MS_components.met_id.like(met_id_I),
MS_components.ms_methodtype.like(ms_methodtype_I),
MS_components.ms_mode.like(ms_mode_I)).group_by(
MS_components.precursor_formula).all();
precursor_formula_O = None;
if not component_names: exit('bad query result: get_precursorFormula_metID');
for cn in component_names:
precursor_formula_O = cn[0];
return precursor_formula_O;
except SQLAlchemyError as e:
print(e);
def get_precursorFormulaAndProductFormulaAndCMaps_metID(self,met_id_I,ms_mode_I,ms_methodtype_I):
'''Query precursor/product formulas and fragments for the ms_mode and ms_methodtype experiment'''
try:
component_names = self.session.query(MS_components.precursor_formula,
MS_components.product_formula,
MS_components.precursor_fragment,
MS_components.product_fragment).filter(
MS_components.met_id.like(met_id_I),
MS_components.ms_methodtype.like(ms_methodtype_I),
MS_components.ms_mode.like(ms_mode_I)).group_by(
MS_components.precursor_formula,
MS_components.product_formula,
MS_components.precursor_fragment,
MS_components.product_fragment).all();
data_O = {};
if not component_names: exit('bad query result: get_precursorFormulaAndProductFormulaAndCMaps_metID');
for cn in component_names:
data_O[cn.product_formula] = cn.product_fragment;
data_O[cn.precursor_formula] = cn.precursor_fragment;
return data_O;
except SQLAlchemyError as e:
print(e);
def get_precursorFormulaAndProductFormula_metID(self,met_id_I,ms_mode_I,ms_methodtype_I):
'''Query the first precursor/product formula for the ms_mode and ms_methodtype experiment'''
try:
component_names = self.session.query(MS_components.precursor_formula,
MS_components.product_formula,
MS_components.q1_mass,
MS_components.q3_mass).filter(
MS_components.met_id.like(met_id_I),
MS_components.ms_methodtype.like(ms_methodtype_I),
MS_components.ms_mode.like(ms_mode_I)).group_by(
MS_components.precursor_formula,
MS_components.product_formula,
MS_components.q1_mass,
MS_components.q3_mass).order_by(
MS_components.q1_mass.asc(),
MS_components.q3_mass.asc(),
MS_components.precursor_formula.desc(),
MS_components.product_formula.desc()).all();
data_O = {};
product_formula_O = None;
precursor_formula_O = None;
if not component_names: exit('bad query result: get_precursorFormulaAndProductFormula');
# only need the first precursor and product formulas (i.e. monoisotopic)
product_formula_O = component_names[0].product_formula;
precursor_formula_O = component_names[0].precursor_formula;
for cn in component_names:
data_O[cn.product_formula] = cn.q1_mass;
data_O[cn.precursor_formula] = cn.q3_mass;
return precursor_formula_O,product_formula_O;
except SQLAlchemyError as e:
print(e);
def get_metIDs_msModeAndMsMethodType(self,ms_mode_I,ms_methodtype_I):
'''Query met ids for the ms_mode and ms_methodtype experiment'''
try:
component_names = self.session.query(MS_components.met_id).filter(
MS_components.ms_methodtype.like(ms_methodtype_I),
MS_components.ms_mode.like(ms_mode_I)).group_by(
MS_components.met_id).order_by(
MS_components.met_id.asc()).all();
met_id_O = [];
if not component_names: exit('bad query result: get_metIDs_msModeAndMsMethodType');
for cn in component_names:
met_id_O.append(cn.met_id);
return met_id_O;
except SQLAlchemyError as e:
print(e);
|
mit
|
flh/odoo
|
addons/website_sale/models/sale_order.py
|
8
|
10376
|
# -*- coding: utf-8 -*-
import random
from openerp import SUPERUSER_ID
from openerp.osv import osv, orm, fields
from openerp.addons.web.http import request
class payment_transaction(orm.Model):
_inherit = 'payment.transaction'
_columns = {
# link with the sale order
'sale_order_id': fields.many2one('sale.order', 'Sale Order'),
}
class sale_order(osv.Model):
_inherit = "sale.order"
def _cart_qty(self, cr, uid, ids, field_name, arg, context=None):
res = dict()
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = int(sum(l.product_uom_qty for l in (order.website_order_line or [])))
return res
_columns = {
'website_order_line': fields.one2many(
'sale.order.line', 'order_id',
string='Order Lines displayed on Website', readonly=True,
help='Order Lines to be displayed on the website. They should not be used for computation purpose.',
),
'cart_quantity': fields.function(_cart_qty, type='integer', string='Cart Quantity'),
'payment_acquirer_id': fields.many2one('payment.acquirer', 'Payment Acquirer', on_delete='set null'),
'payment_tx_id': fields.many2one('payment.transaction', 'Transaction', on_delete='set null'),
}
def _get_errors(self, cr, uid, order, context=None):
return []
def _get_website_data(self, cr, uid, order, context):
return {
'partner': order.partner_id.id,
'order': order
}
def _cart_find_product_line(self, cr, uid, ids, product_id=None, line_id=None, context=None, **kwargs):
for so in self.browse(cr, uid, ids, context=context):
domain = [('order_id', '=', so.id), ('product_id', '=', product_id)]
if line_id:
domain += [('id', '=', line_id)]
return self.pool.get('sale.order.line').search(cr, SUPERUSER_ID, domain, context=context)
def _website_product_id_change(self, cr, uid, ids, order_id, product_id, line_id=None, context=None):
so = self.pool.get('sale.order').browse(cr, uid, order_id, context=context)
values = self.pool.get('sale.order.line').product_id_change(cr, SUPERUSER_ID, [],
pricelist=so.pricelist_id.id,
product=product_id,
partner_id=so.partner_id.id,
context=context
)['value']
if line_id:
line = self.pool.get('sale.order.line').browse(cr, SUPERUSER_ID, line_id, context=context)
values['name'] = line.name
else:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
values['name'] = product.description_sale or product.name
values['product_id'] = product_id
values['order_id'] = order_id
if values.get('tax_id') != None:
values['tax_id'] = [(6, 0, values['tax_id'])]
return values
def _cart_update(self, cr, uid, ids, product_id=None, line_id=None, add_qty=0, set_qty=0, context=None, **kwargs):
""" Add or set product quantity, add_qty can be negative """
sol = self.pool.get('sale.order.line')
quantity = 0
for so in self.browse(cr, uid, ids, context=context):
if line_id != False:
line_ids = so._cart_find_product_line(product_id, line_id, context=context, **kwargs)
if line_ids:
line_id = line_ids[0]
# Create line if no line with product_id can be located
if not line_id:
values = self._website_product_id_change(cr, uid, ids, so.id, product_id, context=context)
line_id = sol.create(cr, SUPERUSER_ID, values, context=context)
if add_qty:
add_qty -= 1
# compute new quantity
if set_qty:
quantity = set_qty
elif add_qty != None:
quantity = sol.browse(cr, SUPERUSER_ID, line_id, context=context).product_uom_qty + (add_qty or 0)
# Remove zero or negative lines
if quantity <= 0:
sol.unlink(cr, SUPERUSER_ID, [line_id], context=context)
else:
# update line
values = self._website_product_id_change(cr, uid, ids, so.id, product_id, line_id, context=context)
values['product_uom_qty'] = quantity
sol.write(cr, SUPERUSER_ID, [line_id], values, context=context)
return {'line_id': line_id, 'quantity': quantity}
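# Usage sketch for _cart_update above (the product id is a placeholder): calling
# order._cart_update(product_id=42, add_qty=1) adds one unit of the product to the cart, while
# order._cart_update(product_id=42, set_qty=0) drops the corresponding order line entirely.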
def _cart_accessories(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids, context=context):
s = set(j.id for l in (order.website_order_line or []) for j in (l.product_id.accessory_product_ids or []))
s -= set(l.product_id.id for l in order.order_line)
product_ids = random.sample(s, min(len(s),3))
return self.pool['product.product'].browse(cr, uid, product_ids, context=context)
class website(orm.Model):
_inherit = 'website'
_columns = {
'pricelist_id': fields.related('user_id','partner_id','property_product_pricelist',
type='many2one', relation='product.pricelist', string='Default Pricelist'),
'currency_id': fields.related('pricelist_id','currency_id',
type='many2one', relation='res.currency', string='Default Currency'),
}
def sale_product_domain(self, cr, uid, ids, context=None):
return [("sale_ok", "=", True)]
def sale_get_order(self, cr, uid, ids, force_create=False, code=None, update_pricelist=None, context=None):
sale_order_obj = self.pool['sale.order']
sale_order_id = request.session.get('sale_order_id')
sale_order = None
# create so if needed
if not sale_order_id and (force_create or code):
# TODO cache partner_id session
partner = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id
for w in self.browse(cr, uid, ids):
values = {
'user_id': w.user_id.id,
'partner_id': partner.id,
'pricelist_id': partner.property_product_pricelist.id,
'section_id': self.pool.get('ir.model.data').get_object_reference(cr, uid, 'website', 'salesteam_website_sales')[1],
}
sale_order_id = sale_order_obj.create(cr, SUPERUSER_ID, values, context=context)
values = sale_order_obj.onchange_partner_id(cr, SUPERUSER_ID, [], partner.id, context=context)['value']
sale_order_obj.write(cr, SUPERUSER_ID, [sale_order_id], values, context=context)
request.session['sale_order_id'] = sale_order_id
if sale_order_id:
# TODO cache partner_id session
partner = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id
sale_order = sale_order_obj.browse(cr, SUPERUSER_ID, sale_order_id, context=context)
if not sale_order.exists():
request.session['sale_order_id'] = None
return None
# check for change of pricelist with a coupon
if code and code != sale_order.pricelist_id.code:
pricelist_ids = self.pool['product.pricelist'].search(cr, SUPERUSER_ID, [('code', '=', code)], context=context)
if pricelist_ids:
pricelist_id = pricelist_ids[0]
request.session['sale_order_code_pricelist_id'] = pricelist_id
update_pricelist = True
request.session['sale_order_code_pricelist_id'] = False
pricelist_id = request.session.get('sale_order_code_pricelist_id') or partner.property_product_pricelist.id
# check for change of partner_id, i.e. after signup
if sale_order.partner_id.id != partner.id and request.website.partner_id.id != partner.id:
flag_pricelist = False
if pricelist_id != sale_order.pricelist_id.id:
flag_pricelist = True
fiscal_position = sale_order.fiscal_position and sale_order.fiscal_position.id or False
values = sale_order_obj.onchange_partner_id(cr, SUPERUSER_ID, [sale_order_id], partner.id, context=context)['value']
order_lines = map(int,sale_order.order_line)
values.update(sale_order_obj.onchange_fiscal_position(cr, SUPERUSER_ID, [],
values['fiscal_position'], [[6, 0, order_lines]], context=context)['value'])
values['partner_id'] = partner.id
sale_order_obj.write(cr, SUPERUSER_ID, [sale_order_id], values, context=context)
if flag_pricelist or values.get('fiscal_position') != fiscal_position:
update_pricelist = True
# update the pricelist
if update_pricelist:
values = {'pricelist_id': pricelist_id}
values.update(sale_order.onchange_pricelist_id(pricelist_id, None)['value'])
sale_order.write(values)
for line in sale_order.order_line:
sale_order._cart_update(product_id=line.product_id.id, add_qty=0)
# update browse record
if (code and code != sale_order.pricelist_id.code) or sale_order.partner_id.id != partner.id:
sale_order = sale_order_obj.browse(cr, SUPERUSER_ID, sale_order.id, context=context)
return sale_order
def sale_get_transaction(self, cr, uid, ids, context=None):
transaction_obj = self.pool.get('payment.transaction')
tx_id = request.session.get('sale_transaction_id')
if tx_id:
tx_ids = transaction_obj.search(cr, uid, [('id', '=', tx_id), ('state', 'not in', ['cancel'])], context=context)
if tx_ids:
return transaction_obj.browse(cr, uid, tx_ids[0], context=context)
else:
request.session['sale_transaction_id'] = False
return False
def sale_reset(self, cr, uid, ids, context=None):
request.session.update({
'sale_order_id': False,
'sale_transaction_id': False,
'sale_order_code_pricelist_id': False,
})
|
agpl-3.0
|
errx/django
|
tests/m2m_and_m2o/tests.py
|
61
|
2463
|
from django.db.models import Q
from django.test import TestCase
from .models import Issue, User, UnicodeReferenceModel
class RelatedObjectTests(TestCase):
def test_m2m_and_m2o(self):
r = User.objects.create(username="russell")
g = User.objects.create(username="gustav")
i1 = Issue(num=1)
i1.client = r
i1.save()
i2 = Issue(num=2)
i2.client = r
i2.save()
i2.cc.add(r)
i3 = Issue(num=3)
i3.client = g
i3.save()
i3.cc.add(r)
self.assertQuerysetEqual(
Issue.objects.filter(client=r.id), [
1,
2,
],
lambda i: i.num
)
self.assertQuerysetEqual(
Issue.objects.filter(client=g.id), [
3,
],
lambda i: i.num
)
self.assertQuerysetEqual(
Issue.objects.filter(cc__id__exact=g.id), []
)
self.assertQuerysetEqual(
Issue.objects.filter(cc__id__exact=r.id), [
2,
3,
],
lambda i: i.num
)
# These queries combine results from the m2m and the m2o relationships.
# They're three ways of saying the same thing.
self.assertQuerysetEqual(
Issue.objects.filter(Q(cc__id__exact=r.id) | Q(client=r.id)), [
1,
2,
3,
],
lambda i: i.num
)
self.assertQuerysetEqual(
Issue.objects.filter(cc__id__exact=r.id) | Issue.objects.filter(client=r.id), [
1,
2,
3,
],
lambda i: i.num
)
self.assertQuerysetEqual(
Issue.objects.filter(Q(client=r.id) | Q(cc__id__exact=r.id)), [
1,
2,
3,
],
lambda i: i.num
)
class RelatedObjectUnicodeTests(TestCase):
def test_m2m_with_unicode_reference(self):
"""
Regression test for #6045: references to other models can be unicode
strings, providing they are directly convertible to ASCII.
"""
m1 = UnicodeReferenceModel.objects.create()
m2 = UnicodeReferenceModel.objects.create()
m2.others.add(m1) # used to cause an error (see ticket #6045)
m2.save()
list(m2.others.all()) # Force retrieval.
|
bsd-3-clause
|
vrv/tensorflow
|
tensorflow/python/saved_model/constants.py
|
114
|
1994
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Constants for SavedModel save and restore operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util.all_util import remove_undocumented
# Subdirectory name containing the asset files.
ASSETS_DIRECTORY = "assets"
# CollectionDef key containing SavedModel assets.
ASSETS_KEY = "saved_model_assets"
# CollectionDef key for the legacy init op.
LEGACY_INIT_OP_KEY = "legacy_init_op"
# CollectionDef key for the SavedModel main op.
MAIN_OP_KEY = "saved_model_main_op"
# Schema version for SavedModel.
SAVED_MODEL_SCHEMA_VERSION = 1
# File name for SavedModel protocol buffer.
SAVED_MODEL_FILENAME_PB = "saved_model.pb"
# File name for text version of SavedModel protocol buffer.
SAVED_MODEL_FILENAME_PBTXT = "saved_model.pbtxt"
# Subdirectory name containing the variables/checkpoint files.
VARIABLES_DIRECTORY = "variables"
# File name used for variables.
VARIABLES_FILENAME = "variables"
_allowed_symbols = [
"ASSETS_DIRECTORY",
"ASSETS_KEY",
"LEGACY_INIT_OP_KEY",
"MAIN_OP_KEY",
"SAVED_MODEL_SCHEMA_VERSION",
"SAVED_MODEL_FILENAME_PB",
"SAVED_MODEL_FILENAME_PBTXT",
"VARIABLES_DIRECTORY",
"VARIABLES_FILENAME",
]
remove_undocumented(__name__, _allowed_symbols)
|
apache-2.0
|
steveklabnik/servo
|
tests/jquery/run_jquery.py
|
215
|
9582
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import re
import subprocess
import sys
import BaseHTTPServer
import SimpleHTTPServer
import SocketServer
import threading
import urlparse
# List of jQuery modules that will be tested.
# TODO(gw): Disabled most of them as something has been
# introduced very recently that causes the resource task
# to panic - and hard fail doesn't exit the servo
# process when this happens.
# See https://github.com/servo/servo/issues/6210 and
# https://github.com/servo/servo/issues/6211
JQUERY_MODULES = [
# "ajax", # panics
# "attributes",
# "callbacks",
# "core", # mozjs crash
# "css",
# "data",
# "deferred",
# "dimensions",
# "effects",
# "event", # panics
# "manipulation", # mozjs crash
# "offset",
# "queue",
"selector",
# "serialize",
# "support",
# "traversing",
# "wrap"
]
# Port to run the HTTP server on for jQuery.
TEST_SERVER_PORT = 8192
# A regex for matching console.log output lines from the test runner.
REGEX_PATTERN = "^\[jQuery test\] \[([0-9]+)/([0-9]+)/([0-9]+)] (.*)"
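# For reference, a line of the following form matches the pattern above, yielding the groups
# ("12", "0", "12", "selector: child and adjacent") -- the module/test text is illustrative only:
#   [jQuery test] [12/0/12] selector: child and adjacent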
# The result of a single test group.
class TestResult:
def __init__(self, success, fail, total, text):
self.success = int(success)
self.fail = int(fail)
self.total = int(total)
self.text = text
def __key(self):
return (self.success, self.fail, self.total, self.text)
def __eq__(self, other):
return self.__key() == other.__key()
def __ne__(self, other):
return self.__key() != other.__key()
def __hash__(self):
return hash(self.__key())
def __repr__(self):
return "ok={0} fail={1} total={2}".format(self.success, self.fail, self.total)
# Parse a line, producing a TestResult.
# Throws if unable to parse.
def parse_line_to_result(line):
match = re.match(REGEX_PATTERN, line)
success, fail, total, name = match.groups()
return name, TestResult(success, fail, total, line)
# Parse an entire buffer of lines to a dictionary
# of test results, keyed by the test name.
def parse_string_to_results(buffer):
test_results = {}
lines = buffer.splitlines()
for line in lines:
name, test_result = parse_line_to_result(line)
test_results[name] = test_result
return test_results
# Run servo and print / parse the results for a specific jQuery test module.
def run_servo(servo_exe, module):
url = "http://localhost:{0}/jquery/test/?module={1}".format(TEST_SERVER_PORT, module)
args = [servo_exe, url, "-z", "-f"]
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
while True:
line = proc.stdout.readline()
if len(line) == 0:
break
line = line.rstrip()
try:
name, test_result = parse_line_to_result(line)
yield name, test_result
except AttributeError:
pass
# Build the filename for an expected results file.
def module_filename(module):
return 'expected_{0}.txt'.format(module)
# Read an existing set of expected results to compare against.
def read_existing_results(module):
with open(module_filename(module), 'r') as file:
buffer = file.read()
return parse_string_to_results(buffer)
# Write a set of results to file
def write_results(module, results):
with open(module_filename(module), 'w') as file:
for result in results.itervalues():
file.write(result.text + '\n')
# Print usage if command line args are incorrect
def print_usage():
print("USAGE: {0} test|update servo_binary jquery_base_dir".format(sys.argv[0]))
# Run a simple HTTP server to serve up the jQuery test suite
def run_http_server():
class ThreadingSimpleServer(SocketServer.ThreadingMixIn,
BaseHTTPServer.HTTPServer):
allow_reuse_address = True
class RequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
# TODO(gw): HACK copy the fixed version from python
# main repo - due to https://bugs.python.org/issue23112
def send_head(self):
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
parts = urlparse.urlsplit(self.path)
if not parts.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
new_parts = (parts[0], parts[1], parts[2] + '/',
parts[3], parts[4])
new_url = urlparse.urlunsplit(new_parts)
self.send_header("Location", new_url)
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
# Always read in binary mode. Opening files in text mode may cause
# newline translations, making the actual size of the content
# transmitted *less* than the content-length!
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found")
return None
try:
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
except:
f.close()
raise
def log_message(self, format, *args):
return
server = ThreadingSimpleServer(('', TEST_SERVER_PORT), RequestHandler)
while True:
sys.stdout.flush()
server.handle_request()
if __name__ == '__main__':
if len(sys.argv) == 4:
cmd = sys.argv[1]
servo_exe = sys.argv[2]
base_dir = sys.argv[3]
os.chdir(base_dir)
# Ensure servo binary can be found
if not os.path.isfile(servo_exe):
print("Unable to find {0}. This script expects an existing build of Servo.".format(servo_exe))
sys.exit(1)
# Start the test server
httpd_thread = threading.Thread(target=run_http_server)
httpd_thread.setDaemon(True)
httpd_thread.start()
if cmd == "test":
print("Testing jQuery on Servo!")
test_count = 0
unexpected_count = 0
individual_success = 0
individual_total = 0
# Test each module separately
for module in JQUERY_MODULES:
print("\t{0}".format(module))
prev_test_results = read_existing_results(module)
for name, current_result in run_servo(servo_exe, module):
test_count += 1
individual_success += current_result.success
individual_total += current_result.total
# If this test was in the previous results, compare them.
if name in prev_test_results:
prev_result = prev_test_results[name]
if prev_result == current_result:
print("\t\tOK: {0}".format(name))
else:
unexpected_count += 1
print("\t\tFAIL: {0}: WAS {1} NOW {2}".format(name, prev_result, current_result))
del prev_test_results[name]
else:
# There was a new test that wasn't expected
unexpected_count += 1
print("\t\tNEW: {0}".format(current_result.text))
# Check what's left over, these are tests that were expected but didn't run this time.
for name in prev_test_results:
test_count += 1
unexpected_count += 1
print("\t\tMISSING: {0}".format(prev_test_results[name].text))
print("\tRan {0} test groups. {1} unexpected results.".format(test_count, unexpected_count))
print("\t{0} tests succeeded of {1} ({2:.2f}%)".format(individual_success,
individual_total,
100.0 * individual_success / individual_total))
if unexpected_count > 0:
sys.exit(1)
elif cmd == "update":
print("Updating jQuery expected results")
for module in JQUERY_MODULES:
print("\t{0}".format(module))
test_results = {}
for name, test_result in run_servo(servo_exe, module):
print("\t\t{0} {1}".format(name, test_result))
test_results[name] = test_result
write_results(module, test_results)
else:
print_usage()
else:
print_usage()
|
mpl-2.0
|
Almad/sneakylang
|
sneakylang/test/test_register.py
|
2
|
5586
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test context-sensitive Parser registry.
"""
from unittest import TestCase
#logging.basicConfig(level=logging.DEBUG)
from sneakylang.register import *
from sneakylang.macro import Macro
from sneakylang.parser import Parser
from sneakylang.treebuilder import TreeBuilder
class DummyMacro(Macro):
name = 'dummy_macro'
class DummyParser(Parser):
start = ['(#){4}']
macro = DummyMacro
class DummyParserWithTwoPossibleStarts(Parser):
start = ['^(####)$', '^(||||)$']
macro = DummyMacro
class AnotherDummyMacro(Macro):
name = 'another_dummy_macro'
class AnotherDummyParser(Parser):
start = ['--']
macro = AnotherDummyMacro
class NationalParser(Parser):
start = ['(\w){3}']
macro = AnotherDummyMacro
class NotAllowedParserCreatingCollisionWithMacro(Parser):
# already in register
start = ['^(\(){2}$']
class NotAllowedParserHavingBadRegexp(Parser):
# already in register
start = ['(\(){2}$']
class NotAllowedParserHavingBadRegexp2(Parser):
# already in register
start = ['^(\(){2}']
class TestParserRegister(TestCase):
def testProperRegexpRetrieving(self):
reg = ParserRegister([DummyParser])
self.assertEquals(DummyParser, reg.get_parser('(#){4}'))
r2 = ParserRegister([DummyParserWithTwoPossibleStarts])
self.assertEquals(DummyParserWithTwoPossibleStarts, r2.get_parser('^(####)$'))
self.assertEquals(DummyParserWithTwoPossibleStarts, r2.get_parser('^(||||)$'))
self.assertRaises(ValueError, lambda:r2.get_parser('some regexp that do not exists'))
def testRetrievingFromStream(self):
reg = ParserRegister([DummyParser])
self.assertEquals(reg.resolve_parser('####', Register()).__class__, DummyParser)
def testResolvingOfOverlappingMacrosFromStream(self):
class DummyMacroTwo(Macro):
name = 'dummy_macro_two'
class DummyParserTwo(Parser):
start = ['^(#####)$']
macro = DummyMacroTwo
reg = ParserRegister([DummyParser, DummyParserTwo])
self.assertEquals(reg.resolve_parser('#### 123', Register()).__class__, DummyParser)
self.assertEquals(reg.resolve_parser('#####', Register()).__class__, DummyParserTwo)
def testResolvingOfOverlappingMacrosFromStreamGivenInOtherOrder(self):
class DummyMacroTwo(Macro):
name = 'dummy_macro_two'
class DummyParserTwo(Parser):
start = ['^(#####)$']
macro = DummyMacroTwo
reg = ParserRegister([DummyParserTwo, DummyParser])
self.assertEquals(reg.resolve_parser('#### 123', Register()).__class__, DummyParser)
self.assertEquals(reg.resolve_parser('#####', Register()).__class__, DummyParserTwo)
def testProperResolvingWithNationalChars(self):
reg = ParserRegister([NationalParser])
self.assertEquals(reg.resolve_parser('žšť', Register()).__class__, NationalParser)
def testAcceptingUnicodeString(self):
reg = ParserRegister([NationalParser])
self.assertEquals(reg.resolve_parser(u'žšť', Register()).__class__, NationalParser)
class TestRegister(TestCase):
def setUp(self):
self.r = Register()
self.builder = TreeBuilder()
def testMacroHolding(self):
self.r.add(DummyMacro)
self.assertEquals(DummyMacro, self.r.get_macro('dummy_macro'))
self.r.add_parser(AnotherDummyParser)
self.assertEquals(False, AnotherDummyParser.start[0] in self.r.parser_register.parser_start)
self.assertEquals((None,None), self.r.resolve_macro('####', self.builder))
self.r.add_parser(DummyParser)
self.assertEquals(DummyMacro, self.r.resolve_macro('####', self.builder)[0].__class__)
self.r.add_parser(AnotherDummyParser)
self.assertEquals((None,None), self.r.resolve_macro('--', self.builder))
self.r.add(AnotherDummyMacro)
self.assertEquals((None,None), self.r.resolve_macro('--', self.builder))
self.r.add_parser(AnotherDummyParser)
self.assertEquals(AnotherDummyMacro, self.r.resolve_macro('--', self.builder)[0].__class__)
def testEasyParserAdding(self):
reg = Register([DummyMacro, AnotherDummyMacro], [DummyParser, AnotherDummyParser])
self.assertEquals(DummyMacro, reg.resolve_macro('####', self.builder)[0].__class__)
self.assertEquals(AnotherDummyMacro, reg.resolve_macro('--', self.builder)[0].__class__)
def testNotAddingParserWhichHasNotMacroAlreadyInRegister(self):
reg = Register([DummyMacro], [DummyParser])
self.assertEquals(False, AnotherDummyParser.start[0] in reg.parser_register.parser_start)
self.assertEquals((None, None), reg.resolve_macro('--', self.builder))
def testNotAddingParserWhichHasNotMacroAlreadyInRegisterWithEasyAdding(self):
reg = Register([DummyMacro], [DummyParser, AnotherDummyParser])
self.assertEquals(False, AnotherDummyParser.start[0] in reg.parser_register.parser_start)
self.assertEquals((None, None), reg.resolve_macro('--', self.builder))
class TestRegisterMap(TestCase):
def testProperVisit(self):
map = RegisterMap()
map[DummyParser] = Register([])
self.assertEquals(map[DummyParser].register_map, map)
self.assertEquals(repr(map[DummyParser].register_map), repr(map))
map = RegisterMap({DummyParser : Register([])})
self.assertEquals(map[DummyParser].register_map, map)
self.assertEquals(repr(map[DummyParser].register_map), repr(map))
|
bsd-3-clause
|
openstack/sahara
|
sahara/cli/image_pack/api.py
|
1
|
4206
|
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara import conductor # noqa
from sahara.plugins import base as plugins_base
from sahara.utils import remote
try:
import guestfs
except ImportError:
raise Exception("The image packing API depends on the system package "
"python-libguestfs (and libguestfs itself.) Please "
"install these packages to proceed.")
LOG = None
CONF = None
# This is broken out to support testability
def set_logger(log):
global LOG
LOG = log
# This is broken out to support testability
def set_conf(conf):
global CONF
CONF = conf
# This is a local exception class that is used to exit routines
# in cases where error information has already been logged.
# It is caught and suppressed everywhere it is used.
class Handled(Exception):
pass
class Context(object):
'''Create a pseudo Context object
Since this tool does not use the REST interface, we
do not have a request from which to build a Context.
'''
def __init__(self, is_admin=False, tenant_id=None):
self.is_admin = is_admin
self.tenant_id = tenant_id
class ImageRemote(remote.TerminalOnlyRemote):
def __init__(self, image_path, root_drive):
guest = guestfs.GuestFS(python_return_dict=True)
guest.add_drive_opts(image_path, format="qcow2")
guest.set_network(True)
self.guest = guest
self.root_drive = root_drive
def __enter__(self):
self.guest.launch()
if not self.root_drive:
self.root_drive = self.guest.inspect_os()[0]
self.guest.mount(self.root_drive, '/')
try:
cmd = "echo Testing sudo without tty..."
self.execute_command(cmd, run_as_root=True)
except RuntimeError:
cmd = "sed -i 's/requiretty/!requiretty/' /etc/sudoers"
self.execute_command(cmd)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.guest.sync()
self.guest.umount_all()
self.guest.close()
def execute_command(self, cmd, run_as_root=False, get_stderr=False,
raise_when_error=True, timeout=300):
try:
LOG.info("Issuing command: {cmd}".format(cmd=cmd))
stdout = self.guest.sh(cmd)
LOG.info("Received response: {stdout}".format(stdout=stdout))
return 0, stdout
except RuntimeError as ex:
if raise_when_error:
raise
else:
return 1, ex.message
def get_os_distrib(self):
return self.guest.inspect_get_distro(self.root_drive)
def write_file_to(self, path, script, run_as_root):
LOG.info("Writing script to : {path}".format(path=path))
stdout = self.guest.write(path, script)
return 0, stdout
def setup_plugins():
plugins_base.setup_plugins()
def get_loaded_plugins():
return plugins_base.PLUGINS.plugins
def get_plugin_arguments(plugin_name):
"""Gets plugin arguments, as a dict of version to argument list."""
plugin = plugins_base.PLUGINS.get_plugin(plugin_name)
versions = plugin.get_versions()
return {version: plugin.get_image_arguments(version)
for version in versions}
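# Shape sketch for get_plugin_arguments above (plugin and version names are illustrative; list items
# are whatever objects plugin.get_image_arguments() returns for that version):
#   get_plugin_arguments('<plugin>') -> {'<version>': [<argument>, ...], ...}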
def pack_image(image_path, plugin_name, plugin_version, image_arguments,
root_drive=None, test_only=False):
with ImageRemote(image_path, root_drive) as image_remote:
plugin = plugins_base.PLUGINS.get_plugin(plugin_name)
plugin.pack_image(plugin_version, image_remote, test_only=test_only,
image_arguments=image_arguments)
|
apache-2.0
|
rit-sse/openbci
|
openbci_2015/scripts/udp_server.py
|
6
|
2039
|
"""A server that handles a connection with an OpenBCI board and serves that
data over both a UDP socket server and a WebSocket server.
Requires:
- pyserial
- asyncio
- websockets
"""
import argparse
import cPickle as pickle
import json
import sys; sys.path.append('..') # help python find open_bci_v3.py relative to scripts folder
import open_bci_v3 as open_bci
import socket
parser = argparse.ArgumentParser(
description='Run a UDP server streaming OpenBCI data.')
parser.add_argument(
'--json',
action='store_true',
help='Send JSON data rather than pickled Python objects.')
parser.add_argument(
'--filter_data',
action='store_true',
help='Enable onboard filtering.')
parser.add_argument(
'--host',
help='The host to listen on.',
default='127.0.0.1')
parser.add_argument(
'--port',
help='The port to listen on.',
default='8888')
parser.add_argument(
'--serial',
help='The serial port to communicate with the OpenBCI board.',
default='/dev/tty.usbmodem1421')
parser.add_argument(
'--baud',
help='The baud of the serial connection with the OpenBCI board.',
default='115200')
class UDPServer(object):
def __init__(self, ip, port, json):
self.ip = ip
self.port = port
self.json = json
print "Selecting raw UDP streaming. IP: ", self.ip, ", port: ", str(self.port)
self.server = socket.socket(
socket.AF_INET, # Internet
socket.SOCK_DGRAM)
def send_data(self, data):
self.server.sendto(data, (self.ip, self.port))
def handle_sample(self, sample):
if self.json:
# Just send channel data.
self.send_data(json.dumps(sample.channel_data))
else:
# Pack up and send the whole OpenBCISample object.
self.send_data(pickle.dumps(sample))
args = parser.parse_args()
obci = open_bci.OpenBCIBoard(args.serial, int(args.baud))
if args.filter_data:
obci.filter_data = True
sock_server = UDPServer(args.host, int(args.port), args.json)
obci.start_streaming(sock_server.handle_sample)
|
mit
|
js0701/chromium-crosswalk
|
tools/metrics/histograms/extract_histograms.py
|
40
|
16108
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Extract histogram names from the description XML file.
For more information on the format of the XML file, which is self-documenting,
see histograms.xml; however, here is a simple example to get you started. The
XML below will generate the following five histograms:
HistogramTime
HistogramEnum
HistogramEnum_Chrome
HistogramEnum_IE
HistogramEnum_Firefox
<histogram-configuration>
<histograms>
<histogram name="HistogramTime" units="milliseconds">
<summary>A brief description.</summary>
<details>This is a more thorough description of this histogram.</details>
</histogram>
<histogram name="HistogramEnum" enum="MyEnumType">
<summary>This histogram sports an enum value type.</summary>
</histogram>
</histograms>
<enums>
<enum name="MyEnumType">
<summary>This is an example enum type, where the values mean little.</summary>
<int value="1" label="FIRST_VALUE">This is the first value.</int>
<int value="2" label="SECOND_VALUE">This is the second value.</int>
</enum>
</enums>
<histogram_suffixes_list>
<histogram_suffixes name="BrowserType">
<suffix name="Chrome"/>
<suffix name="IE"/>
<suffix name="Firefox"/>
<affected-histogram name="HistogramEnum"/>
</histogram_suffixes>
</histogram_suffixes_list>
</histogram-configuration>
"""
import copy
import logging
import xml.dom.minidom
OWNER_FIELD_PLACEHOLDER = (
'Please list the metric\'s owners. Add more owner tags as needed.')
MAX_HISTOGRAM_SUFFIX_DEPENDENCY_DEPTH = 5
class Error(Exception):
pass
def _JoinChildNodes(tag):
"""Join child nodes into a single text.
Applicable to leafs like 'summary' and 'detail'.
Args:
tag: parent node
Returns:
a string with concatenated nodes' text representation.
"""
return ''.join(c.toxml() for c in tag.childNodes).strip()
def _NormalizeString(s):
"""Replaces all whitespace sequences with a single space.
The function properly handles multi-line strings.
Args:
s: The string to normalize, (' \\n a b c\\n d ').
Returns:
The normalized string (a b c d).
"""
return ' '.join(s.split())
def _NormalizeAllAttributeValues(node):
"""Recursively normalizes all tag attribute values in the given tree.
Args:
node: The minidom node to be normalized.
Returns:
The normalized minidom node.
"""
if node.nodeType == xml.dom.minidom.Node.ELEMENT_NODE:
for a in node.attributes.keys():
node.attributes[a].value = _NormalizeString(node.attributes[a].value)
for c in node.childNodes:
_NormalizeAllAttributeValues(c)
return node
def _ExpandHistogramNameWithSuffixes(suffix_name, histogram_name,
histogram_suffixes_node):
"""Creates a new histogram name based on a histogram suffix.
Args:
suffix_name: The suffix string to apply to the histogram name. May be empty.
histogram_name: The name of the histogram. May be of the form
Group.BaseName or BaseName.
histogram_suffixes_node: The histogram_suffixes XML node.
Returns:
A string with the expanded histogram name.
Raises:
Error: if the expansion can't be done.
"""
if histogram_suffixes_node.hasAttribute('separator'):
separator = histogram_suffixes_node.getAttribute('separator')
else:
separator = '_'
if histogram_suffixes_node.hasAttribute('ordering'):
ordering = histogram_suffixes_node.getAttribute('ordering')
else:
ordering = 'suffix'
if ordering not in ['prefix', 'suffix']:
logging.error('ordering needs to be prefix or suffix, value is %s',
ordering)
raise Error()
if not suffix_name:
return histogram_name
if ordering == 'suffix':
return histogram_name + separator + suffix_name
# For prefixes, the suffix_name is inserted between the "cluster" and the
# "remainder", e.g. Foo.BarHist expanded with gamma becomes Foo.gamma_BarHist.
sections = histogram_name.split('.')
if len(sections) <= 1:
logging.error(
'Prefix Field Trial expansions require histogram names which include a '
'dot separator. Histogram name is %s, and Field Trial is %s',
histogram_name, histogram_suffixes_node.getAttribute('name'))
raise Error()
cluster = sections[0] + '.'
remainder = '.'.join(sections[1:])
return cluster + suffix_name + separator + remainder
def _ExtractEnumsFromXmlTree(tree):
"""Extract all <enum> nodes in the tree into a dictionary."""
enums = {}
have_errors = False
last_name = None
for enum in tree.getElementsByTagName('enum'):
if enum.getAttribute('type') != 'int':
logging.error('Unknown enum type %s', enum.getAttribute('type'))
have_errors = True
continue
name = enum.getAttribute('name')
if last_name is not None and name.lower() < last_name.lower():
logging.error('Enums %s and %s are not in alphabetical order',
last_name, name)
have_errors = True
last_name = name
if name in enums:
logging.error('Duplicate enum %s', name)
have_errors = True
continue
last_int_value = None
enum_dict = {}
enum_dict['name'] = name
enum_dict['values'] = {}
for int_tag in enum.getElementsByTagName('int'):
value_dict = {}
int_value = int(int_tag.getAttribute('value'))
if last_int_value is not None and int_value < last_int_value:
logging.error('Enum %s int values %d and %d are not in numerical order',
name, last_int_value, int_value)
have_errors = True
last_int_value = int_value
if int_value in enum_dict['values']:
logging.error('Duplicate enum value %d for enum %s', int_value, name)
have_errors = True
continue
value_dict['label'] = int_tag.getAttribute('label')
value_dict['summary'] = _JoinChildNodes(int_tag)
enum_dict['values'][int_value] = value_dict
summary_nodes = enum.getElementsByTagName('summary')
if summary_nodes:
enum_dict['summary'] = _NormalizeString(_JoinChildNodes(summary_nodes[0]))
enums[name] = enum_dict
return enums, have_errors
def _ExtractOwners(xml_node):
"""Extract all owners into a list from owner tag under |xml_node|."""
owners = []
for owner_node in xml_node.getElementsByTagName('owner'):
owner_entry = _NormalizeString(_JoinChildNodes(owner_node))
if OWNER_FIELD_PLACEHOLDER not in owner_entry:
owners.append(owner_entry)
return owners
def _ExtractHistogramsFromXmlTree(tree, enums):
"""Extract all <histogram> nodes in the tree into a dictionary."""
# Process the histograms. The descriptions can include HTML tags.
histograms = {}
have_errors = False
last_name = None
for histogram in tree.getElementsByTagName('histogram'):
name = histogram.getAttribute('name')
if last_name is not None and name.lower() < last_name.lower():
logging.error('Histograms %s and %s are not in alphabetical order',
last_name, name)
have_errors = True
last_name = name
if name in histograms:
logging.error('Duplicate histogram definition %s', name)
have_errors = True
continue
histograms[name] = histogram_entry = {}
# Find <owner> tag.
owners = _ExtractOwners(histogram)
if owners:
histogram_entry['owners'] = owners
# Find <summary> tag.
summary_nodes = histogram.getElementsByTagName('summary')
if summary_nodes:
histogram_entry['summary'] = _NormalizeString(
_JoinChildNodes(summary_nodes[0]))
else:
histogram_entry['summary'] = 'TBD'
# Find <obsolete> tag.
obsolete_nodes = histogram.getElementsByTagName('obsolete')
if obsolete_nodes:
reason = _JoinChildNodes(obsolete_nodes[0])
histogram_entry['obsolete'] = reason
# Handle units.
if histogram.hasAttribute('units'):
histogram_entry['units'] = histogram.getAttribute('units')
# Find <details> tag.
details_nodes = histogram.getElementsByTagName('details')
if details_nodes:
histogram_entry['details'] = _NormalizeString(
_JoinChildNodes(details_nodes[0]))
# Handle enum types.
if histogram.hasAttribute('enum'):
enum_name = histogram.getAttribute('enum')
if enum_name not in enums:
logging.error('Unknown enum %s in histogram %s', enum_name, name)
have_errors = True
else:
histogram_entry['enum'] = enums[enum_name]
return histograms, have_errors
# Finds an <obsolete> node amongst |node|'s immediate children and returns its
# content as a string. Returns None if no such node exists.
def _GetObsoleteReason(node):
for child in node.childNodes:
if child.localName == 'obsolete':
# There can be at most 1 obsolete element per node.
return _JoinChildNodes(child)
return None
def _UpdateHistogramsWithSuffixes(tree, histograms):
"""Process <histogram_suffixes> tags and combine with affected histograms.
The histograms dictionary will be updated in-place by adding new histograms
created by combining histograms themselves with histogram_suffixes targeting
these histograms.
Args:
tree: XML dom tree.
histograms: a dictionary of histograms previously extracted from the tree;
Returns:
True if any errors were found.
"""
have_errors = False
histogram_suffix_tag = 'histogram_suffixes'
suffix_tag = 'suffix'
with_tag = 'with-suffix'
# Verify order of histogram_suffixes fields first.
last_name = None
for histogram_suffixes in tree.getElementsByTagName(histogram_suffix_tag):
name = histogram_suffixes.getAttribute('name')
if last_name is not None and name.lower() < last_name.lower():
logging.error('histogram_suffixes %s and %s are not in alphabetical '
'order', last_name, name)
have_errors = True
last_name = name
# histogram_suffixes can depend on other histogram_suffixes, so we need to be
# careful. Make a temporary copy of the list of histogram_suffixes to use as a
# queue. histogram_suffixes whose dependencies have not yet been processed
# will get relegated to the back of the queue to be processed later.
reprocess_queue = []
def GenerateHistogramSuffixes():
for f in tree.getElementsByTagName(histogram_suffix_tag):
yield 0, f
for r, f in reprocess_queue:
yield r, f
for reprocess_count, histogram_suffixes in GenerateHistogramSuffixes():
# Check dependencies first
dependencies_valid = True
affected_histograms = histogram_suffixes.getElementsByTagName(
'affected-histogram')
for affected_histogram in affected_histograms:
histogram_name = affected_histogram.getAttribute('name')
if histogram_name not in histograms:
# Base histogram is missing
dependencies_valid = False
missing_dependency = histogram_name
break
if not dependencies_valid:
if reprocess_count < MAX_HISTOGRAM_SUFFIX_DEPENDENCY_DEPTH:
reprocess_queue.append((reprocess_count + 1, histogram_suffixes))
continue
else:
logging.error('histogram_suffixes %s is missing its dependency %s',
histogram_suffixes.getAttribute('name'),
missing_dependency)
have_errors = True
continue
# If the suffix group has an obsolete tag, all suffixes it generates inherit
# its reason.
group_obsolete_reason = _GetObsoleteReason(histogram_suffixes)
name = histogram_suffixes.getAttribute('name')
suffix_nodes = histogram_suffixes.getElementsByTagName(suffix_tag)
suffix_labels = {}
for suffix in suffix_nodes:
suffix_labels[suffix.getAttribute('name')] = suffix.getAttribute('label')
# Find owners list under current histogram_suffixes tag.
owners = _ExtractOwners(histogram_suffixes)
last_histogram_name = None
for affected_histogram in affected_histograms:
histogram_name = affected_histogram.getAttribute('name')
if (last_histogram_name is not None
and histogram_name.lower() < last_histogram_name.lower()):
logging.error('Affected histograms %s and %s of histogram_suffixes %s '
'are not in alphabetical order',
last_histogram_name, histogram_name, name)
have_errors = True
last_histogram_name = histogram_name
with_suffixes = affected_histogram.getElementsByTagName(with_tag)
if with_suffixes:
suffixes_to_add = with_suffixes
else:
suffixes_to_add = suffix_nodes
for suffix in suffixes_to_add:
suffix_name = suffix.getAttribute('name')
try:
new_histogram_name = _ExpandHistogramNameWithSuffixes(
suffix_name, histogram_name, histogram_suffixes)
if new_histogram_name != histogram_name:
histograms[new_histogram_name] = copy.deepcopy(
histograms[histogram_name])
suffix_label = suffix_labels.get(suffix_name, '')
# TODO(yiyaoliu): Rename these to be consistent with the new naming.
# It is kept unchanged for now because it's used by dashboards.
if 'fieldtrial_groups' not in histograms[new_histogram_name]:
histograms[new_histogram_name]['fieldtrial_groups'] = []
histograms[new_histogram_name]['fieldtrial_groups'].append(
suffix_name)
if 'fieldtrial_names' not in histograms[new_histogram_name]:
histograms[new_histogram_name]['fieldtrial_names'] = []
histograms[new_histogram_name]['fieldtrial_names'].append(name)
if 'fieldtrial_labels' not in histograms[new_histogram_name]:
histograms[new_histogram_name]['fieldtrial_labels'] = []
histograms[new_histogram_name]['fieldtrial_labels'].append(
suffix_label)
# If no owners are added for this histogram-suffixes, it inherits the
# owners of its parents.
if owners:
histograms[new_histogram_name]['owners'] = owners
# If a suffix has an obsolete node, it's marked as obsolete for the
# specified reason, overwriting its group's obsoletion reason if the
# group itself was obsolete as well.
obsolete_reason = _GetObsoleteReason(suffix)
if not obsolete_reason:
obsolete_reason = group_obsolete_reason
# If the suffix has an obsolete tag, all histograms it generates
# inherit it.
if obsolete_reason:
histograms[new_histogram_name]['obsolete'] = obsolete_reason
except Error:
have_errors = True
return have_errors
def ExtractHistogramsFromFile(file_handle):
"""Compute the histogram names and descriptions from the XML representation.
Args:
file_handle: A file or file-like with XML content.
Returns:
a tuple of (histograms, status) where histograms is a dictionary mapping
histogram names to dictionaries containing histogram descriptions and status
is a boolean indicating if errors were encountered in processing.
"""
tree = xml.dom.minidom.parse(file_handle)
_NormalizeAllAttributeValues(tree)
enums, enum_errors = _ExtractEnumsFromXmlTree(tree)
histograms, histogram_errors = _ExtractHistogramsFromXmlTree(tree, enums)
update_errors = _UpdateHistogramsWithSuffixes(tree, histograms)
return histograms, enum_errors or histogram_errors or update_errors
def ExtractHistograms(filename):
"""Load histogram definitions from a disk file.
Args:
filename: a file path to load data from.
Returns:
a dictionary of histogram descriptions.
Raises:
Error: if the file is not well-formatted.
"""
with open(filename, 'r') as f:
histograms, had_errors = ExtractHistogramsFromFile(f)
if had_errors:
logging.error('Error parsing %s', filename)
raise Error()
return histograms
def ExtractNames(histograms):
return sorted(histograms.keys())
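# Example usage (a minimal sketch; 'histograms.xml' is a hypothetical path):
#
#   histograms = ExtractHistograms('histograms.xml')
#   for name in ExtractNames(histograms):
#       summary = histograms[name]['summary']
#
# ExtractHistograms raises Error if parsing reports ordering, duplication or
# unknown-enum problems.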
|
bsd-3-clause
|
arborh/tensorflow
|
tensorflow/python/eager/remote_benchmarks_test.py
|
4
|
6795
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Benchmarks for remote worker eager execution.
To run CPU benchmarks:
bazel run -c opt remote_benchmarks_test -- --benchmarks=.
To run GPU benchmarks:
bazel run --config=cuda -c opt --copt="-mavx" remote_benchmarks_test -- \
--benchmarks=.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import time
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import remote
from tensorflow.python.eager import test
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import server_lib
def run_benchmark(func, num_iters, execution_mode=None):
ctx = context.context()
with context.execution_mode(execution_mode):
# call func to maybe warm up the GPU
func()
if execution_mode == context.ASYNC:
ctx.executor.wait()
start = time.time()
for _ in xrange(num_iters):
func()
if execution_mode == context.ASYNC:
ctx.executor.wait()
end = time.time()
return end - start
class Foo(object):
def __init__(self, num_vars):
self._num_vars = num_vars
self._v = []
def __call__(self, inputs):
if not self._v:
for _ in range(self._num_vars):
self._v.append(variables.Variable(
random_ops.random_uniform([]), shape=[]))
for v in self._v:
inputs = inputs * v
return inputs
class RemoteWorkerMicroBenchmarks(test.Benchmark):
def __init__(self):
# used for remote benchmarks
self._cached_server1 = server_lib.Server.create_local_server()
self._cached_server_target1 = self._cached_server1.target[len("grpc://"):]
self._cached_server2 = server_lib.Server.create_local_server()
self._cached_server_target2 = self._cached_server2.target[len("grpc://"):]
def _run(self, func, num_iters=1000, execution_mode=None):
total_time = run_benchmark(func, num_iters, execution_mode)
mean_us = total_time * 1e6 / num_iters
self.report_benchmark(
iters=num_iters,
wall_time=mean_us,
extras={"examples_per_sec": num_iters / total_time})
def benchmark_send_mirroring_off(self):
remote.connect_to_remote_host(self._cached_server_target1)
x = random_ops.random_uniform((2, 2)).cpu()
@def_function.function
def remote_func(m):
return math_ops.matmul(m, m)
def func(m):
with ops.device("job:worker/replica:0/task:0/device:CPU:0"):
return remote_func(m)
context.context().mirroring_policy = context.MIRRORING_NONE
self._run(lambda: func(x))
# NOTE(b/136184459): Force garbage collecting hanging resources before
# subsequent calls to set_server_def, to ensure the destroy resource ops are
# executed when their corresponding device and manager are still available.
gc.collect()
def benchmark_send_mirroring_on(self):
remote.connect_to_remote_host(self._cached_server_target1)
x = random_ops.random_uniform((2, 2)).cpu()
@def_function.function
def remote_func(m):
return math_ops.matmul(m, m)
def func(m):
with ops.device("job:worker/replica:0/task:0/device:CPU:0"):
return remote_func(m)
context.context().mirroring_policy = context.MIRRORING_ALL
self._run(lambda: func(x))
# NOTE(b/136184459): Force garbage collecting hanging resources before
# subsequent calls to set_server_def, to ensure the destroy resource ops are
# executed when their corresponding device and manager are still available.
gc.collect()
def benchmark_worker_mirroring_off(self):
remote.connect_to_remote_host(
[self._cached_server_target1, self._cached_server_target2])
with ops.device("job:worker/replica:0/task:1/device:CPU:0"):
v = variables.Variable(1.0)
@def_function.function
def remote_func():
return 1.0 + v
def func():
with ops.device("job:worker/replica:0/task:0/device:CPU:0"):
return remote_func()
context.context().mirroring_policy = context.MIRRORING_NONE
self._run(func)
# NOTE(b/136184459): Force garbage collecting hanging resources before
# subsequent calls to set_server_def, to ensure the destroy resource ops are
# executed when their corresponding device and manager are still available.
gc.collect()
def benchmark_worker_mirroring_on(self):
remote.connect_to_remote_host(
[self._cached_server_target1, self._cached_server_target2])
with ops.device("job:worker/replica:0/task:1/device:CPU:0"):
v = variables.Variable(1.0)
@def_function.function
def remote_func():
return 1.0 + v
def func():
with ops.device("job:worker/replica:0/task:0/device:CPU:0"):
return remote_func()
context.context().mirroring_policy = context.MIRRORING_ALL
self._run(func)
# NOTE(b/136184459): Force garbage collecting hanging resources before
# subsequent calls to set_server_def, to ensure the destroy resource ops are
# executed when their corresponding device and manager are still available.
gc.collect()
def benchmark_create_vars_inside_function(self):
remote.connect_to_remote_host(self._cached_server_target1)
def func():
with ops.device("job:worker/replica:0/task:0/device:CPU:0"):
layer = Foo(50)
@def_function.function
def remote_func():
with ops.device("job:worker/replica:0/task:0/device:CPU:0"):
return layer(random_ops.random_uniform([]))
return remote_func()
self._run(func, execution_mode=context.ASYNC, num_iters=100)
# NOTE(b/136184459): Force garbage collecting hanging resources before
# subsequent calls to set_server_def, to ensure the destroy resource ops are
# executed when their corresponding device and manager are still available.
gc.collect()
if __name__ == "__main__":
test.main()
|
apache-2.0
|
FusionSP/android_external_chromium_org
|
tools/python/google/platform_utils_mac.py
|
183
|
5676
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Platform-specific utility methods shared by several scripts."""
import os
import subprocess
import google.path_utils
class PlatformUtility(object):
def __init__(self, base_dir):
"""Args:
base_dir: the base dir for running tests.
"""
self._base_dir = base_dir
self._httpd_cmd_string = None # used for starting/stopping httpd
self._bash = "/bin/bash"
def _UnixRoot(self):
"""Returns the path to root."""
return "/"
def GetFilesystemRoot(self):
"""Returns the root directory of the file system."""
return self._UnixRoot()
def GetTempDirectory(self):
"""Returns the file system temp directory
Note that this does not use a random subdirectory, so it's not
intrinsically secure. If you need a secure subdir, use the tempfile
package.
"""
return os.getenv("TMPDIR", "/tmp")
def FilenameToUri(self, path, use_http=False, use_ssl=False, port=8000):
"""Convert a filesystem path to a URI.
Args:
path: For an http URI, the path relative to the httpd server's
DocumentRoot; for a file URI, the full path to the file.
use_http: if True, returns a URI of the form http://127.0.0.1:8000/.
If False, returns a file:/// URI.
use_ssl: if True, returns HTTPS URL (https://127.0.0.1:8000/).
This parameter is ignored if use_http=False.
port: The port number to append when returning an HTTP URI
"""
if use_http:
protocol = 'http'
if use_ssl:
protocol = 'https'
return "%s://127.0.0.1:%d/%s" % (protocol, port, path)
return "file://" + path
def GetStartHttpdCommand(self, output_dir,
httpd_conf_path, mime_types_path,
document_root=None, apache2=False):
"""Prepares the config file and output directory to start an httpd server.
Returns a list of strings containing the server's command line+args.
Args:
output_dir: the path to the server's output directory, for log files.
It will be created if necessary.
httpd_conf_path: full path to the httpd.conf file to be used.
mime_types_path: full path to the mime.types file to be used.
document_root: full path to the DocumentRoot. If None, the DocumentRoot
from the httpd.conf file will be used. Note that the httpd.conf
file alongside this script does not specify any DocumentRoot, so if
you're using that one, be sure to specify a document_root here.
apache2: boolean; if true, this function returns the start
command for Apache 2.x as opposed to Apache 1.3.x. This flag
is ignored on Mac (but preserved here for compatibility of the
function signature with win), where httpd2 is always used.
"""
exe_name = "httpd"
cert_file = google.path_utils.FindUpward(self._base_dir, 'tools',
'python', 'google',
'httpd_config', 'httpd2.pem')
ssl_enabled = os.path.exists('/etc/apache2/mods-enabled/ssl.conf')
httpd_vars = {
"httpd_executable_path":
os.path.join(self._UnixRoot(), "usr", "sbin", exe_name),
"httpd_conf_path": httpd_conf_path,
"ssl_certificate_file": cert_file,
"document_root" : document_root,
"server_root": os.path.join(self._UnixRoot(), "usr"),
"mime_types_path": mime_types_path,
"output_dir": output_dir,
"ssl_mutex": "file:"+os.path.join(output_dir, "ssl_mutex"),
"user": os.environ.get("USER", "#%d" % os.geteuid()),
"lock_file": os.path.join(output_dir, "accept.lock"),
}
google.path_utils.MaybeMakeDirectory(output_dir)
# We have to wrap the command in bash
# -C: process directive before reading config files
# -c: process directive after reading config files
# Apache wouldn't run CGIs with permissions==700 unless we add
# -c User "<username>"
httpd_cmd_string = (
'%(httpd_executable_path)s'
' -f %(httpd_conf_path)s'
' -c \'TypesConfig "%(mime_types_path)s"\''
' -c \'CustomLog "%(output_dir)s/access_log.txt" common\''
' -c \'ErrorLog "%(output_dir)s/error_log.txt"\''
' -c \'PidFile "%(output_dir)s/httpd.pid"\''
' -C \'User "%(user)s"\''
' -C \'ServerRoot "%(server_root)s"\''
' -c \'LockFile "%(lock_file)s"\''
)
if document_root:
httpd_cmd_string += ' -C \'DocumentRoot "%(document_root)s"\''
if ssl_enabled:
httpd_cmd_string += (
' -c \'SSLCertificateFile "%(ssl_certificate_file)s"\''
' -c \'SSLMutex "%(ssl_mutex)s"\''
)
# Save a copy of httpd_cmd_string to use for stopping httpd
self._httpd_cmd_string = httpd_cmd_string % httpd_vars
httpd_cmd = [self._bash, "-c", self._httpd_cmd_string]
return httpd_cmd
def GetStopHttpdCommand(self):
"""Returns a list of strings that contains the command line+args needed to
stop the http server used in the http tests.
This tries to fetch the pid of httpd (if available) and returns the
command to kill it. If the pid is not available, kill all httpd processes.
"""
if not self._httpd_cmd_string:
return ["true"] # Haven't been asked for the start cmd yet. Just pass.
# Add a sleep after the shutdown because sometimes it takes some time for
# the port to be available again.
return [self._bash, "-c", self._httpd_cmd_string + ' -k stop && sleep 5']
|
bsd-3-clause
|
plesiv/hac
|
hac/util_data.py
|
1
|
10044
|
# -*- coding: utf-8 -*-
"""Utilities for application plugin-system.
NOTE: for simplicity, language and runner templates are referred to as plug-ins
in this file (along with site plug-ins).
"""
import imp
import os
import sys
import re
import requests
from string import Template
from difflib import SequenceMatcher
if sys.version_info.major == 2:
from urlparse import urlparse
else:
from urllib.parse import urlparse
import hac
from hac import DataType
from hac.data import ISiteRegistry
from hac.util_common import indent_distribute
_plugin_fname_regex = {
DataType.LANG: r"^(?P<temp>[^.]+)\.(?P<prio>[^.]+)\.(?P<ext>[^.]+)$",
DataType.RUNNER: {
'temp': r"^(?P<temp>[^.]+)\.(?P<prio>[^.]+)\.(?P<ext>[^.]+)$",
'part_u': r"^(?P<lang>[^.]+)\.(?P<part>[^.]+)\.{prio}\.{ext}$",
}
}
# -- Languages ----------------------------------------------------------------
def _plugin_discover_langs(dirs):
"""Discovers all available programming language templates in a given list
of directories.
Programming language template file-names are expected to be in the format:
<PLUGIN_TEMP>.<PRIORITY>.<LANGUAGE-EXTENSION>
Where:
* <PLUGIN_TEMP> is an application constant setting (from SETTINGS_CONST).
* <PRIORITY> is an integer greater than or equal to zero. A lower <PRIORITY>
number indicates higher priority,
* <LANGUAGE-EXTENSION> denotes the programming language of the
template.
For example, the highest-priority Python programming language template would
have the filename:
temp.0.py
Returns dictionary mapping "<LANGUAGE-EXTENSION>.<PRIORITY>" to the
contents of the given programming language template file.
"""
fname_pat = re.compile(_plugin_fname_regex[DataType.LANG])
sep_l = hac.SETTINGS_CONST['plugin_temp_sep'][DataType.LANG]
langs = {}
for cdir in dirs:
if os.path.isdir(cdir):
for fname in os.listdir(cdir):
token = fname_pat.search(fname)
if token:
# Filename matches specified regular expression.
key = token.group("ext") + sep_l + token.group("prio")
if (key not in langs):
with open(os.path.join(cdir, fname), 'r') as f:
contents = f.read()
langs[key] = contents
return langs
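# Illustrative sketch (not in the original module), assuming the separator
# from SETTINGS_CONST is '.': a plug-in directory containing the files below
# would yield keys of the form "<LANGUAGE-EXTENSION>.<PRIORITY>":
#
#   temp.0.py  -> key 'py.0'   (highest-priority Python template)
#   temp.1.py  -> key 'py.1'
#   temp.0.cpp -> key 'cpp.0'
#
# Each key maps to the full text of the corresponding template file.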
# -- Runners ------------------------------------------------------------------
def _plugin_discover_runners(dirs):
"""In a given list of directories discovers all available:
* runner templates and
* runner templating parts.
Generates runners' contents by applying templating parts to the templates.
Each template must have at least one templating part for each corresponding
programming language.
Runner template file-names are expected to be in the format:
<PLUGIN_TEMP>.<PRIORITY>.<RUNNER-EXTENSION>
Where:
* <PLUGIN_TEMP> is an application constant setting (from SETTINGS_CONST).
* <PRIORITY> is an integer greater than or equal to zero. A lower <PRIORITY>
number indicates higher priority,
* <RUNNER-EXTENSION> denotes the runner file-type of the template.
For example, the highest-priority shell runner template would have the filename:
temp.0.sh
Runner template part file-names are expected to be in the format:
<LANGUAGE-EXTENSION>.<PART>.<PRIORITY>.<RUNNER-EXTENSION>
Where:
* <LANGUAGE-EXTENSION> denotes which programming language is this
runner template part for,
* <PART> is the name of the runner template part,
* <PRIORITY>.<RUNNER-EXTENSION> should correspond to the end of some
existing <PLUGIN_TEMP>.<PRIORITY>.<RUNNER-EXTENSION> template.
Matching to the appropriate template is done according to this
part.
For example, parts "compile" and "execute" for Python programming language
and temp.0.sh template would be:
py.compile.0.sh
py.execute.0.sh
Returns dictionary mapping "<RUNNER-EXTENSION>.<PRIORITY>" to the
corresponding runner dictionaries. Each runner dictionary maps from
"LANGUAGE-EXTENSION" to the contents of the prepared for that programming
language.
"""
ftemp_pat = re.compile(_plugin_fname_regex[DataType.RUNNER]['temp'])
fpart_regex_u = _plugin_fname_regex[DataType.RUNNER]['part_u']
sep_r = hac.SETTINGS_CONST['plugin_temp_sep'][DataType.RUNNER]
pref_r = hac.SETTINGS_CONST['plugin_temp_part_prefix'][DataType.RUNNER]
runners = {}
for cdir_r in dirs:
if os.path.isdir(cdir_r):
for fname_r in os.listdir(cdir_r):
# Is filename in proper runner-template format?
tok_r = ftemp_pat.search(fname_r)
if tok_r:
ext = tok_r.group("ext")
prio = tok_r.group("prio")
key_r = ext + sep_r + prio
fpart_regex = fpart_regex_u.format(ext=ext, prio=prio)
fpart_pat = re.compile(fpart_regex)
fpath_r = os.path.join(cdir_r, fname_r)
with open(fpath_r, 'r') as f:
contents_r = f.read()
# Take first occurrence of runner template.
if key_r not in runners:
# Get available parts (get the first occurrence).
parts = {}
for cdir_p in dirs:
if os.path.isdir(cdir_p):
for fname_p in os.listdir(cdir_p):
# Is filename in proper part format?
tok_p = fpart_pat.search(fname_p)
if tok_p:
lang = tok_p.group("lang")
part = tok_p.group("part")
fpath_p = os.path.join(cdir_p, fname_p)
with open(fpath_p, 'r') as f:
contents_p = f.read()
if lang not in parts:
parts[lang] = {}
if part not in parts[lang]:
parts[lang][part] = contents_p
# Do the templating.
langs = {}
for lang in parts:
rtemp, rparts = indent_distribute(contents_r,
parts[lang],
pref_r)
template = Template(rtemp)
rendered = template.safe_substitute(rparts)
if lang not in langs:
langs[lang] = rendered
runners[key_r] = langs
return runners
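# Illustrative sketch (not in the original module): for a runner template named
# 'temp.0.sh', part file-names are matched against the regex
# "^(?P<lang>[^.]+)\.(?P<part>[^.]+)\.0\.sh$", so for example
#
#   py.compile.0.sh  -> lang='py',  part='compile'
#   py.execute.0.sh  -> lang='py',  part='execute'
#   cpp.compile.0.sh -> lang='cpp', part='compile'
#
# The parts are then substituted into the template via string.Template,
# producing one rendered runner per language, stored under the key 'sh.0'
# (assuming the separator from SETTINGS_CONST is '.').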
# -- Sites --------------------------------------------------------------------
def _plugin_discover_sites(dirs):
"""Dynamically discovers all web-site processors in a given list of
directories.
Returns list of site-processor objects. When site processors in different
locations have the same filename, site processors occurring in a location
specified earlier in the input list take precedence.
All discovered classes are instantiated with empty constructor.
"""
registered = set()
for cdir in dirs:
if os.path.isdir(cdir):
for filename in os.listdir(cdir):
froot, fext = os.path.splitext(filename)
if (fext == ".py") and (froot not in registered):
fname, fpath, fdescr = imp.find_module(froot, [cdir])
if fname:
# Register discovered site-processor module in
# ISiteRegistry.sites
mod = imp.load_module(froot, fname, fpath, fdescr)
# Track registered sites
registered.add(froot)
return [site() for site in ISiteRegistry.sites]
def plugin_match_site(sites, conf):
"""From all avaiable sites retrieve the url of the site that matches
location member of the config the best.
Currently, Ratcliff-Obershelp string matching algorithm from difflib is
used for this.
"""
matcher = SequenceMatcher(None)
matcher.set_seq2(conf['location'].lower())
urls_ranked = []
for site in sites:
url = site.url.lower()
matcher.set_seq1(url)
urls_ranked.append((matcher.ratio(), url))
_, best_matching_url = sorted(urls_ranked, reverse=True)[0]
return best_matching_url
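# A minimal sketch of the underlying difflib call (illustrative only; the URLs
# are hypothetical):
#
#   >>> from difflib import SequenceMatcher
#   >>> m = SequenceMatcher(None)
#   >>> m.set_seq2('http://codeforces.com/contest/527')
#   >>> m.set_seq1('codeforces.com')
#   >>> 0.0 <= m.ratio() <= 1.0
#   True
#
# plugin_match_site ranks every site url by this ratio and returns the
# best-scoring one.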
# -- Common data utilities ----------------------------------------------------
_plugin_discover_funcs = {
DataType.LANG: _plugin_discover_langs,
DataType.RUNNER: _plugin_discover_runners,
DataType.SITE: _plugin_discover_sites,
}
def plugin_collect(paths, data_type):
"""Collects plug-ins of specified type from the list of directories.
IMPORTANT: Paths that appear earlier in the list take precedence over the
paths that appear later, i.e. if the plug-in appears in multiple locations,
one that appears in the location specified earlier will be collected
(others will be ignored).
"""
plugin_dir = hac.SETTINGS_CONST["plugin_dir"][data_type]
plugin_discover = _plugin_discover_funcs[data_type]
return plugin_discover([os.path.join(path, plugin_dir) for path in paths])
# -- Web-data utilities -------------------------------------------------------
class RequestsCache(object):
def __init__(self):
self._store = {}
def get(self, url):
if url not in self._store:
self._store[url] = requests.get(url)
return self._store[url]
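# Example usage (a minimal sketch; the URL is hypothetical): repeated get()
# calls for the same URL are served from the in-memory store instead of
# issuing another network request.
#
#   cache = RequestsCache()
#   resp1 = cache.get('http://example.com/problems')  # fetched via requests
#   resp2 = cache.get('http://example.com/problems')  # returned from _store
#   assert resp1 is resp2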
|
gpl-2.0
|
tarthy6/dozer-thesis
|
py/tests/shapepack.py
|
3
|
4138
|
# encoding: utf-8
# 2013 © Václav Šmilauer <[email protected]>
import unittest
from minieigen import *
import woo._customConverters
import woo.core
import woo.dem
import woo.utils
import math
from woo.dem import *
class TestShapePack(unittest.TestCase):
# def setUp(self):
def testLoadSampleTxt(self):
'ShapePack: load sample text file'
data='''##PERIODIC:: 1. 1. 1.
0 Sphere 0 0 0 .1
1 Sphere .1 .1 .1 .1
1 Sphere .1 .1 .2 .1
2 Sphere .2 .2 .2 .2
2 Sphere .3 .3 .3 .3
2 Capsule .4 .4 .4 .3 .05 .05 .05
'''
tmp=woo.master.tmpFilename()
f=open(tmp,'w'); f.write(data); f.close()
sp=ShapePack(loadFrom=tmp)
self.assert_(len(sp.raws)==3)
self.assert_(type(sp.raws[1])==SphereClumpGeom) # automatic conversion for sphere-only clumps
self.assert_(sp.raws[2].rawShapes[2].className=='Capsule')
self.assert_(sp.cellSize[0]==1.)
# print sp.raws
def testSingle(self):
'ShapePack: single particles not clumped when inserted into simulation'
S=woo.core.Scene(fields=[DemField()])
sp=ShapePack(raws=[SphereClumpGeom(centers=[(1,1,1)],radii=[.1]),RawShapeClump(rawShapes=[RawShape(className='Sphere',center=(2,2,2),radius=.2,raw=[])])])
mat=woo.utils.defaultMaterial()
sp.toDem(S,S.dem,mat=mat)
self.assert_(len(S.dem.nodes)==2)
print S.dem.par[0].pos
self.assert_(S.dem.par[0].pos==(1,1,1))
self.assert_(S.dem.par[1].shape.radius==.2)
self.assert_(not S.dem.par[0].shape.nodes[0].dem.clumped)
self.assert_(not S.dem.par[1].shape.nodes[0].dem.clumped)
def testFromSim(self):
'ShapePack: from/to simulation with particles'
S=woo.core.Scene(fields=[DemField()])
# add two clumped spheres first
r1,r2,p0,p1=1,.5,Vector3.Zero,Vector3(0,0,3)
# adds clump node to S.dem.nodes automatically
S.dem.par.addClumped([
woo.utils.sphere((0,0,0),1),
woo.utils.sphere((0,0,3),.5)
])
# add a capsule
c=woo.utils.capsule(center=(5,5,5),shaft=.3,radius=.3)
S.dem.par.add(c,nodes=False)
S.dem.nodesAppend(c.shape.nodes[0])
# from DEM
sp=ShapePack()
sp.fromDem(S,S.dem,mask=0)
self.assert_(len(sp.raws)==2)
self.assert_(type(sp.raws[0])==SphereClumpGeom)
self.assert_(sp.raws[1].rawShapes[0].className=='Capsule')
#print sp.raws
# to DEM
mat=woo.utils.defaultMaterial()
S2=woo.core.Scene(fields=[DemField()])
sp.cellSize=(.2,.2,.2)
sp.toDem(S2,S2.dem,mat=mat)
# test that periodicity is used
self.assert_(S2.periodic==True)
self.assert_(S2.cell.size[0]==.2)
# for p in S2.dem.par: print p, p.shape, p.shape.nodes[0]
# for n in S2.dem.nodes: print n
self.assert_(len(S2.dem.par)==3) # two spheres and capsule
self.assert_(S2.dem.par[0].shape.nodes[0].dem.clumped) # sphere node is in clump
self.assert_(S2.dem.par[0].shape.nodes[0] not in S2.dem.nodes) # sphere node not in dem.nodes
self.assert_(S2.dem.nodes[0].dem.clump) # two-sphere clump node
self.assert_(not S2.dem.nodes[1].dem.clump) # capsule node
# TODO: test that particle positions are as they should be
def assertAlmostEqualRel(self,a,b,relerr,abserr=0):
self.assertAlmostEqual(a,b,delta=max(max(abs(a),abs(b))*relerr,abserr))
def testGridSampling(self):
'ShapePack: grid sampling gives good approximations of mass+inertia'
# take a single shape, compare with a clump of a zero-sized sphere (to force grid sampling) and that shape
m=woo.utils.defaultMaterial()
zeroSphere=woo.utils.sphere((0,0,0),.4) # sphere which is entirely inside the thing
for p in [woo.utils.sphere((0,0,0),1,mat=m),woo.utils.ellipsoid((0,0,0),semiAxes=(.8,1,1.2),mat=m),woo.utils.capsule((0,0,0),radius=.8,shaft=.6,mat=m)]:
# print p.shape
sp=woo.dem.ShapePack()
sp.add([p.shape,zeroSphere.shape])
r=sp.raws[0]
r.recompute(div=10)
# this depends on how we define equivalent radius, which is not clear yet; so just skip it
# self.assertAlmostEqualRel(r.equivRad,p.shape.equivRadius,1e-2)
self.assertAlmostEqualRel(r.volume,p.mass/m.density,1e-2)
# sorted since axes may be swapped
ii1,ii2=sorted(r.inertia),sorted(p.inertia/m.density)
for i1,i2 in zip(ii1,ii2): self.assertAlmostEqualRel(i1,i2,1e-2)
for ax in (0,1,2): self.assertAlmostEqualRel(r.pos[ax],p.pos[ax],0,1e-2)
|
gpl-2.0
|
echanna/EdxNotAFork
|
lms/djangoapps/django_comment_client/tests.py
|
69
|
2053
|
import string # pylint: disable=W0402
import random
from django.contrib.auth.models import User
from django.test import TestCase
from student.models import CourseEnrollment
from django_comment_client.permissions import has_permission
from django_comment_common.models import Role
class PermissionsTestCase(TestCase):
def random_str(self, length=15, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(length))
def setUp(self):
self.course_id = "edX/toy/2012_Fall"
self.moderator_role = Role.objects.get_or_create(name="Moderator", course_id=self.course_id)[0]
self.student_role = Role.objects.get_or_create(name="Student", course_id=self.course_id)[0]
self.student = User.objects.create(username=self.random_str(),
password="123456", email="[email protected]")
self.moderator = User.objects.create(username=self.random_str(),
password="123456", email="[email protected]")
self.moderator.is_staff = True
self.moderator.save()
self.student_enrollment = CourseEnrollment.enroll(self.student, self.course_id)
self.moderator_enrollment = CourseEnrollment.enroll(self.moderator, self.course_id)
def tearDown(self):
self.student_enrollment.delete()
self.moderator_enrollment.delete()
# Do we need to have this? We shouldn't be deleting students, ever
# self.student.delete()
# self.moderator.delete()
def testDefaultRoles(self):
self.assertTrue(self.student_role in self.student.roles.all())
self.assertTrue(self.moderator_role in self.moderator.roles.all())
def testPermission(self):
name = self.random_str()
self.moderator_role.add_permission(name)
self.assertTrue(has_permission(self.moderator, name, self.course_id))
self.student_role.add_permission(name)
self.assertTrue(has_permission(self.student, name, self.course_id))
|
agpl-3.0
|
defance/edx-platform
|
lms/djangoapps/class_dashboard/tests/test_dashboard_data.py
|
88
|
13672
|
"""
Tests for class dashboard (Metrics tab in instructor dashboard)
"""
import json
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from mock import patch
from nose.plugins.attrib import attr
from capa.tests.response_xml_factory import StringResponseXMLFactory
from courseware.tests.factories import StudentModuleFactory
from student.tests.factories import UserFactory, CourseEnrollmentFactory, AdminFactory
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from class_dashboard.dashboard_data import (
get_problem_grade_distribution, get_sequential_open_distrib,
get_problem_set_grade_distrib, get_d3_problem_grade_distrib,
get_d3_sequential_open_distrib, get_d3_section_grade_distrib,
get_section_display_name, get_array_section_has_problem,
get_students_opened_subsection, get_students_problem_grades,
)
from class_dashboard.views import has_instructor_access_for_class
USER_COUNT = 11
@attr('shard_1')
class TestGetProblemGradeDistribution(SharedModuleStoreTestCase):
"""
Tests related to class_dashboard/dashboard_data.py
"""
@classmethod
def setUpClass(cls):
super(TestGetProblemGradeDistribution, cls).setUpClass()
cls.course = CourseFactory.create(
display_name=u"test course omega \u03a9",
)
with cls.store.bulk_operations(cls.course.id, emit_signals=False):
section = ItemFactory.create(
parent_location=cls.course.location,
category="chapter",
display_name=u"test factory section omega \u03a9",
)
cls.sub_section = ItemFactory.create(
parent_location=section.location,
category="sequential",
display_name=u"test subsection omega \u03a9",
)
cls.unit = ItemFactory.create(
parent_location=cls.sub_section.location,
category="vertical",
metadata={'graded': True, 'format': 'Homework'},
display_name=u"test unit omega \u03a9",
)
cls.items = []
for i in xrange(USER_COUNT - 1):
item = ItemFactory.create(
parent_location=cls.unit.location,
category="problem",
data=StringResponseXMLFactory().build_xml(answer='foo'),
metadata={'rerandomize': 'always'},
display_name=u"test problem omega \u03a9 " + str(i)
)
cls.items.append(item)
cls.item = item
def setUp(self):
super(TestGetProblemGradeDistribution, self).setUp()
self.request_factory = RequestFactory()
self.instructor = AdminFactory.create()
self.client.login(username=self.instructor.username, password='test')
self.attempts = 3
self.users = [
UserFactory.create(username="metric" + str(__))
for __ in xrange(USER_COUNT)
]
for user in self.users:
CourseEnrollmentFactory.create(user=user, course_id=self.course.id)
for i, item in enumerate(self.items):
for j, user in enumerate(self.users):
StudentModuleFactory.create(
grade=1 if i < j else 0,
max_grade=1 if i < j else 0.5,
student=user,
course_id=self.course.id,
module_state_key=item.location,
state=json.dumps({'attempts': self.attempts}),
)
for j, user in enumerate(self.users):
StudentModuleFactory.create(
course_id=self.course.id,
module_type='sequential',
module_state_key=item.location,
)
def test_get_problem_grade_distribution(self):
prob_grade_distrib, total_student_count = get_problem_grade_distribution(self.course.id)
for problem in prob_grade_distrib:
max_grade = prob_grade_distrib[problem]['max_grade']
self.assertEquals(1, max_grade)
for val in total_student_count.values():
self.assertEquals(USER_COUNT, val)
def test_get_sequential_open_distribution(self):
sequential_open_distrib = get_sequential_open_distrib(self.course.id)
for problem in sequential_open_distrib:
num_students = sequential_open_distrib[problem]
self.assertEquals(USER_COUNT, num_students)
def test_get_problemset_grade_distrib(self):
prob_grade_distrib, __ = get_problem_grade_distribution(self.course.id)
probset_grade_distrib = get_problem_set_grade_distrib(self.course.id, prob_grade_distrib)
for problem in probset_grade_distrib:
max_grade = probset_grade_distrib[problem]['max_grade']
self.assertEquals(1, max_grade)
grade_distrib = probset_grade_distrib[problem]['grade_distrib']
sum_attempts = 0
for item in grade_distrib:
sum_attempts += item[1]
self.assertEquals(USER_COUNT, sum_attempts)
def test_get_d3_problem_grade_distrib(self):
d3_data = get_d3_problem_grade_distrib(self.course.id)
for data in d3_data:
for stack_data in data['data']:
sum_values = 0
for problem in stack_data['stackData']:
sum_values += problem['value']
self.assertEquals(USER_COUNT, sum_values)
def test_get_d3_sequential_open_distrib(self):
d3_data = get_d3_sequential_open_distrib(self.course.id)
for data in d3_data:
for stack_data in data['data']:
for problem in stack_data['stackData']:
value = problem['value']
self.assertEquals(0, value)
def test_get_d3_section_grade_distrib(self):
d3_data = get_d3_section_grade_distrib(self.course.id, 0)
for stack_data in d3_data:
sum_values = 0
for problem in stack_data['stackData']:
sum_values += problem['value']
self.assertEquals(USER_COUNT, sum_values)
def test_get_students_problem_grades(self):
attributes = '?module_id=' + self.item.location.to_deprecated_string()
request = self.request_factory.get(reverse('get_students_problem_grades') + attributes)
response = get_students_problem_grades(request)
response_content = json.loads(response.content)['results']
response_max_exceeded = json.loads(response.content)['max_exceeded']
self.assertEquals(USER_COUNT, len(response_content))
self.assertEquals(False, response_max_exceeded)
for item in response_content:
if item['grade'] == 0:
self.assertEquals(0, item['percent'])
else:
self.assertEquals(100, item['percent'])
def test_get_students_problem_grades_max(self):
with patch('class_dashboard.dashboard_data.MAX_SCREEN_LIST_LENGTH', 2):
attributes = '?module_id=' + self.item.location.to_deprecated_string()
request = self.request_factory.get(reverse('get_students_problem_grades') + attributes)
response = get_students_problem_grades(request)
response_results = json.loads(response.content)['results']
response_max_exceeded = json.loads(response.content)['max_exceeded']
# Only 2 students in the list and response_max_exceeded is True
self.assertEquals(2, len(response_results))
self.assertEquals(True, response_max_exceeded)
def test_get_students_problem_grades_csv(self):
tooltip = 'P1.2.1 Q1 - 3382 Students (100%: 1/1 questions)'
attributes = '?module_id=' + self.item.location.to_deprecated_string() + '&tooltip=' + tooltip + '&csv=true'
request = self.request_factory.get(reverse('get_students_problem_grades') + attributes)
response = get_students_problem_grades(request)
# Check header and a row for each student in csv response
self.assertContains(response, '"Name","Username","Grade","Percent"')
self.assertContains(response, '"metric0","0.0","0.0"')
self.assertContains(response, '"metric1","0.0","0.0"')
self.assertContains(response, '"metric2","0.0","0.0"')
self.assertContains(response, '"metric3","0.0","0.0"')
self.assertContains(response, '"metric4","0.0","0.0"')
self.assertContains(response, '"metric5","0.0","0.0"')
self.assertContains(response, '"metric6","0.0","0.0"')
self.assertContains(response, '"metric7","0.0","0.0"')
self.assertContains(response, '"metric8","0.0","0.0"')
self.assertContains(response, '"metric9","0.0","0.0"')
self.assertContains(response, '"metric10","1.0","100.0"')
def test_get_students_opened_subsection(self):
attributes = '?module_id=' + self.item.location.to_deprecated_string()
request = self.request_factory.get(reverse('get_students_opened_subsection') + attributes)
response = get_students_opened_subsection(request)
response_results = json.loads(response.content)['results']
response_max_exceeded = json.loads(response.content)['max_exceeded']
self.assertEquals(USER_COUNT, len(response_results))
self.assertEquals(False, response_max_exceeded)
def test_get_students_opened_subsection_max(self):
with patch('class_dashboard.dashboard_data.MAX_SCREEN_LIST_LENGTH', 2):
attributes = '?module_id=' + self.item.location.to_deprecated_string()
request = self.request_factory.get(reverse('get_students_opened_subsection') + attributes)
response = get_students_opened_subsection(request)
response_results = json.loads(response.content)['results']
response_max_exceeded = json.loads(response.content)['max_exceeded']
# Only 2 students in the list and response_max_exceeded is True
self.assertEquals(2, len(response_results))
self.assertEquals(True, response_max_exceeded)
def test_get_students_opened_subsection_csv(self):
tooltip = '4162 students opened Subsection 5: Relational Algebra Exercises'
attributes = '?module_id=' + self.item.location.to_deprecated_string() + '&tooltip=' + tooltip + '&csv=true'
request = self.request_factory.get(reverse('get_students_opened_subsection') + attributes)
response = get_students_opened_subsection(request)
self.assertContains(response, '"Name","Username"')
# Check response contains 1 line for each user +1 for the header
self.assertEquals(USER_COUNT + 1, len(response.content.splitlines()))
def test_post_metrics_data_subsections_csv(self):
url = reverse('post_metrics_data_csv')
sections = json.dumps(["Introduction"])
tooltips = json.dumps([[{"subsection_name": "Pre-Course Survey", "subsection_num": 1, "type": "subsection", "num_students": 18963}]])
course_id = self.course.id
data_type = 'subsection'
data = json.dumps({'sections': sections,
'tooltips': tooltips,
'course_id': course_id.to_deprecated_string(),
'data_type': data_type,
})
response = self.client.post(url, {'data': data})
# Check response contains 1 line for header, 1 line for Section and 1 line for Subsection
self.assertEquals(3, len(response.content.splitlines()))
def test_post_metrics_data_problems_csv(self):
url = reverse('post_metrics_data_csv')
sections = json.dumps(["Introduction"])
tooltips = json.dumps([[[
{'student_count_percent': 0,
'problem_name': 'Q1',
'grade': 0,
'percent': 0,
'label': 'P1.2.1',
'max_grade': 1,
'count_grade': 26,
'type': u'problem'},
{'student_count_percent': 99,
'problem_name': 'Q1',
'grade': 1,
'percent': 100,
'label': 'P1.2.1',
'max_grade': 1,
'count_grade': 4763,
'type': 'problem'},
]]])
course_id = self.course.id
data_type = 'problem'
data = json.dumps({'sections': sections,
'tooltips': tooltips,
'course_id': course_id.to_deprecated_string(),
'data_type': data_type,
})
response = self.client.post(url, {'data': data})
# Check response contains 1 line for header, 1 line for Sections and 2 lines for problems
self.assertEquals(4, len(response.content.splitlines()))
def test_get_section_display_name(self):
section_display_name = get_section_display_name(self.course.id)
self.assertMultiLineEqual(section_display_name[0], u"test factory section omega \u03a9")
def test_get_array_section_has_problem(self):
b_section_has_problem = get_array_section_has_problem(self.course.id)
self.assertEquals(b_section_has_problem[0], True)
def test_has_instructor_access_for_class(self):
"""
Test for instructor access
"""
ret_val = bool(has_instructor_access_for_class(self.instructor, self.course.id))
self.assertEquals(ret_val, True)
|
agpl-3.0
|