repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (991 classes) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (15 classes)
---|---|---|---|---|---
willu47/SALib
|
tests/test_cli_analyze.py
|
1
|
7726
|
import sys
import subprocess
from SALib.test_functions import Ishigami
import numpy as np
import re
salib_cli = "./src/SALib/scripts/salib.py"
ishigami_fp = "./src/SALib/test_functions/params/Ishigami.txt"
if sys.version_info[0] == 2:
subprocess.run = subprocess.call
def test_delta():
cmd = "python {cli} sample saltelli -p {fn} -o model_input.txt -n 1000"\
.format(cli=salib_cli, fn=ishigami_fp) +\
" --precision 8 --max-order 2 --seed=100"
subprocess.run(cmd.split())
# Run model and save output
np.savetxt('model_output.txt', Ishigami.evaluate(
np.loadtxt('model_input.txt')))
analyze_cmd = "python {cli} analyze delta -p {fn} -X model_input.txt \
-Y model_output.txt -c 0 -r 10 --seed=100".format(cli=salib_cli,
fn=ishigami_fp).split()
result = subprocess.check_output(analyze_cmd, universal_newlines=True)
result = re.sub(r'[\n\t\s]*', '', result)
expected_output = 'Parameterdeltadelta_confS1S1_confx10.2104780.0060910.3113620.012291x20.3540230.0062380.4283650.017972x30.1609860.0047180.0011110.002995'
assert len(result) > 0 and result in expected_output, \
"Results did not match expected values:\n\n Expected: \n{} \n\n Got: \n{}".format(
expected_output, result)
def test_dgsm():
# Generate inputs
cmd = "python {cli} sample finite_diff -p {fn} -o model_input.txt -d 0.001\
--precision=8 -n 1000 --seed=100".format(cli=salib_cli,
fn=ishigami_fp).split()
subprocess.run(cmd)
# Run model and save output
np.savetxt('model_output.txt', Ishigami.evaluate(
np.loadtxt('model_input.txt')))
analyze_cmd = "python {cli} analyze dgsm -p {fn} -X model_input.txt\
-Y model_output.txt -c 0 -r 1000 --seed=100"\
.format(cli=salib_cli, fn=ishigami_fp).split()
# run analysis and use regex to strip all whitespace from result
result = subprocess.check_output(analyze_cmd, universal_newlines=True)
result = re.sub(r'[\n\t\s]*', '', result)
expected = "Parametervivi_stddgsmdgsm_confx17.62237816.1981232.2075541.034173x224.48775717.3385567.0920191.090835x311.18125824.0621273.2382591.477114"
assert len(result) > 0 and result == expected, \
"Unexpected DGSM results.\n\nExpected:\n{}\n\nGot:{}"\
.format(expected, result)
def test_fast():
# Generate inputs
cmd = "python {cli} sample fast_sampler -p {fn} -o model_input.txt \
--precision=8 -n 1000 -M 4 --seed=100".format(cli=salib_cli,
fn=ishigami_fp).split()
subprocess.run(cmd)
# Run model and save output
np.savetxt('model_output.txt', Ishigami.evaluate(
np.loadtxt('model_input.txt')))
analyze_cmd = "python {cli} analyze fast -p {fn} \
-Y model_output.txt -c 0 --seed=100"\
.format(cli=salib_cli, fn=ishigami_fp).split()
# run analysis and use regex to strip all whitespace from result
result = subprocess.check_output(analyze_cmd, universal_newlines=True)
result = re.sub(r'[\n\t\s]*', '', result)
expected = "ParameterFirstTotalx10.3104030.555603x20.4425530.469546x30.0000000.239155"
assert len(result) > 0 and result == expected, \
"Unexpected FAST results.\n\nExpected:\n{}\n\nGot:{}"\
.format(expected, result)
def test_ff():
# Generate inputs
cmd = "python {cli} sample ff -p {fn} -o model_input.txt \
--precision=8 -n 1000 --seed=100".format(cli=salib_cli,
fn=ishigami_fp).split()
subprocess.run(cmd)
# Run model and save output
np.savetxt('model_output.txt', Ishigami.evaluate(
np.loadtxt('model_input.txt')))
analyze_cmd = "python {cli} analyze ff -p {fn} -X model_input.txt\
-Y model_output.txt -c 0 --seed=100"\
.format(cli=salib_cli, fn=ishigami_fp).split()
# run analysis and use regex to strip all whitespace from result
result = subprocess.check_output(analyze_cmd, universal_newlines=True)
result = re.sub(r'[\n\t\s]*', '', result)
expected = "ParameterMEx10.000000x20.000000x30.000000dummy_00.000000('x1','x2')0.000000('x1','x3')0.000000('x2','x3')0.000000('x1','dummy_0')0.000000('x2','dummy_0')0.000000('x3','dummy_0')0.000000"
assert len(result) > 0 and result == expected, \
"Unexpected FF results.\n\nExpected:\n{}\n\nGot:{}"\
.format(expected, result)
def test_morris():
# Generate inputs
cmd = "python {cli} sample morris -p {fn} -o model_input.txt -n 100\
--precision=8 --levels=10 --seed=100 -lo False"\
.format(cli=salib_cli, fn=ishigami_fp).split()
subprocess.run(cmd)
# Run model and save output
np.savetxt('model_output.txt', Ishigami.evaluate(
np.loadtxt('model_input.txt')))
# run analysis
analyze_cmd = "python {cli} analyze morris -p {fn} -X model_input.txt\
-Y model_output.txt -c 0 -r 1000 -l 10 --seed=100"\
.format(cli=salib_cli, fn=ishigami_fp).split()
result = subprocess.check_output(analyze_cmd, universal_newlines=True)
result = re.sub(r'[\n\t\s]*', '', result)
expected_output = """ParameterMu_StarMuMu_Star_ConfSigmax13.3753.3750.5903.003x21.4740.1180.0001.477x32.6980.4200.5954.020"""
assert len(result) > 0 and result == expected_output, \
"Results did not match expected values:\n\n Expected: \n{} \n\n Got: \n{}".format(
expected_output, result)
def test_rbd_fast():
# Generate inputs
cmd = "python {cli} sample ff -p {fn} -o model_input.txt \
--precision=8 --seed=100".format(cli=salib_cli, fn=ishigami_fp).split()
subprocess.run(cmd)
# Run model and save output
np.savetxt('model_output.txt', Ishigami.evaluate(
np.loadtxt('model_input.txt')))
analyze_cmd = "python {cli} analyze rbd_fast -p {fn} -X model_input.txt\
-Y model_output.txt --seed=100"\
.format(cli=salib_cli, fn=ishigami_fp).split()
# run analysis and use regex to strip all whitespace from result
result = subprocess.check_output(analyze_cmd, universal_newlines=True)
result = re.sub(r'[\n\t\s]*', '', result)
expected = "ParameterFirstx10.437313x20.129825x30.000573789"
assert len(result) > 0 and result == expected, \
"Unexpected RBD-FAST results.\n\nExpected:\n{}\n\nGot:{}"\
.format(expected, result)
def test_sobol():
# Generate inputs
cmd = "python {cli} sample saltelli -p {fn} -o model_input.txt -n 1000\
--precision 8 --max-order 2 --seed=100".format(cli=salib_cli,
fn=ishigami_fp)
cmd = cmd.split()
result = subprocess.check_output(cmd, universal_newlines=True)
np.savetxt('model_output.txt', Ishigami.evaluate(
np.loadtxt('model_input.txt')))
analyze_cmd = "python {cli} analyze sobol -p {fn}\
-Y model_output.txt -c 0 --max-order 2\
-r 1000 --seed=100".format(cli=salib_cli, fn=ishigami_fp).split()
result = subprocess.check_output(analyze_cmd, universal_newlines=True)
result = re.sub(r'[\n\t\s]*', '', result)
expected_output = 'ParameterS1S1_confSTST_confx10.3079750.0630470.5601370.091908x20.4477670.0533230.4387220.040634x3-0.0042550.0596670.2428450.026578Parameter_1Parameter_2S2S2_confx1x20.0122050.086177x1x30.2515260.108147x2x3-0.0099540.065569'
assert len(result) > 0 and result == expected_output, \
"Results did not match expected values:\n\n Expected: \n{} \n\n Got: \n{}".format(
expected_output, result)
if __name__ == '__main__':
test_delta()
test_dgsm()
test_fast()
test_ff()
test_morris()
test_rbd_fast()
test_sobol()
|
mit
|
yjxtogo/horizon
|
horizon/__init__.py
|
85
|
2217
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" The Horizon interface.
Contains the core Horizon classes--:class:`~horizon.Dashboard` and
:class:`horizon.Panel`--the dynamic URLconf for Horizon, and common interface
methods like :func:`~horizon.register` and :func:`~horizon.unregister`.
"""
# Because this module is compiled by setup.py before Django may be installed
# in the environment, we try importing Django and issue a warning, but move
# on should that fail.
Horizon = None
try:
from horizon.base import Dashboard # noqa
from horizon.base import Horizon # noqa
from horizon.base import Panel # noqa
from horizon.base import PanelGroup # noqa
except ImportError:
import warnings
def simple_warn(message, category, filename, lineno, file=None, line=None):
return '%s: %s' % (category.__name__, message)
msg = ("Could not import Horizon dependencies. "
"This is normal during installation.\n")
warnings.formatwarning = simple_warn
warnings.warn(msg, Warning)
if Horizon:
register = Horizon.register
unregister = Horizon.unregister
get_absolute_url = Horizon.get_absolute_url
get_user_home = Horizon.get_user_home
get_dashboard = Horizon.get_dashboard
get_default_dashboard = Horizon.get_default_dashboard
get_dashboards = Horizon.get_dashboards
urls = Horizon._lazy_urls
# silence flake8 about unused imports here:
__all__ = [
"Dashboard",
"Horizon",
"Panel",
"PanelGroup",
"register",
"unregister",
"get_absolute_url",
"get_user_home",
"get_dashboard",
"get_default_dashboard",
"get_dashboards",
"urls",
]
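# Illustrative sketch (not part of the original module): once the imports
# above succeed, the re-exported helpers are typically combined roughly as
# follows. The class names and attribute values here are hypothetical.
#
#   import horizon
#
#   class MyPanel(horizon.Panel):
#       name = "My Panel"
#       slug = "mypanel"
#
#   class MyDashboard(horizon.Dashboard):
#       name = "My Dashboard"
#       slug = "mydashboard"
#       panels = ("mypanel",)
#       default_panel = "mypanel"
#
#   horizon.register(MyDashboard)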
|
apache-2.0
|
dneg/cortex
|
python/IECore/SWAReader.py
|
12
|
4885
|
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import re
import IECore
## The SWAReader class reads SpeedTree .swa files in the form of
# IECore.PointsPrimitives.
class SWAReader( IECore.Reader ) :
def __init__( self, fileName=None ) :
IECore.Reader.__init__(
self,
"Reads SpeedTree SWA files"
)
if fileName is not None :
self["fileName"].setTypedValue( fileName )
@staticmethod
def canRead( fileName ) :
try :
f = open( fileName, "r" )
treeName = f.readline()
numTrees = int( f.readline() )
assert( numTrees )
firstTreeData = [ float( x ) for x in f.readline().split() ]
assert( len( firstTreeData ) == 10 )
return True
except :
return False
def doOperation( self, args ) :
f = open( args["fileName"].value, "r" )
p = IECore.V3fVectorData()
xAxis = IECore.V3fVectorData()
yAxis = IECore.V3fVectorData()
zAxis = IECore.V3fVectorData()
scale = IECore.FloatVectorData()
treeNameIndices = IECore.IntVectorData()
treeName = IECore.StringVectorData()
currentTreeName = ""
currentTreeIndex = 0
expectedTreeCount = None
currentTreeCount = 0
for line in f.readlines() :
line = line.strip()
if not line :
continue
if not currentTreeName :
currentTreeName = line.strip( "\"\'" )
treeName.append( currentTreeName )
currentTreeCount = 0
expectedTreeCount = None
elif expectedTreeCount is None :
expectedTreeCount = int( line )
else :
treeData = [ float( x ) for x in line.split() ]
assert( len( treeData ) == 10 )
p.append( IECore.V3f( treeData[0], treeData[2], -treeData[1] ) )
ya = IECore.V3f( treeData[3], treeData[5], -treeData[4] )
xa = IECore.V3f( treeData[6], treeData[8], -treeData[7] )
za = xa.cross( ya )
xAxis.append( xa )
yAxis.append( ya )
zAxis.append( za )
scale.append( treeData[9] )
treeNameIndices.append( currentTreeIndex )
currentTreeCount += 1
if currentTreeCount == expectedTreeCount :
currentTreeName = ""
currentTreeIndex += 1
assert( currentTreeCount == expectedTreeCount )
result = IECore.PointsPrimitive( len( p ) )
result["P"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, p )
result["xAxis"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, xAxis )
result["yAxis"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, yAxis )
result["zAxis"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, zAxis )
result["scale"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, scale )
result["treeNameIndices"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, treeNameIndices )
result["treeName"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Constant, treeName )
result["type"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Constant, IECore.StringData( "gl:point" ) )
return result
IECore.registerRunTimeTyped( SWAReader )
IECore.Reader.registerReader( "swa", SWAReader.canRead, SWAReader, SWAReader.staticTypeId() )
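# Expected .swa layout, as implied by canRead() and doOperation() above
# (an illustrative summary, not part of the original file):
#
#   "TreeTypeName"                       <- tree name, optionally quoted
#   2                                    <- number of instances of this tree
#   px py pz  y1 y2 y3  x1 x2 x3  scale  <- 10 floats per instance
#   px py pz  y1 y2 y3  x1 x2 x3  scale
#   ...                                  <- further name/count/instance blocks
#
# doOperation() turns each instance into a point carrying "P", "xAxis",
# "yAxis", "zAxis" (the latter derived via a cross product), "scale" and
# "treeNameIndices" primitive variables on the returned PointsPrimitive.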
|
bsd-3-clause
|
amith01994/intellij-community
|
python/lib/Lib/site-packages/django/contrib/flatpages/tests/csrf.py
|
94
|
3788
|
import os
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase, Client
class FlatpageCSRFTests(TestCase):
fixtures = ['sample_flatpages']
urls = 'django.contrib.flatpages.tests.urls'
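# setUp() below switches the client to enforce CSRF checks, injects the CSRF
# and flatpage-fallback middleware if missing, and points TEMPLATE_DIRS and
# LOGIN_URL at test values; tearDown() restores the original settings.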
def setUp(self):
self.client = Client(enforce_csrf_checks=True)
self.old_MIDDLEWARE_CLASSES = settings.MIDDLEWARE_CLASSES
flatpage_middleware_class = 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'
csrf_middleware_class = 'django.middleware.csrf.CsrfViewMiddleware'
if csrf_middleware_class not in settings.MIDDLEWARE_CLASSES:
settings.MIDDLEWARE_CLASSES += (csrf_middleware_class,)
if flatpage_middleware_class not in settings.MIDDLEWARE_CLASSES:
settings.MIDDLEWARE_CLASSES += (flatpage_middleware_class,)
self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
settings.TEMPLATE_DIRS = (
os.path.join(
os.path.dirname(__file__),
'templates'
),
)
self.old_LOGIN_URL = settings.LOGIN_URL
settings.LOGIN_URL = '/accounts/login/'
def tearDown(self):
settings.MIDDLEWARE_CLASSES = self.old_MIDDLEWARE_CLASSES
settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS
settings.LOGIN_URL = self.old_LOGIN_URL
def test_view_flatpage(self):
"A flatpage can be served through a view, even when the middleware is in use"
response = self.client.get('/flatpage_root/flatpage/')
self.assertEquals(response.status_code, 200)
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_view_non_existent_flatpage(self):
"A non-existent flatpage raises 404 when served through a view, even when the middleware is in use"
response = self.client.get('/flatpage_root/no_such_flatpage/')
self.assertEquals(response.status_code, 404)
def test_view_authenticated_flatpage(self):
"A flatpage served through a view can require authentication"
response = self.client.get('/flatpage_root/sekrit/')
self.assertRedirects(response, '/accounts/login/?next=/flatpage_root/sekrit/')
User.objects.create_user('testuser', '[email protected]', 's3krit')
self.client.login(username='testuser',password='s3krit')
response = self.client.get('/flatpage_root/sekrit/')
self.assertEquals(response.status_code, 200)
self.assertContains(response, "<p>Isn't it sekrit!</p>")
def test_fallback_flatpage(self):
"A flatpage can be served by the fallback middlware"
response = self.client.get('/flatpage/')
self.assertEquals(response.status_code, 200)
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_fallback_non_existent_flatpage(self):
"A non-existent flatpage raises a 404 when served by the fallback middlware"
response = self.client.get('/no_such_flatpage/')
self.assertEquals(response.status_code, 404)
def test_post_view_flatpage(self):
"POSTing to a flatpage served through a view will raise a CSRF error if no token is provided (Refs #14156)"
response = self.client.post('/flatpage_root/flatpage/')
self.assertEquals(response.status_code, 403)
def test_post_fallback_flatpage(self):
"POSTing to a flatpage served by the middleware will raise a CSRF error if no token is provided (Refs #14156)"
response = self.client.post('/flatpage/')
self.assertEquals(response.status_code, 403)
def test_post_unknown_page(self):
"POSTing to an unknown page isn't caught as a 403 CSRF error"
response = self.client.post('/no_such_page/')
self.assertEquals(response.status_code, 404)
|
apache-2.0
|
rghe/ansible
|
test/units/modules/network/f5/test_bigip_wait.py
|
10
|
3828
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
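# The F5 modules can be imported either from a source checkout (library.*)
# or from an installed Ansible tree (ansible.modules.*); try the in-repo
# paths first and fall back, skipping the tests if neither import works.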
try:
from library.modules.bigip_wait import Parameters
from library.modules.bigip_wait import ModuleManager
from library.modules.bigip_wait import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_wait import Parameters
from ansible.modules.network.f5.bigip_wait import ModuleManager
from ansible.modules.network.f5.bigip_wait import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
delay=3,
timeout=500,
sleep=10,
msg='We timed out during waiting for BIG-IP :-('
)
p = Parameters(params=args)
assert p.delay == 3
assert p.timeout == 500
assert p.sleep == 10
assert p.msg == 'We timed out during waiting for BIG-IP :-('
def test_module_string_parameters(self):
args = dict(
delay='3',
timeout='500',
sleep='10',
msg='We timed out during waiting for BIG-IP :-('
)
p = Parameters(params=args)
assert p.delay == 3
assert p.timeout == 500
assert p.sleep == 10
assert p.msg == 'We timed out during waiting for BIG-IP :-('
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.patcher1 = patch('time.sleep')
self.patcher1.start()
def tearDown(self):
self.patcher1.stop()
def test_wait_already_available(self, *args):
set_module_args(dict(
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm._connect_to_device = Mock(return_value=True)
mm._device_is_rebooting = Mock(return_value=False)
mm._is_mprov_running_on_device = Mock(return_value=False)
mm._get_client_connection = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is False
assert results['elapsed'] == 0
|
gpl-3.0
|
chafique-delli/OpenUpgrade
|
addons/website_twitter/controllers/main.py
|
76
|
1637
|
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.tools.translate import _
import json
class Twitter(http.Controller):
@http.route(['/twitter_reload'], type='json', auth="user", website=True)
def twitter_reload(self):
return request.website.fetch_favorite_tweets()
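# get_tweets() returns the cached favourite tweets for the configured screen
# name, or an error dict if the API credentials or screen name are missing,
# or if fewer than 12 favourites have been fetched so far.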
@http.route(['/get_favorites'], type='json', auth="public", website=True)
def get_tweets(self, limit=20):
key = request.website.twitter_api_key
secret = request.website.twitter_api_secret
screen_name = request.website.twitter_screen_name
if not key or not secret:
return {"error": _("Please set the Twitter API Key and Secret in the Website Settings.")}
if not screen_name:
return {"error": _("Please set a Twitter screen name to load favorites from, "
"in the Website Settings (it does not have to be yours)")}
twitter_tweets = request.registry['website.twitter.tweet']
tweets = twitter_tweets.search_read(
request.cr, request.uid,
[('website_id','=', request.website.id),
('screen_name','=', screen_name)],
['tweet'], limit=int(limit), order="tweet_id desc", context=request.context)
if len(tweets) < 12:
return {"error": _("Twitter user @%(username)s has less than 12 favorite tweets. "
"Please add more or choose a different screen name.") % \
{'username': screen_name}}
return [json.loads(tweet['tweet']) for tweet in tweets]
|
agpl-3.0
|
JKRP/geonode
|
geonode/groups/search_indexes.py
|
27
|
1292
|
import json
from django.conf import settings
from haystack import indexes
from geonode.groups.models import GroupProfile
class GroupIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
title = indexes.CharField(boost=2)
# https://github.com/toastdriven/django-haystack/issues/569 - Necessary for sorting
title_sortable = indexes.CharField(indexed=False)
description = indexes.CharField(model_attr='description', boost=1.5)
id = indexes.IntegerField(model_attr='id')
type = indexes.CharField(faceted=True)
json = indexes.CharField(indexed=False)
def get_model(self):
return GroupProfile
def prepare_title(self, obj):
return str(obj)
def prepare_title_sortable(self, obj):
return str(obj).lower()
def prepare_type(self, obj):
return "group"
def prepare_json(self, obj):
data = {
"_type": self.prepare_type(obj),
"title": obj.title,
"description": obj.description,
"keywords": [keyword.name for keyword in obj.keywords.all()] if obj.keywords else [],
"thumb": settings.STATIC_URL + "static/img/contact.png",
"detail": None,
}
return json.dumps(data)
|
gpl-3.0
|
mclois/iteexe
|
twisted/test/test_pbfailure.py
|
16
|
6031
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.spread import pb, flavors, jelly
from twisted.internet import reactor, defer
from twisted.python import log, failure
##
# test exceptions
##
class PoopError(Exception): pass
class FailError(Exception): pass
class DieError(Exception): pass
class TimeoutError(Exception): pass
#class JellyError(flavors.Jellyable, pb.Error): pass
class JellyError(flavors.Jellyable, pb.Error, pb.RemoteCopy): pass
class SecurityError(pb.Error, pb.RemoteCopy): pass
pb.setUnjellyableForClass(JellyError, JellyError)
pb.setUnjellyableForClass(SecurityError, SecurityError)
pb.globalSecurity.allowInstancesOf(SecurityError)
####
# server-side
####
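# SimpleRoot exposes remote_* methods that each fail in a different way
# (a returned Failure, plain raised exceptions, jellyable/security errors
# and deferred variants of the latter) so the client-side tests below can
# exercise every error-propagation path.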
class SimpleRoot(pb.Root):
def remote_poop(self):
return defer.fail(failure.Failure(PoopError("Someone threw poopie at me!")))
def remote_fail(self):
raise FailError("I'm a complete failure! :(")
def remote_die(self):
raise DieError("*gack*")
def remote_jelly(self):
self.raiseJelly()
def remote_security(self):
self.raiseSecurity()
def remote_deferredJelly(self):
d = defer.Deferred()
d.addCallback(self.raiseJelly)
d.callback(None)
return d
def remote_deferredSecurity(self):
d = defer.Deferred()
d.addCallback(self.raiseSecurity)
d.callback(None)
return d
def raiseJelly(self, results=None):
raise JellyError("I'm jellyable!")
def raiseSecurity(self, results=None):
raise SecurityError("I'm secure!")
class PBConnTestCase(unittest.TestCase):
unsafeTracebacks = 0
def setUp(self):
self._setUpServer()
self._setUpClient()
def _setUpServer(self):
self.serverFactory = pb.PBServerFactory(SimpleRoot())
self.serverFactory.unsafeTracebacks = self.unsafeTracebacks
self.serverPort = reactor.listenTCP(0, self.serverFactory, interface="127.0.0.1")
def _setUpClient(self):
portNo = self.serverPort.getHost().port
self.clientFactory = pb.PBClientFactory()
self.clientConnector = reactor.connectTCP("127.0.0.1", portNo, self.clientFactory)
def tearDown(self):
return defer.gatherResults([
self._tearDownServer(),
self._tearDownClient()])
def _tearDownServer(self):
return defer.maybeDeferred(self.serverPort.stopListening)
def _tearDownClient(self):
self.clientConnector.disconnect()
return defer.succeed(None)
class PBFailureTest(PBConnTestCase):
compare = unittest.TestCase.assertEquals
def testPBFailures(self):
d = self.clientFactory.getRootObject()
d.addCallback(self.connected)
d.addCallback(self.cleanupLoggedErrors)
return d
def testCopiedFailureLogging(self):
d = self.clientFactory.getRootObject()
def connected(rootObj):
return rootObj.callRemote('die')
d.addCallback(connected)
def exception(failure):
log.err(failure)
errs = log.flushErrors(DieError)
self.assertEquals(len(errs), 2)
d.addErrback(exception)
return d
def addFailingCallbacks(self, remoteCall, expectedResult, eb):
remoteCall.addCallbacks(self.success, eb,
callbackArgs=(expectedResult,))
return remoteCall
##
# callbacks
##
def cleanupLoggedErrors(self, ignored):
errors = log.flushErrors(PoopError, FailError, DieError,
AttributeError, JellyError, SecurityError)
self.assertEquals(len(errors), 6)
return ignored
def connected(self, persp):
methods = (('poop', 42, self.failurePoop),
('fail', 420, self.failureFail),
('die', 4200, self.failureDie),
('nosuch', 42000, self.failureNoSuch),
('jelly', 43, self.failureJelly),
('security', 430, self.failureSecurity),
('deferredJelly', 4300, self.failureDeferredJelly),
('deferredSecurity', 43000, self.failureDeferredSecurity))
return defer.gatherResults([
self.addFailingCallbacks(persp.callRemote(meth), result, eb)
for (meth, result, eb) in methods])
def success(self, result, expectedResult):
self.assertEquals(result, expectedResult)
return result
def failurePoop(self, fail):
fail.trap(PoopError)
self.compare(fail.traceback, "Traceback unavailable\n")
return 42
def failureFail(self, fail):
fail.trap(FailError)
self.compare(fail.traceback, "Traceback unavailable\n")
return 420
def failureDie(self, fail):
fail.trap(DieError)
self.compare(fail.traceback, "Traceback unavailable\n")
return 4200
def failureNoSuch(self, fail):
fail.trap(pb.NoSuchMethod)
self.compare(fail.traceback, "Traceback unavailable\n")
return 42000
def failureJelly(self, fail):
fail.trap(JellyError)
self.failIf(isinstance(fail.type, str))
self.failUnless(isinstance(fail.value, fail.type))
return 43
def failureSecurity(self, fail):
fail.trap(SecurityError)
self.failIf(isinstance(fail.type, str))
self.failUnless(isinstance(fail.value, fail.type))
return 430
def failureDeferredJelly(self, fail):
fail.trap(JellyError)
self.failIf(isinstance(fail.type, str))
self.failUnless(isinstance(fail.value, fail.type))
return 4300
def failureDeferredSecurity(self, fail):
fail.trap(SecurityError)
self.failIf(isinstance(fail.type, str))
self.failUnless(isinstance(fail.value, fail.type))
return 43000
class PBFailureTestUnsafe(PBFailureTest):
compare = unittest.TestCase.failIfEquals
unsafeTracebacks = 1
|
gpl-2.0
|
Zyell/home-assistant
|
tests/components/binary_sensor/test_template.py
|
3
|
4085
|
"""The tests for the Template Binary sensor platform."""
import unittest
from unittest import mock
from homeassistant.const import EVENT_STATE_CHANGED
from homeassistant.components.binary_sensor import template
from homeassistant.exceptions import TemplateError
from tests.common import get_test_home_assistant
class TestBinarySensorTemplate(unittest.TestCase):
"""Test for Binary sensor template platform."""
@mock.patch.object(template, 'BinarySensorTemplate')
def test_setup(self, mock_template):
""""Test the setup."""
config = {
'sensors': {
'test': {
'friendly_name': 'virtual thingy',
'value_template': '{{ foo }}',
'sensor_class': 'motion',
},
}
}
hass = mock.MagicMock()
add_devices = mock.MagicMock()
result = template.setup_platform(hass, config, add_devices)
self.assertTrue(result)
mock_template.assert_called_once_with(hass, 'test', 'virtual thingy',
'motion', '{{ foo }}')
add_devices.assert_called_once_with([mock_template.return_value])
def test_setup_no_sensors(self):
""""Test setup with no sensors."""
config = {}
result = template.setup_platform(None, config, None)
self.assertFalse(result)
def test_setup_invalid_device(self):
""""Test the setup with invalid devices."""
config = {
'sensors': {
'foo bar': {},
},
}
result = template.setup_platform(None, config, None)
self.assertFalse(result)
def test_setup_invalid_sensor_class(self):
""""Test setup with invalid sensor class."""
config = {
'sensors': {
'test': {
'value_template': '{{ foo }}',
'sensor_class': 'foobarnotreal',
},
},
}
result = template.setup_platform(None, config, None)
self.assertFalse(result)
def test_setup_invalid_missing_template(self):
""""Test setup with invalid and missing template."""
config = {
'sensors': {
'test': {
'sensor_class': 'motion',
},
},
}
result = template.setup_platform(None, config, None)
self.assertFalse(result)
def test_attributes(self):
""""Test the attributes."""
hass = mock.MagicMock()
vs = template.BinarySensorTemplate(hass, 'parent', 'Parent',
'motion', '{{ 1 > 1 }}')
self.assertFalse(vs.should_poll)
self.assertEqual('motion', vs.sensor_class)
self.assertEqual('Parent', vs.name)
vs.update()
self.assertFalse(vs.is_on)
vs._template = "{{ 2 > 1 }}"
vs.update()
self.assertTrue(vs.is_on)
def test_event(self):
""""Test the event."""
hass = get_test_home_assistant()
vs = template.BinarySensorTemplate(hass, 'parent', 'Parent',
'motion', '{{ 1 > 1 }}')
vs.update_ha_state()
hass.pool.block_till_done()
with mock.patch.object(vs, 'update') as mock_update:
hass.bus.fire(EVENT_STATE_CHANGED)
hass.pool.block_till_done()
try:
assert mock_update.call_count == 1
finally:
hass.stop()
@mock.patch('homeassistant.helpers.template.render')
def test_update_template_error(self, mock_render):
""""Test the template update error."""
hass = mock.MagicMock()
vs = template.BinarySensorTemplate(hass, 'parent', 'Parent',
'motion', '{{ 1 > 1 }}')
mock_render.side_effect = TemplateError('foo')
vs.update()
mock_render.side_effect = TemplateError(
"UndefinedError: 'None' has no attribute")
vs.update()
|
mit
|
kingvuplus/xrd-alliance
|
lib/python/Screens/Downloads.py
|
1
|
23946
|
from Plugins.Plugin import PluginDescriptor
from Components.Sources.List import List
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest
from enigma import eTimer, quitMainloop, RT_HALIGN_LEFT, RT_VALIGN_CENTER, eListboxPythonMultiContent, eListbox, gFont, getDesktop, ePicLoad
from Tools.LoadPixmap import LoadPixmap
from enigma import getDesktop
import urllib
from urllib2 import urlopen
from Components.MenuList import MenuList
from Components.Label import Label
from Tools.Directories import fileExists, resolveFilename, SCOPE_SKIN_IMAGE, SCOPE_LANGUAGE, SCOPE_PLUGINS
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Components.ActionMap import NumberActionMap
from Components.Input import Input
from Components.Pixmap import Pixmap
from Components.FileList import FileList
from Screens.ChoiceBox import ChoiceBox
from Plugins.Plugin import PluginDescriptor
from Components.ActionMap import ActionMap
from Screens.InputBox import InputBox
#from Ipkinstall import Ipkinstall
from twisted.web.client import getPage, downloadPage
import os
from Components.Button import Button
from Components.Task import Task, Job, job_manager as JobManager, Condition
from Screens.Console import Console
##################################################
# Coded by [email protected], September 2009 #
##################################################
class RSList(MenuList):
def __init__(self, list):
MenuList.__init__(self, list, False, eListboxPythonMultiContent)
self.l.setItemHeight(30)
self.l.setFont(0, gFont("Regular", 20))
##############################################################################
def RSListEntry(download, state):
res = [(download)]
res.append(MultiContentEntryText(pos=(40, 0), size=(620, 25), font=0, text=download))
if state == 0:
res.append(MultiContentEntryPixmapAlphaTest(pos=(5, 6), size=(25,25), png=LoadPixmap(cached=True, desktop=getDesktop(0), path=resolveFilename(SCOPE_SKIN_IMAGE, "skin_default/buttons/button_green.png"))))
else:
res.append(MultiContentEntryPixmapAlphaTest(pos=(5, 6), size=(25,25), png=LoadPixmap(cached=True, desktop=getDesktop(0), path=resolveFilename(SCOPE_SKIN_IMAGE, "skin_default/buttons/button_red.png"))))
print "res =", res
return res
##############################################################################
class Downloads(Screen):
skin = """
<screen position="center,center" size="520,400" title=" " >
<!--widget name="text" position="0,0" size="550,25" font="Regular;20" /-->
<widget name="list" position="10,40" size="500,350" scrollbarMode="showOnDemand" />
<!--widget name="pixmap" position="200,0" size="190,250" /-->
<!--eLabel position="70,100" zPosition="-1" size="100,69" backgroundColor="#222222" /-->
<widget name="info" position="80,80" zPosition="4" size="350,300" font="Regular;18" foregroundColor="#ffffff" transparent="1" halign="left" valign="center" />
</screen>"""
def __init__(self, session):
self.skin = Downloads.skin
Screen.__init__(self, session)
self["list"] = MenuList([])
self["info"] = Label()
self["actions"] = NumberActionMap(["WizardActions", "InputActions", "ColorActions", "DirectionActions"],
{
"ok": self.okClicked,
"back": self.close,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal,
"red": self.close,
"green": self.okClicked,
}, -1)
self["key_red"] = Button(_("Cancel"))
self["key_green"] = Button(_("Select"))
title = "Download Categories"
self["title"] = Button(title)
self.icount = 0
self.errcount = 0
self.onLayoutFinish.append(self.openTest)
def openTest(self):
xurl = "http://www.et-view-support.com/addons/XTA-team/" + "list.txt"
print "xurl =", xurl
getPage(xurl).addCallback(self.gotPage).addErrback(self.getfeedError)
def gotPage(self, html):
# try:
print "html = ", html
self.data = []
icount = 0
self.data = html.splitlines()
print "self.data =", self.data
self["info"].setText("")
self["list"].setList(self.data)
# except Exception, error:
# print "[TDw]: Could not download HTTP Page\n" + str(error)
def getfeedError(self, error=""):
error = str(error)
print "Download error =", error
def okClicked(self):
if self.errcount == 1:
self.close()
else:
sel = self["list"].getSelectionIndex()
addon = self.data[sel]
self.session.open(Getipklist, addon)
def keyLeft(self):
self["text"].left()
def keyRight(self):
self["text"].right()
def keyNumberGlobal(self, number):
print "pressed", number
self["text"].number(number)
class Getipklist(Screen):
skin = """
<screen position="center,center" size="800,500" title=" " >
<widget name="text" position="100,20" size="200,30" font="Regular;20" halign="left" />
<ePixmap position="300,25" zPosition="2" size="140,40" pixmap="skin_default/buttons/button_red.png" transparent="1" alphatest="on" />
<widget name="list" position="50,80" size="730,300" scrollbarMode="showOnDemand" />
<ePixmap name="red" position="0,460" zPosition="2" size="140,40" pixmap="skin_default/buttons/button_red.png" transparent="1" alphatest="on" />
<ePixmap name="green" position="140,460" zPosition="2" size="140,40" pixmap="skin_default/buttons/button_green.png" transparent="1" alphatest="on" />
<widget name="key_red" position="0,450" size="140,40" valign="center" halign="center" zPosition="4" foregroundColor="#ffffff" font="Regular;20" transparent="1" shadowColor="#25062748" shadowOffset="-2,-2" />
<widget name="key_green" position="140,450" size="140,40" valign="center" halign="center" zPosition="4" foregroundColor="#ffffff" font="Regular;20" transparent="1" shadowColor="#25062748" shadowOffset="-2,-2" />
<eLabel position="70,100" zPosition="-1" size="100,69" backgroundColor="#222222" />
<widget name="info" position="100,230" zPosition="4" size="300,25" font="Regular;18" foregroundColor="#ffffff" transparent="1" halign="center" valign="center" />
</screen>"""
def __init__(self, session, addon):
# def __init__(self, session):
self.addon = addon
self.skin = Getipklist.skin
Screen.__init__(self, session)
self.list = []
self["text"] = Label()
self["text"].setText(_("Already installed"))
self["list"] = List(self.list)
self["list"] = RSList([])
self["info"] = Label()
self["actions"] = NumberActionMap(["WizardActions", "InputActions", "ColorActions", "DirectionActions"],
{
"ok": self.okClicked,
"back": self.close,
"red": self.close,
"green": self.okClicked,
}, -1)
self["key_red"] = Button(_("Cancel"))
self["key_green"] = Button(_("Select"))
# title = addon + " List"
title = "Skins List"
self["title"] = Button(title)
self.icount = 0
self.names = []
self.onLayoutFinish.append(self.openTest)
def openTest(self):
self["info"].setText("Downloading list...")
testno = 1
xurl = "http://www.et-view-support.com/addons/XTA-team/" + self.addon + "/list.txt"
print "xurl =", xurl
getPage(xurl).addCallback(self.gotPage).addErrback(self.getfeedError)
def gotPage(self, html):
# try:
print "html = ", html
self.data = []
icount = 0
self.data = html.splitlines()
# print "self.data =", self.data
list = []
for line in self.data:
ipkname = self.data[icount]
print "gotPage icount, ipk name =", icount, ipkname
ipos = ipkname.find("_")
remname = ipkname[:ipos]
state = self.getstate(ipkname)
print "gotPage state, remname = ", state, remname
# state = 0 not installed 1 installed
list.append(RSListEntry(remname, state))
icount = icount+1
self["list"].setList(list)
print 'self["list"] A =', self["list"]
self["info"].setText("")
def getfeedError(self, error=""):
error = str(error)
print "Download error =", error
def getstate(self, ipkname):
item = "/etc/ipkinst/" + ipkname
if os.path.exists(item):
state = 1
return state
else:
state = 0
return state
def okClicked(self):
print "Here in okClicked A"
sel = self["list"].getSelectionIndex()
ipk = self.data[sel]
# addon = self.addon
ipkinst = Getipk(self.session, ipk, self.addon)
# ipkinst = Getipk(self.session, ipk)
ipkinst.openTest()
def keyLeft(self):
self["text"].left()
def keyRight(self):
self["text"].right()
def keyNumberGlobal(self, number):
print "pressed", number
self["text"].number(number)
class Getipk(Screen):
skin = """
<screen position="center,center" size="800,500" title="Play Options" >
<!--widget name="text" position="0,0" size="550,25" font="Regular;20" /-->
<widget name="list" position="10,20" size="750,350" scrollbarMode="showOnDemand" />
<!--widget name="pixmap" position="200,0" size="190,250" /-->
<eLabel position="70,100" zPosition="-1" size="100,69" backgroundColor="#222222" />
<widget name="info" position="50,50" zPosition="4" size="500,400" font="Regular;22" foregroundColor="#ffffff" transparent="1" halign="left" valign="top" />
<ePixmap name="red" position="0,450" zPosition="2" size="140,40" pixmap="skin_default/buttons/red.png" transparent="1" alphatest="on" />
<ePixmap name="green" position="140,450" zPosition="2" size="140,40" pixmap="skin_default/buttons/green.png" transparent="1" alphatest="on" />
<ePixmap name="yellow" position="280,450" zPosition="2" size="140,40" pixmap="skin_default/buttons/yellow.png" transparent="1" alphatest="on" />
<!--ePixmap name="blue" position="420,450" zPosition="2" size="140,40" pixmap="skin_default/buttons/blue.png" transparent="1" alphatest="on" /-->
<widget name="key_red" position="0,450" size="140,40" valign="center" halign="center" zPosition="4" foregroundColor="#ffffff" font="Regular;20" transparent="1" shadowColor="#25062748" shadowOffset="-2,-2" />
<widget name="key_green" position="140,450" size="140,40" valign="center" halign="center" zPosition="4" foregroundColor="#ffffff" font="Regular;20" transparent="1" shadowColor="#25062748" shadowOffset="-2,-2" />
<!--widget name="key_yellow" position="280,450" size="140,40" valign="center" halign="center" zPosition="4" foregroundColor="#ffffff" font="Regular;20" transparent="1" shadowColor="#25062748" shadowOffset="-2,-2" />
<widget name="key_blue" position="420,450" size="140,50" valign="center" halign="center" zPosition="4" foregroundColor="#ffffff" font="Regular;20" transparent="1" shadowColor="#25062748" shadowOffset="-2,-2" /-->
</screen>"""
def __init__(self, session, ipk, addon):
# def __init__(self, session, ipk):
self.addon = addon
Screen.__init__(self, session)
self.skin = Getipk.skin
title = "Addon Install"
self.setTitle(title)
self["list"] = MenuList([])
self["info"] = Label()
self["key_red"] = Button(_("Exit"))
self["key_green"] = Button(_("Install"))
# self["key_yellow"] = Button(_("Install"))
# self["key_blue"] = Button(_("Stop Download"))
self["setupActions"] = ActionMap(["SetupActions", "ColorActions", "TimerEditActions"],
{
"red": self.close,
"green": self.okClicked,
"yellow": self.install,
# "blue": self.stopdl,
"cancel": self.cancel,
"ok": self.close,
}, -2)
print "Getipk : ipk =", ipk
self.icount = 0
self.ipk = ipk
# self.addon = addon
self.onLayoutFinish.append(self.openTest)
txt = "You have selected\n\n" + ipk + "\n\n\nPlease press Download"
self["info"].setText(txt)
self.srefOld = self.session.nav.getCurrentlyPlayingServiceReference()
self.onLayoutFinish.append(self.openTest)
def openTest(self):
if not os.path.exists("/etc/ipkinst"):
cmd = "mkdir -p /etc/ipkinst"
os.system(cmd)
xurl1 = "http://www.et-view-support.com/addons/XTA-team/" + self.addon + "/"
print "xurl1 =", xurl1
xurl2 = xurl1 + self.ipk
print "xurl2 =", xurl2
xdest = "/tmp/" + self.ipk
print "xdest =", xdest
self.cmd1 = 'wget -O "' + xdest + '" "' + xurl2 + '"'
self.cmd2 = "opkg install --force-overwrite /tmp/" + self.ipk
self.cmd3 = "touch /etc/ipkinst/" + self.ipk + " &"
self.okClicked()
def okClicked(self):
plug = self.ipk
title = _("Installing addon %s" %(plug))
cmd = self.cmd1 + " && " + self.cmd2 + " && " + self.cmd3
self.session.open(Console,_(title),[cmd])
# self.endinstall()
def LastJobView(self):
currentjob = None
for job in JobManager.getPendingJobs():
currentjob = job
if currentjob is not None:
self.session.open(JobView, currentjob)
def install(self):
cmd = "opkg install --force-overwrite /tmp/" + self.ipk + ">/tmp/ipk.log"
print "cmd =", cmd
title = _("Installing addon %s" %(plug))
self.session.open(Console,_(title),[cmd])
# self.viewLog()
self.endinstall()
def viewLog(self):
self["info"].setText("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n Press Exit to continue...")
# self["info"].setText(" ")
if os.path.isfile("/tmp/ipk.log")is not True :
cmd = "touch /tmp/ipk.log"
os.system(cmd)
else:
myfile = file(r"/tmp/ipk.log")
icount = 0
data = []
for line in myfile.readlines():
data.append(icount)
print line
num = len(line)
data[icount] = (line[:-1])
print data[icount]
icount = icount + 1
self["list"].setList(data)
self.endinstall()
def endinstall(self):
path="/tmp"
tmplist = []
ipkname = 0
tmplist=os.listdir(path)
print "files in /tmp", tmplist
icount = 0
for name in tmplist:
nipk = tmplist[icount]
if (nipk[-3:]=="ipk"):
ipkname = nipk
icount = icount+1
if ipkname != 0:
print "endinstall ipk name =", ipkname
ipos = ipkname.find("_")
remname = ipkname[:ipos]
print "endinstall remname =", remname
f=open('/etc/ipklist_installed', 'a')
f1= remname + "\n"
f.write(f1)
cmd = "rm /tmp/*.ipk"
os.system(cmd)
def cancel(self):
self.close()
def keyLeft(self):
self["text"].left()
def keyRight(self):
self["text"].right()
def keyNumberGlobal(self, number):
print "pressed", number
self["text"].number(number)
class Getipk2(Screen):
skin = """
<screen position="center,center" size="600,470" title="Install status" >
<!--widget name="text" position="0,0" size="550,25" font="Regular;20" /-->
<widget name="list" position="10,0" size="630,400" scrollbarMode="showOnDemand" />
<!--widget name="pixmap" position="200,0" size="190,250" /-->
<eLabel position="70,100" zPosition="-1" size="100,69" backgroundColor="#222222" />
<widget name="info" position="100,420" zPosition="4" size="300,35" font="Regular;22" foregroundColor="#ffffff" transparent="1" halign="center" valign="center" />
</screen>"""
def __init__(self, session, ipk, addon):
self.skin = Getipk.skin
Screen.__init__(self, session)
self["list"] = MenuList([])
self["info"] = Label()
self["actions"] = ActionMap(["OkCancelActions"], {"ok": self.okClicked, "cancel": self.close}, -1)
self.icount = 0
self.ipk = ipk
self.addon = addon
self.onLayoutFinish.append(self.openTest)
def openTest(self):
self["info"].setText("Downloading and installing...")
device = open("/proc/stb/info/model", "r").readline().strip()
if device == "dm800":
xurl1 = "http://www.turk-dreamworld.com/bayraklar/Receiverler/Dreambox/TDW/e2/addons/oe2-dm800hd/" + self.addon + "/"
else:
xurl1 = "http://www.turk-dreamworld.com/bayraklar/Receiverler/Dreambox/TDW/e2/addons/oe2/" + self.addon + "/"
print "xurl1 =", xurl1
xurl2 = xurl1 + self.ipk
print "xurl2 =", xurl2
xdest = "/tmp/" + self.ipk
print "xdest =", xdest
downloadPage(xurl2, xdest).addCallback(self.gotPage)
def gotPage(self,txt=""):
print "in gotPage"
self["info"].setText("")
cmd = "opkg install --force-overwrite /tmp/" + self.ipk + ">/tmp/ipk.log"
print "cmd =", cmd
os.system(cmd)
self.viewLog()
def getfeedError(self, error=""):
error = str(error)
print "Download error =", error
def keyLeft(self):
self["text"].left()
def keyRight(self):
self["text"].right()
def okClicked(self):
self.close()
def keyNumberGlobal(self, number):
print "pressed", number
self["text"].number(number)
def viewLog(self):
self["info"].setText("Press OK to continue...")
if os.path.isfile("/tmp/ipk.log")is not True :
cmd = "touch /tmp/ipk.log"
os.system(cmd)
else:
myfile = file(r"/tmp/ipk.log")
icount = 0
data = []
for line in myfile.readlines():
data.append(icount)
print line
num = len(line)
data[icount] = (line[:-1])
print data[icount]
icount = icount + 1
self["list"].setList(data)
self.endinstall()
def endinstall(self):
path="/tmp"
tmplist = []
ipkname = 0
tmplist=os.listdir(path)
print "files in /tmp", tmplist
icount = 0
for name in tmplist:
nipk = tmplist[icount]
if (nipk[-3:]=="ipk"):
ipkname = nipk
icount = icount+1
if ipkname != 0:
print "ipk name =", ipkname
ipos = ipkname.find("_")
remname = ipkname[:ipos]
print "remname =", remname
f=open('/etc/ipklist_installed', 'a')
f1= remname + "\n"
f.write(f1)
cmd = "rm /tmp/*.ipk"
os.system(cmd)
class downloadJob(Job):
def __init__(self, toolbox, cmdline, filename, filetitle):
Job.__init__(self, _("Downloading"))
self.toolbox = toolbox
self.retrycount = 0
downloadTask(self, cmdline, filename, filetitle)
def retry(self):
assert self.status == self.FAILED
self.retrycount += 1
self.restart()
class downloadTask(Task):
ERROR_CORRUPT_FILE, ERROR_RTMP_ReadPacket, ERROR_SEGFAULT, ERROR_SERVER, ERROR_UNKNOWN = range(5)
def __init__(self, job, cmdline, filename, filetitle):
Task.__init__(self, job, filetitle)
# self.postconditions.append(downloadTaskPostcondition())
self.setCmdline(cmdline)
self.filename = filename
self.toolbox = job.toolbox
self.error = None
self.lasterrormsg = None
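# processOutput() scrapes a percentage out of the downloader's progress
# lines (text ending in "%)" or containing "%") and stores it in
# self.progress so the job manager can display download progress.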
def processOutput(self, data):
try:
if data.endswith('%)'):
startpos = data.rfind("sec (")+5
if startpos and startpos != -1:
self.progress = int(float(data[startpos:-4]))
elif data.find('%') != -1:
tmpvalue = data[:data.find("%")]
tmpvalue = tmpvalue[tmpvalue.rfind(" "):].strip()
tmpvalue = tmpvalue[tmpvalue.rfind("(")+1:].strip()
self.progress = int(float(tmpvalue))
else:
Task.processOutput(self, data)
except Exception, errormsg:
print "Error processOutput: " + str(errormsg)
Task.processOutput(self, data)
def processOutputLine(self, line):
self.error = self.ERROR_SERVER
def afterRun(self):
pass
class downloadTaskPostcondition(Condition):
RECOVERABLE = True
def check(self, task):
if task.returncode == 0 or task.error is None:
return True
else:
return False
def getErrorMessage(self, task):
return {
task.ERROR_CORRUPT_FILE: _("Video Download Failed! Corrupted Download File: %s" % task.lasterrormsg),
task.ERROR_RTMP_ReadPacket: _("Video Download Failed! Could not read RTMP-Packet: %s" % task.lasterrormsg),
task.ERROR_SEGFAULT: _("Video Download Failed! Segmentation fault: %s" % task.lasterrormsg),
# task.ERROR_SERVER: _("Download Failed! Server error: %s" % task.lasterrormsg),
task.ERROR_SERVER: _("Download Failed! Server error:"),
task.ERROR_UNKNOWN: _("Download Failed! Unknown Error: %s" % task.lasterrormsg)
}[task.error]
def main(session, **kwargs):
session.open(Downloads)
def Plugins(**kwargs):
return PluginDescriptor(name="PluginDownload", description="Download/install plugins ", where = PluginDescriptor.WHERE_PLUGINMENU, fnc=main)
|
gpl-2.0
|
colford/sirf_receiver_frontend
|
sirfcontrol/sirfui.py
|
1
|
7631
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 24 15:59:29 2017
@author: CFord
"""
import sys, math, signal
from PyQt4 import QtGui, QtCore
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from collections import deque
from sirfcontrol.sirf import SirfMessageReader
# Globals to begin with
port = "COM7"
baud = 38400
class SirfMeasuredTracker(QtGui.QWidget):
def __init__(self):
super(SirfMeasuredTracker, self).__init__()
self.width = 470
self.height = 350
self.message = None
self.pre_message = None
self.cno = []
self.initUI()
def initUI(self):
self.setGeometry(300, 300, self.width, self.height)
self.setWindowTitle('SiRF Measurement Tracker')
self.show()
def paintEvent(self, e):
qp = QtGui.QPainter()
qp.begin(self)
self.drawTracker(qp)
qp.end()
def drawHeaders(self,qp):
font_metrics = QWidget.fontMetrics(self)
font_height = font_metrics.height()
qp.rotate(-90)
x = -130
y = 15
qp.drawText( x, y, "SVID" )
y += font_height + 15
qp.drawText( x, y, "Azimuth" )
y += font_height + 21
qp.drawText( x, y, "Elevation" )
y += font_height + 20
qp.drawText( x, y, "(Re)Acquistion success" )
y += font_height + 4
qp.drawText( x, y, "Carrier phase vaild" )
y += font_height + 4
qp.drawText( x, y, "Bit sync complete" )
y += font_height + 4
qp.drawText( x, y, "Subframe sync complete" )
y += font_height + 4
qp.drawText( x, y, "Carrier pullin complete" )
max_width = font_metrics.width("Carrier pullin complete")
y += font_height + 4
qp.drawText( x, y, "Code locked" )
y += font_height + 4
qp.drawText( x, y, "Acquistion failed" )
y += font_height + 4
qp.drawText( x, y, "Ephemeris data avail" )
qp.rotate(90)
x -= 120
y = max_width + 24
qp.drawText( -x, y, "Carrier to Noise (dB-Hz)" )
y = max_width - 24
qp.drawText( -x, y, "GPS Week No:" )
y = max_width
qp.drawText( -x, y, "GPS TOW (s):" )
def drawLED(self,qp,x,y,height,on):
if on:
qp.fillRect( x, y-height, 5, height, QColor("green" ))
else:
qp.drawRect( x, y-height, 5, height )
def drawData(self,qp):
if self.message == None or self.message == self.pre_message:
return
qp.drawText( 325, 83, str(self.message[1]))
qp.drawText( 325, 106, str(self.message[2]/100))
font_metrics = QWidget.fontMetrics(self)
font_height = font_metrics.height()
box_height = font_height/2+1
startx = 5
y = 150
# Assume channel number fixed
if len(self.cno) == 0:
for i in range(0,self.message[3]):
self.cno.append(deque(10*[0]*10,10*10))
# Add in the new C/No data
for c in range(0,self.message[3]):
for i in range(0,9):
self.cno[c].append(self.message[8+i+(13*c)])
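# One row per tracked channel: SVID, azimuth, elevation, eight status-bit
# LEDs (matching the labels drawn in drawHeaders) and a short C/No history
# strip chart built from the deque filled above.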
for channels in range(0,self.message[3]):
qp.setPen(QColor("black"))
x = startx
index = (channels+4)+(12*channels)
qp.drawText( x, y, str(self.message[index]))
x += 29
qp.drawText( x, y, str(self.message[index+1]/(2/3)))
x += 35
qp.drawText( x, y, str(self.message[index+2]/2))
x += 35
self.drawLED(qp,x,y,box_height,self.message[index+3] & 0x01)
x += font_height + 4
self.drawLED(qp,x,y,box_height,self.message[index+3] & 0x02)
x += font_height + 4
self.drawLED(qp,x,y,box_height,self.message[index+3] & 0x04)
x += font_height + 4
self.drawLED(qp,x,y,box_height,self.message[index+3] & 0x08)
x += font_height + 4
self.drawLED(qp,x,y,box_height,self.message[index+3] & 0x10)
x += font_height + 4
self.drawLED(qp,x,y,box_height,self.message[index+3] & 0x20)
x += font_height + 4
self.drawLED(qp,x,y,box_height,self.message[index+3] & 0x40)
x += font_height + 4
self.drawLED(qp,x,y,box_height,self.message[index+3] & 0x80)
x += font_height + 16
scale = max(self.cno[channels])
for i in range(0,len(self.cno[channels])):
if self.cno[channels][i] == 0:
qp.setPen(QColor("red"))
qp.drawLine(x, y, x, y-1)
else:
qp.setPen(QColor("green"))
qp.drawLine(x, y, x, y-int(box_height/scale*self.cno[channels][i]))
x += 2
y += font_height + 2
self.pre_message = self.message
def drawTracker(self,qp):
self.drawHeaders(qp)
self.drawData(qp)
def newMessage(self,data):
self.message = data.data
self.repaint()
class SirfMessageProcessor(QThread):
def __init__(self,parent = None):
super(SirfMessageProcessor, self).__init__()
self.reader = SirfMessageReader(port,baud)
self.exiting = False
self.start()
def __del__(self):
self.exiting = True
self.wait()
def coldStart(self):
self.reader.cold_start()
def warmStart(self):
self.reader.warm_start()
def hotStart(self):
self.reader.hot_start()
def swPoll(self):
self.reader.sw_poll()
def run(self):
while not self.exiting:
self.message = self.reader.read_message()
if self.message.id == 4:
self.emit(SIGNAL("messageID4"), self.message.decoded)
class SirfControl(QtGui.QWidget):
def __init__(self, connection, parent=None):
super(SirfControl, self).__init__(parent)
self.connection = connection
self.swversion = QtGui.QPushButton("Software version")
self.swversion.clicked.connect(self.swPollClicked)
self.coldstart = QtGui.QPushButton("Cold start")
self.coldstart.clicked.connect(self.coldStartClicked)
self.warmstart = QtGui.QPushButton("Warm start")
self.warmstart.clicked.connect(self.warmStartClicked)
self.hotstart = QtGui.QPushButton("Hot start")
self.hotstart.clicked.connect(self.hotStartClicked)
layout = QtGui.QVBoxLayout()
layout.addWidget(self.swversion)
layout.addWidget(self.coldstart)
layout.addWidget(self.warmstart)
layout.addWidget(self.hotstart)
self.setLayout(layout)
self.setWindowTitle('SiRF Control')
self.show()
def coldStartClicked(self):
self.connection.coldStart()
def warmStartClicked(self):
self.connection.warmStart()
def hotStartClicked(self):
self.connection.hotStart()
def swPollClicked(self):
self.connection.swPoll()
class Sirf(object):
def __init__(self):
signal.signal(signal.SIGINT, signal.SIG_DFL)
app = QtGui.QApplication(sys.argv)
sirf_ui = SirfMeasuredTracker()
sirf_processor = SirfMessageProcessor()
sirf_control = SirfControl(sirf_processor)
QtCore.QObject.connect(sirf_processor, SIGNAL("messageID4"), sirf_ui.newMessage)
sirf_ui.show()
app.exec_()
|
gpl-3.0
|
pigeonflight/strider-plone
|
docker/appengine/lib/django-1.4/tests/modeltests/invalid_models/invalid_models/models.py
|
33
|
26337
|
#encoding=utf-8
"""
26. Invalid models
This example exists purely to point out errors in models.
"""
from django.db import connection, models
class FieldErrors(models.Model):
charfield = models.CharField()
charfield2 = models.CharField(max_length=-1)
charfield3 = models.CharField(max_length="bad")
decimalfield = models.DecimalField()
decimalfield2 = models.DecimalField(max_digits=-1, decimal_places=-1)
decimalfield3 = models.DecimalField(max_digits="bad", decimal_places="bad")
decimalfield4 = models.DecimalField(max_digits=9, decimal_places=10)
decimalfield5 = models.DecimalField(max_digits=10, decimal_places=10)
filefield = models.FileField()
choices = models.CharField(max_length=10, choices='bad')
choices2 = models.CharField(max_length=10, choices=[(1,2,3),(1,2,3)])
index = models.CharField(max_length=10, db_index='bad')
field_ = models.CharField(max_length=10)
nullbool = models.BooleanField(null=True)
class Target(models.Model):
tgt_safe = models.CharField(max_length=10)
clash1 = models.CharField(max_length=10)
clash2 = models.CharField(max_length=10)
clash1_set = models.CharField(max_length=10)
class Clash1(models.Model):
src_safe = models.CharField(max_length=10)
foreign = models.ForeignKey(Target)
m2m = models.ManyToManyField(Target)
class Clash2(models.Model):
src_safe = models.CharField(max_length=10)
foreign_1 = models.ForeignKey(Target, related_name='id')
foreign_2 = models.ForeignKey(Target, related_name='src_safe')
m2m_1 = models.ManyToManyField(Target, related_name='id')
m2m_2 = models.ManyToManyField(Target, related_name='src_safe')
class Target2(models.Model):
clash3 = models.CharField(max_length=10)
foreign_tgt = models.ForeignKey(Target)
clashforeign_set = models.ForeignKey(Target)
m2m_tgt = models.ManyToManyField(Target)
clashm2m_set = models.ManyToManyField(Target)
class Clash3(models.Model):
src_safe = models.CharField(max_length=10)
foreign_1 = models.ForeignKey(Target2, related_name='foreign_tgt')
foreign_2 = models.ForeignKey(Target2, related_name='m2m_tgt')
m2m_1 = models.ManyToManyField(Target2, related_name='foreign_tgt')
m2m_2 = models.ManyToManyField(Target2, related_name='m2m_tgt')
class ClashForeign(models.Model):
foreign = models.ForeignKey(Target2)
class ClashM2M(models.Model):
m2m = models.ManyToManyField(Target2)
class SelfClashForeign(models.Model):
src_safe = models.CharField(max_length=10)
selfclashforeign = models.CharField(max_length=10)
selfclashforeign_set = models.ForeignKey("SelfClashForeign")
foreign_1 = models.ForeignKey("SelfClashForeign", related_name='id')
foreign_2 = models.ForeignKey("SelfClashForeign", related_name='src_safe')
class ValidM2M(models.Model):
src_safe = models.CharField(max_length=10)
validm2m = models.CharField(max_length=10)
# M2M fields are symmetrical by default. Symmetrical M2M fields
# on self don't require a related accessor, so many potential
# clashes are avoided.
validm2m_set = models.ManyToManyField("self")
m2m_1 = models.ManyToManyField("self", related_name='id')
m2m_2 = models.ManyToManyField("self", related_name='src_safe')
m2m_3 = models.ManyToManyField('self')
m2m_4 = models.ManyToManyField('self')
class SelfClashM2M(models.Model):
src_safe = models.CharField(max_length=10)
selfclashm2m = models.CharField(max_length=10)
# Non-symmetrical M2M fields _do_ have related accessors, so
# there is potential for clashes.
selfclashm2m_set = models.ManyToManyField("self", symmetrical=False)
m2m_1 = models.ManyToManyField("self", related_name='id', symmetrical=False)
m2m_2 = models.ManyToManyField("self", related_name='src_safe', symmetrical=False)
m2m_3 = models.ManyToManyField('self', symmetrical=False)
m2m_4 = models.ManyToManyField('self', symmetrical=False)
class Model(models.Model):
"But it's valid to call a model Model."
year = models.PositiveIntegerField() #1960
make = models.CharField(max_length=10) #Aston Martin
name = models.CharField(max_length=10) #DB 4 GT
class Car(models.Model):
colour = models.CharField(max_length=5)
model = models.ForeignKey(Model)
class MissingRelations(models.Model):
rel1 = models.ForeignKey("Rel1")
rel2 = models.ManyToManyField("Rel2")
class MissingManualM2MModel(models.Model):
name = models.CharField(max_length=5)
missing_m2m = models.ManyToManyField(Model, through="MissingM2MModel")
class Person(models.Model):
name = models.CharField(max_length=5)
class Group(models.Model):
name = models.CharField(max_length=5)
primary = models.ManyToManyField(Person, through="Membership", related_name="primary")
secondary = models.ManyToManyField(Person, through="Membership", related_name="secondary")
tertiary = models.ManyToManyField(Person, through="RelationshipDoubleFK", related_name="tertiary")
class GroupTwo(models.Model):
name = models.CharField(max_length=5)
primary = models.ManyToManyField(Person, through="Membership")
secondary = models.ManyToManyField(Group, through="MembershipMissingFK")
class Membership(models.Model):
person = models.ForeignKey(Person)
group = models.ForeignKey(Group)
not_default_or_null = models.CharField(max_length=5)
class MembershipMissingFK(models.Model):
person = models.ForeignKey(Person)
class PersonSelfRefM2M(models.Model):
name = models.CharField(max_length=5)
friends = models.ManyToManyField('self', through="Relationship")
too_many_friends = models.ManyToManyField('self', through="RelationshipTripleFK")
class PersonSelfRefM2MExplicit(models.Model):
name = models.CharField(max_length=5)
friends = models.ManyToManyField('self', through="ExplicitRelationship", symmetrical=True)
class Relationship(models.Model):
first = models.ForeignKey(PersonSelfRefM2M, related_name="rel_from_set")
second = models.ForeignKey(PersonSelfRefM2M, related_name="rel_to_set")
date_added = models.DateTimeField()
class ExplicitRelationship(models.Model):
first = models.ForeignKey(PersonSelfRefM2MExplicit, related_name="rel_from_set")
second = models.ForeignKey(PersonSelfRefM2MExplicit, related_name="rel_to_set")
date_added = models.DateTimeField()
class RelationshipTripleFK(models.Model):
first = models.ForeignKey(PersonSelfRefM2M, related_name="rel_from_set_2")
second = models.ForeignKey(PersonSelfRefM2M, related_name="rel_to_set_2")
third = models.ForeignKey(PersonSelfRefM2M, related_name="too_many_by_far")
date_added = models.DateTimeField()
class RelationshipDoubleFK(models.Model):
first = models.ForeignKey(Person, related_name="first_related_name")
second = models.ForeignKey(Person, related_name="second_related_name")
third = models.ForeignKey(Group, related_name="rel_to_set")
date_added = models.DateTimeField()
class AbstractModel(models.Model):
name = models.CharField(max_length=10)
class Meta:
abstract = True
class AbstractRelationModel(models.Model):
fk1 = models.ForeignKey('AbstractModel')
fk2 = models.ManyToManyField('AbstractModel')
class UniqueM2M(models.Model):
""" Model to test for unique ManyToManyFields, which are invalid. """
unique_people = models.ManyToManyField(Person, unique=True)
class NonUniqueFKTarget1(models.Model):
""" Model to test for non-unique FK target in yet-to-be-defined model: expect an error """
tgt = models.ForeignKey('FKTarget', to_field='bad')
class UniqueFKTarget1(models.Model):
""" Model to test for unique FK target in yet-to-be-defined model: expect no error """
tgt = models.ForeignKey('FKTarget', to_field='good')
class FKTarget(models.Model):
bad = models.IntegerField()
good = models.IntegerField(unique=True)
class NonUniqueFKTarget2(models.Model):
""" Model to test for non-unique FK target in previously seen model: expect an error """
tgt = models.ForeignKey(FKTarget, to_field='bad')
class UniqueFKTarget2(models.Model):
""" Model to test for unique FK target in previously seen model: expect no error """
tgt = models.ForeignKey(FKTarget, to_field='good')
class NonExistingOrderingWithSingleUnderscore(models.Model):
class Meta:
ordering = ("does_not_exist",)
class InvalidSetNull(models.Model):
fk = models.ForeignKey('self', on_delete=models.SET_NULL)
class InvalidSetDefault(models.Model):
fk = models.ForeignKey('self', on_delete=models.SET_DEFAULT)
class UnicodeForeignKeys(models.Model):
"""Foreign keys which can translate to ascii should be OK, but fail if
they're not."""
good = models.ForeignKey(u'FKTarget')
also_good = models.ManyToManyField(u'FKTarget', related_name='unicode2')
# In Python 3 this should become legal, but currently causes unicode errors
# when adding the errors in core/management/validation.py
#bad = models.ForeignKey(u'★')
class PrimaryKeyNull(models.Model):
my_pk_field = models.IntegerField(primary_key=True, null=True)
class OrderByPKModel(models.Model):
"""
Model to test that ordering by pk passes validation.
Refs #8291
"""
name = models.CharField(max_length=100, blank=True)
class Meta:
ordering = ('pk',)
model_errors = """invalid_models.fielderrors: "charfield": CharFields require a "max_length" attribute that is a positive integer.
invalid_models.fielderrors: "charfield2": CharFields require a "max_length" attribute that is a positive integer.
invalid_models.fielderrors: "charfield3": CharFields require a "max_length" attribute that is a positive integer.
invalid_models.fielderrors: "decimalfield": DecimalFields require a "decimal_places" attribute that is a non-negative integer.
invalid_models.fielderrors: "decimalfield": DecimalFields require a "max_digits" attribute that is a positive integer.
invalid_models.fielderrors: "decimalfield2": DecimalFields require a "decimal_places" attribute that is a non-negative integer.
invalid_models.fielderrors: "decimalfield2": DecimalFields require a "max_digits" attribute that is a positive integer.
invalid_models.fielderrors: "decimalfield3": DecimalFields require a "decimal_places" attribute that is a non-negative integer.
invalid_models.fielderrors: "decimalfield3": DecimalFields require a "max_digits" attribute that is a positive integer.
invalid_models.fielderrors: "decimalfield4": DecimalFields require a "max_digits" attribute value that is greater than or equal to the value of the "decimal_places" attribute.
invalid_models.fielderrors: "filefield": FileFields require an "upload_to" attribute.
invalid_models.fielderrors: "choices": "choices" should be iterable (e.g., a tuple or list).
invalid_models.fielderrors: "choices2": "choices" should be a sequence of two-tuples.
invalid_models.fielderrors: "choices2": "choices" should be a sequence of two-tuples.
invalid_models.fielderrors: "index": "db_index" should be either None, True or False.
invalid_models.fielderrors: "field_": Field names cannot end with underscores, because this would lead to ambiguous queryset filters.
invalid_models.fielderrors: "nullbool": BooleanFields do not accept null values. Use a NullBooleanField instead.
invalid_models.clash1: Accessor for field 'foreign' clashes with field 'Target.clash1_set'. Add a related_name argument to the definition for 'foreign'.
invalid_models.clash1: Accessor for field 'foreign' clashes with related m2m field 'Target.clash1_set'. Add a related_name argument to the definition for 'foreign'.
invalid_models.clash1: Reverse query name for field 'foreign' clashes with field 'Target.clash1'. Add a related_name argument to the definition for 'foreign'.
invalid_models.clash1: Accessor for m2m field 'm2m' clashes with field 'Target.clash1_set'. Add a related_name argument to the definition for 'm2m'.
invalid_models.clash1: Accessor for m2m field 'm2m' clashes with related field 'Target.clash1_set'. Add a related_name argument to the definition for 'm2m'.
invalid_models.clash1: Reverse query name for m2m field 'm2m' clashes with field 'Target.clash1'. Add a related_name argument to the definition for 'm2m'.
invalid_models.clash2: Accessor for field 'foreign_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash2: Accessor for field 'foreign_1' clashes with related m2m field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash2: Reverse query name for field 'foreign_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash2: Reverse query name for field 'foreign_1' clashes with related m2m field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash2: Accessor for field 'foreign_2' clashes with related m2m field 'Target.src_safe'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash2: Reverse query name for field 'foreign_2' clashes with related m2m field 'Target.src_safe'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash2: Accessor for m2m field 'm2m_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash2: Accessor for m2m field 'm2m_1' clashes with related field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash2: Reverse query name for m2m field 'm2m_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash2: Reverse query name for m2m field 'm2m_1' clashes with related field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash2: Accessor for m2m field 'm2m_2' clashes with related field 'Target.src_safe'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash2: Reverse query name for m2m field 'm2m_2' clashes with related field 'Target.src_safe'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash3: Accessor for field 'foreign_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash3: Accessor for field 'foreign_1' clashes with related m2m field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash3: Reverse query name for field 'foreign_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash3: Reverse query name for field 'foreign_1' clashes with related m2m field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash3: Accessor for field 'foreign_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash3: Accessor for field 'foreign_2' clashes with related m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash3: Reverse query name for field 'foreign_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash3: Reverse query name for field 'foreign_2' clashes with related m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash3: Accessor for m2m field 'm2m_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash3: Accessor for m2m field 'm2m_1' clashes with related field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash3: Reverse query name for m2m field 'm2m_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash3: Reverse query name for m2m field 'm2m_1' clashes with related field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash3: Accessor for m2m field 'm2m_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash3: Accessor for m2m field 'm2m_2' clashes with related field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash3: Reverse query name for m2m field 'm2m_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash3: Reverse query name for m2m field 'm2m_2' clashes with related field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clashforeign: Accessor for field 'foreign' clashes with field 'Target2.clashforeign_set'. Add a related_name argument to the definition for 'foreign'.
invalid_models.clashm2m: Accessor for m2m field 'm2m' clashes with m2m field 'Target2.clashm2m_set'. Add a related_name argument to the definition for 'm2m'.
invalid_models.target2: Accessor for field 'foreign_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'foreign_tgt'.
invalid_models.target2: Accessor for field 'foreign_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'foreign_tgt'.
invalid_models.target2: Accessor for field 'foreign_tgt' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'foreign_tgt'.
invalid_models.target2: Accessor for field 'clashforeign_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashforeign_set'.
invalid_models.target2: Accessor for field 'clashforeign_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashforeign_set'.
invalid_models.target2: Accessor for field 'clashforeign_set' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'clashforeign_set'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.selfclashforeign: Accessor for field 'selfclashforeign_set' clashes with field 'SelfClashForeign.selfclashforeign_set'. Add a related_name argument to the definition for 'selfclashforeign_set'.
invalid_models.selfclashforeign: Reverse query name for field 'selfclashforeign_set' clashes with field 'SelfClashForeign.selfclashforeign'. Add a related_name argument to the definition for 'selfclashforeign_set'.
invalid_models.selfclashforeign: Accessor for field 'foreign_1' clashes with field 'SelfClashForeign.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.selfclashforeign: Reverse query name for field 'foreign_1' clashes with field 'SelfClashForeign.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.selfclashforeign: Accessor for field 'foreign_2' clashes with field 'SelfClashForeign.src_safe'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.selfclashforeign: Reverse query name for field 'foreign_2' clashes with field 'SelfClashForeign.src_safe'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.selfclashm2m: Accessor for m2m field 'selfclashm2m_set' clashes with m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'selfclashm2m_set'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'selfclashm2m_set' clashes with field 'SelfClashM2M.selfclashm2m'. Add a related_name argument to the definition for 'selfclashm2m_set'.
invalid_models.selfclashm2m: Accessor for m2m field 'selfclashm2m_set' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'selfclashm2m_set'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_1' clashes with field 'SelfClashM2M.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_2' clashes with field 'SelfClashM2M.src_safe'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_1' clashes with field 'SelfClashM2M.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_2' clashes with field 'SelfClashM2M.src_safe'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_3' clashes with m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_3'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_3' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_3'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_3' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_3'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_4' clashes with m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_4'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_4' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_4'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_4' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_4'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_3' clashes with field 'SelfClashM2M.selfclashm2m'. Add a related_name argument to the definition for 'm2m_3'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_4' clashes with field 'SelfClashM2M.selfclashm2m'. Add a related_name argument to the definition for 'm2m_4'.
invalid_models.missingrelations: 'rel1' has a relation with model Rel1, which has either not been installed or is abstract.
invalid_models.missingrelations: 'rel2' has an m2m relation with model Rel2, which has either not been installed or is abstract.
invalid_models.grouptwo: 'primary' is a manually-defined m2m relation through model Membership, which does not have foreign keys to Person and GroupTwo
invalid_models.grouptwo: 'secondary' is a manually-defined m2m relation through model MembershipMissingFK, which does not have foreign keys to Group and GroupTwo
invalid_models.missingmanualm2mmodel: 'missing_m2m' specifies an m2m relation through model MissingM2MModel, which has not been installed
invalid_models.group: The model Group has two manually-defined m2m relations through the model Membership, which is not permitted. Please consider using an extra field on your intermediary model instead.
invalid_models.group: Intermediary model RelationshipDoubleFK has more than one foreign key to Person, which is ambiguous and is not permitted.
invalid_models.personselfrefm2m: Many-to-many fields with intermediate tables cannot be symmetrical.
invalid_models.personselfrefm2m: Intermediary model RelationshipTripleFK has more than two foreign keys to PersonSelfRefM2M, which is ambiguous and is not permitted.
invalid_models.personselfrefm2mexplicit: Many-to-many fields with intermediate tables cannot be symmetrical.
invalid_models.abstractrelationmodel: 'fk1' has a relation with model AbstractModel, which has either not been installed or is abstract.
invalid_models.abstractrelationmodel: 'fk2' has an m2m relation with model AbstractModel, which has either not been installed or is abstract.
invalid_models.uniquem2m: ManyToManyFields cannot be unique. Remove the unique argument on 'unique_people'.
invalid_models.nonuniquefktarget1: Field 'bad' under model 'FKTarget' must have a unique=True constraint.
invalid_models.nonuniquefktarget2: Field 'bad' under model 'FKTarget' must have a unique=True constraint.
invalid_models.nonexistingorderingwithsingleunderscore: "ordering" refers to "does_not_exist", a field that doesn't exist.
invalid_models.invalidsetnull: 'fk' specifies on_delete=SET_NULL, but cannot be null.
invalid_models.invalidsetdefault: 'fk' specifies on_delete=SET_DEFAULT, but has no default value.
"""
if not connection.features.interprets_empty_strings_as_nulls:
model_errors += """invalid_models.primarykeynull: "my_pk_field": Primary key fields cannot have null=True.
"""
|
mit
|
Teamxrtc/webrtc-streaming-node
|
third_party/depot_tools/external_bin/gsutil/gsutil_4.15/gsutil/third_party/boto/tests/unit/ec2/test_securitygroup.py
|
112
|
8556
|
#!/usr/bin/env python
from tests.compat import unittest
from tests.unit import AWSMockServiceTestCase
from boto.ec2.connection import EC2Connection
from boto.ec2.securitygroup import SecurityGroup
DESCRIBE_SECURITY_GROUP = br"""<?xml version="1.0" encoding="UTF-8"?>
<DescribeSecurityGroupsResponse xmlns="http://ec2.amazonaws.com/doc/2013-06-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<securityGroupInfo>
<item>
<ownerId>111122223333</ownerId>
<groupId>sg-1a2b3c4d</groupId>
<groupName>WebServers</groupName>
<groupDescription>Web Servers</groupDescription>
<vpcId/>
<ipPermissions>
<item>
<ipProtocol>tcp</ipProtocol>
<fromPort>80</fromPort>
<toPort>80</toPort>
<groups/>
<ipRanges>
<item>
<cidrIp>0.0.0.0/0</cidrIp>
</item>
</ipRanges>
</item>
</ipPermissions>
<ipPermissionsEgress/>
</item>
<item>
<ownerId>111122223333</ownerId>
<groupId>sg-2a2b3c4d</groupId>
<groupName>RangedPortsBySource</groupName>
<groupDescription>Group A</groupDescription>
<ipPermissions>
<item>
<ipProtocol>tcp</ipProtocol>
<fromPort>6000</fromPort>
<toPort>7000</toPort>
<groups>
<item>
<userId>111122223333</userId>
<groupId>sg-3a2b3c4d</groupId>
<groupName>Group B</groupName>
</item>
</groups>
<ipRanges/>
</item>
</ipPermissions>
<ipPermissionsEgress/>
</item>
</securityGroupInfo>
</DescribeSecurityGroupsResponse>"""
DESCRIBE_INSTANCES = br"""<?xml version="1.0" encoding="UTF-8"?>
<DescribeInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2012-10-01/">
<requestId>c6132c74-b524-4884-87f5-0f4bde4a9760</requestId>
<reservationSet>
<item>
<reservationId>r-72ef4a0a</reservationId>
<ownerId>184906166255</ownerId>
<groupSet/>
<instancesSet>
<item>
<instanceId>i-instance</instanceId>
<imageId>ami-1624987f</imageId>
<instanceState>
<code>16</code>
<name>running</name>
</instanceState>
<privateDnsName/>
<dnsName/>
<reason/>
<keyName>mykeypair</keyName>
<amiLaunchIndex>0</amiLaunchIndex>
<productCodes/>
<instanceType>m1.small</instanceType>
<launchTime>2012-12-14T23:48:37.000Z</launchTime>
<placement>
<availabilityZone>us-east-1d</availabilityZone>
<groupName/>
<tenancy>default</tenancy>
</placement>
<kernelId>aki-88aa75e1</kernelId>
<monitoring>
<state>disabled</state>
</monitoring>
<subnetId>subnet-0dc60667</subnetId>
<vpcId>vpc-id</vpcId>
<privateIpAddress>10.0.0.67</privateIpAddress>
<sourceDestCheck>true</sourceDestCheck>
<groupSet>
<item>
<groupId>sg-1a2b3c4d</groupId>
<groupName>WebServerSG</groupName>
</item>
</groupSet>
<architecture>x86_64</architecture>
<rootDeviceType>ebs</rootDeviceType>
<rootDeviceName>/dev/sda1</rootDeviceName>
<blockDeviceMapping>
<item>
<deviceName>/dev/sda1</deviceName>
<ebs>
<volumeId>vol-id</volumeId>
<status>attached</status>
<attachTime>2012-12-14T23:48:43.000Z</attachTime>
<deleteOnTermination>true</deleteOnTermination>
</ebs>
</item>
</blockDeviceMapping>
<virtualizationType>paravirtual</virtualizationType>
<clientToken>foo</clientToken>
<tagSet>
<item>
<key>Name</key>
<value/>
</item>
</tagSet>
<hypervisor>xen</hypervisor>
<networkInterfaceSet>
<item>
<networkInterfaceId>eni-id</networkInterfaceId>
<subnetId>subnet-id</subnetId>
<vpcId>vpc-id</vpcId>
<description>Primary network interface</description>
<ownerId>ownerid</ownerId>
<status>in-use</status>
<privateIpAddress>10.0.0.67</privateIpAddress>
<sourceDestCheck>true</sourceDestCheck>
<groupSet>
<item>
<groupId>sg-id</groupId>
<groupName>WebServerSG</groupName>
</item>
</groupSet>
<attachment>
<attachmentId>eni-attach-id</attachmentId>
<deviceIndex>0</deviceIndex>
<status>attached</status>
<attachTime>2012-12-14T23:48:37.000Z</attachTime>
<deleteOnTermination>true</deleteOnTermination>
</attachment>
<privateIpAddressesSet>
<item>
<privateIpAddress>10.0.0.67</privateIpAddress>
<primary>true</primary>
</item>
<item>
<privateIpAddress>10.0.0.54</privateIpAddress>
<primary>false</primary>
</item>
<item>
<privateIpAddress>10.0.0.55</privateIpAddress>
<primary>false</primary>
</item>
</privateIpAddressesSet>
</item>
</networkInterfaceSet>
<ebsOptimized>false</ebsOptimized>
</item>
</instancesSet>
</item>
</reservationSet>
</DescribeInstancesResponse>
"""
class TestDescribeSecurityGroups(AWSMockServiceTestCase):
connection_class = EC2Connection
def test_get_instances(self):
self.set_http_response(status_code=200, body=DESCRIBE_SECURITY_GROUP)
groups = self.service_connection.get_all_security_groups()
self.set_http_response(status_code=200, body=DESCRIBE_INSTANCES)
instances = groups[0].instances()
self.assertEqual(1, len(instances))
self.assertEqual(groups[0].id, instances[0].groups[0].id)
class SecurityGroupTest(unittest.TestCase):
def test_add_rule(self):
sg = SecurityGroup()
self.assertEqual(len(sg.rules), 0)
# Regression: ``dry_run`` was being passed (but unhandled) before.
sg.add_rule(
ip_protocol='http',
from_port='80',
to_port='8080',
src_group_name='groupy',
src_group_owner_id='12345',
cidr_ip='10.0.0.1',
src_group_group_id='54321',
dry_run=False
)
self.assertEqual(len(sg.rules), 1)
def test_remove_rule_on_empty_group(self):
# Remove a rule from a group with no rules
sg = SecurityGroup()
with self.assertRaises(ValueError):
sg.remove_rule('ip', 80, 80, None, None, None, None)
|
mit
|
south-coast-science/scs_core
|
tests/position/position_test.py
|
1
|
1174
|
#!/usr/bin/env python3
"""
Created on 10 Feb 2021
@author: Bruno Beloff ([email protected])
Getting distance between two points based on latitude/longitude
https://stackoverflow.com/questions/19412462/getting-distance-between-two-points-based-on-latitude-longitude
"""
from scs_core.position.position import Position
# --------------------------------------------------------------------------------------------------------------------
# run...
p1 = Position(52.2296756, 21.0122287)
p2 = Position(52.2296756, 21.0122287)
print("distance (same point): %s" % p1.distance(p2))
print("-")
p1 = Position(52.2296756, 21.0122287)
p2 = Position(52.406374, 16.9251681)
print("distance: %s" % p1.distance(p2))
print("-")
p1 = Position(52.406374, 16.9251681)
p2 = Position(52.2296756, 21.0122287)
print("distance (reversed): %s" % p1.distance(p2))
print("-")
p1 = Position(52.2296756, 1.0122287)
p2 = Position(52.406374, -3.0748319)
print("distance (crossing meridian): %s" % p1.distance(p2))
print("-")
p1 = Position(-52.2296756, 1.0122287)
p2 = Position(-52.406374, -3.0748319)
print("distance (crossing equator): %s" % p1.distance(p2))
print("-")
|
mit
|
dbarbier/privot
|
python/test/t_FrankCopula_std.py
|
1
|
2700
|
#! /usr/bin/env python
from openturns import *
from math import *
TESTPREAMBLE()
RandomGenerator().SetSeed(0)
try :
# Instantiate one distribution object
dim = 2
copula = FrankCopula(2.5)
print "Copula " , repr(copula)
print "Copula " , copula
print "Mean " , repr(copula.getMean())
print "Covariance " , repr(copula.getCovariance())
# Is this copula an elliptical distribution?
print "Elliptical distribution= " , copula.isElliptical()
# Is this copula elliptical?
print "Elliptical copula= " , copula.hasEllipticalCopula()
# Is this copula independent?
print "Independent copula= " , copula.hasIndependentCopula()
# Test for realization of distribution
oneRealization = copula.getRealization()
print "oneRealization=", repr(oneRealization)
# Test for sampling
size = 10
oneSample = copula.getSample( size )
print "oneSample=", repr(oneSample)
# Test for sampling
size = 10000
anotherSample = copula.getSample( size )
print "anotherSample mean=", repr(anotherSample.computeMean())
print "anotherSample covariance=", repr(anotherSample.computeCovariance())
# Define a point
point = NumericalPoint(dim, 0.2)
# Show PDF and CDF of point
pointPDF = copula.computePDF( point )
pointCDF = copula.computeCDF( point )
print "Point = ", repr(point), " pdf=%.6f" % pointPDF, " cdf=%.6f" % pointCDF
# Get 50% quantile
quantile = copula.computeQuantile( 0.5 )
print "Quantile=", repr(quantile)
print "CDF(quantile)=%.6f" % copula.computeCDF(quantile)
# Extract the marginals
for i in range(dim) :
margin = copula.getMarginal(i)
print "margin=", repr(margin)
print "margin PDF=%.6f" % margin.computePDF( NumericalPoint(1,0.25))
print "margin CDF=%.6f" % margin.computeCDF(NumericalPoint(1,0.25))
print "margin quantile=", repr(margin.computeQuantile(0.95))
print "margin realization=", repr(margin.getRealization())
# Extract a 2-D marginal
indices = Indices(2, 0)
indices[0] = 1
indices[1] = 0
print "indices=", repr(indices)
margins = copula.getMarginal(indices)
print "margins=", repr(margins)
print "margins PDF=%.6f" % margins.computePDF(NumericalPoint(2,0.25))
print "margins CDF=%.6f" % margins.computeCDF(NumericalPoint(2,0.25))
quantile = NumericalPoint(margins.computeQuantile(0.95))
print "margins quantile=", repr(quantile)
print "margins CDF(qantile)=%.6f" % margins.computeCDF(quantile)
print "margins realization=", repr(margins.getRealization())
except :
import sys
print "t_FrankCopula_std.py", sys.exc_type, sys.exc_value
|
lgpl-3.0
|
pyfa-org/eos
|
tests/mod_builder/etree/affectee_filter/test_affectee_dom_grp.py
|
1
|
5818
|
# ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from eos.const.eos import EffectBuildStatus
from eos.const.eos import ModAffecteeFilter
from eos.const.eos import ModDomain
from eos.const.eos import ModOperator
from eos.const.eve import OperandId
from tests.mod_builder.testcase import ModBuilderTestCase
class TestBuilderEtreeAffecteeDomGrp(ModBuilderTestCase):
def make_etree(self, domain):
e_tgt_dom = self.ef.make(
1, operandID=OperandId.def_dom, expressionValue=domain)
e_tgt_grp = self.ef.make(
2, operandID=OperandId.def_grp, expressionGroupID=46)
e_tgt_attr = self.ef.make(
3, operandID=OperandId.def_attr, expressionAttributeID=6)
e_optr = self.ef.make(
4, operandID=OperandId.def_optr, expressionValue='PostPercent')
e_src_attr = self.ef.make(
5, operandID=OperandId.def_attr, expressionAttributeID=1576)
e_tgt_itms = self.ef.make(
6, operandID=OperandId.dom_grp, arg1=e_tgt_dom['expressionID'],
arg2=e_tgt_grp['expressionID'])
e_tgt_spec = self.ef.make(
7, operandID=OperandId.itm_attr, arg1=e_tgt_itms['expressionID'],
arg2=e_tgt_attr['expressionID'])
e_optr_tgt = self.ef.make(
8, operandID=OperandId.optr_tgt, arg1=e_optr['expressionID'],
arg2=e_tgt_spec['expressionID'])
e_add_mod = self.ef.make(
9, operandID=OperandId.add_dom_grp_mod,
arg1=e_optr_tgt['expressionID'], arg2=e_src_attr['expressionID'])
e_rm_mod = self.ef.make(
10, operandID=OperandId.rm_dom_grp_mod,
arg1=e_optr_tgt['expressionID'], arg2=e_src_attr['expressionID'])
self.effect_row = {
'preExpression': e_add_mod['expressionID'],
'postExpression': e_rm_mod['expressionID']}
def test_domain_self(self):
self.make_etree('Self')
modifiers, status = self.run_builder(self.effect_row)
self.assertEqual(status, EffectBuildStatus.success)
self.assertEqual(len(modifiers), 1)
modifier = modifiers[0]
self.assertEqual(
modifier.affectee_filter, ModAffecteeFilter.domain_group)
self.assertEqual(modifier.affectee_domain, ModDomain.self)
self.assertEqual(modifier.affectee_filter_extra_arg, 46)
self.assertEqual(modifier.affectee_attr_id, 6)
self.assertEqual(modifier.operator, ModOperator.post_percent)
self.assertEqual(modifier.affector_attr_id, 1576)
self.assert_log_entries(0)
def test_domain_char(self):
self.make_etree('Char')
modifiers, status = self.run_builder(self.effect_row)
self.assertEqual(status, EffectBuildStatus.success)
self.assertEqual(len(modifiers), 1)
modifier = modifiers[0]
self.assertEqual(
modifier.affectee_filter, ModAffecteeFilter.domain_group)
self.assertEqual(modifier.affectee_domain, ModDomain.character)
self.assertEqual(modifier.affectee_filter_extra_arg, 46)
self.assertEqual(modifier.affectee_attr_id, 6)
self.assertEqual(modifier.operator, ModOperator.post_percent)
self.assertEqual(modifier.affector_attr_id, 1576)
self.assert_log_entries(0)
def test_domain_ship(self):
self.make_etree('Ship')
modifiers, status = self.run_builder(self.effect_row)
self.assertEqual(status, EffectBuildStatus.success)
self.assertEqual(len(modifiers), 1)
modifier = modifiers[0]
self.assertEqual(
modifier.affectee_filter, ModAffecteeFilter.domain_group)
self.assertEqual(modifier.affectee_domain, ModDomain.ship)
self.assertEqual(modifier.affectee_filter_extra_arg, 46)
self.assertEqual(modifier.affectee_attr_id, 6)
self.assertEqual(modifier.operator, ModOperator.post_percent)
self.assertEqual(modifier.affector_attr_id, 1576)
self.assert_log_entries(0)
def test_domain_target(self):
self.make_etree('Target')
modifiers, status = self.run_builder(self.effect_row)
self.assertEqual(status, EffectBuildStatus.success)
self.assertEqual(len(modifiers), 1)
modifier = modifiers[0]
self.assertEqual(
modifier.affectee_filter, ModAffecteeFilter.domain_group)
self.assertEqual(modifier.affectee_domain, ModDomain.target)
self.assertEqual(modifier.affectee_filter_extra_arg, 46)
self.assertEqual(modifier.affectee_attr_id, 6)
self.assertEqual(modifier.operator, ModOperator.post_percent)
self.assertEqual(modifier.affector_attr_id, 1576)
self.assert_log_entries(0)
def test_domain_other(self):
self.make_etree('Other')
modifiers, status = self.run_builder(self.effect_row)
self.assertEqual(status, EffectBuildStatus.error)
self.assertEqual(len(modifiers), 0)
self.assert_log_entries(1)
|
lgpl-3.0
|
bertrand-l/LearnML
|
learnml/datasets/load.py
|
1
|
6085
|
# -*- coding: utf-8 -*-
"""
Utilities to load datasets from the `dataset` package.
"""
# Author: bertrand-l
# License: BSD
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import numpy as np
from .dataset import DataSet
__all__ = ('load_faithful', 'load_iris', 'load_women')
DATAPATH = os.path.join(os.path.dirname(__file__), 'data')
def filepath(fname):
"""Returns the complete file path to the file."""
return os.path.join(DATAPATH, fname)
def load(fname, xcol=None, xfmt='f', ycol=None, yfmt='f', sep=None,
missing='NaN', tolerant=True):
"""
Read data from a flat (tabulated or delimiter-separated) ascii file with a
header and return a `DataSet` object.
Everything before the line starting with 'DATA>' is considered the header.
'DATA>' may be followed by variables names separated by `sep`. After that,
everything is considered data. Each data column can have its own type.
`X` and `y` are returned as numpy.ndarray only if all columns in `X`
have the same type (i.e. xfmt='f' or 'i' or 's' but not something like
'f,s'). Otherwise they are left as lists.
Parameters
----------
fname : str
name of a file in the `data` directory
xcol, ycol : str or None
column numbers for X or y data, e.g. '0 1 2' or '0-2'. If `xcol` is
None, all columns are assumed to belong to `X`.
xfmt, yfmt : str
data format for each column: {'i', 'f', 's'} or a combination such
as 'iffs'.
sep : str or None
field separator passed to str.split(); None splits on runs of whitespace
missing : str
token marking a missing value; converted to numpy.nan on read
tolerant : boolean
just ignore data lines that cannot be converted
"""
# FIXME: ugly
def convert(string, fmt):
"""Tries to convert a string to a given format."""
string = string.strip()
if string == missing:
return np.nan
else:
if fmt == 'f':
return float(string)
elif fmt == 'i':
return int(string)
else:
return string
def readline(words):
"""Reads line split into 'words' and returns a tuple."""
imax = len(xfmt) - 1
if ix is None:
X = tuple(convert(words[i], xfmt[min(i, imax)])
for i in range(len(words)))
else:
X = tuple(convert(words[ix[i]], xfmt[min(i, imax)])
for i in range(len(ix)))
if iy is not None:
return X, convert(words[iy], yfmt)
else:
return X, None
# form column indices and formats
if isinstance(sep, basestring) and sep.strip() == '':
sep = None
if xcol is None:
ix = None
else:
xcol = str(xcol)
if xcol.find('-') > 0:
beg, end = xcol.split('-')
ix = range(int(beg), int(end) + 1)
else:
ix = [int(i) for i in xcol.split(sep)]
if ycol is None or ycol == '' or xcol is None:
iy = None
else:
iy = int(ycol)
if sep is None:
xfmt = '{}'.format(xfmt).replace(' ', '')
else:
xfmt = '{}'.format(xfmt).replace(' ', '').replace(sep, '')
xfmt = [xf for xf in xfmt]
yfmt = '{}'.format(yfmt).lstrip()[0]
# read line by line
with open(filepath(fname), 'rU') as fp:
X, y = [], []
title, Xlabel, ylabel = None, None, None
header, reading_header = "", True
for line in fp:
line = line.rstrip('\n').strip()
if line.startswith("DATA>"):
# data starts here
reading_header = False
label = line[5:].split(sep)
if ix is None:
Xlabel = tuple([lab.strip() for lab in label])
elif len(label) >= len(ix):
Xlabel = tuple([label[i].strip() for i in ix])
if iy is not None:
ylabel = label[iy].strip()
elif reading_header:
# reading header
if title is None and len(line) > 0:
title = line
header += "\n{0}".format(line)
elif len(line) > 0:
# reading data
words = line.split(sep)
try:
X_, y_ = readline(words)
except (IndexError, TypeError, ValueError) as err:
if not tolerant:
raise err
else:
if len(X) == 0 or len(X_) == len(X[0]):
X.append(X_)
if y_ is not None:
y.append(y_)
if len(y) != len(X):
y = None
if xfmt == [xfmt[0]] * len(xfmt):
X = np.array(X)
if y is not None:
y = np.array(y)
return DataSet(X, Xlabel=Xlabel, y=y, ylabel=ylabel,
info=header, title=title)
def load_faithful():
"""
Old Faithful Geyser Data.
Waiting time between eruptions and the duration of the eruption
for the Old Faithful geyser in Yellowstone National Park, Wyoming,
USA.
272 observations on 2 variables.
"""
return load("faithful.data", xcol='1 2', xfmt='f', sep=None)
def load_iris():
"""
Edgar Anderson's Iris Data.
This famous (Fisher's or Anderson's) iris data set gives the measurements
in centimeters of the variables sepal length and width and petal length and
width, respectively, for 50 flowers from each of 3 species of iris. The
species are Iris setosa, versicolor, and virginica.
150 observations (50 in each of three classes) on 4 attributes and
the class.
"""
return load("iris.data", xcol='0-3', xfmt='f', ycol=4, yfmt='s', sep=',')
def load_women():
"""
Average Heights and Weights for American Women.
This data set gives the average heights and weights for American women
aged 30–39.
15 observations on 2 variables.
"""
return load("women.data", xcol=1, xfmt='f', ycol=2, yfmt='f',
sep=None)
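# --- editorial usage sketch, not part of the original module ---
# Minimal illustration of how the loaders above are meant to be called; each
# returns a DataSet built from the parsed columns (see load() above).
if __name__ == '__main__':
    iris = load_iris()          # 150 rows, 4 float features, class label column as y
    faithful = load_faithful()  # 272 rows, 2 float features, no y column
    women = load_women()        # 15 rows, height as X and weight as y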
|
bsd-3-clause
|
2ndQuadrant/ansible
|
test/units/modules/storage/netapp/test_na_ontap_cifs_server.py
|
38
|
8705
|
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit tests for ONTAP Ansible module: na_ontap_cifs_server '''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_cifs_server \
import NetAppOntapcifsServer as my_module # module under test
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
"""prepare arguments so that they will be picked up during module creation"""
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
class AnsibleExitJson(Exception):
"""Exception class to be raised by module.exit_json and caught by the test case"""
pass
class AnsibleFailJson(Exception):
"""Exception class to be raised by module.fail_json and caught by the test case"""
pass
def exit_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over exit_json; package return data into an exception"""
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over fail_json; package return data into an exception"""
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
''' mock server connection to ONTAP host '''
def __init__(self, kind=None, parm1=None, parm2=None):
''' save arguments '''
self.type = kind
self.parm1 = parm1
self.parm2 = parm2
self.xml_in = None
self.xml_out = None
def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
''' mock invoke_successfully returning xml data '''
self.xml_in = xml
if self.type == 'cifs_server':
xml = self.build_vserver_info(self.parm1, self.parm2)
self.xml_out = xml
return xml
@staticmethod
def build_vserver_info(cifs_server, admin_status):
''' build xml data for cifs-server-info '''
xml = netapp_utils.zapi.NaElement('xml')
data = {'num-records': 1,
'attributes-list': {'cifs-server-config': {'cifs-server': cifs_server,
'administrative-status': admin_status}}}
xml.translate_struct(data)
print(xml.to_string())
return xml
class TestMyModule(unittest.TestCase):
''' a group of related Unit Tests '''
def setUp(self):
self.mock_module_helper = patch.multiple(basic.AnsibleModule,
exit_json=exit_json,
fail_json=fail_json)
self.mock_module_helper.start()
self.addCleanup(self.mock_module_helper.stop)
self.server = MockONTAPConnection()
self.use_vsim = False
def set_default_args(self):
if self.use_vsim:
hostname = '10.193.77.154'
username = 'admin'
password = 'netapp1!'
cifs_server = 'test'
vserver = 'ansible_test'
else:
hostname = 'hostname'
username = 'username'
password = 'password'
cifs_server = 'name'
vserver = 'vserver'
return dict({
'hostname': hostname,
'username': username,
'password': password,
'cifs_server_name': cifs_server,
'vserver': vserver
})
def test_module_fail_when_required_args_missing(self):
''' required arguments are reported as errors '''
with pytest.raises(AnsibleFailJson) as exc:
set_module_args({})
my_module()
print('Info: %s' % exc.value.args[0]['msg'])
def test_ensure_cifs_server_get_called(self):
''' a more interesting test '''
set_module_args(self.set_default_args())
my_obj = my_module()
my_obj.server = self.server
cifs_server = my_obj.get_cifs_server()
print('Info: test_cifs_server_get: %s' % repr(cifs_server))
assert cifs_server is None
def test_ensure_cifs_server_apply_for_create_called(self):
''' creating cifs server and checking idempotency '''
module_args = {}
module_args.update(self.set_default_args())
module_args.update({'cifs_server_name': 'create'})
set_module_args(module_args)
my_obj = my_module()
if not self.use_vsim:
my_obj.server = self.server
with pytest.raises(AnsibleExitJson) as exc:
my_obj.apply()
print('Info: test_cifs_server_apply: %s' % repr(exc.value))
assert exc.value.args[0]['changed']
if not self.use_vsim:
my_obj.server = MockONTAPConnection('cifs_server', 'create', 'up')
with pytest.raises(AnsibleExitJson) as exc:
my_obj.apply()
print('Info: test_cifs_server_apply_for_create: %s' % repr(exc.value))
assert not exc.value.args[0]['changed']
def test_ensure_cifs_server_apply_for_delete_called(self):
''' deleting cifs server and checking idempotency '''
module_args = {}
module_args.update(self.set_default_args())
module_args.update({'cifs_server_name': 'delete'})
set_module_args(module_args)
my_obj = my_module()
if not self.use_vsim:
my_obj.server = MockONTAPConnection('cifs_server', 'delete', 'up')
with pytest.raises(AnsibleExitJson) as exc:
my_obj.apply()
print('Info: test_cifs_server_apply: %s' % repr(exc.value))
assert not exc.value.args[0]['changed']
module_args.update({'state': 'absent'})
set_module_args(module_args)
my_obj = my_module()
if not self.use_vsim:
my_obj.server = MockONTAPConnection('cifs_server', 'delete', 'up')
with pytest.raises(AnsibleExitJson) as exc:
my_obj.apply()
print('Info: test_cifs_server_delete: %s' % repr(exc.value))
assert exc.value.args[0]['changed']
def test_ensure_start_cifs_server_called(self):
''' starting cifs server and checking idempotency '''
module_args = {}
module_args.update(self.set_default_args())
module_args.update({'cifs_server_name': 'delete'})
module_args.update({'service_state': 'started'})
set_module_args(module_args)
my_obj = my_module()
if not self.use_vsim:
my_obj.server = MockONTAPConnection('cifs_server', 'test', 'up')
with pytest.raises(AnsibleExitJson) as exc:
my_obj.apply()
print('Info: test_ensure_start_cifs_server: %s' % repr(exc.value))
assert not exc.value.args[0]['changed']
module_args.update({'service_state': 'stopped'})
set_module_args(module_args)
my_obj = my_module()
if not self.use_vsim:
my_obj.server = MockONTAPConnection('cifs_server', 'test', 'up')
with pytest.raises(AnsibleExitJson) as exc:
my_obj.apply()
print('Info: test_ensure_start_cifs_server: %s' % repr(exc.value))
assert exc.value.args[0]['changed']
def test_ensure_stop_cifs_server_called(self):
''' stopping cifs server and checking idempotency '''
module_args = {}
module_args.update(self.set_default_args())
module_args.update({'cifs_server_name': 'delete'})
module_args.update({'service_state': 'stopped'})
set_module_args(module_args)
my_obj = my_module()
if not self.use_vsim:
my_obj.server = MockONTAPConnection('cifs_server', 'test', 'down')
with pytest.raises(AnsibleExitJson) as exc:
my_obj.apply()
print('Info: test_ensure_stop_cifs_server: %s' % repr(exc.value))
assert not exc.value.args[0]['changed']
module_args.update({'service_state': 'started'})
set_module_args(module_args)
my_obj = my_module()
if not self.use_vsim:
my_obj.server = MockONTAPConnection('cifs_server', 'test', 'down')
with pytest.raises(AnsibleExitJson) as exc:
my_obj.apply()
print('Info: test_ensure_stop_cifs_server: %s' % repr(exc.value))
assert exc.value.args[0]['changed']
|
gpl-3.0
|
Fusion-Rom/android_external_chromium_org
|
ppapi/generators/idl_log.py
|
137
|
1271
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Error and information logging for IDL """
import sys
class IDLLog(object):
"""Captures and routes logging output.
Captures logging output and/or sends it out via a file handle, typically
stdout or stderr.
"""
def __init__(self, name, out):
if name:
self._name = '%s : ' % name
else:
self._name = ''
self._out = out
self._capture = False
self._console = True
self._log = []
def Log(self, msg):
if self._console:
line = "%s\n" % (msg)
self._out.write(line)
if self._capture:
self._log.append(msg)
def LogLine(self, filename, lineno, pos, msg):
if self._console:
line = "%s(%d) : %s%s\n" % (filename, lineno, self._name, msg)
self._out.write(line)
if self._capture:
self._log.append(msg)
def SetConsole(self, enable):
self._console = enable
def SetCapture(self, enable):
self._capture = enable
def DrainLog(self):
out = self._log
self._log = []
return out
ErrOut = IDLLog('Error', sys.stderr)
WarnOut = IDLLog('Warning', sys.stdout)
InfoOut = IDLLog('', sys.stdout)
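# --- editorial usage sketch, not part of the original file ---
# Shows the capture/drain cycle implemented above: with the console silenced and
# capture enabled, Log() buffers messages and DrainLog() returns them in one batch.
if __name__ == '__main__':
  demo = IDLLog('Demo', sys.stdout)
  demo.SetConsole(False)
  demo.SetCapture(True)
  demo.Log('example message')
  assert demo.DrainLog() == ['example message']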
|
bsd-3-clause
|
nzavagli/UnrealPy
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Lib/test/test_complex.py
|
32
|
27808
|
import unittest
from test import test_support
from random import random
from math import atan2, isnan, copysign
INF = float("inf")
NAN = float("nan")
# These tests ensure that complex math does the right thing
class ComplexTest(unittest.TestCase):
def assertAlmostEqual(self, a, b):
if isinstance(a, complex):
if isinstance(b, complex):
unittest.TestCase.assertAlmostEqual(self, a.real, b.real)
unittest.TestCase.assertAlmostEqual(self, a.imag, b.imag)
else:
unittest.TestCase.assertAlmostEqual(self, a.real, b)
unittest.TestCase.assertAlmostEqual(self, a.imag, 0.)
else:
if isinstance(b, complex):
unittest.TestCase.assertAlmostEqual(self, a, b.real)
unittest.TestCase.assertAlmostEqual(self, 0., b.imag)
else:
unittest.TestCase.assertAlmostEqual(self, a, b)
def assertCloseAbs(self, x, y, eps=1e-9):
"""Return true iff floats x and y "are close"."""
# put the one with larger magnitude second
if abs(x) > abs(y):
x, y = y, x
if y == 0:
return abs(x) < eps
if x == 0:
return abs(y) < eps
# check that relative difference < eps
self.assertTrue(abs((x-y)/y) < eps)
def assertFloatsAreIdentical(self, x, y):
"""assert that floats x and y are identical, in the sense that:
(1) both x and y are nans, or
(2) both x and y are infinities, with the same sign, or
(3) both x and y are zeros, with the same sign, or
(4) x and y are both finite and nonzero, and x == y
"""
msg = 'floats {!r} and {!r} are not identical'
if isnan(x) or isnan(y):
if isnan(x) and isnan(y):
return
elif x == y:
if x != 0.0:
return
# both zero; check that signs match
elif copysign(1.0, x) == copysign(1.0, y):
return
else:
msg += ': zeros have different signs'
self.fail(msg.format(x, y))
def assertClose(self, x, y, eps=1e-9):
"""Return true iff complexes x and y "are close"."""
self.assertCloseAbs(x.real, y.real, eps)
self.assertCloseAbs(x.imag, y.imag, eps)
def check_div(self, x, y):
"""Compute complex z=x*y, and check that z/x==y and z/y==x."""
z = x * y
if x != 0:
q = z / x
self.assertClose(q, y)
q = z.__div__(x)
self.assertClose(q, y)
q = z.__truediv__(x)
self.assertClose(q, y)
if y != 0:
q = z / y
self.assertClose(q, x)
q = z.__div__(y)
self.assertClose(q, x)
q = z.__truediv__(y)
self.assertClose(q, x)
def test_div(self):
simple_real = [float(i) for i in xrange(-5, 6)]
simple_complex = [complex(x, y) for x in simple_real for y in simple_real]
for x in simple_complex:
for y in simple_complex:
self.check_div(x, y)
# A naive complex division algorithm (such as in 2.0) is very prone to
# nonsense errors for these (overflows and underflows).
self.check_div(complex(1e200, 1e200), 1+0j)
self.check_div(complex(1e-200, 1e-200), 1+0j)
# Just for fun.
for i in xrange(100):
self.check_div(complex(random(), random()),
complex(random(), random()))
self.assertRaises(ZeroDivisionError, complex.__div__, 1+1j, 0+0j)
# FIXME: The following currently crashes on Alpha
# self.assertRaises(OverflowError, pow, 1e200+1j, 1e200+1j)
def test_truediv(self):
self.assertAlmostEqual(complex.__truediv__(2+0j, 1+1j), 1-1j)
self.assertRaises(ZeroDivisionError, complex.__truediv__, 1+1j, 0+0j)
for denom_real, denom_imag in [(0, NAN), (NAN, 0), (NAN, NAN)]:
z = complex(0, 0) / complex(denom_real, denom_imag)
self.assertTrue(isnan(z.real))
self.assertTrue(isnan(z.imag))
def test_floordiv(self):
self.assertAlmostEqual(complex.__floordiv__(3+0j, 1.5+0j), 2)
self.assertRaises(ZeroDivisionError, complex.__floordiv__, 3+0j, 0+0j)
def test_coerce(self):
self.assertRaises(OverflowError, complex.__coerce__, 1+1j, 1L<<10000)
def test_no_implicit_coerce(self):
# Python 2.7 removed implicit coercion from the complex type
class A(object):
def __coerce__(self, other):
raise RuntimeError
__hash__ = None
def __cmp__(self, other):
return -1
a = A()
self.assertRaises(TypeError, lambda: a + 2.0j)
self.assertTrue(a < 2.0j)
def test_richcompare(self):
self.assertEqual(complex.__eq__(1+1j, 1L<<10000), False)
self.assertEqual(complex.__lt__(1+1j, None), NotImplemented)
self.assertIs(complex.__eq__(1+1j, 1+1j), True)
self.assertIs(complex.__eq__(1+1j, 2+2j), False)
self.assertIs(complex.__ne__(1+1j, 1+1j), False)
self.assertIs(complex.__ne__(1+1j, 2+2j), True)
self.assertRaises(TypeError, complex.__lt__, 1+1j, 2+2j)
self.assertRaises(TypeError, complex.__le__, 1+1j, 2+2j)
self.assertRaises(TypeError, complex.__gt__, 1+1j, 2+2j)
self.assertRaises(TypeError, complex.__ge__, 1+1j, 2+2j)
def test_richcompare_boundaries(self):
def check(n, deltas, is_equal, imag = 0.0):
for delta in deltas:
i = n + delta
z = complex(i, imag)
self.assertIs(complex.__eq__(z, i), is_equal(delta))
self.assertIs(complex.__ne__(z, i), not is_equal(delta))
# For IEEE-754 doubles the following should hold:
# x in [2 ** (52 + i), 2 ** (53 + i + 1)] -> x mod 2 ** i == 0
# where the interval is representable, of course.
for i in range(1, 10):
pow = 52 + i
mult = 2 ** i
check(2 ** pow, range(1, 101), lambda delta: delta % mult == 0)
check(2 ** pow, range(1, 101), lambda delta: False, float(i))
check(2 ** 53, range(-100, 0), lambda delta: True)
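# The representability property relied on above can be checked directly, assuming
# IEEE-754 doubles: integers from 2**53 upwards are spaced 2 apart, so an odd
# integer there collapses onto its even neighbour when converted to float.
assert float(2**53 + 1) == float(2**53)
assert float(2**53 + 2) != float(2**53)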
def test_mod(self):
self.assertRaises(ZeroDivisionError, (1+1j).__mod__, 0+0j)
a = 3.33+4.43j
try:
a % 0
except ZeroDivisionError:
pass
else:
self.fail("modulo parama can't be 0")
def test_divmod(self):
self.assertRaises(ZeroDivisionError, divmod, 1+1j, 0+0j)
def test_pow(self):
self.assertAlmostEqual(pow(1+1j, 0+0j), 1.0)
self.assertAlmostEqual(pow(0+0j, 2+0j), 0.0)
self.assertRaises(ZeroDivisionError, pow, 0+0j, 1j)
self.assertAlmostEqual(pow(1j, -1), 1/1j)
self.assertAlmostEqual(pow(1j, 200), 1)
self.assertRaises(ValueError, pow, 1+1j, 1+1j, 1+1j)
a = 3.33+4.43j
self.assertEqual(a ** 0j, 1)
self.assertEqual(a ** 0.+0.j, 1)
self.assertEqual(3j ** 0j, 1)
self.assertEqual(3j ** 0, 1)
try:
0j ** a
except ZeroDivisionError:
pass
else:
self.fail("should fail 0.0 to negative or complex power")
try:
0j ** (3-2j)
except ZeroDivisionError:
pass
else:
self.fail("should fail 0.0 to negative or complex power")
# The following is used to exercise certain code paths
self.assertEqual(a ** 105, a ** 105)
self.assertEqual(a ** -105, a ** -105)
self.assertEqual(a ** -30, a ** -30)
self.assertEqual(0.0j ** 0, 1)
b = 5.1+2.3j
self.assertRaises(ValueError, pow, a, b, 0)
def test_boolcontext(self):
for i in xrange(100):
self.assertTrue(complex(random() + 1e-6, random() + 1e-6))
self.assertTrue(not complex(0.0, 0.0))
def test_conjugate(self):
self.assertClose(complex(5.3, 9.8).conjugate(), 5.3-9.8j)
def test_constructor(self):
class OS:
def __init__(self, value): self.value = value
def __complex__(self): return self.value
class NS(object):
def __init__(self, value): self.value = value
def __complex__(self): return self.value
self.assertEqual(complex(OS(1+10j)), 1+10j)
self.assertEqual(complex(NS(1+10j)), 1+10j)
self.assertRaises(TypeError, complex, OS(None))
self.assertRaises(TypeError, complex, NS(None))
self.assertAlmostEqual(complex("1+10j"), 1+10j)
self.assertAlmostEqual(complex(10), 10+0j)
self.assertAlmostEqual(complex(10.0), 10+0j)
self.assertAlmostEqual(complex(10L), 10+0j)
self.assertAlmostEqual(complex(10+0j), 10+0j)
self.assertAlmostEqual(complex(1,10), 1+10j)
self.assertAlmostEqual(complex(1,10L), 1+10j)
self.assertAlmostEqual(complex(1,10.0), 1+10j)
self.assertAlmostEqual(complex(1L,10), 1+10j)
self.assertAlmostEqual(complex(1L,10L), 1+10j)
self.assertAlmostEqual(complex(1L,10.0), 1+10j)
self.assertAlmostEqual(complex(1.0,10), 1+10j)
self.assertAlmostEqual(complex(1.0,10L), 1+10j)
self.assertAlmostEqual(complex(1.0,10.0), 1+10j)
self.assertAlmostEqual(complex(3.14+0j), 3.14+0j)
self.assertAlmostEqual(complex(3.14), 3.14+0j)
self.assertAlmostEqual(complex(314), 314.0+0j)
self.assertAlmostEqual(complex(314L), 314.0+0j)
self.assertAlmostEqual(complex(3.14+0j, 0j), 3.14+0j)
self.assertAlmostEqual(complex(3.14, 0.0), 3.14+0j)
self.assertAlmostEqual(complex(314, 0), 314.0+0j)
self.assertAlmostEqual(complex(314L, 0L), 314.0+0j)
self.assertAlmostEqual(complex(0j, 3.14j), -3.14+0j)
self.assertAlmostEqual(complex(0.0, 3.14j), -3.14+0j)
self.assertAlmostEqual(complex(0j, 3.14), 3.14j)
self.assertAlmostEqual(complex(0.0, 3.14), 3.14j)
self.assertAlmostEqual(complex("1"), 1+0j)
self.assertAlmostEqual(complex("1j"), 1j)
self.assertAlmostEqual(complex(), 0)
self.assertAlmostEqual(complex("-1"), -1)
self.assertAlmostEqual(complex("+1"), +1)
self.assertAlmostEqual(complex("(1+2j)"), 1+2j)
self.assertAlmostEqual(complex("(1.3+2.2j)"), 1.3+2.2j)
self.assertAlmostEqual(complex("3.14+1J"), 3.14+1j)
self.assertAlmostEqual(complex(" ( +3.14-6J )"), 3.14-6j)
self.assertAlmostEqual(complex(" ( +3.14-J )"), 3.14-1j)
self.assertAlmostEqual(complex(" ( +3.14+j )"), 3.14+1j)
self.assertAlmostEqual(complex("J"), 1j)
self.assertAlmostEqual(complex("( j )"), 1j)
self.assertAlmostEqual(complex("+J"), 1j)
self.assertAlmostEqual(complex("( -j)"), -1j)
self.assertAlmostEqual(complex('1e-500'), 0.0 + 0.0j)
self.assertAlmostEqual(complex('-1e-500j'), 0.0 - 0.0j)
self.assertAlmostEqual(complex('-1e-500+1e-500j'), -0.0 + 0.0j)
class complex2(complex): pass
self.assertAlmostEqual(complex(complex2(1+1j)), 1+1j)
self.assertAlmostEqual(complex(real=17, imag=23), 17+23j)
self.assertAlmostEqual(complex(real=17+23j), 17+23j)
self.assertAlmostEqual(complex(real=17+23j, imag=23), 17+46j)
self.assertAlmostEqual(complex(real=1+2j, imag=3+4j), -3+5j)
# check that the sign of a zero in the real or imaginary part
# is preserved when constructing from two floats. (These checks
# are harmless on systems without support for signed zeros.)
def split_zeros(x):
"""Function that produces different results for 0. and -0."""
return atan2(x, -1.)
self.assertEqual(split_zeros(complex(1., 0.).imag), split_zeros(0.))
self.assertEqual(split_zeros(complex(1., -0.).imag), split_zeros(-0.))
self.assertEqual(split_zeros(complex(0., 1.).real), split_zeros(0.))
self.assertEqual(split_zeros(complex(-0., 1.).real), split_zeros(-0.))
c = 3.14 + 1j
self.assertTrue(complex(c) is c)
del c
self.assertRaises(TypeError, complex, "1", "1")
self.assertRaises(TypeError, complex, 1, "1")
if test_support.have_unicode:
self.assertEqual(complex(unicode(" 3.14+J ")), 3.14+1j)
# SF bug 543840: complex(string) accepts strings with \0
# Fixed in 2.3.
self.assertRaises(ValueError, complex, '1+1j\0j')
self.assertRaises(TypeError, int, 5+3j)
self.assertRaises(TypeError, long, 5+3j)
self.assertRaises(TypeError, float, 5+3j)
self.assertRaises(ValueError, complex, "")
self.assertRaises(TypeError, complex, None)
self.assertRaises(ValueError, complex, "\0")
self.assertRaises(ValueError, complex, "3\09")
self.assertRaises(TypeError, complex, "1", "2")
self.assertRaises(TypeError, complex, "1", 42)
self.assertRaises(TypeError, complex, 1, "2")
self.assertRaises(ValueError, complex, "1+")
self.assertRaises(ValueError, complex, "1+1j+1j")
self.assertRaises(ValueError, complex, "--")
self.assertRaises(ValueError, complex, "(1+2j")
self.assertRaises(ValueError, complex, "1+2j)")
self.assertRaises(ValueError, complex, "1+(2j)")
self.assertRaises(ValueError, complex, "(1+2j)123")
if test_support.have_unicode:
self.assertRaises(ValueError, complex, unicode("x"))
self.assertRaises(ValueError, complex, "1j+2")
self.assertRaises(ValueError, complex, "1e1ej")
self.assertRaises(ValueError, complex, "1e++1ej")
self.assertRaises(ValueError, complex, ")1+2j(")
# the following three were (wrongly) accepted by Python 2.6, but must raise here
self.assertRaises(ValueError, complex, "1..1j")
self.assertRaises(ValueError, complex, "1.11.1j")
self.assertRaises(ValueError, complex, "1e1.1j")
if test_support.have_unicode:
# check that complex accepts long unicode strings
self.assertEqual(type(complex(unicode("1"*500))), complex)
class EvilExc(Exception):
pass
class evilcomplex:
def __complex__(self):
raise EvilExc
self.assertRaises(EvilExc, complex, evilcomplex())
class float2:
def __init__(self, value):
self.value = value
def __float__(self):
return self.value
self.assertAlmostEqual(complex(float2(42.)), 42)
self.assertAlmostEqual(complex(real=float2(17.), imag=float2(23.)), 17+23j)
self.assertRaises(TypeError, complex, float2(None))
class complex0(complex):
"""Test usage of __complex__() when inheriting from 'complex'"""
def __complex__(self):
return 42j
class complex1(complex):
"""Test usage of __complex__() with a __new__() method"""
def __new__(self, value=0j):
return complex.__new__(self, 2*value)
def __complex__(self):
return self
class complex2(complex):
"""Make sure that __complex__() calls fail if anything other than a
complex is returned"""
def __complex__(self):
return None
self.assertAlmostEqual(complex(complex0(1j)), 42j)
self.assertAlmostEqual(complex(complex1(1j)), 2j)
self.assertRaises(TypeError, complex, complex2(1j))
def test_subclass(self):
class xcomplex(complex):
def __add__(self,other):
return xcomplex(complex(self) + other)
__radd__ = __add__
def __sub__(self,other):
return xcomplex(complex(self) - other)
def __rsub__(self,other):
return xcomplex(other - complex(self))
def __mul__(self,other):
return xcomplex(complex(self) * other)
__rmul__ = __mul__
def __div__(self,other):
return xcomplex(complex(self) / other)
def __rdiv__(self,other):
return xcomplex(other / complex(self))
__truediv__ = __div__
__rtruediv__ = __rdiv__
def __floordiv__(self,other):
return xcomplex(complex(self) // other)
def __rfloordiv__(self,other):
return xcomplex(other // complex(self))
def __pow__(self,other):
return xcomplex(complex(self) ** other)
def __rpow__(self,other):
return xcomplex(other ** complex(self) )
def __mod__(self,other):
return xcomplex(complex(self) % other)
def __rmod__(self,other):
return xcomplex(other % complex(self))
infix_binops = ('+', '-', '*', '**', '%', '//', '/')
xcomplex_values = (xcomplex(1), xcomplex(123.0),
xcomplex(-10+2j), xcomplex(3+187j),
xcomplex(3-78j))
test_values = (1, 123.0, 10-19j, xcomplex(1+2j),
xcomplex(1+87j), xcomplex(10+90j))
for op in infix_binops:
for x in xcomplex_values:
for y in test_values:
a = 'x %s y' % op
b = 'y %s x' % op
self.assertTrue(type(eval(a)) is type(eval(b)) is xcomplex)
def test_hash(self):
for x in xrange(-30, 30):
self.assertEqual(hash(x), hash(complex(x, 0)))
x /= 3.0 # now check against floating point
self.assertEqual(hash(x), hash(complex(x, 0.)))
def test_abs(self):
nums = [complex(x/3., y/7.) for x in xrange(-9,9) for y in xrange(-9,9)]
for num in nums:
self.assertAlmostEqual((num.real**2 + num.imag**2) ** 0.5, abs(num))
def test_repr(self):
self.assertEqual(repr(1+6j), '(1+6j)')
self.assertEqual(repr(1-6j), '(1-6j)')
self.assertNotEqual(repr(-(1+0j)), '(-1+-0j)')
self.assertEqual(1-6j,complex(repr(1-6j)))
self.assertEqual(1+6j,complex(repr(1+6j)))
self.assertEqual(-6j,complex(repr(-6j)))
self.assertEqual(6j,complex(repr(6j)))
self.assertEqual(repr(complex(1., INF)), "(1+infj)")
self.assertEqual(repr(complex(1., -INF)), "(1-infj)")
self.assertEqual(repr(complex(INF, 1)), "(inf+1j)")
self.assertEqual(repr(complex(-INF, INF)), "(-inf+infj)")
self.assertEqual(repr(complex(NAN, 1)), "(nan+1j)")
self.assertEqual(repr(complex(1, NAN)), "(1+nanj)")
self.assertEqual(repr(complex(NAN, NAN)), "(nan+nanj)")
self.assertEqual(repr(complex(0, INF)), "infj")
self.assertEqual(repr(complex(0, -INF)), "-infj")
self.assertEqual(repr(complex(0, NAN)), "nanj")
def test_neg(self):
self.assertEqual(-(1+6j), -1-6j)
def test_file(self):
a = 3.33+4.43j
b = 5.1+2.3j
fo = None
try:
fo = open(test_support.TESTFN, "wb")
print >>fo, a, b
fo.close()
fo = open(test_support.TESTFN, "rb")
self.assertEqual(fo.read(), "%s %s\n" % (a, b))
finally:
if (fo is not None) and (not fo.closed):
fo.close()
test_support.unlink(test_support.TESTFN)
def test_getnewargs(self):
self.assertEqual((1+2j).__getnewargs__(), (1.0, 2.0))
self.assertEqual((1-2j).__getnewargs__(), (1.0, -2.0))
self.assertEqual((2j).__getnewargs__(), (0.0, 2.0))
self.assertEqual((-0j).__getnewargs__(), (0.0, -0.0))
self.assertEqual(complex(0, INF).__getnewargs__(), (0.0, INF))
self.assertEqual(complex(INF, 0).__getnewargs__(), (INF, 0.0))
if float.__getformat__("double").startswith("IEEE"):
def test_plus_minus_0j(self):
# test that the 0j and -0j literals are not collapsed into a single constant:
# the sign of the zero imaginary part must be preserved
z1, z2 = 0j, -0j
self.assertEqual(atan2(z1.imag, -1.), atan2(0., -1.))
self.assertEqual(atan2(z2.imag, -1.), atan2(-0., -1.))
@unittest.skipUnless(float.__getformat__("double").startswith("IEEE"),
"test requires IEEE 754 doubles")
def test_overflow(self):
self.assertEqual(complex("1e500"), complex(INF, 0.0))
self.assertEqual(complex("-1e500j"), complex(0.0, -INF))
self.assertEqual(complex("-1e500+1.8e308j"), complex(-INF, INF))
@unittest.skipUnless(float.__getformat__("double").startswith("IEEE"),
"test requires IEEE 754 doubles")
def test_repr_roundtrip(self):
vals = [0.0, 1e-500, 1e-315, 1e-200, 0.0123, 3.1415, 1e50, INF, NAN]
vals += [-v for v in vals]
# complex(repr(z)) should recover z exactly, even for complex
# numbers involving an infinity, nan, or negative zero
for x in vals:
for y in vals:
z = complex(x, y)
roundtrip = complex(repr(z))
self.assertFloatsAreIdentical(z.real, roundtrip.real)
self.assertFloatsAreIdentical(z.imag, roundtrip.imag)
# if we predefine some constants, then eval(repr(z)) should
# also work, except that it might change the sign of zeros
inf, nan = float('inf'), float('nan')
infj, nanj = complex(0.0, inf), complex(0.0, nan)
for x in vals:
for y in vals:
z = complex(x, y)
roundtrip = eval(repr(z))
# adding 0.0 has no effect beside changing -0.0 to 0.0
self.assertFloatsAreIdentical(0.0 + z.real,
0.0 + roundtrip.real)
self.assertFloatsAreIdentical(0.0 + z.imag,
0.0 + roundtrip.imag)
def test_format(self):
# empty format string is same as str()
self.assertEqual(format(1+3j, ''), str(1+3j))
self.assertEqual(format(1.5+3.5j, ''), str(1.5+3.5j))
self.assertEqual(format(3j, ''), str(3j))
self.assertEqual(format(3.2j, ''), str(3.2j))
self.assertEqual(format(3+0j, ''), str(3+0j))
self.assertEqual(format(3.2+0j, ''), str(3.2+0j))
# empty presentation type should still be analogous to str,
# even when format string is nonempty (issue #5920).
self.assertEqual(format(3.2+0j, '-'), str(3.2+0j))
self.assertEqual(format(3.2+0j, '<'), str(3.2+0j))
z = 4/7. - 100j/7.
self.assertEqual(format(z, ''), str(z))
self.assertEqual(format(z, '-'), str(z))
self.assertEqual(format(z, '<'), str(z))
self.assertEqual(format(z, '10'), str(z))
z = complex(0.0, 3.0)
self.assertEqual(format(z, ''), str(z))
self.assertEqual(format(z, '-'), str(z))
self.assertEqual(format(z, '<'), str(z))
self.assertEqual(format(z, '2'), str(z))
z = complex(-0.0, 2.0)
self.assertEqual(format(z, ''), str(z))
self.assertEqual(format(z, '-'), str(z))
self.assertEqual(format(z, '<'), str(z))
self.assertEqual(format(z, '3'), str(z))
self.assertEqual(format(1+3j, 'g'), '1+3j')
self.assertEqual(format(3j, 'g'), '0+3j')
self.assertEqual(format(1.5+3.5j, 'g'), '1.5+3.5j')
self.assertEqual(format(1.5+3.5j, '+g'), '+1.5+3.5j')
self.assertEqual(format(1.5-3.5j, '+g'), '+1.5-3.5j')
self.assertEqual(format(1.5-3.5j, '-g'), '1.5-3.5j')
self.assertEqual(format(1.5+3.5j, ' g'), ' 1.5+3.5j')
self.assertEqual(format(1.5-3.5j, ' g'), ' 1.5-3.5j')
self.assertEqual(format(-1.5+3.5j, ' g'), '-1.5+3.5j')
self.assertEqual(format(-1.5-3.5j, ' g'), '-1.5-3.5j')
self.assertEqual(format(-1.5-3.5e-20j, 'g'), '-1.5-3.5e-20j')
self.assertEqual(format(-1.5-3.5j, 'f'), '-1.500000-3.500000j')
self.assertEqual(format(-1.5-3.5j, 'F'), '-1.500000-3.500000j')
self.assertEqual(format(-1.5-3.5j, 'e'), '-1.500000e+00-3.500000e+00j')
self.assertEqual(format(-1.5-3.5j, '.2e'), '-1.50e+00-3.50e+00j')
self.assertEqual(format(-1.5-3.5j, '.2E'), '-1.50E+00-3.50E+00j')
self.assertEqual(format(-1.5e10-3.5e5j, '.2G'), '-1.5E+10-3.5E+05j')
self.assertEqual(format(1.5+3j, '<20g'), '1.5+3j ')
self.assertEqual(format(1.5+3j, '*<20g'), '1.5+3j**************')
self.assertEqual(format(1.5+3j, '>20g'), ' 1.5+3j')
self.assertEqual(format(1.5+3j, '^20g'), ' 1.5+3j ')
self.assertEqual(format(1.5+3j, '<20'), '(1.5+3j) ')
self.assertEqual(format(1.5+3j, '>20'), ' (1.5+3j)')
self.assertEqual(format(1.5+3j, '^20'), ' (1.5+3j) ')
self.assertEqual(format(1.123-3.123j, '^20.2'), ' (1.1-3.1j) ')
self.assertEqual(format(1.5+3j, '20.2f'), ' 1.50+3.00j')
self.assertEqual(format(1.5+3j, '>20.2f'), ' 1.50+3.00j')
self.assertEqual(format(1.5+3j, '<20.2f'), '1.50+3.00j ')
self.assertEqual(format(1.5e20+3j, '<20.2f'), '150000000000000000000.00+3.00j')
self.assertEqual(format(1.5e20+3j, '>40.2f'), ' 150000000000000000000.00+3.00j')
self.assertEqual(format(1.5e20+3j, '^40,.2f'), ' 150,000,000,000,000,000,000.00+3.00j ')
self.assertEqual(format(1.5e21+3j, '^40,.2f'), ' 1,500,000,000,000,000,000,000.00+3.00j ')
self.assertEqual(format(1.5e21+3000j, ',.2f'), '1,500,000,000,000,000,000,000.00+3,000.00j')
# alternate is invalid
self.assertRaises(ValueError, (1.5+0.5j).__format__, '#f')
# zero padding is invalid
self.assertRaises(ValueError, (1.5+0.5j).__format__, '010f')
# '=' alignment is invalid
self.assertRaises(ValueError, (1.5+3j).__format__, '=20')
# integer presentation types are an error
for t in 'bcdoxX':
self.assertRaises(ValueError, (1.5+0.5j).__format__, t)
# make sure everything works in ''.format()
self.assertEqual('*{0:.3f}*'.format(3.14159+2.71828j), '*3.142+2.718j*')
# issue 3382: 'f' and 'F' with inf's and nan's
self.assertEqual('{0:f}'.format(INF+0j), 'inf+0.000000j')
self.assertEqual('{0:F}'.format(INF+0j), 'INF+0.000000j')
self.assertEqual('{0:f}'.format(-INF+0j), '-inf+0.000000j')
self.assertEqual('{0:F}'.format(-INF+0j), '-INF+0.000000j')
self.assertEqual('{0:f}'.format(complex(INF, INF)), 'inf+infj')
self.assertEqual('{0:F}'.format(complex(INF, INF)), 'INF+INFj')
self.assertEqual('{0:f}'.format(complex(INF, -INF)), 'inf-infj')
self.assertEqual('{0:F}'.format(complex(INF, -INF)), 'INF-INFj')
self.assertEqual('{0:f}'.format(complex(-INF, INF)), '-inf+infj')
self.assertEqual('{0:F}'.format(complex(-INF, INF)), '-INF+INFj')
self.assertEqual('{0:f}'.format(complex(-INF, -INF)), '-inf-infj')
self.assertEqual('{0:F}'.format(complex(-INF, -INF)), '-INF-INFj')
self.assertEqual('{0:f}'.format(complex(NAN, 0)), 'nan+0.000000j')
self.assertEqual('{0:F}'.format(complex(NAN, 0)), 'NAN+0.000000j')
self.assertEqual('{0:f}'.format(complex(NAN, NAN)), 'nan+nanj')
self.assertEqual('{0:F}'.format(complex(NAN, NAN)), 'NAN+NANj')
def test_main():
with test_support.check_warnings(("complex divmod.., // and % are "
"deprecated", DeprecationWarning)):
test_support.run_unittest(ComplexTest)
if __name__ == "__main__":
test_main()
|
mit
|
sysadminmatmoz/OCB
|
addons/website_blog/tests/test_website_blog_flow.py
|
45
|
2781
|
# -*- coding: utf-8 -*-
from openerp.addons.website_blog.tests.common import TestWebsiteBlogCommon
class TestWebsiteBlogFlow(TestWebsiteBlogCommon):
def test_website_blog_followers(self):
""" Test the flow of followers and notifications for blogs. Intended
flow :
- people subscribe to a blog
- when creating a new post, nobody except the creator follows it
- people subscribed to the blog does not receive comments on posts
- when published, a notification is sent to all blog followers
- if someone subscribe to the post or comment it, it become follower
and receive notification for future comments. """
# Create a new blog, subscribe the employee to the blog
test_blog = self.env['blog.blog'].sudo(self.user_blogmanager).create({
'name': 'New Blog',
})
self.assertIn(
self.user_blogmanager.partner_id, test_blog.message_partner_ids,
'website_blog: blog creator should be in the blog followers')
test_blog.message_subscribe([self.user_employee.partner_id.id, self.user_public.partner_id.id])
# Create a new post, blog followers should not follow the post
test_blog_post = self.env['blog.post'].sudo(self.user_blogmanager).create({
'name': 'New Post',
'blog_id': test_blog.id,
})
self.assertNotIn(
self.user_employee.partner_id, test_blog_post.message_partner_ids,
'website_blog: subscribing to a blog should not subscribe to its posts')
self.assertNotIn(
self.user_public.partner_id, test_blog_post.message_partner_ids,
'website_blog: subscribing to a blog should not subscribe to its posts')
# Publish the blog
test_blog_post.write({'website_published': True})
# Check publish message has been sent to blog followers
publish_message = next((m for m in test_blog_post.blog_id.message_ids if m.subtype_id.id == self.ref('website_blog.mt_blog_blog_published')), None)
self.assertEqual(
publish_message.needaction_partner_ids,
self.user_employee.partner_id | self.user_public.partner_id,
'website_blog: people following a blog should be notified of a published post')
# Armand posts a message -> becomes follower
test_blog_post.sudo().message_post(
body='Armande BlogUser Commented',
message_type='comment',
author_id=self.user_employee.partner_id.id,
subtype='mt_comment',
)
self.assertIn(
self.user_employee.partner_id, test_blog_post.message_partner_ids,
'website_blog: people commenting a post should follow it afterwards')
|
agpl-3.0
|
AnishShah/tensorflow
|
tensorflow/contrib/keras/api/keras/datasets/imdb/__init__.py
|
39
|
1036
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""IMDB movie review sentiment classification dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.datasets.imdb import get_word_index
from tensorflow.python.keras.datasets.imdb import load_data
del absolute_import
del division
del print_function
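# A minimal usage sketch for the two re-exported helpers (illustrative only; the
# num_words argument is the commonly documented vocabulary cut-off):
#
#   from tensorflow.contrib.keras.api.keras.datasets import imdb
#   (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=10000)
#   word_index = imdb.get_word_index()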
|
apache-2.0
|
40223117cda/2015cdaw13
|
static/Brython3.1.0-20150301-090019/Lib/site-packages/pygame/locals.py
|
603
|
1141
|
## pygame - Python Game Library
## Copyright (C) 2000-2003 Pete Shinners
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## [email protected]
"""Set of functions from PyGame that are handy to have in
the local namespace for your module"""
from pygame.constants import *
from pygame.rect import Rect
import pygame.color as color
Color = color.Color
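# Typical usage sketch (illustrative only): a star import pulls the pygame
# constants and the Rect/Color names directly into the caller's namespace.
#
#   from pygame.locals import *
#   if event.type == QUIT:
#       running = False
#   elif event.type == KEYDOWN and event.key == K_ESCAPE:
#       running = False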
|
gpl-3.0
|
tsabi/Odoo-tsabi-fixes
|
addons/subscription/subscription.py
|
337
|
8906
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# TODO:
# Error treatment: exception, request, ... -> send request to user_id
import time
from openerp.osv import fields,osv
from openerp.tools.translate import _
class subscription_document(osv.osv):
_name = "subscription.document"
_description = "Subscription Document"
_columns = {
'name': fields.char('Name', required=True),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the subscription document without removing it."),
'model': fields.many2one('ir.model', 'Object', required=True),
'field_ids': fields.one2many('subscription.document.fields', 'document_id', 'Fields', copy=True)
}
_defaults = {
'active' : lambda *a: True,
}
class subscription_document_fields(osv.osv):
_name = "subscription.document.fields"
_description = "Subscription Document Fields"
_rec_name = 'field'
_columns = {
'field': fields.many2one('ir.model.fields', 'Field', domain="[('model_id', '=', parent.model)]", required=True),
'value': fields.selection([('false','False'),('date','Current Date')], 'Default Value', size=40, help="Default value used for the field when a new document is generated."),
'document_id': fields.many2one('subscription.document', 'Subscription Document', ondelete='cascade'),
}
_defaults = {}
def _get_document_types(self, cr, uid, context=None):
cr.execute('select m.model, s.name from subscription_document s, ir_model m WHERE s.model = m.id order by s.name')
return cr.fetchall()
class subscription_subscription(osv.osv):
_name = "subscription.subscription"
_description = "Subscription"
_columns = {
'name': fields.char('Name', required=True),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the subscription without removing it."),
'partner_id': fields.many2one('res.partner', 'Partner'),
'notes': fields.text('Internal Notes'),
'user_id': fields.many2one('res.users', 'User', required=True),
'interval_number': fields.integer('Interval Qty'),
'interval_type': fields.selection([('days', 'Days'), ('weeks', 'Weeks'), ('months', 'Months')], 'Interval Unit'),
'exec_init': fields.integer('Number of documents'),
'date_init': fields.datetime('First Date'),
'state': fields.selection([('draft','Draft'),('running','Running'),('done','Done')], 'Status', copy=False),
'doc_source': fields.reference('Source Document', required=True, selection=_get_document_types, size=128, help="User can choose the source document on which he wants to create documents"),
'doc_lines': fields.one2many('subscription.subscription.history', 'subscription_id', 'Documents created', readonly=True),
'cron_id': fields.many2one('ir.cron', 'Cron Job', help="Scheduler which runs on subscription", states={'running':[('readonly',True)], 'done':[('readonly',True)]}),
'note': fields.text('Notes', help="Description or Summary of Subscription"),
}
_defaults = {
'date_init': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'user_id': lambda obj,cr,uid,context: uid,
'active': lambda *a: True,
'interval_number': lambda *a: 1,
'interval_type': lambda *a: 'months',
'doc_source': lambda *a: False,
'state': lambda *a: 'draft'
}
def _auto_end(self, cr, context=None):
super(subscription_subscription, self)._auto_end(cr, context=context)
# drop the FK from subscription to ir.cron, as it would cause deadlocks
# during cron job execution. When model_copy() tries to write() on the subscription,
# it has to wait for an ExclusiveLock on the cron job record, but the latter
# is locked by the cron system for the duration of the job!
# FIXME: the subscription module should be reviewed to simplify the scheduling process
# and to use a unique cron job for all subscriptions, so that it never needs to
# be updated during its execution.
cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (self._table, '%s_cron_id_fkey' % self._table))
def set_process(self, cr, uid, ids, context=None):
for row in self.read(cr, uid, ids, context=context):
mapping = {'name':'name','interval_number':'interval_number','interval_type':'interval_type','exec_init':'numbercall','date_init':'nextcall'}
res = {'model':'subscription.subscription', 'args': repr([[row['id']]]), 'function':'model_copy', 'priority':6, 'user_id':row['user_id'] and row['user_id'][0]}
for key,value in mapping.items():
res[value] = row[key]
id = self.pool.get('ir.cron').create(cr, uid, res)
self.write(cr, uid, [row['id']], {'cron_id':id, 'state':'running'})
return True
def model_copy(self, cr, uid, ids, context=None):
for row in self.read(cr, uid, ids, context=context):
if not row.get('cron_id',False):
continue
cron_ids = [row['cron_id'][0]]
remaining = self.pool.get('ir.cron').read(cr, uid, cron_ids, ['numbercall'])[0]['numbercall']
try:
(model_name, id) = row['doc_source'].split(',')
id = int(id)
model = self.pool[model_name]
except:
raise osv.except_osv(_('Wrong Source Document!'), _('Please provide another source document.\nThis one does not exist!'))
default = {'state':'draft'}
doc_obj = self.pool.get('subscription.document')
document_ids = doc_obj.search(cr, uid, [('model.model','=',model_name)])
doc = doc_obj.browse(cr, uid, document_ids)[0]
for f in doc.field_ids:
if f.value=='date':
value = time.strftime('%Y-%m-%d')
else:
value = False
default[f.field.name] = value
state = 'running'
# if there was only one remaining document to generate
# the subscription is over and we mark it as being done
if remaining == 1:
state = 'done'
id = self.pool[model_name].copy(cr, uid, id, default, context)
self.pool.get('subscription.subscription.history').create(cr, uid, {'subscription_id': row['id'], 'date':time.strftime('%Y-%m-%d %H:%M:%S'), 'document_id': model_name+','+str(id)})
self.write(cr, uid, [row['id']], {'state':state})
return True
def unlink(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids, context or {}):
if record.state=="running":
raise osv.except_osv(_('Error!'),_('You cannot delete an active subscription!'))
return super(subscription_subscription, self).unlink(cr, uid, ids, context)
def set_done(self, cr, uid, ids, context=None):
res = self.read(cr,uid, ids, ['cron_id'])
ids2 = [x['cron_id'][0] for x in res if x['cron_id']]
self.pool.get('ir.cron').write(cr, uid, ids2, {'active':False})
self.write(cr, uid, ids, {'state':'done'})
return True
def set_draft(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state':'draft'})
return True
class subscription_subscription_history(osv.osv):
_name = "subscription.subscription.history"
_description = "Subscription history"
_rec_name = 'date'
_columns = {
'date': fields.datetime('Date'),
'subscription_id': fields.many2one('subscription.subscription', 'Subscription', ondelete='cascade'),
'document_id': fields.reference('Source Document', required=True, selection=_get_document_types, size=128),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
Novasoft-India/OperERP-AM-Motors
|
openerp/addons/account_asset/account_asset.py
|
14
|
29289
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
class account_asset_category(osv.osv):
_name = 'account.asset.category'
_description = 'Asset category'
_columns = {
'name': fields.char('Name', size=64, required=True, select=1),
'note': fields.text('Note'),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic account'),
'account_asset_id': fields.many2one('account.account', 'Asset Account', required=True),
'account_depreciation_id': fields.many2one('account.account', 'Depreciation Account', required=True),
'account_expense_depreciation_id': fields.many2one('account.account', 'Depr. Expense Account', required=True),
'journal_id': fields.many2one('account.journal', 'Journal', required=True),
'company_id': fields.many2one('res.company', 'Company', required=True),
'method': fields.selection([('linear','Linear'),('degressive','Degressive')], 'Computation Method', required=True, help="Choose the method to use to compute the amount of depreciation lines.\n"\
" * Linear: Calculated on basis of: Gross Value / Number of Depreciations\n" \
" * Degressive: Calculated on basis of: Residual Value * Degressive Factor"),
'method_number': fields.integer('Number of Depreciations', help="The number of depreciations needed to depreciate your asset"),
'method_period': fields.integer('Period Length', help="State here the time between 2 depreciations, in months", required=True),
'method_progress_factor': fields.float('Degressive Factor'),
'method_time': fields.selection([('number','Number of Depreciations'),('end','Ending Date')], 'Time Method', required=True,
help="Choose the method to use to compute the dates and number of depreciation lines.\n"\
" * Number of Depreciations: Fix the number of depreciation lines and the time between 2 depreciations.\n" \
" * Ending Date: Choose the time between 2 depreciations and the date the depreciations won't go beyond."),
'method_end': fields.date('Ending date'),
'prorata':fields.boolean('Prorata Temporis', help='Indicates that the first depreciation entry for this asset has to be done from the purchase date instead of the first of January'),
'open_asset': fields.boolean('Skip Draft State', help="Check this if you want to automatically confirm the assets of this category when created by invoices."),
}
_defaults = {
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'account.asset.category', context=context),
'method': 'linear',
'method_number': 5,
'method_time': 'number',
'method_period': 12,
'method_progress_factor': 0.3,
}
def onchange_account_asset(self, cr, uid, ids, account_asset_id, context=None):
res = {'value':{}}
if account_asset_id:
res['value'] = {'account_depreciation_id': account_asset_id}
return res
account_asset_category()
class account_asset_asset(osv.osv):
_name = 'account.asset.asset'
_description = 'Asset'
def unlink(self, cr, uid, ids, context=None):
for asset in self.browse(cr, uid, ids, context=context):
if asset.account_move_line_ids:
raise osv.except_osv(_('Error!'), _('You cannot delete an asset that contains posted depreciation lines.'))
return super(account_asset_asset, self).unlink(cr, uid, ids, context=context)
def _get_period(self, cr, uid, context=None):
ctx = dict(context or {}, account_period_prefer_normal=True)
periods = self.pool.get('account.period').find(cr, uid, context=ctx)
if periods:
return periods[0]
else:
return False
def _get_last_depreciation_date(self, cr, uid, ids, context=None):
"""
@param ids: ids of account.asset.asset objects
@return: Returns a dictionary of the effective dates of the last depreciation entry made for given asset ids. If there isn't any, return the purchase date of this asset
"""
cr.execute("""
SELECT a.id as id, COALESCE(MAX(l.date),a.purchase_date) AS date
FROM account_asset_asset a
LEFT JOIN account_move_line l ON (l.asset_id = a.id)
WHERE a.id IN %s
GROUP BY a.id, a.purchase_date """, (tuple(ids),))
return dict(cr.fetchall())
def _compute_board_amount(self, cr, uid, asset, i, residual_amount, amount_to_depr, undone_dotation_number, posted_depreciation_line_ids, total_days, depreciation_date, context=None):
#by default amount = 0
amount = 0
if i == undone_dotation_number:
amount = residual_amount
else:
if asset.method == 'linear':
amount = amount_to_depr / (undone_dotation_number - len(posted_depreciation_line_ids))
if asset.prorata:
amount = amount_to_depr / asset.method_number
days = total_days - float(depreciation_date.strftime('%j'))
if i == 1:
amount = (amount_to_depr / asset.method_number) / total_days * days
elif i == undone_dotation_number:
amount = (amount_to_depr / asset.method_number) / total_days * (total_days - days)
elif asset.method == 'degressive':
amount = residual_amount * asset.method_progress_factor
if asset.prorata:
days = total_days - float(depreciation_date.strftime('%j'))
if i == 1:
amount = (residual_amount * asset.method_progress_factor) / total_days * days
elif i == undone_dotation_number:
amount = (residual_amount * asset.method_progress_factor) / total_days * (total_days - days)
return amount
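# Simplified sketch of the two computation methods above, ignoring prorata and
# currency conversion (figures are illustrative: gross value 1000.0, factor 0.3,
# 5 depreciations, no salvage value):
def _sketch_linear(gross, n):
    # linear: Gross Value / Number of Depreciations, identical lines
    return [gross / float(n)] * n
def _sketch_degressive(gross, factor, n):
    # degressive: Residual Value * Degressive Factor, last line takes the remainder
    lines, residual = [], gross
    for i in range(n):
        amount = residual if i == n - 1 else residual * factor
        lines.append(amount)
        residual -= amount
    return lines
# _sketch_linear(1000.0, 5)          -> [200.0, 200.0, 200.0, 200.0, 200.0]
# _sketch_degressive(1000.0, 0.3, 5) -> approximately [300.0, 210.0, 147.0, 102.9, 240.1]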
def _compute_board_undone_dotation_nb(self, cr, uid, asset, depreciation_date, total_days, context=None):
undone_dotation_number = asset.method_number
if asset.method_time == 'end':
end_date = datetime.strptime(asset.method_end, '%Y-%m-%d')
undone_dotation_number = 0
while depreciation_date <= end_date:
depreciation_date = (datetime(depreciation_date.year, depreciation_date.month, depreciation_date.day) + relativedelta(months=+asset.method_period))
undone_dotation_number += 1
if asset.prorata:
undone_dotation_number += 1
return undone_dotation_number
def compute_depreciation_board(self, cr, uid, ids, context=None):
depreciation_lin_obj = self.pool.get('account.asset.depreciation.line')
currency_obj = self.pool.get('res.currency')
for asset in self.browse(cr, uid, ids, context=context):
if asset.value_residual == 0.0:
continue
posted_depreciation_line_ids = depreciation_lin_obj.search(cr, uid, [('asset_id', '=', asset.id), ('move_check', '=', True)],order='depreciation_date desc')
old_depreciation_line_ids = depreciation_lin_obj.search(cr, uid, [('asset_id', '=', asset.id), ('move_id', '=', False)])
if old_depreciation_line_ids:
depreciation_lin_obj.unlink(cr, uid, old_depreciation_line_ids, context=context)
amount_to_depr = residual_amount = asset.value_residual
if asset.prorata:
depreciation_date = datetime.strptime(self._get_last_depreciation_date(cr, uid, [asset.id], context)[asset.id], '%Y-%m-%d')
else:
# depreciation_date = 1st January of purchase year
purchase_date = datetime.strptime(asset.purchase_date, '%Y-%m-%d')
#if we already have some previous validated entries, starting date isn't 1st January but last entry + method period
if (len(posted_depreciation_line_ids)>0):
last_depreciation_date = datetime.strptime(depreciation_lin_obj.browse(cr,uid,posted_depreciation_line_ids[0],context=context).depreciation_date, '%Y-%m-%d')
depreciation_date = (last_depreciation_date+relativedelta(months=+asset.method_period))
else:
depreciation_date = datetime(purchase_date.year, 1, 1)
day = depreciation_date.day
month = depreciation_date.month
year = depreciation_date.year
total_days = (year % 4) and 365 or 366
undone_dotation_number = self._compute_board_undone_dotation_nb(cr, uid, asset, depreciation_date, total_days, context=context)
for x in range(len(posted_depreciation_line_ids), undone_dotation_number):
i = x + 1
amount = self._compute_board_amount(cr, uid, asset, i, residual_amount, amount_to_depr, undone_dotation_number, posted_depreciation_line_ids, total_days, depreciation_date, context=context)
company_currency = asset.company_id.currency_id.id
current_currency = asset.currency_id.id
# compute amount into company currency
amount = currency_obj.compute(cr, uid, current_currency, company_currency, amount, context=context)
residual_amount -= amount
vals = {
'amount': amount,
'asset_id': asset.id,
'sequence': i,
'name': str(asset.id) +'/' + str(i),
'remaining_value': residual_amount,
'depreciated_value': (asset.purchase_value - asset.salvage_value) - (residual_amount + amount),
'depreciation_date': depreciation_date.strftime('%Y-%m-%d'),
}
depreciation_lin_obj.create(cr, uid, vals, context=context)
# Considering Depr. Period as months
depreciation_date = (datetime(year, month, day) + relativedelta(months=+asset.method_period))
day = depreciation_date.day
month = depreciation_date.month
year = depreciation_date.year
return True
def validate(self, cr, uid, ids, context=None):
if context is None:
context = {}
return self.write(cr, uid, ids, {
'state':'open'
}, context)
def set_to_close(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'close'}, context=context)
def set_to_draft(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'draft'}, context=context)
def _amount_residual(self, cr, uid, ids, name, args, context=None):
cr.execute("""SELECT
l.asset_id as id, SUM(abs(l.debit-l.credit)) AS amount
FROM
account_move_line l
WHERE
l.asset_id IN %s GROUP BY l.asset_id """, (tuple(ids),))
res=dict(cr.fetchall())
for asset in self.browse(cr, uid, ids, context):
res[asset.id] = asset.purchase_value - res.get(asset.id, 0.0) - asset.salvage_value
for id in ids:
res.setdefault(id, 0.0)
return res
def onchange_company_id(self, cr, uid, ids, company_id=False, context=None):
val = {}
if company_id:
company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
if company.currency_id.company_id and company.currency_id.company_id.id != company_id:
val['currency_id'] = False
else:
val['currency_id'] = company.currency_id.id
return {'value': val}
def onchange_purchase_salvage_value(self, cr, uid, ids, purchase_value, salvage_value, context=None):
val = {}
for asset in self.browse(cr, uid, ids, context=context):
if purchase_value:
val['value_residual'] = purchase_value - salvage_value
if salvage_value:
val['value_residual'] = purchase_value - salvage_value
return {'value': val}
_columns = {
'account_move_line_ids': fields.one2many('account.move.line', 'asset_id', 'Entries', readonly=True, states={'draft':[('readonly',False)]}),
'name': fields.char('Asset Name', size=64, required=True, readonly=True, states={'draft':[('readonly',False)]}),
'code': fields.char('Reference', size=32, readonly=True, states={'draft':[('readonly',False)]}),
'purchase_value': fields.float('Gross Value', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'currency_id': fields.many2one('res.currency','Currency',required=True, readonly=True, states={'draft':[('readonly',False)]}),
'company_id': fields.many2one('res.company', 'Company', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'note': fields.text('Note'),
'category_id': fields.many2one('account.asset.category', 'Asset Category', required=True, change_default=True, readonly=True, states={'draft':[('readonly',False)]}),
'parent_id': fields.many2one('account.asset.asset', 'Parent Asset', readonly=True, states={'draft':[('readonly',False)]}),
'child_ids': fields.one2many('account.asset.asset', 'parent_id', 'Children Assets'),
'purchase_date': fields.date('Purchase Date', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'state': fields.selection([('draft','Draft'),('open','Running'),('close','Close')], 'Status', required=True,
help="When an asset is created, the status is 'Draft'.\n" \
"If the asset is confirmed, the status goes in 'Running' and the depreciation lines can be posted in the accounting.\n" \
"You can manually close an asset when the depreciation is over. If the last line of depreciation is posted, the asset automatically goes in that status."),
'active': fields.boolean('Active'),
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True, states={'draft':[('readonly',False)]}),
'method': fields.selection([('linear','Linear'),('degressive','Degressive')], 'Computation Method', required=True, readonly=True, states={'draft':[('readonly',False)]}, help="Choose the method to use to compute the amount of depreciation lines.\n"\
" * Linear: Calculated on basis of: Gross Value / Number of Depreciations\n" \
" * Degressive: Calculated on basis of: Residual Value * Degressive Factor"),
'method_number': fields.integer('Number of Depreciations', readonly=True, states={'draft':[('readonly',False)]}, help="The number of depreciations needed to depreciate your asset"),
'method_period': fields.integer('Number of Months in a Period', required=True, readonly=True, states={'draft':[('readonly',False)]}, help="The amount of time between two depreciations, in months"),
'method_end': fields.date('Ending Date', readonly=True, states={'draft':[('readonly',False)]}),
'method_progress_factor': fields.float('Degressive Factor', readonly=True, states={'draft':[('readonly',False)]}),
'value_residual': fields.function(_amount_residual, method=True, digits_compute=dp.get_precision('Account'), string='Residual Value'),
'method_time': fields.selection([('number','Number of Depreciations'),('end','Ending Date')], 'Time Method', required=True, readonly=True, states={'draft':[('readonly',False)]},
help="Choose the method to use to compute the dates and number of depreciation lines.\n"\
" * Number of Depreciations: Fix the number of depreciation lines and the time between 2 depreciations.\n" \
" * Ending Date: Choose the time between 2 depreciations and the date the depreciations won't go beyond."),
'prorata':fields.boolean('Prorata Temporis', readonly=True, states={'draft':[('readonly',False)]}, help='Indicates that the first depreciation entry for this asset has to be done from the purchase date instead of the first of January'),
'history_ids': fields.one2many('account.asset.history', 'asset_id', 'History', readonly=True),
'depreciation_line_ids': fields.one2many('account.asset.depreciation.line', 'asset_id', 'Depreciation Lines', readonly=True, states={'draft':[('readonly',False)],'open':[('readonly',False)]}),
'salvage_value': fields.float('Salvage Value', digits_compute=dp.get_precision('Account'), help="It is the amount you plan to have that you cannot depreciate.", readonly=True, states={'draft':[('readonly',False)]}),
}
_defaults = {
'code': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'account.asset.code'),
'purchase_date': lambda obj, cr, uid, context: time.strftime('%Y-%m-%d'),
'active': True,
'state': 'draft',
'method': 'linear',
'method_number': 5,
'method_time': 'number',
'method_period': 12,
'method_progress_factor': 0.3,
'currency_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.currency_id.id,
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'account.asset.asset',context=context),
}
def _check_recursion(self, cr, uid, ids, context=None, parent=None):
return super(account_asset_asset, self)._check_recursion(cr, uid, ids, context=context, parent=parent)
def _check_prorata(self, cr, uid, ids, context=None):
for asset in self.browse(cr, uid, ids, context=context):
if asset.prorata and asset.method_time != 'number':
return False
return True
_constraints = [
(_check_recursion, 'Error ! You cannot create recursive assets.', ['parent_id']),
(_check_prorata, 'Prorata temporis can be applied only for time method "number of depreciations".', ['prorata']),
]
def onchange_category_id(self, cr, uid, ids, category_id, context=None):
res = {'value':{}}
asset_categ_obj = self.pool.get('account.asset.category')
if category_id:
category_obj = asset_categ_obj.browse(cr, uid, category_id, context=context)
res['value'] = {
'method': category_obj.method,
'method_number': category_obj.method_number,
'method_time': category_obj.method_time,
'method_period': category_obj.method_period,
'method_progress_factor': category_obj.method_progress_factor,
'method_end': category_obj.method_end,
'prorata': category_obj.prorata,
}
return res
def onchange_method_time(self, cr, uid, ids, method_time='number', context=None):
res = {'value': {}}
if method_time != 'number':
res['value'] = {'prorata': False}
return res
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
if context is None:
context = {}
default.update({'depreciation_line_ids': [], 'state': 'draft'})
return super(account_asset_asset, self).copy(cr, uid, id, default, context=context)
def _compute_entries(self, cr, uid, ids, period_id, context=None):
result = []
period_obj = self.pool.get('account.period')
depreciation_obj = self.pool.get('account.asset.depreciation.line')
period = period_obj.browse(cr, uid, period_id, context=context)
depreciation_ids = depreciation_obj.search(cr, uid, [('asset_id', 'in', ids), ('depreciation_date', '<=', period.date_stop), ('depreciation_date', '>=', period.date_start), ('move_check', '=', False)], context=context)
if context is None:
context = {}
context.update({'depreciation_date':period.date_stop})
return depreciation_obj.create_move(cr, uid, depreciation_ids, context=context)
def create(self, cr, uid, vals, context=None):
asset_id = super(account_asset_asset, self).create(cr, uid, vals, context=context)
self.compute_depreciation_board(cr, uid, [asset_id], context=context)
return asset_id
def open_entries(self, cr, uid, ids, context=None):
if context is None:
context = {}
context.update({'search_default_asset_id': ids, 'default_asset_id': ids})
return {
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.move.line',
'view_id': False,
'type': 'ir.actions.act_window',
'context': context,
}
account_asset_asset()
class account_asset_depreciation_line(osv.osv):
_name = 'account.asset.depreciation.line'
_description = 'Asset depreciation line'
def _get_move_check(self, cr, uid, ids, name, args, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = bool(line.move_id)
return res
_columns = {
'name': fields.char('Depreciation Name', size=64, required=True, select=1),
'sequence': fields.integer('Sequence', required=True),
'asset_id': fields.many2one('account.asset.asset', 'Asset', required=True, ondelete='cascade'),
'parent_state': fields.related('asset_id', 'state', type='char', string='State of Asset'),
'amount': fields.float('Current Depreciation', digits_compute=dp.get_precision('Account'), required=True),
'remaining_value': fields.float('Next Period Depreciation', digits_compute=dp.get_precision('Account'),required=True),
'depreciated_value': fields.float('Amount Already Depreciated', required=True),
'depreciation_date': fields.date('Depreciation Date', select=1),
'move_id': fields.many2one('account.move', 'Depreciation Entry'),
'move_check': fields.function(_get_move_check, method=True, type='boolean', string='Posted', store=True)
}
def create_move(self, cr, uid, ids, context=None):
can_close = False
if context is None:
context = {}
asset_obj = self.pool.get('account.asset.asset')
period_obj = self.pool.get('account.period')
move_obj = self.pool.get('account.move')
move_line_obj = self.pool.get('account.move.line')
currency_obj = self.pool.get('res.currency')
created_move_ids = []
asset_ids = []
for line in self.browse(cr, uid, ids, context=context):
depreciation_date = context.get('depreciation_date') or time.strftime('%Y-%m-%d')
ctx = dict(context, account_period_prefer_normal=True)
period_ids = period_obj.find(cr, uid, depreciation_date, context=ctx)
company_currency = line.asset_id.company_id.currency_id.id
current_currency = line.asset_id.currency_id.id
context.update({'date': depreciation_date})
amount = currency_obj.compute(cr, uid, current_currency, company_currency, line.amount, context=context)
sign = (line.asset_id.category_id.journal_id.type == 'purchase' and 1) or -1
asset_name = line.asset_id.name
reference = line.name
move_vals = {
'name': asset_name,
'date': depreciation_date,
'ref': reference,
'period_id': period_ids and period_ids[0] or False,
'journal_id': line.asset_id.category_id.journal_id.id,
}
move_id = move_obj.create(cr, uid, move_vals, context=context)
journal_id = line.asset_id.category_id.journal_id.id
partner_id = line.asset_id.partner_id.id
move_line_obj.create(cr, uid, {
'name': asset_name,
'ref': reference,
'move_id': move_id,
'account_id': line.asset_id.category_id.account_depreciation_id.id,
'debit': 0.0,
'credit': amount,
'period_id': period_ids and period_ids[0] or False,
'journal_id': journal_id,
'partner_id': partner_id,
'currency_id': company_currency != current_currency and current_currency or False,
'amount_currency': company_currency != current_currency and - sign * line.amount or 0.0,
'date': depreciation_date,
})
move_line_obj.create(cr, uid, {
'name': asset_name,
'ref': reference,
'move_id': move_id,
'account_id': line.asset_id.category_id.account_expense_depreciation_id.id,
'credit': 0.0,
'debit': amount,
'period_id': period_ids and period_ids[0] or False,
'journal_id': journal_id,
'partner_id': partner_id,
'currency_id': company_currency != current_currency and current_currency or False,
'amount_currency': company_currency != current_currency and sign * line.amount or 0.0,
'analytic_account_id': line.asset_id.category_id.account_analytic_id.id,
'date': depreciation_date,
'asset_id': line.asset_id.id
})
self.write(cr, uid, line.id, {'move_id': move_id}, context=context)
created_move_ids.append(move_id)
asset_ids.append(line.asset_id.id)
# we re-evaluate the assets to determine whether we can close them
for asset in asset_obj.browse(cr, uid, list(set(asset_ids)), context=context):
if currency_obj.is_zero(cr, uid, asset.currency_id, asset.value_residual):
asset.write({'state': 'close'})
return created_move_ids
account_asset_depreciation_line()
class account_move_line(osv.osv):
_inherit = 'account.move.line'
_columns = {
'asset_id': fields.many2one('account.asset.asset', 'Asset', ondelete="restrict"),
'entry_ids': fields.one2many('account.move.line', 'asset_id', 'Entries', readonly=True, states={'draft':[('readonly',False)]}),
}
account_move_line()
class account_asset_history(osv.osv):
_name = 'account.asset.history'
_description = 'Asset history'
_columns = {
'name': fields.char('History name', size=64, select=1),
'user_id': fields.many2one('res.users', 'User', required=True),
'date': fields.date('Date', required=True),
'asset_id': fields.many2one('account.asset.asset', 'Asset', required=True),
'method_time': fields.selection([('number','Number of Depreciations'),('end','Ending Date')], 'Time Method', required=True,
help="The method to use to compute the dates and number of depreciation lines.\n"\
"Number of Depreciations: Fix the number of depreciation lines and the time between 2 depreciations.\n" \
"Ending Date: Choose the time between 2 depreciations and the date the depreciations won't go beyond."),
'method_number': fields.integer('Number of Depreciations', help="The number of depreciations needed to depreciate your asset"),
'method_period': fields.integer('Period Length', help="Time in month between two depreciations"),
'method_end': fields.date('Ending date'),
'note': fields.text('Note'),
}
_order = 'date desc'
_defaults = {
'date': lambda *args: time.strftime('%Y-%m-%d'),
'user_id': lambda self, cr, uid, ctx: uid
}
account_asset_history()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
dset0x/invenio
|
invenio/modules/oauth2server/upgrades/oauth2server_2015_07_14_innodb.py
|
2
|
1644
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Fixes foreign key relationship."""
from invenio.ext.sqlalchemy import db
from invenio.modules.upgrader.api import op
depends_on = ['invenio_2015_03_03_tag_value']
def info():
"""Return upgrade recipe information."""
return "Fixes foreign key relationship."
def do_upgrade():
"""Carry out the upgrade."""
op.alter_column(
table_name='oauth2TOKEN',
column_name='client_id',
type_=db.String(255),
existing_nullable=False
)
op.alter_column(
table_name='oauth2TOKEN',
column_name='user_id',
type_=db.Integer(15, unsigned=True),
existing_nullable=False
)
def estimate():
"""Estimate running time of upgrade in seconds (optional)."""
return 1
def pre_upgrade():
"""Pre-upgrade checks."""
pass
def post_upgrade():
"""Post-upgrade checks."""
pass
|
gpl-2.0
|
caot/intellij-community
|
python/lib/Lib/site-packages/django/contrib/gis/tests/distapp/models.py
|
406
|
1832
|
from django.contrib.gis.db import models
class SouthTexasCity(models.Model):
"City model on projected coordinate system for South Texas."
name = models.CharField(max_length=30)
point = models.PointField(srid=32140)
objects = models.GeoManager()
def __unicode__(self): return self.name
class SouthTexasCityFt(models.Model):
"Same City model as above, but U.S. survey feet are the units."
name = models.CharField(max_length=30)
point = models.PointField(srid=2278)
objects = models.GeoManager()
def __unicode__(self): return self.name
class AustraliaCity(models.Model):
"City model for Australia, using WGS84."
name = models.CharField(max_length=30)
point = models.PointField()
objects = models.GeoManager()
def __unicode__(self): return self.name
class CensusZipcode(models.Model):
"Model for a few South Texas ZIP codes (in original Census NAD83)."
name = models.CharField(max_length=5)
poly = models.PolygonField(srid=4269)
objects = models.GeoManager()
def __unicode__(self): return self.name
class SouthTexasZipcode(models.Model):
"Model for a few South Texas ZIP codes."
name = models.CharField(max_length=5)
poly = models.PolygonField(srid=32140, null=True)
objects = models.GeoManager()
def __unicode__(self): return self.name
class Interstate(models.Model):
"Geodetic model for U.S. Interstates."
name = models.CharField(max_length=10)
path = models.LineStringField()
objects = models.GeoManager()
def __unicode__(self): return self.name
class SouthTexasInterstate(models.Model):
"Projected model for South Texas Interstates."
name = models.CharField(max_length=10)
path = models.LineStringField(srid=32140)
objects = models.GeoManager()
def __unicode__(self): return self.name
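# Hedged usage sketch (not part of the original test models): the kind of
# distance query these models exist to exercise. The point coordinates below
# are illustrative assumptions.
#
#   from django.contrib.gis.geos import Point
#   from django.contrib.gis.measure import D
#
#   houston = Point(-95.363151, 29.763374, srid=4326)
#   nearby = SouthTexasCity.objects.filter(
#       point__distance_lte=(houston, D(km=50)))
#   # GeoDjango transforms the WGS84 point to the field's SRID (32140)
#   # before evaluating the distance lookup.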
|
apache-2.0
|
xodus7/tensorflow
|
tensorflow/contrib/learn/python/learn/learn_io/graph_io.py
|
39
|
37211
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to read data in the graph (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.input_pipeline.python.ops import input_pipeline_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.summary import summary
from tensorflow.python.training import input as input_ops
from tensorflow.python.training import queue_runner
from tensorflow.python.util.deprecation import deprecated
# Default name for key in the feature dict.
KEY_FEATURE_NAME = '__key__'
@deprecated(None, 'Use tf.data.')
def read_batch_examples(file_pattern,
batch_size,
reader,
randomize_input=True,
num_epochs=None,
queue_capacity=10000,
num_threads=1,
read_batch_size=1,
parse_fn=None,
name=None,
seed=None):
"""Adds operations to read, queue, batch `Example` protos.
Given file pattern (or list of files), will setup a queue for file names,
read `Example` proto using provided `reader`, use batch queue to create
batches of examples of size `batch_size`.
All queue runners are added to the queue runners collection, and may be
started via `start_queue_runners`.
All ops are added to the default graph.
Use `parse_fn` if you need to do parsing / processing on single examples.
Args:
file_pattern: List of files or patterns of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int or scalar `Tensor` specifying the batch size to use.
reader: A function or class that returns an object with
`read` method, (filename tensor) -> (example tensor).
randomize_input: Whether the input should be randomized.
num_epochs: Integer specifying the number of times to read through the
dataset. If `None`, cycles through the dataset forever.
NOTE - If specified, creates a variable that must be initialized, so call
`tf.local_variables_initializer()` and run the op in a session.
queue_capacity: Capacity for input queue.
num_threads: The number of threads enqueuing examples. In order to have
predictable and repeatable order of reading and enqueueing, such as in
prediction and evaluation mode, `num_threads` should be 1.
read_batch_size: An int or scalar `Tensor` specifying the number of
records to read at once.
parse_fn: Parsing function, takes `Example` Tensor returns parsed
representation. If `None`, no parsing is done.
name: Name of resulting op.
seed: An integer (optional). Seed used if randomize_input == True.
Returns:
String `Tensor` of batched `Example` proto.
Raises:
ValueError: for invalid inputs.
"""
_, examples = read_keyed_batch_examples(
file_pattern=file_pattern,
batch_size=batch_size,
reader=reader,
randomize_input=randomize_input,
num_epochs=num_epochs,
queue_capacity=queue_capacity,
num_threads=num_threads,
read_batch_size=read_batch_size,
parse_fn=parse_fn,
name=name,
seed=seed)
return examples
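# Hedged usage sketch (not part of the original module): one plausible call to
# read_batch_examples with a TFRecord reader. The file pattern is an assumed
# placeholder, not a path from this codebase.
#
#   examples = read_batch_examples(
#       file_pattern='/tmp/data/train-*.tfrecord',  # assumed glob
#       batch_size=32,
#       reader=io_ops.TFRecordReader,
#       randomize_input=True,
#       num_epochs=1,  # creates a local variable; run
#                      # tf.local_variables_initializer() before reading
#       queue_capacity=10000)
#   # `examples` is a string Tensor of serialized `Example` protos; start the
#   # queue runners (tf.train.start_queue_runners) before evaluating it.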
@deprecated(None, 'Use tf.data.')
def read_keyed_batch_examples(file_pattern,
batch_size,
reader,
randomize_input=True,
num_epochs=None,
queue_capacity=10000,
num_threads=1,
read_batch_size=1,
parse_fn=None,
name=None,
seed=None):
"""Adds operations to read, queue, batch `Example` protos.
Given file pattern (or list of files), will setup a queue for file names,
read `Example` proto using provided `reader`, use batch queue to create
batches of examples of size `batch_size`.
All queue runners are added to the queue runners collection, and may be
started via `start_queue_runners`.
All ops are added to the default graph.
Use `parse_fn` if you need to do parsing / processing on single examples.
Args:
file_pattern: List of files or patterns of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int or scalar `Tensor` specifying the batch size to use.
reader: A function or class that returns an object with
`read` method, (filename tensor) -> (example tensor).
randomize_input: Whether the input should be randomized.
num_epochs: Integer specifying the number of times to read through the
dataset. If `None`, cycles through the dataset forever.
NOTE - If specified, creates a variable that must be initialized, so call
`tf.local_variables_initializer()` and run the op in a session.
queue_capacity: Capacity for input queue.
num_threads: The number of threads enqueuing examples. In order to have
predictable and repeatable order of reading and enqueueing, such as in
prediction and evaluation mode, `num_threads` should be 1.
read_batch_size: An int or scalar `Tensor` specifying the number of
records to read at once.
parse_fn: Parsing function, takes `Example` Tensor returns parsed
representation. If `None`, no parsing is done.
name: Name of resulting op.
seed: An integer (optional). Seed used if randomize_input == True.
Returns:
Returns tuple of:
- `Tensor` of string keys.
- String `Tensor` of batched `Example` proto.
Raises:
ValueError: for invalid inputs.
"""
return _read_keyed_batch_examples_helper(
file_pattern,
batch_size,
reader,
randomize_input=randomize_input,
num_epochs=num_epochs,
queue_capacity=queue_capacity,
num_threads=num_threads,
read_batch_size=read_batch_size,
parse_fn=parse_fn,
setup_shared_queue=False,
name=name,
seed=seed)
@deprecated(None, 'Use tf.data.')
def read_keyed_batch_examples_shared_queue(file_pattern,
batch_size,
reader,
randomize_input=True,
num_epochs=None,
queue_capacity=10000,
num_threads=1,
read_batch_size=1,
parse_fn=None,
name=None,
seed=None):
"""Adds operations to read, queue, batch `Example` protos.
Given file pattern (or list of files), will setup a shared queue for file
names, setup a worker queue that pulls from the shared queue, read `Example`
protos using provided `reader`, use batch queue to create batches of examples
of size `batch_size`. This provides at-most-once visit guarantees. Note that
this only holds if the parameter servers are not pre-empted or restarted and
the session is not restored from a checkpoint; the state of a queue is not
checkpointed, so in those cases reading restarts from the entire list of
files.
All queue runners are added to the queue runners collection, and may be
started via `start_queue_runners`.
All ops are added to the default graph.
Use `parse_fn` if you need to do parsing / processing on single examples.
Args:
file_pattern: List of files or patterns of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int or scalar `Tensor` specifying the batch size to use.
reader: A function or class that returns an object with
`read` method, (filename tensor) -> (example tensor).
randomize_input: Whether the input should be randomized.
num_epochs: Integer specifying the number of times to read through the
dataset. If `None`, cycles through the dataset forever.
NOTE - If specified, creates a variable that must be initialized, so call
`tf.local_variables_initializer()` and run the op in a session.
queue_capacity: Capacity for input queue.
num_threads: The number of threads enqueuing examples.
read_batch_size: An int or scalar `Tensor` specifying the number of
records to read at once.
parse_fn: Parsing function, takes `Example` Tensor returns parsed
representation. If `None`, no parsing is done.
name: Name of resulting op.
seed: An integer (optional). Seed used if randomize_input == True.
Returns:
Returns tuple of:
- `Tensor` of string keys.
- String `Tensor` of batched `Example` proto.
Raises:
ValueError: for invalid inputs.
"""
return _read_keyed_batch_examples_helper(
file_pattern,
batch_size,
reader,
randomize_input=randomize_input,
num_epochs=num_epochs,
queue_capacity=queue_capacity,
num_threads=num_threads,
read_batch_size=read_batch_size,
parse_fn=parse_fn,
setup_shared_queue=True,
name=name,
seed=seed)
def _get_file_names(file_pattern, randomize_input):
"""Parse list of file names from pattern, optionally shuffled.
Args:
file_pattern: File glob pattern, or list of glob patterns.
randomize_input: Whether to shuffle the order of file names.
Returns:
List of file names matching `file_pattern`.
Raises:
ValueError: If `file_pattern` is empty, or pattern matches no files.
"""
if isinstance(file_pattern, list):
if not file_pattern:
raise ValueError('No files given to dequeue_examples.')
file_names = []
for entry in file_pattern:
file_names.extend(gfile.Glob(entry))
else:
file_names = list(gfile.Glob(file_pattern))
if not file_names:
raise ValueError('No files match %s.' % file_pattern)
# Sort files so it will be deterministic for unit tests. They'll be shuffled
# in `string_input_producer` if `randomize_input` is enabled.
if not randomize_input:
file_names = sorted(file_names)
return file_names
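# Hedged sketch (paths are assumptions, for illustration only): _get_file_names
# accepts a single glob or a list of globs; with randomize_input=False the
# result is sorted so unit tests stay deterministic.
#
#   names = _get_file_names(
#       ['/tmp/data/part-*', '/tmp/data/extra.tfrecord'], randomize_input=False)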
def _get_examples(file_name_queue, reader, num_threads, read_batch_size,
filter_fn, parse_fn):
"""Get example filenames matching.
Args:
file_name_queue: A queue implementation that dequeues elements in
first-in first-out order.
reader: A function or class that returns an object with
`read` method, (filename tensor) -> (example tensor).
num_threads: The number of threads enqueuing examples.
read_batch_size: An int or scalar `Tensor` specifying the number of
records to read at once.
filter_fn: Filtering function, takes both keys as well as an `Example`
Tensors and returns a boolean mask of the same shape as the input Tensors
to be applied for filtering. If `None`, no filtering is done.
parse_fn: Parsing function, takes `Example` Tensor returns parsed
representation. If `None`, no parsing is done.
Returns:
List with one entry per reader thread: a `(keys, examples)` tuple, or a
parsed feature dict (with keys stored under `KEY_FEATURE_NAME`) when
`parse_fn` returns a dict.
"""
with ops.name_scope('read'):
example_list = []
for _ in range(num_threads):
keys, examples_proto = utils.smart_cond(
read_batch_size > 1,
lambda: reader().read_up_to(file_name_queue, read_batch_size),
lambda: reader().read(file_name_queue))
if filter_fn:
mask = filter_fn(keys, examples_proto)
keys = array_ops.boolean_mask(keys, mask)
examples_proto = array_ops.boolean_mask(examples_proto, mask)
if parse_fn:
parsed_examples = parse_fn(examples_proto)
# Map keys into example map because batch_join doesn't support
# tuple of Tensor + dict.
if isinstance(parsed_examples, dict):
parsed_examples[KEY_FEATURE_NAME] = keys
example_list.append(parsed_examples)
else:
example_list.append((keys, parsed_examples))
else:
example_list.append((keys, examples_proto))
return example_list
def _read_keyed_batch_examples_helper(file_pattern,
batch_size,
reader,
randomize_input=True,
num_epochs=None,
queue_capacity=10000,
num_threads=1,
read_batch_size=1,
filter_fn=None,
parse_fn=None,
setup_shared_queue=False,
name=None,
seed=None):
"""Adds operations to read, queue, batch `Example` protos.
Args:
file_pattern: List of files or patterns of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int or scalar `Tensor` specifying the batch size to use.
reader: A function or class that returns an object with
`read` method, (filename tensor) -> (example tensor).
randomize_input: Whether the input should be randomized.
num_epochs: Integer specifying the number of times to read through the
dataset. If `None`, cycles through the dataset forever.
NOTE - If specified, creates a variable that must be initialized, so call
`tf.local_variables_initializer()` and run the op in a session.
queue_capacity: Capacity for input queue.
num_threads: The number of threads enqueuing examples.
read_batch_size: An int or scalar `Tensor` specifying the number of
records to read at once.
filter_fn: Filtering function, takes both keys as well `Example` Tensors
and returns a boolean mask of the same shape as the input Tensors to
be applied for filtering. If `None`, no filtering is done.
parse_fn: Parsing function, takes `Example` Tensor returns parsed
representation. If `None`, no parsing is done.
setup_shared_queue: Whether to set up a shared queue for file names.
name: Name of resulting op.
seed: An integer (optional). Seed used if randomize_input == True.
Returns:
Returns tuple of:
- `Tensor` of string keys.
- String `Tensor` of batched `Example` proto.
Raises:
ValueError: for invalid inputs.
"""
# Retrieve files to read.
file_names = _get_file_names(file_pattern, randomize_input)
# Check input parameters are given and reasonable.
if (not queue_capacity) or (queue_capacity <= 0):
raise ValueError('Invalid queue_capacity %s.' % queue_capacity)
if (batch_size is None) or (
(not isinstance(batch_size, ops.Tensor)) and
(batch_size <= 0 or batch_size >= queue_capacity)):
raise ValueError('Invalid batch_size %s, with queue_capacity %s.' %
(batch_size, queue_capacity))
if (read_batch_size is None) or (
(not isinstance(read_batch_size, ops.Tensor)) and (read_batch_size <= 0)):
raise ValueError('Invalid read_batch_size %s.' % read_batch_size)
if (not num_threads) or (num_threads <= 0):
raise ValueError('Invalid num_threads %s.' % num_threads)
if (num_epochs is not None) and (num_epochs <= 0):
raise ValueError('Invalid num_epochs %s.' % num_epochs)
with ops.name_scope(name, 'read_batch_examples', [file_pattern]) as scope:
with ops.name_scope('file_name_queue') as file_name_queue_scope:
if setup_shared_queue:
file_name_queue = data_flow_ops.FIFOQueue(
capacity=1, dtypes=[dtypes.string], shapes=[[]])
enqueue_op = file_name_queue.enqueue(
input_pipeline_ops.seek_next(
file_names,
shuffle=randomize_input,
num_epochs=num_epochs,
seed=seed))
queue_runner.add_queue_runner(
queue_runner.QueueRunner(file_name_queue, [enqueue_op]))
else:
file_name_queue = input_ops.string_input_producer(
constant_op.constant(file_names, name='input'),
shuffle=randomize_input,
num_epochs=num_epochs,
name=file_name_queue_scope,
seed=seed)
example_list = _get_examples(file_name_queue, reader, num_threads,
read_batch_size, filter_fn, parse_fn)
enqueue_many = read_batch_size > 1
if num_epochs is None:
allow_smaller_final_batch = False
else:
allow_smaller_final_batch = True
# Setup batching queue given list of read example tensors.
if randomize_input:
if isinstance(batch_size, ops.Tensor):
min_after_dequeue = int(queue_capacity * 0.4)
else:
min_after_dequeue = max(queue_capacity - (3 * batch_size), batch_size)
queued_examples_with_keys = input_ops.shuffle_batch_join(
example_list,
batch_size,
capacity=queue_capacity,
min_after_dequeue=min_after_dequeue,
enqueue_many=enqueue_many,
name=scope,
allow_smaller_final_batch=allow_smaller_final_batch,
seed=seed)
else:
queued_examples_with_keys = input_ops.batch_join(
example_list,
batch_size,
capacity=queue_capacity,
enqueue_many=enqueue_many,
name=scope,
allow_smaller_final_batch=allow_smaller_final_batch)
if parse_fn and isinstance(queued_examples_with_keys, dict):
queued_keys = queued_examples_with_keys.pop(KEY_FEATURE_NAME)
return queued_keys, queued_examples_with_keys
return queued_examples_with_keys
@deprecated(None, 'Use tf.data.')
def read_keyed_batch_features(file_pattern,
batch_size,
features,
reader,
randomize_input=True,
num_epochs=None,
queue_capacity=10000,
reader_num_threads=1,
feature_queue_capacity=100,
num_enqueue_threads=2,
parse_fn=None,
name=None,
read_batch_size=None):
"""Adds operations to read, queue, batch and parse `Example` protos.
Given file pattern (or list of files), will setup a queue for file names,
read `Example` proto using provided `reader`, use batch queue to create
batches of examples of size `batch_size` and parse example given `features`
specification.
All queue runners are added to the queue runners collection, and may be
started via `start_queue_runners`.
All ops are added to the default graph.
Args:
file_pattern: List of files or patterns of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int or scalar `Tensor` specifying the batch size to use.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values.
reader: A function or class that returns an object with
`read` method, (filename tensor) -> (example tensor).
randomize_input: Whether the input should be randomized.
num_epochs: Integer specifying the number of times to read through the
dataset. If None, cycles through the dataset forever. NOTE - If specified,
creates a variable that must be initialized, so call
tf.local_variables_initializer() and run the op in a session.
queue_capacity: Capacity for input queue.
reader_num_threads: The number of threads to read examples. In order to have
predictable and repeatable order of reading and enqueueing, such as in
prediction and evaluation mode, `reader_num_threads` should be 1.
feature_queue_capacity: Capacity of the parsed features queue.
num_enqueue_threads: Number of threads to enqueue the parsed example queue.
Using multiple threads to enqueue the parsed example queue helps maintain
a full queue when the subsequent computations overall are cheaper than
parsing. In order to have predictable and repeatable order of reading and
enqueueing, such as in prediction and evaluation mode,
`num_enqueue_threads` should be 1.
parse_fn: Parsing function, takes `Example` Tensor returns parsed
representation. If `None`, no parsing is done.
name: Name of resulting op.
read_batch_size: An int or scalar `Tensor` specifying the number of
records to read at once. If `None`, defaults to `batch_size`.
Returns:
Returns tuple of:
- `Tensor` of string keys.
- A dict of `Tensor` or `SparseTensor` objects for each in `features`.
Raises:
ValueError: for invalid inputs.
"""
with ops.name_scope(name, 'read_batch_features', [file_pattern]) as scope:
if read_batch_size is None:
read_batch_size = batch_size
keys, examples = read_keyed_batch_examples(
file_pattern,
batch_size,
reader,
randomize_input=randomize_input,
num_epochs=num_epochs,
queue_capacity=queue_capacity,
num_threads=reader_num_threads,
read_batch_size=read_batch_size,
parse_fn=parse_fn,
name=scope)
# Parse the example.
feature_map = parsing_ops.parse_example(examples, features)
return queue_parsed_features(
feature_map,
keys=keys,
feature_queue_capacity=feature_queue_capacity,
num_enqueue_threads=num_enqueue_threads,
name=scope)
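# Hedged usage sketch (not from the original file): reading keyed, parsed
# batches from TFRecords. The feature spec and file pattern are assumptions
# made purely for illustration.
#
#   features_spec = {
#       'label': parsing_ops.FixedLenFeature([], dtypes.int64),
#       'tokens': parsing_ops.VarLenFeature(dtypes.string),
#   }
#   keys, parsed = read_keyed_batch_features(
#       file_pattern='/tmp/data/*.tfrecord',  # assumed
#       batch_size=64,
#       features=features_spec,
#       reader=io_ops.TFRecordReader)
#   # `parsed['label']` is a dense Tensor and `parsed['tokens']` a SparseTensor.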
@deprecated(None, 'Use tf.data.')
def read_keyed_batch_features_shared_queue(file_pattern,
batch_size,
features,
reader,
randomize_input=True,
num_epochs=None,
queue_capacity=10000,
reader_num_threads=1,
feature_queue_capacity=100,
num_queue_runners=2,
parse_fn=None,
name=None):
"""Adds operations to read, queue, batch and parse `Example` protos.
Given file pattern (or list of files), will setup a shared queue for file
names, setup a worker queue that gets filenames from the shared queue,
read `Example` proto using provided `reader`, use batch queue to create
batches of examples of size `batch_size` and parse example given `features`
specification.
All queue runners are added to the queue runners collection, and may be
started via `start_queue_runners`.
All ops are added to the default graph.
Args:
file_pattern: List of files or patterns of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int or scalar `Tensor` specifying the batch size to use.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values.
reader: A function or class that returns an object with
`read` method, (filename tensor) -> (example tensor).
randomize_input: Whether the input should be randomized.
num_epochs: Integer specifying the number of times to read through the
dataset. If None, cycles through the dataset forever. NOTE - If specified,
creates a variable that must be initialized, so call
tf.local_variables_initializer() and run the op in a session.
queue_capacity: Capacity for input queue.
reader_num_threads: The number of threads to read examples.
feature_queue_capacity: Capacity of the parsed features queue.
num_queue_runners: Number of threads to enqueue the parsed example queue.
Using multiple threads to enqueue the parsed example queue helps maintain
a full queue when the subsequent computations overall are cheaper than
parsing.
parse_fn: Parsing function, takes `Example` Tensor returns parsed
representation. If `None`, no parsing is done.
name: Name of resulting op.
Returns:
Returns tuple of:
- `Tensor` of string keys.
- A dict of `Tensor` or `SparseTensor` objects for each in `features`.
Raises:
ValueError: for invalid inputs.
"""
with ops.name_scope(name, 'read_batch_features', [file_pattern]) as scope:
keys, examples = read_keyed_batch_examples_shared_queue(
file_pattern,
batch_size,
reader,
randomize_input=randomize_input,
num_epochs=num_epochs,
queue_capacity=queue_capacity,
num_threads=reader_num_threads,
read_batch_size=batch_size,
parse_fn=parse_fn,
name=scope)
# Parse the example.
feature_map = parsing_ops.parse_example(examples, features)
return queue_parsed_features(
feature_map,
keys=keys,
feature_queue_capacity=feature_queue_capacity,
num_enqueue_threads=num_queue_runners,
name=scope)
@deprecated(None, 'Use tf.data.')
def queue_parsed_features(parsed_features,
keys=None,
feature_queue_capacity=100,
num_enqueue_threads=2,
name=None):
"""Speeds up parsing by using queues to do it asynchronously.
This function adds the tensors in `parsed_features` to a queue, which allows
the parsing (or any other expensive op before this) to run asynchronously with
respect to the rest of the training graph. This greatly improves read latency
and speeds up training, since the data will already be parsed and ready when
each step of training needs it.
All queue runners are added to the queue runners collection, and may be
started via `start_queue_runners`.
All ops are added to the default graph.
Args:
parsed_features: A dict of string key to `Tensor` or `SparseTensor` objects.
keys: `Tensor` of string keys.
feature_queue_capacity: Capacity of the parsed features queue.
num_enqueue_threads: Number of threads to enqueue the parsed example queue.
Using multiple threads to enqueue the parsed example queue helps maintain
a full queue when the subsequent computations overall are cheaper than
parsing. In order to have predictable and repeatable order of reading and
enqueueing, such as in prediction and evaluation mode,
`num_enqueue_threads` should be 1.
name: Name of resulting op.
Returns:
Returns tuple of:
- `Tensor` corresponding to `keys` if provided, otherwise `None`.
- A dict of string key to `Tensor` or `SparseTensor` objects corresponding
to `parsed_features`.
Raises:
ValueError: for invalid inputs.
"""
args = list(parsed_features.values())
if keys is not None:
args += [keys]
with ops.name_scope(name, 'queue_parsed_features', args):
# Let's also add preprocessed tensors into the queue types for each item of
# the queue.
tensors_to_enqueue = []
# Each entry contains the key, and a boolean which indicates whether the
# tensor was a sparse tensor.
tensors_mapping = []
# TODO(sibyl-Aix6ihai): Most of the functionality here is about pushing sparse
# tensors into a queue. This could be taken care in somewhere else so others
# can reuse it. Also, QueueBase maybe extended to handle sparse tensors
# directly.
for key in sorted(parsed_features.keys()):
tensor = parsed_features[key]
if isinstance(tensor, sparse_tensor.SparseTensor):
tensors_mapping.append((key, True))
tensors_to_enqueue.extend(
[tensor.indices, tensor.values, tensor.dense_shape])
else:
tensors_mapping.append((key, False))
tensors_to_enqueue.append(tensor)
if keys is not None:
tensors_to_enqueue.append(keys)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = data_flow_ops.FIFOQueue(feature_queue_capacity, queue_dtypes)
# Add a summary op to debug if our feature queue is full or not.
summary.scalar('queue/parsed_features/%s/fraction_of_%d_full' %
(input_queue.name, feature_queue_capacity),
math_ops.cast(input_queue.size(), dtypes.float32) *
(1. / feature_queue_capacity))
# Use a single QueueRunner with multiple threads to enqueue so the queue is
# always full. The threads are coordinated so the last batch will not be
# lost.
enqueue_ops = [
input_queue.enqueue(tensors_to_enqueue)
for _ in range(num_enqueue_threads)
]
queue_runner.add_queue_runner(
queue_runner.QueueRunner(
input_queue,
enqueue_ops,
queue_closed_exception_types=(errors.OutOfRangeError,
errors.CancelledError)))
dequeued_tensors = input_queue.dequeue()
if not isinstance(dequeued_tensors, list):
# input_queue.dequeue() returns a single tensor instead of a list of
# tensors if there is only one tensor to dequeue, which breaks the
# assumption of a list below.
dequeued_tensors = [dequeued_tensors]
# Reset shapes on dequeued tensors.
for i in range(len(tensors_to_enqueue)):
dequeued_tensors[i].set_shape(tensors_to_enqueue[i].get_shape())
# Recreate feature mapping according to the original dictionary.
dequeued_parsed_features = {}
index = 0
for key, is_sparse_tensor in tensors_mapping:
if is_sparse_tensor:
# Three tensors are (indices, values, shape).
dequeued_parsed_features[key] = sparse_tensor.SparseTensor(
dequeued_tensors[index], dequeued_tensors[index + 1],
dequeued_tensors[index + 2])
index += 3
else:
dequeued_parsed_features[key] = dequeued_tensors[index]
index += 1
dequeued_keys = None
if keys is not None:
dequeued_keys = dequeued_tensors[-1]
return dequeued_keys, dequeued_parsed_features
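# Hedged sketch (illustrative only): decoupling parsing from training with
# queue_parsed_features. `serialized` stands for any string Tensor of
# `Example` protos obtained elsewhere; it is an assumption, not original code.
#
#   feature_map = parsing_ops.parse_example(
#       serialized, {'x': parsing_ops.FixedLenFeature([1], dtypes.float32)})
#   _, queued = queue_parsed_features(
#       feature_map, feature_queue_capacity=100, num_enqueue_threads=2)
#   # `queued['x']` dequeues already-parsed tensors asynchronously from the
#   # parsing ops above.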
@deprecated(None, 'Use tf.data.')
def read_batch_features(file_pattern,
batch_size,
features,
reader,
randomize_input=True,
num_epochs=None,
queue_capacity=10000,
feature_queue_capacity=100,
reader_num_threads=1,
num_enqueue_threads=2,
parse_fn=None,
name=None,
read_batch_size=None):
"""Adds operations to read, queue, batch and parse `Example` protos.
Given file pattern (or list of files), will setup a queue for file names,
read `Example` proto using provided `reader`, use batch queue to create
batches of examples of size `batch_size` and parse example given `features`
specification.
All queue runners are added to the queue runners collection, and may be
started via `start_queue_runners`.
All ops are added to the default graph.
Args:
file_pattern: List of files or patterns of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int or scalar `Tensor` specifying the batch size to use.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values.
reader: A function or class that returns an object with
`read` method, (filename tensor) -> (example tensor).
randomize_input: Whether the input should be randomized.
num_epochs: Integer specifying the number of times to read through the
dataset. If None, cycles through the dataset forever. NOTE - If specified,
creates a variable that must be initialized, so call
tf.local_variables_initializer() and run the op in a session.
queue_capacity: Capacity for input queue.
feature_queue_capacity: Capacity of the parsed features queue. Set this
value to a small number, for example 5 if the parsed features are large.
reader_num_threads: The number of threads to read examples. In order to have
predictable and repeatable order of reading and enqueueing, such as in
prediction and evaluation mode, `reader_num_threads` should be 1.
num_enqueue_threads: Number of threads to enqueue the parsed example queue.
Using multiple threads to enqueue the parsed example queue helps maintain
a full queue when the subsequent computations overall are cheaper than
parsing. In order to have predictable and repeatable order of reading and
enqueueing, such as in prediction and evaluation mode,
`num_enqueue_threads` should be 1.
parse_fn: Parsing function, takes `Example` Tensor returns parsed
representation. If `None`, no parsing is done.
name: Name of resulting op.
read_batch_size: An int or scalar `Tensor` specifying the number of
records to read at once. If `None`, defaults to `batch_size`.
Returns:
A dict of `Tensor` or `SparseTensor` objects for each in `features`.
Raises:
ValueError: for invalid inputs.
"""
_, features = read_keyed_batch_features(
file_pattern,
batch_size,
features,
reader,
randomize_input=randomize_input,
num_epochs=num_epochs,
queue_capacity=queue_capacity,
reader_num_threads=reader_num_threads,
feature_queue_capacity=feature_queue_capacity,
num_enqueue_threads=num_enqueue_threads,
read_batch_size=read_batch_size,
parse_fn=parse_fn,
name=name)
return features
@deprecated(None, 'Use tf.data.')
def read_batch_record_features(file_pattern,
batch_size,
features,
randomize_input=True,
num_epochs=None,
queue_capacity=10000,
reader_num_threads=1,
name='dequeue_record_examples'):
"""Reads TFRecord, queues, batches and parses `Example` proto.
See more detailed description in `read_examples`.
Args:
file_pattern: List of files or patterns of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int or scalar `Tensor` specifying the batch size to use.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values.
randomize_input: Whether the input should be randomized.
num_epochs: Integer specifying the number of times to read through the
dataset. If None, cycles through the dataset forever. NOTE - If specified,
creates a variable that must be initialized, so call
tf.local_variables_initializer() and run the op in a session.
queue_capacity: Capacity for input queue.
reader_num_threads: The number of threads to read examples. In order to have
predictable and repeatable order of reading and enqueueing, such as in
prediction and evaluation mode, `reader_num_threads` should be 1.
name: Name of resulting op.
Returns:
A dict of `Tensor` or `SparseTensor` objects for each in `features`.
Raises:
ValueError: for invalid inputs.
"""
return read_batch_features(
file_pattern=file_pattern,
batch_size=batch_size,
features=features,
reader=io_ops.TFRecordReader,
randomize_input=randomize_input,
num_epochs=num_epochs,
queue_capacity=queue_capacity,
reader_num_threads=reader_num_threads,
name=name)
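# Hedged usage sketch (not part of the library): the TFRecord convenience
# wrapper in action. The path and feature spec are assumptions.
#
#   features = read_batch_record_features(
#       file_pattern='/tmp/data/eval-*.tfrecord',  # assumed
#       batch_size=128,
#       features={
#           'image': parsing_ops.FixedLenFeature([784], dtypes.float32),
#           'label': parsing_ops.FixedLenFeature([], dtypes.int64),
#       })
#   # Equivalent to read_batch_features(..., reader=io_ops.TFRecordReader).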
|
apache-2.0
|
davisagli/eye
|
eye/views.py
|
1
|
1372
|
import cgi
import json
import pprint
from webob import Response
from persistent import Persistent
def as_json(context):
"""Return an object's representation as JSON"""
info = {
'info': cgi.escape(pprint.pformat(context.context)),
}
return Response(content_type='application/json', body=json.dumps(info))
def as_tree(context):
"""Return info about an object's members as JSON"""
tree = _build_tree(context, 2, 1)
if type(tree) == dict:
tree = [tree]
return Response(content_type='application/json', body=json.dumps(tree))
def _build_tree(node, level=1024, remove_root=0, id=None):
if level <= 0:
return None
level -= 1
tree = {}
children = []
result = None
items = node.items()
for k, v in items:
result = (_build_tree(v, level, id=k))
if result:
children.append(result)
if remove_root:
return children
else:
tree["key"] = id
tree["title"] = '%s (%s)' % (id, type(node.context).__name__)
tree["children"] = []
if len(items):
tree["isFolder"] = True
if not len(tree["children"]):
tree["isLazy"] = True
tree["children"] = children
if isinstance(node.context, Persistent):
tree['addClass'] = 'persistent'
return tree
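# Hedged sketch (purely illustrative): _build_tree expects node objects that
# expose .items() for their children and .context for the wrapped object. The
# ExampleNode class below is a hypothetical stand-in, not part of this app.
#
#   class ExampleNode(object):
#       def __init__(self, context, children=None):
#           self.context = context
#           self._children = children or {}
#       def items(self):
#           return list(self._children.items())
#
#   root = ExampleNode({'a': 1}, {'child': ExampleNode('leaf')})
#   tree = _build_tree(root, level=2, remove_root=0, id='root')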
|
mit
|
danilito19/django
|
tests/serializers/tests.py
|
118
|
30530
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import importlib
import json
import re
import unittest
from datetime import datetime
from xml.dom import minidom
from django.core import management, serializers
from django.core.serializers.base import ProgressBar
from django.db import connection, transaction
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, mock, override_settings,
skipUnlessDBFeature,
)
from django.test.utils import Approximate
from django.utils import six
from django.utils.six import StringIO
from .models import (
Actor, Article, Author, AuthorProfile, Category, Movie, Player, Score,
Team,
)
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
@override_settings(
SERIALIZATION_MODULES={
"json2": "django.core.serializers.json",
}
)
class SerializerRegistrationTests(SimpleTestCase):
def setUp(self):
self.old_serializers = serializers._serializers
serializers._serializers = {}
def tearDown(self):
serializers._serializers = self.old_serializers
def test_register(self):
"Registering a new serializer populates the full registry. Refs #14823"
serializers.register_serializer('json3', 'django.core.serializers.json')
public_formats = serializers.get_public_serializer_formats()
self.assertIn('json3', public_formats)
self.assertIn('json2', public_formats)
self.assertIn('xml', public_formats)
def test_unregister(self):
"Unregistering a serializer doesn't cause the registry to be repopulated. Refs #14823"
serializers.unregister_serializer('xml')
serializers.register_serializer('json3', 'django.core.serializers.json')
public_formats = serializers.get_public_serializer_formats()
self.assertNotIn('xml', public_formats)
self.assertIn('json3', public_formats)
def test_builtin_serializers(self):
"Requesting a list of serializer formats popuates the registry"
all_formats = set(serializers.get_serializer_formats())
public_formats = set(serializers.get_public_serializer_formats())
self.assertIn('xml', all_formats)
self.assertIn('xml', public_formats)
self.assertIn('json2', all_formats)
self.assertIn('json2', public_formats)
self.assertIn('python', all_formats)
self.assertNotIn('python', public_formats)
class SerializersTestBase(object):
@staticmethod
def _comparison_value(value):
return value
def setUp(self):
sports = Category.objects.create(name="Sports")
music = Category.objects.create(name="Music")
op_ed = Category.objects.create(name="Op-Ed")
self.joe = Author.objects.create(name="Joe")
self.jane = Author.objects.create(name="Jane")
self.a1 = Article(
author=self.jane,
headline="Poker has no place on ESPN",
pub_date=datetime(2006, 6, 16, 11, 00)
)
self.a1.save()
self.a1.categories = [sports, op_ed]
self.a2 = Article(
author=self.joe,
headline="Time to reform copyright",
pub_date=datetime(2006, 6, 16, 13, 00, 11, 345)
)
self.a2.save()
self.a2.categories = [music, op_ed]
def test_serialize(self):
"""Tests that basic serialization works."""
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all())
self.assertTrue(self._validate_output(serial_str))
def test_serializer_roundtrip(self):
"""Tests that serialized content can be deserialized."""
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all())
models = list(serializers.deserialize(self.serializer_name, serial_str))
self.assertEqual(len(models), 2)
def test_altering_serialized_output(self):
"""
Tests the ability to create new objects by
modifying serialized content.
"""
old_headline = "Poker has no place on ESPN"
new_headline = "Poker has no place on television"
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all())
serial_str = serial_str.replace(old_headline, new_headline)
models = list(serializers.deserialize(self.serializer_name, serial_str))
# Prior to saving, old headline is in place
self.assertTrue(Article.objects.filter(headline=old_headline))
self.assertFalse(Article.objects.filter(headline=new_headline))
for model in models:
model.save()
# After saving, new headline is in place
self.assertTrue(Article.objects.filter(headline=new_headline))
self.assertFalse(Article.objects.filter(headline=old_headline))
def test_one_to_one_as_pk(self):
"""
Tests that if you use your own primary key field
(such as a OneToOneField), it doesn't appear in the
serialized field list - it replaces the pk identifier.
"""
profile = AuthorProfile(author=self.joe,
date_of_birth=datetime(1970, 1, 1))
profile.save()
serial_str = serializers.serialize(self.serializer_name,
AuthorProfile.objects.all())
self.assertFalse(self._get_field_values(serial_str, 'author'))
for obj in serializers.deserialize(self.serializer_name, serial_str):
self.assertEqual(obj.object.pk, self._comparison_value(self.joe.pk))
def test_serialize_field_subset(self):
"""Tests that output can be restricted to a subset of fields"""
valid_fields = ('headline', 'pub_date')
invalid_fields = ("author", "categories")
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all(),
fields=valid_fields)
for field_name in invalid_fields:
self.assertFalse(self._get_field_values(serial_str, field_name))
for field_name in valid_fields:
self.assertTrue(self._get_field_values(serial_str, field_name))
def test_serialize_unicode(self):
"""Tests that unicode makes the roundtrip intact"""
actor_name = "Za\u017c\u00f3\u0142\u0107"
movie_title = 'G\u0119\u015bl\u0105 ja\u017a\u0144'
ac = Actor(name=actor_name)
mv = Movie(title=movie_title, actor=ac)
ac.save()
mv.save()
serial_str = serializers.serialize(self.serializer_name, [mv])
self.assertEqual(self._get_field_values(serial_str, "title")[0], movie_title)
self.assertEqual(self._get_field_values(serial_str, "actor")[0], actor_name)
obj_list = list(serializers.deserialize(self.serializer_name, serial_str))
mv_obj = obj_list[0].object
self.assertEqual(mv_obj.title, movie_title)
def test_serialize_progressbar(self):
fake_stdout = StringIO()
serializers.serialize(
self.serializer_name, Article.objects.all(),
progress_output=fake_stdout, object_count=Article.objects.count()
)
self.assertTrue(
fake_stdout.getvalue().endswith('[' + '.' * ProgressBar.progress_width + ']\n')
)
def test_serialize_superfluous_queries(self):
"""Ensure no superfluous queries are made when serializing ForeignKeys
#17602
"""
ac = Actor(name='Actor name')
ac.save()
mv = Movie(title='Movie title', actor_id=ac.pk)
mv.save()
with self.assertNumQueries(0):
serializers.serialize(self.serializer_name, [mv])
def test_serialize_with_null_pk(self):
"""
Tests that serialized data with no primary key results
in a model instance with no id
"""
category = Category(name="Reference")
serial_str = serializers.serialize(self.serializer_name, [category])
pk_value = self._get_pk_values(serial_str)[0]
self.assertFalse(pk_value)
cat_obj = list(serializers.deserialize(self.serializer_name,
serial_str))[0].object
self.assertEqual(cat_obj.id, None)
def test_float_serialization(self):
"""Tests that float values serialize and deserialize intact"""
sc = Score(score=3.4)
sc.save()
serial_str = serializers.serialize(self.serializer_name, [sc])
deserial_objs = list(serializers.deserialize(self.serializer_name,
serial_str))
self.assertEqual(deserial_objs[0].object.score, Approximate(3.4, places=1))
def test_deferred_field_serialization(self):
author = Author.objects.create(name='Victor Hugo')
author = Author.objects.defer('name').get(pk=author.pk)
serial_str = serializers.serialize(self.serializer_name, [author])
deserial_objs = list(serializers.deserialize(self.serializer_name, serial_str))
# Check the class instead of using isinstance() because model instances
# with deferred fields (e.g. Author_Deferred_name) will pass isinstance.
self.assertEqual(deserial_objs[0].object.__class__, Author)
def test_custom_field_serialization(self):
"""Tests that custom fields serialize and deserialize intact"""
team_str = "Spartak Moskva"
player = Player()
player.name = "Soslan Djanaev"
player.rank = 1
player.team = Team(team_str)
player.save()
serial_str = serializers.serialize(self.serializer_name,
Player.objects.all())
team = self._get_field_values(serial_str, "team")
self.assertTrue(team)
self.assertEqual(team[0], team_str)
deserial_objs = list(serializers.deserialize(self.serializer_name, serial_str))
self.assertEqual(deserial_objs[0].object.team.to_string(),
player.team.to_string())
def test_pre_1000ad_date(self):
"""Tests that year values before 1000AD are properly formatted"""
# Regression for #12524 -- dates before 1000AD get prefixed
# 0's on the year
a = Article.objects.create(
author=self.jane,
headline="Nobody remembers the early years",
pub_date=datetime(1, 2, 3, 4, 5, 6))
serial_str = serializers.serialize(self.serializer_name, [a])
date_values = self._get_field_values(serial_str, "pub_date")
self.assertEqual(date_values[0].replace('T', ' '), "0001-02-03 04:05:06")
def test_pkless_serialized_strings(self):
"""
Tests that serialized strings without PKs
can be turned into models
"""
deserial_objs = list(serializers.deserialize(self.serializer_name,
self.pkless_str))
for obj in deserial_objs:
self.assertFalse(obj.object.id)
obj.save()
self.assertEqual(Category.objects.all().count(), 5)
def test_deterministic_mapping_ordering(self):
"""Mapping such as fields should be deterministically ordered. (#24558)"""
output = serializers.serialize(self.serializer_name, [self.a1], indent=2)
categories = self.a1.categories.values_list('pk', flat=True)
self.assertEqual(output, self.mapping_ordering_str % {
'article_pk': self.a1.pk,
'author_pk': self.a1.author_id,
'first_category_pk': categories[0],
'second_category_pk': categories[1],
})
def test_deserialize_force_insert(self):
"""Tests that deserialized content can be saved with force_insert as a parameter."""
serial_str = serializers.serialize(self.serializer_name, [self.a1])
deserial_obj = list(serializers.deserialize(self.serializer_name, serial_str))[0]
with mock.patch('django.db.models.Model') as mock_model:
deserial_obj.save(force_insert=False)
mock_model.save_base.assert_called_with(deserial_obj.object, raw=True, using=None, force_insert=False)
class SerializersTransactionTestBase(object):
available_apps = ['serializers']
@skipUnlessDBFeature('supports_forward_references')
def test_forward_refs(self):
"""
Tests that objects ids can be referenced before they are
defined in the serialization data.
"""
# The deserialization process needs to run in a transaction in order
# to test forward reference handling.
with transaction.atomic():
objs = serializers.deserialize(self.serializer_name, self.fwd_ref_str)
with connection.constraint_checks_disabled():
for obj in objs:
obj.save()
for model_cls in (Category, Author, Article):
self.assertEqual(model_cls.objects.all().count(), 1)
art_obj = Article.objects.all()[0]
self.assertEqual(art_obj.categories.all().count(), 1)
self.assertEqual(art_obj.author.name, "Agnes")
class XmlSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "xml"
pkless_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object model="serializers.category">
<field type="CharField" name="name">Reference</field>
</object>
<object model="serializers.category">
<field type="CharField" name="name">Non-fiction</field>
</object>
</django-objects>"""
mapping_ordering_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object model="serializers.article" pk="%(article_pk)s">
<field name="author" rel="ManyToOneRel" to="serializers.author">%(author_pk)s</field>
<field name="headline" type="CharField">Poker has no place on ESPN</field>
<field name="pub_date" type="DateTimeField">2006-06-16T11:00:00</field>
<field name="categories" rel="ManyToManyRel" to="serializers.category"><object pk="%(first_category_pk)s"></object><object pk="%(second_category_pk)s"></object></field>
<field name="meta_data" rel="ManyToManyRel" to="serializers.categorymetadata"></field>
</object>
</django-objects>"""
@staticmethod
def _comparison_value(value):
# The XML serializer handles everything as strings, so comparisons
# need to be performed on the stringified value
return six.text_type(value)
@staticmethod
def _validate_output(serial_str):
try:
minidom.parseString(serial_str)
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
dom = minidom.parseString(serial_str)
fields = dom.getElementsByTagName("object")
for field in fields:
ret_list.append(field.getAttribute("pk"))
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
dom = minidom.parseString(serial_str)
fields = dom.getElementsByTagName("field")
for field in fields:
if field.getAttribute("name") == field_name:
temp = []
for child in field.childNodes:
temp.append(child.nodeValue)
ret_list.append("".join(temp))
return ret_list
def test_control_char_failure(self):
"""
Serializing control characters with XML should fail as those characters
are not supported in the XML 1.0 standard (except HT, LF, CR).
"""
self.a1.headline = "This contains \u0001 control \u0011 chars"
msg = "Article.headline (pk:%s) contains unserializable characters" % self.a1.pk
with self.assertRaisesMessage(ValueError, msg):
serializers.serialize(self.serializer_name, [self.a1])
self.a1.headline = "HT \u0009, LF \u000A, and CR \u000D are allowed"
self.assertIn(
"HT \t, LF \n, and CR \r are allowed",
serializers.serialize(self.serializer_name, [self.a1])
)
class XmlSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "xml"
fwd_ref_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object pk="1" model="serializers.article">
<field to="serializers.author" name="author" rel="ManyToOneRel">1</field>
<field type="CharField" name="headline">Forward references pose no problem</field>
<field type="DateTimeField" name="pub_date">2006-06-16T15:00:00</field>
<field to="serializers.category" name="categories" rel="ManyToManyRel">
<object pk="1"></object>
</field>
<field to="serializers.categorymetadata" name="meta_data" rel="ManyToManyRel"></field>
</object>
<object pk="1" model="serializers.author">
<field type="CharField" name="name">Agnes</field>
</object>
<object pk="1" model="serializers.category">
<field type="CharField" name="name">Reference</field></object>
</django-objects>"""
class JsonSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "json"
pkless_str = """[
{
"pk": null,
"model": "serializers.category",
"fields": {"name": "Reference"}
}, {
"model": "serializers.category",
"fields": {"name": "Non-fiction"}
}]"""
mapping_ordering_str = """[
{
"model": "serializers.article",
"pk": %(article_pk)s,
"fields": {
"author": %(author_pk)s,
"headline": "Poker has no place on ESPN",
"pub_date": "2006-06-16T11:00:00",
"categories": [
%(first_category_pk)s,
%(second_category_pk)s
],
"meta_data": []
}
}
]
"""
@staticmethod
def _validate_output(serial_str):
try:
json.loads(serial_str)
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
serial_list = json.loads(serial_str)
for obj_dict in serial_list:
ret_list.append(obj_dict["pk"])
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
serial_list = json.loads(serial_str)
for obj_dict in serial_list:
if field_name in obj_dict["fields"]:
ret_list.append(obj_dict["fields"][field_name])
return ret_list
def test_indentation_whitespace(self):
Score.objects.create(score=5.0)
Score.objects.create(score=6.0)
qset = Score.objects.all()
s = serializers.json.Serializer()
json_data = s.serialize(qset, indent=2)
for line in json_data.splitlines():
if re.search(r'.+,\s*$', line):
self.assertEqual(line, line.rstrip())
def test_helpful_error_message_invalid_pk(self):
"""
If there is an invalid primary key, the error message should contain
the model associated with it.
"""
test_string = """[{
"pk": "badpk",
"model": "serializers.player",
"fields": {
"name": "Bob",
"rank": 1,
"team": "Team"
}
}]"""
with self.assertRaisesMessage(serializers.base.DeserializationError, "(serializers.player:pk=badpk)"):
list(serializers.deserialize('json', test_string))
def test_helpful_error_message_invalid_field(self):
"""
If there is an invalid field value, the error message should contain
the model associated with it.
"""
test_string = """[{
"pk": "1",
"model": "serializers.player",
"fields": {
"name": "Bob",
"rank": "invalidint",
"team": "Team"
}
}]"""
expected = "(serializers.player:pk=1) field_value was 'invalidint'"
with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
list(serializers.deserialize('json', test_string))
def test_helpful_error_message_for_foreign_keys(self):
"""
Invalid foreign keys with a natural key should throw a helpful error
message, such as what the failing key is.
"""
test_string = """[{
"pk": 1,
"model": "serializers.category",
"fields": {
"name": "Unknown foreign key",
"meta_data": [
"doesnotexist",
"metadata"
]
}
}]"""
key = ["doesnotexist", "metadata"]
expected = "(serializers.category:pk=1) field_value was '%r'" % key
with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
list(serializers.deserialize('json', test_string))
def test_helpful_error_message_for_many2many_non_natural(self):
"""
Invalid many-to-many keys should throw a helpful error message.
"""
test_string = """[{
"pk": 1,
"model": "serializers.article",
"fields": {
"author": 1,
"headline": "Unknown many to many",
"pub_date": "2014-09-15T10:35:00",
"categories": [1, "doesnotexist"]
}
}, {
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}, {
"pk": 1,
"model": "serializers.category",
"fields": {
"name": "Reference"
}
}]"""
expected = "(serializers.article:pk=1) field_value was 'doesnotexist'"
with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
list(serializers.deserialize('json', test_string))
def test_helpful_error_message_for_many2many_natural1(self):
"""
Invalid many-to-many keys should throw a helpful error message.
This tests the code path where one of a list of natural keys is invalid.
"""
test_string = """[{
"pk": 1,
"model": "serializers.categorymetadata",
"fields": {
"kind": "author",
"name": "meta1",
"value": "Agnes"
}
}, {
"pk": 1,
"model": "serializers.article",
"fields": {
"author": 1,
"headline": "Unknown many to many",
"pub_date": "2014-09-15T10:35:00",
"meta_data": [
["author", "meta1"],
["doesnotexist", "meta1"],
["author", "meta1"]
]
}
}, {
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}]"""
key = ["doesnotexist", "meta1"]
expected = "(serializers.article:pk=1) field_value was '%r'" % key
with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
for obj in serializers.deserialize('json', test_string):
obj.save()
def test_helpful_error_message_for_many2many_natural2(self):
"""
Invalid many-to-many keys should throw a helpful error message. This
tests the code path where a natural many-to-many key has only a single
value.
"""
test_string = """[{
"pk": 1,
"model": "serializers.article",
"fields": {
"author": 1,
"headline": "Unknown many to many",
"pub_date": "2014-09-15T10:35:00",
"meta_data": [1, "doesnotexist"]
}
}, {
"pk": 1,
"model": "serializers.categorymetadata",
"fields": {
"kind": "author",
"name": "meta1",
"value": "Agnes"
}
}, {
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}]"""
expected = "(serializers.article:pk=1) field_value was 'doesnotexist'"
with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
for obj in serializers.deserialize('json', test_string, ignore=False):
obj.save()
class JsonSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "json"
fwd_ref_str = """[
{
"pk": 1,
"model": "serializers.article",
"fields": {
"headline": "Forward references pose no problem",
"pub_date": "2006-06-16T15:00:00",
"categories": [1],
"author": 1
}
},
{
"pk": 1,
"model": "serializers.category",
"fields": {
"name": "Reference"
}
},
{
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}]"""
YAML_IMPORT_ERROR_MESSAGE = r'No module named yaml'
class YamlImportModuleMock(object):
"""Provides a wrapped import_module function to simulate yaml ImportError
In order to run tests that verify the behavior of the YAML serializer
when run on a system that has yaml installed (like the django CI server),
mock import_module, so that it raises an ImportError when the yaml
serializer is being imported. The importlib.import_module() call is
being made in the serializers.register_serializer().
Refs: #12756
"""
def __init__(self):
self._import_module = importlib.import_module
def import_module(self, module_path):
if module_path == serializers.BUILTIN_SERIALIZERS['yaml']:
raise ImportError(YAML_IMPORT_ERROR_MESSAGE)
return self._import_module(module_path)
class NoYamlSerializerTestCase(SimpleTestCase):
"""Not having pyyaml installed provides a misleading error
Refs: #12756
"""
@classmethod
def setUpClass(cls):
"""Removes imported yaml and stubs importlib.import_module"""
super(NoYamlSerializerTestCase, cls).setUpClass()
cls._import_module_mock = YamlImportModuleMock()
importlib.import_module = cls._import_module_mock.import_module
# clear out cached serializers to emulate yaml missing
serializers._serializers = {}
@classmethod
def tearDownClass(cls):
"""Puts yaml back if necessary"""
super(NoYamlSerializerTestCase, cls).tearDownClass()
importlib.import_module = cls._import_module_mock._import_module
# clear out cached serializers to clean out BadSerializer instances
serializers._serializers = {}
def test_serializer_pyyaml_error_message(self):
"""Using yaml serializer without pyyaml raises ImportError"""
jane = Author(name="Jane")
self.assertRaises(ImportError, serializers.serialize, "yaml", [jane])
def test_deserializer_pyyaml_error_message(self):
"""Using yaml deserializer without pyyaml raises ImportError"""
self.assertRaises(ImportError, serializers.deserialize, "yaml", "")
def test_dumpdata_pyyaml_error_message(self):
"""Calling dumpdata produces an error when yaml package missing"""
with six.assertRaisesRegex(self, management.CommandError, YAML_IMPORT_ERROR_MESSAGE):
management.call_command('dumpdata', format='yaml')
@unittest.skipUnless(HAS_YAML, "No yaml library detected")
class YamlSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "yaml"
fwd_ref_str = """- fields:
headline: Forward references pose no problem
pub_date: 2006-06-16 15:00:00
categories: [1]
author: 1
pk: 1
model: serializers.article
- fields:
name: Reference
pk: 1
model: serializers.category
- fields:
name: Agnes
pk: 1
model: serializers.author"""
pkless_str = """- fields:
name: Reference
pk: null
model: serializers.category
- fields:
name: Non-fiction
model: serializers.category"""
mapping_ordering_str = """- model: serializers.article
pk: %(article_pk)s
fields:
author: %(author_pk)s
headline: Poker has no place on ESPN
pub_date: 2006-06-16 11:00:00
categories: [%(first_category_pk)s, %(second_category_pk)s]
meta_data: []
"""
@staticmethod
def _validate_output(serial_str):
try:
yaml.safe_load(StringIO(serial_str))
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
stream = StringIO(serial_str)
for obj_dict in yaml.safe_load(stream):
ret_list.append(obj_dict["pk"])
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
stream = StringIO(serial_str)
for obj_dict in yaml.safe_load(stream):
if "fields" in obj_dict and field_name in obj_dict["fields"]:
field_value = obj_dict["fields"][field_name]
# yaml.safe_load will return non-string objects for some
# of the fields we are interested in, this ensures that
# everything comes back as a string
if isinstance(field_value, six.string_types):
ret_list.append(field_value)
else:
ret_list.append(str(field_value))
return ret_list
@unittest.skipUnless(HAS_YAML, "No yaml library detected")
class YamlSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "yaml"
fwd_ref_str = """- fields:
headline: Forward references pose no problem
pub_date: 2006-06-16 15:00:00
categories: [1]
author: 1
pk: 1
model: serializers.article
- fields:
name: Reference
pk: 1
model: serializers.category
- fields:
name: Agnes
pk: 1
model: serializers.author"""
|
bsd-3-clause
|
protatremy/buildbot
|
worker/buildbot_worker/exceptions.py
|
9
|
1155
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
class AbandonChain(Exception):
"""A series of chained steps can raise this exception to indicate that
one of the intermediate RunProcesses has failed, such that there is no
point in running the remainder. 'rc' should be the non-zero exit code of
the failing ShellCommand."""
def __repr__(self):
return "<AbandonChain rc=%s>" % self.args[0]
|
gpl-2.0
|
slohse/ansible
|
test/units/modules/network/netscaler/test_netscaler_nitro_request.py
|
57
|
12941
|
# Copyright (c) 2017 Citrix Systems
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from ansible.compat.tests.mock import patch, Mock, call
from .netscaler_module import TestModule
import copy
import tempfile
import json
import sys
import codecs
from ansible.modules.network.netscaler import netscaler_nitro_request
module_arguments = dict(
nsip=None,
nitro_user=None,
nitro_pass=None,
nitro_protocol=None,
validate_certs=None,
nitro_auth_token=None,
resource=None,
name=None,
attributes=None,
args=None,
filter=None,
operation=None,
expected_nitro_errorcode=None,
action=None,
instance_ip=None,
instance_name=None,
instance_id=None,
)
class TestNetscalerNitroRequestModule(TestModule):
@classmethod
def setUpClass(cls):
class MockException(Exception):
pass
cls.MockException = MockException
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
pass
def tearDown(self):
pass
def test_fail_on_conflicting_authentication_methods(self):
args = copy.deepcopy(module_arguments)
args.update(dict(
nitro_user='nsroot',
nitro_pass='nsroot',
nitro_auth_token='##DDASKLFDJ',
))
mock_module_instance = Mock(params=args)
expected_calls = [
call.fail_json(
changed=False,
failed=True,
msg='Cannot define both authentication token and username/password'
)
]
module_mock = Mock(return_value=mock_module_instance)
with patch('ansible.modules.network.netscaler.netscaler_nitro_request.AnsibleModule', module_mock):
netscaler_nitro_request.NitroAPICaller()
mock_module_instance.assert_has_calls(expected_calls)
def test_nitro_user_pass_credentials(self):
args = copy.deepcopy(module_arguments)
args.update(dict(
nitro_user='nsroot',
nitro_pass='nsroot',
))
mock_module_instance = Mock(params=args)
expected_headers = {
'Content-Type': 'application/json',
'X-NITRO-USER': 'nsroot',
'X-NITRO-PASS': 'nsroot',
}
module_mock = Mock(return_value=mock_module_instance)
with patch('ansible.modules.network.netscaler.netscaler_nitro_request.AnsibleModule', module_mock):
instance = netscaler_nitro_request.NitroAPICaller()
self.assertDictEqual(instance._headers, expected_headers)
def test_mas_login_headers(self):
args = copy.deepcopy(module_arguments)
args.update(dict(
nitro_user='nsroot',
nitro_pass='nsroot',
operation='mas_login',
))
mock_module_instance = Mock(params=args)
expected_headers = {
'Content-Type': 'application/json',
}
module_mock = Mock(return_value=mock_module_instance)
with patch('ansible.modules.network.netscaler.netscaler_nitro_request.AnsibleModule', module_mock):
instance = netscaler_nitro_request.NitroAPICaller()
self.assertDictEqual(instance._headers, expected_headers)
def test_mas_proxy_call_headers_instance_ip(self):
args = copy.deepcopy(module_arguments)
args.update(dict(
nitro_auth_token='##ABDB',
operation='add',
instance_ip='192.168.1.1',
))
mock_module_instance = Mock(params=args)
expected_headers = {
'Content-Type': 'application/json',
'_MPS_API_PROXY_MANAGED_INSTANCE_IP': args['instance_ip'],
'Cookie': 'NITRO_AUTH_TOKEN=%s' % args['nitro_auth_token'],
}
module_mock = Mock(return_value=mock_module_instance)
with patch('ansible.modules.network.netscaler.netscaler_nitro_request.AnsibleModule', module_mock):
instance = netscaler_nitro_request.NitroAPICaller()
self.assertDictEqual(instance._headers, expected_headers)
def test_mas_proxy_call_headers_instance_id(self):
args = copy.deepcopy(module_arguments)
args.update(dict(
nitro_auth_token='##ABDB',
operation='add',
instance_id='myid',
))
mock_module_instance = Mock(params=args)
expected_headers = {
'Content-Type': 'application/json',
'_MPS_API_PROXY_MANAGED_INSTANCE_ID': args['instance_id'],
'Cookie': 'NITRO_AUTH_TOKEN=%s' % args['nitro_auth_token'],
}
module_mock = Mock(return_value=mock_module_instance)
with patch('ansible.modules.network.netscaler.netscaler_nitro_request.AnsibleModule', module_mock):
instance = netscaler_nitro_request.NitroAPICaller()
self.assertDictEqual(instance._headers, expected_headers)
def test_mas_proxy_call_headers_instance_name(self):
args = copy.deepcopy(module_arguments)
args.update(dict(
nitro_auth_token='##ABDB',
operation='add',
instance_name='myname',
))
mock_module_instance = Mock(params=args)
expected_headers = {
'Content-Type': 'application/json',
'_MPS_API_PROXY_MANAGED_INSTANCE_NAME': args['instance_name'],
'Cookie': 'NITRO_AUTH_TOKEN=%s' % args['nitro_auth_token'],
}
module_mock = Mock(return_value=mock_module_instance)
with patch('ansible.modules.network.netscaler.netscaler_nitro_request.AnsibleModule', module_mock):
instance = netscaler_nitro_request.NitroAPICaller()
self.assertDictEqual(instance._headers, expected_headers)
def test_edit_response_data_no_body_success_status(self):
with patch('ansible.modules.network.netscaler.netscaler_nitro_request.AnsibleModule'):
instance = netscaler_nitro_request.NitroAPICaller()
r = None
info = {
'status': 200,
}
result = {}
success_status = 200
expected_result = {
'nitro_errorcode': 0,
'nitro_message': 'Success',
'nitro_severity': 'NONE',
'http_response_body': '',
'http_response_data': info,
}
instance.edit_response_data(r, info, result, success_status)
self.assertDictEqual(result, expected_result)
def test_edit_response_data_no_body_fail_status(self):
with patch('ansible.modules.network.netscaler.netscaler_nitro_request.AnsibleModule'):
instance = netscaler_nitro_request.NitroAPICaller()
r = None
info = {
'status': 201,
}
result = {}
success_status = 200
expected_result = {
'nitro_errorcode': -1,
'nitro_message': 'HTTP status %s' % info['status'],
'nitro_severity': 'ERROR',
'http_response_body': '',
'http_response_data': info,
}
instance.edit_response_data(r, info, result, success_status)
self.assertDictEqual(result, expected_result)
def test_edit_response_data_actual_body_data(self):
args = copy.deepcopy(module_arguments)
args.update(dict(
nitro_user='nsroot',
nitro_pass='nsroot',
nitro_auth_token='##DDASKLFDJ',
))
module_mock = Mock(params=args, from_json=json.loads)
with patch('ansible.modules.network.netscaler.netscaler_nitro_request.AnsibleModule', Mock(return_value=module_mock)):
with tempfile.TemporaryFile() as r:
actual_body = {
'errorcode': 258,
'message': 'Some error',
'severity': 'ERROR',
}
r.write(codecs.encode(json.dumps(actual_body), 'utf-8'))
r.seek(0)
instance = netscaler_nitro_request.NitroAPICaller()
info = {
'status': 200,
}
result = {}
success_status = 200
expected_result = {
'http_response_body': json.dumps(actual_body),
'http_response_data': info,
}
nitro_data = {}
for key, value in actual_body.items():
nitro_data['nitro_%s' % key] = value
expected_result.update(nitro_data)
instance.edit_response_data(r, info, result, success_status)
self.assertDictEqual(result, expected_result)
def test_edit_response_data_actual_body_data_irrelevant(self):
args = copy.deepcopy(module_arguments)
args.update(dict(
nitro_user='nsroot',
nitro_pass='nsroot',
nitro_auth_token='##DDASKLFDJ',
))
module_mock = Mock(params=args, from_json=json.loads)
with patch('ansible.modules.network.netscaler.netscaler_nitro_request.AnsibleModule', Mock(return_value=module_mock)):
with tempfile.TemporaryFile() as r:
actual_body = {}
r.write(codecs.encode(json.dumps(actual_body), 'utf-8'))
r.seek(0)
instance = netscaler_nitro_request.NitroAPICaller()
info = {
'status': 200,
}
result = {}
success_status = 200
expected_result = {
'http_response_body': json.dumps(actual_body),
'http_response_data': info,
'nitro_errorcode': 0,
'nitro_message': 'Success',
'nitro_severity': 'NONE',
}
instance.edit_response_data(r, info, result, success_status)
self.assertDictEqual(result, expected_result)
def test_edit_response_data_body_in_info(self):
args = copy.deepcopy(module_arguments)
args.update(dict(
nitro_user='nsroot',
nitro_pass='nsroot',
))
module_mock = Mock(params=args, from_json=json.loads)
with patch('ansible.modules.network.netscaler.netscaler_nitro_request.AnsibleModule', Mock(return_value=module_mock)):
body = {
'errorcode': 258,
'message': 'Numerical error 258',
'severity': 'ERROR'
}
instance = netscaler_nitro_request.NitroAPICaller()
r = None
info = {
'status': 200,
'body': codecs.encode(json.dumps(body), 'utf-8'),
}
result = {}
success_status = 200
expected_result = {
'http_response_body': json.dumps(body),
'http_response_data': info,
}
nitro_data = {}
for key, value in body.items():
nitro_data['nitro_%s' % key] = value
expected_result.update(nitro_data)
instance.edit_response_data(r, info, result, success_status)
self.assertDictEqual(result, expected_result)
def test_handle_get_return_object(self):
resource = 'lbvserver'
args = copy.deepcopy(module_arguments)
args.update(dict(
nitro_user='nsroot',
nitro_pass='nsroot',
resource=resource,
))
resource_data = {
'property1': 'value1',
'property2': 'value2',
}
module_mock = Mock(params=args, from_json=json.loads)
with patch('ansible.modules.network.netscaler.netscaler_nitro_request.AnsibleModule', Mock(return_value=module_mock)):
instance = netscaler_nitro_request.NitroAPICaller()
data = {resource: resource_data}
result = {
'nitro_errorcode': 0,
'http_response_body': json.dumps(data),
}
expected_result = {
'nitro_object': resource_data
}
expected_result.update(result)
instance.handle_get_return_object(result)
self.assertDictEqual(result, expected_result)
|
gpl-3.0
|
stack-of-tasks/rbdlpy
|
tutorial/lib/python2.7/site-packages/OpenGL/raw/GLES2/IMG/multisampled_render_to_texture.py
|
8
|
1173
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES2_IMG_multisampled_render_to_texture'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_IMG_multisampled_render_to_texture',error_checker=_errors._error_checker)
GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_IMG=_C('GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_IMG',0x9134)
GL_MAX_SAMPLES_IMG=_C('GL_MAX_SAMPLES_IMG',0x9135)
GL_RENDERBUFFER_SAMPLES_IMG=_C('GL_RENDERBUFFER_SAMPLES_IMG',0x9133)
GL_TEXTURE_SAMPLES_IMG=_C('GL_TEXTURE_SAMPLES_IMG',0x9136)
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLuint,_cs.GLint,_cs.GLsizei)
def glFramebufferTexture2DMultisampleIMG(target,attachment,textarget,texture,level,samples):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,_cs.GLenum,_cs.GLsizei,_cs.GLsizei)
def glRenderbufferStorageMultisampleIMG(target,samples,internalformat,width,height):pass
|
lgpl-3.0
|
Jgarcia-IAS/SAT
|
openerp/addons-extra/odoo-pruebas/odoo-server/addons/hw_posbox_upgrade/__openerp__.py
|
313
|
1696
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'PosBox Software Upgrader',
'version': '1.0',
'category': 'Hardware Drivers',
'website': 'https://www.odoo.com/page/point-of-sale',
'sequence': 6,
'summary': 'Allows to remotely upgrade the PosBox software',
'description': """
PosBox Software Upgrader
========================
This module allows to remotely upgrade the PosBox software to a
new version. This module is specific to the PosBox setup and environment
and should not be installed on regular openerp servers.
""",
'author': 'OpenERP SA',
'depends': ['hw_proxy'],
'test': [
],
'installable': False,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
NeblioTeam/neblio
|
test/functional/todo/p2p_unrequested_blocks.py
|
1
|
14460
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
Setup: two nodes, node0+node1, not connected to each other. Node1 will have
nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
We have one P2PInterface connection to node0 called test_node, and one to node1
called min_work_node.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance for node0, but node1 should skip processing due to
nMinimumChainWork.
Node1 is unused in tests 3-7:
3. Mine a block that forks from the genesis block, and deliver to test_node.
Node0 should not process this block (just accept the header), because it
is unrequested and doesn't have more or equal work to the tip.
4a,b. Send another two blocks that build on the forking block.
Node0 should process the second block but be stuck on the shorter chain,
because it's missing an intermediate block.
4c. Send 288 more blocks on the longer chain (the number of blocks ahead
we currently store).
Node0 should process all but the last block (too far ahead in height).
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that the peer has the missing block and send a
getdata for it.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
8. Create a fork which is invalid at a height longer than the current chain
(ie to which the node will try to reorg) but which has headers built on top
of the invalid block. Check that we get disconnected if we send more headers
on the chain the node now knows to be invalid.
9. Test Node1 is able to sync when connected to node0 (which should have sufficient
work on its chain).
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase, create_transaction
class AcceptBlockTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "nebliod"),
help="nebliod binary to test")
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], ["-minimumchainwork=0x10"]]
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from peers which are not whitelisted, while Node1 will be used for
# the whitelisted case.
# Node2 will be used for non-whitelisted peers to test the interaction
# with nMinimumChainWork.
self.setup_nodes()
def run_test(self):
# Setup the p2p connections and start up the network thread.
# test_node connects to node0 (not whitelisted)
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
# min_work_node connects to node1 (whitelisted)
min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
network_thread_start()
# Test logic begins here
test_node.wait_for_verack()
min_work_node.wait_for_verack()
# 1. Have nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted by node0
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in range(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2,None,block_time), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
min_work_node.send_message(msg_block(blocks_h2[1]))
for x in [test_node, min_work_node]:
x.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 1)
self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
# 3. Send another block that builds on genesis.
block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1,None,block_time), block_time)
block_time += 1
block_h1f.solve()
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h1f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
        # 4. Send another two blocks that build on the fork.
block_h2f = create_block(block_h1f.sha256, create_coinbase(2,None,block_time), block_time)
block_time += 1
block_h2f.solve()
test_node.send_message(msg_block(block_h2f))
test_node.sync_with_ping()
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h2f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
# But this block should be accepted by node since it has equal work.
self.nodes[0].getblock(block_h2f.hash)
self.log.info("Second height 2 block accepted, but not reorg'ed to")
# 4b. Now send another block that builds on the forking chain.
block_h3 = create_block(block_h2f.sha256, create_coinbase(3,None,block_h2f.nTime), block_h2f.nTime+1)
block_h3.solve()
test_node.send_message(msg_block(block_h3))
test_node.sync_with_ping()
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h3.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
self.nodes[0].getblock(block_h3.hash)
# But this block should be accepted by node since it has more work.
self.nodes[0].getblock(block_h3.hash)
self.log.info("Unrequested more-work block accepted")
# 4c. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node (as long as it's not missing any headers)
tip = block_h3
all_blocks = []
for i in range(288):
next_block = create_block(tip.sha256, create_coinbase(i + 4,None,tip.nTime), tip.nTime+1)
next_block.solve()
all_blocks.append(next_block)
tip = next_block
# Now send the block at height 5 and check that it wasn't accepted (missing header)
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
# The block at height 5 should be accepted if we provide the missing header, though
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(all_blocks[0]))
test_node.send_message(headers_message)
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
self.nodes[0].getblock(all_blocks[1].hash)
# Now send the blocks in all_blocks
for i in range(288):
test_node.send_message(msg_block(all_blocks[i]))
test_node.sync_with_ping()
# Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
for x in all_blocks[:-1]:
self.nodes[0].getblock(x.hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
# The node should have requested the blocks at some point, so
# disconnect/reconnect first
self.nodes[0].disconnect_p2ps()
self.nodes[1].disconnect_p2ps()
network_thread_join()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
test_node.wait_for_verack()
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
self.log.info("Unrequested block that would complete more-work chain was ignored")
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_message.pop("getdata", None)
test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_message["getdata"]
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, block_h1f.sha256)
self.log.info("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
self.nodes[0].getblock(all_blocks[286].hash)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
# 8. Create a chain which is invalid at a height longer than the
# current chain, but which has more blocks on top of that
block_289f = create_block(all_blocks[284].sha256, create_coinbase(289,None,all_blocks[284].nTime), all_blocks[284].nTime+1)
block_289f.solve()
block_290f = create_block(block_289f.sha256, create_coinbase(290,None,block_289f.nTime), block_289f.nTime+1)
block_290f.solve()
block_291 = create_block(block_290f.sha256, create_coinbase(291,None,block_290f.nTime), block_290f.nTime+1)
# block_291 spends a coinbase below maturity!
block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b"42", 1))
block_291.hashMerkleRoot = block_291.calc_merkle_root()
block_291.solve()
block_292 = create_block(block_291.sha256, create_coinbase(292,None,block_291.nTime), block_291.nTime+1)
block_292.solve()
# Now send all the headers on the chain and enough blocks to trigger reorg
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_289f))
headers_message.headers.append(CBlockHeader(block_290f))
headers_message.headers.append(CBlockHeader(block_291))
headers_message.headers.append(CBlockHeader(block_292))
test_node.send_message(headers_message)
test_node.sync_with_ping()
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_292.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)
test_node.send_message(msg_block(block_289f))
test_node.send_message(msg_block(block_290f))
test_node.sync_with_ping()
self.nodes[0].getblock(block_289f.hash)
self.nodes[0].getblock(block_290f.hash)
test_node.send_message(msg_block(block_291))
# At this point we've sent an obviously-bogus block, wait for full processing
# without assuming whether we will be disconnected or not
try:
# Only wait a short while so the test doesn't take forever if we do get
# disconnected
test_node.sync_with_ping(timeout=1)
except AssertionError:
test_node.wait_for_disconnect()
self.nodes[0].disconnect_p2ps()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
test_node.wait_for_verack()
# We should have failed reorg and switched back to 290 (but have block 291)
assert_equal(self.nodes[0].getblockcount(), 290)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)
# Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
block_293 = create_block(block_292.sha256, create_coinbase(293,None,block_292.nTime), block_292.nTime+1)
block_293.solve()
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_293))
test_node.send_message(headers_message)
test_node.wait_for_disconnect()
# 9. Connect node1 to node0 and ensure it is able to sync
connect_nodes(self.nodes[0], 1)
sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Successfully synced nodes 1 and 0")
if __name__ == '__main__':
AcceptBlockTest().main()
|
mit
|
celery/kombu
|
t/mocks.py
|
2
|
4712
|
from itertools import count
from unittest.mock import Mock
from case import ContextMock
from kombu.transport import base
from kombu.utils import json
def PromiseMock(*args, **kwargs):
m = Mock(*args, **kwargs)
def on_throw(exc=None, *args, **kwargs):
if exc:
raise exc
raise
m.throw.side_effect = on_throw
m.set_error_state.side_effect = on_throw
m.throw1.side_effect = on_throw
return m
class MockPool:
def __init__(self, value=None):
self.value = value or ContextMock()
def acquire(self, **kwargs):
return self.value
class Message(base.Message):
def __init__(self, *args, **kwargs):
self.throw_decode_error = kwargs.get('throw_decode_error', False)
super().__init__(*args, **kwargs)
def decode(self):
if self.throw_decode_error:
raise ValueError("can't decode message")
return super().decode()
class Channel(base.StdChannel):
open = True
throw_decode_error = False
_ids = count(1)
def __init__(self, connection):
self.connection = connection
self.called = []
self.deliveries = count(1)
self.to_deliver = []
self.events = {'basic_return': set()}
self.channel_id = next(self._ids)
def _called(self, name):
self.called.append(name)
def __contains__(self, key):
return key in self.called
def exchange_declare(self, *args, **kwargs):
self._called('exchange_declare')
def prepare_message(self, body, priority=0, content_type=None,
content_encoding=None, headers=None, properties={}):
self._called('prepare_message')
return {'body': body,
'headers': headers,
'properties': properties,
'priority': priority,
'content_type': content_type,
'content_encoding': content_encoding}
def basic_publish(self, message, exchange='', routing_key='',
mandatory=False, immediate=False, **kwargs):
self._called('basic_publish')
return message, exchange, routing_key
def exchange_delete(self, *args, **kwargs):
self._called('exchange_delete')
def queue_declare(self, *args, **kwargs):
self._called('queue_declare')
def queue_bind(self, *args, **kwargs):
self._called('queue_bind')
def queue_unbind(self, *args, **kwargs):
self._called('queue_unbind')
def queue_delete(self, queue, if_unused=False, if_empty=False, **kwargs):
self._called('queue_delete')
def basic_get(self, *args, **kwargs):
self._called('basic_get')
try:
return self.to_deliver.pop()
except IndexError:
pass
def queue_purge(self, *args, **kwargs):
self._called('queue_purge')
def basic_consume(self, *args, **kwargs):
self._called('basic_consume')
def basic_cancel(self, *args, **kwargs):
self._called('basic_cancel')
def basic_ack(self, *args, **kwargs):
self._called('basic_ack')
def basic_recover(self, requeue=False):
self._called('basic_recover')
def exchange_bind(self, *args, **kwargs):
self._called('exchange_bind')
def exchange_unbind(self, *args, **kwargs):
self._called('exchange_unbind')
def close(self):
self._called('close')
def message_to_python(self, message, *args, **kwargs):
self._called('message_to_python')
return Message(body=json.dumps(message),
channel=self,
delivery_tag=next(self.deliveries),
throw_decode_error=self.throw_decode_error,
content_type='application/json',
content_encoding='utf-8')
def flow(self, active):
self._called('flow')
def basic_reject(self, delivery_tag, requeue=False):
if requeue:
return self._called('basic_reject:requeue')
return self._called('basic_reject')
def basic_qos(self, prefetch_size=0, prefetch_count=0,
apply_global=False):
self._called('basic_qos')
class Connection:
connected = True
def __init__(self, client):
self.client = client
def channel(self):
return Channel(self)
class Transport(base.Transport):
def establish_connection(self):
return Connection(self.client)
def create_channel(self, connection):
return connection.channel()
def drain_events(self, connection, **kwargs):
return 'event'
def close_connection(self, connection):
connection.connected = False
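# --- Illustrative sketch (not part of the original file) ---
# How these fakes might be exercised in a test; this wiring is a hypothetical
# example, not taken from the kombu test suite:
#
#   transport = Transport(client=None)
#   channel = transport.create_channel(transport.establish_connection())
#   channel.basic_publish(channel.prepare_message('hello'))
#   assert 'basic_publish' in channel   # Channel.__contains__ checks recorded call names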
|
bsd-3-clause
|
igniteflow/codebase-python-api-client
|
codebase/utils.py
|
1
|
2236
|
import sys
from codebase import logger
from codebase.client import CodeBaseAPI
class CodeBaseAPIUtils(CodeBaseAPI):
def bulk_update_ticket_statuses(self, current_status_name, target_status_name):
"""
        Example usage to set all "Approved for Dev" to "Deployed to Dev":
STATUS_TRANSITIONS = {
'dev': ('Approved for Dev', 'Deployed to Dev'),
'uat': ('Approved for UAT', 'Deployed to UAT'),
'prod': ('Approved for Prod', 'Deployed to Prod'),
}
env = 'dev'
current_status_name = STATUS_TRANSITIONS[env][0]
target_status_name = STATUS_TRANSITIONS[env][1]
codebase_utils.bulk_update_ticket_statuses(current_status_name, target_status_name)
"""
# get the status ids because Codebase search doesn't support searching on status id
new_status_id = None
statuses = self.statuses()
for status in statuses:
if status['ticketing_status']['name'] == target_status_name:
new_status_id = status['ticketing_status']['id']
# exit if the ticket status was not found
if new_status_id is None:
status_names = ', '.join([
status['ticketing_status']['name'] for status in statuses
])
logger.info(
u'Status "{}" not found in project statuses. '
u'Options are: {}'.format(target_status_name, status_names)
)
return
# update the tickets
items = self.search_all(status=current_status_name)
updated = []
for item in items:
ticket_id = item['ticket']['ticket_id']
data = {
'ticket_note': {
u'changes': {
u'status_id': unicode(new_status_id),
},
},
}
self.add_note(ticket_id, data)
# print output
sys.stdout.write('[{}] {} {} --> {}\n'.format(
ticket_id,
item['ticket']['summary'],
current_status_name,
target_status_name
))
updated.append(ticket_id)
return updated
|
mit
|
labcodes/django
|
tests/forms_tests/widget_tests/base.py
|
83
|
1134
|
from django.forms.renderers import DjangoTemplates, Jinja2
from django.test import SimpleTestCase
try:
import jinja2
except ImportError:
jinja2 = None
class WidgetTest(SimpleTestCase):
beatles = (('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))
@classmethod
def setUpClass(cls):
cls.django_renderer = DjangoTemplates()
cls.jinja2_renderer = Jinja2() if jinja2 else None
cls.renderers = [cls.django_renderer] + ([cls.jinja2_renderer] if cls.jinja2_renderer else [])
super().setUpClass()
def check_html(self, widget, name, value, html='', attrs=None, strict=False, **kwargs):
assertEqual = self.assertEqual if strict else self.assertHTMLEqual
if self.jinja2_renderer:
output = widget.render(name, value, attrs=attrs, renderer=self.jinja2_renderer, **kwargs)
            # Django escapes quotes with '&quot;' while Jinja2 uses '&#34;'.
            assertEqual(output.replace('&#34;', '&quot;'), html)
output = widget.render(name, value, attrs=attrs, renderer=self.django_renderer, **kwargs)
assertEqual(output, html)
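# --- Illustrative sketch (not part of the original file) ---
# A hypothetical subclass showing how check_html() is typically used; the
# TextInput example below is an assumption, not taken from this test suite:
#
#   from django.forms import TextInput
#
#   class TextInputTest(WidgetTest):
#       def test_render(self):
#           self.check_html(TextInput(), 'msg', 'hi',
#                           html='<input type="text" name="msg" value="hi">')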
|
bsd-3-clause
|
sushramesh/lwc
|
lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/hebrewprober.py
|
2929
|
13359
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contains special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one it is is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letter scores maintained and both
# model probers' scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
# windows-1255 / ISO-8859-8 code points of interest
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6
# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5
# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mLogicalProber = None
self._mVisualProber = None
self.reset()
def reset(self):
self._mFinalCharLogicalScore = 0
self._mFinalCharVisualScore = 0
# The two last characters seen in the previous buffer,
# mPrev and mBeforePrev are initialized to space in order to simulate
# a word delimiter at the beginning of the data
self._mPrev = ' '
self._mBeforePrev = ' '
# These probers are owned by the group prober.
def set_model_probers(self, logicalProber, visualProber):
self._mLogicalProber = logicalProber
self._mVisualProber = visualProber
def is_final(self, c):
return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
FINAL_TSADI]
def is_non_final(self, c):
# The normal Tsadi is not a good Non-Final letter due to words like
# 'lechotet' (to chat) containing an apostrophe after the tsadi. This
# apostrophe is converted to a space in FilterWithoutEnglishLetters
# causing the Non-Final tsadi to appear at an end of a word even
# though this is not the case in the original text.
# The letters Pe and Kaf rarely display a related behavior of not being
# a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak'
# for example legally end with a Non-Final Pe or Kaf. However, the
# benefit of these letters as Non-Final letters outweighs the damage
# since these words are quite rare.
return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE]
def feed(self, aBuf):
# Final letter analysis for logical-visual decision.
# Look for evidence that the received buffer is either logical Hebrew
# or visual Hebrew.
# The following cases are checked:
# 1) A word longer than 1 letter, ending with a final letter. This is
# an indication that the text is laid out "naturally" since the
# final letter really appears at the end. +1 for logical score.
# 2) A word longer than 1 letter, ending with a Non-Final letter. In
# normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
# should not end with the Non-Final form of that letter. Exceptions
# to this rule are mentioned above in isNonFinal(). This is an
# indication that the text is laid out backwards. +1 for visual
# score
# 3) A word longer than 1 letter, starting with a final letter. Final
# letters should not appear at the beginning of a word. This is an
# indication that the text is laid out backwards. +1 for visual
# score.
#
# The visual score and logical score are accumulated throughout the
# text and are finally checked against each other in GetCharSetName().
# No checking for final letters in the middle of words is done since
# that case is not an indication for either Logical or Visual text.
#
# We automatically filter out all 7-bit characters (replace them with
# spaces) so the word boundary detection works properly. [MAP]
if self.get_state() == eNotMe:
# Both model probers say it's not them. No reason to continue.
return eNotMe
aBuf = self.filter_high_bit_only(aBuf)
for cur in aBuf:
if cur == ' ':
# We stand on a space - a word just ended
if self._mBeforePrev != ' ':
# next-to-last char was not a space so self._mPrev is not a
# 1 letter word
if self.is_final(self._mPrev):
# case (1) [-2:not space][-1:final letter][cur:space]
self._mFinalCharLogicalScore += 1
elif self.is_non_final(self._mPrev):
# case (2) [-2:not space][-1:Non-Final letter][
# cur:space]
self._mFinalCharVisualScore += 1
else:
# Not standing on a space
if ((self._mBeforePrev == ' ') and
(self.is_final(self._mPrev)) and (cur != ' ')):
# case (3) [-2:space][-1:final letter][cur:not space]
self._mFinalCharVisualScore += 1
self._mBeforePrev = self._mPrev
self._mPrev = cur
# Forever detecting, till the end or until both model probers return
# eNotMe (handled above)
return eDetecting
def get_charset_name(self):
# Make the decision: is it Logical or Visual?
# If the final letter score distance is dominant enough, rely on it.
finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
if finalsub >= MIN_FINAL_CHAR_DISTANCE:
return LOGICAL_HEBREW_NAME
if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
return VISUAL_HEBREW_NAME
# It's not dominant enough, try to rely on the model scores instead.
modelsub = (self._mLogicalProber.get_confidence()
- self._mVisualProber.get_confidence())
if modelsub > MIN_MODEL_DISTANCE:
return LOGICAL_HEBREW_NAME
if modelsub < -MIN_MODEL_DISTANCE:
return VISUAL_HEBREW_NAME
# Still no good, back to final letter distance, maybe it'll save the
# day.
if finalsub < 0.0:
return VISUAL_HEBREW_NAME
# (finalsub > 0 - Logical) or (don't know what to do) default to
# Logical.
return LOGICAL_HEBREW_NAME
def get_state(self):
# Remain active as long as any of the model probers are active.
if (self._mLogicalProber.get_state() == eNotMe) and \
(self._mVisualProber.get_state() == eNotMe):
return eNotMe
return eDetecting
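# --- Illustrative sketch (not part of the original module) ---
# A minimal, self-contained demonstration of the final-letter scoring idea
# described in the comments above, using the module's code points. The buffer
# is assumed to already be reduced to high-bit characters and spaces, as the
# SBCSGroupProber would do; the real prober also weighs the two model probers'
# confidences before choosing a charset name.
def _demo_final_letter_scores(buf):
    final_letters = (FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE, FINAL_TSADI)
    non_final_letters = (NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE)
    logical_score = visual_score = 0
    prev = before_prev = ' '
    for cur in buf:
        if cur == ' ':
            # A word just ended; inspect its last letter (cases 1 and 2).
            if before_prev != ' ':
                if wrap_ord(prev) in final_letters:
                    logical_score += 1    # final letter at the end of a word
                elif wrap_ord(prev) in non_final_letters:
                    visual_score += 1     # non-final letter at the end of a word
        elif before_prev == ' ' and wrap_ord(prev) in final_letters and cur != ' ':
            visual_score += 1             # final letter at the start of a word (case 3)
        before_prev, prev = prev, cur
    return logical_score, visual_score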
|
mit
|
seize-the-dave/XlsxWriter
|
xlsxwriter/test/worksheet/test_cond_format13.py
|
8
|
4541
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
class TestAssembleWorksheet(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with conditional formatting."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.write('A1', 1)
worksheet.write('A2', 2)
worksheet.write('A3', 3)
worksheet.write('A4', 4)
worksheet.write('A5', 5)
worksheet.write('A6', 6)
worksheet.write('A7', 7)
worksheet.write('A8', 8)
worksheet.write('A9', 9)
worksheet.write('A10', 10)
worksheet.write('A11', 11)
worksheet.write('A12', 12)
worksheet.conditional_format('A1:A12', {'type': '3_color_scale'})
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="A1:A12"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="1" spans="1:1">
<c r="A1">
<v>1</v>
</c>
</row>
<row r="2" spans="1:1">
<c r="A2">
<v>2</v>
</c>
</row>
<row r="3" spans="1:1">
<c r="A3">
<v>3</v>
</c>
</row>
<row r="4" spans="1:1">
<c r="A4">
<v>4</v>
</c>
</row>
<row r="5" spans="1:1">
<c r="A5">
<v>5</v>
</c>
</row>
<row r="6" spans="1:1">
<c r="A6">
<v>6</v>
</c>
</row>
<row r="7" spans="1:1">
<c r="A7">
<v>7</v>
</c>
</row>
<row r="8" spans="1:1">
<c r="A8">
<v>8</v>
</c>
</row>
<row r="9" spans="1:1">
<c r="A9">
<v>9</v>
</c>
</row>
<row r="10" spans="1:1">
<c r="A10">
<v>10</v>
</c>
</row>
<row r="11" spans="1:1">
<c r="A11">
<v>11</v>
</c>
</row>
<row r="12" spans="1:1">
<c r="A12">
<v>12</v>
</c>
</row>
</sheetData>
<conditionalFormatting sqref="A1:A12">
<cfRule type="colorScale" priority="1">
<colorScale>
<cfvo type="min" val="0"/>
<cfvo type="percentile" val="50"/>
<cfvo type="max" val="0"/>
<color rgb="FFF8696B"/>
<color rgb="FFFFEB84"/>
<color rgb="FF63BE7B"/>
</colorScale>
</cfRule>
</conditionalFormatting>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
|
bsd-2-clause
|
PrestonMonteWest/compmart
|
account/forms.py
|
1
|
5138
|
import datetime
import re
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from django.forms.widgets import Widget, Select
from django.utils import six
from django.utils.dates import MONTHS
from django.utils.safestring import mark_safe
from .models import Address, CreditCard
class MonthYearWidget(Widget):
'''
A Widget that splits date input into two <select> boxes for month and year,
with "day" defaulting to the first of the month.
'''
none_value = (0, '---')
month_field = '%s_month'
year_field = '%s_year'
date_re = re.compile(r'(\d{4})-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1])$')
def __init__(self, attrs=None, years=None, required=True):
self.attrs = attrs or {}
self.required = required
if years:
self.years = years
else:
this_year = datetime.date.today().year
self.years = range(this_year, this_year + 10)
def render(self, name, value, attrs=None):
try:
year_val, month_val = value.year, value.month
except AttributeError:
year_val = month_val = None
if isinstance(value, six.string_types):
            match = self.date_re.match(value)
if match:
year_val, month_val, day_val = [int(v) for v in match.groups()]
output = []
if 'id' in self.attrs:
id_ = self.attrs['id']
else:
id_ = 'id_%s' % name
month_choices = list(MONTHS.items())
year_choices = [(i, i) for i in self.years]
if not self.required:
month_choices.insert(0, self.none_value)
year_choices.insert(0, self.none_value)
local_attrs = self.build_attrs({'id': self.month_field % id_})
s = Select(choices=month_choices)
select_html = s.render(self.month_field % name, month_val, local_attrs)
output.append(select_html)
local_attrs['id'] = self.year_field % id_
s = Select(choices=year_choices)
select_html = s.render(self.year_field % name, year_val, local_attrs)
output.append(select_html)
return mark_safe('\n'.join(output))
def id_for_label(self, id_):
return '%s_month' % id_
def value_from_datadict(self, data, files, name):
y = data.get(self.year_field % name)
m = data.get(self.month_field % name)
if y == m == '0':
return None
if y and m:
return '%s-%s-%s' % (y, m, 1)
return data.get(name)
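# --- Illustrative sketch (not part of the original file) ---
# A hypothetical form wiring for this widget; the field name below is an
# assumption, not taken from this project:
#
#   class ExpiryForm(forms.Form):
#       expiration = forms.DateField(widget=MonthYearWidget())
#
#   # Submitted data arrives as separate month/year keys and is reassembled by
#   # value_from_datadict() into 'YYYY-M-1' before the field parses it:
#   ExpiryForm(data={'expiration_month': '6', 'expiration_year': '2025'})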
class AddressForm(forms.ModelForm):
class Meta:
model = Address
fields = ('street', 'city', 'state', 'zip_code')
class CreditCardForm(forms.ModelForm):
number = forms.CharField(label='Card Number', min_length=13, max_length=19)
class Meta:
model = CreditCard
fields = ('holder_name', 'expiration_date')
widgets = {'expiration_date': MonthYearWidget}
@staticmethod
def is_luhn(number):
if len(number) < 2:
raise ValueError('Card number is too short.')
digits = list(map(int, number))
total = sum(digits[-1::-2])
even_digits = digits[-2::-2]
for digit in even_digits:
digit += digit
total += (digit if digit <= 9 else digit - 9)
return total % 10 == 0
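    # Illustrative worked example (not part of the original file): for the
    # well-known test number 4111111111111111, the odd-position digits from
    # the right sum to 8, the doubled even-position digits sum to 22, and
    # 8 + 22 = 30 is divisible by 10, so is_luhn('4111111111111111') is True.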
@staticmethod
def get_card_type(number):
if number[0] == '4':
return 'Visa'
elif number[:2] in ('34', '37'):
return 'American Express'
elif number[:2] in ('51', '52', '53', '54', '55'):
return 'MasterCard'
else:
raise forms.ValidationError('Unsupported card entered.')
def clean_number(self):
number = self.cleaned_data['number']
if not number.isdigit():
raise forms.ValidationError('Card number must be numeric.')
if not self.is_luhn(number):
raise forms.ValidationError('Invalid card number entered.')
return number
def clean_expiration_date(self):
exp_date = self.cleaned_data['expiration_date']
today = datetime.date.today()
expired = exp_date.year < today.year or (
exp_date.year == today.year and exp_date.month < today.month
)
if expired:
raise forms.ValidationError('Card is expired.')
return exp_date
def clean(self):
cleaned_data = super().clean()
number = cleaned_data.get('number')
if number:
cleaned_data['card_type'] = self.get_card_type(number)
return cleaned_data
'''def save(self, commit=True):
card = super().save(commit=False)
card.number = self.cleaned_data['number']
if commit:
card.save()
return card'''
class MyUserCreationForm(UserCreationForm):
class Meta:
model = User
fields = (
'username',
'first_name',
'email',
)
email = forms.EmailField(required=True)
first_name = forms.CharField(required=True)
|
apache-2.0
|
asm-products/movie-database-service
|
ani/lib/python2.7/site-packages/Crypto/SelfTest/Util/test_Counter.py
|
117
|
7095
|
# -*- coding: utf-8 -*-
#
# SelfTest/Util/test_Counter: Self-test for the Crypto.Util.Counter module
#
# Written in 2009 by Dwayne C. Litzenberger <[email protected]>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-tests for Crypto.Util.Counter"""
__revision__ = "$Id$"
import sys
if sys.version_info[0] == 2 and sys.version_info[1] == 1:
from Crypto.Util.py21compat import *
from Crypto.Util.py3compat import *
import unittest
class CounterTests(unittest.TestCase):
def setUp(self):
global Counter
from Crypto.Util import Counter
def test_BE_shortcut(self):
"""Big endian, shortcut enabled"""
c = Counter.new(128)
self.assertEqual(c.__PCT_CTR_SHORTCUT__,True) # assert_
c = Counter.new(128, little_endian=False)
self.assertEqual(c.__PCT_CTR_SHORTCUT__,True) # assert_
c = Counter.new(128, disable_shortcut=False)
self.assertEqual(c.__PCT_CTR_SHORTCUT__,True) # assert_
c = Counter.new(128, little_endian=False, disable_shortcut=False)
self.assertEqual(c.__PCT_CTR_SHORTCUT__,True) # assert_
def test_LE_shortcut(self):
"""Little endian, shortcut enabled"""
c = Counter.new(128, little_endian=True)
self.assertEqual(c.__PCT_CTR_SHORTCUT__,True) # assert_
c = Counter.new(128, little_endian=True, disable_shortcut=False)
self.assertEqual(c.__PCT_CTR_SHORTCUT__,True) # assert_
def test_BE_no_shortcut(self):
"""Big endian, shortcut disabled"""
c = Counter.new(128, disable_shortcut=True)
self.assertRaises(AttributeError, getattr, c, '__PCT_CTR_SHORTCUT__')
c = Counter.new(128, little_endian=False, disable_shortcut=True)
self.assertRaises(AttributeError, getattr, c, '__PCT_CTR_SHORTCUT__')
def test_LE_no_shortcut(self):
"""Little endian, shortcut disabled"""
c = Counter.new(128, little_endian=True, disable_shortcut=True)
self.assertRaises(AttributeError, getattr, c, '__PCT_CTR_SHORTCUT__')
def test_BE_defaults(self):
"""128-bit, Big endian, defaults"""
c = Counter.new(128)
self.assertEqual(1, c.next_value())
self.assertEqual(b("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01"), c())
self.assertEqual(2, c.next_value())
self.assertEqual(b("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02"), c())
for i in xrange(3, 256):
self.assertEqual(i, c.next_value())
self.assertEqual(b("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")+bchr(i), c())
self.assertEqual(256, c.next_value())
self.assertEqual(b("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00"), c())
def test_LE_defaults(self):
"""128-bit, Little endian, defaults"""
c = Counter.new(128, little_endian=True)
self.assertEqual(1, c.next_value())
self.assertEqual(b("\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"), c())
self.assertEqual(2, c.next_value())
self.assertEqual(b("\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"), c())
for i in xrange(3, 256):
self.assertEqual(i, c.next_value())
self.assertEqual(bchr(i)+b("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"), c())
self.assertEqual(256, c.next_value())
self.assertEqual(b("\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"), c())
def test_BE8_wraparound(self):
"""8-bit, Big endian, wraparound"""
c = Counter.new(8)
for i in xrange(1, 256):
self.assertEqual(i, c.next_value())
self.assertEqual(bchr(i), c())
self.assertRaises(OverflowError, c.next_value)
self.assertRaises(OverflowError, c)
self.assertRaises(OverflowError, c.next_value)
self.assertRaises(OverflowError, c)
def test_LE8_wraparound(self):
"""8-bit, Little endian, wraparound"""
c = Counter.new(8, little_endian=True)
for i in xrange(1, 256):
self.assertEqual(i, c.next_value())
self.assertEqual(bchr(i), c())
self.assertRaises(OverflowError, c.next_value)
self.assertRaises(OverflowError, c)
self.assertRaises(OverflowError, c.next_value)
self.assertRaises(OverflowError, c)
def test_BE8_wraparound_allowed(self):
"""8-bit, Big endian, wraparound with allow_wraparound=True"""
c = Counter.new(8, allow_wraparound=True)
for i in xrange(1, 256):
self.assertEqual(i, c.next_value())
self.assertEqual(bchr(i), c())
self.assertEqual(0, c.next_value())
self.assertEqual(b("\x00"), c())
self.assertEqual(1, c.next_value())
def test_LE8_wraparound_allowed(self):
"""8-bit, Little endian, wraparound with allow_wraparound=True"""
c = Counter.new(8, little_endian=True, allow_wraparound=True)
for i in xrange(1, 256):
self.assertEqual(i, c.next_value())
self.assertEqual(bchr(i), c())
self.assertEqual(0, c.next_value())
self.assertEqual(b("\x00"), c())
self.assertEqual(1, c.next_value())
def test_BE8_carry(self):
"""8-bit, Big endian, carry attribute"""
c = Counter.new(8)
for i in xrange(1, 256):
self.assertEqual(0, c.carry)
self.assertEqual(i, c.next_value())
self.assertEqual(bchr(i), c())
self.assertEqual(1, c.carry)
def test_LE8_carry(self):
"""8-bit, Little endian, carry attribute"""
c = Counter.new(8, little_endian=True)
for i in xrange(1, 256):
self.assertEqual(0, c.carry)
self.assertEqual(i, c.next_value())
self.assertEqual(bchr(i), c())
self.assertEqual(1, c.carry)
def get_tests(config={}):
from Crypto.SelfTest.st_common import list_test_cases
return list_test_cases(CounterTests)
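# A minimal usage sketch (illustrative, not part of the original test suite):
# Counter objects are normally handed to a CTR-mode cipher. The 16-byte key and
# plaintext below are made-up values.
def _example_ctr_usage():
    from Crypto.Cipher import AES
    from Crypto.Util import Counter
    ctr = Counter.new(128)
    cipher = AES.new(b("0123456789abcdef"), AES.MODE_CTR, counter=ctr)
    return cipher.encrypt(b("attack at dawn"))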
if __name__ == '__main__':
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
|
agpl-3.0
|
xuweiliang/Codelibrary
|
nova/virt/xenapi/volumeops.py
|
17
|
9667
|
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for Storage-related functions (attach, detach, etc).
"""
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import strutils
from nova import exception
from nova.i18n import _LI, _LW
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
LOG = logging.getLogger(__name__)
class VolumeOps(object):
"""Management class for Volume-related tasks."""
def __init__(self, session):
self._session = session
def attach_volume(self, connection_info, instance_name, mountpoint,
hotplug=True):
"""Attach volume to VM instance."""
# TODO(johngarbutt) move this into _attach_volume_to_vm
dev_number = volume_utils.get_device_number(mountpoint)
vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)
return self._attach_volume(connection_info, vm_ref,
instance_name, dev_number, hotplug)
def connect_volume(self, connection_info):
"""Attach volume to hypervisor, but not the VM."""
return self._attach_volume(connection_info)
def _attach_volume(self, connection_info, vm_ref=None, instance_name=None,
dev_number=None, hotplug=False):
self._check_is_supported_driver_type(connection_info)
connection_data = connection_info['data']
sr_ref, sr_uuid = self._connect_to_volume_provider(connection_data,
instance_name)
try:
vdi_ref = self._connect_hypervisor_to_volume(sr_ref,
connection_data)
vdi_uuid = self._session.VDI.get_uuid(vdi_ref)
LOG.info(_LI('Connected volume (vdi_uuid): %s'), vdi_uuid)
if vm_ref:
self._attach_volume_to_vm(vdi_ref, vm_ref, instance_name,
dev_number, hotplug)
return (sr_uuid, vdi_uuid)
except Exception:
with excutils.save_and_reraise_exception():
# NOTE(sirp): Forgetting the SR will have the effect of
# cleaning up the VDI and VBD records, so no need to handle
# that explicitly.
volume_utils.forget_sr(self._session, sr_ref)
def _check_is_supported_driver_type(self, connection_info):
driver_type = connection_info['driver_volume_type']
if driver_type not in ['iscsi', 'xensm']:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
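    # Illustrative shape of connection_info for the iSCSI case (values are made
    # up; the exact keys are supplied by the Cinder driver):
    #
    #   {'driver_volume_type': 'iscsi',
    #    'data': {'target_iqn': 'iqn.2010-10.org.openstack:volume-...',
    #             'target_portal': '192.0.2.10:3260',
    #             'target_lun': 1}}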
def _connect_to_volume_provider(self, connection_data, instance_name):
sr_uuid, sr_label, sr_params = volume_utils.parse_sr_info(
connection_data, 'Disk-for:%s' % instance_name)
sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid)
if not sr_ref:
            # introduce the SR because it is not already present
sr_ref = volume_utils.introduce_sr(
self._session, sr_uuid, sr_label, sr_params)
return (sr_ref, sr_uuid)
def _connect_hypervisor_to_volume(self, sr_ref, connection_data):
# connection_data can have credentials in it so make sure to scrub
# those before logging.
LOG.debug("Connect volume to hypervisor: %s",
strutils.mask_password(connection_data))
if 'vdi_uuid' in connection_data:
vdi_ref = volume_utils.introduce_vdi(
self._session, sr_ref,
vdi_uuid=connection_data['vdi_uuid'])
elif 'target_lun' in connection_data:
vdi_ref = volume_utils.introduce_vdi(
self._session, sr_ref,
target_lun=connection_data['target_lun'])
else:
# NOTE(sirp): This will introduce the first VDI in the SR
vdi_ref = volume_utils.introduce_vdi(self._session, sr_ref)
return vdi_ref
def _attach_volume_to_vm(self, vdi_ref, vm_ref, instance_name, dev_number,
hotplug):
LOG.debug('Attach_volume vdi: %(vdi_ref)s vm: %(vm_ref)s',
{'vdi_ref': vdi_ref, 'vm_ref': vm_ref})
# osvol is added to the vbd so we can spot which vbds are volumes
vbd_ref = vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
dev_number, bootable=False,
osvol=True)
if hotplug:
# NOTE(johngarbutt) can only call VBD.plug on a running vm
running = not vm_utils.is_vm_shutdown(self._session, vm_ref)
if running:
LOG.debug("Plugging VBD: %s", vbd_ref)
self._session.VBD.plug(vbd_ref, vm_ref)
LOG.info(_LI('Dev %(dev_number)s attached to'
' instance %(instance_name)s'),
{'instance_name': instance_name, 'dev_number': dev_number})
def detach_volume(self, connection_info, instance_name, mountpoint):
"""Detach volume storage to VM instance."""
LOG.debug("Detach_volume: %(instance_name)s, %(mountpoint)s",
{'instance_name': instance_name, 'mountpoint': mountpoint})
vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)
device_number = volume_utils.get_device_number(mountpoint)
vbd_ref = volume_utils.find_vbd_by_number(self._session, vm_ref,
device_number)
if vbd_ref is None:
# NOTE(sirp): If we don't find the VBD then it must have been
# detached previously.
LOG.warning(_LW('Skipping detach because VBD for %s was '
'not found'), instance_name)
else:
self._detach_vbds_and_srs(vm_ref, [vbd_ref])
LOG.info(_LI('Mountpoint %(mountpoint)s detached from instance'
' %(instance_name)s'),
{'instance_name': instance_name,
'mountpoint': mountpoint})
def _detach_vbds_and_srs(self, vm_ref, vbd_refs):
is_vm_shutdown = vm_utils.is_vm_shutdown(self._session, vm_ref)
for vbd_ref in vbd_refs:
# find sr before we destroy the vbd
sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
if not is_vm_shutdown:
vm_utils.unplug_vbd(self._session, vbd_ref, vm_ref)
vm_utils.destroy_vbd(self._session, vbd_ref)
# Forget (i.e. disconnect) SR only if not in use
volume_utils.purge_sr(self._session, sr_ref)
def detach_all(self, vm_ref):
"""Detach all cinder volumes."""
vbd_refs = self._get_all_volume_vbd_refs(vm_ref)
if vbd_refs:
self._detach_vbds_and_srs(vm_ref, vbd_refs)
def _get_all_volume_vbd_refs(self, vm_ref):
"""Return VBD refs for all Nova/Cinder volumes."""
vbd_refs = self._session.VM.get_VBDs(vm_ref)
for vbd_ref in vbd_refs:
other_config = self._session.VBD.get_other_config(vbd_ref)
if other_config.get('osvol'):
yield vbd_ref
def find_bad_volumes(self, vm_ref):
"""Find any volumes with their connection severed.
Certain VM operations (e.g. `VM.start`, `VM.reboot`, etc.) will not
work when a VBD is present that points to a non-working volume. To work
around this, we scan for non-working volumes and detach them before
retrying a failed operation.
"""
bad_devices = []
vbd_refs = self._get_all_volume_vbd_refs(vm_ref)
for vbd_ref in vbd_refs:
sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
try:
# TODO(sirp): bug1152401 This relies on a 120 sec timeout
# within XenServer, update this to fail-fast when this is fixed
# upstream
self._session.SR.scan(sr_ref)
except self._session.XenAPI.Failure as exc:
if exc.details[0] == 'SR_BACKEND_FAILURE_40':
device = self._session.VBD.get_device(vbd_ref)
bad_devices.append('/dev/%s' % device)
else:
raise
return bad_devices
def safe_cleanup_from_vdis(self, vdi_refs):
# A helper method to detach volumes that are not associated with an
# instance
for vdi_ref in vdi_refs:
try:
sr_ref = volume_utils.find_sr_from_vdi(self._session, vdi_ref)
except exception.StorageError as exc:
LOG.debug(exc.format_message())
continue
try:
# Forget (i.e. disconnect) SR only if not in use
volume_utils.purge_sr(self._session, sr_ref)
except Exception:
LOG.debug('Ignoring error while purging sr: %s' % sr_ref,
exc_info=True)
|
apache-2.0
|
puzan/ansible
|
lib/ansible/modules/cloud/google/gce_img.py
|
25
|
6737
|
#!/usr/bin/python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""An Ansible module to utilize GCE image resources."""
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: gce_img
version_added: "1.9"
short_description: utilize GCE image resources
description:
- This module can create and delete GCE private images from gzipped
compressed tarball containing raw disk data or from existing detached
disks in any zone. U(https://cloud.google.com/compute/docs/images)
options:
name:
description:
- the name of the image to create or delete
required: true
default: null
description:
description:
- an optional description
required: false
default: null
family:
description:
- an optional family name
required: false
default: null
version_added: "2.2"
source:
description:
- the source disk or the Google Cloud Storage URI to create the image from
required: false
default: null
state:
description:
- desired state of the image
required: false
default: "present"
choices: ["present", "absent"]
zone:
description:
- the zone of the disk specified by source
required: false
default: "us-central1-a"
timeout:
description:
- timeout for the operation
required: false
default: 180
version_added: "2.0"
service_account_email:
description:
- service account email
required: false
default: null
pem_file:
description:
- path to the pem file associated with the service account email
required: false
default: null
project_id:
description:
- your GCE project ID
required: false
default: null
requirements:
- "python >= 2.6"
- "apache-libcloud"
author: "Tom Melendez (supertom)"
'''
EXAMPLES = '''
# Create an image named test-image from the disk 'test-disk' in zone us-central1-a.
- gce_img:
name: test-image
source: test-disk
zone: us-central1-a
state: present
# Create an image named test-image from a tarball in Google Cloud Storage.
- gce_img:
name: test-image
source: https://storage.googleapis.com/bucket/path/to/image.tgz
# Alternatively use the gs scheme
- gce_img:
name: test-image
source: gs://bucket/path/to/image.tgz
# Delete an image named test-image.
- gce_img:
name: test-image
state: absent
'''
try:
import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError
from libcloud.common.google import ResourceExistsError
from libcloud.common.google import ResourceNotFoundError
_ = Provider.GCE
has_libcloud = True
except ImportError:
has_libcloud = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gce import gce_connect
GCS_URI = 'https://storage.googleapis.com/'
def create_image(gce, name, module):
"""Create an image with the specified name."""
source = module.params.get('source')
zone = module.params.get('zone')
desc = module.params.get('description')
timeout = module.params.get('timeout')
family = module.params.get('family')
if not source:
module.fail_json(msg='Must supply a source', changed=False)
if source.startswith(GCS_URI):
# source is a Google Cloud Storage URI
volume = source
elif source.startswith('gs://'):
# libcloud only accepts https URI.
volume = source.replace('gs://', GCS_URI)
else:
try:
volume = gce.ex_get_volume(source, zone)
except ResourceNotFoundError:
module.fail_json(msg='Disk %s not found in zone %s' % (source, zone),
changed=False)
except GoogleBaseError as e:
module.fail_json(msg=str(e), changed=False)
gce_extra_args = {}
if family is not None:
gce_extra_args['family'] = family
old_timeout = gce.connection.timeout
try:
gce.connection.timeout = timeout
gce.ex_create_image(name, volume, desc, use_existing=False, **gce_extra_args)
return True
except ResourceExistsError:
return False
except GoogleBaseError as e:
module.fail_json(msg=str(e), changed=False)
finally:
gce.connection.timeout = old_timeout
def delete_image(gce, name, module):
"""Delete a specific image resource by name."""
try:
gce.ex_delete_image(name)
return True
except ResourceNotFoundError:
return False
except GoogleBaseError as e:
module.fail_json(msg=str(e), changed=False)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
family=dict(),
description=dict(),
source=dict(),
state=dict(default='present', choices=['present', 'absent']),
zone=dict(default='us-central1-a'),
service_account_email=dict(),
pem_file=dict(type='path'),
project_id=dict(),
timeout=dict(type='int', default=180)
)
)
if not has_libcloud:
module.fail_json(msg='libcloud with GCE support is required.')
gce = gce_connect(module)
name = module.params.get('name')
state = module.params.get('state')
family = module.params.get('family')
changed = False
if family is not None and hasattr(libcloud, '__version__') and libcloud.__version__ <= '0.20.1':
module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'family' option",
changed=False)
# user wants to create an image.
if state == 'present':
changed = create_image(gce, name, module)
# user wants to delete the image.
if state == 'absent':
changed = delete_image(gce, name, module)
module.exit_json(changed=changed, name=name)
if __name__ == '__main__':
main()
|
gpl-3.0
|
abhattad4/Digi-Menu
|
tests/many_to_many/tests.py
|
58
|
19011
|
from __future__ import unicode_literals
from django.db import transaction
from django.test import TestCase
from django.utils import six
from .models import Article, Publication
class ManyToManyTests(TestCase):
def setUp(self):
# Create a couple of Publications.
self.p1 = Publication.objects.create(id=None, title='The Python Journal')
self.p2 = Publication.objects.create(id=None, title='Science News')
self.p3 = Publication.objects.create(id=None, title='Science Weekly')
self.p4 = Publication.objects.create(title='Highlights for Children')
self.a1 = Article.objects.create(id=None, headline='Django lets you build Web apps easily')
self.a1.publications.add(self.p1)
self.a2 = Article.objects.create(id=None, headline='NASA uses Python')
self.a2.publications.add(self.p1, self.p2, self.p3, self.p4)
self.a3 = Article.objects.create(headline='NASA finds intelligent life on Earth')
self.a3.publications.add(self.p2)
self.a4 = Article.objects.create(headline='Oxygen-free diet works wonders')
self.a4.publications.add(self.p2)
def test_add(self):
# Create an Article.
        a5 = Article(id=None, headline='Django lets you create Web apps easily')
# You can't associate it with a Publication until it's been saved.
self.assertRaises(ValueError, getattr, a5, 'publications')
# Save it!
a5.save()
# Associate the Article with a Publication.
a5.publications.add(self.p1)
self.assertQuerysetEqual(a5.publications.all(),
['<Publication: The Python Journal>'])
# Create another Article, and set it to appear in both Publications.
a6 = Article(id=None, headline='ESA uses Python')
a6.save()
a6.publications.add(self.p1, self.p2)
a6.publications.add(self.p3)
# Adding a second time is OK
a6.publications.add(self.p3)
self.assertQuerysetEqual(a6.publications.all(),
[
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
# Adding an object of the wrong type raises TypeError
with six.assertRaisesRegex(self, TypeError, "'Publication' instance expected, got <Article.*"):
with transaction.atomic():
a6.publications.add(a5)
# Add a Publication directly via publications.add by using keyword arguments.
a6.publications.create(title='Highlights for Adults')
self.assertQuerysetEqual(a6.publications.all(),
[
'<Publication: Highlights for Adults>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
def test_reverse_add(self):
# Adding via the 'other' end of an m2m
a5 = Article(headline='NASA finds intelligent life on Mars')
a5.save()
self.p2.article_set.add(a5)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA finds intelligent life on Mars>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(a5.publications.all(),
['<Publication: Science News>'])
# Adding via the other end using keywords
self.p2.article_set.create(headline='Carbon-free diet works wonders')
self.assertQuerysetEqual(
self.p2.article_set.all(),
[
'<Article: Carbon-free diet works wonders>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA finds intelligent life on Mars>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
a6 = self.p2.article_set.all()[3]
self.assertQuerysetEqual(a6.publications.all(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
def test_related_sets(self):
# Article objects have access to their related Publication objects.
self.assertQuerysetEqual(self.a1.publications.all(),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(self.a2.publications.all(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
# Publication objects have access to their related Article objects.
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.p1.article_set.all(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(Publication.objects.get(id=self.p4.id).article_set.all(),
['<Article: NASA uses Python>'])
def test_selects(self):
# We can perform kwarg queries across m2m relationships
self.assertQuerysetEqual(
Article.objects.filter(publications__id__exact=self.p1.id),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__pk=self.p1.id),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications=self.p1.id),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications=self.p1),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__title__startswith="Science"),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__title__startswith="Science").distinct(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
# The count() function respects distinct() as well.
self.assertEqual(Article.objects.filter(publications__title__startswith="Science").count(), 4)
self.assertEqual(Article.objects.filter(publications__title__startswith="Science").distinct().count(), 3)
self.assertQuerysetEqual(
Article.objects.filter(publications__in=[self.p1.id, self.p2.id]).distinct(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__in=[self.p1.id, self.p2]).distinct(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__in=[self.p1, self.p2]).distinct(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
# Excluding a related item works as you would expect, too (although the SQL
# involved is a little complex).
self.assertQuerysetEqual(Article.objects.exclude(publications=self.p2),
['<Article: Django lets you build Web apps easily>'])
def test_reverse_selects(self):
# Reverse m2m queries are supported (i.e., starting at the table that
# doesn't have a ManyToManyField).
self.assertQuerysetEqual(Publication.objects.filter(id__exact=self.p1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(pk=self.p1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(
Publication.objects.filter(article__headline__startswith="NASA"),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(Publication.objects.filter(article__id__exact=self.a1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(article__pk=self.a1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(article=self.a1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(article=self.a1),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(
Publication.objects.filter(article__in=[self.a1.id, self.a2.id]).distinct(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(
Publication.objects.filter(article__in=[self.a1.id, self.a2]).distinct(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(
Publication.objects.filter(article__in=[self.a1, self.a2]).distinct(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
def test_delete(self):
# If we delete a Publication, its Articles won't be able to access it.
self.p1.delete()
self.assertQuerysetEqual(Publication.objects.all(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
])
self.assertQuerysetEqual(self.a1.publications.all(), [])
# If we delete an Article, its Publications won't be able to access it.
self.a2.delete()
self.assertQuerysetEqual(Article.objects.all(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
def test_bulk_delete(self):
# Bulk delete some Publications - references to deleted publications should go
Publication.objects.filter(title__startswith='Science').delete()
self.assertQuerysetEqual(Publication.objects.all(),
[
'<Publication: Highlights for Children>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(Article.objects.all(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a2.publications.all(),
[
'<Publication: Highlights for Children>',
'<Publication: The Python Journal>',
])
# Bulk delete some articles - references to deleted objects should go
q = Article.objects.filter(headline__startswith='Django')
self.assertQuerysetEqual(q, ['<Article: Django lets you build Web apps easily>'])
q.delete()
# After the delete, the QuerySet cache needs to be cleared,
# and the referenced objects should be gone
self.assertQuerysetEqual(q, [])
self.assertQuerysetEqual(self.p1.article_set.all(),
['<Article: NASA uses Python>'])
def test_remove(self):
# Removing publication from an article:
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.a4.publications.remove(self.p2)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(self.a4.publications.all(), [])
# And from the other end
self.p2.article_set.remove(self.a3)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(self.a3.publications.all(), [])
def test_assign(self):
# Relation sets can be assigned. Assignment clears any existing set members
self.p2.article_set = [self.a4, self.a3]
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science News>'])
self.a4.publications = [self.p3.id]
self.assertQuerysetEqual(self.p2.article_set.all(),
['<Article: NASA finds intelligent life on Earth>'])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science Weekly>'])
# An alternate to calling clear() is to assign the empty set
self.p2.article_set = []
self.assertQuerysetEqual(self.p2.article_set.all(), [])
self.a4.publications = []
self.assertQuerysetEqual(self.a4.publications.all(), [])
def test_assign_ids(self):
# Relation sets can also be set using primary key values
self.p2.article_set = [self.a4.id, self.a3.id]
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science News>'])
self.a4.publications = [self.p3.id]
self.assertQuerysetEqual(self.p2.article_set.all(),
['<Article: NASA finds intelligent life on Earth>'])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science Weekly>'])
def test_forward_assign_with_queryset(self):
# Ensure that querysets used in m2m assignments are pre-evaluated
# so their value isn't affected by the clearing operation in
# ManyRelatedObjectsDescriptor.__set__. Refs #19816.
self.a1.publications = [self.p1, self.p2]
qs = self.a1.publications.filter(title='The Python Journal')
self.a1.publications = qs
self.assertEqual(1, self.a1.publications.count())
self.assertEqual(1, qs.count())
def test_reverse_assign_with_queryset(self):
# Ensure that querysets used in M2M assignments are pre-evaluated
# so their value isn't affected by the clearing operation in
# ReverseManyRelatedObjectsDescriptor.__set__. Refs #19816.
self.p1.article_set = [self.a1, self.a2]
qs = self.p1.article_set.filter(headline='Django lets you build Web apps easily')
self.p1.article_set = qs
self.assertEqual(1, self.p1.article_set.count())
self.assertEqual(1, qs.count())
def test_clear(self):
# Relation sets can be cleared:
self.p2.article_set.clear()
self.assertQuerysetEqual(self.p2.article_set.all(), [])
self.assertQuerysetEqual(self.a4.publications.all(), [])
# And you can clear from the other end
self.p2.article_set.add(self.a3, self.a4)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a4.publications.all(),
[
'<Publication: Science News>',
])
self.a4.publications.clear()
self.assertQuerysetEqual(self.a4.publications.all(), [])
self.assertQuerysetEqual(self.p2.article_set.all(),
['<Article: NASA finds intelligent life on Earth>'])
|
bsd-3-clause
|
Smart-Torvy/torvy-home-assistant
|
homeassistant/components/notify/ecobee.py
|
6
|
1245
|
"""
Support for ecobee Send Message service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.ecobee/
"""
import logging
import voluptuous as vol
from homeassistant.components import ecobee
from homeassistant.components.notify import (
BaseNotificationService, PLATFORM_SCHEMA) # NOQA
import homeassistant.helpers.config_validation as cv
DEPENDENCIES = ['ecobee']
_LOGGER = logging.getLogger(__name__)
CONF_INDEX = 'index'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_INDEX, default=0): cv.positive_int,
})
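# A minimal configuration sketch (illustrative) that this schema would accept
# in configuration.yaml:
#
#   notify:
#     - platform: ecobee
#       index: 0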
def get_service(hass, config):
"""Get the Ecobee notification service."""
index = config.get(CONF_INDEX)
return EcobeeNotificationService(index)
# pylint: disable=too-few-public-methods
class EcobeeNotificationService(BaseNotificationService):
"""Implement the notification service for the Ecobee thermostat."""
def __init__(self, thermostat_index):
"""Initialize the service."""
self.thermostat_index = thermostat_index
def send_message(self, message="", **kwargs):
"""Send a message to a command line."""
ecobee.NETWORK.ecobee.send_message(self.thermostat_index, message)
|
mit
|
brain-tec/l10n-switzerland
|
l10n_ch_bank/models/bank.py
|
2
|
1706
|
# Copyright 2014 Olivier Jossen brain-tec AG
# Copyright 2014-2015 Guewen Baconnier (Camptocamp SA)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models, fields
class ResBank(models.Model):
""" Inherit res.bank class in order to add swiss specific fields
Fields from the original file downloaded from here:
http://www.six-interbank-clearing.com/de/home/bank-master-data/download-bc-bank-master.html
============= ================
Field in file Column
------------- ----------------
Gruppe bank_group
Filial-ID bank_branchid
Hauptsitz bank_headquarter
Vorwahl bank_areacode
Postkonto bank_postaccount
============= ================
    .. note:: Postkonto: ccp does not allow entering entries like
``*30-38151-2`` because of the ``*`` but this comes from the
xls to import
"""
_inherit = 'res.bank'
bank_group = fields.Char(string='Group', size=2)
bank_branchid = fields.Char(string='Branch-ID', size=5)
bank_clearing_new = fields.Char(string='BCNr new', size=5)
bank_sicnr = fields.Char(string='SIC-Nr', size=6)
bank_headquarter = fields.Char(string='Headquarter', size=5)
bank_bcart = fields.Char(string='BC-Art', size=1)
bank_valid_from = fields.Date(string='Valid from')
bank_sic = fields.Char(string='SIC', size=1)
bank_eurosic = fields.Char(string='euroSIC', size=1)
bank_lang = fields.Char(string='Language', size=1)
bank_postaladdress = fields.Char(string='Postal address', size=35)
bank_areacode = fields.Char(string='Area code', size=5)
bank_postaccount = fields.Char(string='Post account', size=35)
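# Illustrative only (not part of this module): creating a bank record with the
# Swiss-specific columns mapped above; all values below are made up.
#
#   env['res.bank'].create({
#       'name': 'Example Bank AG',
#       'bic': 'EXAMCHZZXXX',
#       'bank_group': '01',
#       'bank_branchid': '12345',
#       'bank_postaccount': '30-38151-2',
#   })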
|
agpl-3.0
|
htzy/bigfour
|
common/lib/calc/calc/tests/test_calc.py
|
190
|
20806
|
"""
Unit tests for calc.py
"""
import unittest
import numpy
import calc
from pyparsing import ParseException
# numpy's default behavior when it evaluates a function outside its domain
# is to raise a warning (not an exception) which is then printed to STDOUT.
# To prevent this from polluting the output of the tests, configure numpy to
# ignore it instead.
# See http://docs.scipy.org/doc/numpy/reference/generated/numpy.seterr.html
numpy.seterr(all='ignore') # Also: 'ignore', 'warn' (default), 'raise'
class EvaluatorTest(unittest.TestCase):
"""
Run tests for calc.evaluator
Go through all functionalities as specifically as possible--
work from number input to functions and complex expressions
Also test custom variable substitutions (i.e.
`evaluator({'x':3.0}, {}, '3*x')`
gives 9.0) and more.
"""
def test_number_input(self):
"""
Test different kinds of float inputs
See also
test_trailing_period (slightly different)
test_exponential_answer
test_si_suffix
"""
easy_eval = lambda x: calc.evaluator({}, {}, x)
self.assertEqual(easy_eval("13"), 13)
self.assertEqual(easy_eval("3.14"), 3.14)
self.assertEqual(easy_eval(".618033989"), 0.618033989)
self.assertEqual(easy_eval("-13"), -13)
self.assertEqual(easy_eval("-3.14"), -3.14)
self.assertEqual(easy_eval("-.618033989"), -0.618033989)
def test_period(self):
"""
The string '.' should not evaluate to anything.
"""
with self.assertRaises(ParseException):
calc.evaluator({}, {}, '.')
with self.assertRaises(ParseException):
calc.evaluator({}, {}, '1+.')
def test_trailing_period(self):
"""
Test that things like '4.' will be 4 and not throw an error
"""
self.assertEqual(4.0, calc.evaluator({}, {}, '4.'))
def test_exponential_answer(self):
"""
Test for correct interpretation of scientific notation
"""
answer = 50
correct_responses = [
"50", "50.0", "5e1", "5e+1",
"50e0", "50.0e0", "500e-1"
]
incorrect_responses = ["", "3.9", "4.1", "0", "5.01e1"]
for input_str in correct_responses:
result = calc.evaluator({}, {}, input_str)
fail_msg = "Expected '{0}' to equal {1}".format(
input_str, answer
)
self.assertEqual(answer, result, msg=fail_msg)
for input_str in incorrect_responses:
result = calc.evaluator({}, {}, input_str)
fail_msg = "Expected '{0}' to not equal {1}".format(
input_str, answer
)
self.assertNotEqual(answer, result, msg=fail_msg)
def test_si_suffix(self):
"""
Test calc.py's unique functionality of interpreting si 'suffixes'.
        For instance 'k' stands for 'kilo-', so '1k' should be 1,000
"""
test_mapping = [
('4.2%', 0.042), ('2.25k', 2250), ('8.3M', 8300000),
('9.9G', 9.9e9), ('1.2T', 1.2e12), ('7.4c', 0.074),
('5.4m', 0.0054), ('8.7u', 0.0000087),
('5.6n', 5.6e-9), ('4.2p', 4.2e-12)
]
for (expr, answer) in test_mapping:
tolerance = answer * 1e-6 # Make rel. tolerance, because of floats
fail_msg = "Failure in testing suffix '{0}': '{1}' was not {2}"
fail_msg = fail_msg.format(expr[-1], expr, answer)
self.assertAlmostEqual(
calc.evaluator({}, {}, expr), answer,
delta=tolerance, msg=fail_msg
)
def test_operator_sanity(self):
"""
Test for simple things like '5+2' and '5/2'
"""
var1 = 5.0
var2 = 2.0
operators = [('+', 7), ('-', 3), ('*', 10), ('/', 2.5), ('^', 25)]
for (operator, answer) in operators:
input_str = "{0} {1} {2}".format(var1, operator, var2)
result = calc.evaluator({}, {}, input_str)
fail_msg = "Failed on operator '{0}': '{1}' was not {2}".format(
operator, input_str, answer
)
self.assertEqual(answer, result, msg=fail_msg)
def test_raises_zero_division_err(self):
"""
Ensure division by zero gives an error
"""
with self.assertRaises(ZeroDivisionError):
calc.evaluator({}, {}, '1/0')
with self.assertRaises(ZeroDivisionError):
calc.evaluator({}, {}, '1/0.0')
with self.assertRaises(ZeroDivisionError):
calc.evaluator({'x': 0.0}, {}, '1/x')
def test_parallel_resistors(self):
"""
Test the parallel resistor operator ||
The formula is given by
a || b || c ...
= 1 / (1/a + 1/b + 1/c + ...)
It is the resistance of a parallel circuit of resistors with resistance
        a, b, c, etc. See if this evaluates correctly.
"""
self.assertEqual(calc.evaluator({}, {}, '1||1'), 0.5)
self.assertEqual(calc.evaluator({}, {}, '1||1||2'), 0.4)
self.assertEqual(calc.evaluator({}, {}, "j||1"), 0.5 + 0.5j)
def test_parallel_resistors_with_zero(self):
"""
Check the behavior of the || operator with 0
"""
self.assertTrue(numpy.isnan(calc.evaluator({}, {}, '0||1')))
self.assertTrue(numpy.isnan(calc.evaluator({}, {}, '0.0||1')))
self.assertTrue(numpy.isnan(calc.evaluator({'x': 0.0}, {}, 'x||1')))
def assert_function_values(self, fname, ins, outs, tolerance=1e-3):
"""
Helper function to test many values at once
Test the accuracy of evaluator's use of the function given by fname
Specifically, the equality of `fname(ins[i])` against outs[i].
This is used later to test a whole bunch of f(x) = y at a time
"""
for (arg, val) in zip(ins, outs):
input_str = "{0}({1})".format(fname, arg)
result = calc.evaluator({}, {}, input_str)
fail_msg = "Failed on function {0}: '{1}' was not {2}".format(
fname, input_str, val
)
self.assertAlmostEqual(val, result, delta=tolerance, msg=fail_msg)
def test_trig_functions(self):
"""
Test the trig functions provided in calc.py
which are: sin, cos, tan, arccos, arcsin, arctan
"""
angles = ['-pi/4', '0', 'pi/6', 'pi/5', '5*pi/4', '9*pi/4', '1 + j']
sin_values = [-0.707, 0, 0.5, 0.588, -0.707, 0.707, 1.298 + 0.635j]
cos_values = [0.707, 1, 0.866, 0.809, -0.707, 0.707, 0.834 - 0.989j]
tan_values = [-1, 0, 0.577, 0.727, 1, 1, 0.272 + 1.084j]
# Cannot test tan(pi/2) b/c pi/2 is a float and not precise...
self.assert_function_values('sin', angles, sin_values)
self.assert_function_values('cos', angles, cos_values)
self.assert_function_values('tan', angles, tan_values)
# Include those where the real part is between -pi/2 and pi/2
arcsin_inputs = ['-0.707', '0', '0.5', '0.588', '1.298 + 0.635*j']
arcsin_angles = [-0.785, 0, 0.524, 0.629, 1 + 1j]
self.assert_function_values('arcsin', arcsin_inputs, arcsin_angles)
# Rather than a complex number, numpy.arcsin gives nan
self.assertTrue(numpy.isnan(calc.evaluator({}, {}, 'arcsin(-1.1)')))
self.assertTrue(numpy.isnan(calc.evaluator({}, {}, 'arcsin(1.1)')))
# Include those where the real part is between 0 and pi
arccos_inputs = ['1', '0.866', '0.809', '0.834-0.989*j']
arccos_angles = [0, 0.524, 0.628, 1 + 1j]
self.assert_function_values('arccos', arccos_inputs, arccos_angles)
self.assertTrue(numpy.isnan(calc.evaluator({}, {}, 'arccos(-1.1)')))
self.assertTrue(numpy.isnan(calc.evaluator({}, {}, 'arccos(1.1)')))
# Has the same range as arcsin
arctan_inputs = ['-1', '0', '0.577', '0.727', '0.272 + 1.084*j']
arctan_angles = arcsin_angles
self.assert_function_values('arctan', arctan_inputs, arctan_angles)
def test_reciprocal_trig_functions(self):
"""
Test the reciprocal trig functions provided in calc.py
which are: sec, csc, cot, arcsec, arccsc, arccot
"""
angles = ['-pi/4', 'pi/6', 'pi/5', '5*pi/4', '9*pi/4', '1 + j']
sec_values = [1.414, 1.155, 1.236, -1.414, 1.414, 0.498 + 0.591j]
csc_values = [-1.414, 2, 1.701, -1.414, 1.414, 0.622 - 0.304j]
cot_values = [-1, 1.732, 1.376, 1, 1, 0.218 - 0.868j]
self.assert_function_values('sec', angles, sec_values)
self.assert_function_values('csc', angles, csc_values)
self.assert_function_values('cot', angles, cot_values)
arcsec_inputs = ['1.1547', '1.2361', '2', '-2', '-1.4142', '0.4983+0.5911*j']
arcsec_angles = [0.524, 0.628, 1.047, 2.094, 2.356, 1 + 1j]
self.assert_function_values('arcsec', arcsec_inputs, arcsec_angles)
arccsc_inputs = ['-1.1547', '-1.4142', '2', '1.7013', '1.1547', '0.6215-0.3039*j']
arccsc_angles = [-1.047, -0.785, 0.524, 0.628, 1.047, 1 + 1j]
self.assert_function_values('arccsc', arccsc_inputs, arccsc_angles)
# Has the same range as arccsc
arccot_inputs = ['-0.5774', '-1', '1.7321', '1.3764', '0.5774', '(0.2176-0.868*j)']
arccot_angles = arccsc_angles
self.assert_function_values('arccot', arccot_inputs, arccot_angles)
def test_hyperbolic_functions(self):
"""
Test the hyperbolic functions
which are: sinh, cosh, tanh, sech, csch, coth
"""
inputs = ['0', '0.5', '1', '2', '1+j']
neg_inputs = ['0', '-0.5', '-1', '-2', '-1-j']
negate = lambda x: [-k for k in x]
# sinh is odd
sinh_vals = [0, 0.521, 1.175, 3.627, 0.635 + 1.298j]
self.assert_function_values('sinh', inputs, sinh_vals)
self.assert_function_values('sinh', neg_inputs, negate(sinh_vals))
# cosh is even - do not negate
cosh_vals = [1, 1.128, 1.543, 3.762, 0.834 + 0.989j]
self.assert_function_values('cosh', inputs, cosh_vals)
self.assert_function_values('cosh', neg_inputs, cosh_vals)
# tanh is odd
tanh_vals = [0, 0.462, 0.762, 0.964, 1.084 + 0.272j]
self.assert_function_values('tanh', inputs, tanh_vals)
self.assert_function_values('tanh', neg_inputs, negate(tanh_vals))
# sech is even - do not negate
sech_vals = [1, 0.887, 0.648, 0.266, 0.498 - 0.591j]
self.assert_function_values('sech', inputs, sech_vals)
self.assert_function_values('sech', neg_inputs, sech_vals)
# the following functions do not have 0 in their domain
inputs = inputs[1:]
neg_inputs = neg_inputs[1:]
# csch is odd
csch_vals = [1.919, 0.851, 0.276, 0.304 - 0.622j]
self.assert_function_values('csch', inputs, csch_vals)
self.assert_function_values('csch', neg_inputs, negate(csch_vals))
# coth is odd
coth_vals = [2.164, 1.313, 1.037, 0.868 - 0.218j]
self.assert_function_values('coth', inputs, coth_vals)
self.assert_function_values('coth', neg_inputs, negate(coth_vals))
def test_hyperbolic_inverses(self):
"""
Test the inverse hyperbolic functions
which are of the form arc[X]h
"""
results = [0, 0.5, 1, 2, 1 + 1j]
sinh_vals = ['0', '0.5211', '1.1752', '3.6269', '0.635+1.2985*j']
self.assert_function_values('arcsinh', sinh_vals, results)
cosh_vals = ['1', '1.1276', '1.5431', '3.7622', '0.8337+0.9889*j']
self.assert_function_values('arccosh', cosh_vals, results)
tanh_vals = ['0', '0.4621', '0.7616', '0.964', '1.0839+0.2718*j']
self.assert_function_values('arctanh', tanh_vals, results)
sech_vals = ['1.0', '0.8868', '0.6481', '0.2658', '0.4983-0.5911*j']
self.assert_function_values('arcsech', sech_vals, results)
results = results[1:]
csch_vals = ['1.919', '0.8509', '0.2757', '0.3039-0.6215*j']
self.assert_function_values('arccsch', csch_vals, results)
coth_vals = ['2.164', '1.313', '1.0373', '0.868-0.2176*j']
self.assert_function_values('arccoth', coth_vals, results)
def test_other_functions(self):
"""
Test the non-trig functions provided in calc.py
Specifically:
sqrt, log10, log2, ln, abs,
fact, factorial
"""
# Test sqrt
self.assert_function_values(
'sqrt',
[0, 1, 2, 1024], # -1
[0, 1, 1.414, 32] # 1j
)
# sqrt(-1) is NAN not j (!!).
# Test logs
self.assert_function_values(
'log10',
[0.1, 1, 3.162, 1000000, '1+j'],
[-1, 0, 0.5, 6, 0.151 + 0.341j]
)
self.assert_function_values(
'log2',
[0.5, 1, 1.414, 1024, '1+j'],
[-1, 0, 0.5, 10, 0.5 + 1.133j]
)
self.assert_function_values(
'ln',
[0.368, 1, 1.649, 2.718, 42, '1+j'],
[-1, 0, 0.5, 1, 3.738, 0.347 + 0.785j]
)
# Test abs
self.assert_function_values('abs', [-1, 0, 1, 'j'], [1, 0, 1, 1])
# Test factorial
fact_inputs = [0, 1, 3, 7]
fact_values = [1, 1, 6, 5040]
self.assert_function_values('fact', fact_inputs, fact_values)
self.assert_function_values('factorial', fact_inputs, fact_values)
self.assertRaises(ValueError, calc.evaluator, {}, {}, "fact(-1)")
self.assertRaises(ValueError, calc.evaluator, {}, {}, "fact(0.5)")
self.assertRaises(ValueError, calc.evaluator, {}, {}, "factorial(-1)")
self.assertRaises(ValueError, calc.evaluator, {}, {}, "factorial(0.5)")
def test_constants(self):
"""
Test the default constants provided in calc.py
which are: j (complex number), e, pi, k, c, T, q
"""
# Of the form ('expr', python value, tolerance (or None for exact))
default_variables = [
('i', 1j, None),
('j', 1j, None),
('e', 2.7183, 1e-4),
('pi', 3.1416, 1e-4),
('k', 1.3806488e-23, 1e-26), # Boltzmann constant (Joules/Kelvin)
('c', 2.998e8, 1e5), # Light Speed in (m/s)
('T', 298.15, 0.01), # Typical room temperature (Kelvin)
('q', 1.602176565e-19, 1e-22) # Fund. Charge (Coulombs)
]
for (variable, value, tolerance) in default_variables:
fail_msg = "Failed on constant '{0}', not within bounds".format(
variable
)
result = calc.evaluator({}, {}, variable)
if tolerance is None:
self.assertEqual(value, result, msg=fail_msg)
else:
self.assertAlmostEqual(
value, result,
delta=tolerance, msg=fail_msg
)
def test_complex_expression(self):
"""
Calculate combinations of operators and default functions
"""
self.assertAlmostEqual(
calc.evaluator({}, {}, "(2^2+1.0)/sqrt(5e0)*5-1"),
10.180,
delta=1e-3
)
self.assertAlmostEqual(
calc.evaluator({}, {}, "1+1/(1+1/(1+1/(1+1)))"),
1.6,
delta=1e-3
)
self.assertAlmostEqual(
calc.evaluator({}, {}, "10||sin(7+5)"),
-0.567, delta=0.01
)
self.assertAlmostEqual(
calc.evaluator({}, {}, "sin(e)"),
0.41, delta=0.01
)
self.assertAlmostEqual(
calc.evaluator({}, {}, "k*T/q"),
0.025, delta=1e-3
)
self.assertAlmostEqual(
calc.evaluator({}, {}, "e^(j*pi)"),
-1, delta=1e-5
)
def test_explicit_sci_notation(self):
"""
        Expressions like 1.6*10^-3 (not 1.6e-3) should evaluate correctly.
"""
self.assertEqual(
calc.evaluator({}, {}, "-1.6*10^-3"),
-0.0016
)
self.assertEqual(
calc.evaluator({}, {}, "-1.6*10^(-3)"),
-0.0016
)
self.assertEqual(
calc.evaluator({}, {}, "-1.6*10^3"),
-1600
)
self.assertEqual(
calc.evaluator({}, {}, "-1.6*10^(3)"),
-1600
)
def test_simple_vars(self):
"""
Substitution of variables into simple equations
"""
variables = {'x': 9.72, 'y': 7.91, 'loooooong': 6.4}
# Should not change value of constant
# even with different numbers of variables...
self.assertEqual(calc.evaluator({'x': 9.72}, {}, '13'), 13)
self.assertEqual(calc.evaluator({'x': 9.72, 'y': 7.91}, {}, '13'), 13)
self.assertEqual(calc.evaluator(variables, {}, '13'), 13)
# Easy evaluation
self.assertEqual(calc.evaluator(variables, {}, 'x'), 9.72)
self.assertEqual(calc.evaluator(variables, {}, 'y'), 7.91)
self.assertEqual(calc.evaluator(variables, {}, 'loooooong'), 6.4)
# Test a simple equation
self.assertAlmostEqual(
calc.evaluator(variables, {}, '3*x-y'),
21.25, delta=0.01 # = 3 * 9.72 - 7.91
)
self.assertAlmostEqual(
calc.evaluator(variables, {}, 'x*y'),
76.89, delta=0.01
)
self.assertEqual(calc.evaluator({'x': 9.72, 'y': 7.91}, {}, "13"), 13)
self.assertEqual(calc.evaluator(variables, {}, "13"), 13)
self.assertEqual(
calc.evaluator(
{'a': 2.2997471478310274, 'k': 9, 'm': 8, 'x': 0.6600949841121},
{}, "5"
),
5
)
def test_variable_case_sensitivity(self):
"""
Test the case sensitivity flag and corresponding behavior
"""
self.assertEqual(
calc.evaluator({'R1': 2.0, 'R3': 4.0}, {}, "r1*r3"),
8.0
)
variables = {'t': 1.0}
self.assertEqual(calc.evaluator(variables, {}, "t"), 1.0)
self.assertEqual(calc.evaluator(variables, {}, "T"), 1.0)
self.assertEqual(
calc.evaluator(variables, {}, "t", case_sensitive=True),
1.0
)
# Recall 'T' is a default constant, with value 298.15
self.assertAlmostEqual(
calc.evaluator(variables, {}, "T", case_sensitive=True),
298, delta=0.2
)
def test_simple_funcs(self):
"""
        Substitution of custom functions
"""
variables = {'x': 4.712}
functions = {'id': lambda x: x}
self.assertEqual(calc.evaluator({}, functions, 'id(2.81)'), 2.81)
self.assertEqual(calc.evaluator({}, functions, 'id(2.81)'), 2.81)
self.assertEqual(calc.evaluator(variables, functions, 'id(x)'), 4.712)
functions.update({'f': numpy.sin})
self.assertAlmostEqual(
calc.evaluator(variables, functions, 'f(x)'),
-1, delta=1e-3
)
def test_function_case_insensitive(self):
"""
Test case insensitive evaluation
Normal functions with some capitals should be fine
"""
self.assertAlmostEqual(
-0.28,
calc.evaluator({}, {}, 'SiN(6)', case_sensitive=False),
delta=1e-3
)
def test_function_case_sensitive(self):
"""
Test case sensitive evaluation
        Incorrectly capitalized names should fail
Also, it should pick the correct version of a function.
"""
with self.assertRaisesRegexp(calc.UndefinedVariable, 'SiN'):
calc.evaluator({}, {}, 'SiN(6)', case_sensitive=True)
# With case sensitive turned on, it should pick the right function
functions = {'f': lambda x: x, 'F': lambda x: x + 1}
self.assertEqual(
6, calc.evaluator({}, functions, 'f(6)', case_sensitive=True)
)
self.assertEqual(
7, calc.evaluator({}, functions, 'F(6)', case_sensitive=True)
)
def test_undefined_vars(self):
"""
Check to see if the evaluator catches undefined variables
"""
variables = {'R1': 2.0, 'R3': 4.0}
with self.assertRaisesRegexp(calc.UndefinedVariable, 'QWSEKO'):
calc.evaluator({}, {}, "5+7*QWSEKO")
with self.assertRaisesRegexp(calc.UndefinedVariable, 'r2'):
calc.evaluator({'r1': 5}, {}, "r1+r2")
with self.assertRaisesRegexp(calc.UndefinedVariable, 'r1 r3'):
calc.evaluator(variables, {}, "r1*r3", case_sensitive=True)
|
agpl-3.0
|
mcgachey/edx-platform
|
common/lib/xmodule/xmodule/modulestore/store_utilities.py
|
124
|
4490
|
import re
import logging
from collections import namedtuple
import uuid
def _prefix_only_url_replace_regex(pattern):
"""
Match urls in quotes pulling out the fields from pattern
"""
return re.compile(ur"""
(?x) # flags=re.VERBOSE
(?P<quote>\\?['"]) # the opening quotes
{}
(?P=quote) # the first matching closing quote
""".format(pattern))
def rewrite_nonportable_content_links(source_course_id, dest_course_id, text):
"""
rewrite any non-portable links to (->) relative links:
/c4x/<org>/<course>/asset/<name> -> /static/<name>
/jump_to/i4x://<org>/<course>/<category>/<name> -> /jump_to_id/<id>
"""
def portable_asset_link_subtitution(match):
quote = match.group('quote')
block_id = match.group('block_id')
return quote + '/static/' + block_id + quote
def portable_jump_to_link_substitution(match):
quote = match.group('quote')
rest = match.group('block_id')
return quote + '/jump_to_id/' + rest + quote
# if something blows up, log the error and continue
# create a serialized template for what the id will look like in the source_course but with
# the block_id as a regex pattern
placeholder_id = uuid.uuid4().hex
asset_block_pattern = unicode(source_course_id.make_asset_key('asset', placeholder_id))
asset_block_pattern = asset_block_pattern.replace(placeholder_id, r'(?P<block_id>.*?)')
try:
text = _prefix_only_url_replace_regex(asset_block_pattern).sub(portable_asset_link_subtitution, text)
except Exception as exc: # pylint: disable=broad-except
logging.warning("Error producing regex substitution %r for text = %r.\n\nError msg = %s", asset_block_pattern, text, str(exc))
placeholder_category = 'cat_{}'.format(uuid.uuid4().hex)
usage_block_pattern = unicode(source_course_id.make_usage_key(placeholder_category, placeholder_id))
usage_block_pattern = usage_block_pattern.replace(placeholder_category, r'(?P<category>[^/+@]+)')
usage_block_pattern = usage_block_pattern.replace(placeholder_id, r'(?P<block_id>.*?)')
jump_to_link_base = ur'/courses/{course_key_string}/jump_to/{usage_key_string}'.format(
course_key_string=unicode(source_course_id), usage_key_string=usage_block_pattern
)
try:
text = _prefix_only_url_replace_regex(jump_to_link_base).sub(portable_jump_to_link_substitution, text)
except Exception as exc: # pylint: disable=broad-except
logging.warning("Error producing regex substitution %r for text = %r.\n\nError msg = %s", jump_to_link_base, text, str(exc))
    # Also, there is commonly a set of link URLs used in the format:
# /courses/<org>/<course>/<name> which will be broken if migrated to a different course_id
# so let's rewrite those, but the target will also be non-portable,
#
# Note: we only need to do this if we are changing course-id's
#
if source_course_id != dest_course_id:
try:
generic_courseware_link_base = u'/courses/{}/'.format(unicode(source_course_id))
text = re.sub(_prefix_only_url_replace_regex(generic_courseware_link_base), portable_asset_link_subtitution, text)
except Exception as exc: # pylint: disable=broad-except
logging.warning("Error producing regex substitution %r for text = %r.\n\nError msg = %s", source_course_id, text, str(exc))
return text
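# Illustrative usage sketch (not part of this module); the course keys below
# are hypothetical and the exact constructor depends on the opaque_keys
# version in use:
#
#   from opaque_keys.edx.keys import CourseKey
#   source = CourseKey.from_string('course-v1:OrgX+Demo+2014')
#   dest = CourseKey.from_string('course-v1:OrgX+Demo+2015')
#   html = rewrite_nonportable_content_links(source, dest, html)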
def draft_node_constructor(module, url, parent_url, location=None, parent_location=None, index=None):
"""
    Constructs a draft_node namedtuple with defaults.
"""
draft_node = namedtuple('draft_node', ['module', 'location', 'url', 'parent_location', 'parent_url', 'index'])
return draft_node(module, location, url, parent_location, parent_url, index)
def get_draft_subtree_roots(draft_nodes):
"""
Takes a list of draft_nodes, which are namedtuples, each of which identify
itself and its parent.
    If a draft_node is in `draft_nodes`, then we expect all of its children
    to be in `draft_nodes` as well. Since `_import_draft` is recursive,
we only want to import the roots of any draft subtrees contained in
`draft_nodes`.
This generator yields those roots.
"""
urls = [draft_node.url for draft_node in draft_nodes]
for draft_node in draft_nodes:
if draft_node.parent_url not in urls:
yield draft_node
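# Illustrative sketch (not part of this module): given a node 'A' whose parent
# is outside the list and a node 'B' whose parent is 'A', only 'A' is yielded:
#
#   nodes = [
#       draft_node_constructor(module=None, url='A', parent_url='X'),
#       draft_node_constructor(module=None, url='B', parent_url='A'),
#   ]
#   roots = list(get_draft_subtree_roots(nodes))  # only the 'A' node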
|
agpl-3.0
|
chrwi/ardupilot
|
Tools/LogAnalyzer/DataflashLog.py
|
83
|
27803
|
#
# Code to abstract the parsing of APM Dataflash log files, currently only used by the LogAnalyzer
#
# Initial code by Andrew Chapman ([email protected]), 16th Jan 2014
#
from __future__ import print_function
import collections
import os
import numpy
import bisect
import sys
import ctypes
class Format(object):
'''Data channel format as specified by the FMT lines in the log file'''
def __init__(self,msgType,msgLen,name,types,labels):
self.NAME = 'FMT'
self.msgType = msgType
self.msgLen = msgLen
self.name = name
self.types = types
self.labels = labels.split(',')
def __str__(self):
return "%8s %s" % (self.name, `self.labels`)
@staticmethod
def trycastToFormatType(value,valueType):
        '''Use format characters from libraries/DataFlash/DataFlash.h to cast strings to basic python int/float/string types.
        Tries the cast; if it fails the original value is returned, which is acceptable because text logs do not always match the declared format, e.g. MODE is expected to be int but may be a string.'''
try:
if valueType in "fcCeEL":
return float(value)
elif valueType in "bBhHiIM":
return int(value)
elif valueType in "nNZ":
return str(value)
except:
pass
return value
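    # For example (illustrative): trycastToFormatType('3', 'h') returns 3,
    # trycastToFormatType('2.5', 'f') returns 2.5, and
    # trycastToFormatType('AUTO', 'M') falls back to returning the original
    # string because int('AUTO') raises.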
def to_class(self):
members = dict(
NAME = self.name,
labels = self.labels[:],
)
fieldtypes = [i for i in self.types]
fieldlabels = self.labels[:]
# field access
for (label, _type) in zip(fieldlabels, fieldtypes):
def createproperty(name, format):
# extra scope for variable sanity
# scaling via _NAME and def NAME(self): return self._NAME / SCALE
propertyname = name
attributename = '_' + name
p = property(lambda x:getattr(x, attributename),
lambda x, v:setattr(x,attributename, Format.trycastToFormatType(v,format)))
members[propertyname] = p
members[attributename] = None
createproperty(label, _type)
# repr shows all values but the header
members['__repr__'] = lambda x: "<{cls} {data}>".format(cls=x.__class__.__name__, data = ' '.join(["{}:{}".format(k,getattr(x,'_'+k)) for k in x.labels]))
def init(a, *x):
if len(x) != len(a.labels):
raise ValueError("Invalid Length")
#print(list(zip(a.labels, x)))
for (l,v) in zip(a.labels, x):
try:
setattr(a, l, v)
except Exception as e:
print("{} {} {} failed".format(a,l,v))
print(e)
members['__init__'] = init
# finally, create the class
cls = type(\
'Log__{:s}'.format(self.name),
(object,),
members
)
#print(members)
return cls
class logheader(ctypes.LittleEndianStructure):
_fields_ = [ \
('head1', ctypes.c_uint8),
('head2', ctypes.c_uint8),
('msgid', ctypes.c_uint8),
]
def __repr__(self):
return "<logheader head1=0x{self.head1:x} head2=0x{self.head2:x} msgid=0x{self.msgid:x} ({self.msgid})>".format(self=self)
class BinaryFormat(ctypes.LittleEndianStructure):
NAME = 'FMT'
MSG = 128
SIZE = 0
FIELD_FORMAT = {
'b': ctypes.c_int8,
'B': ctypes.c_uint8,
'h': ctypes.c_int16,
'H': ctypes.c_uint16,
'i': ctypes.c_int32,
'I': ctypes.c_uint32,
'f': ctypes.c_float,
'd': ctypes.c_double,
'n': ctypes.c_char * 4,
'N': ctypes.c_char * 16,
'Z': ctypes.c_char * 64,
'c': ctypes.c_int16,# * 100,
'C': ctypes.c_uint16,# * 100,
'e': ctypes.c_int32,# * 100,
'E': ctypes.c_uint32,# * 100,
'L': ctypes.c_int32,
'M': ctypes.c_uint8,
'q': ctypes.c_int64,
'Q': ctypes.c_uint64,
}
FIELD_SCALE = {
'c': 100,
'C': 100,
'e': 100,
'E': 100,
}
_packed_ = True
_fields_ = [ \
('head', logheader),
('type', ctypes.c_uint8),
('length', ctypes.c_uint8),
('name', ctypes.c_char * 4),
('types', ctypes.c_char * 16),
('labels', ctypes.c_char * 64),
]
def __repr__(self):
return "<{cls} {data}>".format(cls=self.__class__.__name__, data = ' '.join(["{}:{}".format(k,getattr(self,k)) for (k,_) in self._fields_[1:]]))
def to_class(self):
members = dict(
NAME = self.name,
MSG = self.type,
SIZE = self.length,
labels = self.labels.split(",") if self.labels else [],
_pack_ = True)
fieldtypes = [i for i in self.types]
fieldlabels = self.labels.split(",")
if self.labels and (len(fieldtypes) != len(fieldlabels)):
print("Broken FMT message for {} .. ignoring".format(self.name), file=sys.stderr)
return None
fields = [('head',logheader)]
# field access
for (label, _type) in zip(fieldlabels, fieldtypes):
def createproperty(name, format):
# extra scope for variable sanity
# scaling via _NAME and def NAME(self): return self._NAME / SCALE
propertyname = name
attributename = '_' + name
scale = BinaryFormat.FIELD_SCALE.get(format, None)
p = property(lambda x:getattr(x, attributename))
if scale is not None:
p = property(lambda x:getattr(x, attributename) / scale)
members[propertyname] = p
try:
fields.append((attributename, BinaryFormat.FIELD_FORMAT[format]))
except KeyError:
print('ERROR: Failed to add FMT type: {}, with format: {}'.format(attributename, format))
raise
createproperty(label, _type)
members['_fields_'] = fields
# repr shows all values but the header
members['__repr__'] = lambda x: "<{cls} {data}>".format(cls=x.__class__.__name__, data = ' '.join(["{}:{}".format(k,getattr(x,k)) for k in x.labels]))
# finally, create the class
cls = type(\
'Log__{:s}'.format(self.name),
(ctypes.LittleEndianStructure,),
members
)
if ctypes.sizeof(cls) != cls.SIZE:
print("size mismatch for {} expected {} got {}".format(cls, ctypes.sizeof(cls), cls.SIZE), file=sys.stderr)
# for i in cls.labels:
# print("{} = {}".format(i,getattr(cls,'_'+i)))
return None
return cls
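# Illustrative note (not part of the original file): for a binary FMT entry whose format
# string contains a scaled type such as 'c', the generated class stores the raw integer in
# '_<label>' and the property divides it by FIELD_SCALE['c'] == 100 on access, exposing the
# field in its natural units.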
BinaryFormat.SIZE = ctypes.sizeof(BinaryFormat)
class Channel(object):
'''storage for a single stream of data, i.e. all GPS.RelAlt values'''
# TODO: rethink data storage, but do more thorough regression testing before refactoring it
# TODO: store data as a scipy spline curve so we can more easily interpolate and sample the slope?
def __init__(self):
self.dictData = {} # dict of linenum->value # store dupe data in dict and list for now, until we decide which is the better way to go
self.listData = [] # list of (linenum,value) # store dupe data in dict and list for now, until we decide which is the better way to go
def getSegment(self, startLine, endLine):
'''returns a segment of this data (from startLine to endLine, inclusive) as a new Channel instance'''
segment = Channel()
segment.dictData = {k:v for k,v in self.dictData.iteritems() if k >= startLine and k <= endLine}
return segment
def min(self):
return min(self.dictData.values())
def max(self):
return max(self.dictData.values())
def avg(self):
return numpy.mean(self.dictData.values())
def getNearestValueFwd(self, lineNumber):
'''Returns (value,lineNumber)'''
index = bisect.bisect_left(self.listData, (lineNumber,-99999))
while index<len(self.listData):
line = self.listData[index][0]
#print "Looking forwards for nearest value to line number %d, starting at line %d" % (lineNumber,line) # TEMP
if line >= lineNumber:
return (self.listData[index][1],line)
index += 1
raise Exception("Error finding nearest value for line %d" % lineNumber)
def getNearestValueBack(self, lineNumber):
'''Returns (value,lineNumber)'''
index = bisect.bisect_left(self.listData, (lineNumber,-99999)) - 1
while index>=0:
line = self.listData[index][0]
#print "Looking backwards for nearest value to line number %d, starting at line %d" % (lineNumber,line) # TEMP
if line <= lineNumber:
return (self.listData[index][1],line)
index -= 1
raise Exception("Error finding nearest value for line %d" % lineNumber)
def getNearestValue(self, lineNumber, lookForwards=True):
'''find the nearest data value to the given lineNumber, defaults to first looking forwards. Returns (value,lineNumber)'''
if lookForwards:
try:
return self.getNearestValueFwd(lineNumber)
except:
return self.getNearestValueBack(lineNumber)
else:
try:
return self.getNearestValueBack(lineNumber)
except:
return self.getNearestValueFwd(lineNumber)
raise Exception("Error finding nearest value for line %d" % lineNumber)
def getInterpolatedValue(self, lineNumber):
(prevValue,prevValueLine) = self.getNearestValue(lineNumber, lookForwards=False)
(nextValue,nextValueLine) = self.getNearestValue(lineNumber, lookForwards=True)
if prevValueLine == nextValueLine:
return prevValue
        weight = (lineNumber-prevValueLine) / float(nextValueLine-prevValueLine)
        # weight is the fractional distance from the previous sample towards the next one
        return ((1-weight)*prevValue) + (weight*nextValue)
def getIndexOf(self, lineNumber):
'''returns the index within this channel's listData of the given lineNumber, or raises an Exception if not found'''
index = bisect.bisect_left(self.listData, (lineNumber,-99999))
#print "INDEX of line %d: %d" % (lineNumber,index)
#print "self.listData[index][0]: %d" % self.listData[index][0]
if (self.listData[index][0] == lineNumber):
return index
else:
raise Exception("Error finding index for line %d" % lineNumber)
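# Minimal sketch of how a Channel is filled and queried (hypothetical altitude samples):
#   ch = Channel()
#   for line, alt in [(10, 5.0), (20, 7.0)]:
#       ch.dictData[line] = alt
#       ch.listData.append((line, alt))
#   ch.getNearestValue(12)        # -> (7.0, 20): looks forwards first by default
#   ch.getInterpolatedValue(15)   # -> 6.0, halfway between the two samples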
class LogIterator:
'''Smart iterator that can move through a log by line number and maintain an index into the nearest values of all data channels'''
# TODO: LogIterator currently indexes the next available value rather than the nearest value, we should make it configurable between next/nearest
class LogIteratorSubValue:
'''syntactic sugar to allow access by LogIterator[lineLabel][dataLabel]'''
logdata = None
iterators = None
lineLabel = None
def __init__(self, logdata, iterators, lineLabel):
self.logdata = logdata
self.lineLabel = lineLabel
self.iterators = iterators
def __getitem__(self, dataLabel):
index = self.iterators[self.lineLabel][0]
return self.logdata.channels[self.lineLabel][dataLabel].listData[index][1]
iterators = {} # lineLabel -> (listIndex,lineNumber)
logdata = None
currentLine = None
def __init__(self, logdata, lineNumber=0):
self.logdata = logdata
self.currentLine = lineNumber
for lineLabel in self.logdata.formats:
if lineLabel in self.logdata.channels:
self.iterators[lineLabel] = ()
self.jump(lineNumber)
def __iter__(self):
return self
def __getitem__(self, lineLabel):
return LogIterator.LogIteratorSubValue(self.logdata, self.iterators, lineLabel)
def next(self):
'''increment iterator to next log line'''
self.currentLine += 1
if self.currentLine > self.logdata.lineCount:
return self
for lineLabel in self.iterators.keys():
# check if the currentLine has gone past our the line we're pointing to for this type of data
dataLabel = self.logdata.formats[lineLabel].labels[0]
(index, lineNumber) = self.iterators[lineLabel]
# if so, and it is not the last entry in the log, then increment the indices for all dataLabels under that lineLabel
if (self.currentLine > lineNumber) and (index < len(self.logdata.channels[lineLabel][dataLabel].listData)-1):
index += 1
lineNumber = self.logdata.channels[lineLabel][dataLabel].listData[index][0]
self.iterators[lineLabel] = (index,lineNumber)
return self
def jump(self, lineNumber):
'''jump iterator to specified log line'''
self.currentLine = lineNumber
for lineLabel in self.iterators.keys():
dataLabel = self.logdata.formats[lineLabel].labels[0]
(value,lineNumber) = self.logdata.channels[lineLabel][dataLabel].getNearestValue(self.currentLine)
self.iterators[lineLabel] = (self.logdata.channels[lineLabel][dataLabel].getIndexOf(lineNumber), lineNumber)
class DataflashLogHelper:
'''helper functions for dealing with log data, put here to keep DataflashLog class as a simple parser and data store'''
@staticmethod
def getTimeAtLine(logdata, lineNumber):
'''returns the nearest GPS timestamp in milliseconds after the given line number'''
if not "GPS" in logdata.channels:
raise Exception("no GPS log data found")
        # older logs use 'Time', newer logs use 'TimeMS'
timeLabel = "TimeMS"
if "Time" in logdata.channels["GPS"]:
timeLabel = "Time"
while lineNumber <= logdata.lineCount:
if lineNumber in logdata.channels["GPS"][timeLabel].dictData:
return logdata.channels["GPS"][timeLabel].dictData[lineNumber]
lineNumber = lineNumber + 1
sys.stderr.write("didn't find GPS data for " + str(lineNumber) + " - using maxtime\n")
return logdata.channels["GPS"][timeLabel].max()
@staticmethod
def findLoiterChunks(logdata, minLengthSeconds=0, noRCInputs=True):
'''returns a list of (to,from) pairs defining sections of the log which are in loiter mode. Ordered from longest to shortest in time. If noRCInputs == True it only returns chunks with no control inputs'''
# TODO: implement noRCInputs handling when identifying stable loiter chunks, for now we're ignoring it
def chunkSizeCompare(chunk1, chunk2):
chunk1Len = chunk1[1]-chunk1[0]
chunk2Len = chunk2[1]-chunk2[0]
if chunk1Len == chunk2Len:
return 0
elif chunk1Len > chunk2Len:
return -1
else:
return 1
od = collections.OrderedDict(sorted(logdata.modeChanges.items(), key=lambda t: t[0]))
chunks = []
for i in range(len(od.keys())):
if od.values()[i][0] == "LOITER":
startLine = od.keys()[i]
endLine = None
if i == len(od.keys())-1:
endLine = logdata.lineCount
else:
endLine = od.keys()[i+1]-1
chunkTimeSeconds = (DataflashLogHelper.getTimeAtLine(logdata,endLine)-DataflashLogHelper.getTimeAtLine(logdata,startLine)+1) / 1000.0
if chunkTimeSeconds > minLengthSeconds:
chunks.append((startLine,endLine))
#print "LOITER chunk: %d to %d, %d lines" % (startLine,endLine,endLine-startLine+1)
#print " (time %d to %d, %d seconds)" % (DataflashLogHelper.getTimeAtLine(logdata,startLine), DataflashLogHelper.getTimeAtLine(logdata,endLine), chunkTimeSeconds)
chunks.sort(chunkSizeCompare)
return chunks
@staticmethod
def isLogEmpty(logdata):
        '''returns a human-readable error string if the log is essentially empty, otherwise returns None'''
# naive check for now, see if the throttle output was ever above 20%
throttleThreshold = 20
if logdata.vehicleType == "ArduCopter":
throttleThreshold = 200 # copter uses 0-1000, plane+rover use 0-100
if "CTUN" in logdata.channels:
maxThrottle = logdata.channels["CTUN"]["ThrOut"].max()
if maxThrottle < throttleThreshold:
return "Throttle never above 20%"
return None
class DataflashLog(object):
'''APM Dataflash log file reader and container class. Keep this simple, add more advanced or specific functions to DataflashLogHelper class'''
knownHardwareTypes = ["APM", "PX4", "MPNG"]
intTypes = "bBhHiIM"
floatTypes = "fcCeEL"
charTypes = "nNZ"
def __init__(self, logfile=None, format="auto", ignoreBadlines=False):
self.filename = None
self.vehicleType = "" # ArduCopter, ArduPlane, ArduRover, etc, verbatim as given by header
self.firmwareVersion = ""
self.firmwareHash = ""
self.freeRAM = 0
self.hardwareType = "" # APM 1, APM 2, PX4, MPNG, etc What is VRBrain? BeagleBone, etc? Needs more testing
self.formats = {} # name -> Format
self.parameters = {} # token -> value
self.messages = {} # lineNum -> message
self.modeChanges = {} # lineNum -> (mode,value)
self.channels = {} # lineLabel -> {dataLabel:Channel}
self.filesizeKB = 0
self.durationSecs = 0
self.lineCount = 0
self.skippedLines = 0
if logfile:
self.read(logfile, format, ignoreBadlines)
def getCopterType(self):
'''returns quad/hex/octo/tradheli if this is a copter log'''
if self.vehicleType != "ArduCopter":
return None
motLabels = []
if "MOT" in self.formats: # not listed in PX4 log header for some reason?
motLabels = self.formats["MOT"].labels
if "GGain" in motLabels:
return "tradheli"
elif len(motLabels) == 4:
return "quad"
elif len(motLabels) == 6:
return "hex"
elif len(motLabels) == 8:
return "octo"
else:
return ""
def read(self, logfile, format="auto", ignoreBadlines=False):
'''returns on successful log read (including bad lines if ignoreBadlines==True), will throw an Exception otherwise'''
# TODO: dataflash log parsing code is pretty hacky, should re-write more methodically
self.filename = logfile
if self.filename == '<stdin>':
f = sys.stdin
else:
f = open(self.filename, 'r')
if format == 'bin':
head = '\xa3\x95\x80\x80'
elif format == 'log':
head = ""
elif format == 'auto':
if self.filename == '<stdin>':
# assuming TXT format
# raise ValueError("Invalid log format for stdin: {}".format(format))
head = ""
else:
head = f.read(4)
f.seek(0)
else:
raise ValueError("Unknown log format for {}: {}".format(self.filename, format))
if head == '\xa3\x95\x80\x80':
numBytes, lineNumber = self.read_binary(f, ignoreBadlines)
pass
else:
numBytes, lineNumber = self.read_text(f, ignoreBadlines)
# gather some general stats about the log
self.lineCount = lineNumber
self.filesizeKB = numBytes / 1024.0
        # TODO: switch duration calculation to use TimeMS values rather than GPS timestamp
if "GPS" in self.channels:
# the GPS time label changed at some point, need to handle both
timeLabel = None
for i in 'TimeMS','TimeUS','Time':
if i in self.channels["GPS"]:
timeLabel = i
break
firstTimeGPS = self.channels["GPS"][timeLabel].listData[0][1]
lastTimeGPS = self.channels["GPS"][timeLabel].listData[-1][1]
if timeLabel == 'TimeUS':
firstTimeGPS /= 1000
lastTimeGPS /= 1000
self.durationSecs = (lastTimeGPS-firstTimeGPS) / 1000
# TODO: calculate logging rate based on timestamps
# ...
def process(self, lineNumber, e):
if e.NAME == 'FMT':
cls = e.to_class()
if cls is not None: # FMT messages can be broken ...
if hasattr(e, 'type') and e.type not in self._formats: # binary log specific
self._formats[e.type] = cls
if cls.NAME not in self.formats:
self.formats[cls.NAME] = cls
elif e.NAME == "PARM":
self.parameters[e.Name] = e.Value
elif e.NAME == "MSG":
if not self.vehicleType:
tokens = e.Message.split(' ')
vehicleTypes = ["ArduPlane", "ArduCopter", "ArduRover"]
self.vehicleType = tokens[0]
self.firmwareVersion = tokens[1]
if len(tokens) == 3:
self.firmwareHash = tokens[2][1:-1]
else:
self.messages[lineNumber] = e.Message
elif e.NAME == "MODE":
if self.vehicleType in ["ArduCopter"]:
try:
modes = {0:'STABILIZE',
1:'ACRO',
2:'ALT_HOLD',
3:'AUTO',
4:'GUIDED',
5:'LOITER',
6:'RTL',
7:'CIRCLE',
9:'LAND',
10:'OF_LOITER',
11:'DRIFT',
13:'SPORT',
14:'FLIP',
15:'AUTOTUNE',
16:'HYBRID',}
self.modeChanges[lineNumber] = (modes[int(e.Mode)], e.ThrCrs)
except:
self.modeChanges[lineNumber] = (e.Mode, e.ThrCrs)
elif self.vehicleType in ["ArduPlane", "APM:Plane", "ArduRover", "APM:Rover", "APM:Copter"]:
self.modeChanges[lineNumber] = (e.Mode, e.ModeNum)
else:
raise Exception("Unknown log type for MODE line {} {}".format(self.vehicleType, repr(e)))
# anything else must be the log data
else:
groupName = e.NAME
# first time seeing this type of log line, create the channel storage
if not groupName in self.channels:
self.channels[groupName] = {}
for label in e.labels:
self.channels[groupName][label] = Channel()
# store each token in its relevant channel
for label in e.labels:
value = getattr(e, label)
channel = self.channels[groupName][label]
channel.dictData[lineNumber] = value
channel.listData.append((lineNumber, value))
def read_text(self, f, ignoreBadlines):
self.formats = {'FMT':Format}
lineNumber = 0
numBytes = 0
knownHardwareTypes = ["APM", "PX4", "MPNG"]
for line in f:
lineNumber = lineNumber + 1
numBytes += len(line) + 1
try:
#print "Reading line: %d" % lineNumber
line = line.strip('\n\r')
tokens = line.split(', ')
# first handle the log header lines
if line == " Ready to drive." or line == " Ready to FLY.":
continue
if line == "----------------------------------------": # present in pre-3.0 logs
raise Exception("Log file seems to be in the older format (prior to self-describing logs), which isn't supported")
if len(tokens) == 1:
tokens2 = line.split(' ')
if line == "":
pass
elif len(tokens2) == 1 and tokens2[0].isdigit(): # log index
pass
elif len(tokens2) == 3 and tokens2[0] == "Free" and tokens2[1] == "RAM:":
self.freeRAM = int(tokens2[2])
elif tokens2[0] in knownHardwareTypes:
self.hardwareType = line # not sure if we can parse this more usefully, for now only need to report it back verbatim
elif (len(tokens2) == 2 or len(tokens2) == 3) and tokens2[1][0].lower() == "v": # e.g. ArduCopter V3.1 (5c6503e2)
self.vehicleType = tokens2[0]
self.firmwareVersion = tokens2[1]
if len(tokens2) == 3:
self.firmwareHash = tokens2[2][1:-1]
else:
errorMsg = "Error parsing line %d of log file: %s" % (lineNumber, self.filename)
if ignoreBadlines:
print(errorMsg + " (skipping line)", file=sys.stderr)
self.skippedLines += 1
else:
raise Exception("")
else:
if not tokens[0] in self.formats:
raise ValueError("Unknown Format {}".format(tokens[0]))
e = self.formats[tokens[0]](*tokens[1:])
self.process(lineNumber, e)
except Exception as e:
print("BAD LINE: " + line, file=sys.stderr)
if not ignoreBadlines:
raise Exception("Error parsing line %d of log file %s - %s" % (lineNumber,self.filename,e.args[0]))
return (numBytes,lineNumber)
def read_binary(self, f, ignoreBadlines):
lineNumber = 0
numBytes = 0
for e in self._read_binary(f, ignoreBadlines):
lineNumber += 1
if e is None:
continue
numBytes += e.SIZE
# print(e)
self.process(lineNumber, e)
return (numBytes,lineNumber)
def _read_binary(self, f, ignoreBadlines):
self._formats = {128:BinaryFormat}
data = bytearray(f.read())
offset = 0
while len(data) > offset + ctypes.sizeof(logheader):
h = logheader.from_buffer(data, offset)
if not (h.head1 == 0xa3 and h.head2 == 0x95):
if ignoreBadlines == False:
raise ValueError(h)
else:
if h.head1 == 0xff and h.head2 == 0xff and h.msgid == 0xff:
print("Assuming EOF due to dataflash block tail filled with \\xff... (offset={off})".format(off=offset), file=sys.stderr)
break
if h.msgid in self._formats:
typ = self._formats[h.msgid]
if len(data) <= offset + typ.SIZE:
break
try:
e = typ.from_buffer(data, offset)
except:
print("data:{} offset:{} size:{} sizeof:{} sum:{}".format(len(data),offset,typ.SIZE,ctypes.sizeof(typ),offset+typ.SIZE))
raise
offset += typ.SIZE
else:
                raise ValueError(str(h) + " unknown type")
yield e
|
gpl-3.0
|
infobloxopen/infoblox-netmri
|
infoblox_netmri/api/broker/v2_6_0/attribute_doc_broker.py
|
17
|
45632
|
from ..broker import Broker
class AttributeDocBroker(Broker):
controller = "attribute_docs"
def show(self, **kwargs):
"""Shows the details for the specified attribute doc.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for this attribute.
:type id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return attribute_doc: The attribute doc identified by the specified id.
:rtype attribute_doc: AttributeDoc
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
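    # Illustrative usage sketch; the api_client argument below is an assumption about the
    # Broker base class, which is defined elsewhere in this package:
    #   broker = AttributeDocBroker(api_client)
    #   doc = broker.show(id=42)   # -> the AttributeDoc whose internal identifier is 42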
    def index(self, **kwargs):
        """Lists the available attribute docs. Any of the inputs listed may be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for this attribute.
:type id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for this attribute.
:type id: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, model_doc_id, attribute, description, created_at, updated_at, gui_type, method_ind, hidden_ind, name, default_sort_desc_ind, default_column_ind, return_model_doc_id, method_param_list.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each AttributeDoc. Valid values are id, model_doc_id, attribute, description, created_at, updated_at, gui_type, method_ind, hidden_ind, name, default_sort_desc_ind, default_column_ind, return_model_doc_id, method_param_list. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return attribute_docs: An array of the AttributeDoc objects that match the specified input criteria.
:rtype attribute_docs: Array of AttributeDoc
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
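    # Illustrative usage sketch (hypothetical, already-constructed broker instance):
    #   docs = broker.index(limit=100, sort=['name'], dir=['asc'])
    #   # -> up to 100 AttributeDoc records, ordered by name ascending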
def search(self, **kwargs):
"""Lists the available attribute docs matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param attribute: Attribute name for this record.
:type attribute: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param attribute: Attribute name for this record.
:type attribute: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param created_at: The date and time the record was initially created in NetMRI.
:type created_at: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param created_at: The date and time the record was initially created in NetMRI.
:type created_at: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param default_column_ind: A flag indicating if this is default column/
:type default_column_ind: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param default_column_ind: A flag indicating if this is default column/
:type default_column_ind: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param default_sort_desc_ind: A flag indicating if default sort order is descending.
:type default_sort_desc_ind: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param default_sort_desc_ind: A flag indicating if default sort order is descending.
:type default_sort_desc_ind: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param description: Attribute description
:type description: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param description: Attribute description
:type description: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param gui_type: Data type used in GUI to handle the attribute.
:type gui_type: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param gui_type: Data type used in GUI to handle the attribute.
:type gui_type: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param hidden_ind: A flag indicating if attribute is hidden
:type hidden_ind: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param hidden_ind: A flag indicating if attribute is hidden
:type hidden_ind: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for this attribute.
:type id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for this attribute.
:type id: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param method_ind: A flag indicating if attribute is method
:type method_ind: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param method_ind: A flag indicating if attribute is method
:type method_ind: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param method_param_list: Parameter list for method
:type method_param_list: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param method_param_list: Parameter list for method
:type method_param_list: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param model_doc_id: The internal NetMRI identifier of Model the attribute is assigned to.
:type model_doc_id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param model_doc_id: The internal NetMRI identifier of Model the attribute is assigned to.
:type model_doc_id: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param name: Human readable attribute name.
:type name: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param name: Human readable attribute name.
:type name: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param return_model_doc_id: The internal NetMRI identifier of Model returned by method.
:type return_model_doc_id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param return_model_doc_id: The internal NetMRI identifier of Model returned by method.
:type return_model_doc_id: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param updated_at: The date and time the record was last modified in NetMRI.
:type updated_at: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param updated_at: The date and time the record was last modified in NetMRI.
:type updated_at: Array of DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, model_doc_id, attribute, description, created_at, updated_at, gui_type, method_ind, hidden_ind, name, default_sort_desc_ind, default_column_ind, return_model_doc_id, method_param_list.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each AttributeDoc. Valid values are id, model_doc_id, attribute, description, created_at, updated_at, gui_type, method_ind, hidden_ind, name, default_sort_desc_ind, default_column_ind, return_model_doc_id, method_param_list. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against attribute docs, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: attribute, created_at, default_column_ind, default_sort_desc_ind, description, gui_type, hidden_ind, id, method_ind, method_param_list, model_doc_id, name, return_model_doc_id, updated_at.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return attribute_docs: An array of the AttributeDoc objects that match the specified input criteria.
:rtype attribute_docs: Array of AttributeDoc
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available attribute docs matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: attribute, created_at, default_column_ind, default_sort_desc_ind, description, gui_type, hidden_ind, id, method_ind, method_param_list, model_doc_id, name, return_model_doc_id, updated_at.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_attribute: The operator to apply to the field attribute. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. attribute: Attribute name for this record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_attribute: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_attribute: If op_attribute is specified, the field named in this input will be compared to the value in attribute using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_attribute must be specified if op_attribute is specified.
:type val_f_attribute: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_attribute: If op_attribute is specified, this value will be compared to the value in attribute using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_attribute must be specified if op_attribute is specified.
:type val_c_attribute: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_created_at: The operator to apply to the field created_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. created_at: The date and time the record was initially created in NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_created_at: If op_created_at is specified, the field named in this input will be compared to the value in created_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_created_at must be specified if op_created_at is specified.
:type val_f_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_created_at: If op_created_at is specified, this value will be compared to the value in created_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_created_at must be specified if op_created_at is specified.
:type val_c_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_default_column_ind: The operator to apply to the field default_column_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. default_column_ind: A flag indicating if this is default column/ For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_default_column_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_default_column_ind: If op_default_column_ind is specified, the field named in this input will be compared to the value in default_column_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_default_column_ind must be specified if op_default_column_ind is specified.
:type val_f_default_column_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_default_column_ind: If op_default_column_ind is specified, this value will be compared to the value in default_column_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_default_column_ind must be specified if op_default_column_ind is specified.
:type val_c_default_column_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_default_sort_desc_ind: The operator to apply to the field default_sort_desc_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. default_sort_desc_ind: A flag indicating if default sort order is descending. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_default_sort_desc_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_default_sort_desc_ind: If op_default_sort_desc_ind is specified, the field named in this input will be compared to the value in default_sort_desc_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_default_sort_desc_ind must be specified if op_default_sort_desc_ind is specified.
:type val_f_default_sort_desc_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_default_sort_desc_ind: If op_default_sort_desc_ind is specified, this value will be compared to the value in default_sort_desc_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_default_sort_desc_ind must be specified if op_default_sort_desc_ind is specified.
:type val_c_default_sort_desc_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_description: The operator to apply to the field description. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. description: Attribute description For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_description: If op_description is specified, the field named in this input will be compared to the value in description using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_description must be specified if op_description is specified.
:type val_f_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_description: If op_description is specified, this value will be compared to the value in description using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_description must be specified if op_description is specified.
:type val_c_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_gui_type: The operator to apply to the field gui_type. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. gui_type: Data type used in GUI to handle the attribute. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_gui_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_gui_type: If op_gui_type is specified, the field named in this input will be compared to the value in gui_type using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_gui_type must be specified if op_gui_type is specified.
:type val_f_gui_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_gui_type: If op_gui_type is specified, this value will be compared to the value in gui_type using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_gui_type must be specified if op_gui_type is specified.
:type val_c_gui_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_hidden_ind: The operator to apply to the field hidden_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. hidden_ind: A flag indicating if attribute is hidden For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_hidden_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_hidden_ind: If op_hidden_ind is specified, the field named in this input will be compared to the value in hidden_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_hidden_ind must be specified if op_hidden_ind is specified.
:type val_f_hidden_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_hidden_ind: If op_hidden_ind is specified, this value will be compared to the value in hidden_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_hidden_ind must be specified if op_hidden_ind is specified.
:type val_c_hidden_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_id: The operator to apply to the field id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. id: The internal NetMRI identifier for this attribute. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_id: If op_id is specified, the field named in this input will be compared to the value in id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_id must be specified if op_id is specified.
:type val_f_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_id: If op_id is specified, this value will be compared to the value in id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_id must be specified if op_id is specified.
:type val_c_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_method_ind: The operator to apply to the field method_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. method_ind: A flag indicating if attribute is method For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_method_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_method_ind: If op_method_ind is specified, the field named in this input will be compared to the value in method_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_method_ind must be specified if op_method_ind is specified.
:type val_f_method_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_method_ind: If op_method_ind is specified, this value will be compared to the value in method_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_method_ind must be specified if op_method_ind is specified.
:type val_c_method_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_method_param_list: The operator to apply to the field method_param_list. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. method_param_list: Parameter list for method For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_method_param_list: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_method_param_list: If op_method_param_list is specified, the field named in this input will be compared to the value in method_param_list using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_method_param_list must be specified if op_method_param_list is specified.
:type val_f_method_param_list: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_method_param_list: If op_method_param_list is specified, this value will be compared to the value in method_param_list using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_method_param_list must be specified if op_method_param_list is specified.
:type val_c_method_param_list: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_model_doc_id: The operator to apply to the field model_doc_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. model_doc_id: The internal NetMRI identifier of Model the attribute is assigned to. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_model_doc_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_model_doc_id: If op_model_doc_id is specified, the field named in this input will be compared to the value in model_doc_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_model_doc_id must be specified if op_model_doc_id is specified.
:type val_f_model_doc_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_model_doc_id: If op_model_doc_id is specified, this value will be compared to the value in model_doc_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_model_doc_id must be specified if op_model_doc_id is specified.
:type val_c_model_doc_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_name: The operator to apply to the field name. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. name: Human readable attribute name. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_name: If op_name is specified, the field named in this input will be compared to the value in name using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_name must be specified if op_name is specified.
:type val_f_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_name: If op_name is specified, this value will be compared to the value in name using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_name must be specified if op_name is specified.
:type val_c_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_return_model_doc_id: The operator to apply to the field return_model_doc_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. return_model_doc_id: The internal NetMRI identifier of Model returned by method. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_return_model_doc_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_return_model_doc_id: If op_return_model_doc_id is specified, the field named in this input will be compared to the value in return_model_doc_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_return_model_doc_id must be specified if op_return_model_doc_id is specified.
:type val_f_return_model_doc_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_return_model_doc_id: If op_return_model_doc_id is specified, this value will be compared to the value in return_model_doc_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_return_model_doc_id must be specified if op_return_model_doc_id is specified.
:type val_c_return_model_doc_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_updated_at: The operator to apply to the field updated_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. updated_at: The date and time the record was last modified in NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_updated_at: If op_updated_at is specified, the field named in this input will be compared to the value in updated_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_updated_at must be specified if op_updated_at is specified.
:type val_f_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_updated_at: If op_updated_at is specified, this value will be compared to the value in updated_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_updated_at must be specified if op_updated_at is specified.
:type val_c_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, model_doc_id, attribute, description, created_at, updated_at, gui_type, method_ind, hidden_ind, name, default_sort_desc_ind, default_column_ind, return_model_doc_id, method_param_list.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each AttributeDoc. Valid values are id, model_doc_id, attribute, description, created_at, updated_at, gui_type, method_ind, hidden_ind, name, default_sort_desc_ind, default_column_ind, return_model_doc_id, method_param_list. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return attribute_docs: An array of the AttributeDoc objects that match the specified input criteria.
:rtype attribute_docs: Array of AttributeDoc
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
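    # Illustrative usage sketch (hypothetical, already-constructed broker instance): list the
    # attribute docs updated after a given date by comparing against an explicit constant:
    #   recent = broker.find(op_updated_at='>', val_c_updated_at='2020-01-01 00:00:00')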
|
apache-2.0
|
spyder-ide/conda-manager
|
conda_manager/widgets/search.py
|
2
|
2652
|
# -*- coding:utf-8 -*-
#
# Copyright © 2015 The Spyder Development Team
# Copyright © 2014 Gonzalo Peña-Castellanos (@goanpeca)
#
# Licensed under the terms of the MIT License
"""
"""
# Standard library imports
from __future__ import (absolute_import, division, print_function,
unicode_literals, with_statement)
import sys
# Third party imports
from qtpy.QtCore import Qt, QSize
from qtpy.QtGui import QIcon, QPixmap
from qtpy.QtWidgets import (QApplication, QHBoxLayout, QLabel, QLineEdit,
QToolButton)
# Local imports
from conda_manager.utils import get_image_path
class SearchLineEdit(QLineEdit):
"""Line edit search widget with icon and remove all button"""
def __init__(self, parent=None, icon=True):
super(SearchLineEdit, self).__init__(parent)
self.setTextMargins(1, 0, 20, 0)
if icon:
self.setTextMargins(18, 0, 20, 0)
self._label = QLabel(self)
self._pixmap_icon = QPixmap(get_image_path('conda_search.png'))
self._label.setPixmap(self._pixmap_icon)
self._label.setStyleSheet('''border: 0px; padding-bottom: 0px;
padding-left: 2px;''')
self._pixmap = QPixmap(get_image_path('conda_del.png'))
self.button_clear = QToolButton(self)
self.button_clear.setIcon(QIcon(self._pixmap))
self.button_clear.setIconSize(QSize(18, 18))
self.button_clear.setCursor(Qt.ArrowCursor)
self.button_clear.setStyleSheet("""QToolButton
{background: transparent;
padding-right: 2px; border: none; margin:0px; }""")
self.button_clear.setVisible(False)
# Layout
self._layout = QHBoxLayout(self)
self._layout.addWidget(self.button_clear, 0, Qt.AlignRight)
self._layout.setSpacing(0)
self._layout.setContentsMargins(0, 2, 2, 0)
# Signals and slots
self.button_clear.clicked.connect(self.clear_text)
self.textChanged.connect(self._toggle_visibility)
self.textEdited.connect(self._toggle_visibility)
def _toggle_visibility(self):
""" """
if len(self.text()) == 0:
self.button_clear.setVisible(False)
else:
self.button_clear.setVisible(True)
def sizeHint(self):
return QSize(200, self._pixmap_icon.height())
# Public api
# ----------
def clear_text(self):
""" """
self.setText('')
self.setFocus()
if __name__ == '__main__':
app = QApplication(sys.argv)
w = SearchLineEdit()
w.show()
sys.exit(app.exec_())
|
mit
|
aldryn/aldryn-blog
|
aldryn_blog/search_indexes.py
|
1
|
1338
|
# -*- coding: utf-8 -*-
from django.db.models import Q
from django.template import RequestContext
from aldryn_search.utils import get_index_base, strip_tags
from .conf import settings
from .models import Post
class BlogIndex(get_index_base()):
haystack_use_for_indexing = settings.ALDRYN_BLOG_SEARCH
INDEX_TITLE = True
def get_title(self, obj):
return obj.title
def get_description(self, obj):
return obj.lead_in
def get_language(self, obj):
return obj.language
def prepare_pub_date(self, obj):
return obj.publication_start
def get_index_queryset(self, language):
queryset = self.get_model().published.all()
return queryset.filter(Q(language=language) | Q(language__isnull=True))
def get_model(self):
return Post
def get_search_data(self, obj, language, request):
lead_in = self.get_description(obj)
text_bits = [strip_tags(lead_in)]
plugins = obj.content.cmsplugin_set.filter(language=language)
for base_plugin in plugins:
instance, plugin_type = base_plugin.get_plugin_instance()
if instance is not None:
content = strip_tags(instance.render_plugin(context=RequestContext(request)))
text_bits.append(content)
return ' '.join(text_bits)
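# --- Hedged, illustrative sketch (not part of the original aldryn-blog file) ---
# get_search_data() above strips markup from the lead-in and from each rendered
# plugin, then joins the pieces into one searchable string. The stand-in below
# uses only the standard library; the regex is a simplification of strip_tags.
import re

def _search_data_example():
    def naive_strip_tags(html):
        # illustrative stand-in for aldryn_search.utils.strip_tags
        return re.sub(r'<[^>]+>', '', html)
    text_bits = [naive_strip_tags('<p>Lead-in text</p>'),
                 naive_strip_tags('<div>Rendered plugin body</div>')]
    return ' '.join(text_bits)  # -> 'Lead-in text Rendered plugin body'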
|
bsd-3-clause
|
hforge/itools
|
itools/database/backends/patchs.py
|
1
|
5600
|
# -*- coding: UTF-8 -*-
# Copyright (C) 2020 Sylvain Taverne <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import from standard library
import difflib
import tarfile
from glob import glob
from datetime import datetime, timedelta
from time import strftime
from uuid import uuid4
# Import from itools
from itools.fs import lfs
from itools.loop import cron
class PatchsBackend(object):
rotate_interval = timedelta(weeks=2)
def __init__(self, db_path, db_fs, read_only):
self.db_fs = db_fs
self.db_path = db_path
# Init patchs folder
self.patchs_path = '{0}/database/.git/patchs'.format(db_path)
if not lfs.exists(self.patchs_path):
lfs.make_folder(self.patchs_path)
self.patchs_fs = lfs.open(self.patchs_path)
# Launch rotate on patchs folder (only one time, so only on RW database)
if not read_only:
self.launch_rotate()
def get_last_rotate_date(self):
# Find the most recent date
dates = []
n2 = '[0-9][0-9]'
date_pattern = n2 + n2 + '-' + n2 + '-' + n2 + '_' + n2 + n2
for name in glob(self.patchs_path + '/' + date_pattern + '.tgz'):
try:
date = datetime.strptime(name[-18:-3], '%Y-%m-%d_%H%M')
except ValueError:
continue
dates.append(date)
if dates:
dates.sort()
return dates[-1]
return None
def launch_rotate(self):
last = self.get_last_rotate_date()
# If there are no rotated archives yet, we create one
if not last:
last = datetime.now()
self.rotate()
# Compute the next call
next_call = last + self.rotate_interval - datetime.now()
if next_call <= timedelta(0):
next_call = timedelta(seconds=1)
# Call cron
cron(self.rotate, next_call)
def rotate(self):
gzip_folders = []
for name in self.patchs_fs.get_names():
try:
patchs_date = datetime.strptime(name, '%Y%m%d')
except ValueError:
continue
delta = datetime.now() - patchs_date
if delta > self.rotate_interval:
gzip_folders.append(self.patchs_path + '/' + name)
# Check whether there is anything to gzip
if not gzip_folders:
return self.rotate_interval
print('[Database] Launching patch rotation. This may take some time.')
# Create TAR file
tar_destination = self.patchs_path + '/{0}.tgz'.format(strftime('%Y-%m-%d_%H%M'))
with tarfile.open(tar_destination, "w:gz" ) as tar:
for gzip_folder in gzip_folders:
tar.add(gzip_folder)
# Remove old folders
for gzip_folder in gzip_folders:
self.patchs_fs.remove(gzip_folder)
# Always return the interval so the cron loop schedules the next rotation
return self.rotate_interval
def create_patch(self, added, changed, removed, handlers, git_author):
""" We create a patch into database/.git/patchs at each transaction.
The idea is to commit into GIT each N transactions on big databases to avoid performances problems.
We want to keep a diff on each transaction, to help debug.
"""
author_id, author_email = git_author
diffs = {}
# Added
for key in added:
if key.endswith('.metadata'):
after = handlers.get(key).to_str().splitlines(True)
diff = difflib.unified_diff('', after, fromfile=key, tofile=key)
diffs[key] = ''.join(diff)
# Changed
for key in changed:
if key.endswith('.metadata'):
with self.db_fs.open(key) as f:
before = f.readlines()
after = handlers.get(key).to_str().splitlines(True)
diff = difflib.unified_diff(before, after, fromfile=key, tofile=key)
diffs[key] = ''.join(diff)
# Removed
for key in removed:
if key.endswith('.metadata'):
with self.db_fs.open(key) as f:
before = f.readlines()
after = ''
diff = difflib.unified_diff(before, after, fromfile=key, tofile=key)
diffs[key] = ''.join(diff)
# Create patch
base_path = datetime.now().strftime('.git/patchs/%Y%m%d/')
if not self.db_fs.exists(base_path):
self.db_fs.make_folder(base_path)
the_time = datetime.now().strftime('%Hh%Mm%S.%f')
patch_key = '{base_path}/{the_time}-user{author_id}-{uuid}.patch'.format(
base_path=base_path,
author_id=author_id,
the_time=the_time,
uuid=uuid4())
data = ''.join([diffs[x] for x in sorted(diffs.keys())])
# Write
with self.db_fs.open(patch_key, 'w') as f:
f.write(data)
f.truncate(f.tell())
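# --- Hedged, illustrative sketch (not part of the original itools file) ---
# Shows the unified-diff pattern used by create_patch() above on plain strings;
# the file name and contents here are made up for illustration only.
def _unified_diff_example():
    import difflib
    before = "title: Old title\n".splitlines(True)
    after = "title: New title\n".splitlines(True)
    diff = difflib.unified_diff(before, after,
                                fromfile='doc.metadata', tofile='doc.metadata')
    # The joined string is what would be written into the .patch file.
    return ''.join(diff)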
|
gpl-3.0
|
se4u/pylearn2
|
pylearn2/scripts/papers/maxout/svhn_preprocessing.py
|
55
|
2006
|
import os
import logging
import shutil
from theano import config
from pylearn2.datasets import preprocessing
from pylearn2.datasets.svhn import SVHN
from pylearn2.utils.string_utils import preprocess
orig_path = preprocess('${PYLEARN2_DATA_PATH}/SVHN/format2')
try:
local_path = preprocess('${SVHN_LOCAL_PATH}')
except ValueError:
raise ValueError("You need to define SVHN_LOCAL_PATH environment "
"variable.")
train_name = 'h5/splitted_train_32x32.h5'
valid_name = 'h5/valid_32x32.h5'
test_name = 'h5/test_32x32.h5'
# copy the data if it doesn't already exist locally
if not os.path.isdir(os.path.join(local_path, 'h5')):
os.makedirs(os.path.join(local_path, 'h5'))
for d_set in [train_name, valid_name, test_name]:
if not os.path.isfile(os.path.join(local_path, d_set)):
logging.info("Copying data from {0} to {1}".format(os.path.join(orig_path, d_set), os.path.join(local_path, d_set)))
shutil.copyfile(os.path.join(orig_path, d_set),
os.path.join(local_path, d_set))
def check_dtype(data):
if str(data.X.dtype) != config.floatX:
logging.warning("The dataset is saved as {}, changing theano's floatX "\
"to the same dtype".format(data.X.dtype))
config.floatX = str(data.X.dtype)
# Load train data
train = SVHN('splitted_train', path=local_path)
check_dtype(train)
# prepare preprocessing
pipeline = preprocessing.Pipeline()
# without batch_size there is a high chance of running into memory errors
# or pytables crashes
pipeline.items.append(preprocessing.GlobalContrastNormalization(batch_size=5000))
pipeline.items.append(preprocessing.LeCunLCN((32,32)))
# apply the preprocessing pipeline to the training set
train.apply_preprocessor(pipeline, can_fit=True)
del train
# load and preprocess valid
valid = SVHN('valid', path=local_path)
check_dtype(valid)
valid.apply_preprocessor(pipeline, can_fit=False)
# load and preprocess test
test = SVHN('test', path=local_path)
check_dtype(test)
test.apply_preprocessor(pipeline, can_fit=False)
|
bsd-3-clause
|
AcalephStorage/ansible-modules-extras
|
network/f5/bigip_monitor_http.py
|
8
|
16585
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, serge van Ginderachter <[email protected]>
# based on Matt Hite's bigip_pool module
# (c) 2013, Matt Hite <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bigip_monitor_http
short_description: "Manages F5 BIG-IP LTM http monitors"
description:
- "Manages F5 BIG-IP LTM monitors via iControl SOAP API"
version_added: "1.4"
author: Serge van Ginderachter
notes:
- "Requires BIG-IP software version >= 11"
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- "Best run as a local_action in your playbook"
- "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx"
requirements:
- bigsuds
options:
server:
description:
- BIG-IP host
required: true
default: null
user:
description:
- BIG-IP username
required: true
default: null
password:
description:
- BIG-IP password
required: true
default: null
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 2.0
state:
description:
- Monitor state
required: false
default: 'present'
choices: ['present', 'absent']
name:
description:
- Monitor name
required: true
default: null
aliases: ['monitor']
partition:
description:
- Partition for the monitor
required: false
default: 'Common'
parent:
description:
- The parent template of this monitor template
required: false
default: 'http'
parent_partition:
description:
- Partition for the parent monitor
required: false
default: 'Common'
send:
description:
- The send string for the monitor call
required: true
default: none
receive:
description:
- The receive string for the monitor call
required: true
default: none
receive_disable:
description:
- The receive disable string for the monitor call
required: true
default: none
ip:
description:
- IP address part of the ipport definition. The default API setting
is "0.0.0.0".
required: false
default: none
port:
description:
- Port address part of the ipport definition. The default API
setting is 0.
required: false
default: none
interval:
description:
- The interval specifying how frequently the monitor instance
of this template will run. By default, this interval is used for up and
down states. The default API setting is 5.
required: false
default: none
timeout:
description:
- The number of seconds in which the node or service must respond to
the monitor request. If the target responds within the set time
period, it is considered up. If the target does not respond within
the set time period, it is considered down. You can change this
number to any number you want, however, it should be 3 times the
interval number of seconds plus 1 second. The default API setting
is 16.
required: false
default: none
time_until_up:
description:
- Specifies the amount of time in seconds after the first successful
response before a node will be marked up. A value of 0 will cause a
node to be marked up immediately after a valid response is received
from the node. The default API setting is 0.
required: false
default: none
'''
EXAMPLES = '''
- name: BIGIP F5 | Create HTTP Monitor
local_action:
module: bigip_monitor_http
state: present
server: "{{ f5server }}"
user: "{{ f5user }}"
password: "{{ f5password }}"
name: "{{ item.monitorname }}"
send: "{{ item.send }}"
receive: "{{ item.receive }}"
with_items: f5monitors
- name: BIGIP F5 | Remove HTTP Monitor
local_action:
module: bigip_monitor_http
state: absent
server: "{{ f5server }}"
user: "{{ f5user }}"
password: "{{ f5password }}"
name: "{{ monitorname }}"
'''
try:
import bigsuds
except ImportError:
bigsuds_found = False
else:
bigsuds_found = True
TEMPLATE_TYPE = 'TTYPE_HTTP'
DEFAULT_PARENT_TYPE = 'http'
# ===========================================
# bigip_monitor module generic methods.
# these should be re-useable for other monitor types
#
def bigip_api(bigip, user, password):
api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
return api
def disable_ssl_cert_validation():
# You probably only want to do this for testing and never in production.
# From https://www.python.org/dev/peps/pep-0476/#id29
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
def check_monitor_exists(module, api, monitor, parent):
# hack to determine if monitor exists
result = False
try:
ttype = api.LocalLB.Monitor.get_template_type(template_names=[monitor])[0]
parent2 = api.LocalLB.Monitor.get_parent_template(template_names=[monitor])[0]
if ttype == TEMPLATE_TYPE and parent == parent2:
result = True
else:
module.fail_json(msg='Monitor already exists, but has a different type (%s) or parent(%s)' % (ttype, parent))
except bigsuds.OperationFailed, e:
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def create_monitor(api, monitor, template_attributes):
try:
api.LocalLB.Monitor.create_template(templates=[{'template_name': monitor, 'template_type': TEMPLATE_TYPE}], template_attributes=[template_attributes])
except bigsuds.OperationFailed, e:
if "already exists" in str(e):
return False
else:
# genuine exception
raise
return True
def delete_monitor(api, monitor):
try:
api.LocalLB.Monitor.delete_template(template_names=[monitor])
except bigsuds.OperationFailed, e:
# maybe it was deleted since we checked
if "was not found" in str(e):
return False
else:
# genuine exception
raise
return True
def check_string_property(api, monitor, str_property):
try:
return str_property == api.LocalLB.Monitor.get_template_string_property([monitor], [str_property['type']])[0]
except bigsuds.OperationFailed, e:
# happens in check mode if not created yet
if "was not found" in str(e):
return True
else:
# genuine exception
raise
def set_string_property(api, monitor, str_property):
api.LocalLB.Monitor.set_template_string_property(template_names=[monitor], values=[str_property])
def check_integer_property(api, monitor, int_property):
try:
return int_property == api.LocalLB.Monitor.get_template_integer_property([monitor], [int_property['type']])[0]
except bigsuds.OperationFailed, e:
# happens in check mode if not created yet
if "was not found" in str(e):
return True
else:
# genuine exception
raise
def set_integer_property(api, monitor, int_property):
api.LocalLB.Monitor.set_template_int_property(template_names=[monitor], values=[int_property])
def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties):
changed = False
for str_property in template_string_properties:
if str_property['value'] is not None and not check_string_property(api, monitor, str_property):
if not module.check_mode:
set_string_property(api, monitor, str_property)
changed = True
for int_property in template_integer_properties:
if int_property['value'] is not None and not check_integer_property(api, monitor, int_property):
if not module.check_mode:
set_integer_property(api, monitor, int_property)
changed = True
return changed
def get_ipport(api, monitor):
return api.LocalLB.Monitor.get_template_destination(template_names=[monitor])[0]
def set_ipport(api, monitor, ipport):
try:
api.LocalLB.Monitor.set_template_destination(template_names=[monitor], destinations=[ipport])
return True, ""
except bigsuds.OperationFailed, e:
if "Cannot modify the address type of monitor" in str(e):
return False, "Cannot modify the address type of monitor if already assigned to a pool."
else:
# genuine exception
raise
# ===========================================
# main loop
#
# writing a module for other monitor types should
# only need an updated main() (and monitor specific functions)
def main():
# begin monitor specific stuff
module = AnsibleModule(
argument_spec = dict(
server = dict(required=True),
user = dict(required=True),
password = dict(required=True),
validate_certs = dict(default='yes', type='bool'),
partition = dict(default='Common'),
state = dict(default='present', choices=['present', 'absent']),
name = dict(required=True),
parent = dict(default=DEFAULT_PARENT_TYPE),
parent_partition = dict(default='Common'),
send = dict(required=False),
receive = dict(required=False),
receive_disable = dict(required=False),
ip = dict(required=False),
port = dict(required=False, type='int'),
interval = dict(required=False, type='int'),
timeout = dict(required=False, type='int'),
time_until_up = dict(required=False, type='int', default=0)
),
supports_check_mode=True
)
server = module.params['server']
user = module.params['user']
password = module.params['password']
validate_certs = module.params['validate_certs']
partition = module.params['partition']
parent_partition = module.params['parent_partition']
state = module.params['state']
name = module.params['name']
parent = "/%s/%s" % (parent_partition, module.params['parent'])
monitor = "/%s/%s" % (partition, name)
send = module.params['send']
receive = module.params['receive']
receive_disable = module.params['receive_disable']
ip = module.params['ip']
port = module.params['port']
interval = module.params['interval']
timeout = module.params['timeout']
time_until_up = module.params['time_until_up']
# end monitor specific stuff
if not validate_certs:
disable_ssl_cert_validation()
if not bigsuds_found:
module.fail_json(msg="the python bigsuds module is required")
api = bigip_api(server, user, password)
monitor_exists = check_monitor_exists(module, api, monitor, parent)
# ipport is a special setting
if monitor_exists: # make sure to not update current settings if not asked
cur_ipport = get_ipport(api, monitor)
if ip is None:
ip = cur_ipport['ipport']['address']
if port is None:
port = cur_ipport['ipport']['port']
else: # use API defaults if not defined to create it
if interval is None:
interval = 5
if timeout is None:
timeout = 16
if ip is None:
ip = '0.0.0.0'
if port is None:
port = 0
if send is None:
send = ''
if receive is None:
receive = ''
if receive_disable is None:
receive_disable = ''
# define and set address type
if ip == '0.0.0.0' and port == 0:
address_type = 'ATYPE_STAR_ADDRESS_STAR_PORT'
elif ip == '0.0.0.0' and port != 0:
address_type = 'ATYPE_STAR_ADDRESS_EXPLICIT_PORT'
elif ip != '0.0.0.0' and port != 0:
address_type = 'ATYPE_EXPLICIT_ADDRESS_EXPLICIT_PORT'
else:
address_type = 'ATYPE_UNSET'
ipport = {'address_type': address_type,
'ipport': {'address': ip,
'port': port}}
template_attributes = {'parent_template': parent,
'interval': interval,
'timeout': timeout,
'dest_ipport': ipport,
'is_read_only': False,
'is_directly_usable': True}
# monitor specific stuff
template_string_properties = [{'type': 'STYPE_SEND',
'value': send},
{'type': 'STYPE_RECEIVE',
'value': receive},
{'type': 'STYPE_RECEIVE_DRAIN',
'value': receive_disable}]
template_integer_properties = [{'type': 'ITYPE_INTERVAL',
'value': interval},
{'type': 'ITYPE_TIMEOUT',
'value': timeout},
{'type': 'ITYPE_TIME_UNTIL_UP',
'value': time_until_up}]
# main logic, monitor generic
try:
result = {'changed': False} # default
if state == 'absent':
if monitor_exists:
if not module.check_mode:
# possible race condition if same task
# on other node deleted it first
result['changed'] |= delete_monitor(api, monitor)
else:
result['changed'] |= True
else: # state present
## check for monitor itself
if not monitor_exists: # create it
if not module.check_mode:
# again, check changed status here b/c race conditions
# if other task already created it
result['changed'] |= create_monitor(api, monitor, template_attributes)
else:
result['changed'] |= True
## check for monitor parameters
# whether it already existed, or was just created, now update
# the update functions need to check for check mode but
# cannot update settings if it doesn't exist which happens in check mode
result['changed'] |= update_monitor_properties(api, module, monitor,
template_string_properties,
template_integer_properties)
# we just have to update the ipport if monitor already exists and it's different
if monitor_exists and cur_ipport != ipport:
set_ipport(api, monitor, ipport)
result['changed'] |= True
#else: monitor doesn't exist (check mode) or ipport is already ok
except Exception, e:
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
|
gpl-3.0
|
asm-products/movie-database-service
|
ani/lib/python2.7/site-packages/pip/commands/uninstall.py
|
395
|
2203
|
from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.basecommand import Command
from pip.exceptions import InstallationError
class UninstallCommand(Command):
"""
Uninstall packages.
pip is able to uninstall most installed packages. Known exceptions are:
- Pure distutils packages installed with ``python setup.py install``, which
leave behind no metadata to determine what files were installed.
- Script wrappers installed by ``python setup.py develop``.
"""
name = 'uninstall'
usage = """
%prog [options] <package> ...
%prog [options] -r <requirements file> ..."""
summary = 'Uninstall packages.'
def __init__(self, *args, **kw):
super(UninstallCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help='Uninstall all the packages listed in the given requirements file. '
'This option can be used multiple times.')
self.cmd_opts.add_option(
'-y', '--yes',
dest='yes',
action='store_true',
help="Don't ask for confirmation of uninstall deletions.")
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
session = self._build_session(options)
requirement_set = RequirementSet(
build_dir=None,
src_dir=None,
download_dir=None,
session=session,
)
for name in args:
requirement_set.add_requirement(
InstallRequirement.from_line(name))
for filename in options.requirements:
for req in parse_requirements(filename,
options=options, session=session):
requirement_set.add_requirement(req)
if not requirement_set.has_requirements:
raise InstallationError('You must give at least one requirement '
'to %(name)s (see "pip help %(name)s")' % dict(name=self.name))
requirement_set.uninstall(auto_confirm=options.yes)
|
agpl-3.0
|
eaplatanios/tensorflow
|
tensorflow/python/ops/rnn.py
|
3
|
62251
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RNN helpers for TensorFlow models.
@@bidirectional_dynamic_rnn
@@dynamic_rnn
@@raw_rnn
@@static_rnn
@@static_state_saving_rnn
@@static_bidirectional_rnn
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access
_concat = rnn_cell_impl._concat
# pylint: enable=protected-access
def _transpose_batch_time(x):
"""Transposes the batch and time dimensions of a Tensor.
If the input tensor has rank < 2 it returns the original tensor. Retains as
much of the static shape information as possible.
Args:
x: A Tensor.
Returns:
x transposed along the first two dimensions.
"""
x_static_shape = x.get_shape()
if x_static_shape.ndims is not None and x_static_shape.ndims < 2:
return x
x_rank = array_ops.rank(x)
x_t = array_ops.transpose(
x, array_ops.concat(
([1, 0], math_ops.range(2, x_rank)), axis=0))
x_t.set_shape(
tensor_shape.TensorShape([
x_static_shape[1].value, x_static_shape[0].value
]).concatenate(x_static_shape[2:]))
return x_t
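# --- Hedged, illustrative sketch (not part of the original TensorFlow source) ---
# _transpose_batch_time swaps the first two axes while leaving the rest intact.
# The NumPy analogue below shows the intended effect on a [batch, time, depth]
# array; the shapes here are made up for illustration only.
def _transpose_batch_time_example():
    import numpy as np
    x = np.zeros((4, 7, 3))           # [batch_size=4, max_time=7, depth=3]
    x_t = np.transpose(x, (1, 0, 2))  # -> [max_time=7, batch_size=4, depth=3]
    assert x_t.shape == (7, 4, 3)
    return x_t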
def _best_effort_input_batch_size(flat_input):
"""Get static input batch size if available, with fallback to the dynamic one.
Args:
flat_input: An iterable of time major input Tensors of shape
`[max_time, batch_size, ...]`.
All inputs should have compatible batch sizes.
Returns:
The batch size in Python integer if available, or a scalar Tensor otherwise.
Raises:
ValueError: if there is any input with an invalid shape.
"""
for input_ in flat_input:
shape = input_.shape
if shape.ndims is None:
continue
if shape.ndims < 2:
raise ValueError(
"Expected input tensor %s to have rank at least 2" % input_)
batch_size = shape[1].value
if batch_size is not None:
return batch_size
# Fallback to the dynamic batch size of the first input.
return array_ops.shape(flat_input[0])[1]
def _infer_state_dtype(explicit_dtype, state):
"""Infer the dtype of an RNN state.
Args:
explicit_dtype: explicitly declared dtype or None.
state: RNN's hidden state. Must be a Tensor or a nested iterable containing
Tensors.
Returns:
dtype: inferred dtype of hidden state.
Raises:
ValueError: if `state` has heterogeneous dtypes or is empty.
"""
if explicit_dtype is not None:
return explicit_dtype
elif nest.is_sequence(state):
inferred_dtypes = [element.dtype for element in nest.flatten(state)]
if not inferred_dtypes:
raise ValueError("Unable to infer dtype from empty state.")
all_same = all([x == inferred_dtypes[0] for x in inferred_dtypes])
if not all_same:
raise ValueError(
"State has tensors of different inferred_dtypes. Unable to infer a "
"single representative dtype.")
return inferred_dtypes[0]
else:
return state.dtype
def _maybe_tensor_shape_from_tensor(shape):
if isinstance(shape, ops.Tensor):
return tensor_shape.as_shape(tensor_util.constant_value(shape))
else:
return shape
# pylint: disable=unused-argument
def _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, call_cell, state_size, skip_conditionals=False):
"""Calculate one step of a dynamic RNN minibatch.
Returns an (output, state) pair conditioned on `sequence_length`.
When skip_conditionals=False, the pseudocode is something like:
if t >= max_sequence_length:
return (zero_output, state)
if t < min_sequence_length:
return call_cell()
# Selectively output zeros or output, old state or new state depending
# on whether we've finished calculating each row.
new_output, new_state = call_cell()
final_output = np.vstack([
zero_output if time >= sequence_length[r] else new_output_r
for r, new_output_r in enumerate(new_output)
])
final_state = np.vstack([
state[r] if time >= sequence_length[r] else new_state_r
for r, new_state_r in enumerate(new_state)
])
return (final_output, final_state)
Args:
time: int32 `Tensor` scalar.
sequence_length: int32 `Tensor` vector of size [batch_size].
min_sequence_length: int32 `Tensor` scalar, min of sequence_length.
max_sequence_length: int32 `Tensor` scalar, max of sequence_length.
zero_output: `Tensor` vector of shape [output_size].
state: Either a single `Tensor` matrix of shape `[batch_size, state_size]`,
or a list/tuple of such tensors.
call_cell: lambda returning tuple of (new_output, new_state) where
new_output is a `Tensor` matrix of shape `[batch_size, output_size]`.
new_state is a `Tensor` matrix of shape `[batch_size, state_size]`.
state_size: The `cell.state_size` associated with the state.
skip_conditionals: Python bool, whether to skip using the conditional
calculations. This is useful for `dynamic_rnn`, where the input tensor
matches `max_sequence_length`, and using conditionals just slows
everything down.
Returns:
A tuple of (`final_output`, `final_state`) as given by the pseudocode above:
final_output is a `Tensor` matrix of shape [batch_size, output_size]
final_state is either a single `Tensor` matrix, or a tuple of such
matrices (matching length and shapes of input `state`).
Raises:
ValueError: If the cell returns a state tuple whose length does not match
that returned by `state_size`.
"""
# Convert state to a list for ease of use
flat_state = nest.flatten(state)
flat_zero_output = nest.flatten(zero_output)
# Vector describing which batch entries are finished.
copy_cond = time >= sequence_length
def _copy_one_through(output, new_output):
# TensorArray and scalar get passed through.
if isinstance(output, tensor_array_ops.TensorArray):
return new_output
if output.shape.ndims == 0:
return new_output
# Otherwise propagate the old or the new value.
with ops.colocate_with(new_output):
return array_ops.where(copy_cond, output, new_output)
def _copy_some_through(flat_new_output, flat_new_state):
# Use broadcasting select to determine which values should get
# the previous state & zero output, and which values should get
# a calculated state & output.
flat_new_output = [
_copy_one_through(zero_output, new_output)
for zero_output, new_output in zip(flat_zero_output, flat_new_output)]
flat_new_state = [
_copy_one_through(state, new_state)
for state, new_state in zip(flat_state, flat_new_state)]
return flat_new_output + flat_new_state
def _maybe_copy_some_through():
"""Run RNN step. Pass through either no or some past state."""
new_output, new_state = call_cell()
nest.assert_same_structure(state, new_state)
flat_new_state = nest.flatten(new_state)
flat_new_output = nest.flatten(new_output)
return control_flow_ops.cond(
# if t < min_seq_len: calculate and return everything
time < min_sequence_length, lambda: flat_new_output + flat_new_state,
# else copy some of it through
lambda: _copy_some_through(flat_new_output, flat_new_state))
# TODO(ebrevdo): skipping these conditionals may cause a slowdown,
# but benefits from removing cond() and its gradient. We should
# profile with and without this switch here.
if skip_conditionals:
# Instead of using conditionals, perform the selective copy at all time
# steps. This is faster when max_seq_len is equal to the number of unrolls
# (which is typical for dynamic_rnn).
new_output, new_state = call_cell()
nest.assert_same_structure(state, new_state)
new_state = nest.flatten(new_state)
new_output = nest.flatten(new_output)
final_output_and_state = _copy_some_through(new_output, new_state)
else:
empty_update = lambda: flat_zero_output + flat_state
final_output_and_state = control_flow_ops.cond(
# if t >= max_seq_len: copy all state through, output zeros
time >= max_sequence_length, empty_update,
# otherwise calculation is required: copy some or all of it through
_maybe_copy_some_through)
if len(final_output_and_state) != len(flat_zero_output) + len(flat_state):
raise ValueError("Internal error: state and output were not concatenated "
"correctly.")
final_output = final_output_and_state[:len(flat_zero_output)]
final_state = final_output_and_state[len(flat_zero_output):]
for output, flat_output in zip(final_output, flat_zero_output):
output.set_shape(flat_output.get_shape())
for substate, flat_substate in zip(final_state, flat_state):
if not isinstance(substate, tensor_array_ops.TensorArray):
substate.set_shape(flat_substate.get_shape())
final_output = nest.pack_sequence_as(
structure=zero_output, flat_sequence=final_output)
final_state = nest.pack_sequence_as(
structure=state, flat_sequence=final_state)
return final_output, final_state
def _reverse_seq(input_seq, lengths):
"""Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
or nested tuples of tensors.
lengths: A `Tensor` of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply reverses
the list.
Returns:
time-reversed sequence
"""
if lengths is None:
return list(reversed(input_seq))
flat_input_seq = tuple(nest.flatten(input_) for input_ in input_seq)
flat_results = [[] for _ in range(len(input_seq))]
for sequence in zip(*flat_input_seq):
input_shape = tensor_shape.unknown_shape(
ndims=sequence[0].get_shape().ndims)
for input_ in sequence:
input_shape.merge_with(input_.get_shape())
input_.set_shape(input_shape)
# Join into (time, batch_size, depth)
s_joined = array_ops.stack(sequence)
# Reverse along dimension 0
s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
# Split again into list
result = array_ops.unstack(s_reversed)
for r, flat_result in zip(result, flat_results):
r.set_shape(input_shape)
flat_result.append(r)
results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result)
for input_, flat_result in zip(input_seq, flat_results)]
return results
@tf_export("nn.bidirectional_dynamic_rnn")
def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
initial_state_fw=None, initial_state_bw=None,
dtype=None, parallel_iterations=None,
swap_memory=False, time_major=False, scope=None):
"""Creates a dynamic version of bidirectional recurrent neural network.
Takes input and builds independent forward and backward RNNs. The input_size
of forward and backward cell must match. The initial state for both directions
is zero by default (but can be set optionally) and no intermediate states are
ever returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not
given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: The RNN inputs.
If time_major == False (default), this must be a tensor of shape:
`[batch_size, max_time, ...]`, or a nested tuple of such elements.
If time_major == True, this must be a tensor of shape:
`[max_time, batch_size, ...]`, or a nested tuple of such elements.
sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
containing the actual lengths for each of the sequences in the batch.
If not provided, all batch entries are assumed to be full sequences; and
time reversal is applied from time `0` to `max_time` for each sequence.
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
`[batch_size, cell_fw.state_size]`.
If `cell_fw.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.
initial_state_bw: (optional) Same as for `initial_state_fw`, but using
the corresponding properties of `cell_bw`.
dtype: (optional) The data type for the initial states and expected output.
Required if initial_states are not provided or RNN states have a
heterogeneous dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to
"bidirectional_rnn"
Returns:
A tuple (outputs, output_states) where:
outputs: A tuple (output_fw, output_bw) containing the forward and
the backward rnn output `Tensor`.
If time_major == False (default),
output_fw will be a `Tensor` shaped:
`[batch_size, max_time, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[batch_size, max_time, cell_bw.output_size]`.
If time_major == True,
output_fw will be a `Tensor` shaped:
`[max_time, batch_size, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[max_time, batch_size, cell_bw.output_size]`.
It returns a tuple instead of a single concatenated `Tensor`, unlike
in the `bidirectional_rnn`. If the concatenated one is preferred,
the forward and backward outputs can be concatenated as
`tf.concat(outputs, 2)`.
output_states: A tuple (output_state_fw, output_state_bw) containing
the forward and the backward final states of bidirectional rnn.
Raises:
TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
"""
rnn_cell_impl.assert_like_rnncell("cell_fw", cell_fw)
rnn_cell_impl.assert_like_rnncell("cell_bw", cell_bw)
with vs.variable_scope(scope or "bidirectional_rnn"):
# Forward direction
with vs.variable_scope("fw") as fw_scope:
output_fw, output_state_fw = dynamic_rnn(
cell=cell_fw, inputs=inputs, sequence_length=sequence_length,
initial_state=initial_state_fw, dtype=dtype,
parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=fw_scope)
# Backward direction
if not time_major:
time_dim = 1
batch_dim = 0
else:
time_dim = 0
batch_dim = 1
def _reverse(input_, seq_lengths, seq_dim, batch_dim):
if seq_lengths is not None:
return array_ops.reverse_sequence(
input=input_, seq_lengths=seq_lengths,
seq_dim=seq_dim, batch_dim=batch_dim)
else:
return array_ops.reverse(input_, axis=[seq_dim])
with vs.variable_scope("bw") as bw_scope:
inputs_reverse = _reverse(
inputs, seq_lengths=sequence_length,
seq_dim=time_dim, batch_dim=batch_dim)
tmp, output_state_bw = dynamic_rnn(
cell=cell_bw, inputs=inputs_reverse, sequence_length=sequence_length,
initial_state=initial_state_bw, dtype=dtype,
parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=bw_scope)
output_bw = _reverse(
tmp, seq_lengths=sequence_length,
seq_dim=time_dim, batch_dim=batch_dim)
outputs = (output_fw, output_bw)
output_states = (output_state_fw, output_state_bw)
return (outputs, output_states)
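# --- Hedged usage sketch (not part of the original TensorFlow source) ---
# Minimal graph-mode example of the function above; assumes TensorFlow 1.x and
# uses illustrative shapes and cell sizes only.
def _bidirectional_dynamic_rnn_usage_example():
    import tensorflow as tf
    inputs = tf.placeholder(tf.float32, shape=[None, 20, 8])  # [batch, max_time, depth]
    seq_len = tf.placeholder(tf.int32, shape=[None])          # per-example lengths
    cell_fw = tf.nn.rnn_cell.LSTMCell(32)
    cell_bw = tf.nn.rnn_cell.LSTMCell(32)
    (out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
        cell_fw, cell_bw, inputs, sequence_length=seq_len, dtype=tf.float32)
    # Concatenate the two directions along the feature axis if a single tensor is wanted.
    outputs = tf.concat([out_fw, out_bw], 2)
    return outputs, (state_fw, state_bw)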
@tf_export("nn.dynamic_rnn")
def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
dtype=None, parallel_iterations=None, swap_memory=False,
time_major=False, scope=None):
"""Creates a recurrent neural network specified by RNNCell `cell`.
Performs fully dynamic unrolling of `inputs`.
Example:
```python
# create a BasicRNNCell
rnn_cell = tf.nn.rnn_cell.BasicRNNCell(hidden_size)
# 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size]
# defining initial state
initial_state = rnn_cell.zero_state(batch_size, dtype=tf.float32)
# 'state' is a tensor of shape [batch_size, cell_state_size]
outputs, state = tf.nn.dynamic_rnn(rnn_cell, input_data,
initial_state=initial_state,
dtype=tf.float32)
```
```python
# create 2 LSTMCells
rnn_layers = [tf.nn.rnn_cell.LSTMCell(size) for size in [128, 256]]
# create a RNN cell composed sequentially of a number of RNNCells
multi_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers)
# 'outputs' is a tensor of shape [batch_size, max_time, 256]
# 'state' is a N-tuple where N is the number of LSTMCells containing a
# tf.contrib.rnn.LSTMStateTuple for each cell
outputs, state = tf.nn.dynamic_rnn(cell=multi_rnn_cell,
inputs=data,
dtype=tf.float32)
```
Args:
cell: An instance of RNNCell.
inputs: The RNN inputs.
If `time_major == False` (default), this must be a `Tensor` of shape:
`[batch_size, max_time, ...]`, or a nested tuple of such
elements.
If `time_major == True`, this must be a `Tensor` of shape:
`[max_time, batch_size, ...]`, or a nested tuple of such
elements.
This may also be a (possibly nested) tuple of Tensors satisfying
this property. The first two dimensions must match across all the inputs,
but otherwise the ranks and other shape components may differ.
In this case, input to `cell` at each time-step will replicate the
structure of these tuples, except for the time dimension (from which the
time is taken).
The input to `cell` at each time step will be a `Tensor` or (possibly
nested) tuple of Tensors each with dimensions `[batch_size, ...]`.
sequence_length: (optional) An int32/int64 vector sized `[batch_size]`.
Used to copy-through state and zero-out outputs when past a batch
element's sequence length. So it's more for correctness than performance.
initial_state: (optional) An initial state for the RNN.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
dtype: (optional) The data type for the initial state and expected output.
Required if initial_state is not provided or RNN state has a heterogeneous
dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
outputs: The RNN output `Tensor`.
If time_major == False (default), this will be a `Tensor` shaped:
`[batch_size, max_time, cell.output_size]`.
If time_major == True, this will be a `Tensor` shaped:
`[max_time, batch_size, cell.output_size]`.
Note, if `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `outputs` will be a tuple having the
same structure as `cell.output_size`, containing Tensors having shapes
corresponding to the shape data in `cell.output_size`.
state: The final state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes. If cells are `LSTMCells`
`state` will be a tuple containing a `LSTMStateTuple` for each cell.
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
rnn_cell_impl.assert_like_rnncell("cell", cell)
with vs.variable_scope(scope or "rnn") as varscope:
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
if not context.executing_eagerly():
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
# By default, time_major==False and inputs are batch-major: shaped
# [batch, time, depth]
# For internal calculations, we transpose to [time, batch, depth]
flat_input = nest.flatten(inputs)
if not time_major:
# (B,T,D) => (T,B,D)
flat_input = [ops.convert_to_tensor(input_) for input_ in flat_input]
flat_input = tuple(_transpose_batch_time(input_) for input_ in flat_input)
parallel_iterations = parallel_iterations or 32
if sequence_length is not None:
sequence_length = math_ops.to_int32(sequence_length)
if sequence_length.get_shape().ndims not in (None, 1):
raise ValueError(
"sequence_length must be a vector of length batch_size, "
"but saw shape: %s" % sequence_length.get_shape())
sequence_length = array_ops.identity( # Just to find it in the graph.
sequence_length, name="sequence_length")
batch_size = _best_effort_input_batch_size(flat_input)
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If there is no initial_state, you must give a dtype.")
state = cell.zero_state(batch_size, dtype)
def _assert_has_shape(x, shape):
x_shape = array_ops.shape(x)
packed_shape = array_ops.stack(shape)
return control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)),
["Expected shape for Tensor %s is " % x.name,
packed_shape, " but saw shape: ", x_shape])
if not context.executing_eagerly() and sequence_length is not None:
# Perform some shape validation
with ops.control_dependencies(
[_assert_has_shape(sequence_length, [batch_size])]):
sequence_length = array_ops.identity(
sequence_length, name="CheckSeqLen")
inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)
(outputs, final_state) = _dynamic_rnn_loop(
cell,
inputs,
state,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory,
sequence_length=sequence_length,
dtype=dtype)
# Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth].
# If we are performing batch-major calculations, transpose output back
# to shape [batch, time, depth]
if not time_major:
# (T,B,D) => (B,T,D)
outputs = nest.map_structure(_transpose_batch_time, outputs)
return (outputs, final_state)
def _dynamic_rnn_loop(cell,
inputs,
initial_state,
parallel_iterations,
swap_memory,
sequence_length=None,
dtype=None):
"""Internal implementation of Dynamic RNN.
Args:
cell: An instance of RNNCell.
inputs: A `Tensor` of shape [time, batch_size, input_size], or a nested
tuple of such elements.
initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if
`cell.state_size` is a tuple, then this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
parallel_iterations: Positive Python int.
swap_memory: A Python boolean
sequence_length: (optional) An `int32` `Tensor` of shape [batch_size].
dtype: (optional) Expected dtype of output. If not specified, inferred from
initial_state.
Returns:
Tuple `(final_outputs, final_state)`.
final_outputs:
A `Tensor` of shape `[time, batch_size, cell.output_size]`. If
`cell.output_size` is a (possibly nested) tuple of ints or `TensorShape`
objects, then this returns a (possibly nested) tuple of Tensors matching
the corresponding shapes.
final_state:
A `Tensor`, or possibly nested tuple of Tensors, matching in length
and shapes to `initial_state`.
Raises:
ValueError: If the input depth cannot be inferred via shape inference
from the inputs.
"""
state = initial_state
assert isinstance(parallel_iterations, int), "parallel_iterations must be int"
state_size = cell.state_size
flat_input = nest.flatten(inputs)
flat_output_size = nest.flatten(cell.output_size)
# Construct an initial output
input_shape = array_ops.shape(flat_input[0])
time_steps = input_shape[0]
batch_size = _best_effort_input_batch_size(flat_input)
inputs_got_shape = tuple(input_.get_shape().with_rank_at_least(3)
for input_ in flat_input)
const_time_steps, const_batch_size = inputs_got_shape[0].as_list()[:2]
for shape in inputs_got_shape:
if not shape[2:].is_fully_defined():
raise ValueError(
"Input size (depth of inputs) must be accessible via shape inference,"
" but saw value None.")
got_time_steps = shape[0].value
got_batch_size = shape[1].value
if const_time_steps != got_time_steps:
raise ValueError(
"Time steps is not the same for all the elements in the input in a "
"batch.")
if const_batch_size != got_batch_size:
raise ValueError(
"Batch_size is not the same for all the elements in the input.")
# Prepare dynamic conditional copying of state & output
def _create_zero_arrays(size):
size = _concat(batch_size, size)
return array_ops.zeros(
array_ops.stack(size), _infer_state_dtype(dtype, state))
flat_zero_output = tuple(_create_zero_arrays(output)
for output in flat_output_size)
zero_output = nest.pack_sequence_as(structure=cell.output_size,
flat_sequence=flat_zero_output)
if sequence_length is not None:
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
else:
max_sequence_length = time_steps
time = array_ops.constant(0, dtype=dtypes.int32, name="time")
with ops.name_scope("dynamic_rnn") as scope:
base_name = scope
def _create_ta(name, element_shape, dtype):
return tensor_array_ops.TensorArray(dtype=dtype,
size=time_steps,
element_shape=element_shape,
tensor_array_name=base_name + name)
in_graph_mode = not context.executing_eagerly()
if in_graph_mode:
output_ta = tuple(
_create_ta(
"output_%d" % i,
element_shape=(tensor_shape.TensorShape([const_batch_size])
.concatenate(
_maybe_tensor_shape_from_tensor(out_size))),
dtype=_infer_state_dtype(dtype, state))
for i, out_size in enumerate(flat_output_size))
input_ta = tuple(
_create_ta(
"input_%d" % i,
element_shape=flat_input_i.shape[1:],
dtype=flat_input_i.dtype)
for i, flat_input_i in enumerate(flat_input))
input_ta = tuple(ta.unstack(input_)
for ta, input_ in zip(input_ta, flat_input))
else:
output_ta = tuple([0 for _ in range(time_steps.numpy())]
for i in range(len(flat_output_size)))
input_ta = flat_input
def _time_step(time, output_ta_t, state):
"""Take a time step of the dynamic RNN.
Args:
time: int32 scalar Tensor.
output_ta_t: List of `TensorArray`s that represent the output.
state: nested tuple of vector tensors that represent the state.
Returns:
The tuple (time + 1, output_ta_t with updated flow, new_state).
"""
if in_graph_mode:
input_t = tuple(ta.read(time) for ta in input_ta)
# Restore some shape information
for input_, shape in zip(input_t, inputs_got_shape):
input_.set_shape(shape[1:])
else:
input_t = tuple(ta[time.numpy()] for ta in input_ta)
input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)
call_cell = lambda: cell(input_t, state)
if sequence_length is not None:
(output, new_state) = _rnn_step(
time=time,
sequence_length=sequence_length,
min_sequence_length=min_sequence_length,
max_sequence_length=max_sequence_length,
zero_output=zero_output,
state=state,
call_cell=call_cell,
state_size=state_size,
skip_conditionals=True)
else:
(output, new_state) = call_cell()
# Pack state if using state tuples
output = nest.flatten(output)
if in_graph_mode:
output_ta_t = tuple(
ta.write(time, out) for ta, out in zip(output_ta_t, output))
else:
for ta, out in zip(output_ta_t, output):
ta[time.numpy()] = out
return (time + 1, output_ta_t, new_state)
if in_graph_mode:
# Make sure that we run at least 1 step, if necessary, to ensure
# the TensorArrays pick up the dynamic shape.
loop_bound = math_ops.minimum(
time_steps, math_ops.maximum(1, max_sequence_length))
else:
# Using max_sequence_length isn't currently supported in the Eager branch.
loop_bound = time_steps
_, output_final_ta, final_state = control_flow_ops.while_loop(
cond=lambda time, *_: time < loop_bound,
body=_time_step,
loop_vars=(time, output_ta, state),
parallel_iterations=parallel_iterations,
maximum_iterations=time_steps,
swap_memory=swap_memory)
# Unpack final output if not using output tuples.
if in_graph_mode:
final_outputs = tuple(ta.stack() for ta in output_final_ta)
# Restore some shape information
for output, output_size in zip(final_outputs, flat_output_size):
shape = _concat(
[const_time_steps, const_batch_size], output_size, static=True)
output.set_shape(shape)
else:
final_outputs = output_final_ta
final_outputs = nest.pack_sequence_as(
structure=cell.output_size, flat_sequence=final_outputs)
if not in_graph_mode:
final_outputs = array_ops.stack(final_outputs, axis=0)
return (final_outputs, final_state)
@tf_export("nn.raw_rnn")
def raw_rnn(cell, loop_fn,
parallel_iterations=None, swap_memory=False, scope=None):
"""Creates an `RNN` specified by RNNCell `cell` and loop function `loop_fn`.
**NOTE: This method is still in testing, and the API may change.**
This function is a more primitive version of `dynamic_rnn` that provides
more direct access to the inputs each iteration. It also provides more
control over when to start and finish reading the sequence, and
what to emit for the output.
For example, it can be used to implement the dynamic decoder of a seq2seq
model.
Instead of working with `Tensor` objects, most operations work with
`TensorArray` objects directly.
The operation of `raw_rnn`, in pseudo-code, is basically the following:
```python
time = tf.constant(0, dtype=tf.int32)
(finished, next_input, initial_state, emit_structure, loop_state) = loop_fn(
time=time, cell_output=None, cell_state=None, loop_state=None)
emit_ta = TensorArray(dynamic_size=True, dtype=initial_state.dtype)
state = initial_state
while not all(finished):
(output, cell_state) = cell(next_input, state)
(next_finished, next_input, next_state, emit, loop_state) = loop_fn(
time=time + 1, cell_output=output, cell_state=cell_state,
loop_state=loop_state)
# Emit zeros and copy forward state for minibatch entries that are finished.
state = tf.where(finished, state, next_state)
emit = tf.where(finished, tf.zeros_like(emit_structure), emit)
emit_ta = emit_ta.write(time, emit)
# If any new minibatch entries are marked as finished, mark these.
finished = tf.logical_or(finished, next_finished)
time += 1
return (emit_ta, state, loop_state)
```
with the additional properties that output and state may be (possibly nested)
tuples, as determined by `cell.output_size` and `cell.state_size`, and
as a result the final `state` and `emit_ta` may themselves be tuples.
A simple implementation of `dynamic_rnn` via `raw_rnn` looks like this:
```python
inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),
dtype=tf.float32)
sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)
inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
inputs_ta = inputs_ta.unstack(inputs)
cell = tf.contrib.rnn.LSTMCell(num_units)
def loop_fn(time, cell_output, cell_state, loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_cell_state = cell.zero_state(batch_size, tf.float32)
else:
next_cell_state = cell_state
elements_finished = (time >= sequence_length)
finished = tf.reduce_all(elements_finished)
next_input = tf.cond(
finished,
lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
lambda: inputs_ta.read(time))
next_loop_state = None
return (elements_finished, next_input, next_cell_state,
emit_output, next_loop_state)
outputs_ta, final_state, _ = raw_rnn(cell, loop_fn)
outputs = outputs_ta.stack()
```
Args:
cell: An instance of RNNCell.
loop_fn: A callable that takes inputs
`(time, cell_output, cell_state, loop_state)`
and returns the tuple
`(finished, next_input, next_cell_state, emit_output, next_loop_state)`.
Here `time` is an int32 scalar `Tensor`, `cell_output` is a
`Tensor` or (possibly nested) tuple of tensors as determined by
`cell.output_size`, and `cell_state` is a `Tensor`
or (possibly nested) tuple of tensors, as determined by the `loop_fn`
on its first call (and should match `cell.state_size`).
The outputs are: `finished`, a boolean `Tensor` of
shape `[batch_size]`, `next_input`: the next input to feed to `cell`,
`next_cell_state`: the next state to feed to `cell`,
and `emit_output`: the output to store for this iteration.
Note that `emit_output` should be a `Tensor` or (possibly nested)
tuple of tensors which is aggregated in the `emit_ta` inside the
`while_loop`. For the first call to `loop_fn`, the `emit_output`
corresponds to the `emit_structure` which is then used to determine the
size of the `zero_tensor` for the `emit_ta` (defaults to
`cell.output_size`). For the subsequent calls to the `loop_fn`, the
`emit_output` corresponds to the actual output tensor
that is to be aggregated in the `emit_ta`. The parameter `cell_state`
and output `next_cell_state` may be either a single or (possibly nested)
tuple of tensors. The parameter `loop_state` and
output `next_loop_state` may be either a single or (possibly nested) tuple
of `Tensor` and `TensorArray` objects. This last parameter
may be ignored by `loop_fn` and the return value may be `None`. If it
is not `None`, then the `loop_state` will be propagated through the RNN
loop, for use purely by `loop_fn` to keep track of its own state.
The `next_loop_state` parameter returned may be `None`.
The first call to `loop_fn` will be `time = 0`, `cell_output = None`,
`cell_state = None`, and `loop_state = None`. For this call:
The `next_cell_state` value should be the value with which to initialize
the cell's state. It may be a final state from a previous RNN or it
may be the output of `cell.zero_state()`. It should be a
(possibly nested) tuple structure of tensors.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a `TensorShape`, this must be a `Tensor` of
appropriate type and shape `[batch_size] + cell.state_size`.
If `cell.state_size` is a (possibly nested) tuple of ints or
`TensorShape`, this will be a tuple having the corresponding shapes.
The `emit_output` value may be either `None` or a (possibly nested)
tuple structure of tensors, e.g.,
`(tf.zeros(shape_0, dtype=dtype_0), tf.zeros(shape_1, dtype=dtype_1))`.
If this first `emit_output` return value is `None`,
then the `emit_ta` result of `raw_rnn` will have the same structure and
dtypes as `cell.output_size`. Otherwise `emit_ta` will have the same
structure, shapes (prepended with a `batch_size` dimension), and dtypes
as `emit_output`. The actual values returned for `emit_output` at this
initializing call are ignored. Note, this emit structure must be
consistent across all time steps.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A tuple `(emit_ta, final_state, final_loop_state)` where:
`emit_ta`: The RNN output `TensorArray`.
If `loop_fn` returns a (possibly nested) set of Tensors for
`emit_output` during initialization (inputs `time = 0`,
`cell_output = None`, and `loop_state = None`), then `emit_ta` will
have the same structure, dtypes, and shapes as `emit_output` instead.
If `loop_fn` returns `emit_output = None` during this call,
the structure of `cell.output_size` is used:
If `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `emit_ta` will be a tuple having the
same structure as `cell.output_size`, containing TensorArrays whose
elements' shapes correspond to the shape data in `cell.output_size`.
`final_state`: The final cell state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes.
`final_loop_state`: The final loop state as returned by `loop_fn`.
Raises:
TypeError: If `cell` is not an instance of RNNCell, or `loop_fn` is not
a `callable`.
"""
rnn_cell_impl.assert_like_rnncell("cell", cell)
if not callable(loop_fn):
raise TypeError("loop_fn must be a callable")
parallel_iterations = parallel_iterations or 32
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "rnn") as varscope:
if not context.executing_eagerly():
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
time = constant_op.constant(0, dtype=dtypes.int32)
(elements_finished, next_input, initial_state, emit_structure,
init_loop_state) = loop_fn(
time, None, None, None) # time, cell_output, cell_state, loop_state
flat_input = nest.flatten(next_input)
# Need a surrogate loop state for the while_loop if none is available.
loop_state = (init_loop_state if init_loop_state is not None
else constant_op.constant(0, dtype=dtypes.int32))
input_shape = [input_.get_shape() for input_ in flat_input]
static_batch_size = input_shape[0][0]
for input_shape_i in input_shape:
# Static verification that batch sizes all match
static_batch_size.merge_with(input_shape_i[0])
batch_size = static_batch_size.value
const_batch_size = batch_size
if batch_size is None:
batch_size = array_ops.shape(flat_input[0])[0]
nest.assert_same_structure(initial_state, cell.state_size)
state = initial_state
flat_state = nest.flatten(state)
flat_state = [ops.convert_to_tensor(s) for s in flat_state]
state = nest.pack_sequence_as(structure=state,
flat_sequence=flat_state)
if emit_structure is not None:
flat_emit_structure = nest.flatten(emit_structure)
flat_emit_size = [emit.shape if emit.shape.is_fully_defined() else
array_ops.shape(emit) for emit in flat_emit_structure]
flat_emit_dtypes = [emit.dtype for emit in flat_emit_structure]
else:
emit_structure = cell.output_size
flat_emit_size = nest.flatten(emit_structure)
flat_emit_dtypes = [flat_state[0].dtype] * len(flat_emit_size)
flat_emit_ta = [
tensor_array_ops.TensorArray(
dtype=dtype_i,
dynamic_size=True,
element_shape=(tensor_shape.TensorShape([const_batch_size])
.concatenate(
_maybe_tensor_shape_from_tensor(size_i))),
size=0,
name="rnn_output_%d" % i)
for i, (dtype_i, size_i)
in enumerate(zip(flat_emit_dtypes, flat_emit_size))]
emit_ta = nest.pack_sequence_as(structure=emit_structure,
flat_sequence=flat_emit_ta)
flat_zero_emit = [
array_ops.zeros(_concat(batch_size, size_i), dtype_i)
for size_i, dtype_i in zip(flat_emit_size, flat_emit_dtypes)]
zero_emit = nest.pack_sequence_as(structure=emit_structure,
flat_sequence=flat_zero_emit)
def condition(unused_time, elements_finished, *_):
return math_ops.logical_not(math_ops.reduce_all(elements_finished))
def body(time, elements_finished, current_input,
emit_ta, state, loop_state):
"""Internal while loop body for raw_rnn.
Args:
time: time scalar.
elements_finished: batch-size vector.
current_input: possibly nested tuple of input tensors.
emit_ta: possibly nested tuple of output TensorArrays.
state: possibly nested tuple of state tensors.
loop_state: possibly nested tuple of loop state tensors.
Returns:
Tuple having the same size as Args but with updated values.
"""
(next_output, cell_state) = cell(current_input, state)
nest.assert_same_structure(state, cell_state)
nest.assert_same_structure(cell.output_size, next_output)
next_time = time + 1
(next_finished, next_input, next_state, emit_output,
next_loop_state) = loop_fn(
next_time, next_output, cell_state, loop_state)
nest.assert_same_structure(state, next_state)
nest.assert_same_structure(current_input, next_input)
nest.assert_same_structure(emit_ta, emit_output)
# If loop_fn returns None for next_loop_state, just reuse the
# previous one.
loop_state = loop_state if next_loop_state is None else next_loop_state
def _copy_some_through(current, candidate):
"""Copy some tensors through via array_ops.where."""
def copy_fn(cur_i, cand_i):
# TensorArray and scalar get passed through.
if isinstance(cur_i, tensor_array_ops.TensorArray):
return cand_i
if cur_i.shape.ndims == 0:
return cand_i
# Otherwise propagate the old or the new value.
with ops.colocate_with(cand_i):
return array_ops.where(elements_finished, cur_i, cand_i)
return nest.map_structure(copy_fn, current, candidate)
emit_output = _copy_some_through(zero_emit, emit_output)
next_state = _copy_some_through(state, next_state)
emit_ta = nest.map_structure(
lambda ta, emit: ta.write(time, emit), emit_ta, emit_output)
elements_finished = math_ops.logical_or(elements_finished, next_finished)
return (next_time, elements_finished, next_input,
emit_ta, next_state, loop_state)
returned = control_flow_ops.while_loop(
condition, body, loop_vars=[
time, elements_finished, next_input,
emit_ta, state, loop_state],
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
(emit_ta, final_state, final_loop_state) = returned[-3:]
if init_loop_state is None:
final_loop_state = None
return (emit_ta, final_state, final_loop_state)
@tf_export("nn.static_rnn")
def static_rnn(cell,
inputs,
initial_state=None,
dtype=None,
sequence_length=None,
scope=None):
"""Creates a recurrent neural network specified by RNNCell `cell`.
The simplest form of RNN network generated is:
```python
state = cell.zero_state(...)
outputs = []
for input_ in inputs:
output, state = cell(input_, state)
outputs.append(output)
return (outputs, state)
```
However, a few other options are available:
An initial state can be provided.
If the sequence_length vector is provided, dynamic calculation is performed.
This method of calculation does not compute the RNN steps past the maximum
sequence length of the minibatch (thus saving computational time),
and properly propagates the state at an example's sequence length
to the final state output.
The dynamic calculation performed is, at time `t` for batch row `b`,
```python
(output, state)(b, t) =
(t >= sequence_length(b))
? (zeros(cell.output_size), states(b, sequence_length(b) - 1))
: cell(input(b, t), state(b, t - 1))
```
Args:
cell: An instance of RNNCell.
inputs: A length T list of inputs, each a `Tensor` of shape
`[batch_size, input_size]`, or a nested tuple of such elements.
initial_state: (optional) An initial state for the RNN.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
dtype: (optional) The data type for the initial state and expected output.
Required if initial_state is not provided or RNN state has a heterogeneous
dtype.
sequence_length: Specifies the length of each sequence in inputs.
An int32 or int64 vector (tensor) size `[batch_size]`, values in `[0, T)`.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
- outputs is a length T list of outputs (one for each input), or a nested
tuple of such elements.
- state is the final state
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If `inputs` is `None` or an empty list, or if the input depth
(column size) cannot be inferred from inputs via shape inference.
"""
rnn_cell_impl.assert_like_rnncell("cell", cell)
if not nest.is_sequence(inputs):
raise TypeError("inputs must be a sequence")
if not inputs:
raise ValueError("inputs must not be empty")
outputs = []
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "rnn") as varscope:
if not context.executing_eagerly():
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
# Obtain the first sequence of the input
first_input = inputs
while nest.is_sequence(first_input):
first_input = first_input[0]
# Temporarily avoid EmbeddingWrapper and seq2seq badness
# TODO(lukaszkaiser): remove EmbeddingWrapper
if first_input.get_shape().ndims != 1:
input_shape = first_input.get_shape().with_rank_at_least(2)
fixed_batch_size = input_shape[0]
flat_inputs = nest.flatten(inputs)
for flat_input in flat_inputs:
input_shape = flat_input.get_shape().with_rank_at_least(2)
batch_size, input_size = input_shape[0], input_shape[1:]
fixed_batch_size.merge_with(batch_size)
for i, size in enumerate(input_size):
if size.value is None:
raise ValueError(
"Input size (dimension %d of inputs) must be accessible via "
"shape inference, but saw value None." % i)
else:
fixed_batch_size = first_input.get_shape().with_rank_at_least(1)[0]
if fixed_batch_size.value:
batch_size = fixed_batch_size.value
else:
batch_size = array_ops.shape(first_input)[0]
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If no initial_state is provided, "
"dtype must be specified")
state = cell.zero_state(batch_size, dtype)
if sequence_length is not None: # Prepare variables
sequence_length = ops.convert_to_tensor(
sequence_length, name="sequence_length")
if sequence_length.get_shape().ndims not in (None, 1):
raise ValueError(
"sequence_length must be a vector of length batch_size")
def _create_zero_output(output_size):
# convert int to TensorShape if necessary
size = _concat(batch_size, output_size)
output = array_ops.zeros(
array_ops.stack(size), _infer_state_dtype(dtype, state))
shape = _concat(fixed_batch_size.value, output_size, static=True)
output.set_shape(tensor_shape.TensorShape(shape))
return output
output_size = cell.output_size
flat_output_size = nest.flatten(output_size)
flat_zero_output = tuple(
_create_zero_output(size) for size in flat_output_size)
zero_output = nest.pack_sequence_as(
structure=output_size, flat_sequence=flat_zero_output)
sequence_length = math_ops.to_int32(sequence_length)
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
for time, input_ in enumerate(inputs):
if time > 0:
varscope.reuse_variables()
# pylint: disable=cell-var-from-loop
call_cell = lambda: cell(input_, state)
# pylint: enable=cell-var-from-loop
if sequence_length is not None:
(output, state) = _rnn_step(
time=time,
sequence_length=sequence_length,
min_sequence_length=min_sequence_length,
max_sequence_length=max_sequence_length,
zero_output=zero_output,
state=state,
call_cell=call_cell,
state_size=cell.state_size)
else:
(output, state) = call_cell()
outputs.append(output)
return (outputs, state)
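# Editorial illustration (not part of the original file): a minimal usage
# sketch for `static_rnn`. The sizes below are hypothetical demonstration
# values; assumes TensorFlow 1.x graph mode.
def _example_static_rnn_usage():
  import tensorflow as tf  # local import to avoid a circular module import
  num_steps, batch_size, input_size, num_units = 5, 32, 16, 64
  inputs = [tf.placeholder(tf.float32, [batch_size, input_size])
            for _ in range(num_steps)]
  cell = tf.nn.rnn_cell.BasicLSTMCell(num_units)
  # `outputs` is a list of `num_steps` tensors of shape [batch_size, num_units];
  # `final_state` is an LSTMStateTuple for this cell type.
  outputs, final_state = static_rnn(cell, inputs, dtype=tf.float32)
  return outputs, final_state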
@tf_export("nn.static_state_saving_rnn")
def static_state_saving_rnn(cell,
inputs,
state_saver,
state_name,
sequence_length=None,
scope=None):
"""RNN that accepts a state saver for time-truncated RNN calculation.
Args:
cell: An instance of `RNNCell`.
inputs: A length T list of inputs, each a `Tensor` of shape
`[batch_size, input_size]`.
state_saver: A state saver object with methods `state` and `save_state`.
state_name: Python string or tuple of strings. The name to use with the
state_saver. If the cell returns tuples of states (i.e.,
`cell.state_size` is a tuple) then `state_name` should be a tuple of
strings having the same length as `cell.state_size`. Otherwise it should
be a single string.
sequence_length: (optional) An int32/int64 vector size [batch_size].
See the documentation for rnn() for more details about sequence_length.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
outputs is a length T list of outputs (one for each input)
state is the final state
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If `inputs` is `None` or an empty list, or if the arity and
type of `state_name` does not match that of `cell.state_size`.
"""
state_size = cell.state_size
state_is_tuple = nest.is_sequence(state_size)
state_name_tuple = nest.is_sequence(state_name)
if state_is_tuple != state_name_tuple:
raise ValueError("state_name should be the same type as cell.state_size. "
"state_name: %s, cell.state_size: %s" % (str(state_name),
str(state_size)))
if state_is_tuple:
state_name_flat = nest.flatten(state_name)
state_size_flat = nest.flatten(state_size)
if len(state_name_flat) != len(state_size_flat):
raise ValueError("#elems(state_name) != #elems(state_size): %d vs. %d" %
(len(state_name_flat), len(state_size_flat)))
initial_state = nest.pack_sequence_as(
structure=state_size,
flat_sequence=[state_saver.state(s) for s in state_name_flat])
else:
initial_state = state_saver.state(state_name)
(outputs, state) = static_rnn(
cell,
inputs,
initial_state=initial_state,
sequence_length=sequence_length,
scope=scope)
if state_is_tuple:
flat_state = nest.flatten(state)
state_name = nest.flatten(state_name)
save_state = [
state_saver.save_state(name, substate)
for name, substate in zip(state_name, flat_state)
]
else:
save_state = [state_saver.save_state(state_name, state)]
with ops.control_dependencies(save_state):
last_output = outputs[-1]
flat_last_output = nest.flatten(last_output)
flat_last_output = [
array_ops.identity(output) for output in flat_last_output
]
outputs[-1] = nest.pack_sequence_as(
structure=last_output, flat_sequence=flat_last_output)
return (outputs, state)
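# Editorial illustration (not part of the original file): the minimal
# `state`/`save_state` interface that `static_state_saving_rnn` expects from
# its `state_saver` argument. This toy saver assumes a single, non-tuple
# integer `cell.state_size`; real pipelines would typically use the
# state-saving utilities in `tf.contrib.training` instead.
class _ToyStateSaver(object):
  """Keeps one RNN state tensor in a non-trainable variable."""

  def __init__(self, batch_size, state_size, dtype):
    import tensorflow as tf  # local import to avoid a circular module import
    self._tf = tf
    self._var = tf.get_variable(
        "toy_saved_state", [batch_size, state_size], dtype=dtype,
        initializer=tf.zeros_initializer(), trainable=False)

  def state(self, name):
    return self._var

  def save_state(self, name, value):
    return self._tf.assign(self._var, value)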
@tf_export("nn.static_bidirectional_rnn")
def static_bidirectional_rnn(cell_fw,
cell_bw,
inputs,
initial_state_fw=None,
initial_state_bw=None,
dtype=None,
sequence_length=None,
scope=None):
"""Creates a bidirectional recurrent neural network.
Similar to the unidirectional case above (rnn) but takes input and builds
independent forward and backward RNNs with the final forward and backward
outputs depth-concatenated, such that the output will have the format
[time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of
forward and backward cell must match. The initial state for both directions
is zero by default (but can be set optionally) and no intermediate states are
ever returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, input_size], or a nested tuple of such elements.
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
`[batch_size, cell_fw.state_size]`.
If `cell_fw.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.
initial_state_bw: (optional) Same as for `initial_state_fw`, but using
the corresponding properties of `cell_bw`.
dtype: (optional) The data type for the initial state. Required if
either of the initial states are not provided.
sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
containing the actual lengths for each of the sequences.
scope: VariableScope for the created subgraph; defaults to
"bidirectional_rnn"
Returns:
A tuple (outputs, output_state_fw, output_state_bw) where:
outputs is a length `T` list of outputs (one for each input), which
are depth-concatenated forward and backward outputs.
output_state_fw is the final state of the forward rnn.
output_state_bw is the final state of the backward rnn.
Raises:
TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
ValueError: If inputs is None or an empty list.
"""
rnn_cell_impl.assert_like_rnncell("cell_fw", cell_fw)
rnn_cell_impl.assert_like_rnncell("cell_bw", cell_bw)
if not nest.is_sequence(inputs):
raise TypeError("inputs must be a sequence")
if not inputs:
raise ValueError("inputs must not be empty")
with vs.variable_scope(scope or "bidirectional_rnn"):
# Forward direction
with vs.variable_scope("fw") as fw_scope:
output_fw, output_state_fw = static_rnn(
cell_fw,
inputs,
initial_state_fw,
dtype,
sequence_length,
scope=fw_scope)
# Backward direction
with vs.variable_scope("bw") as bw_scope:
reversed_inputs = _reverse_seq(inputs, sequence_length)
tmp, output_state_bw = static_rnn(
cell_bw,
reversed_inputs,
initial_state_bw,
dtype,
sequence_length,
scope=bw_scope)
output_bw = _reverse_seq(tmp, sequence_length)
# Concat each of the forward/backward outputs
flat_output_fw = nest.flatten(output_fw)
flat_output_bw = nest.flatten(output_bw)
flat_outputs = tuple(
array_ops.concat([fw, bw], 1)
for fw, bw in zip(flat_output_fw, flat_output_bw))
outputs = nest.pack_sequence_as(
structure=output_fw, flat_sequence=flat_outputs)
return (outputs, output_state_fw, output_state_bw)
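# Editorial illustration (not part of the original file): a bidirectional
# unroll over placeholder inputs. Sizes are hypothetical demonstration values;
# assumes TensorFlow 1.x graph mode.
def _example_static_bidirectional_rnn_usage():
  import tensorflow as tf  # local import to avoid a circular module import
  num_steps, batch_size, input_size, num_units = 5, 32, 16, 64
  inputs = [tf.placeholder(tf.float32, [batch_size, input_size])
            for _ in range(num_steps)]
  cell_fw = tf.nn.rnn_cell.GRUCell(num_units)
  cell_bw = tf.nn.rnn_cell.GRUCell(num_units)
  # Each element of `outputs` is shaped [batch_size, 2 * num_units].
  outputs, state_fw, state_bw = static_bidirectional_rnn(
      cell_fw, cell_bw, inputs, dtype=tf.float32)
  return outputs, state_fw, state_bw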
|
apache-2.0
|
nvoron23/brython
|
src/Lib/pyre.py
|
115
|
15252
|
#
# Secret Labs' Regular Expression Engine
#
# re-compatible interface for the sre matching engine
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# This version of the SRE library can be redistributed under CNRI's
# Python 1.6 license. For any other use, please contact Secret Labs
# AB ([email protected]).
#
# Portions of this engine have been developed in cooperation with
# CNRI. Hewlett-Packard provided funding for 1.6 integration and
# other compatibility work.
#
r"""Support for regular expressions (RE).
This module provides regular expression matching operations similar to
those found in Perl. It supports both 8-bit and Unicode strings; both
the pattern and the strings being processed can contain null bytes and
characters outside the US ASCII range.
Regular expressions can contain both special and ordinary characters.
Most ordinary characters, like "A", "a", or "0", are the simplest
regular expressions; they simply match themselves. You can
concatenate ordinary characters, so last matches the string 'last'.
The special characters are:
"." Matches any character except a newline.
"^" Matches the start of the string.
"$" Matches the end of the string or just before the newline at
the end of the string.
"*" Matches 0 or more (greedy) repetitions of the preceding RE.
Greedy means that it will match as many repetitions as possible.
"+" Matches 1 or more (greedy) repetitions of the preceding RE.
"?" Matches 0 or 1 (greedy) of the preceding RE.
*?,+?,?? Non-greedy versions of the previous three special characters.
{m,n} Matches from m to n repetitions of the preceding RE.
{m,n}? Non-greedy version of the above.
"\\" Either escapes special characters or signals a special sequence.
[] Indicates a set of characters.
A "^" as the first character indicates a complementing set.
"|" A|B, creates an RE that will match either A or B.
(...) Matches the RE inside the parentheses.
The contents can be retrieved or matched later in the string.
(?aiLmsux) Set the A, I, L, M, S, U, or X flag for the RE (see below).
(?:...) Non-grouping version of regular parentheses.
(?P<name>...) The substring matched by the group is accessible by name.
(?P=name) Matches the text matched earlier by the group named name.
(?#...) A comment; ignored.
(?=...) Matches if ... matches next, but doesn't consume the string.
(?!...) Matches if ... doesn't match next.
(?<=...) Matches if preceded by ... (must be fixed length).
(?<!...) Matches if not preceded by ... (must be fixed length).
(?(id/name)yes|no) Matches yes pattern if the group with id/name matched,
the (optional) no pattern otherwise.
The special sequences consist of "\\" and a character from the list
below. If the ordinary character is not on the list, then the
resulting RE will match the second character.
\number Matches the contents of the group of the same number.
\A Matches only at the start of the string.
\Z Matches only at the end of the string.
\b Matches the empty string, but only at the start or end of a word.
\B Matches the empty string, but not at the start or end of a word.
\d Matches any decimal digit; equivalent to the set [0-9] in
bytes patterns or string patterns with the ASCII flag.
In string patterns without the ASCII flag, it will match the whole
range of Unicode digits.
\D Matches any non-digit character; equivalent to [^\d].
\s Matches any whitespace character; equivalent to [ \t\n\r\f\v] in
bytes patterns or string patterns with the ASCII flag.
In string patterns without the ASCII flag, it will match the whole
range of Unicode whitespace characters.
\S Matches any non-whitespace character; equivalent to [^\s].
\w Matches any alphanumeric character; equivalent to [a-zA-Z0-9_]
in bytes patterns or string patterns with the ASCII flag.
In string patterns without the ASCII flag, it will match the
range of Unicode alphanumeric characters (letters plus digits
plus underscore).
With LOCALE, it will match the set [0-9_] plus characters defined
as letters for the current locale.
\W Matches the complement of \w.
\\ Matches a literal backslash.
This module exports the following functions:
match Match a regular expression pattern to the beginning of a string.
search Search a string for the presence of a pattern.
sub Substitute occurrences of a pattern found in a string.
subn Same as sub, but also return the number of substitutions made.
split Split a string by the occurrences of a pattern.
findall Find all occurrences of a pattern in a string.
finditer Return an iterator yielding a match object for each match.
compile Compile a pattern into a RegexObject.
purge Clear the regular expression cache.
escape Backslash all non-alphanumerics in a string.
Some of the functions in this module take flags as optional parameters:
A ASCII For string patterns, make \w, \W, \b, \B, \d, \D
match the corresponding ASCII character categories
(rather than the whole Unicode categories, which is the
default).
For bytes patterns, this flag is the only available
behaviour and needn't be specified.
I IGNORECASE Perform case-insensitive matching.
L LOCALE Make \w, \W, \b, \B, dependent on the current locale.
M MULTILINE "^" matches the beginning of lines (after a newline)
as well as the string.
"$" matches the end of lines (before a newline) as well
as the end of the string.
S DOTALL "." matches any character at all, including the newline.
X VERBOSE Ignore whitespace and comments for nicer looking RE's.
U UNICODE For compatibility only. Ignored for string patterns (it
is the default), and forbidden for bytes patterns.
This module also defines an exception 'error'.
"""
import sys
import sre_compile
import sre_parse
import functools
# public symbols
__all__ = [ "match", "search", "sub", "subn", "split", "findall",
"compile", "purge", "template", "escape", "A", "I", "L", "M", "S", "X",
"U", "ASCII", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "VERBOSE",
"UNICODE", "error" ]
__version__ = "2.2.1"
# flags
A = ASCII = sre_compile.SRE_FLAG_ASCII # assume ascii "locale"
I = IGNORECASE = sre_compile.SRE_FLAG_IGNORECASE # ignore case
L = LOCALE = sre_compile.SRE_FLAG_LOCALE # assume current 8-bit locale
U = UNICODE = sre_compile.SRE_FLAG_UNICODE # assume unicode "locale"
M = MULTILINE = sre_compile.SRE_FLAG_MULTILINE # make anchors look for newline
S = DOTALL = sre_compile.SRE_FLAG_DOTALL # make dot match newline
X = VERBOSE = sre_compile.SRE_FLAG_VERBOSE # ignore whitespace and comments
# sre extensions (experimental, don't rely on these)
T = TEMPLATE = sre_compile.SRE_FLAG_TEMPLATE # disable backtracking
DEBUG = sre_compile.SRE_FLAG_DEBUG # dump pattern after compilation
# sre exception
error = sre_compile.error
# --------------------------------------------------------------------
# public interface
def match(pattern, string, flags=0):
"""Try to apply the pattern at the start of the string, returning
a match object, or None if no match was found."""
return _compile(pattern, flags).match(string)
def search(pattern, string, flags=0):
"""Scan through string looking for a match to the pattern, returning
a match object, or None if no match was found."""
return _compile(pattern, flags).search(string)
def sub(pattern, repl, string, count=0, flags=0):
"""Return the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in string by the
replacement repl. repl can be either a string or a callable;
if a string, backslash escapes in it are processed. If it is
a callable, it's passed the match object and must return
a replacement string to be used."""
return _compile(pattern, flags).sub(repl, string, count)
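# Editorial illustration (not part of the original file): `repl` may be a
# plain string (backslash escapes processed) or a callable that receives the
# match object and returns the replacement text.
def _example_sub_usage():
    assert sub(r"\d+", "#", "a1b22c333") == "a#b#c#"
    assert sub(r"\d+", lambda m: str(len(m.group())), "a1b22c333") == "a1b2c3"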
def subn(pattern, repl, string, count=0, flags=0):
"""Return a 2-tuple containing (new_string, number).
new_string is the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in the source
string by the replacement repl. number is the number of
substitutions that were made. repl can be either a string or a
callable; if a string, backslash escapes in it are processed.
If it is a callable, it's passed the match object and must
return a replacement string to be used."""
return _compile(pattern, flags).subn(repl, string, count)
def split(pattern, string, maxsplit=0, flags=0):
"""Split the source string by the occurrences of the pattern,
returning a list containing the resulting substrings. If
capturing parentheses are used in pattern, then the text of all
groups in the pattern are also returned as part of the resulting
list. If maxsplit is nonzero, at most maxsplit splits occur,
and the remainder of the string is returned as the final element
of the list."""
return _compile(pattern, flags).split(string, maxsplit)
def findall(pattern, string, flags=0):
"""Return a list of all non-overlapping matches in the string.
If one or more capturing groups are present in the pattern, return
a list of groups; this will be a list of tuples if the pattern
has more than one group.
Empty matches are included in the result."""
return _compile(pattern, flags).findall(string)
if sys.hexversion >= 0x02020000:
__all__.append("finditer")
def finditer(pattern, string, flags=0):
"""Return an iterator over all non-overlapping matches in the
string. For each match, the iterator returns a match object.
Empty matches are included in the result."""
return _compile(pattern, flags).finditer(string)
def compile(pattern, flags=0):
"Compile a regular expression pattern, returning a pattern object."
#print("_re.py:214")
return _compile(pattern, flags)
def purge():
"Clear the regular expression caches"
_cache.clear()
_cache_repl.clear()
def template(pattern, flags=0):
"Compile a template pattern, returning a pattern object"
return _compile(pattern, flags|T)
_alphanum_str = frozenset(
"_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890")
_alphanum_bytes = frozenset(
b"_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890")
def escape(pattern):
"""
Escape all the characters in pattern except ASCII letters, numbers and '_'.
"""
if isinstance(pattern, str):
alphanum = _alphanum_str
s = list(pattern)
for i, c in enumerate(pattern):
if c not in alphanum:
if c == "\000":
s[i] = "\\000"
else:
s[i] = "\\" + c
return "".join(s)
else:
alphanum = _alphanum_bytes
s = []
esc = ord(b"\\")
for c in pattern:
if c in alphanum:
s.append(c)
else:
if c == 0:
s.extend(b"\\000")
else:
s.append(esc)
s.append(c)
return bytes(s)
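# Editorial illustration (not part of the original file): everything except
# ASCII letters, digits and '_' is backslash-escaped, for str and bytes alike.
def _example_escape_usage():
    assert escape("www.python.org") == "www\\.python\\.org"
    assert escape(b"1+1") == b"1\\+1"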
# --------------------------------------------------------------------
# internals
_cache = {}
_cache_repl = {}
_pattern_type = type(sre_compile.compile("", 0))
_MAXCACHE = 512
def _compile(pattern, flags):
# internal: compile pattern
try:
#fixme brython
#return _cache[type(pattern), pattern, flags]
return _cache["%s:%s:%s" % (type(pattern), pattern, flags)]
except KeyError:
pass
#print(pattern)
if isinstance(pattern, _pattern_type):
if flags:
raise ValueError(
"Cannot process flags argument with a compiled pattern")
return pattern
if not sre_compile.isstring(pattern):
raise TypeError("first argument must be string or compiled pattern")
p = sre_compile.compile(pattern, flags)
#print('_compile', p)
if len(_cache) >= _MAXCACHE:
_cache.clear()
#fix me brython
#_cache[type(pattern), pattern, flags] = p
_cache["%s:%s:%s" % (type(pattern), pattern, flags)]= p
return p
def _compile_repl(repl, pattern):
# internal: compile replacement pattern
try:
#fix me brython
#return _cache_repl[repl, pattern]
return _cache_repl["%s:%s" % (repl, pattern)]
except KeyError:
pass
p = sre_parse.parse_template(repl, pattern)
if len(_cache_repl) >= _MAXCACHE:
_cache_repl.clear()
_cache_repl["%s:%s" % (repl, pattern)] = p
#fix me brython
#_cache_repl[repl, pattern] = p
return p
def _expand(pattern, match, template):
# internal: match.expand implementation hook
template = sre_parse.parse_template(template, pattern)
return sre_parse.expand_template(template, match)
def _subx(pattern, template):
# internal: pattern.sub/subn implementation helper
template = _compile_repl(template, pattern)
if not template[0] and len(template[1]) == 1:
# literal replacement
return template[1][0]
def filter(match, template=template):
return sre_parse.expand_template(template, match)
return filter
# register myself for pickling
import copyreg
def _pickle(p):
return _compile, (p.pattern, p.flags)
copyreg.pickle(_pattern_type, _pickle, _compile)
# --------------------------------------------------------------------
# experimental stuff (see python-dev discussions for details)
class Scanner:
def __init__(self, lexicon, flags=0):
from sre_constants import BRANCH, SUBPATTERN
self.lexicon = lexicon
# combine phrases into a compound pattern
p = []
s = sre_parse.Pattern()
s.flags = flags
for phrase, action in lexicon:
p.append(sre_parse.SubPattern(s, [
(SUBPATTERN, (len(p)+1, sre_parse.parse(phrase, flags))),
]))
s.groups = len(p)+1
p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
self.scanner = sre_compile.compile(p)
def scan(self, string):
result = []
append = result.append
match = self.scanner.scanner(string).match
i = 0
while 1:
m = match()
if not m:
break
j = m.end()
if i == j:
break
action = self.lexicon[m.lastindex-1][1]
if callable(action):
self.match = m
action = action(self, m.group())
if action is not None:
append(action)
i = j
return result, string[i:]
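# Editorial illustration (not part of the original file): a tiny lexer built
# on the experimental Scanner class above.
def _example_scanner_usage():
    scanner = Scanner([
        (r"\d+", lambda s, tok: int(tok)),   # numbers -> ints
        (r"[a-z]+", lambda s, tok: tok),     # words -> themselves
        (r"\s+", None),                      # skip whitespace
    ])
    tokens, remainder = scanner.scan("sum 12 and 34")
    assert tokens == ["sum", 12, "and", 34]
    assert remainder == ""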
|
bsd-3-clause
|
sirrice/scorpion
|
scorpion/sigmod/overlap.py
|
1
|
5112
|
import math
import pdb
import numpy as np
import Orange
from Orange.feature import Type as OType
class OverlapPenalty(object):
def __init__(self, domain, cdists, ddists, granularity=100):
"""
Args:
domain: Orange.Domain object
"""
self.domain = domain
self.cdists = cdists
self.ddists = ddists
self.granularity = granularity
self.funcs = {}
self.allcounts = {}
self.disc_nvals = {}
self.setup()
def setup(self):
funcs = {}
allcounts = {}
for attr in self.domain:
if attr.var_type == OType.Discrete:
func, counts = self.setup_discrete_attribute(attr)
else:
func, counts = self.setup_continuous_attribute(attr)
funcs[attr.name] = func
allcounts[attr.name] = counts
self.funcs, self.allcounts = funcs, allcounts
def setup_continuous_attribute(self, attr):
distribution = self.cdists[attr.name]
minv, maxv = distribution.min, distribution.max
if minv == maxv:
func = lambda v: 0
counts = np.zeros(1)
else:
def make_func(minv, block, gran):
def f(v):
return int(min(gran, max(0, math.ceil((v-minv)/block))))
return f
block = (maxv - minv) / float(self.granularity)
counts = np.zeros(self.granularity+1)
func = make_func(minv, block, self.granularity)
return func, counts
def setup_discrete_attribute(self, attr):
vals = self.ddists[attr.name].keys()
d = {val: idx for idx, val in enumerate(vals)}
# values will be passed in as indices into vals
def func(v):
if v < 0 or v > len(vals) or v is None:
return len(vals)
return v
counts = np.zeros(len(vals)+1)
self.disc_nvals[attr.name] = len(vals)
return func, counts
def reset_counts(self):
for counts in self.allcounts.values():
counts[:] = 0
def continuous_idxs(self, attr, minv, maxv):
if isinstance(attr, basestring):
name = attr
else:
name = attr.name
dist = self.cdists[name]
if minv <= dist.min and maxv >= dist.max:
return []
func = self.funcs[name]
return np.arange(func(minv),func(maxv)+1)
def discrete_idxs(self, attr, vals):
if isinstance(attr, basestring):
name = attr
else:
name = attr.name
if len(vals) == self.disc_nvals[name]:
return np.array([])
func = self.funcs[name]
return np.array(map(func, vals))
def __call__(self, clusters, min_weight=0.7):
"""
return weights to multiply each cluster's influence by
"""
penalties = self.penalties(clusters)
weights = 1. - penalties
weights[weights <= min_weight] = min_weight
return weights
def penalties(self, clusters):
"""
Compute a penalty for each cluster
Return values are normalized to [0, 1]
"""
self.reset_counts()
penalties = np.array(map(self.penalty, clusters))
if penalties.max() == 0:
return penalties
penalties /= penalties.max()
return penalties
def penalty(self, cluster):
totals = {}
for col, (minv, maxv) in zip(cluster.cols, zip(*cluster.bbox)):
idxs = self.continuous_idxs(col, minv, maxv)
if len(idxs):
totals[col] = self.allcounts[col][idxs]
self.allcounts[col][idxs] += .5
for col, vals in cluster.discretes.iteritems():
idxs = self.discrete_idxs(col, vals)
if len(idxs):
totals[col] = self.allcounts[col][idxs]
self.allcounts[col][idxs] += 1
smooth = lambda counts: max(0, (counts - 0.5).max())
return sum(map(smooth, totals.values()))
def create_clusters(n):
clusters = []
for i in xrange(n):
minv = random.random() * 70
maxv = minv + 10
bbox = ((minv,), (maxv,))
a = ['a'+str(j) for j in range(3)]
x = ['x'+str(j) for j in range(10)]
discretes = {
'a': nprand.choice(a, 2, replace=False),
'x': nprand.choice(x, 3, replace=False)
}
cluster = Cluster(bbox, 0, ['c'], discretes=discretes)
clusters.append(cluster)
return clusters
if __name__ == '__main__':
import pdb
import random
import timeit
import numpy.random as nprand
from scorpion.bottomup.cluster import Cluster
from scorpion.arch import *
rows = []
cols = ['a', 'x', 'c']
for i in xrange(100):
a = 'a'+str(i%3)
b = 'x'+str(i%10)
c = 100 * random.random()
rows.append([a, b, c])
table = create_orange_table(rows, cols)
domain = table.domain
cdists = dict(zip(cols, Orange.statistics.basic.Domain(table)))
ddists = dict(zip(cols, Orange.statistics.distribution.Domain(table)))
overlap = OverlapPenalty(domain, cdists, ddists)
clusters = create_clusters(20)
weights = overlap(clusters)
print weights
import profile, pstats, StringIO
for n in [10, 20, 50]:
clusters = create_clusters(n)
f = lambda: overlap(clusters)
print n, '\t', timeit.timeit(f, number=100)
continue
pr = profile.Profile()
pr.run("print n, '\t', timeit.timeit(f, number=100)")
s = StringIO.StringIO()
ps = pstats.Stats(pr, stream=s).sort_stats('time')
ps.print_stats()
print s.getvalue()
|
mit
|
nunogt/tempest
|
tempest/api/object_storage/test_object_temp_url_negative.py
|
15
|
3914
|
# Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import hmac
import time
from six.moves.urllib import parse as urlparse
from tempest_lib import exceptions as lib_exc
from tempest.api.object_storage import base
from tempest.common.utils import data_utils
from tempest import test
class ObjectTempUrlNegativeTest(base.BaseObjectTest):
metadata = {}
containers = []
@classmethod
def resource_setup(cls):
super(ObjectTempUrlNegativeTest, cls).resource_setup()
cls.container_name = data_utils.rand_name(name='TestContainer')
cls.container_client.create_container(cls.container_name)
cls.containers = [cls.container_name]
# update account metadata
cls.key = 'Meta'
cls.metadata = {'Temp-URL-Key': cls.key}
cls.account_client.create_account_metadata(metadata=cls.metadata)
cls.account_client_metadata, _ = \
cls.account_client.list_account_metadata()
@classmethod
def resource_cleanup(cls):
resp, _ = cls.account_client.delete_account_metadata(
metadata=cls.metadata)
cls.delete_containers(cls.containers)
super(ObjectTempUrlNegativeTest, cls).resource_cleanup()
def setUp(self):
super(ObjectTempUrlNegativeTest, self).setUp()
# make sure the metadata has been set
self.assertIn('x-account-meta-temp-url-key',
self.account_client_metadata)
self.assertEqual(
self.account_client_metadata['x-account-meta-temp-url-key'],
self.key)
# create object
self.object_name = data_utils.rand_name(name='ObjectTemp')
self.content = data_utils.arbitrary_string(size=len(self.object_name),
base_text=self.object_name)
self.object_client.create_object(self.container_name,
self.object_name, self.content)
def _get_expiry_date(self, expiration_time=1000):
return int(time.time() + expiration_time)
def _get_temp_url(self, container, object_name, method, expires,
key):
"""Create the temporary URL."""
path = "%s/%s/%s" % (
urlparse.urlparse(self.object_client.base_url).path,
container, object_name)
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
url = "%s/%s?temp_url_sig=%s&temp_url_expires=%s" % (container,
object_name,
sig, expires)
return url
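    # Editorial illustration (not part of the original test): the helper above
    # signs "<METHOD>\n<expires>\n<path>" with HMAC-SHA1 and returns a relative
    # URL such as (signature abbreviated, values hypothetical):
    #
    #   cont/obj?temp_url_sig=0123abcd...&temp_url_expires=1439999999
    #
    # which is then passed to the object client's GET in the tests below.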
@test.attr(type=['negative'])
@test.idempotent_id('5a583aca-c804-41ba-9d9a-e7be132bdf0b')
@test.requires_ext(extension='tempurl', service='object')
def test_get_object_after_expiration_time(self):
expires = self._get_expiry_date(1)
# get a temp URL for the created object
url = self._get_temp_url(self.container_name,
self.object_name, "GET",
expires, self.key)
# temp URL is valid for 1 second, let's wait 2
time.sleep(2)
self.assertRaises(lib_exc.Unauthorized,
self.object_client.get, url)
|
apache-2.0
|
YingHsuan/termite_data_server
|
web2py/anyserver.py
|
19
|
12802
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <[email protected]>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
This file is based, although a rewrite, on MIT-licensed code from the Bottle web framework.
"""
import os
import sys
import optparse
import urllib
path = os.path.dirname(os.path.abspath(__file__))
os.chdir(path)
sys.path = [path] + [p for p in sys.path if not p == path]
class Servers:
@staticmethod
def cgi(app, address=None, **options):
from wsgiref.handlers import CGIHandler
CGIHandler().run(app) # Just ignore host and port here
@staticmethod
def flup(app, address, **options):
import flup.server.fcgi
flup.server.fcgi.WSGIServer(app, bindAddress=address).run()
@staticmethod
def wsgiref(app, address, **options): # pragma: no cover
from wsgiref.simple_server import make_server, WSGIRequestHandler
options = {}
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw):
pass
options['handler_class'] = QuietHandler
srv = make_server(address[0], address[1], app, **options)
srv.serve_forever()
@staticmethod
def cherrypy(app, address, **options):
from cherrypy import wsgiserver
server = wsgiserver.CherryPyWSGIServer(address, app)
server.start()
@staticmethod
def rocket(app, address, **options):
from gluon.rocket import CherryPyWSGIServer
server = CherryPyWSGIServer(address, app)
server.start()
@staticmethod
def rocket_with_repoze_profiler(app, address, **options):
from gluon.rocket import CherryPyWSGIServer
from repoze.profile.profiler import AccumulatingProfileMiddleware
from gluon.settings import global_settings
global_settings.web2py_crontype = 'none'
wrapped = AccumulatingProfileMiddleware(
app,
log_filename='wsgi.prof',
discard_first_request=True,
flush_at_shutdown=True,
path='/__profile__'
)
server = CherryPyWSGIServer(address, wrapped)
server.start()
@staticmethod
def paste(app, address, **options):
options = {}
from paste import httpserver
from paste.translogger import TransLogger
httpserver.serve(app, host=address[0], port=address[1], **options)
@staticmethod
def fapws(app, address, **options):
import fapws._evwsgi as evwsgi
from fapws import base
evwsgi.start(address[0], str(address[1]))
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return app(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
@staticmethod
def gevent(app, address, **options):
options = options['options']
workers = options.workers
from gevent import pywsgi
from gevent.pool import Pool
pywsgi.WSGIServer(address, app, spawn=workers and Pool(
int(options.workers)) or 'default', log=None).serve_forever()
@staticmethod
def bjoern(app, address, **options):
import bjoern
bjoern.run(app, *address)
@staticmethod
def tornado(app, address, **options):
import tornado.wsgi
import tornado.httpserver
import tornado.ioloop
container = tornado.wsgi.WSGIContainer(app)
server = tornado.httpserver.HTTPServer(container)
server.listen(address=address[0], port=address[1])
tornado.ioloop.IOLoop.instance().start()
@staticmethod
def twisted(app, address, **options):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, app))
reactor.listenTCP(address[1], factory, interface=address[0])
reactor.run()
@staticmethod
def diesel(app, address, **options):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(app, port=address[1])
app.run()
@staticmethod
def gunicorn(app, address, **options):
options = {}
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % address}
config.update(options)
sys.argv = ['anyserver.py']
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return app
g = GunicornApplication()
g.run()
@staticmethod
def eventlet(app, address, **options):
from eventlet import wsgi, listen
wsgi.server(listen(address), app)
@staticmethod
def mongrel2(app, address, **options):
import uuid
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from mongrel2 import handler
conn = handler.Connection(str(uuid.uuid4()),
"tcp://127.0.0.1:9997",
"tcp://127.0.0.1:9996")
mongrel2_handler(app, conn, debug=False)
@staticmethod
def motor(app, address, **options):
#https://github.com/rpedroso/motor
import motor
app = motor.WSGIContainer(app)
http_server = motor.HTTPServer(app)
http_server.listen(address=address[0], port=address[1])
#http_server.start(2)
motor.IOLoop.instance().start()
@staticmethod
def pulsar(app, address, **options):
from pulsar.apps import wsgi
sys.argv = ['anyserver.py']
s = wsgi.WSGIServer(callable=app, bind="%s:%d" % address)
s.start()
def mongrel2_handler(application, conn, debug=False):
"""
Based on :
https://github.com/berry/Mongrel2-WSGI-Handler/blob/master/wsgi-handler.py
WSGI handler based on the Python wsgiref SimpleHandler.
A WSGI application should return an iterable of StringTypes.
Any encoding must be handled by the WSGI application itself.
"""
from wsgiref.handlers import SimpleHandler
try:
import cStringIO as StringIO
except:
import StringIO
# TODO - this wsgi handler executes the application and renders a page
# in memory completely before returning it as a response to the client.
# Thus, it does not "stream" the result back to the client. It should be
# possible though. The SimpleHandler accepts file-like stream objects. So,
# it should be just a matter of connecting 0MQ requests/response streams to
# the SimpleHandler requests and response streams. However, the Python API
# for Mongrel2 doesn't seem to support file-like stream objects for requests
# and responses. Unless I have missed something.
while True:
if debug:
print "WAITING FOR REQUEST"
# receive a request
req = conn.recv()
if debug:
print "REQUEST BODY: %r\n" % req.body
if req.is_disconnect():
if debug:
print "DISCONNECT"
continue # effectively ignore the disconnect from the client
# Set a couple of environment attributes a.k.a. header attributes
# that are a must according to PEP 333
environ = req.headers
environ['SERVER_PROTOCOL'] = 'HTTP/1.1' # SimpleHandler expects a server_protocol, let's assume it is HTTP 1.1
environ['REQUEST_METHOD'] = environ['METHOD']
if ':' in environ['Host']:
environ['SERVER_NAME'] = environ['Host'].split(':')[0]
environ['SERVER_PORT'] = environ['Host'].split(':')[1]
else:
environ['SERVER_NAME'] = environ['Host']
environ['SERVER_PORT'] = ''
environ['SCRIPT_NAME'] = '' # empty for now
environ['PATH_INFO'] = urllib.unquote(environ['PATH'])
if '?' in environ['URI']:
environ['QUERY_STRING'] = environ['URI'].split('?')[1]
else:
environ['QUERY_STRING'] = ''
if 'Content-Length' in environ:
environ['CONTENT_LENGTH'] = environ[
'Content-Length'] # necessary for POST to work with Django
environ['wsgi.input'] = req.body
if debug:
print "ENVIRON: %r\n" % environ
# SimpleHandler needs file-like stream objects for
# requests, errors and responses
reqIO = StringIO.StringIO(req.body)
errIO = StringIO.StringIO()
respIO = StringIO.StringIO()
# execute the application
handler = SimpleHandler(reqIO, respIO, errIO, environ,
multithread=False, multiprocess=False)
handler.run(application)
# Get the response and filter out the response (=data) itself,
# the response headers,
# the response status code and the response status description
response = respIO.getvalue()
response = response.split("\r\n")
data = response[-1]
headers = dict([r.split(": ") for r in response[1:-2]])
code = response[0][9:12]
status = response[0][13:]
# strip BOM's from response data
# Especially the WSGI handler from Django seems to generate them (2 actually, huh?)
# a BOM isn't really necessary and causes HTML parsing errors in Chrome and Safari
# See also: http://www.xs4all.nl/~mechiel/projects/bomstrip/
# Although I still find this an ugly hack, it does work.
data = data.replace('\xef\xbb\xbf', '')
# Get the generated errors
errors = errIO.getvalue()
# return the response
if debug:
print "RESPONSE: %r\n" % response
if errors:
if debug:
print "ERRORS: %r" % errors
data = "%s\r\n\r\n%s" % (data, errors)
conn.reply_http(
req, data, code=code, status=status, headers=headers)
def run(servername, ip, port, softcron=True, logging=False, profiler=None,
options=None):
if servername == 'gevent':
from gevent import monkey
monkey.patch_all()
elif servername == 'eventlet':
import eventlet
eventlet.monkey_patch()
import gluon.main
if logging:
application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase,
logfilename='httpserver.log',
profiler_dir=profiler)
else:
application = gluon.main.wsgibase
if softcron:
from gluon.settings import global_settings
global_settings.web2py_crontype = 'soft'
getattr(Servers, servername)(application, (ip, int(port)), options=options)
def main():
usage = "python anyserver.py -s tornado -i 127.0.0.1 -p 8000 -l -P"
try:
version = open('VERSION','r')
except IOError:
version = ''
parser = optparse.OptionParser(usage, None, optparse.Option, version)
parser.add_option('-l',
'--logging',
action='store_true',
default=False,
dest='logging',
help='log into httpserver.log')
parser.add_option('-P',
'--profiler',
default=False,
dest='profiler_dir',
help='profiler dir')
servers = ', '.join(x for x in dir(Servers) if not x[0] == '_')
parser.add_option('-s',
'--server',
default='rocket',
dest='server',
help='server name (%s)' % servers)
parser.add_option('-i',
'--ip',
default='127.0.0.1',
dest='ip',
help='ip address')
parser.add_option('-p',
'--port',
default='8000',
dest='port',
help='port number')
parser.add_option('-w',
'--workers',
default=None,
dest='workers',
help='number of workers')
(options, args) = parser.parse_args()
print 'starting %s on %s:%s...' % (
options.server, options.ip, options.port)
run(options.server, options.ip, options.port,
logging=options.logging, profiler=options.profiler_dir,
options=options)
if __name__ == '__main__':
main()
|
bsd-3-clause
|
UTSA-ICS/keystone-kerberos
|
keystone/common/cache/backends/noop.py
|
30
|
1426
|
# Copyright 2013 Metacloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from dogpile.cache import api
NO_VALUE = api.NO_VALUE
class NoopCacheBackend(api.CacheBackend):
"""A no op backend as a default caching backend.
The no op backend is provided as the default caching backend for keystone
to ensure that ``dogpile.cache.memory`` is not used in any real-world
circumstances unintentionally. ``dogpile.cache.memory`` does not have a
mechanism to clean up its internal dict and therefore could cause runaway
memory utilization.
"""
def __init__(self, *args):
return
def get(self, key):
return NO_VALUE
def get_multi(self, keys):
return [NO_VALUE for x in keys]
def set(self, key, value):
return
def set_multi(self, mapping):
return
def delete(self, key):
return
def delete_multi(self, keys):
return
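# Editorial illustration (not part of the original file): a sketch of wiring a
# dogpile.cache region to a backend such as this one. The backend name
# 'demo.noop' is a hypothetical registration key used only for demonstration;
# keystone performs its own registration in its cache configuration code.
def _example_noop_region():
    from dogpile.cache import make_region, register_backend

    register_backend('demo.noop',
                     'keystone.common.cache.backends.noop',
                     'NoopCacheBackend')
    region = make_region().configure('demo.noop')
    region.set('key', 'value')      # silently discarded by the no-op backend
    return region.get('key')        # returns dogpile's NO_VALUE marker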
|
apache-2.0
|
batxes/4Cin
|
Six_zebra_models/Six_zebra_models_final_output_0.1_-0.1_13000/mtx1_models/Six_zebra_models10386.py
|
4
|
13923
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((3585.8, 7200.6, 14942.8), (0.7, 0.7, 0.7), 507.685)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((2977.45, 7061.07, 15550.8), (0.7, 0.7, 0.7), 479.978)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((3110.63, 6758.32, 13639.7), (0.7, 0.7, 0.7), 681.834)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((3228.44, 6399.28, 11333.9), (0.7, 0.7, 0.7), 522.532)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((3318.05, 6290.53, 10599.5), (0, 1, 0), 751.925)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((2604.11, 4490.57, 11469.8), (0.7, 0.7, 0.7), 437.001)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((3286.3, 3863.68, 9788.61), (0.7, 0.7, 0.7), 710.767)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((2627.16, 2388.19, 9167.52), (0.7, 0.7, 0.7), 762.077)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((2608.01, 1818.1, 7728.38), (0.7, 0.7, 0.7), 726.799)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((2913.35, 1143.77, 6049.26), (0.7, 0.7, 0.7), 885.508)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((2244.08, 2138.39, 4769.3), (0.7, 0.7, 0.7), 778.489)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((514.268, 1387.08, 3777.46), (0.7, 0.7, 0.7), 790.333)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((-1170.82, 611.729, 2871.51), (0.7, 0.7, 0.7), 707.721)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((-1383.18, 1393.16, 4263.86), (0.7, 0.7, 0.7), 651.166)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((-208.337, 766.795, 3281.98), (0.7, 0.7, 0.7), 708.61)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((1340.64, 745.14, 3327.33), (0.7, 0.7, 0.7), 490.595)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((2229.43, 1057.18, 4376.53), (0.7, 0.7, 0.7), 591.565)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((3340.46, 1389.94, 5401.85), (0.7, 0.7, 0.7), 581.287)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((2942.4, 584.758, 7004.69), (0.7, 0.7, 0.7), 789.529)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((4362.55, 323.029, 7627.69), (0.7, 0.7, 0.7), 623.587)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((6097.25, -343.957, 7806.74), (0.7, 0.7, 0.7), 1083.56)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((7291.79, -1586.8, 8151.77), (0.7, 0.7, 0.7), 504.258)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((7374.53, -165.59, 7889.5), (0.7, 0.7, 0.7), 805.519)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((7076.61, 1350.15, 6465.77), (0.7, 0.7, 0.7), 631.708)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((6863.16, 2244.29, 4528.61), (0.7, 0.7, 0.7), 805.942)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((6774.12, 2613.47, 3540.19), (1, 0.7, 0), 672.697)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((7743.63, 4996.09, 4406.82), (0.7, 0.7, 0.7), 797.863)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((8803.1, 6490.05, 4117.96), (1, 0.7, 0), 735.682)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((9635.4, 6440.39, 5042.43), (0.7, 0.7, 0.7), 602.14)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((11540.6, 5850.69, 6233.24), (0.7, 0.7, 0.7), 954.796)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((10779.4, 6295.87, 6275.92), (0.7, 0.7, 0.7), 1021.88)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((11219.4, 7680.07, 5790.3), (0.7, 0.7, 0.7), 909.323)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((12392.7, 9551.83, 6362.97), (0.7, 0.7, 0.7), 621.049)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((11499.7, 10590.3, 6899.07), (0.7, 0.7, 0.7), 525.154)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((10570.2, 11037.2, 8026.82), (0.7, 0.7, 0.7), 890.246)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((10391.1, 11929.7, 9546.55), (0.7, 0.7, 0.7), 671.216)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((10643.3, 11696.6, 11197.1), (0.7, 0.7, 0.7), 662.672)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((11306.6, 10252, 10856.9), (0.7, 0.7, 0.7), 646.682)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((12312.4, 10588, 9729.56), (0.7, 0.7, 0.7), 769.945)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((11372.7, 10334.6, 7984.84), (0.7, 0.7, 0.7), 606.92)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((11337.4, 11514.6, 7558.81), (0.7, 0.7, 0.7), 622.571)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((10693.3, 10305.6, 7533.48), (0.7, 0.7, 0.7), 466.865)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((10381, 10186.1, 8161.07), (0.7, 0.7, 0.7), 682.933)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((10639, 10490.5, 7701.12), (0.7, 0.7, 0.7), 809.326)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((11220.8, 9370.32, 6358.93), (0.7, 0.7, 0.7), 796.72)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((9204.24, 8192.55, 4653.42), (0.7, 0.7, 0.7), 870.026)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((7835.01, 8766.65, 3506.54), (0.7, 0.7, 0.7), 909.577)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((6906.14, 9454.26, 3413.46), (0, 1, 0), 500.536)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((6742.93, 11408.2, 3086.19), (0.7, 0.7, 0.7), 725.276)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((7175.13, 13812.2, 2021.73), (0.7, 0.7, 0.7), 570.331)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((8593.68, 13940.5, 2902.01), (0.7, 0.7, 0.7), 492.203)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((9718.43, 11311.6, 2279.74), (0, 1, 0), 547.7)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((9491.94, 11346.7, 3040.47), (0.7, 0.7, 0.7), 581.921)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((10108, 12323, 4562.03), (0.7, 0.7, 0.7), 555.314)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((9983.78, 12782.6, 6033.37), (0.7, 0.7, 0.7), 404.219)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((8875.77, 11646.9, 6987.74), (0.7, 0.7, 0.7), 764.234)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
|
gpl-3.0
|
jorik041/phantomjs
|
src/breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/internal/decoder.py
|
261
|
25883
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Code for decoding protocol buffer primitives.
This code is very similar to encoder.py -- read the docs for that module first.
A "decoder" is a function with the signature:
Decode(buffer, pos, end, message, field_dict)
The arguments are:
buffer: The string containing the encoded message.
pos: The current position in the string.
end: The position in the string where the current message ends. May be
less than len(buffer) if we're reading a sub-message.
message: The message object into which we're parsing.
field_dict: message._fields (avoids a hashtable lookup).
The decoder reads the field and stores it into field_dict, returning the new
buffer position. A decoder for a repeated field may proactively decode all of
the elements of that field, if they appear consecutively.
Note that decoders may throw any of the following:
IndexError: Indicates a truncated message.
struct.error: Unpacking of a fixed-width field failed.
message.DecodeError: Other errors.
Decoders are expected to raise an exception if they are called with pos > end.
This allows callers to be lax about bounds checking: it's fine to read past
"end" as long as you are sure that someone else will notice and throw an
exception later on.
Something up the call stack is expected to catch IndexError and struct.error
and convert them to message.DecodeError.
Decoders are constructed using decoder constructors with the signature:
MakeDecoder(field_number, is_repeated, is_packed, key, new_default)
The arguments are:
field_number: The field number of the field we want to decode.
is_repeated: Is the field a repeated field? (bool)
is_packed: Is the field a packed field? (bool)
key: The key to use when looking up the field within field_dict.
(This is actually the FieldDescriptor but nothing in this
file should depend on that.)
new_default: A function which takes a message object as a parameter and
returns a new instance of the default value for this field.
(This is called for repeated fields and sub-messages, when an
instance does not already exist.)
As with encoders, we define a decoder constructor for every type of field.
Then, for every field of every message class we construct an actual decoder.
That decoder goes into a dict indexed by tag, so when we decode a message
we repeatedly read a tag, look up the corresponding decoder, and invoke it.
"""
__author__ = '[email protected] (Kenton Varda)'
import struct
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import message
# This will overflow and thus become IEEE-754 "infinity". We would use
# "float('inf')" but it doesn't work on Windows pre-Python-2.6.
_POS_INF = 1e10000
_NEG_INF = -_POS_INF
_NAN = _POS_INF * 0
# This is not for optimization, but rather to avoid conflicts with local
# variables named "message".
_DecodeError = message.DecodeError
def _VarintDecoder(mask):
"""Return an encoder for a basic varint value (does not include tag).
Decoded values will be bitwise-anded with the given mask before being
returned, e.g. to limit them to 32 bits. The returned decoder does not
take the usual "end" parameter -- the caller is expected to do bounds checking
after the fact (often the caller can defer such checking until later). The
decoder returns a (value, new_pos) pair.
"""
local_ord = ord
def DecodeVarint(buffer, pos):
result = 0
shift = 0
while 1:
b = local_ord(buffer[pos])
result |= ((b & 0x7f) << shift)
pos += 1
if not (b & 0x80):
result &= mask
return (result, pos)
shift += 7
if shift >= 64:
raise _DecodeError('Too many bytes when decoding varint.')
return DecodeVarint
def _SignedVarintDecoder(mask):
"""Like _VarintDecoder() but decodes signed values."""
local_ord = ord
def DecodeVarint(buffer, pos):
result = 0
shift = 0
while 1:
b = local_ord(buffer[pos])
result |= ((b & 0x7f) << shift)
pos += 1
if not (b & 0x80):
if result > 0x7fffffffffffffff:
result -= (1 << 64)
result |= ~mask
else:
result &= mask
return (result, pos)
shift += 7
if shift >= 64:
raise _DecodeError('Too many bytes when decoding varint.')
return DecodeVarint
_DecodeVarint = _VarintDecoder((1 << 64) - 1)
_DecodeSignedVarint = _SignedVarintDecoder((1 << 64) - 1)
# Use these versions for values which must be limited to 32 bits.
_DecodeVarint32 = _VarintDecoder((1 << 32) - 1)
_DecodeSignedVarint32 = _SignedVarintDecoder((1 << 32) - 1)
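# Illustrative worked example (not part of the original module): the two-byte
# varint '\xac\x02' decodes to 300, since 0xac contributes its low seven bits
# (44) and 0x02 contributes 2 << 7 == 256.
#
#   value, new_pos = _DecodeVarint('\xac\x02', 0)
#   assert (value, new_pos) == (300, 2)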
def ReadTag(buffer, pos):
"""Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple.
We return the raw bytes of the tag rather than decoding them. The raw
bytes can then be used to look up the proper decoder. This effectively allows
us to trade some work that would be done in pure-python (decoding a varint)
for work that is done in C (searching for a byte string in a hash table).
In a low-level language it would be much cheaper to decode the varint and
use that, but not in Python.
"""
start = pos
while ord(buffer[pos]) & 0x80:
pos += 1
pos += 1
return (buffer[start:pos], pos)
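# Illustrative worked example (not part of the original module): for field
# number 1 with wire type 0 (varint), the encoded tag is the single byte
# '\x08', so ReadTag returns that raw byte along with the next position.
#
#   tag_bytes, new_pos = ReadTag('\x08\x96\x01', 0)
#   assert (tag_bytes, new_pos) == ('\x08', 1)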
# --------------------------------------------------------------------
def _SimpleDecoder(wire_type, decode_value):
"""Return a constructor for a decoder for fields of a particular type.
Args:
wire_type: The field's wire type.
decode_value: A function which decodes an individual value, e.g.
_DecodeVarint()
"""
def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default):
if is_packed:
local_DecodeVarint = _DecodeVarint
def DecodePackedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
(endpoint, pos) = local_DecodeVarint(buffer, pos)
endpoint += pos
if endpoint > end:
raise _DecodeError('Truncated message.')
while pos < endpoint:
(element, pos) = decode_value(buffer, pos)
value.append(element)
if pos > endpoint:
del value[-1] # Discard corrupt value.
raise _DecodeError('Packed element was truncated.')
return pos
return DecodePackedField
elif is_repeated:
tag_bytes = encoder.TagBytes(field_number, wire_type)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(element, new_pos) = decode_value(buffer, pos)
value.append(element)
# Predict that the next tag is another copy of the same repeated
# field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos >= end:
# Prediction failed. Return.
if new_pos > end:
raise _DecodeError('Truncated message.')
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(field_dict[key], pos) = decode_value(buffer, pos)
if pos > end:
del field_dict[key] # Discard corrupt value.
raise _DecodeError('Truncated message.')
return pos
return DecodeField
return SpecificDecoder
def _ModifiedDecoder(wire_type, decode_value, modify_value):
"""Like SimpleDecoder but additionally invokes modify_value on every value
before storing it. Usually modify_value is ZigZagDecode.
"""
# Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but
# not enough to make a significant difference.
def InnerDecode(buffer, pos):
(result, new_pos) = decode_value(buffer, pos)
return (modify_value(result), new_pos)
return _SimpleDecoder(wire_type, InnerDecode)
def _StructPackDecoder(wire_type, format):
"""Return a constructor for a decoder for a fixed-width field.
Args:
wire_type: The field's wire type.
format: The format string to pass to struct.unpack().
"""
value_size = struct.calcsize(format)
local_unpack = struct.unpack
# Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but
# not enough to make a significant difference.
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
def InnerDecode(buffer, pos):
new_pos = pos + value_size
result = local_unpack(format, buffer[pos:new_pos])[0]
return (result, new_pos)
return _SimpleDecoder(wire_type, InnerDecode)
def _FloatDecoder():
"""Returns a decoder for a float field.
This code works around a bug in struct.unpack for non-finite 32-bit
floating-point values.
"""
local_unpack = struct.unpack
def InnerDecode(buffer, pos):
# We expect a 32-bit value in little-endian byte order. Bit 1 is the sign
# bit, bits 2-9 represent the exponent, and bits 10-32 are the significand.
new_pos = pos + 4
float_bytes = buffer[pos:new_pos]
# If this value has all its exponent bits set, then it's non-finite.
# In Python 2.4, struct.unpack will convert it to a finite 64-bit value.
# To avoid that, we parse it specially.
if ((float_bytes[3] in '\x7F\xFF')
and (float_bytes[2] >= '\x80')):
# If at least one significand bit is set...
if float_bytes[0:3] != '\x00\x00\x80':
return (_NAN, new_pos)
# If sign bit is set...
if float_bytes[3] == '\xFF':
return (_NEG_INF, new_pos)
return (_POS_INF, new_pos)
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
result = local_unpack('<f', float_bytes)[0]
return (result, new_pos)
return _SimpleDecoder(wire_format.WIRETYPE_FIXED32, InnerDecode)
def _DoubleDecoder():
"""Returns a decoder for a double field.
This code works around a bug in struct.unpack for not-a-number.
"""
local_unpack = struct.unpack
def InnerDecode(buffer, pos):
# We expect a 64-bit value in little-endian byte order. Bit 1 is the sign
# bit, bits 2-12 represent the exponent, and bits 13-64 are the significand.
new_pos = pos + 8
double_bytes = buffer[pos:new_pos]
# If this value has all its exponent bits set and at least one significand
# bit set, it's not a number. In Python 2.4, struct.unpack will treat it
# as inf or -inf. To avoid that, we treat it specially.
if ((double_bytes[7] in '\x7F\xFF')
and (double_bytes[6] >= '\xF0')
and (double_bytes[0:7] != '\x00\x00\x00\x00\x00\x00\xF0')):
return (_NAN, new_pos)
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
result = local_unpack('<d', double_bytes)[0]
return (result, new_pos)
return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode)
# --------------------------------------------------------------------
Int32Decoder = EnumDecoder = _SimpleDecoder(
wire_format.WIRETYPE_VARINT, _DecodeSignedVarint32)
Int64Decoder = _SimpleDecoder(
wire_format.WIRETYPE_VARINT, _DecodeSignedVarint)
UInt32Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint32)
UInt64Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint)
SInt32Decoder = _ModifiedDecoder(
wire_format.WIRETYPE_VARINT, _DecodeVarint32, wire_format.ZigZagDecode)
SInt64Decoder = _ModifiedDecoder(
wire_format.WIRETYPE_VARINT, _DecodeVarint, wire_format.ZigZagDecode)
# Note that Python conveniently guarantees that when using the '<' prefix on
# formats, they will also have the same size across all platforms (as opposed
# to without the prefix, where their sizes depend on the C compiler's basic
# type sizes).
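# (Added note, not in the original source.)  For example, struct.calcsize('<I')
# is always 4 and struct.calcsize('<Q') is always 8 on every platform, which is
# exactly what the fixed32 and fixed64 wire types require.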
Fixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<I')
Fixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<Q')
SFixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<i')
SFixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<q')
FloatDecoder = _FloatDecoder()
DoubleDecoder = _DoubleDecoder()
BoolDecoder = _ModifiedDecoder(
wire_format.WIRETYPE_VARINT, _DecodeVarint, bool)
def StringDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a string field."""
local_DecodeVarint = _DecodeVarint
local_unicode = unicode
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
value.append(local_unicode(buffer[pos:new_pos], 'utf-8'))
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
field_dict[key] = local_unicode(buffer[pos:new_pos], 'utf-8')
return new_pos
return DecodeField
def BytesDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a bytes field."""
local_DecodeVarint = _DecodeVarint
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
value.append(buffer[pos:new_pos])
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
field_dict[key] = buffer[pos:new_pos]
return new_pos
return DecodeField
def GroupDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a group field."""
end_tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_END_GROUP)
end_tag_len = len(end_tag_bytes)
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_START_GROUP)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
# Read sub-message.
pos = value.add()._InternalParse(buffer, pos, end)
# Read end tag.
new_pos = pos+end_tag_len
if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
raise _DecodeError('Missing group end tag.')
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
# Read sub-message.
pos = value._InternalParse(buffer, pos, end)
# Read end tag.
new_pos = pos+end_tag_len
if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
raise _DecodeError('Missing group end tag.')
return new_pos
return DecodeField
def MessageDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a message field."""
local_DecodeVarint = _DecodeVarint
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
# Read length.
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated message.')
# Read sub-message.
if value.add()._InternalParse(buffer, pos, new_pos) != new_pos:
# The only reason _InternalParse would return early is if it
# encountered an end-group tag.
raise _DecodeError('Unexpected end-group tag.')
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
# Read length.
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated message.')
# Read sub-message.
if value._InternalParse(buffer, pos, new_pos) != new_pos:
# The only reason _InternalParse would return early is if it encountered
# an end-group tag.
raise _DecodeError('Unexpected end-group tag.')
return new_pos
return DecodeField
# --------------------------------------------------------------------
MESSAGE_SET_ITEM_TAG = encoder.TagBytes(1, wire_format.WIRETYPE_START_GROUP)
def MessageSetItemDecoder(extensions_by_number):
"""Returns a decoder for a MessageSet item.
The parameter is the _extensions_by_number map for the message class.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
}
"""
type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT)
message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)
item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP)
local_ReadTag = ReadTag
local_DecodeVarint = _DecodeVarint
local_SkipField = SkipField
def DecodeItem(buffer, pos, end, message, field_dict):
type_id = -1
message_start = -1
message_end = -1
# Technically, type_id and message can appear in any order, so we need
# a little loop here.
while 1:
(tag_bytes, pos) = local_ReadTag(buffer, pos)
if tag_bytes == type_id_tag_bytes:
(type_id, pos) = local_DecodeVarint(buffer, pos)
elif tag_bytes == message_tag_bytes:
(size, message_start) = local_DecodeVarint(buffer, pos)
pos = message_end = message_start + size
elif tag_bytes == item_end_tag_bytes:
break
else:
pos = SkipField(buffer, pos, end, tag_bytes)
if pos == -1:
raise _DecodeError('Missing group end tag.')
if pos > end:
raise _DecodeError('Truncated message.')
if type_id == -1:
raise _DecodeError('MessageSet item missing type_id.')
if message_start == -1:
raise _DecodeError('MessageSet item missing message.')
extension = extensions_by_number.get(type_id)
if extension is not None:
value = field_dict.get(extension)
if value is None:
value = field_dict.setdefault(
extension, extension.message_type._concrete_class())
if value._InternalParse(buffer, message_start,message_end) != message_end:
# The only reason _InternalParse would return early is if it encountered
# an end-group tag.
raise _DecodeError('Unexpected end-group tag.')
return pos
return DecodeItem
# --------------------------------------------------------------------
# Optimization is not as heavy here because calls to SkipField() are rare,
# except for handling end-group tags.
def _SkipVarint(buffer, pos, end):
"""Skip a varint value. Returns the new position."""
while ord(buffer[pos]) & 0x80:
pos += 1
pos += 1
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _SkipFixed64(buffer, pos, end):
"""Skip a fixed64 value. Returns the new position."""
pos += 8
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _SkipLengthDelimited(buffer, pos, end):
"""Skip a length-delimited value. Returns the new position."""
(size, pos) = _DecodeVarint(buffer, pos)
pos += size
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _SkipGroup(buffer, pos, end):
"""Skip sub-group. Returns the new position."""
while 1:
(tag_bytes, pos) = ReadTag(buffer, pos)
new_pos = SkipField(buffer, pos, end, tag_bytes)
if new_pos == -1:
return pos
pos = new_pos
def _EndGroup(buffer, pos, end):
"""Skipping an END_GROUP tag returns -1 to tell the parent loop to break."""
return -1
def _SkipFixed32(buffer, pos, end):
"""Skip a fixed32 value. Returns the new position."""
pos += 4
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _RaiseInvalidWireType(buffer, pos, end):
"""Skip function for unknown wire types. Raises an exception."""
raise _DecodeError('Tag had invalid wire type.')
def _FieldSkipper():
"""Constructs the SkipField function."""
WIRETYPE_TO_SKIPPER = [
_SkipVarint,
_SkipFixed64,
_SkipLengthDelimited,
_SkipGroup,
_EndGroup,
_SkipFixed32,
_RaiseInvalidWireType,
_RaiseInvalidWireType,
]
wiretype_mask = wire_format.TAG_TYPE_MASK
local_ord = ord
def SkipField(buffer, pos, end, tag_bytes):
"""Skips a field with the specified tag.
|pos| should point to the byte immediately after the tag.
Returns:
The new position (after the tag value), or -1 if the tag is an end-group
tag (in which case the calling loop should break).
"""
# The wire type is always in the first byte since varints are little-endian.
wire_type = local_ord(tag_bytes[0]) & wiretype_mask
return WIRETYPE_TO_SKIPPER[wire_type](buffer, pos, end)
return SkipField
SkipField = _FieldSkipper()
|
bsd-3-clause
|
lreis2415/SEIMS
|
seims/__init__.py
|
1
|
1350
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""@package pySEIMS
Python APIs for SEIMS
Preprocess, postprocess, parameters sensitivity, calibration, and scenario_analysis
-------------------
author : Liangjun Zhu, Junzhi Liu
copyright : (C) 2018-2020 by Lreis, IGSNRR, CAS
email : [email protected]
******************************************************************************
* *
* SEIMS is distributed for Research and/or Education only, any commercial *
* purpose will be FORBIDDEN. SEIMS is an open-source project, but without *
* ANY WARRANTY, WITHOUT even the implied warranty of MERCHANTABILITY or *
* FITNESS for A PARTICULAR PURPOSE. *
* See the GNU General Public License for more details. *
* *
******************************************************************************/
"""
from __future__ import absolute_import
__author__ = "SEIMS Team"
__version__ = "2.2"
__revision__ = "2.2.0"
__all__ = ["preprocess", "postprocess", "calibration", "scenario_analysis",
"parameters_sensitivity", "test"]
|
gpl-3.0
|
matthiasdiener/spack
|
var/spack/repos/builtin/packages/plumed/package.py
|
4
|
7313
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import collections
from spack import *
class Plumed(AutotoolsPackage):
"""PLUMED is an open source library for free energy calculations in
molecular systems which works together with some of the most popular
molecular dynamics engines.
Free energy calculations can be performed as a function of many order
parameters with a particular focus on biological problems, using state
of the art methods such as metadynamics, umbrella sampling and
Jarzynski-equation based steered MD.
The software, written in C++, can be easily interfaced with both fortran
and C/C++ codes.
"""
homepage = 'http://www.plumed.org/'
url = 'https://github.com/plumed/plumed2/archive/v2.2.3.tar.gz'
version('2.4.1', '6558e1fd02fc46e847ab6a3fb5ed5411')
version('2.3.5', '3cc5f025cb6f5d963f3c778f15c77d44')
version('2.3.3', '9f5729e406e79a06a16976fcb020e024')
version('2.3.0', 'a9b5728f115dca8f0519111f1f5a6fa5')
version('2.2.4', 'afb00da25a3fbd47acf377e53342059d')
version('2.2.3', 'a6e3863e40aac07eb8cf739cbd14ecf8')
# Variants. PLUMED by default builds a number of optional modules.
# The ones listed here are not built by default for various reasons,
# such as stability, lack of testing, or lack of demand.
# FIXME: This needs to be an optional
variant(
'optional_modules',
default='all',
values=lambda x: True,
description='String that is used to build optional modules'
)
variant('shared', default=True, description='Builds shared libraries')
variant('mpi', default=True, description='Activates MPI support')
variant('gsl', default=True, description='Activates GSL support')
# Dependencies. LAPACK and BLAS are recommended but not essential.
depends_on('zlib')
depends_on('blas')
depends_on('lapack')
depends_on('mpi', when='+mpi')
depends_on('gsl', when='+gsl')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
force_autoreconf = True
parallel = False
def apply_patch(self, other):
# The name of MD engines differ slightly from the ones used in Spack
format_strings = collections.defaultdict(
lambda: '{0.name}-{0.version}'
)
format_strings['espresso'] = 'q{0.name}-{0.version}'
format_strings['amber'] = '{0.name}{0.version}'
get_md = lambda x: format_strings[x.name].format(x)
# Get available patches
plumed_patch = Executable(
join_path(self.spec.prefix.bin, 'plumed-patch')
)
out = plumed_patch('-q', '-l', output=str)
available = out.split(':')[-1].split()
# Check that `other` is among the patchable applications
if get_md(other) not in available:
            msg = '{0.name}@{0.version} is not among the MD engines'
msg += ' that can be patched by {1.name}@{1.version}.\n'
msg += 'Supported engines are:\n'
for x in available:
msg += x + '\n'
raise RuntimeError(msg.format(other, self.spec))
# Call plumed-patch to patch executables
target = format_strings[other.name].format(other)
plumed_patch('-p', '-e', target)
def setup_dependent_package(self, module, dependent_spec):
# Make plumed visible from dependent packages
module.plumed = dependent_spec['plumed'].command
@run_before('autoreconf')
def filter_gslcblas(self):
# This part is needed to avoid linking with gsl cblas
# interface which will mask the cblas interface
# provided by optimized libraries due to linking order
filter_file('-lgslcblas', '', 'configure.ac')
def configure_args(self):
spec = self.spec
# From plumed docs :
# Also consider that this is different with respect to what some other
# configure script does in that variables such as MPICXX are
# completely ignored here. In case you work on a machine where CXX is
# set to a serial compiler and MPICXX to a MPI compiler, to compile
# with MPI you should use:
#
# > ./configure CXX="$MPICXX"
# The configure.ac script may detect the wrong linker for
# LD_RO which causes issues at link time. Here we work around
# the issue saying we have no LD_RO executable.
configure_opts = ['--disable-ld-r']
# If using MPI then ensure the correct compiler wrapper is used.
if '+mpi' in spec:
configure_opts.extend([
'--enable-mpi',
'CXX={0}'.format(spec['mpi'].mpicxx)
])
# If the MPI dependency is provided by the intel-mpi package then
# the following additional argument is required to allow it to
# build.
if 'intel-mpi' in spec:
configure_opts.extend([
'STATIC_LIBS=-mt_mpi'
])
# Set flags to help find gsl
if '+gsl' in self.spec:
gsl_libs = self.spec['gsl'].libs
blas_libs = self.spec['blas'].libs
configure_opts.append('LDFLAGS={0}'.format(
(gsl_libs + blas_libs).ld_flags
))
# Additional arguments
configure_opts.extend([
'--enable-shared={0}'.format('yes' if '+shared' in spec else 'no'),
'--enable-gsl={0}'.format('yes' if '+gsl' in spec else 'no')
])
# Construct list of optional modules
# If we have specified any optional modules then add the argument to
# enable or disable them.
optional_modules = self.spec.variants['optional_modules'].value
if optional_modules:
# From 'configure --help' @2.3:
# all/none/reset or : separated list such as
# +crystallization:-bias default: reset
configure_opts.append(
'--enable-modules={0}'.format(optional_modules)
)
return configure_opts
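# Illustrative command-line usage sketch (not part of the original package
# file); assumes a working Spack installation with this package available:
#
#   spack install plumed@2.4.1 +mpi +gsl optional_modules=all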
|
lgpl-2.1
|
sovietspy2/uzletiProject
|
python/Lib/encodings/iso2022_jp_2004.py
|
61
|
1112
|
#
# iso2022_jp_2004.py: Python Unicode Codec for ISO2022_JP_2004
#
# Written by Hye-Shik Chang <[email protected]>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_2004')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='iso2022_jp_2004',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
|
gpl-3.0
|
forYali/yali
|
setup.py
|
1
|
4733
|
#!/usr/bin/env python
#
# Copyright (C) 2005-2010 TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
import os
import glob
import shutil
import sipconfig
from distutils.core import setup, Extension
from distutils.sysconfig import get_python_lib
from distutils.cmd import Command
from distutils.command.build import build
from distutils.command.clean import clean
from distutils.command.install import install
from distutils.spawn import find_executable, spawn
I18N_DOMAIN = "yali"
I18N_LANGUAGES = ["tr", "nl", "it", "fr", "de", "pt_BR", "es", "pl", "ca", "sv", "hu", "ru", "hr"]
def qt_ui_files():
ui_files = "yali/gui/Ui/*.ui"
return glob.glob(ui_files)
def py_file_name(ui_file):
return os.path.splitext(ui_file)[0] + '.py'
class YaliBuild(build):
def changeQRCPath(self, ui_file):
py_file = py_file_name(ui_file)
lines = open(py_file, "r").readlines()
replaced = open(py_file, "w")
for line in lines:
if line.find("data_rc") != -1:
continue
replaced.write(line)
def compileUI(self, ui_file):
pyqt_configuration = sipconfig.Configuration()
pyuic_exe = find_executable('py2uic5', pyqt_configuration.default_bin_dir)
if not pyuic_exe:
pyuic_exe = find_executable('py2uic5')
cmd = [pyuic_exe, ui_file, '-o']
cmd.append(py_file_name(ui_file))
#cmd.append("-g \"yali\"")
os.system(' '.join(cmd))
def run(self):
for ui_file in qt_ui_files():
print ui_file
self.compileUI(ui_file)
self.changeQRCPath(ui_file)
build.run(self)
class YaliClean(clean):
def run(self):
clean.run(self)
for ui_file in qt_ui_files():
ui_file = py_file_name(ui_file)
if os.path.exists(ui_file):
os.unlink(ui_file)
if os.path.exists("build"):
shutil.rmtree("build")
class YaliUninstall(Command):
user_options = [ ]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
yali_dir = os.path.join(get_python_lib(), "yali")
if os.path.exists(yali_dir):
print "removing: ", yali_dir
shutil.rmtree(yali_dir)
conf_dir = "/etc/yali"
if os.path.exists(conf_dir):
print "removing: ", conf_dir
shutil.rmtree(conf_dir)
if os.path.exists("/usr/share/applications/yali.desktop"):
print "removing: rest of installation",
os.unlink("/usr/share/applications/yali.desktop")
os.unlink("/usr/bin/yali-bin")
os.unlink("/usr/bin/start-yali")
os.unlink("/usr/bin/bindYali")
os.unlink("/lib/udev/rules.d/70-yali.rules")
class I18nInstall(install):
def run(self):
install.run(self)
for lang in I18N_LANGUAGES:
print "Installing '%s' translations..." % lang
os.popen("msgfmt po/%s.po -o po/%s.mo" % (lang, lang))
if not self.root:
self.root = "/"
destpath = os.path.join(self.root, "usr/share/locale/%s/LC_MESSAGES" % lang)
try:
os.makedirs(destpath)
except:
pass
shutil.copy("po/%s.mo" % lang, os.path.join(destpath, "%s.mo" % I18N_DOMAIN))
setup(name="yali",
version= "3.0.3",
description="YALI (Yet Another Linux Installer)",
long_description="Pisi Linux System Installer.",
license="Latest GNU GPL version",
author="Pisi Linux Developers",
author_email="[email protected]",
url="https://github.com/pisilinux/project",
packages = ['yali', 'yali.gui', 'yali.gui.Ui', 'yali.storage',\
'yali.storage.devices', 'yali.storage.formats', 'yali.storage.library'],
data_files = [('/etc/yali', glob.glob("conf/*")),
('/lib/udev/rules.d', ["70-yali.rules"]),
('/usr/share/applications', ["yali.desktop"])],
scripts = ['yali-bin', 'start-yali', 'bindYali'],
ext_modules = [Extension('yali._sysutils',
sources = ['yali/_sysutils.c'],
libraries = ["ext2fs"],
extra_compile_args = ['-Wall'])],
cmdclass = {
'build' : YaliBuild,
'clean' : YaliClean,
'install': I18nInstall,
'uninstall': YaliUninstall
}
)
|
gpl-2.0
|
qspin/qtaste
|
doc/src/docbkx/scripts/lib/PyGithub/github/PullRequest.py
|
72
|
23575
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Michael Stead <[email protected]> #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# Copyright 2013 martinqt <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.PaginatedList
import github.PullRequestMergeStatus
import github.NamedUser
import github.PullRequestPart
import github.PullRequestComment
import github.File
import github.IssueComment
import github.Commit
class PullRequest(github.GithubObject.CompletableGithubObject):
"""
This class represents PullRequests. The reference can be found here http://developer.github.com/v3/pulls/
"""
@property
def additions(self):
"""
:type: integer
"""
self._completeIfNotSet(self._additions)
return self._additions.value
@property
def assignee(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._assignee)
return self._assignee.value
@property
def base(self):
"""
:type: :class:`github.PullRequestPart.PullRequestPart`
"""
self._completeIfNotSet(self._base)
return self._base.value
@property
def body(self):
"""
:type: string
"""
self._completeIfNotSet(self._body)
return self._body.value
@property
def changed_files(self):
"""
:type: integer
"""
self._completeIfNotSet(self._changed_files)
return self._changed_files.value
@property
def closed_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._closed_at)
return self._closed_at.value
@property
def comments(self):
"""
:type: integer
"""
self._completeIfNotSet(self._comments)
return self._comments.value
@property
def comments_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._comments_url)
return self._comments_url.value
@property
def commits(self):
"""
:type: integer
"""
self._completeIfNotSet(self._commits)
return self._commits.value
@property
def commits_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._commits_url)
return self._commits_url.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def deletions(self):
"""
:type: integer
"""
self._completeIfNotSet(self._deletions)
return self._deletions.value
@property
def diff_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._diff_url)
return self._diff_url.value
@property
def head(self):
"""
:type: :class:`github.PullRequestPart.PullRequestPart`
"""
self._completeIfNotSet(self._head)
return self._head.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def issue_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._issue_url)
return self._issue_url.value
@property
def merge_commit_sha(self):
"""
:type: string
"""
self._completeIfNotSet(self._merge_commit_sha)
return self._merge_commit_sha.value
@property
def mergeable(self):
"""
:type: bool
"""
self._completeIfNotSet(self._mergeable)
return self._mergeable.value
@property
def mergeable_state(self):
"""
:type: string
"""
self._completeIfNotSet(self._mergeable_state)
return self._mergeable_state.value
@property
def merged(self):
"""
:type: bool
"""
self._completeIfNotSet(self._merged)
return self._merged.value
@property
def merged_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._merged_at)
return self._merged_at.value
@property
def merged_by(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._merged_by)
return self._merged_by.value
@property
def milestone(self):
"""
:type: :class:`github.Milestone.Milestone`
"""
self._completeIfNotSet(self._milestone)
return self._milestone.value
@property
def number(self):
"""
:type: integer
"""
self._completeIfNotSet(self._number)
return self._number.value
@property
def patch_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._patch_url)
return self._patch_url.value
@property
def review_comment_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._review_comment_url)
return self._review_comment_url.value
@property
def review_comments(self):
"""
:type: integer
"""
self._completeIfNotSet(self._review_comments)
return self._review_comments.value
@property
def review_comments_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._review_comments_url)
return self._review_comments_url.value
@property
def state(self):
"""
:type: string
"""
self._completeIfNotSet(self._state)
return self._state.value
@property
def title(self):
"""
:type: string
"""
self._completeIfNotSet(self._title)
return self._title.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
@property
def user(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._user)
return self._user.value
def create_comment(self, body, commit_id, path, position):
"""
:calls: `POST /repos/:owner/:repo/pulls/:number/comments <http://developer.github.com/v3/pulls/comments>`_
:param body: string
:param commit_id: :class:`github.Commit.Commit`
:param path: string
:param position: integer
:rtype: :class:`github.PullRequestComment.PullRequestComment`
"""
return self.create_review_comment(body, commit_id, path, position)
def create_review_comment(self, body, commit_id, path, position):
"""
:calls: `POST /repos/:owner/:repo/pulls/:number/comments <http://developer.github.com/v3/pulls/comments>`_
:param body: string
:param commit_id: :class:`github.Commit.Commit`
:param path: string
:param position: integer
:rtype: :class:`github.PullRequestComment.PullRequestComment`
"""
assert isinstance(body, (str, unicode)), body
assert isinstance(commit_id, github.Commit.Commit), commit_id
assert isinstance(path, (str, unicode)), path
assert isinstance(position, (int, long)), position
post_parameters = {
"body": body,
"commit_id": commit_id._identity,
"path": path,
"position": position,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/comments",
input=post_parameters
)
return github.PullRequestComment.PullRequestComment(self._requester, headers, data, completed=True)
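    # Illustrative usage sketch (not part of the original file); `gh` is an
    # assumed, already-authenticated github.Github instance, and the repo,
    # pull number, path, and position below are made up:
    #
    #   pr = gh.get_repo("owner/repo").get_pull(42)
    #   first_commit = pr.get_commits()[0]
    #   pr.create_review_comment("Consider renaming this.", first_commit,
    #                            "src/app.py", 3)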
def create_issue_comment(self, body):
"""
:calls: `POST /repos/:owner/:repo/issues/:number/comments <http://developer.github.com/v3/issues/comments>`_
:param body: string
:rtype: :class:`github.IssueComment.IssueComment`
"""
assert isinstance(body, (str, unicode)), body
post_parameters = {
"body": body,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
self._parentUrl(self._parentUrl(self.url)) + "/issues/" + str(self.number) + "/comments",
input=post_parameters
)
return github.IssueComment.IssueComment(self._requester, headers, data, completed=True)
def edit(self, title=github.GithubObject.NotSet, body=github.GithubObject.NotSet, state=github.GithubObject.NotSet):
"""
:calls: `PATCH /repos/:owner/:repo/pulls/:number <http://developer.github.com/v3/pulls>`_
:param title: string
:param body: string
:param state: string
:rtype: None
"""
assert title is github.GithubObject.NotSet or isinstance(title, (str, unicode)), title
assert body is github.GithubObject.NotSet or isinstance(body, (str, unicode)), body
assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
post_parameters = dict()
if title is not github.GithubObject.NotSet:
post_parameters["title"] = title
if body is not github.GithubObject.NotSet:
post_parameters["body"] = body
if state is not github.GithubObject.NotSet:
post_parameters["state"] = state
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=post_parameters
)
self._useAttributes(data)
def get_comment(self, id):
"""
:calls: `GET /repos/:owner/:repo/pulls/comments/:number <http://developer.github.com/v3/pulls/comments>`_
:param id: integer
:rtype: :class:`github.PullRequestComment.PullRequestComment`
"""
return self.get_review_comment(id)
def get_review_comment(self, id):
"""
:calls: `GET /repos/:owner/:repo/pulls/comments/:number <http://developer.github.com/v3/pulls/comments>`_
:param id: integer
:rtype: :class:`github.PullRequestComment.PullRequestComment`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
self._parentUrl(self.url) + "/comments/" + str(id)
)
return github.PullRequestComment.PullRequestComment(self._requester, headers, data, completed=True)
def get_comments(self):
"""
:calls: `GET /repos/:owner/:repo/pulls/:number/comments <http://developer.github.com/v3/pulls/comments>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestComment.PullRequestComment`
"""
return self.get_review_comments()
def get_review_comments(self):
"""
:calls: `GET /repos/:owner/:repo/pulls/:number/comments <http://developer.github.com/v3/pulls/comments>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestComment.PullRequestComment`
"""
return github.PaginatedList.PaginatedList(
github.PullRequestComment.PullRequestComment,
self._requester,
self.url + "/comments",
None
)
def get_commits(self):
"""
:calls: `GET /repos/:owner/:repo/pulls/:number/commits <http://developer.github.com/v3/pulls>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Commit.Commit`
"""
return github.PaginatedList.PaginatedList(
github.Commit.Commit,
self._requester,
self.url + "/commits",
None
)
def get_files(self):
"""
:calls: `GET /repos/:owner/:repo/pulls/:number/files <http://developer.github.com/v3/pulls>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.File.File`
"""
return github.PaginatedList.PaginatedList(
github.File.File,
self._requester,
self.url + "/files",
None
)
def get_issue_comment(self, id):
"""
:calls: `GET /repos/:owner/:repo/issues/comments/:id <http://developer.github.com/v3/issues/comments>`_
:param id: integer
:rtype: :class:`github.IssueComment.IssueComment`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
self._parentUrl(self._parentUrl(self.url)) + "/issues/comments/" + str(id)
)
return github.IssueComment.IssueComment(self._requester, headers, data, completed=True)
def get_issue_comments(self):
"""
:calls: `GET /repos/:owner/:repo/issues/:number/comments <http://developer.github.com/v3/issues/comments>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.IssueComment.IssueComment`
"""
return github.PaginatedList.PaginatedList(
github.IssueComment.IssueComment,
self._requester,
self._parentUrl(self._parentUrl(self.url)) + "/issues/" + str(self.number) + "/comments",
None
)
def is_merged(self):
"""
:calls: `GET /repos/:owner/:repo/pulls/:number/merge <http://developer.github.com/v3/pulls>`_
:rtype: bool
"""
status, headers, data = self._requester.requestJson(
"GET",
self.url + "/merge"
)
return status == 204
def merge(self, commit_message=github.GithubObject.NotSet):
"""
:calls: `PUT /repos/:owner/:repo/pulls/:number/merge <http://developer.github.com/v3/pulls>`_
:param commit_message: string
:rtype: :class:`github.PullRequestMergeStatus.PullRequestMergeStatus`
"""
assert commit_message is github.GithubObject.NotSet or isinstance(commit_message, (str, unicode)), commit_message
post_parameters = dict()
if commit_message is not github.GithubObject.NotSet:
post_parameters["commit_message"] = commit_message
headers, data = self._requester.requestJsonAndCheck(
"PUT",
self.url + "/merge",
input=post_parameters
)
return github.PullRequestMergeStatus.PullRequestMergeStatus(self._requester, headers, data, completed=True)
def _initAttributes(self):
self._additions = github.GithubObject.NotSet
self._assignee = github.GithubObject.NotSet
self._base = github.GithubObject.NotSet
self._body = github.GithubObject.NotSet
self._changed_files = github.GithubObject.NotSet
self._closed_at = github.GithubObject.NotSet
self._comments = github.GithubObject.NotSet
self._comments_url = github.GithubObject.NotSet
self._commits = github.GithubObject.NotSet
self._commits_url = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._deletions = github.GithubObject.NotSet
self._diff_url = github.GithubObject.NotSet
self._head = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._issue_url = github.GithubObject.NotSet
self._merge_commit_sha = github.GithubObject.NotSet
self._mergeable = github.GithubObject.NotSet
self._mergeable_state = github.GithubObject.NotSet
self._merged = github.GithubObject.NotSet
self._merged_at = github.GithubObject.NotSet
self._merged_by = github.GithubObject.NotSet
self._milestone = github.GithubObject.NotSet
self._number = github.GithubObject.NotSet
self._patch_url = github.GithubObject.NotSet
self._review_comment_url = github.GithubObject.NotSet
self._review_comments = github.GithubObject.NotSet
self._review_comments_url = github.GithubObject.NotSet
self._state = github.GithubObject.NotSet
self._title = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
self._user = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "additions" in attributes: # pragma no branch
self._additions = self._makeIntAttribute(attributes["additions"])
if "assignee" in attributes: # pragma no branch
self._assignee = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["assignee"])
if "base" in attributes: # pragma no branch
self._base = self._makeClassAttribute(github.PullRequestPart.PullRequestPart, attributes["base"])
if "body" in attributes: # pragma no branch
self._body = self._makeStringAttribute(attributes["body"])
if "changed_files" in attributes: # pragma no branch
self._changed_files = self._makeIntAttribute(attributes["changed_files"])
if "closed_at" in attributes: # pragma no branch
self._closed_at = self._makeDatetimeAttribute(attributes["closed_at"])
if "comments" in attributes: # pragma no branch
self._comments = self._makeIntAttribute(attributes["comments"])
if "comments_url" in attributes: # pragma no branch
self._comments_url = self._makeStringAttribute(attributes["comments_url"])
if "commits" in attributes: # pragma no branch
self._commits = self._makeIntAttribute(attributes["commits"])
if "commits_url" in attributes: # pragma no branch
self._commits_url = self._makeStringAttribute(attributes["commits_url"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "deletions" in attributes: # pragma no branch
self._deletions = self._makeIntAttribute(attributes["deletions"])
if "diff_url" in attributes: # pragma no branch
self._diff_url = self._makeStringAttribute(attributes["diff_url"])
if "head" in attributes: # pragma no branch
self._head = self._makeClassAttribute(github.PullRequestPart.PullRequestPart, attributes["head"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "issue_url" in attributes: # pragma no branch
self._issue_url = self._makeStringAttribute(attributes["issue_url"])
if "merge_commit_sha" in attributes: # pragma no branch
self._merge_commit_sha = self._makeStringAttribute(attributes["merge_commit_sha"])
if "mergeable" in attributes: # pragma no branch
self._mergeable = self._makeBoolAttribute(attributes["mergeable"])
if "mergeable_state" in attributes: # pragma no branch
self._mergeable_state = self._makeStringAttribute(attributes["mergeable_state"])
if "merged" in attributes: # pragma no branch
self._merged = self._makeBoolAttribute(attributes["merged"])
if "merged_at" in attributes: # pragma no branch
self._merged_at = self._makeDatetimeAttribute(attributes["merged_at"])
if "merged_by" in attributes: # pragma no branch
self._merged_by = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["merged_by"])
if "milestone" in attributes: # pragma no branch
self._milestone = self._makeClassAttribute(github.Milestone.Milestone, attributes["milestone"])
if "number" in attributes: # pragma no branch
self._number = self._makeIntAttribute(attributes["number"])
if "patch_url" in attributes: # pragma no branch
self._patch_url = self._makeStringAttribute(attributes["patch_url"])
if "review_comment_url" in attributes: # pragma no branch
self._review_comment_url = self._makeStringAttribute(attributes["review_comment_url"])
if "review_comments" in attributes: # pragma no branch
self._review_comments = self._makeIntAttribute(attributes["review_comments"])
if "review_comments_url" in attributes: # pragma no branch
self._review_comments_url = self._makeStringAttribute(attributes["review_comments_url"])
if "state" in attributes: # pragma no branch
self._state = self._makeStringAttribute(attributes["state"])
if "title" in attributes: # pragma no branch
self._title = self._makeStringAttribute(attributes["title"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "user" in attributes: # pragma no branch
self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["user"])
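# --- Illustrative usage sketch (not part of the original PyGithub module) ---
# A minimal, hedged example of how the review/issue comment helpers above
# might be driven from client code. It assumes an authenticated github.Github
# instance; the token, "owner/repo", pull number, file path and diff position
# are placeholders, not values taken from this file.
if __name__ == "__main__":
    from github import Github

    gh = Github("<api-token>")                      # hypothetical token
    pull = gh.get_repo("owner/repo").get_pull(42)   # hypothetical PR number
    # Plain issue-style comment on the pull request conversation.
    pull.create_issue_comment("Thanks, looks good overall.")
    # Review comment anchored to a commit, a file and a diff position.
    last_commit = pull.get_commits()[0]
    pull.create_review_comment("Consider renaming this variable.",
                               last_commit, "src/module.py", 3)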
|
lgpl-3.0
|
petteyg/intellij-community
|
python/lib/Lib/site-packages/django/utils/unittest/__init__.py
|
571
|
3069
|
"""
unittest2
unittest2 is a backport of the new features added to the unittest testing
framework in Python 2.7. It is tested to run on Python 2.4 - 2.6.
To use unittest2 instead of unittest simply replace ``import unittest`` with
``import unittest2``.
Copyright (c) 1999-2003 Steve Purcell
Copyright (c) 2003-2010 Python Software Foundation
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
import sys
# Django hackery to load the appropriate version of unittest
try:
# check the system path first
from unittest2 import *
except ImportError:
if sys.version_info >= (2,7):
# unittest2 features are native in Python 2.7
from unittest import *
else:
# otherwise use our bundled version
__all__ = ['TestResult', 'TestCase', 'TestSuite',
'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main',
'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless',
'expectedFailure', 'TextTestResult', '__version__', 'collector']
__version__ = '0.5.1'
# Expose obsolete functions for backwards compatibility
__all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases'])
from django.utils.unittest.collector import collector
from django.utils.unittest.result import TestResult
from django.utils.unittest.case import \
TestCase, FunctionTestCase, SkipTest, skip, skipIf,\
skipUnless, expectedFailure
from django.utils.unittest.suite import BaseTestSuite, TestSuite
from django.utils.unittest.loader import \
TestLoader, defaultTestLoader, makeSuite, getTestCaseNames,\
findTestCases
from django.utils.unittest.main import TestProgram, main, main_
from django.utils.unittest.runner import TextTestRunner, TextTestResult
try:
from django.utils.unittest.signals import\
installHandler, registerResult, removeResult, removeHandler
except ImportError:
# Compatibility with platforms that don't have the signal module
pass
else:
__all__.extend(['installHandler', 'registerResult', 'removeResult',
'removeHandler'])
# deprecated
_TextTestResult = TextTestResult
__unittest = True
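# --- Illustrative usage sketch (not part of the original Django module) ---
# A minimal, hedged example of how test code might import from this shim so
# the same test runs on Python 2.4-2.6 (bundled unittest2 backport) and on
# 2.7+ (stdlib unittest). Kept commented out so the module itself is
# unchanged; the class name and skip condition are placeholders.
#
# from django.utils import unittest
#
# class ShimExampleTests(unittest.TestCase):
#     @unittest.skipUnless(hasattr({}, 'viewkeys'), "requires dict views (2.7+)")
#     def test_placeholder(self):
#         self.assertTrue(True)
#
# if __name__ == '__main__':
#     unittest.main()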
|
apache-2.0
|
infoclock/OlympicTracker
|
allauthdemo/demo/views.py
|
1
|
3564
|
import math
from django.shortcuts import render
from django.views import generic
from allauthdemo.auth.models import DemoUser
from allauthdemo.demo.models import Problem
from fileupload.models import Submission
from allauthdemo.demo.models import ContestParticipation
class ProblemView(generic.ListView):
model = Problem
template_name = '../templates/bases/problem-list.html'
context_object_name = 'problems_retrieved'
def get_queryset(self):
return Problem.objects.all()
class SubmissionView(generic.ListView):
model = Submission
template_name = '../templates/bases/submission-list.html'
context_object_name = 'submissions'
def get_queryset(self):
good_result = []
bad_result = []
submissions = Submission.objects.filter(user=self.request.user)
for problem in Problem.objects.all():
submission = submissions.filter(problem=problem).order_by('-last_modified').first()
if submission:
good_result.append(submission)
else:
bad_result.append(problem)
return {'good': good_result, 'bad':bad_result}
class RankingView(generic.TemplateView):
template_name = '../templates/bases/ranking-list.html'
def get_context_data(self, **kwargs):
context = super(RankingView, self).get_context_data(**kwargs)
context['minimum'] = 13 * 3.0
users = []
for user in DemoUser.objects.all():
d = {}
d['name'] = user.get_full_name()
d['id'] = user.id
d['codeforces_handle'] = user.codeforces_handle
d['echivaleaza'] = user.is_participating_2016
d['codeforces_points'] = sum([x.score for x in ContestParticipation.objects.filter(user=user)])
d['codeforces_grade'] = round(min(d['codeforces_points'] / context['minimum'] * 10, 10.0), 1)
d['1st'] = user.problems_solved_first_exam
d['2nd'] = user.problems_solved_second_exam
best_solved = max(user.problems_solved_first_exam, user.problems_solved_second_exam)
d['exam_grade'] = 0
if best_solved > 4:
d['exam_grade'] = 10.0
elif best_solved < 2:
d['exam_grade'] = '-'
else:
distribution = {
# number of solved problems -> grade
2: 5,
3: 7,
4: 9,
}
d['exam_grade'] = distribution[best_solved]
if d['exam_grade'] != '-':
d['final_grade'] = (d['exam_grade'] + d['codeforces_grade']) / 2
else:
d['final_grade'] = 0
if d['echivaleaza']:
users.append(d)
context['users'] = sorted(users, key=lambda x: (x['final_grade'], x['codeforces_points']), reverse=True)
return context
class ParticipantView(generic.TemplateView):
template_name = '../templates/bases/participant.html'
def get_context_data(self, **kwargs):
context = super(ParticipantView, self).get_context_data(**kwargs)
user = DemoUser.objects.get(pk=int(context['user_id']))
context['name'] = user.get_full_name()
res = []
for participation in ContestParticipation.objects.filter(user=user):
p = {}
p['name'] = participation.name
p['place'] = participation.place
p['score'] = participation.score
res.append(p)
context['participations'] = res
return context
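# --- Illustrative grading sketch (not part of the original views module) ---
# A minimal, hedged restatement of the grading rule used by RankingView,
# pulled out as a standalone function for clarity. The threshold of 13 * 3.0
# points mirrors context['minimum'] above; the rest is a direct transcription
# of the logic in get_context_data().
def _final_grade_sketch(codeforces_points, best_solved, minimum=13 * 3.0):
    codeforces_grade = round(min(codeforces_points / minimum * 10, 10.0), 1)
    if best_solved > 4:
        exam_grade = 10.0
    elif best_solved < 2:
        return 0  # no exam grade, so the final grade collapses to 0
    else:
        exam_grade = {2: 5, 3: 7, 4: 9}[best_solved]
    return (exam_grade + codeforces_grade) / 2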
|
mit
|
Joev-/HoNCore
|
honcore/requester.py
|
1
|
3523
|
"""
HoNCore. Python library providing connectivity and functionality
with HoN's chat server.
"""
import hashlib, urllib2
from exceptions import *
from httplib import BadStatusLine
""" Sends requests to the HoN master servers.
These are just basic HTTP get requests which return serialised php.
A version of '2.1.0' caused the server to drop the connection; changing it back to a four-'digit' version fixed it, so the version
string presumably needs four components.
TODO:
* If sending a logout request times out then it's a bit... confusing as to what's going on. Could need cleaning up.
"""
_config_defaults = {
"masterserver" : "http://masterserver.hon.s2games.com/",
"basicserver" : "http://heroesofnewerth.com/",
"version": "2.5.7.0"
}
class Requester:
def __init__(self):
self.config = _config_defaults
def httpget(self, base, url):
url = base + url
header = { 'User-Agent' : "S2 Games/Heroes of Newerth/%s/lac/x86-biarch" % self.config["version"]}
req = urllib2.Request(url, None, header)
try:
response = urllib2.urlopen(req, timeout=20)
return response.read()
except urllib2.HTTPError, e:
# TODO : Find out what errors to catch.
print e.code
print e.read()
raise MasterServerError(107)
except urllib2.URLError, e:
code = e.reason[0]
if code == 104: # Connection reset by peer
raise MasterServerError(110)
elif code == 111:
raise MasterServerError(111)
elif code == "timed out":
raise MasterServerError(112)
elif code == -5:
raise MasterServerError(114)
else:
print e
print code
raise MasterServerError(107)
except BadStatusLine, e:
raise MasterServerError(109, e)
def httpost(self, url):
""" WHY """
pass
def login(self, username, password):
""" Requests basic information about the user's account """
url = "client_requester.php?f=auth&login=%s&password=%s" % (username, password)
return self.httpget(self.config['masterserver'], url)
def logout(self, cookie):
""" Sends a logout 'request'.
Returns a:2:{i:0;b:1;s:12:"client_disco";s:2:"OK";} on a successful logout.
"""
url = "client_requester.php?f=logout&cookie=%s" % cookie
return self.httpget(self.config['masterserver'], url)
def motd(self):
""" Requests the message of the day list from the server.
Contains the last six messages of the day.
"""
url = "/gen/client_motd3.php?data=retrieve"
return self.httpget(self.config['basicserver'], url)
def server_list(self, cookie, gametype):
pass
def nick2id(self, nickname):
pass
def new_buddy(self, cookie, aid, bid):
pass
def remove_buddy(self, cookie, aid, bid):
pass
def new_banned(self, cookie, aid, bid, reason):
pass
def remove_banned(self, cookie, aid, bid, reason):
pass
def new_ignored(self, cookie, aid, iid, reason):
pass
def remove_ignored(self, cookie, aid, iid, reason):
pass
def stats_request(self, aid):
pass
def stats_request_ranked(self, aid):
pass
def patcher(self, version, os, arch):
pass
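# --- Illustrative usage sketch (not part of the original honcore module) ---
# A minimal, hedged example of driving the Requester above: authenticate,
# fetch the message of the day, then log out. The credentials are
# placeholders, and the responses are the raw serialised PHP strings returned
# by the master server (this sketch does not attempt to parse them).
if __name__ == "__main__":
    requester = Requester()
    try:
        raw_auth = requester.login("some_account", "some_password")
        raw_motd = requester.motd()
        print(raw_auth[:80])
        print(raw_motd[:80])
        # A real client would unserialise raw_auth and pass the cookie here:
        # requester.logout(cookie)
    except MasterServerError as e:
        print("master server request failed: %s" % e)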
|
unlicense
|
NaturalGIS/QGIS
|
tests/src/python/test_qgsnullsymbolrenderer.py
|
45
|
3186
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgsnullsymbolrenderer.py
-----------------------------
Date : April 2016
Copyright : (C) 2016 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'April 2016'
__copyright__ = '(C) 2016, Nyall Dawson'
import qgis # NOQA
import os
from qgis.PyQt.QtCore import QSize
from qgis.core import (QgsVectorLayer,
QgsProject,
QgsRectangle,
QgsMultiRenderChecker,
QgsNullSymbolRenderer)
from qgis.testing import start_app, unittest
from qgis.testing.mocked import get_iface
from utilities import unitTestDataPath
# Convenience instances in case you may need them
# not used in this test
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsNullSymbolRenderer(unittest.TestCase):
def setUp(self):
self.iface = get_iface()
myShpFile = os.path.join(TEST_DATA_DIR, 'polys.shp')
self.layer = QgsVectorLayer(myShpFile, 'Polys', 'ogr')
QgsProject.instance().addMapLayer(self.layer)
self.renderer = QgsNullSymbolRenderer()
self.layer.setRenderer(self.renderer)
rendered_layers = [self.layer]
self.mapsettings = self.iface.mapCanvas().mapSettings()
self.mapsettings.setOutputSize(QSize(400, 400))
self.mapsettings.setOutputDpi(96)
self.mapsettings.setExtent(QgsRectangle(-163, 22, -70, 52))
self.mapsettings.setLayers(rendered_layers)
def tearDown(self):
QgsProject.instance().removeAllMapLayers()
def testRender(self):
# test no features are rendered
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(self.mapsettings)
renderchecker.setControlPathPrefix('null_renderer')
renderchecker.setControlName('expected_nullrenderer_render')
result = renderchecker.runTest('nullrenderer_render')
assert result
def testSelected(self):
# select a feature and render
self.layer.select([1, 2, 3])
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(self.mapsettings)
renderchecker.setControlPathPrefix('null_renderer')
renderchecker.setControlName('expected_nullrenderer_selected')
result = renderchecker.runTest('nullrenderer_selected')
assert result
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
|
gazpachoking/Flexget
|
flexget/components/notify/notifiers/email.py
|
4
|
7177
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.utils import text_to_native_str
import logging
import smtplib
import socket
import getpass
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate
from smtplib import SMTPAuthenticationError, SMTPServerDisconnected, SMTPSenderRefused
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.event import event
from flexget.plugin import PluginWarning
plugin_name = 'email'
log = logging.getLogger(plugin_name)
class EmailNotifier(object):
"""
Send an e-mail with the list of all succeeded (downloaded) entries.
Configuration options
=============== ===================================================================
Option Description
=============== ===================================================================
from The email address from which the email will be sent (required)
to The email address of the recipient (required)
smtp_host The host of the smtp server
smtp_port The port of the smtp server
smtp_username The username to use to connect to the smtp server
smtp_password The password to use to connect to the smtp server
smtp_tls Should we use TLS to connect to the smtp server
smtp_ssl Should we use SSL to connect to the smtp server
=============== ===================================================================
Config basic example::
notify:
entries:
via:
- email:
from: [email protected]
to: [email protected]
smtp_host: smtp.host.com
Config example with smtp login::
notify:
entries:
via:
- email:
from: [email protected]
to: [email protected]
smtp_host: smtp.host.com
smtp_port: 25
smtp_login: true
smtp_username: my_smtp_login
smtp_password: my_smtp_password
smtp_tls: true
GMAIL example::
notify:
entries:
via:
- email:
from: [email protected]
to: [email protected]
smtp_host: smtp.gmail.com
smtp_port: 587
smtp_login: true
smtp_username: gmailUser
smtp_password: gmailPassword
smtp_tls: true
Default values for the config elements::
notify:
entries:
via:
- email:
smtp_host: localhost
smtp_port: 25
smtp_login: False
smtp_username:
smtp_password:
smtp_tls: False
smtp_ssl: False
"""
def __init__(self):
self.mail_server = None
self.host = None
self.port = None
self.username = None
self.password = None
self.ssl = None
self.tls = None
def connect_to_smtp_server(self, config):
self.host = config['smtp_host']
self.port = config['smtp_port']
self.ssl = config['smtp_ssl']
self.tls = config['smtp_tls']
self.username = config.get('smtp_username')
self.password = config.get('smtp_password')
try:
log.debug('connecting to smtp server %s:%s', self.host, self.port)
self.mail_server = smtplib.SMTP_SSL if self.ssl else smtplib.SMTP
self.mail_server = self.mail_server(self.host, self.port)
if self.tls:
self.mail_server.ehlo()
self.mail_server.starttls()
self.mail_server.ehlo()
except (socket.error, OSError) as e:
raise PluginWarning(str(e))
try:
if self.username:
# Force the credentials to the native `str` type
log.debug('logging in to smtp server using username: %s', self.username)
self.mail_server.login(
text_to_native_str(self.username), text_to_native_str(self.password)
)
except (IOError, SMTPAuthenticationError) as e:
raise PluginWarning(str(e))
schema = {
'type': 'object',
'properties': {
'to': one_or_more({'type': 'string', 'format': 'email'}),
'from': {
'type': 'string',
'default': '[email protected]',
'format': 'email',
},
'autofrom': {'type': 'boolean', 'default': False},
'smtp_host': {'type': 'string', 'default': 'localhost'},
'smtp_port': {'type': 'integer', 'default': 25},
'smtp_username': {'type': 'string'},
'smtp_password': {'type': 'string'},
'smtp_tls': {'type': 'boolean', 'default': False},
'smtp_ssl': {'type': 'boolean', 'default': False},
'html': {'type': 'boolean', 'default': False},
},
'required': ['to'],
'dependencies': {
'smtp_username': ['smtp_password'],
'smtp_password': ['smtp_username'],
'smtp_ssl': ['smtp_tls'],
},
'additionalProperties': False,
}
def notify(self, title, message, config):
"""
Send an email notification
:param str message: message body
:param str title: message subject
:param dict config: email plugin config
"""
if not isinstance(config['to'], list):
config['to'] = [config['to']]
email = MIMEMultipart('alternative')
email['To'] = ','.join(config['to'])
email['From'] = (
getpass.getuser() + '@' + socket.getfqdn() if config['autofrom'] else config['from']
)
email['Subject'] = title
email['Date'] = formatdate(localtime=True)
content_type = 'html' if config['html'] else 'plain'
email.attach(MIMEText(message.encode('utf-8'), content_type, _charset='utf-8'))
# Reuse the open mail server connection only if the host and username match
# (in case several mail servers are used in the same task)
if not self.mail_server or not (
self.host == config['smtp_host'] and self.username == config.get('smtp_username')
):
self.connect_to_smtp_server(config)
connection_error = None
while True:
try:
self.mail_server.sendmail(email['From'], config['to'], email.as_string())
break
except (SMTPServerDisconnected, SMTPSenderRefused) as e:
if not connection_error:
self.connect_to_smtp_server(config)
connection_error = e
else:
raise PluginWarning('Could not connect to SMTP server: %s' % str(e))
@event('plugin.register')
def register_plugin():
plugin.register(EmailNotifier, plugin_name, api_ver=2, interfaces=['notifiers'])
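# --- Illustrative usage sketch (not part of the original plugin module) ---
# A minimal, hedged example of invoking the notifier directly with a config
# dict shaped like the schema above. Inside FlexGet the framework builds this
# dict (applying the schema defaults) and calls notify() itself; the host,
# addresses and credentials below are placeholders.
if __name__ == '__main__':
    example_config = {
        'to': ['[email protected]'],
        'from': '[email protected]',
        'autofrom': False,
        'smtp_host': 'smtp.example.com',
        'smtp_port': 587,
        'smtp_username': 'user',
        'smtp_password': 'secret',
        'smtp_tls': True,
        'smtp_ssl': False,
        'html': False,
    }
    EmailNotifier().notify('FlexGet notification', 'Example message body', example_config)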
|
mit
|
Yen-Chung-En/w16b_test
|
static/Brython3.1.3-20150514-095342/Lib/xml/sax/expatreader.py
|
870
|
14659
|
"""
SAX driver for the pyexpat C module. This driver works with
pyexpat.__version__ == '2.22'.
"""
version = "0.20"
from xml.sax._exceptions import *
from xml.sax.handler import feature_validation, feature_namespaces
from xml.sax.handler import feature_namespace_prefixes
from xml.sax.handler import feature_external_ges, feature_external_pes
from xml.sax.handler import feature_string_interning
from xml.sax.handler import property_xml_string, property_interning_dict
# xml.parsers.expat does not raise ImportError in Jython
import sys
if sys.platform[:4] == "java":
raise SAXReaderNotAvailable("expat not available in Java", None)
del sys
try:
from xml.parsers import expat
except ImportError:
raise SAXReaderNotAvailable("expat not supported", None)
else:
if not hasattr(expat, "ParserCreate"):
raise SAXReaderNotAvailable("expat not supported", None)
from xml.sax import xmlreader, saxutils, handler
AttributesImpl = xmlreader.AttributesImpl
AttributesNSImpl = xmlreader.AttributesNSImpl
# If we're using a sufficiently recent version of Python, we can use
# weak references to avoid cycles between the parser and content
# handler, otherwise we'll just have to pretend.
try:
import _weakref
except ImportError:
def _mkproxy(o):
return o
else:
import weakref
_mkproxy = weakref.proxy
del weakref, _weakref
# --- ExpatLocator
class ExpatLocator(xmlreader.Locator):
"""Locator for use with the ExpatParser class.
This uses a weak reference to the parser object to avoid creating
a circular reference between the parser and the content handler.
"""
def __init__(self, parser):
self._ref = _mkproxy(parser)
def getColumnNumber(self):
parser = self._ref
if parser._parser is None:
return None
return parser._parser.ErrorColumnNumber
def getLineNumber(self):
parser = self._ref
if parser._parser is None:
return 1
return parser._parser.ErrorLineNumber
def getPublicId(self):
parser = self._ref
if parser is None:
return None
return parser._source.getPublicId()
def getSystemId(self):
parser = self._ref
if parser is None:
return None
return parser._source.getSystemId()
# --- ExpatParser
class ExpatParser(xmlreader.IncrementalParser, xmlreader.Locator):
"""SAX driver for the pyexpat C module."""
def __init__(self, namespaceHandling=0, bufsize=2**16-20):
xmlreader.IncrementalParser.__init__(self, bufsize)
self._source = xmlreader.InputSource()
self._parser = None
self._namespaces = namespaceHandling
self._lex_handler_prop = None
self._parsing = 0
self._entity_stack = []
self._external_ges = 1
self._interning = None
# XMLReader methods
def parse(self, source):
"Parse an XML document from a URL or an InputSource."
source = saxutils.prepare_input_source(source)
self._source = source
self.reset()
self._cont_handler.setDocumentLocator(ExpatLocator(self))
xmlreader.IncrementalParser.parse(self, source)
def prepareParser(self, source):
if source.getSystemId() is not None:
self._parser.SetBase(source.getSystemId())
# Redefined setContentHandler to allow changing handlers during parsing
def setContentHandler(self, handler):
xmlreader.IncrementalParser.setContentHandler(self, handler)
if self._parsing:
self._reset_cont_handler()
def getFeature(self, name):
if name == feature_namespaces:
return self._namespaces
elif name == feature_string_interning:
return self._interning is not None
elif name in (feature_validation, feature_external_pes,
feature_namespace_prefixes):
return 0
elif name == feature_external_ges:
return self._external_ges
raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
def setFeature(self, name, state):
if self._parsing:
raise SAXNotSupportedException("Cannot set features while parsing")
if name == feature_namespaces:
self._namespaces = state
elif name == feature_external_ges:
self._external_ges = state
elif name == feature_string_interning:
if state:
if self._interning is None:
self._interning = {}
else:
self._interning = None
elif name == feature_validation:
if state:
raise SAXNotSupportedException(
"expat does not support validation")
elif name == feature_external_pes:
if state:
raise SAXNotSupportedException(
"expat does not read external parameter entities")
elif name == feature_namespace_prefixes:
if state:
raise SAXNotSupportedException(
"expat does not report namespace prefixes")
else:
raise SAXNotRecognizedException(
"Feature '%s' not recognized" % name)
def getProperty(self, name):
if name == handler.property_lexical_handler:
return self._lex_handler_prop
elif name == property_interning_dict:
return self._interning
elif name == property_xml_string:
if self._parser:
if hasattr(self._parser, "GetInputContext"):
return self._parser.GetInputContext()
else:
raise SAXNotRecognizedException(
"This version of expat does not support getting"
" the XML string")
else:
raise SAXNotSupportedException(
"XML string cannot be returned when not parsing")
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
def setProperty(self, name, value):
if name == handler.property_lexical_handler:
self._lex_handler_prop = value
if self._parsing:
self._reset_lex_handler_prop()
elif name == property_interning_dict:
self._interning = value
elif name == property_xml_string:
raise SAXNotSupportedException("Property '%s' cannot be set" %
name)
else:
raise SAXNotRecognizedException("Property '%s' not recognized" %
name)
# IncrementalParser methods
def feed(self, data, isFinal = 0):
if not self._parsing:
self.reset()
self._parsing = 1
self._cont_handler.startDocument()
try:
# The isFinal parameter is internal to the expat reader.
# If it is set to true, expat will check validity of the entire
# document. When feeding chunks, they are not normally final -
# except when invoked from close.
self._parser.Parse(data, isFinal)
except expat.error as e:
exc = SAXParseException(expat.ErrorString(e.code), e, self)
# FIXME: when to invoke error()?
self._err_handler.fatalError(exc)
def close(self):
if self._entity_stack:
# If we are completing an external entity, do nothing here
return
self.feed("", isFinal = 1)
self._cont_handler.endDocument()
self._parsing = 0
# break cycle created by expat handlers pointing to our methods
self._parser = None
bs = self._source.getByteStream()
if bs is not None:
bs.close()
def _reset_cont_handler(self):
self._parser.ProcessingInstructionHandler = \
self._cont_handler.processingInstruction
self._parser.CharacterDataHandler = self._cont_handler.characters
def _reset_lex_handler_prop(self):
lex = self._lex_handler_prop
parser = self._parser
if lex is None:
parser.CommentHandler = None
parser.StartCdataSectionHandler = None
parser.EndCdataSectionHandler = None
parser.StartDoctypeDeclHandler = None
parser.EndDoctypeDeclHandler = None
else:
parser.CommentHandler = lex.comment
parser.StartCdataSectionHandler = lex.startCDATA
parser.EndCdataSectionHandler = lex.endCDATA
parser.StartDoctypeDeclHandler = self.start_doctype_decl
parser.EndDoctypeDeclHandler = lex.endDTD
def reset(self):
if self._namespaces:
self._parser = expat.ParserCreate(self._source.getEncoding(), " ",
intern=self._interning)
self._parser.namespace_prefixes = 1
self._parser.StartElementHandler = self.start_element_ns
self._parser.EndElementHandler = self.end_element_ns
else:
self._parser = expat.ParserCreate(self._source.getEncoding(),
intern = self._interning)
self._parser.StartElementHandler = self.start_element
self._parser.EndElementHandler = self.end_element
self._reset_cont_handler()
self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
self._parser.NotationDeclHandler = self.notation_decl
self._parser.StartNamespaceDeclHandler = self.start_namespace_decl
self._parser.EndNamespaceDeclHandler = self.end_namespace_decl
self._decl_handler_prop = None
if self._lex_handler_prop:
self._reset_lex_handler_prop()
# self._parser.DefaultHandler =
# self._parser.DefaultHandlerExpand =
# self._parser.NotStandaloneHandler =
self._parser.ExternalEntityRefHandler = self.external_entity_ref
try:
self._parser.SkippedEntityHandler = self.skipped_entity_handler
except AttributeError:
# This pyexpat does not support SkippedEntity
pass
self._parser.SetParamEntityParsing(
expat.XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE)
self._parsing = 0
self._entity_stack = []
# Locator methods
def getColumnNumber(self):
if self._parser is None:
return None
return self._parser.ErrorColumnNumber
def getLineNumber(self):
if self._parser is None:
return 1
return self._parser.ErrorLineNumber
def getPublicId(self):
return self._source.getPublicId()
def getSystemId(self):
return self._source.getSystemId()
# event handlers
def start_element(self, name, attrs):
self._cont_handler.startElement(name, AttributesImpl(attrs))
def end_element(self, name):
self._cont_handler.endElement(name)
def start_element_ns(self, name, attrs):
pair = name.split()
if len(pair) == 1:
# no namespace
pair = (None, name)
elif len(pair) == 3:
pair = pair[0], pair[1]
else:
# default namespace
pair = tuple(pair)
newattrs = {}
qnames = {}
for (aname, value) in attrs.items():
parts = aname.split()
length = len(parts)
if length == 1:
# no namespace
qname = aname
apair = (None, aname)
elif length == 3:
qname = "%s:%s" % (parts[2], parts[1])
apair = parts[0], parts[1]
else:
# default namespace
qname = parts[1]
apair = tuple(parts)
newattrs[apair] = value
qnames[apair] = qname
self._cont_handler.startElementNS(pair, None,
AttributesNSImpl(newattrs, qnames))
def end_element_ns(self, name):
pair = name.split()
if len(pair) == 1:
pair = (None, name)
elif len(pair) == 3:
pair = pair[0], pair[1]
else:
pair = tuple(pair)
self._cont_handler.endElementNS(pair, None)
# this is not used (call directly to ContentHandler)
def processing_instruction(self, target, data):
self._cont_handler.processingInstruction(target, data)
# this is not used (call directly to ContentHandler)
def character_data(self, data):
self._cont_handler.characters(data)
def start_namespace_decl(self, prefix, uri):
self._cont_handler.startPrefixMapping(prefix, uri)
def end_namespace_decl(self, prefix):
self._cont_handler.endPrefixMapping(prefix)
def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
self._lex_handler_prop.startDTD(name, pubid, sysid)
def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
self._dtd_handler.unparsedEntityDecl(name, pubid, sysid, notation_name)
def notation_decl(self, name, base, sysid, pubid):
self._dtd_handler.notationDecl(name, pubid, sysid)
def external_entity_ref(self, context, base, sysid, pubid):
if not self._external_ges:
return 1
source = self._ent_handler.resolveEntity(pubid, sysid)
source = saxutils.prepare_input_source(source,
self._source.getSystemId() or
"")
self._entity_stack.append((self._parser, self._source))
self._parser = self._parser.ExternalEntityParserCreate(context)
self._source = source
try:
xmlreader.IncrementalParser.parse(self, source)
except:
return 0 # FIXME: save error info here?
(self._parser, self._source) = self._entity_stack[-1]
del self._entity_stack[-1]
return 1
def skipped_entity_handler(self, name, is_pe):
if is_pe:
# The SAX spec requires to report skipped PEs with a '%'
name = '%'+name
self._cont_handler.skippedEntity(name)
# ---
def create_parser(*args, **kwargs):
return ExpatParser(*args, **kwargs)
# ---
if __name__ == "__main__":
import xml.sax.saxutils
p = create_parser()
p.setContentHandler(xml.sax.saxutils.XMLGenerator())
p.setErrorHandler(xml.sax.ErrorHandler())
p.parse("http://www.ibiblio.org/xml/examples/shakespeare/hamlet.xml")
|
agpl-3.0
|
mancoast/CPythonPyc_test
|
fail/323_test_socket.py
|
1
|
76285
|
#!/usr/bin/env python3
import unittest
from test import support
import errno
import io
import socket
import select
import time
import traceback
import queue
import sys
import os
import array
import platform
import contextlib
from weakref import proxy
import signal
import math
try:
import fcntl
except ImportError:
fcntl = False
def try_address(host, port=0, family=socket.AF_INET):
"""Try to bind a socket on the given host:port and return True
if that succeeds."""
try:
sock = socket.socket(family, socket.SOCK_STREAM)
sock.bind((host, port))
except (socket.error, socket.gaierror):
return False
else:
sock.close()
return True
def linux_version():
try:
# platform.release() is something like '2.6.33.7-desktop-2mnb'
version_string = platform.release().split('-')[0]
return tuple(map(int, version_string.split('.')))
except ValueError:
return 0, 0, 0
HOST = support.HOST
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf8') ## test unicode string and carriage return
SUPPORTS_IPV6 = socket.has_ipv6 and try_address('::1', family=socket.AF_INET6)
try:
import _thread as thread
import threading
except ImportError:
thread = None
threading = None
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen(1)
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
connection and performing the accept() in setUp()). A minimal server/client pair following this convention is sketched after this class.
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
self.__setUp()
if not self.server_ready.is_set():
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
self.clientSetUp()
self.client_ready.set()
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
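# --- Illustrative test-pair sketch (not part of the original test module) ---
# A minimal, hedged example of the server/client pairing convention described
# in the ThreadableTest docstring: the framework runs testEcho() in the main
# (server) thread and the matching _testEcho() in the client thread. Kept
# commented out so the test module's behaviour is unchanged; the payload is a
# placeholder.
#
# class EchoPairExample(ThreadedTCPSocketTest):
#     def testEcho(self):                      # server portion
#         conn, addr = self.serv.accept()
#         conn.sendall(conn.recv(1024))
#         conn.close()
#     def _testEcho(self):                     # client portion
#         self.cli.connect((HOST, self.port))
#         self.cli.sendall(b'ping')
#         self.assertEqual(self.cli.recv(1024), b'ping')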
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(s.close)
self.assertTrue(repr(s).startswith("<socket.socket object"))
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
def raise_error(*args, **kwargs):
raise socket.error
def raise_herror(*args, **kwargs):
raise socket.herror
def raise_gaierror(*args, **kwargs):
raise socket.gaierror
self.assertRaises(socket.error, raise_error,
"Error raising socket exception.")
self.assertRaises(socket.error, raise_herror,
"Error raising socket exception.")
self.assertRaises(socket.error, raise_gaierror,
"Error raising socket exception.")
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"'str' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"'complex' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"'str' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"'complex' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except socket.error:
# Probably name lookup wasn't set up right; skip this test
return
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except socket.error:
# Probably a similar problem as above; skip this test
return
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
if hasattr(sys, "getrefcount"):
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except socket.error:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
def testNtoHErrors(self):
good_values = [ 1, 2, 3, 1, 2, 3 ]
bad_values = [ -1, -2, -3, -1, -2, -3 ]
for k in good_values:
socket.ntohl(k)
socket.ntohs(k)
socket.htonl(k)
socket.htons(k)
for k in bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htonl, k)
self.assertRaises(OverflowError, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith('linux') or
sys.platform.startswith('freebsd') or
sys.platform.startswith('netbsd') or
sys.platform == 'darwin'):
# avoid the 'echo' service on this platform, as there is an
# assumption-breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except socket.error:
pass
else:
raise socket.error
# Try same call with optional protocol omitted
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except socket.error:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
def testIPv4_inet_aton_fourbytes(self):
if not hasattr(socket, 'inet_aton'):
return # No inet_aton, nothing to check
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
def testIPv4toString(self):
if not hasattr(socket, 'inet_pton'):
return # No inet_pton() on this platform
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(socket.error, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
def testIPv6toString(self):
if not hasattr(socket, 'inet_pton'):
return # No inet_pton() on this platform
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
return
except ImportError:
return
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(socket.error, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:7:8:')
assertInvalid('1:2:3:4:5:6:7:8:0')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
def testStringToIPv4(self):
if not hasattr(socket, 'inet_ntop'):
return # No inet_ntop() on this platform
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(socket.error, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
def testStringToIPv6(self):
if not hasattr(socket, 'inet_ntop'):
return # No inet_ntop() on this platform
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
return
except ImportError:
return
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(socket.error, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = support.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
# it is reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except socket.error:
# Probably name lookup wasn't set up right; skip this test
return
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# A new socket should start with SO_REUSEADDR == 0 (reuse disabled)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(socket.error, sock.send, b"spam")
def testNewAttributes(self):
# testing .family, .type and .protocol
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
sock.close()
def test_getsockaddrarg(self):
host = '0.0.0.0'
port = support.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
sock = socket.socket()
try:
self.assertRaises(OverflowError, sock.bind, (host, big_port))
self.assertRaises(OverflowError, sock.bind, (host, neg_port))
sock.bind((host, port))
finally:
sock.close()
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
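# Each entry is a 5-tuple: (family, type, proto, canonname, sockaddr)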
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if SUPPORTS_IPV6:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, None, socket.AF_INET)
for family, _, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(socket.error, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
support.requires('network')
# these should all be successful
socket.gethostbyname('испытание.python.org')
socket.gethostbyname_ex('испытание.python.org')
socket.getaddrinfo('испытание.python.org',0,socket.AF_UNSPEC,socket.SOCK_STREAM)
# this may not work if the forward lookup choses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
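# Raising ZeroDivisionError from inside the handler aborts the sendall() in progress.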
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * (1024**2))
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall, b"x" * (1024**2))
finally:
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def testListenBacklog0(self):
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
# backlog = 0
srv.listen(0)
srv.close()
@unittest.skipUnless(SUPPORTS_IPV6, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
('::1',0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, ('::1', 0, -10))
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while 1:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertRaises(socket.error, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and Recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
@unittest.skipUnless(thread, 'Threading required for this test.')
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def testSetBlocking(self):
# Testing whether set blocking works
self.serv.setblocking(0)
start = time.time()
try:
self.serv.accept()
except socket.error:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")
def _testSetBlocking(self):
pass
if hasattr(socket, "SOCK_NONBLOCK"):
def testInitNonBlocking(self):
v = linux_version()
if v < (2, 6, 28):
self.skipTest("Linux kernel 2.6.28 or higher required, not %s"
% ".".join(map(str, v)))
# reinit server socket
self.serv.close()
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM |
socket.SOCK_NONBLOCK)
self.port = support.bind_port(self.serv)
self.serv.listen(1)
# actual testing
start = time.time()
try:
self.serv.accept()
except socket.error:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error creating with non-blocking mode.")
def _testInitNonBlocking(self):
pass
def testInheritFlags(self):
# Issue #7995: when calling accept() on a listening socket with a
# timeout, the resulting socket should not be non-blocking.
self.serv.settimeout(10)
try:
conn, addr = self.serv.accept()
message = conn.recv(len(MSG))
finally:
conn.close()
self.serv.settimeout(None)
def _testInheritFlags(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
time.sleep(0.5)
self.cli.send(MSG)
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
try:
conn, addr = self.serv.accept()
except socket.error:
pass
else:
self.fail("Error trying to do non-blocking accept.")
read, write, err = select.select([self.serv], [], [])
if self.serv in read:
conn, addr = self.serv.accept()
conn.close()
else:
self.fail("Error trying to do accept after select.")
def _testAccept(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
def testConnect(self):
# Testing non-blocking connect
conn, addr = self.serv.accept()
conn.close()
def _testConnect(self):
self.cli.settimeout(10)
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
conn.setblocking(0)
try:
msg = conn.recv(len(MSG))
except socket.error:
pass
else:
self.fail("Error trying to do non-blocking recv.")
read, write, err = select.select([conn], [], [])
if conn in read:
msg = conn.recv(len(MSG))
conn.close()
self.assertEqual(msg, MSG)
else:
self.fail("Error during select call to non-blocking socket.")
def _testRecv(self):
self.cli.connect((HOST, self.port))
time.sleep(0.1)
self.cli.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(IOError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(socket.error, self.cli_conn.getsockname)
def _testRealClose(self):
pass
class FileObjectInterruptedTestCase(unittest.TestCase):
"""Test that the file object correctly handles EINTR internally."""
class MockSocket(object):
def __init__(self, recv_funcs=()):
# A generator that returns callables that we'll call for each
# call to recv().
self._recv_step = iter(recv_funcs)
def recv_into(self, buffer):
data = next(self._recv_step)()
assert len(buffer) >= len(data)
buffer[:len(data)] = data
return len(data)
def _decref_socketios(self):
pass
def _textiowrap_for_test(self, buffering=-1):
raw = socket.SocketIO(self, "r")
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
return raw
buffer = io.BufferedReader(raw, buffering)
text = io.TextIOWrapper(buffer, None, None)
text.mode = "rb"
return text
@staticmethod
def _raise_eintr():
raise socket.error(errno.EINTR)
def _textiowrap_mock_socket(self, mock, buffering=-1):
raw = socket.SocketIO(mock, "r")
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
return raw
buffer = io.BufferedReader(raw, buffering)
text = io.TextIOWrapper(buffer, None, None)
text.mode = "rb"
return text
def _test_readline(self, size=-1, buffering=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : b"This is the first line\nAnd the sec",
self._raise_eintr,
lambda : b"ond line is here\n",
lambda : b"",
lambda : b"", # XXX(gps): io library does an extra EOF read
])
fo = mock_sock._textiowrap_for_test(buffering=buffering)
self.assertEqual(fo.readline(size), "This is the first line\n")
self.assertEqual(fo.readline(size), "And the second line is here\n")
def _test_read(self, size=-1, buffering=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : b"This is the first line\nAnd the sec",
self._raise_eintr,
lambda : b"ond line is here\n",
lambda : b"",
lambda : b"", # XXX(gps): io library does an extra EOF read
])
expecting = (b"This is the first line\n"
b"And the second line is here\n")
fo = mock_sock._textiowrap_for_test(buffering=buffering)
if buffering == 0:
data = b''
else:
data = ''
expecting = expecting.decode('utf8')
while len(data) != len(expecting):
part = fo.read(size)
if not part:
break
data += part
self.assertEqual(data, expecting)
def test_default(self):
self._test_readline()
self._test_readline(size=100)
self._test_read()
self._test_read(size=100)
def test_with_1k_buffer(self):
self._test_readline(buffering=1024)
self._test_readline(size=100, buffering=1024)
self._test_read(buffering=1024)
self._test_read(size=100, buffering=1024)
def _test_readline_no_buffer(self, size=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : b"a",
lambda : b"\n",
lambda : b"B",
self._raise_eintr,
lambda : b"b",
lambda : b"",
])
fo = mock_sock._textiowrap_for_test(buffering=0)
self.assertEqual(fo.readline(size), b"a\n")
self.assertEqual(fo.readline(size), b"Bb")
def test_no_buffer(self):
self._test_readline_no_buffer()
self._test_readline_no_buffer(size=4)
self._test_read(buffering=0)
self._test_read(size=100, buffering=0)
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(socket.error, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data not arrived (can happen under Windows), wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
# Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * (1024 ** 2)
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf8')
write_mode = 'w'
write_msg = MSG.decode('utf8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(socket.error) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = support.find_unused_port()
with self.assertRaises(socket.error) as cm:
socket.create_connection((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
with self.assertRaises(socket.timeout):
socket.create_connection((HOST, 1234))
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except socket.error:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
if not hasattr(signal, "alarm"):
return # can only test on *nix
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
try:
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except socket.error:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(socket.error, Exception))
self.assertTrue(issubclass(socket.herror, socket.error))
self.assertTrue(issubclass(socket.gaierror, socket.error))
self.assertTrue(issubclass(socket.timeout, socket.error))
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen(1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(socket.error, s.bind, address)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
if not os.path.isfile("/proc/modules"):
return False
with open("/proc/modules") as f:
for line in f:
if line.startswith("tipc "):
return True
if support.verbose:
print("TIPC module is not loaded, please 'sudo modprobe tipc'")
return False
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
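# TIPC_ADDR_NAMESEQ above binds the server to the whole instance range
# [TIPC_LOWER, TIPC_UPPER] for service type TIPC_STYPE; TIPC_ADDR_NAME below
# addresses a single (type, instance) name that falls inside that range.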
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen(5)
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
# There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(socket.error, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(socket.error, sock.sendall, b'foo')
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@unittest.skipUnless(fcntl, "module fcntl not available")
class CloexecConstantTest(unittest.TestCase):
def test_SOCK_CLOEXEC(self):
v = linux_version()
if v < (2, 6, 28):
self.skipTest("Linux kernel 2.6.28 or higher required, not %s"
% ".".join(map(str, v)))
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertTrue(s.type & socket.SOCK_CLOEXEC)
self.assertTrue(fcntl.fcntl(s, fcntl.F_GETFD) & fcntl.FD_CLOEXEC)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertTrue(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), timeout)
else:
self.assertFalse(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), None)
def test_SOCK_NONBLOCK(self):
v = linux_version()
if v < (2, 6, 28):
self.skipTest("Linux kernel 2.6.28 or higher required, not %s"
% ".".join(map(str, v)))
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest, UDPTimeoutTest ]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
FileObjectInterruptedTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
CloexecConstantTest,
NonblockConstantTest
])
if hasattr(socket, "socketpair"):
tests.append(BasicSocketPairTest)
if sys.platform == 'linux2':
tests.append(TestLinuxAbstractNamespace)
if isTipcAvailable():
tests.append(TIPCTest)
tests.append(TIPCThreadableTest)
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
gpl-3.0
|
melbit-kevinwessel/ansible-modules-extras
|
packaging/language/npm.py
|
73
|
8566
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Chris Hoffman <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: npm
short_description: Manage node.js packages with npm
description:
- Manage node.js packages with Node Package Manager (npm)
version_added: 1.2
author: "Chris Hoffman (@chrishoffman)"
options:
name:
description:
- The name of a node.js library to install
required: false
path:
description:
- The base path where the node.js libraries will be installed
required: false
version:
description:
- The version to be installed
required: false
global:
description:
- Install the node.js library globally
required: false
default: no
choices: [ "yes", "no" ]
executable:
description:
- The executable location for npm.
- This is useful if you are using a version manager, such as nvm
required: false
ignore_scripts:
description:
- Use the --ignore-scripts flag when installing.
required: false
choices: [ "yes", "no" ]
default: no
version_added: "1.8"
production:
description:
- Install dependencies in production mode, excluding devDependencies
required: false
choices: [ "yes", "no" ]
default: no
registry:
description:
- The registry to install modules from.
required: false
version_added: "1.6"
state:
description:
- The state of the node.js library
required: false
default: present
choices: [ "present", "absent", "latest" ]
'''
EXAMPLES = '''
description: Install "coffee-script" node.js package.
- npm: name=coffee-script path=/app/location
description: Install "coffee-script" node.js package on version 1.6.1.
- npm: name=coffee-script version=1.6.1 path=/app/location
description: Install "coffee-script" node.js package globally.
- npm: name=coffee-script global=yes
description: Remove the globally installed package "coffee-script".
- npm: name=coffee-script global=yes state=absent
description: Install "coffee-script" node.js package from custom registry.
- npm: name=coffee-script registry=http://registry.mysite.com
description: Install packages based on package.json.
- npm: path=/app/location
description: Update packages based on package.json to their latest version.
- npm: path=/app/location state=latest
description: Install packages based on package.json using the npm installed with nvm v0.10.1.
- npm: path=/app/location executable=/opt/nvm/v0.10.1/bin/npm state=present
'''
import os
import re
try:
import json
except ImportError:
import simplejson as json
class Npm(object):
def __init__(self, module, **kwargs):
self.module = module
self.glbl = kwargs['glbl']
self.name = kwargs['name']
self.version = kwargs['version']
self.path = kwargs['path']
self.registry = kwargs['registry']
self.production = kwargs['production']
self.ignore_scripts = kwargs['ignore_scripts']
if kwargs['executable']:
self.executable = kwargs['executable'].split(' ')
else:
self.executable = [module.get_bin_path('npm', True)]
if kwargs['version']:
self.name_version = self.name + '@' + self.version
else:
self.name_version = self.name
def _exec(self, args, run_in_check_mode=False, check_rc=True):
if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
cmd = self.executable + args
if self.glbl:
cmd.append('--global')
if self.production:
cmd.append('--production')
if self.ignore_scripts:
cmd.append('--ignore-scripts')
if self.name:
cmd.append(self.name_version)
if self.registry:
cmd.append('--registry')
cmd.append(self.registry)
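# At this point cmd is e.g. ['npm', 'install', '--global', 'coffee-script@1.6.1']
# for a global install of a named, versioned package (each flag is only added
# when the corresponding option is set).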
#If path is specified, cd into that path and run the command.
cwd = None
if self.path:
self.path = os.path.abspath(os.path.expanduser(self.path))
if not os.path.exists(self.path):
os.makedirs(self.path)
if not os.path.isdir(self.path):
self.module.fail_json(msg="path %s is not a directory" % self.path)
cwd = self.path
rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
return out
return ''
def list(self):
cmd = ['list', '--json']
installed = list()
missing = list()
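# _exec returns the raw `npm list --json` output; the structure parsed below
# typically looks like (shape assumed here for illustration):
#   {"dependencies": {"coffee-script": {"version": "1.6.1"},
#                     "left-pad": {"missing": true}}}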
data = json.loads(self._exec(cmd, True, False))
if 'dependencies' in data:
for dep in data['dependencies']:
if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']:
missing.append(dep)
elif 'invalid' in data['dependencies'][dep] and data['dependencies'][dep]['invalid']:
missing.append(dep)
else:
installed.append(dep)
if self.name and self.name not in installed:
missing.append(self.name)
#Named dependency not installed
else:
missing.append(self.name)
return installed, missing
def install(self):
return self._exec(['install'])
def update(self):
return self._exec(['update'])
def uninstall(self):
return self._exec(['uninstall'])
def list_outdated(self):
outdated = list()
data = self._exec(['outdated'], True, False)
for dep in data.splitlines():
if dep:
# node.js v0.10.22 changed the `npm outdated` module separator
# from "@" to " ". Split on both for backwards compatibility.
pkg, other = re.split(r'\s|@', dep, 1)
outdated.append(pkg)
return outdated
def main():
arg_spec = dict(
name=dict(default=None),
path=dict(default=None),
version=dict(default=None),
production=dict(default='no', type='bool'),
executable=dict(default=None),
registry=dict(default=None),
state=dict(default='present', choices=['present', 'absent', 'latest']),
ignore_scripts=dict(default=False, type='bool'),
)
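# 'global' is a reserved word in Python, so it cannot be passed as a keyword
# argument to dict() above; add it to the spec separately.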
arg_spec['global'] = dict(default='no', type='bool')
module = AnsibleModule(
argument_spec=arg_spec,
supports_check_mode=True
)
name = module.params['name']
path = module.params['path']
version = module.params['version']
glbl = module.params['global']
production = module.params['production']
executable = module.params['executable']
registry = module.params['registry']
state = module.params['state']
ignore_scripts = module.params['ignore_scripts']
if not path and not glbl:
module.fail_json(msg='path must be specified when not using global')
if state == 'absent' and not name:
module.fail_json(msg='uninstalling a package is only available for named packages')
npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production, \
executable=executable, registry=registry, ignore_scripts=ignore_scripts)
changed = False
if state == 'present':
installed, missing = npm.list()
if len(missing):
changed = True
npm.install()
elif state == 'latest':
installed, missing = npm.list()
outdated = npm.list_outdated()
if len(missing) or len(outdated):
changed = True
npm.install()
else: #absent
installed, missing = npm.list()
if name in installed:
changed = True
npm.uninstall()
module.exit_json(changed=changed)
# import module snippets
from ansible.module_utils.basic import *
main()
|
gpl-3.0
|
jschnurr/scrapyscript
|
tests/test_scrapyscript.py
|
1
|
2302
|
import unittest
from scrapy.settings import Settings
from scrapy.spiders import Spider
import scrapy
from scrapyscript import Job, Processor, ScrapyScriptException
class MySpider(Spider):
name = 'myspider'
def start_requests(self):
yield scrapy.Request(self.url)
def parse(self, response):
title = response.xpath('//title/text()').extract()
ret = []
ret.append({'bot': self.settings['BOT_NAME']})
ret.append({'title': title})
return ret
class BadSpider(Spider):
name = 'badspider'
def start_requests(self):
yield scrapy.Request('http://www.python.org')
def parse(self, response):
return True
class ParamReturnSpider(Spider):
name = 'myspider'
start_urls = ['http://www.python.org']
def __init__(self, category=None, *args, **kwargs):
super(ParamReturnSpider, self).__init__(*args, **kwargs)
self.category = category
def parse(self, response):
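# `category` was passed positionally to the spider; `fruit` arrives via
# **kwargs, which Scrapy's Spider.__init__ copies onto the instance.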
return dict(category=self.category, fruit=self.fruit)
class ScrapyScriptTests(unittest.TestCase):
def test_create_valid_job(self):
spider = MySpider
job = Job(spider)
self.assertIsInstance(job, Job)
def test_parameters_passed_to_spider(self):
spider = ParamReturnSpider
job = Job(spider, 'cat1', fruit='banana')
result = Processor().run(job)
self.assertEqual(result, [dict(category='cat1', fruit='banana')])
def test_no_spider_provided(self):
self.assertRaises(TypeError, Job)
def test_settings_flow_through_to_spider(self):
settings = Settings()
settings['BOT_NAME'] = 'alpha'
job = Job(MySpider, url='http://www.python.org')
results = Processor(settings=settings).run(job)
self.assertIn({'bot': 'alpha'}, results)
def test_multiple_jobs(self):
jobs = [
Job(MySpider, url='http://www.python.org'),
Job(MySpider, url='http://www.github.com')
]
results = Processor().run(jobs)
self.assertEqual(len(results), 4)
def test_bad_return_value(self):
job = Job(BadSpider, url='http://www.python.org')
results = Processor().run(job)
self.assertEqual(results, [])
if __name__ == '__main__':
unittest.main()
|
mit
|
bkrukowski/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/config/ports_mock.py
|
121
|
2482
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class MockPort(object):
def name(self):
return "MockPort"
def check_webkit_style_command(self):
return ["mock-check-webkit-style"]
def update_webkit_command(self, non_interactive=False):
return ["mock-update-webkit"]
def build_webkit_command(self, build_style=None):
return ["mock-build-webkit"]
def prepare_changelog_command(self):
return ['mock-prepare-ChangeLog']
def run_python_unittests_command(self):
return ['mock-test-webkitpy']
def run_perl_unittests_command(self):
return ['mock-test-webkitperl']
def run_javascriptcore_tests_command(self):
return ['mock-run-javacriptcore-tests']
def run_webkit_unit_tests_command(self):
return ['mock-run-webkit-unit-tests']
def run_webkit_tests_command(self):
return ['mock-run-webkit-tests']
def run_bindings_tests_command(self):
return ['mock-run-bindings-tests']
|
bsd-3-clause
|
cbmoore/statsmodels
|
statsmodels/sandbox/tools/tools_pca.py
|
39
|
4142
|
# -*- coding: utf-8 -*-
"""Principal Component Analysis
Created on Tue Sep 29 20:11:23 2009
Author: josef-pktd
TODO : add class for better reuse of results
"""
import numpy as np
def pca(data, keepdim=0, normalize=0, demean=True):
'''principal components with eigenvector decomposition
similar to princomp in matlab
Parameters
----------
data : ndarray, 2d
data with observations by rows and variables in columns
keepdim : integer
number of eigenvectors to keep
if keepdim is zero, then all eigenvectors are included
normalize : boolean
if true, then eigenvectors are normalized by sqrt of eigenvalues
demean : boolean
if true, then the column mean is subtracted from the data
Returns
-------
xreduced : ndarray, 2d, (nobs, nvars)
projection of the data x on the kept eigenvectors
factors : ndarray, 2d, (nobs, nfactors)
factor matrix, given by np.dot(x, evecs)
evals : ndarray, (nfactors,)
eigenvalues
evecs : ndarray, 2d, (nvars, nfactors)
eigenvectors, normalized if normalize is true
Notes
-----
See Also
--------
pcasvd : principal component analysis using svd
'''
x = np.array(data)
#make copy so original doesn't change, maybe not necessary anymore
if demean:
m = x.mean(0)
else:
m = np.zeros(x.shape[1])
x -= m
# Covariance matrix
xcov = np.cov(x, rowvar=0)
# Compute eigenvalues and sort into descending order
evals, evecs = np.linalg.eig(xcov)
indices = np.argsort(evals)
indices = indices[::-1]
evecs = evecs[:,indices]
evals = evals[indices]
if keepdim > 0 and keepdim < x.shape[1]:
evecs = evecs[:,:keepdim]
evals = evals[:keepdim]
if normalize:
#for i in range(shape(evecs)[1]):
# evecs[:,i] / linalg.norm(evecs[:,i]) * sqrt(evals[i])
evecs = evecs/np.sqrt(evals) #np.sqrt(np.dot(evecs.T, evecs) * evals)
# get factor matrix
#x = np.dot(evecs.T, x.T)
factors = np.dot(x, evecs)
# get original data from reduced number of components
#xreduced = np.dot(evecs.T, factors) + m
#print x.shape, factors.shape, evecs.shape, m.shape
xreduced = np.dot(factors, evecs.T) + m
return xreduced, factors, evals, evecs
def pcasvd(data, keepdim=0, demean=True):
'''principal components with svd
Parameters
----------
data : ndarray, 2d
data with observations by rows and variables in columns
keepdim : integer
number of eigenvectors to keep
if keepdim is zero, then all eigenvectors are included
demean : boolean
if true, then the column mean is subtracted from the data
Returns
-------
xreduced : ndarray, 2d, (nobs, nvars)
projection of the data x on the kept eigenvectors
factors : ndarray, 2d, (nobs, nfactors)
factor matrix, given by np.dot(x, evecs)
evals : ndarray, (nfactors,)
eigenvalues
evecs : ndarray, 2d, (nvars, nfactors)
eigenvectors (columns of U from the SVD)
See Also
--------
pca : principal component analysis using eigenvector decomposition
Notes
-----
This doesn't yet have the normalize option of pca.
'''
nobs, nvars = data.shape
#print nobs, nvars, keepdim
x = np.array(data)
#make copy so original doesn't change
if demean:
m = x.mean(0)
else:
m = 0
## if keepdim == 0:
## keepdim = nvars
## "print reassigning keepdim to max", keepdim
x -= m
U, s, v = np.linalg.svd(x.T, full_matrices=1)
factors = np.dot(U.T, x.T).T #princomps
if keepdim:
xreduced = np.dot(factors[:,:keepdim], U[:,:keepdim].T) + m
else:
xreduced = data
keepdim = nvars
"print reassigning keepdim to max", keepdim
# s = evals, U = evecs
# s**2/(nobs - 1): the (n - 1) denominator matches np.cov's unbiased normalization used in pca()
evals = s**2/(x.shape[0]-1)
#print keepdim
return xreduced, factors[:,:keepdim], evals[:keepdim], U[:,:keepdim] #, v
__all__ = ['pca', 'pcasvd']
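# Minimal usage sketch added for illustration (not part of the original module);
# the random data below is hypothetical and only shape-checks the two functions.
if __name__ == "__main__":
    np.random.seed(0)
    x = np.random.randn(100, 4)
    xreduced, factors, evals, evecs = pca(x, keepdim=2)
    assert xreduced.shape == (100, 4)
    assert factors.shape == (100, 2) and evecs.shape == (4, 2)
    xreduced2, factors2, evals2, evecs2 = pcasvd(x, keepdim=2)
    assert factors2.shape == (100, 2) and evals2.shape == (2,)
    print('pca/pcasvd usage sketch ran without errors')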
|
bsd-3-clause
|
Metaswitch/calico-neutron
|
neutron/plugins/cisco/l3/rpc/devices_cfgagent_rpc_cb.py
|
16
|
2020
|
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class DeviceCfgRpcCallbackMixin(object):
"""Mixin for Cisco cfg agent device reporting rpc support."""
def report_non_responding_hosting_devices(self, context, host,
hosting_device_ids):
"""Report that a hosting device cannot be contacted.
@param: context - contains user information
@param: host - originator of callback
@param: hosting_device_ids - list of non-responding hosting devices
@return: -
"""
self._l3plugin.handle_non_responding_hosting_devices(
context, host, hosting_device_ids)
def register_for_duty(self, context, host):
"""Report that Cisco cfg agent is ready for duty.
This function should be called after the agent has started and is
ready to take on assignments, but before any callbacks to fetch
logical resources are issued.
@param: context - contains user information
@param: host - originator of callback
@return: True if successfully registered, False if not successfully
registered, None if no handler found
If unsuccessful the agent should retry registration a few
seconds later
"""
# schedule any non-handled hosting devices
return self._l3plugin.auto_schedule_hosting_devices(context, host)
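# Illustrative sketch (assumption, not part of the upstream module): a composing
# class is expected to provide self._l3plugin, e.g.
#     class DevicesCfgRpcCallback(DeviceCfgRpcCallbackMixin):
#         def __init__(self, l3plugin):
#             self._l3plugin = l3plugin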
|
apache-2.0
|
solarpermit/solarpermit
|
website/migrations/0083_auto__add_field_view_view_type__add_field_view_user__add_field_view_ju.py
|
1
|
57706
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'View.view_type'
db.add_column('website_view', 'view_type',
self.gf('django.db.models.fields.CharField')(default='', max_length=8, db_index=True, blank=True),
keep_default=False)
# Adding field 'View.user'
db.add_column('website_view', 'user',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True),
keep_default=False)
# Adding field 'View.jurisdiction'
db.add_column('website_view', 'jurisdiction',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Jurisdiction'], null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'View.view_type'
db.delete_column('website_view', 'view_type')
# Deleting field 'View.user'
db.delete_column('website_view', 'user_id')
# Deleting field 'View.jurisdiction'
db.delete_column('website_view', 'jurisdiction_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'website.action': {
'Meta': {'object_name': 'Action'},
'action_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ActionCategory']", 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.actioncategory': {
'Meta': {'object_name': 'ActionCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.actiontutorial': {
'Meta': {'object_name': 'ActionTutorial'},
'action_identifier': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'})
},
'website.address': {
'Meta': {'object_name': 'Address'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'website.answerchoice': {
'Meta': {'object_name': 'AnswerChoice'},
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']"}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'website.answerchoicegroup': {
'Meta': {'object_name': 'AnswerChoiceGroup'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.answerreference': {
'Meta': {'object_name': 'AnswerReference'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_callout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'migrated_answer_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicability': {
'Meta': {'object_name': 'Applicability'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.application': {
'Meta': {'object_name': 'Application'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'applicant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'current_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']", 'null': 'True', 'blank': 'True'})
},
'website.applicationanswer': {
'Meta': {'object_name': 'ApplicationAnswer'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Application']"}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicationhistory': {
'Meta': {'object_name': 'ApplicationHistory'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Application']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'status_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'website.comment': {
'Meta': {'object_name': 'Comment'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comment_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_reference'", 'null': 'True', 'to': "orm['website.Comment']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.document': {
'Meta': {'object_name': 'Document'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'file_path': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Region']", 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.documentcategory': {
'Meta': {'object_name': 'DocumentCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.entityview': {
'Meta': {'object_name': 'EntityView'},
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.entityviewcount': {
'Meta': {'object_name': 'EntityViewCount'},
'count_30_days': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'total_count': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.jurisdiction': {
'Meta': {'object_name': 'Jurisdiction'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'last_contributed': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'last_contributed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'last_contributed_by_org': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_contributor'", 'null': 'True', 'to': "orm['website.Organization']"}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_jurisdiction'", 'null': 'True', 'to': "orm['website.Organization']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_jurisdiction'", 'null': 'True', 'to': "orm['website.Jurisdiction']"}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Region']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.jurisdictioncontributor': {
'Meta': {'object_name': 'JurisdictionContributor'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.migrationhistory': {
'Meta': {'object_name': 'MigrationHistory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes2': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'source_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'source_table': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'target_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'target_table': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organization': {
'Meta': {'object_name': 'Organization'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.OrganizationCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'fax': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'logo_scaled': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'parent_org': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'website.organizationaddress': {
'Meta': {'object_name': 'OrganizationAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'address_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'})
},
'website.organizationcategory': {
'Meta': {'object_name': 'OrganizationCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organizationmember': {
'Meta': {'object_name': 'OrganizationMember'},
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitation_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'invitor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_invitor'", 'null': 'True', 'to': "orm['auth.User']"}),
'join_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Person']", 'null': 'True', 'blank': 'True'}),
'requested_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RoleType']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_user'", 'null': 'True', 'to': "orm['auth.User']"})
},
'website.organizationrating': {
'Meta': {'object_name': 'OrganizationRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.person': {
'Meta': {'object_name': 'Person'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'phone_mobile': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'phone_primary': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'phone_secondary': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.personaddress': {
'Meta': {'object_name': 'PersonAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'address_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Person']", 'null': 'True', 'blank': 'True'})
},
'website.question': {
'Meta': {'object_name': 'Question'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']", 'null': 'True', 'blank': 'True'}),
'applicability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Applicability']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'default_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'display_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field_attributes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field_suffix': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'form_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'has_multivalues': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instruction': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'js': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'migration_type': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'qtemplate': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']", 'null': 'True'}),
'question': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'state_exclusive': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'terminology': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'validation_class': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.questioncategory': {
'Meta': {'object_name': 'QuestionCategory'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.questiondependency': {
'Meta': {'object_name': 'QuestionDependency'},
'answer_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_questionDependency_question1'", 'to': "orm['website.Question']"}),
'question2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_questionDependency_question2'", 'to': "orm['website.Question']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'strength': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'website.ratingcategory': {
'Meta': {'object_name': 'RatingCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rating_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.ratinglevel': {
'Meta': {'object_name': 'RatingLevel'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.reaction': {
'Meta': {'object_name': 'Reaction'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Action']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ReactionCategory']", 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'reaction_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.reactioncategory': {
'Meta': {'object_name': 'ReactionCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.region': {
'Meta': {'object_name': 'Region'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.rewardcategory': {
'Meta': {'object_name': 'RewardCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.roletype': {
'Meta': {'object_name': 'RoleType'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.servervariable': {
'Meta': {'object_name': 'ServerVariable'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.template': {
'Meta': {'object_name': 'Template'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'})
},
'website.templatequestion': {
'Meta': {'object_name': 'TemplateQuestion'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"})
},
'website.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'start_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.tutorialpage': {
'Meta': {'object_name': 'TutorialPage'},
'display_order': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'selector': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'tip': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'})
},
'website.usercommentview': {
'Meta': {'object_name': 'UserCommentView'},
'comments_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'last_comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Comment']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'view_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'website.userdetail': {
'Meta': {'object_name': 'UserDetail'},
'display_preference': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'migrated_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'notification_preference': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'old_password': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'reset_password_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '124', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userfavorite': {
'Meta': {'object_name': 'UserFavorite'},
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userpageview': {
'Meta': {'object_name': 'UserPageView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_page_view_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userrating': {
'Meta': {'object_name': 'UserRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userreward': {
'Meta': {'object_name': 'UserReward'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reward': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RewardCategory']", 'null': 'True', 'blank': 'True'}),
'reward_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.usersearch': {
'Meta': {'object_name': 'UserSearch'},
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'search_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.usertutorialhistory': {
'Meta': {'object_name': 'UserTutorialHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'}),
'user_email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
'view_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.usertutorialpagehistory': {
'Meta': {'object_name': 'UserTutorialPageHistory'},
'checked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.TutorialPage']", 'null': 'True', 'blank': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'}),
'user_email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'})
},
'website.view': {
'Meta': {'object_name': 'View'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'view_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'})
},
'website.vieworgs': {
'Meta': {'object_name': 'ViewOrgs'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']"}),
'view': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.View']"})
},
'website.viewquestions': {
'Meta': {'object_name': 'ViewQuestions'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'view': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.View']"})
},
'website.zipcode': {
'Meta': {'object_name': 'Zipcode'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '2', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'})
}
}
complete_apps = ['website']
|
bsd-3-clause
|
chunywang/crosswalk-test-suite
|
embeddingapi/embedding-build-android-tests/gradle/crosswalk_online_gradle_build_app.py
|
18
|
2264
|
#!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Zhu, Yongyong <[email protected]>
import unittest
import os
import commands
import glob
import sys
sys.path.append(os.getcwd())
sys.path.append(os.path.realpath('..'))
import comm
class TestOnlineGradleBuild(unittest.TestCase):
def test_build(self):
comm.setUp()
app_name = "Demo"
pkg_name = "com.example.demo"
comm.create(app_name, pkg_name, self)
comm.build_gradle(app_name, False, False, self)
comm.app_install(app_name, pkg_name, self)
comm.app_launch(app_name, pkg_name, self)
self.assertTrue(comm.check_app_launched(pkg_name, self))
comm.app_stop(pkg_name, self)
comm.app_uninstall(pkg_name, self)
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
skycucumber/restful
|
python/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/filepost.py
|
551
|
2512
|
# urllib3/filepost.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import codecs
import mimetypes
from uuid import uuid4
from io import BytesIO
from .packages import six
from .packages.six import b
from .fields import RequestField
writer = codecs.lookup('utf-8')[3]
def choose_boundary():
"""
Our embarrassingly-simple replacement for mimetools.choose_boundary.
"""
return uuid4().hex
def iter_field_objects(fields):
"""
Iterate over fields.
Supports list of (k, v) tuples and dicts, and lists of
:class:`~urllib3.fields.RequestField`.
"""
if isinstance(fields, dict):
i = six.iteritems(fields)
else:
i = iter(fields)
for field in i:
if isinstance(field, RequestField):
yield field
else:
yield RequestField.from_tuples(*field)
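# Illustrative sketch (not part of the upstream module): both accepted input
# styles yield RequestField objects, built via RequestField.from_tuples:
#     list(iter_field_objects({'name': 'value'}))
#     list(iter_field_objects([('name', 'value'), ('file', b'contents')]))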
def iter_fields(fields):
"""
.. deprecated:: 1.6
Iterate over fields.
The addition of :class:`~urllib3.fields.RequestField` makes this function
obsolete. Instead, use :func:`iter_field_objects`, which returns
:class:`~urllib3.fields.RequestField` objects.
Supports list of (k, v) tuples and dicts.
"""
if isinstance(fields, dict):
return ((k, v) for k, v in six.iteritems(fields))
return ((k, v) for k, v in fields)
def encode_multipart_formdata(fields, boundary=None):
"""
Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
:param fields:
Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
:param boundary:
If not specified, then a random boundary will be generated using
:func:`mimetools.choose_boundary`.
"""
body = BytesIO()
if boundary is None:
boundary = choose_boundary()
for field in iter_field_objects(fields):
body.write(b('--%s\r\n' % (boundary)))
writer(body).write(field.render_headers())
data = field.data
if isinstance(data, int):
data = str(data) # Backwards compatibility
if isinstance(data, six.text_type):
writer(body).write(data)
else:
body.write(data)
body.write(b'\r\n')
body.write(b('--%s--\r\n' % (boundary)))
content_type = str('multipart/form-data; boundary=%s' % boundary)
return body.getvalue(), content_type
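# --- Illustrative usage sketch (not part of the original urllib3 module).
# --- The field names and payload bytes below are placeholders.
if __name__ == '__main__':
    demo_body, demo_content_type = encode_multipart_formdata({
        'username': 'alice',                      # plain text field
        'avatar': ('avatar.png', b'fake-bytes'),  # (filename, data) tuple
    })
    # demo_body holds the encoded payload; demo_content_type embeds the random
    # boundary, e.g. 'multipart/form-data; boundary=<32 hex characters>'.
    print(demo_content_type)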
|
gpl-2.0
|
kieferbonk/xbmc-finnish-tv
|
plugin.video.yleareena/osx/Crypto/SelfTest/Protocol/test_rfc1751.py
|
132
|
2208
|
#
# Test script for Crypto.Util.RFC1751.
#
# Part of the Python Cryptography Toolkit
#
# Written by Andrew Kuchling and others
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
__revision__ = "$Id$"
import binascii
import unittest
from Crypto.Util import RFC1751
from Crypto.Util.py3compat import *
test_data = [('EB33F77EE73D4053', 'TIDE ITCH SLOW REIN RULE MOT'),
('CCAC2AED591056BE4F90FD441C534766',
'RASH BUSH MILK LOOK BAD BRIM AVID GAFF BAIT ROT POD LOVE'),
('EFF81F9BFBC65350920CDD7416DE8009',
'TROD MUTE TAIL WARM CHAR KONG HAAG CITY BORE O TEAL AWL')
]
class RFC1751Test_k2e (unittest.TestCase):
def runTest (self):
"Check converting keys to English"
for key, words in test_data:
key=binascii.a2b_hex(b(key))
self.assertEqual(RFC1751.key_to_english(key), words)
class RFC1751Test_e2k (unittest.TestCase):
def runTest (self):
"Check converting English strings to keys"
for key, words in test_data:
key=binascii.a2b_hex(b(key))
self.assertEqual(RFC1751.english_to_key(words), key)
# class RFC1751Test
def get_tests(config={}):
return [RFC1751Test_k2e(), RFC1751Test_e2k()]
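# Illustrative sketch (not part of the original test module): a manual round
# trip using the first vector from ``test_data`` above.
def _demo_roundtrip():
    key = binascii.a2b_hex(b('EB33F77EE73D4053'))
    words = RFC1751.key_to_english(key)          # 'TIDE ITCH SLOW REIN RULE MOT'
    assert RFC1751.english_to_key(words) == key  # english_to_key is the inverse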
if __name__ == "__main__":
unittest.main()
|
gpl-3.0
|
marionleborgne/nupic.research
|
projects/sequence_prediction/discrete_sequences/plotMultiplePrediction.py
|
12
|
3551
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Plot sequence prediction experiment with multiple possible outcomes
"""
import os
from matplotlib import pyplot
import matplotlib as mpl
import numpy
from plot import computeAccuracy
from plot import plotAccuracy
from plot import readExperiment
mpl.rcParams['pdf.fonttype'] = 42
pyplot.ion()
pyplot.close('all')
if __name__ == '__main__':
experiments = []
for num_prediction in [2, 4]:
experiments.append(os.path.join("tm/results",
"high-order-distributed-random-multiple-predictions",
"num_predictions{:2.1f}".format(num_prediction),
"0.log"))
# for num_prediction in [2, 4]:
# experiments.append(os.path.join("lstm/results",
# "high-order-distributed-random-multiple-predictions",
# "seed0.0num_predictions{:2.1f}".format(num_prediction),
# "0.log"))
for num_prediction in [2, 4]:
experiments.append(os.path.join("lstm/results",
"high-order-basic-random-multiple-predictions",
"seed0.0num_predictions{:2.1f}".format(num_prediction),
"0.log"))
# for num_prediction in [2, 4]:
# experiments.append(os.path.join("elm/results",
# "high-order-basic-random-multiple-predictions",
# "seed0.0num_predictions{:2.1f}".format(num_prediction),
# "0.log"))
for experiment in experiments:
data = readExperiment(experiment)
(accuracy, x) = computeAccuracy(data['predictions'],
data['truths'],
data['iterations'],
resets=data['resets'],
randoms=data['randoms'])
# perturbAt = data['sequenceCounter'][10000]
plotAccuracy((accuracy, x),
data['trains'],
window=200,
type=type,
label='NoiseExperiment',
hideTraining=True,
lineSize=1.0)
# pyplot.xlim([1200, 1750])
pyplot.xlabel('# of elements seen')
pyplot.legend(['HTM: 2 predictions',
'HTM: 4 predictions',
'LSTM: 2 predictions',
'LSTM: 4 predictions'], loc=4)
# pyplot.legend(['LSTM', 'HTM'])
pyplot.savefig('./result/model_performance_multiple_prediction.pdf')
|
agpl-3.0
|
aladdinwang/django-cms
|
cms/plugins/picture/migrations/0012_auto__chg_field_picture_height__chg_field_picture_width.py
|
1
|
7553
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Picture.height'
db.alter_column(u'cmsplugin_picture', 'height', self.gf('django.db.models.fields.CharField')(default='', max_length=255))
# Changing field 'Picture.width'
db.alter_column(u'cmsplugin_picture', 'width', self.gf('django.db.models.fields.CharField')(default='', max_length=255))
def backwards(self, orm):
# Changing field 'Picture.height'
db.alter_column(u'cmsplugin_picture', 'height', self.gf('django.db.models.fields.IntegerField')(null=True))
# Changing field 'Picture.width'
db.alter_column(u'cmsplugin_picture', 'width', self.gf('django.db.models.fields.IntegerField')(null=True))
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'content_template_path': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'picture.picture': {
'Meta': {'object_name': 'Picture', 'db_table': "u'cmsplugin_picture'", '_ormbases': ['cms.CMSPlugin']},
'alt': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'float': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'height': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'longdesc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'page_link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['picture']
|
bsd-3-clause
|
bbangert/velruse
|
velruse/providers/renren.py
|
3
|
4133
|
"""Renren Authentication Views"""
from pyramid.httpexceptions import HTTPFound
from pyramid.security import NO_PERMISSION_REQUIRED
import requests
from ..api import (
AuthenticationComplete,
AuthenticationDenied,
register_provider,
)
from ..exceptions import ThirdPartyFailure
from ..settings import ProviderSettings
from ..utils import flat_url
class RenrenAuthenticationComplete(AuthenticationComplete):
"""Renren auth complete"""
def includeme(config):
config.add_directive('add_renren_login', add_renren_login)
config.add_directive('add_renren_login_from_settings',
add_renren_login_from_settings)
def add_renren_login_from_settings(config, prefix='velruse.renren.'):
settings = config.registry.settings
p = ProviderSettings(settings, prefix)
p.update('consumer_key', required=True)
p.update('consumer_secret', required=True)
p.update('scope')
p.update('login_path')
p.update('callback_path')
config.add_renren_login(**p.kwargs)
def add_renren_login(config,
consumer_key,
consumer_secret,
scope='',
login_path='/login/renren',
callback_path='/login/renren/callback',
name='renren'):
"""
Add a Renren login provider to the application.
"""
provider = RenrenProvider(name, consumer_key, consumer_secret, scope)
config.add_route(provider.login_route, login_path)
config.add_view(provider, attr='login', route_name=provider.login_route,
permission=NO_PERMISSION_REQUIRED)
config.add_route(provider.callback_route, callback_path,
use_global_views=True,
factory=provider.callback)
register_provider(config, name, provider)
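# Illustrative wiring sketch (comments only; the key/secret values are
# placeholders). An application would typically do either
#
#     config.include('velruse.providers.renren')   # runs includeme() above
#     config.add_renren_login(consumer_key='KEY', consumer_secret='SECRET')
#
# or call config.add_renren_login_from_settings() after providing the
# ``velruse.renren.*`` keys in the Pyramid settings.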
class RenrenProvider(object):
def __init__(self, name, consumer_key, consumer_secret, scope):
self.name = name
self.type = 'renren'
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.scope = scope
self.login_route = 'velruse.%s-login' % name
self.callback_route = 'velruse.%s-callback' % name
def login(self, request):
"""Initiate a renren login"""
scope = request.POST.get('scope', self.scope)
url = flat_url('https://graph.renren.com/oauth/authorize',
scope=scope,
client_id=self.consumer_key,
response_type='code',
redirect_uri=request.route_url(self.callback_route))
return HTTPFound(url)
def callback(self, request):
"""Process the renren redirect"""
code = request.GET.get('code')
if not code:
reason = request.GET.get('error', 'No reason provided.')
return AuthenticationDenied(reason,
provider_name=self.name,
provider_type=self.type)
access_url = flat_url(
'https://graph.renren.com/oauth/token',
client_id=self.consumer_key,
client_secret=self.consumer_secret,
grant_type='authorization_code',
redirect_uri=request.route_url(self.callback_route),
code=code)
r = requests.get(access_url)
if r.status_code != 200:
raise ThirdPartyFailure("Status %s: %s" % (
r.status_code, r.content))
data = r.json()
access_token = data['access_token']
profile = {
'accounts': [
{'domain': 'renren.com', 'userid': data['user']['id']},
],
'displayName': data['user']['name'],
'preferredUsername': data['user']['name'],
}
cred = {'oauthAccessToken': access_token}
return RenrenAuthenticationComplete(profile=profile,
credentials=cred,
provider_name=self.name,
provider_type=self.type)
|
mit
|
klunwebale/odoo
|
addons/mail/wizard/mail_compose_message.py
|
218
|
20346
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import base64
import re
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.osv import osv
from openerp.osv import fields
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
# main mako-like expression pattern
EXPRESSION_PATTERN = re.compile(r'(\$\{.+?\})')
class mail_compose_message(osv.TransientModel):
""" Generic message composition wizard. You may inherit from this wizard
at model and view levels to provide specific features.
The behavior of the wizard depends on the composition_mode field:
- 'comment': post on a record. The wizard is pre-populated via ``get_record_data``
- 'mass_mail': wizard in mass mailing mode where the mail details can
contain template placeholders that will be merged with actual data
before being sent to each recipient.
"""
_name = 'mail.compose.message'
_inherit = 'mail.message'
_description = 'Email composition wizard'
_log_access = True
_batch_size = 500
def default_get(self, cr, uid, fields, context=None):
""" Handle composition mode. Some details about context keys:
- comment: default mode, model and ID of a record the user comments
- default_model or active_model
- default_res_id or active_id
- reply: active_id of a message the user replies to
- default_parent_id or message_id or active_id: ID of the
mail.message we reply to
- message.res_model or default_model
- message.res_id or default_res_id
- mass_mail: model and IDs of records the user mass-mails
- active_ids: record IDs
- default_model or active_model
"""
if context is None:
context = {}
result = super(mail_compose_message, self).default_get(cr, uid, fields, context=context)
# v6.1 compatibility mode
result['composition_mode'] = result.get('composition_mode', context.get('mail.compose.message.mode', 'comment'))
result['model'] = result.get('model', context.get('active_model'))
result['res_id'] = result.get('res_id', context.get('active_id'))
result['parent_id'] = result.get('parent_id', context.get('message_id'))
if not result['model'] or not self.pool.get(result['model']) or not hasattr(self.pool[result['model']], 'message_post'):
result['no_auto_thread'] = True
# default values according to composition mode - NOTE: reply is deprecated, fall back on comment
if result['composition_mode'] == 'reply':
result['composition_mode'] = 'comment'
vals = {}
if 'active_domain' in context: # not context.get() because we want to keep global [] domains
vals['use_active_domain'] = True
vals['active_domain'] = '%s' % context.get('active_domain')
if result['composition_mode'] == 'comment':
vals.update(self.get_record_data(cr, uid, result, context=context))
for field in vals:
if field in fields:
result[field] = vals[field]
# TDE HACK: as mailboxes used default_model='res.users' and default_res_id=uid
# (because of lack of an accessible pid), creating a message on its own
# profile may crash (res_users does not allow writing on it)
# Posting on its own profile works (res_users redirect to res_partner)
# but when creating the mail.message to create the mail.compose.message
        # access rights issues may arise
# We therefore directly change the model and res_id
if result['model'] == 'res.users' and result['res_id'] == uid:
result['model'] = 'res.partner'
result['res_id'] = self.pool.get('res.users').browse(cr, uid, uid).partner_id.id
if fields is not None:
[result.pop(field, None) for field in result.keys() if field not in fields]
return result
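    # Illustrative sketch (comment only, mirroring the docstring above): a
    # 'comment' composition is usually opened with a context along the lines of
    # {'default_model': 'res.partner', 'default_res_id': 7}, whereas a mass
    # mailing passes 'active_model' and 'active_ids' instead.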
def _get_composition_mode_selection(self, cr, uid, context=None):
return [('comment', 'Post on a document'),
('mass_mail', 'Email Mass Mailing'),
('mass_post', 'Post on Multiple Documents')]
_columns = {
'composition_mode': fields.selection(
lambda s, *a, **k: s._get_composition_mode_selection(*a, **k),
string='Composition mode'),
'partner_ids': fields.many2many('res.partner',
'mail_compose_message_res_partner_rel',
'wizard_id', 'partner_id', 'Additional Contacts'),
'use_active_domain': fields.boolean('Use active domain'),
'active_domain': fields.char('Active domain', readonly=True),
'attachment_ids': fields.many2many('ir.attachment',
'mail_compose_message_ir_attachments_rel',
'wizard_id', 'attachment_id', 'Attachments'),
'is_log': fields.boolean('Log an Internal Note',
help='Whether the message is an internal note (comment mode only)'),
# mass mode options
'notify': fields.boolean('Notify followers',
help='Notify followers of the document (mass post only)'),
}
_defaults = {
'composition_mode': 'comment',
'body': lambda self, cr, uid, ctx={}: '',
'subject': lambda self, cr, uid, ctx={}: False,
'partner_ids': lambda self, cr, uid, ctx={}: [],
}
def check_access_rule(self, cr, uid, ids, operation, context=None):
""" Access rules of mail.compose.message:
- create: if
- model, no res_id, I create a message in mass mail mode
            - then: fall back on mail.message access rules
"""
if isinstance(ids, (int, long)):
ids = [ids]
# Author condition (CREATE (mass_mail))
if operation == 'create' and uid != SUPERUSER_ID:
# read mail_compose_message.ids to have their values
message_values = {}
cr.execute('SELECT DISTINCT id, model, res_id FROM "%s" WHERE id = ANY (%%s) AND res_id = 0' % self._table, (ids,))
for id, rmod, rid in cr.fetchall():
message_values[id] = {'model': rmod, 'res_id': rid}
# remove from the set to check the ids that mail_compose_message accepts
author_ids = [mid for mid, message in message_values.iteritems()
if message.get('model') and not message.get('res_id')]
ids = list(set(ids) - set(author_ids))
return super(mail_compose_message, self).check_access_rule(cr, uid, ids, operation, context=context)
def _notify(self, cr, uid, newid, context=None, force_send=False, user_signature=True):
""" Override specific notify method of mail.message, because we do
not want that feature in the wizard. """
return
def get_record_data(self, cr, uid, values, context=None):
""" Returns a defaults-like dict with initial values for the composition
        wizard when sending an email related to a previous email (parent_id) or
a document (model, res_id). This is based on previously computed default
values. """
if context is None:
context = {}
result, subject = {}, False
if values.get('parent_id'):
parent = self.pool.get('mail.message').browse(cr, uid, values.get('parent_id'), context=context)
            result['record_name'] = parent.record_name
subject = tools.ustr(parent.subject or parent.record_name or '')
if not values.get('model'):
result['model'] = parent.model
if not values.get('res_id'):
result['res_id'] = parent.res_id
partner_ids = values.get('partner_ids', list()) + [partner.id for partner in parent.partner_ids]
if context.get('is_private') and parent.author_id: # check message is private then add author also in partner list.
partner_ids += [parent.author_id.id]
result['partner_ids'] = partner_ids
elif values.get('model') and values.get('res_id'):
doc_name_get = self.pool[values.get('model')].name_get(cr, uid, [values.get('res_id')], context=context)
result['record_name'] = doc_name_get and doc_name_get[0][1] or ''
subject = tools.ustr(result['record_name'])
re_prefix = _('Re:')
if subject and not (subject.startswith('Re:') or subject.startswith(re_prefix)):
subject = "%s %s" % (re_prefix, subject)
result['subject'] = subject
return result
#------------------------------------------------------
# Wizard validation and send
#------------------------------------------------------
def send_mail(self, cr, uid, ids, context=None):
""" Process the wizard content and proceed with sending the related
email(s), rendering any template patterns on the fly if needed. """
context = dict(context or {})
# clean the context (hint: mass mailing sets some default values that
# could be wrongly interpreted by mail_mail)
context.pop('default_email_to', None)
context.pop('default_partner_ids', None)
for wizard in self.browse(cr, uid, ids, context=context):
mass_mode = wizard.composition_mode in ('mass_mail', 'mass_post')
active_model_pool = self.pool[wizard.model if wizard.model else 'mail.thread']
if not hasattr(active_model_pool, 'message_post'):
context['thread_model'] = wizard.model
active_model_pool = self.pool['mail.thread']
# wizard works in batch mode: [res_id] or active_ids or active_domain
if mass_mode and wizard.use_active_domain and wizard.model:
res_ids = self.pool[wizard.model].search(cr, uid, eval(wizard.active_domain), context=context)
elif mass_mode and wizard.model and context.get('active_ids'):
res_ids = context['active_ids']
else:
res_ids = [wizard.res_id]
batch_size = int(self.pool['ir.config_parameter'].get_param(cr, SUPERUSER_ID, 'mail.batch_size')) or self._batch_size
sliced_res_ids = [res_ids[i:i + batch_size] for i in range(0, len(res_ids), batch_size)]
for res_ids in sliced_res_ids:
all_mail_values = self.get_mail_values(cr, uid, wizard, res_ids, context=context)
for res_id, mail_values in all_mail_values.iteritems():
if wizard.composition_mode == 'mass_mail':
self.pool['mail.mail'].create(cr, uid, mail_values, context=context)
else:
subtype = 'mail.mt_comment'
if wizard.is_log or (wizard.composition_mode == 'mass_post' and not wizard.notify): # log a note: subtype is False
subtype = False
if wizard.composition_mode == 'mass_post':
context = dict(context,
mail_notify_force_send=False, # do not send emails directly but use the queue instead
mail_create_nosubscribe=True) # add context key to avoid subscribing the author
active_model_pool.message_post(cr, uid, [res_id], type='comment', subtype=subtype, context=context, **mail_values)
return {'type': 'ir.actions.act_window_close'}
def get_mail_values(self, cr, uid, wizard, res_ids, context=None):
"""Generate the values that will be used by send_mail to create mail_messages
or mail_mails. """
results = dict.fromkeys(res_ids, False)
rendered_values, default_recipients = {}, {}
mass_mail_mode = wizard.composition_mode == 'mass_mail'
# render all template-based value at once
if mass_mail_mode and wizard.model:
rendered_values = self.render_message_batch(cr, uid, wizard, res_ids, context=context)
# compute alias-based reply-to in batch
reply_to_value = dict.fromkeys(res_ids, None)
if mass_mail_mode and not wizard.no_auto_thread:
reply_to_value = self.pool['mail.thread'].message_get_reply_to(cr, uid, res_ids, default=wizard.email_from, context=dict(context, thread_model=wizard.model))
for res_id in res_ids:
# static wizard (mail.message) values
mail_values = {
'subject': wizard.subject,
'body': wizard.body or '',
'parent_id': wizard.parent_id and wizard.parent_id.id,
'partner_ids': [partner.id for partner in wizard.partner_ids],
'attachment_ids': [attach.id for attach in wizard.attachment_ids],
'author_id': wizard.author_id.id,
'email_from': wizard.email_from,
'record_name': wizard.record_name,
'no_auto_thread': wizard.no_auto_thread,
}
            # mass mailing: rendering overrides wizard static values
if mass_mail_mode and wizard.model:
# always keep a copy, reset record name (avoid browsing records)
mail_values.update(notification=True, model=wizard.model, res_id=res_id, record_name=False)
# auto deletion of mail_mail
if 'mail_auto_delete' in context:
mail_values['auto_delete'] = context.get('mail_auto_delete')
# rendered values using template
email_dict = rendered_values[res_id]
mail_values['partner_ids'] += email_dict.pop('partner_ids', [])
mail_values.update(email_dict)
if not wizard.no_auto_thread:
mail_values.pop('reply_to')
if reply_to_value.get(res_id):
mail_values['reply_to'] = reply_to_value[res_id]
if wizard.no_auto_thread and not mail_values.get('reply_to'):
mail_values['reply_to'] = mail_values['email_from']
# mail_mail values: body -> body_html, partner_ids -> recipient_ids
mail_values['body_html'] = mail_values.get('body', '')
mail_values['recipient_ids'] = [(4, id) for id in mail_values.pop('partner_ids', [])]
# process attachments: should not be encoded before being processed by message_post / mail_mail create
mail_values['attachments'] = [(name, base64.b64decode(enc_cont)) for name, enc_cont in email_dict.pop('attachments', list())]
attachment_ids = []
for attach_id in mail_values.pop('attachment_ids'):
new_attach_id = self.pool.get('ir.attachment').copy(cr, uid, attach_id, {'res_model': self._name, 'res_id': wizard.id}, context=context)
attachment_ids.append(new_attach_id)
mail_values['attachment_ids'] = self.pool['mail.thread']._message_preprocess_attachments(
cr, uid, mail_values.pop('attachments', []),
attachment_ids, 'mail.message', 0, context=context)
results[res_id] = mail_values
return results
#------------------------------------------------------
# Template rendering
#------------------------------------------------------
def render_message_batch(self, cr, uid, wizard, res_ids, context=None):
"""Generate template-based values of wizard, for the document records given
by res_ids. This method is meant to be inherited by email_template that
will produce a more complete dictionary, using Jinja2 templates.
        Each template is generated for all res_ids, allowing the template to be
        parsed once and rendered multiple times. This is useful for mass mailing
        where template rendering represents a significant part of the process.
        Default recipients are also computed, based on the mail_thread method
        message_get_default_recipients. This ensures a mass mailing always has
        some recipients specified.
:param browse wizard: current mail.compose.message browse record
:param list res_ids: list of record ids
:return dict results: for each res_id, the generated template values for
subject, body, email_from and reply_to
"""
subjects = self.render_template_batch(cr, uid, wizard.subject, wizard.model, res_ids, context=context)
bodies = self.render_template_batch(cr, uid, wizard.body, wizard.model, res_ids, context=context, post_process=True)
emails_from = self.render_template_batch(cr, uid, wizard.email_from, wizard.model, res_ids, context=context)
replies_to = self.render_template_batch(cr, uid, wizard.reply_to, wizard.model, res_ids, context=context)
ctx = dict(context, thread_model=wizard.model)
default_recipients = self.pool['mail.thread'].message_get_default_recipients(cr, uid, res_ids, context=ctx)
results = dict.fromkeys(res_ids, False)
for res_id in res_ids:
results[res_id] = {
'subject': subjects[res_id],
'body': bodies[res_id],
'email_from': emails_from[res_id],
'reply_to': replies_to[res_id],
}
results[res_id].update(default_recipients.get(res_id, dict()))
return results
def render_template_batch(self, cr, uid, template, model, res_ids, context=None, post_process=False):
""" Render the given template text, replace mako-like expressions ``${expr}``
with the result of evaluating these expressions with an evaluation context
containing:
* ``user``: browse_record of the current user
* ``object``: browse_record of the document record this mail is
related to
* ``context``: the context passed to the mail composition wizard
:param str template: the template text to render
:param str model: model name of the document record this mail is related to
:param list res_ids: list of record ids
"""
if context is None:
context = {}
results = dict.fromkeys(res_ids, False)
for res_id in res_ids:
def merge(match):
exp = str(match.group()[2:-1]).strip()
result = eval(exp, {
'user': self.pool.get('res.users').browse(cr, uid, uid, context=context),
'object': self.pool[model].browse(cr, uid, res_id, context=context),
'context': dict(context), # copy context to prevent side-effects of eval
})
return result and tools.ustr(result) or ''
results[res_id] = template and EXPRESSION_PATTERN.sub(merge, template)
return results
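    # Illustrative rendering sketch (comment only): given the template text
    # 'Dear ${object.name}', each ${...} expression is evaluated against the
    # browse record for res_id, so a record named 'Alice' renders as
    # 'Dear Alice'; a falsy result falls back to '' through the
    # ``result and tools.ustr(result) or ''`` expression above.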
# Compatibility methods
def render_template(self, cr, uid, template, model, res_id, context=None):
return self.render_template_batch(cr, uid, template, model, [res_id], context)[res_id]
def render_message(self, cr, uid, wizard, res_id, context=None):
return self.render_message_batch(cr, uid, wizard, [res_id], context)[res_id]
|
agpl-3.0
|
jiasir/playback
|
playback/cli/swift_storage.py
|
2
|
21875
|
import sys
import logging
import os
from playback.cli.cliutil import priority
from playback.api import SwiftStorage
from cliff.command import Command
def make_target(args):
try:
target = SwiftStorage(user=args.user, hosts=args.hosts.split(','), key_filename=args.key_filename,
password=args.password)
except AttributeError:
        sys.stderr.write('No hosts found. Please use the --hosts param.')
sys.exit(1)
return target
def prepare_disks(args):
target = make_target(args)
target.prepare_disks(args.name)
def install(args):
target = make_target(args)
target.install(args.address, args.bind_ip)
def create_account_builder_file(args):
target = make_target(args)
target.create_account_builder_file(args.partitions, args.replicas, args.moving)
def account_builder_add(args):
target = make_target(args)
target.account_builder_add(args.region, args.zone,
args.ip, args.device, args.weight)
def create_container_builder_file(args):
target = make_target(args)
target.create_container_builder_file(args.partitions,
args.replicas,
args.moving)
def container_builder_add(args):
target = make_target(args)
target.container_builder_add(args.region,
args.zone, args.ip,
args.device, args.weight)
def create_object_builder_file(args):
target = make_target(args)
target.create_object_builder_file(args.partitions,
args.replicas, args.moving)
def object_builder_add(args):
target = make_target(args)
target.object_builder_add(args.region, args.zone,
args.ip, args.device, args.weight)
def sync_builder_file(args):
target = make_target(args)
target.get_builder_file()
target.sync_builder_file(hosts=args.to.split(','))
os.remove('account.ring.gz')
os.remove('container.ring.gz')
os.remove('object.ring.gz')
def account_builder_rebalance(args):
target = make_target(args)
target.account_builder_rebalance()
def container_builder_rebalance(args):
target = make_target(args)
target.container_builder_rebalance()
def object_builder_rebalance(args):
target = make_target(args)
target.object_builder_rebalance()
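# Illustrative flow sketch (comments only): the helpers above are normally run
# in this order for each of the account / container / object rings --
#   1. create_*_builder_file()  -> initialise the ring builder
#   2. *_builder_add()          -> once per storage node / device
#   3. *_builder_rebalance()    -> distribute partitions across the devices
#   4. sync_builder_file()      -> copy the generated *.ring.gz files out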
class PrepareDisks(Command):
"""prepare the disks for storage"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(PrepareDisks, self).get_parser(prog_name)
parser.add_argument('--user',
help='the username to connect to the remote host',
action='store', default='ubuntu', dest='user')
parser.add_argument('--hosts',
help='the remote host to connect to ',
action='store', default=None, dest='hosts')
parser.add_argument('-i', '--key-filename',
help='referencing file paths to SSH key files to try when connecting',
action='store', dest='key_filename', default=None)
parser.add_argument('--password',
help='the password used by the SSH layer when connecting to remote hosts',
action='store', dest='password', default=None)
parser.add_argument('--name',
help='the device name, e.g. sdb,sdc',
action='store', default=None, dest='name')
return parser
def take_action(self, parsed_args):
prepare_disks(parsed_args)
class Install(Command):
"""install swift storage"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(Install, self).get_parser(prog_name)
parser.add_argument('--user',
help='the username to connect to the remote host',
action='store', default='ubuntu', dest='user')
parser.add_argument('--hosts',
help='the remote host to connect to ',
action='store', default=None, dest='hosts')
parser.add_argument('-i', '--key-filename',
help='referencing file paths to SSH key files to try when connecting',
action='store', dest='key_filename', default=None)
parser.add_argument('--password',
help='the password used by the SSH layer when connecting to remote hosts',
action='store', dest='password', default=None)
parser.add_argument('--address',
help='the management interface ip for rsync',
action='store',
dest='address')
parser.add_argument('--bind-ip',
help='the management interface ip for swift storage binding',
action='store',
dest='bind_ip')
return parser
def take_action(self, parsed_args):
install(parsed_args)
class CreateAccountBuilderFile(Command):
"""create account ring"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(CreateAccountBuilderFile, self).get_parser(prog_name)
parser.add_argument('--user',
help='the username to connect to the remote host',
action='store', default='ubuntu', dest='user')
parser.add_argument('--hosts',
help='the remote host to connect to ',
action='store', default=None, dest='hosts')
parser.add_argument('-i', '--key-filename',
help='referencing file paths to SSH key files to try when connecting',
action='store', dest='key_filename', default=None)
parser.add_argument('--password',
help='the password used by the SSH layer when connecting to remote hosts',
action='store', dest='password', default=None)
parser.add_argument('--partitions',
help='2^10 (1024) maximum partitions e.g. 10',
action='store', default=None, dest='partitions')
parser.add_argument('--replicas',
help='3 replicas of each object e.g. 3',
action='store', default=None, dest='replicas')
parser.add_argument('--moving',
help='1 hour minimum time between moving a partition more than once e.g. 1',
action='store', default=None, dest='moving')
return parser
def take_action(self, parsed_args):
create_account_builder_file(parsed_args)
class AccountBuilderAdd(Command):
"""add each storage node to the account ring"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(AccountBuilderAdd, self).get_parser(prog_name)
parser.add_argument('--user',
help='the username to connect to the remote host',
action='store', default='ubuntu', dest='user')
parser.add_argument('--hosts',
help='the remote host to connect to ',
action='store', default=None, dest='hosts')
parser.add_argument('-i', '--key-filename',
help='referencing file paths to SSH key files to try when connecting',
action='store', dest='key_filename', default=None)
parser.add_argument('--password',
help='the password used by the SSH layer when connecting to remote hosts',
action='store', dest='password', default=None)
parser.add_argument('--region',
help='swift storage region e.g. 1',
action='store', default=None, dest='region')
parser.add_argument('--zone',
help='swift storage zone e.g. 1',
action='store', default=None, dest='zone')
parser.add_argument('--ip',
help='the IP address of the management network on the storage node e.g. STORAGE_NODE_IP',
action='store', default=None, dest='ip')
parser.add_argument('--device',
help='a storage device name on the same storage node e.g. sdb',
action='store', default=None, dest='device')
parser.add_argument('--weight',
help='the storage device weight e.g. 100',
action='store', default=None, dest='weight')
return parser
def take_action(self, parsed_args):
account_builder_add(parsed_args)
class CreateContainerBuilderFile(Command):
"""create container ring"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(CreateContainerBuilderFile, self).get_parser(prog_name)
parser.add_argument('--user',
help='the username to connect to the remote host',
action='store', default='ubuntu', dest='user')
parser.add_argument('--hosts',
help='the remote host to connect to ',
action='store', default=None, dest='hosts')
parser.add_argument('-i', '--key-filename',
help='referencing file paths to SSH key files to try when connecting',
action='store', dest='key_filename', default=None)
parser.add_argument('--password',
help='the password used by the SSH layer when connecting to remote hosts',
action='store', dest='password', default=None)
parser.add_argument('--partitions',
help='2^10 (1024) maximum partitions e.g. 10',
action='store', default=None, dest='partitions')
parser.add_argument('--replicas',
help='3 replicas of each object e.g. 3',
action='store', default=None, dest='replicas')
parser.add_argument('--moving',
help='1 hour minimum time between moving a partition more than once e.g. 1',
action='store', default=None, dest='moving')
return parser
def take_action(self, parsed_args):
create_container_builder_file(parsed_args)
class ContainerBuilderAdd(Command):
"""add each storage node to the container ring"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(ContainerBuilderAdd, self).get_parser(prog_name)
parser.add_argument('--user',
help='the username to connect to the remote host',
action='store', default='ubuntu', dest='user')
parser.add_argument('--hosts',
help='the remote host to connect to ',
action='store', default=None, dest='hosts')
parser.add_argument('-i', '--key-filename',
help='referencing file paths to SSH key files to try when connecting',
action='store', dest='key_filename', default=None)
parser.add_argument('--password',
help='the password used by the SSH layer when connecting to remote hosts',
action='store', dest='password', default=None)
parser.add_argument('--region',
help='swift storage region e.g. 1',
action='store', default=None, dest='region')
parser.add_argument('--zone',
help='swift storage zone e.g. 1',
action='store', default=None, dest='zone')
parser.add_argument('--ip',
help='the IP address of the management network on the storage node e.g. STORAGE_NODE_IP',
action='store', default=None, dest='ip')
parser.add_argument('--device',
help='a storage device name on the same storage node e.g. sdb',
action='store', default=None, dest='device')
parser.add_argument('--weight',
help='the storage device weight e.g. 100',
action='store', default=None, dest='weight')
return parser
def take_action(self, parsed_args):
container_builder_add(parsed_args)
class CreateObjectBuilderFile(Command):
"""create object ring"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(CreateObjectBuilderFile, self).get_parser(prog_name)
parser.add_argument('--user',
help='the username to connect to the remote host',
action='store', default='ubuntu', dest='user')
parser.add_argument('--hosts',
help='the remote host to connect to ',
action='store', default=None, dest='hosts')
parser.add_argument('-i', '--key-filename',
help='referencing file paths to SSH key files to try when connecting',
action='store', dest='key_filename', default=None)
parser.add_argument('--password',
help='the password used by the SSH layer when connecting to remote hosts',
action='store', dest='password', default=None)
parser.add_argument('--partitions',
help='2^10 (1024) maximum partitions e.g. 10',
action='store', default=None, dest='partitions')
parser.add_argument('--replicas',
help='3 replicas of each object e.g. 3',
action='store', default=None, dest='replicas')
parser.add_argument('--moving',
help='1 hour minimum time between moving a partition more than once e.g. 1',
action='store', default=None, dest='moving')
return parser
def take_action(self, parsed_args):
create_object_builder_file(parsed_args)
class ObjectBuilderAdd(Command):
"""add each storage node to the object ring"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(ObjectBuilderAdd, self).get_parser(prog_name)
parser.add_argument('--user',
help='the username to connect to the remote host',
action='store', default='ubuntu', dest='user')
parser.add_argument('--hosts',
help='the remote host to connect to ',
action='store', default=None, dest='hosts')
parser.add_argument('-i', '--key-filename',
help='referencing file paths to SSH key files to try when connecting',
action='store', dest='key_filename', default=None)
parser.add_argument('--password',
help='the password used by the SSH layer when connecting to remote hosts',
action='store', dest='password', default=None)
parser.add_argument('--region',
help='swift storage region e.g. 1',
action='store', default=None, dest='region')
parser.add_argument('--zone',
help='swift storage zone e.g. 1',
action='store', default=None, dest='zone')
parser.add_argument('--ip',
help='the IP address of the management network on the storage node e.g. STORAGE_NODE_IP',
action='store', default=None, dest='ip')
parser.add_argument('--device',
help='a storage device name on the same storage node e.g. sdb',
action='store', default=None, dest='device')
parser.add_argument('--weight',
help='the storage device weight e.g. 100',
action='store', default=None, dest='weight')
return parser
def take_action(self, parsed_args):
object_builder_add(parsed_args)
class SyncBuilderFile(Command):
"""copy the account.ring.gz, container.ring.gz, and object.ring.gz files to the /etc/swift directory on each storage node and any additional nodes running the proxy service"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(SyncBuilderFile, self).get_parser(prog_name)
parser.add_argument('--user',
help='the username to connect to the remote host',
action='store', default='ubuntu', dest='user')
parser.add_argument('--hosts',
help='the remote host to connect to ',
action='store', default=None, dest='hosts')
parser.add_argument('-i', '--key-filename',
help='referencing file paths to SSH key files to try when connecting',
action='store', dest='key_filename', default=None)
parser.add_argument('--password',
help='the password used by the SSH layer when connecting to remote hosts',
action='store', dest='password', default=None)
parser.add_argument('--to',
help='the target hosts where the *.ring.gz file to be added',
action='store', default=None, dest='to')
return parser
def take_action(self, parsed_args):
sync_builder_file(parsed_args)
class AccountBuilderRebalance(Command):
"""rebalance the account ring"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(AccountBuilderRebalance, self).get_parser(prog_name)
parser.add_argument('--user',
help='the username to connect to the remote host',
action='store', default='ubuntu', dest='user')
parser.add_argument('--hosts',
help='the remote host to connect to ',
action='store', default=None, dest='hosts')
parser.add_argument('-i', '--key-filename',
help='referencing file paths to SSH key files to try when connecting',
action='store', dest='key_filename', default=None)
parser.add_argument('--password',
help='the password used by the SSH layer when connecting to remote hosts',
action='store', dest='password', default=None)
return parser
def take_action(self, parsed_args):
account_builder_rebalance(parsed_args)
class ContainerBuilderRebalance(Command):
"""rebalance the container ring"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(ContainerBuilderRebalance, self).get_parser(prog_name)
parser.add_argument('--user',
help='the username to connect to the remote host',
action='store', default='ubuntu', dest='user')
parser.add_argument('--hosts',
help='the remote host to connect to ',
action='store', default=None, dest='hosts')
parser.add_argument('-i', '--key-filename',
help='referencing file paths to SSH key files to try when connecting',
action='store', dest='key_filename', default=None)
parser.add_argument('--password',
help='the password used by the SSH layer when connecting to remote hosts',
action='store', dest='password', default=None)
return parser
def take_action(self, parsed_args):
container_builder_rebalance(parsed_args)
class ObjectBuilderRebalance(Command):
"""rebalance the object ring"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(ObjectBuilderRebalance, self).get_parser(prog_name)
parser.add_argument('--user',
help='the username to connect to the remote host',
action='store', default='ubuntu', dest='user')
parser.add_argument('--hosts',
help='the remote host to connect to ',
action='store', default=None, dest='hosts')
parser.add_argument('-i', '--key-filename',
help='referencing file paths to SSH key files to try when connecting',
action='store', dest='key_filename', default=None)
parser.add_argument('--password',
help='the password used by the SSH layer when connecting to remote hosts',
action='store', dest='password', default=None)
return parser
def take_action(self, parsed_args):
object_builder_rebalance(parsed_args)
|
mit
|
minorua/QGIS
|
python/testing/mocked.py
|
45
|
1873
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
mocked
---------------------
Date : January 2016
Copyright : (C) 2016 by Matthias Kuhn
Email : [email protected]
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Matthias Kuhn'
__date__ = 'January 2016'
__copyright__ = '(C) 2016, Matthias Kuhn'
import os
import sys
import mock
from qgis.gui import QgisInterface, QgsMapCanvas
from qgis.core import QgsApplication
from qgis.PyQt.QtWidgets import QMainWindow
from qgis.PyQt.QtCore import QSize
from qgis.testing import start_app
def get_iface():
"""
Will return a mock QgisInterface object with some methods implemented in a generic way.
You can further control its behavior
by using the mock infrastructure. Refer to https://docs.python.org/3/library/unittest.mock.html
for more details.
Returns
-------
QgisInterface
A mock QgisInterface
"""
start_app()
my_iface = mock.Mock(spec=QgisInterface)
my_iface.mainWindow.return_value = QMainWindow()
canvas = QgsMapCanvas(my_iface.mainWindow())
canvas.resize(QSize(400, 400))
my_iface.mapCanvas.return_value = canvas
return my_iface
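# Illustrative usage sketch (comments only; the attribute shown is just one
# example of steering the mock):
#
#     iface = get_iface()
#     iface.mapCanvas()                      # the real QgsMapCanvas built above
#     iface.activeLayer.return_value = None  # any other method is a mock.Mock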
|
gpl-2.0
|
skuda/client-python
|
kubernetes/test/test_autoscaling_v2alpha1_api.py
|
1
|
2946
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.apis.autoscaling_v2alpha1_api import AutoscalingV2alpha1Api
class TestAutoscalingV2alpha1Api(unittest.TestCase):
""" AutoscalingV2alpha1Api unit test stubs """
def setUp(self):
self.api = kubernetes.client.apis.autoscaling_v2alpha1_api.AutoscalingV2alpha1Api()
def tearDown(self):
pass
def test_create_namespaced_horizontal_pod_autoscaler(self):
"""
Test case for create_namespaced_horizontal_pod_autoscaler
"""
pass
def test_delete_collection_namespaced_horizontal_pod_autoscaler(self):
"""
Test case for delete_collection_namespaced_horizontal_pod_autoscaler
"""
pass
def test_delete_namespaced_horizontal_pod_autoscaler(self):
"""
Test case for delete_namespaced_horizontal_pod_autoscaler
"""
pass
def test_get_api_resources(self):
"""
Test case for get_api_resources
"""
pass
def test_list_horizontal_pod_autoscaler_for_all_namespaces(self):
"""
Test case for list_horizontal_pod_autoscaler_for_all_namespaces
"""
pass
def test_list_namespaced_horizontal_pod_autoscaler(self):
"""
Test case for list_namespaced_horizontal_pod_autoscaler
"""
pass
def test_patch_namespaced_horizontal_pod_autoscaler(self):
"""
Test case for patch_namespaced_horizontal_pod_autoscaler
"""
pass
def test_patch_namespaced_horizontal_pod_autoscaler_status(self):
"""
Test case for patch_namespaced_horizontal_pod_autoscaler_status
"""
pass
def test_read_namespaced_horizontal_pod_autoscaler(self):
"""
Test case for read_namespaced_horizontal_pod_autoscaler
"""
pass
def test_read_namespaced_horizontal_pod_autoscaler_status(self):
"""
Test case for read_namespaced_horizontal_pod_autoscaler_status
"""
pass
def test_replace_namespaced_horizontal_pod_autoscaler(self):
"""
Test case for replace_namespaced_horizontal_pod_autoscaler
"""
pass
def test_replace_namespaced_horizontal_pod_autoscaler_status(self):
"""
Test case for replace_namespaced_horizontal_pod_autoscaler_status
"""
pass
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
BonexGu/Blik2D-SDK
|
Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/python/kernel_tests/one_hot_op_test.py
|
139
|
12639
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.one_hot_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class OneHotTest(test.TestCase):
def _testOneHot(self,
truth,
use_gpu=False,
expected_err_re=None,
raises=None,
**inputs):
with self.test_session(use_gpu=use_gpu):
if raises is not None:
with self.assertRaises(raises):
array_ops.one_hot(**inputs)
else:
ans = array_ops.one_hot(**inputs)
if expected_err_re is None:
tf_ans = ans.eval()
self.assertAllEqual(tf_ans, truth)
self.assertEqual(tf_ans.shape, ans.get_shape())
else:
with self.assertRaisesOpError(expected_err_re):
ans.eval()
def _testBothOneHot(self, truth, expected_err_re=None, raises=None, **inputs):
self._testOneHot(truth, True, expected_err_re, raises, **inputs)
self._testOneHot(truth, False, expected_err_re, raises, **inputs)
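  # Worked example (comment only), mirroring the vectors used in _testBasic
  # below: one_hot([0, 2, -1, 1], depth=3, on_value=1.0, off_value=-1.0)
  # places on_value at each index along the new axis and off_value elsewhere;
  # the row for index -1 is all off_value because -1 is out of range.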
def _testBasic(self, dtype):
indices = np.asarray([0, 2, -1, 1], dtype=np.int64)
depth = 3
on_value = np.asarray(1.0, dtype=dtype)
off_value = np.asarray(-1.0, dtype=dtype)
truth = np.asarray(
[[1.0, -1.0, -1.0], [-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0],
[-1.0, 1.0, -1.0]],
dtype=dtype)
# axis == -1
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
dtype=dtype,
truth=truth)
# axis == 0
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
axis=0,
dtype=dtype,
truth=truth.T) # Output is transpose version in this case
def _testDefaultBasic(self, dtype):
indices = np.asarray([0, 2, -1, 1], dtype=np.int64)
depth = 3
truth = np.asarray(
[[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
dtype=dtype)
# axis == -1
self._testBothOneHot(indices=indices, depth=depth, truth=truth)
# axis == 0
self._testBothOneHot(
indices=indices, depth=depth, axis=0,
truth=truth.T) # Output is transpose version in this case
def testFloatBasic(self):
self._testBasic(np.float32)
self._testDefaultBasic(np.float32)
def testDoubleBasic(self):
self._testBasic(np.float64)
self._testDefaultBasic(np.float64)
def testInt32Basic(self):
self._testBasic(np.int32)
self._testDefaultBasic(np.int32)
def testInt64Basic(self):
self._testBasic(np.int64)
self._testDefaultBasic(np.int64)
def testComplex64Basic(self):
self._testBasic(np.complex64)
self._testDefaultBasic(np.complex64)
def testComplex128Basic(self):
self._testBasic(np.complex128)
self._testDefaultBasic(np.complex128)
def _testBatch(self, dtype):
indices = np.asarray([[0, 2, -1, 1], [1, 0, 1, -1]], dtype=np.int64)
depth = 3
on_value = np.asarray(1.0, dtype=dtype)
off_value = np.asarray(-1.0, dtype=dtype)
truth = np.asarray(
[[[1.0, -1.0, -1.0], [-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0],
[-1.0, 1.0, -1.0]], [[-1.0, 1.0, -1.0], [1.0, -1.0, -1.0],
[-1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]]],
dtype=dtype)
# axis == -1
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
dtype=dtype,
truth=truth)
# axis == 1
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
axis=1,
dtype=dtype,
truth=[truth[0].T, truth[1].T]) # Do not transpose the batch
def _testDefaultValuesBatch(self, dtype):
indices = np.asarray([[0, 2, -1, 1], [1, 0, 1, -1]], dtype=np.int64)
depth = 3
truth = np.asarray(
[[[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]],
dtype=dtype)
# axis == -1
self._testBothOneHot(indices=indices, depth=depth, dtype=dtype, truth=truth)
# axis == 1
self._testBothOneHot(
indices=indices,
depth=depth,
axis=1,
dtype=dtype,
truth=[truth[0].T, truth[1].T]) # Do not transpose the batch
def _testValueTypeBatch(self, dtype):
indices = np.asarray([[0, 2, -1, 1], [1, 0, 1, -1]], dtype=np.int64)
depth = 3
on_value = np.asarray(1.0, dtype=dtype)
off_value = np.asarray(-1.0, dtype=dtype)
truth = np.asarray(
[[[1.0, -1.0, -1.0], [-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0],
[-1.0, 1.0, -1.0]], [[-1.0, 1.0, -1.0], [1.0, -1.0, -1.0],
[-1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]]],
dtype=dtype)
# axis == -1
self._testBothOneHot(
indices=indices,
on_value=on_value,
off_value=off_value,
depth=depth,
dtype=dtype,
truth=truth)
# axis == 1
self._testBothOneHot(
indices=indices,
on_value=on_value,
off_value=off_value,
depth=depth,
axis=1,
dtype=dtype,
truth=[truth[0].T, truth[1].T]) # Do not transpose the batch
def _testEmpty(self, dtype):
indices = np.zeros((0, 16), dtype=np.int64)
depth = 3
on_value = np.asarray(1.0, dtype=dtype)
off_value = np.asarray(-1.0, dtype=dtype)
truth = np.empty((0, 16, 3), dtype=dtype)
# axis == -1
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
dtype=dtype,
truth=truth)
def testHalfBatch(self):
self._testEmpty(np.float16)
self._testBatch(np.float16)
self._testDefaultValuesBatch(np.float16)
self._testValueTypeBatch(np.float16)
def testFloatBatch(self):
self._testEmpty(np.float32)
self._testBatch(np.float32)
self._testDefaultValuesBatch(np.float32)
self._testValueTypeBatch(np.float32)
def testDoubleBatch(self):
self._testEmpty(np.float64)
self._testBatch(np.float64)
self._testDefaultValuesBatch(np.float64)
self._testValueTypeBatch(np.float64)
def testInt32Batch(self):
self._testEmpty(np.int32)
self._testBatch(np.int32)
self._testDefaultValuesBatch(np.int32)
self._testValueTypeBatch(np.int32)
def testInt64Batch(self):
self._testEmpty(np.int64)
self._testBatch(np.int64)
self._testDefaultValuesBatch(np.int64)
self._testValueTypeBatch(np.int64)
def testComplexBatch(self):
self._testEmpty(np.complex64)
self._testBatch(np.complex64)
# self._testDefaultValuesBatch(np.complex64)
self._testValueTypeBatch(np.complex64)
def testSimpleCases(self):
indices = [0, 1, 2]
depth = 3
truth = np.asarray(
[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], dtype=np.float32)
self._testBothOneHot(indices=indices, depth=depth, truth=truth)
indices = [0, 1, 2]
depth = 3
truth = np.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.int32)
self._testBothOneHot(
indices=indices, depth=depth, dtype=np.int32, truth=truth)
indices = [0, 1, 2]
depth = 3
truth = np.asarray([[1, -1, -1], [-1, 1, -1], [-1, -1, 1]], dtype=np.int32)
self._testBothOneHot(
indices=indices, depth=depth, on_value=1, off_value=-1, truth=truth)
def testSingleValueGiven(self):
# Only on_value provided
indices = [0, 1, 2]
depth = 3
truth = np.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.int32)
self._testBothOneHot(indices=indices, depth=depth, on_value=1, truth=truth)
# Only off_value provided
indices = [0, 1, 2]
depth = 3
truth = np.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float32)
self._testBothOneHot(
indices=indices, depth=depth, off_value=0.0, truth=truth)
def testString(self):
indices = [0, 1, 2]
depth = 3
truth = np.asarray([[b"1.0", b"0.0", b"0.0"], [b"0.0", b"1.0", b"0.0"],
[b"0.0", b"0.0", b"1.0"]])
on_value = np.asarray(b"1.0")
off_value = np.asarray(b"0.0")
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
dtype=dtypes.string,
truth=truth)
on_value = constant_op.constant(b"1.0")
off_value = constant_op.constant(b"0.0")
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
dtype=dtypes.string,
truth=truth)
on_value = b"1.0"
off_value = b"0.0"
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
dtype=dtypes.string,
truth=truth)
def testIndicesTypes(self):
tf_types = [dtypes.uint8, dtypes.int32, dtypes.int64]
np_types = [np.int32, np.int64]
for itype in tf_types + np_types:
      # Note: to keep the tests simple, in the uint8 case the index -1 below
      # wraps around to 255, which is out of the depth range -- just as -1 is
      # for the signed index types.
if itype in tf_types:
indices = constant_op.constant(
[[0, 2, -1, 1], [1, 0, 1, -1]], dtype=itype)
elif itype in np_types:
indices = np.asarray([[0, 2, -1, 1], [1, 0, 1, -1]], dtype=itype)
depth = 3
on_value = np.asarray(1.0, dtype=np.float32)
off_value = np.asarray(-1.0, dtype=np.float32)
truth = np.asarray(
[[[1.0, -1.0, -1.0], [-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0],
[-1.0, 1.0, -1.0]], [[-1.0, 1.0, -1.0], [1.0, -1.0, -1.0],
[-1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]]],
dtype=np.float32)
# axis == -1
self._testBothOneHot(
indices=indices,
on_value=on_value,
off_value=off_value,
depth=depth,
truth=truth)
# axis == 1
self._testBothOneHot(
indices=indices,
on_value=on_value,
off_value=off_value,
depth=depth,
axis=1,
truth=[truth[0].T, truth[1].T]) # Do not transpose the batch
def testPrefixDimOverflow(self):
for itype in [dtypes.int32, dtypes.int64, dtypes.uint8]:
prefix_dim_size = 65536
depth = 2
x = [i % depth for i in range(prefix_dim_size)]
indices = constant_op.constant(x, dtype=itype)
truth = np.zeros((prefix_dim_size, depth), np.float32)
for i in range(prefix_dim_size):
truth[i, x[i]] = 1.0
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=1.0,
off_value=0.0,
truth=truth)
def testOnOffMismatchTypeError(self):
indices = [0, 1, 2]
depth = 3
on_value = np.asarray(1.0, np.float64)
off_value = np.asarray(0.0, np.float32)
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
truth=None,
raises=TypeError)
def testDtypeMismatchTypeError(self):
indices = [0, 1, 2]
depth = 3
on_value = np.asarray(1.0, np.float32)
off_value = np.asarray(0.0, np.float32)
dtype = np.int32
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
dtype=dtype,
truth=None,
raises=TypeError)
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=off_value,
dtype=dtype,
truth=None,
raises=TypeError)
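
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test file): the behaviour the
# tests above exercise can be reproduced directly with array_ops.one_hot,
# which is the op under test. For the default on/off values used in
# _testDefaultBasic:
#
#   indices = [0, 2, -1, 1]
#   array_ops.one_hot(indices, depth=3)
#   # => [[1., 0., 0.],
#   #     [0., 0., 1.],
#   #     [0., 0., 0.],   # -1 is out of range, so the row is all off_value
#   #     [0., 1., 0.]]
#   array_ops.one_hot(indices, depth=3, axis=0)
#   # => the transpose of the matrix above (shape [3, 4]), which is why the
#   #    axis == 0 cases compare against truth.T.
# ---------------------------------------------------------------------------
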
if __name__ == "__main__":
test.main()
|
mit
|
dhoffman34/django
|
django/core/files/uploadedfile.py
|
91
|
4334
|
"""
Classes representing uploaded files.
"""
import errno
import os
from io import BytesIO
from django.conf import settings
from django.core.files.base import File
from django.core.files import temp as tempfile
from django.utils.encoding import force_str
__all__ = ('UploadedFile', 'TemporaryUploadedFile', 'InMemoryUploadedFile',
'SimpleUploadedFile')
class UploadedFile(File):
"""
    An abstract uploaded file (``TemporaryUploadedFile`` and
``InMemoryUploadedFile`` are the built-in concrete subclasses).
An ``UploadedFile`` object behaves somewhat like a file object and
represents some file data that the user submitted with a form.
"""
DEFAULT_CHUNK_SIZE = 64 * 2 ** 10
def __init__(self, file=None, name=None, content_type=None, size=None, charset=None, content_type_extra=None):
super(UploadedFile, self).__init__(file, name)
self.size = size
self.content_type = content_type
self.charset = charset
self.content_type_extra = content_type_extra
def __repr__(self):
return force_str("<%s: %s (%s)>" % (
self.__class__.__name__, self.name, self.content_type))
def _get_name(self):
return self._name
def _set_name(self, name):
# Sanitize the file name so that it can't be dangerous.
if name is not None:
# Just use the basename of the file -- anything else is dangerous.
name = os.path.basename(name)
# File names longer than 255 characters can cause problems on older OSes.
if len(name) > 255:
name, ext = os.path.splitext(name)
ext = ext[:255]
name = name[:255 - len(ext)] + ext
self._name = name
name = property(_get_name, _set_name)
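
# Illustrative sketch (not part of the original module): what the name
# sanitization in UploadedFile._set_name above does. The example values are
# hypothetical.
#
#   f = UploadedFile(name="/tmp/../etc/passwd")
#   f.name            # => "passwd" -- only the basename is kept
#   f = UploadedFile(name="a" * 300 + ".txt")
#   len(f.name)       # => 255 -- the extension is preserved, the stem is cut
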
class TemporaryUploadedFile(UploadedFile):
"""
A file uploaded to a temporary location (i.e. stream-to-disk).
"""
def __init__(self, name, content_type, size, charset, content_type_extra=None):
if settings.FILE_UPLOAD_TEMP_DIR:
file = tempfile.NamedTemporaryFile(suffix='.upload',
dir=settings.FILE_UPLOAD_TEMP_DIR)
else:
file = tempfile.NamedTemporaryFile(suffix='.upload')
super(TemporaryUploadedFile, self).__init__(file, name, content_type, size, charset, content_type_extra)
def temporary_file_path(self):
"""
Returns the full path of this file.
"""
return self.file.name
def close(self):
try:
return self.file.close()
except OSError as e:
if e.errno != errno.ENOENT:
# Means the file was moved or deleted before the tempfile
# could unlink it. Still sets self.file.close_called and
# calls self.file.file.close() before the exception
raise
class InMemoryUploadedFile(UploadedFile):
"""
A file uploaded into memory (i.e. stream-to-memory).
"""
def __init__(self, file, field_name, name, content_type, size, charset, content_type_extra=None):
super(InMemoryUploadedFile, self).__init__(file, name, content_type, size, charset, content_type_extra)
self.field_name = field_name
def open(self, mode=None):
self.file.seek(0)
def chunks(self, chunk_size=None):
self.file.seek(0)
yield self.read()
def multiple_chunks(self, chunk_size=None):
# Since it's in memory, we'll never have multiple chunks.
return False
class SimpleUploadedFile(InMemoryUploadedFile):
"""
A simple representation of a file, which just has content, size, and a name.
"""
def __init__(self, name, content, content_type='text/plain'):
content = content or b''
super(SimpleUploadedFile, self).__init__(BytesIO(content), None, name,
content_type, len(content), None, None)
@classmethod
def from_dict(cls, file_dict):
"""
Creates a SimpleUploadedFile object from
a dictionary object with the following keys:
- filename
- content-type
- content
"""
return cls(file_dict['filename'],
file_dict['content'],
file_dict.get('content-type', 'text/plain'))
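
# Illustrative sketch (not part of the original module): a minimal use of
# SimpleUploadedFile, as typically seen in tests. The file name and content
# below are hypothetical.
#
#   upload = SimpleUploadedFile("hello.txt", b"hello world",
#                               content_type="text/plain")
#   upload.size    # => 11
#   upload = SimpleUploadedFile.from_dict({
#       'filename': 'hello.txt',
#       'content': b'hello world',
#       'content-type': 'text/plain',
#   })
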
|
bsd-3-clause
|
Glottotopia/aagd
|
moin/local/moin/MoinMoin/userprefs/suid.py
|
2
|
3485
|
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - switch user form
@copyright: 2001-2004 Juergen Hermann <[email protected]>,
2003-2007 MoinMoin:ThomasWaldmann
2007 MoinMoin:JohannesBerg
@license: GNU GPL, see COPYING for details.
"""
from MoinMoin import user, util, wikiutil
from MoinMoin.widget import html
from MoinMoin.userprefs import UserPrefBase
class Settings(UserPrefBase):
def __init__(self, request):
""" Initialize setuid settings form. """
UserPrefBase.__init__(self, request)
self.request = request
self._ = request.getText
self.cfg = request.cfg
_ = self._
self.title = _("Switch user")
self.name = 'suid'
def allowed(self):
return (self.request.user.auth_method in self.request.cfg.auth_can_logout and
UserPrefBase.allowed(self) and self.request.user.isSuperUser())
def handle_form(self):
_ = self._
request = self.request
form = request.form
if form.has_key('cancel'):
return
if request.method != 'POST':
return
if not wikiutil.checkTicket(request, form['ticket']):
return
uid = form.get('selected_user', '')
if not uid:
return 'error', _("No user selected")
theuser = user.User(request, uid, auth_method='setuid')
if not theuser or not theuser.exists():
return 'error', _("No user selected")
# set valid to True so superusers can even switch
# to disable accounts
theuser.valid = True
request._setuid_real_user = request.user
# now continue as the other user
request.user = theuser
return _("You can now change the settings of the selected user account; log out to get back to your account.")
def _user_select(self):
options = []
users = user.getUserList(self.request)
current_uid = self.request.user.id
for uid in users:
if uid != current_uid:
name = user.User(self.request, id=uid).name
options.append((uid, name))
options.sort(lambda x, y: cmp(x[1].lower(), y[1].lower()))
if not options:
_ = self._
self._only = True
return _("You are the only user.")
self._only = False
size = min(10, len(options))
return util.web.makeSelection('selected_user', options, current_uid, size=size)
def create_form(self):
""" Create the complete HTML form code. """
_ = self._
form = self.make_form(html.Text(_('As a superuser, you can temporarily '
'assume the identity of another user.')))
ticket = wikiutil.createTicket(self.request)
self.make_row(_('Select User'), [self._user_select()], valign="top")
form.append(html.INPUT(type="hidden", name="ticket", value="%s" % ticket))
if not self._only:
buttons = [html.INPUT(type="submit", name="select_user",
value=_('Select User')),
' ', ]
else:
buttons = []
buttons.append(html.INPUT(type="submit", name="cancel",
value=_('Cancel')))
self.make_row('', buttons)
return unicode(form)
|
mit
|
MediaSapiens/wavesf
|
django/contrib/localflavor/se/forms.py
|
311
|
5623
|
# -*- coding: utf-8 -*-
"""
Swedish specific Form helpers
"""
import re
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.core.validators import EMPTY_VALUES
from django.contrib.localflavor.se.utils import (id_number_checksum,
validate_id_birthday, format_personal_id_number, valid_organisation,
format_organisation_number)
__all__ = ('SECountySelect', 'SEOrganisationNumberField',
'SEPersonalIdentityNumberField', 'SEPostalCodeField')
SWEDISH_ID_NUMBER = re.compile(r'^(?P<century>\d{2})?(?P<year>\d{2})(?P<month>\d{2})(?P<day>\d{2})(?P<sign>[\-+])?(?P<serial>\d{3})(?P<checksum>\d)$')
SE_POSTAL_CODE = re.compile(r'^[1-9]\d{2} ?\d{2}$')
class SECountySelect(forms.Select):
"""
A Select form widget that uses a list of the Swedish counties (län) as its
choices.
The cleaned value is the official county code -- see
http://en.wikipedia.org/wiki/Counties_of_Sweden for a list.
"""
def __init__(self, attrs=None):
from se_counties import COUNTY_CHOICES
super(SECountySelect, self).__init__(attrs=attrs,
choices=COUNTY_CHOICES)
class SEOrganisationNumberField(forms.CharField):
"""
A form field that validates input as a Swedish organisation number
(organisationsnummer).
    It accepts the same input as SEPersonalIdentityNumberField (for sole
    proprietorships, i.e. "enskild firma"). However, co-ordination numbers are
    not accepted.
It also accepts ordinary Swedish organisation numbers with the format
NNNNNNNNNN.
The return value will be YYYYMMDDXXXX for sole proprietors, and NNNNNNNNNN
for other organisations.
"""
default_error_messages = {
'invalid': _('Enter a valid Swedish organisation number.'),
}
def clean(self, value):
value = super(SEOrganisationNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
match = SWEDISH_ID_NUMBER.match(value)
if not match:
raise forms.ValidationError(self.error_messages['invalid'])
gd = match.groupdict()
# Compare the calculated value with the checksum
if id_number_checksum(gd) != int(gd['checksum']):
raise forms.ValidationError(self.error_messages['invalid'])
# First: check if this is a real organisation_number
if valid_organisation(gd):
return format_organisation_number(gd)
        # Is this a sole proprietor (enskild firma)?
try:
birth_day = validate_id_birthday(gd, False)
return format_personal_id_number(birth_day, gd)
except ValueError:
raise forms.ValidationError(self.error_messages['invalid'])
class SEPersonalIdentityNumberField(forms.CharField):
"""
A form field that validates input as a Swedish personal identity number
(personnummer).
The correct formats are YYYYMMDD-XXXX, YYYYMMDDXXXX, YYMMDD-XXXX,
YYMMDDXXXX and YYMMDD+XXXX.
A + indicates that the person is older than 100 years, which will be taken
into consideration when the date is validated.
The checksum will be calculated and checked. The birth date is checked to
be a valid date.
By default, co-ordination numbers (samordningsnummer) will be accepted. To
only allow real personal identity numbers, pass the keyword argument
coordination_number=False to the constructor.
The cleaned value will always have the format YYYYMMDDXXXX.
"""
def __init__(self, coordination_number=True, *args, **kwargs):
self.coordination_number = coordination_number
super(SEPersonalIdentityNumberField, self).__init__(*args, **kwargs)
default_error_messages = {
'invalid': _('Enter a valid Swedish personal identity number.'),
'coordination_number': _('Co-ordination numbers are not allowed.'),
}
def clean(self, value):
value = super(SEPersonalIdentityNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
match = SWEDISH_ID_NUMBER.match(value)
if match is None:
raise forms.ValidationError(self.error_messages['invalid'])
gd = match.groupdict()
# compare the calculated value with the checksum
if id_number_checksum(gd) != int(gd['checksum']):
raise forms.ValidationError(self.error_messages['invalid'])
# check for valid birthday
try:
birth_day = validate_id_birthday(gd)
except ValueError:
raise forms.ValidationError(self.error_messages['invalid'])
# make sure that co-ordination numbers do not pass if not allowed
if not self.coordination_number and int(gd['day']) > 60:
raise forms.ValidationError(self.error_messages['coordination_number'])
return format_personal_id_number(birth_day, gd)
class SEPostalCodeField(forms.RegexField):
"""
A form field that validates input as a Swedish postal code (postnummer).
Valid codes consist of five digits (XXXXX). The number can optionally be
formatted with a space after the third digit (XXX XX).
The cleaned value will never contain the space.
"""
default_error_messages = {
'invalid': _('Enter a Swedish postal code in the format XXXXX.'),
}
def __init__(self, *args, **kwargs):
super(SEPostalCodeField, self).__init__(SE_POSTAL_CODE, *args, **kwargs)
def clean(self, value):
return super(SEPostalCodeField, self).clean(value).replace(' ', '')
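
# Illustrative sketch (not part of the original module): how these fields and
# the county widget might be combined on a plain Django form. The form class
# and field names below are hypothetical.
#
#   class SwedishAddressForm(forms.Form):
#       county = forms.CharField(widget=SECountySelect())
#       person_id = SEPersonalIdentityNumberField(coordination_number=False)
#       postal_code = SEPostalCodeField()
#
#   # Per the docstrings above, a valid personnummer such as "YYMMDD-XXXX" is
#   # cleaned to "YYYYMMDDXXXX", and "123 45" is cleaned to "12345".
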
|
bsd-3-clause
|
yunify/qingcloud-cli
|
qingcloud/cli/iaas_client/actions/s2/create_s2_account.py
|
1
|
3527
|
# =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
import json
from qingcloud.cli.iaas_client.actions.base import BaseAction
class CreateS2AccountAction(BaseAction):
action = 'CreateS2Account'
command = 'create-s2-account'
usage = '%(prog)s -T <account_type> [-n <account_name> ...] [-f <conf_file>]'
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument("-T", "--account-type", dest="account_type",
action="store", type=str, default=None,
help="valid values is NFS or SMB.")
parser.add_argument("-n", "--account-name", dest="account_name",
action="store", type=str, default=None,
help="the name of account.")
parser.add_argument("-s", "--smb-name", dest="smb_name",
action="store", type=str, default=None,
help="the user name of smb.")
parser.add_argument("-S", "--smb-passwd", dest="smb_passwd",
action="store", type=str, default=None,
help="the password of smb.")
parser.add_argument("-N", "--nfs-ipaddr", dest="nfs_ipaddr",
action="store", type=str, default=None,
help="ip address available in NFS.")
parser.add_argument("-g", "--s2-groups", dest="s2_groups",
action="store", type=str, default=None,
help="the JSON form of groups. e.g. '[{\"group_id\":\"s2g-xxxx\", \"rw_flag\": \"rw\"}]'")
parser.add_argument("-o", "--opt-parameters", dest="opt_parameters",
action="store", type=str, default=None,
help="options parameters for NFS.")
parser.add_argument("-d", "--description", dest="description",
action="store", type=str, default=None,
help="the detailed description of the resource.")
@classmethod
def build_directive(cls, options):
for key in ['account_type']:
            # argparse sets every dest (possibly to None), so check the value
            # rather than relying on hasattr().
            if not getattr(options, key, None):
print("error: [%s] should be specified." % key)
return None
directive = {
"account_type": options.account_type,
"account_name": options.account_name,
"smb_name": options.smb_name,
"smb_passwd": options.smb_passwd,
"nfs_ipaddr": options.nfs_ipaddr,
"s2_groups": json.loads(options.s2_groups),
"opt_parameters": options.opt_parameters,
"description": options.description,
}
return directive
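
# Illustrative sketch (not part of the original module): how build_directive
# consumes the parsed options. The Namespace below stands in for the argparse
# result produced by add_ext_arguments; the concrete values are hypothetical.
#
#   from argparse import Namespace
#   options = Namespace(account_type="NFS", account_name="demo",
#                       smb_name=None, smb_passwd=None, nfs_ipaddr=None,
#                       s2_groups='[{"group_id": "s2g-xxxx", "rw_flag": "rw"}]',
#                       opt_parameters=None, description=None)
#   CreateS2AccountAction.build_directive(options)
#   # => {"account_type": "NFS", "account_name": "demo", ...,
#   #     "s2_groups": [{"group_id": "s2g-xxxx", "rw_flag": "rw"}], ...}
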
|
apache-2.0
|
snnn/tensorflow
|
tensorflow/contrib/boosted_trees/python/kernel_tests/prediction_ops_test.py
|
25
|
59018
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the GTFlow prediction Ops.
The tests cover tree traversal and additive models for single and
multi class problems.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import tree_config_pb2
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.contrib.boosted_trees.python.ops import prediction_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
def _append_to_leaf(leaf, c_id, w):
"""Helper method for building tree leaves.
Appends weight contributions for the given class index to a leaf node.
Args:
leaf: leaf node to append to.
c_id: class Id for the weight update.
w: weight contribution value.
"""
leaf.sparse_vector.index.append(c_id)
leaf.sparse_vector.value.append(w)
def _append_multi_values_to_leaf(leaf, c_ids, w):
"""Helper method for building tree leaves with sparse vector of values.
Appends weight contributions for the given class index to a leaf node.
Args:
leaf: leaf node to append to.
c_ids: list of class ids
w: corresponding weight contributions for the classes in c_ids
"""
for i in range(len(c_ids)):
leaf.sparse_vector.index.append(c_ids[i])
leaf.sparse_vector.value.append(w[i])
def _append_multi_values_to_dense_leaf(leaf, w):
"""Helper method for building tree leaves with dense vector of values.
Appends weight contributions to a leaf. w is assumed to be for all classes.
Args:
leaf: leaf node to append to.
w: corresponding weight contributions for all classes.
"""
for x in w:
leaf.vector.value.append(x)
def _set_float_split(split, feat_col, thresh, l_id, r_id, feature_dim_id=None):
"""Helper method for building tree float splits.
Sets split feature column, threshold and children.
Args:
split: split node to update.
feat_col: feature column for the split.
thresh: threshold to split on forming rule x <= thresh.
l_id: left child Id.
r_id: right child Id.
feature_dim_id: dimension of the feature column to be used in the split.
"""
split.feature_column = feat_col
split.threshold = thresh
split.left_id = l_id
split.right_id = r_id
if feature_dim_id is not None:
split.dimension_id = feature_dim_id
def _set_float_oblivious_split(split, feat_col, thresh):
"""Helper method for building tree float splits.
Sets split feature column and threshold.
Args:
split: split node to update.
feat_col: feature column for the split.
thresh: threshold to split on forming rule x <= thresh.
"""
split.feature_column = feat_col
split.threshold = thresh
def _set_categorical_id_split(split, feat_col, feat_id, l_id, r_id):
"""Helper method for building tree categorical id splits.
Sets split feature column, feature id and children.
Args:
split: categorical id split node.
feat_col: feature column for the split.
feat_id: feature id forming rule x == id.
l_id: left child Id.
r_id: right child Id.
"""
split.feature_column = feat_col
split.feature_id = feat_id
split.left_id = l_id
split.right_id = r_id
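
# Illustrative sketch (not part of the original tests): the helpers above are
# composed like this in the tests below to build a minimal one-split tree.
# Node ids follow the order in which nodes are added to the tree proto.
#
#   config = tree_config_pb2.DecisionTreeEnsembleConfig()
#   tree = config.trees.add()
#   _set_float_split(tree.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)
#   _append_to_leaf(tree.nodes.add().leaf, 0, 0.5)    # node 1 (x[0] <= 9.0)
#   _append_to_leaf(tree.nodes.add().leaf, 0, -0.5)   # node 2 (x[0] > 9.0)
#   config.tree_weights.append(1.0)
#   config.tree_metadata.add().is_finalized = True
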
class PredictionOpsTest(test_util.TensorFlowTestCase):
def setUp(self):
"""Sets up the prediction tests.
    Creates a batch of two examples having three dense float, two sparse float
single valued, one sparse float multidimensional and one sparse int
features. The data looks like the following:
|Instance |Dense0 |Dense1 |Dense2 |SparseF0 |SparseF1 |SparseI0 |SparseM
| 0 | 7 | 1 | 2 | -3 | | 9,1 | __, 5.0
| 1 | -2 | 2 | 0.5 | | 4 | | 3, ___
"""
super(PredictionOpsTest, self).setUp()
self._dense_float_tensor1 = np.array([[7.0], [-2.0]])
self._dense_float_tensor2 = np.array([[1.0], [2.0]])
self._dense_float_tensor3 = np.array([[2.0], [0.5]])
self._sparse_float_indices1 = np.array([[0, 0]])
self._sparse_float_values1 = np.array([-3.0])
self._sparse_float_shape1 = np.array([2, 1])
self._sparse_float_indices2 = np.array([[1, 0]])
self._sparse_float_values2 = np.array([4.0])
self._sparse_float_shape2 = np.array([2, 1])
# Multi dimensional sparse float
self._sparse_float_indices_m = np.array([[0, 1], [1, 0]])
self._sparse_float_values_m = np.array([5.0, 3.0])
self._sparse_float_shape_m = np.array([2, 2])
self._sparse_int_indices1 = np.array([[0, 0], [0, 1]])
self._sparse_int_values1 = np.array([9, 1])
self._sparse_int_shape1 = np.array([2, 2])
self._seed = 123
def _get_predictions(self,
tree_ensemble_handle,
learner_config,
apply_dropout=False,
apply_averaging=False,
center_bias=False,
reduce_dim=False):
return prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
self._seed, [self._dense_float_tensor1],
[self._sparse_float_indices1, self._sparse_float_indices2],
[self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1, self._sparse_float_shape2],
[self._sparse_int_indices1], [self._sparse_int_values1],
[self._sparse_int_shape1],
learner_config=learner_config,
apply_dropout=apply_dropout,
apply_averaging=apply_averaging,
center_bias=center_bias,
reduce_dim=reduce_dim)
def _get_predictions_oblivious_case(self,
tree_ensemble_handle,
learner_config,
apply_dropout=False,
apply_averaging=False,
center_bias=False,
reduce_dim=False):
return prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
self._seed, [
self._dense_float_tensor1, self._dense_float_tensor2,
self._dense_float_tensor3
], [], [], [], [], [], [],
learner_config=learner_config,
apply_dropout=apply_dropout,
apply_averaging=apply_averaging,
center_bias=center_bias,
reduce_dim=reduce_dim)
def testEmptyEnsemble(self):
with self.cached_session():
      # Empty tree ensemble.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="empty")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
reduce_dim=True)
self.assertAllEqual([[0], [0]], result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
def testBiasEnsembleSingleClass(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="bias")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
reduce_dim=True)
self.assertAllClose([[-0.4], [-0.4]], result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
def testBiasEnsembleMultiClass(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
leaf = tree.nodes.add().leaf
_append_to_leaf(leaf, 0, -0.4)
_append_to_leaf(leaf, 1, 0.9)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="multiclass")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 3
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
reduce_dim=True)
self.assertAllClose([[-0.4, 0.9], [-0.4, 0.9]], result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
def testFullEnsembleSingleClass(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree.
tree1 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree1.nodes.add().leaf, 0, -0.4)
# Depth 3 tree.
tree2 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)
_set_float_split(tree2.nodes.add()
.sparse_float_binary_split_default_left.split, 0, -20.0,
3, 4)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
_append_to_leaf(tree2.nodes.add().leaf, 0, 1.2)
_set_categorical_id_split(tree2.nodes.add().categorical_id_binary_split,
0, 9, 5, 6)
_append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.7)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="full_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
reduce_dim=True)
# The first example will get bias -0.4 from first tree and
# leaf 4 payload of -0.9 hence -1.3, the second example will
# get the same bias -0.4 and leaf 3 payload (sparse feature missing)
# of 1.2 hence 0.8.
self.assertAllClose([[-1.3], [0.8]], result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
def testObliviousEnsemble(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree.
tree1 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree1.nodes.add().leaf, 0, -0.4)
# Depth 3 tree.
tree2 = tree_ensemble_config.trees.add()
_set_float_oblivious_split(
tree2.nodes.add().oblivious_dense_float_binary_split, 0, 5.0)
_set_float_oblivious_split(
tree2.nodes.add().oblivious_dense_float_binary_split, 1, 3.0)
_set_float_oblivious_split(
tree2.nodes.add().oblivious_dense_float_binary_split, 2, 1.0)
for i in range(1, 9):
_append_to_leaf(tree2.nodes.add().leaf, 0, i / 10.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="full_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
result, dropout_info = self._get_predictions_oblivious_case(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
reduce_dim=True)
# The first example will get bias -0.4 from first tree and 0.6 from
# the 5th leaf of the second tree corresponding to node_id = 8, hence a
# prediction of 0.2.
# The second example will get bias -0.4 and 0.1 from the 0th leaf of the
# second tree corresponding to node_id = 3, hence a prediction of -0.3
self.assertAllClose([[0.2], [-0.3]], result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
def testFullEnsembleWithMultidimensionalSparseSingleClass(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree.
tree1 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree1.nodes.add().leaf, 0, -0.4)
# Depth 3 tree.
tree2 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
# Use feature column 2 (sparse multidimensional), split on first value
# node 0.
_set_float_split(
tree2.nodes.add().sparse_float_binary_split_default_right.split,
2,
7.0,
1,
2,
feature_dim_id=0)
# Leafs split on second dimension of sparse multidimensional feature.
# Node 1.
_set_float_split(
tree2.nodes.add().sparse_float_binary_split_default_left.split,
2,
4.5,
3,
4,
feature_dim_id=1)
# Node 2.
_set_float_split(
tree2.nodes.add().sparse_float_binary_split_default_right.split,
2,
9,
5,
6,
feature_dim_id=1)
# Node 3.
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.6)
# Node 4.
_append_to_leaf(tree2.nodes.add().leaf, 0, 1.3)
# Node 5.
_append_to_leaf(tree2.nodes.add().leaf, 0, -0.1)
# Node 6.
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.8)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="full_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
result, dropout_info = prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
self._seed, [self._dense_float_tensor1], [
self._sparse_float_indices1, self._sparse_float_indices2,
self._sparse_float_indices_m
], [
self._sparse_float_values1, self._sparse_float_values2,
self._sparse_float_values_m
], [
self._sparse_float_shape1, self._sparse_float_shape2,
self._sparse_float_shape_m
], [self._sparse_int_indices1], [self._sparse_int_values1],
[self._sparse_int_shape1],
learner_config=learner_config.SerializeToString(),
apply_dropout=False,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
# The first example will get bias -0.4 from first tree and
# leaf 5 payload of -0.1 hence -0.5, the second example will
# get the same bias -0.4 and leaf 3 payload (0.6) hence 0.2
self.assertAllClose([[-0.5], [0.2]], result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
def testExcludeNonFinalTree(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree.
tree1 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree1.nodes.add().leaf, 0, -0.4)
# Depth 3 tree.
tree2 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = False
_set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)
_set_float_split(tree2.nodes.add()
.sparse_float_binary_split_default_left.split, 0, -20.0,
3, 4)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
_append_to_leaf(tree2.nodes.add().leaf, 0, 1.2)
_set_categorical_id_split(tree2.nodes.add().categorical_id_binary_split,
0, 9, 5, 6)
_append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.7)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="full_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.growing_mode = learner_pb2.LearnerConfig.WHOLE_TREE
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
reduce_dim=True)
# All the examples should get only the bias since the second tree is
# non-finalized
self.assertAllClose([[-0.4], [-0.4]], result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
def testIncludeNonFinalTree(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree.
tree1 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree1.nodes.add().leaf, 0, -0.4)
# Depth 3 tree.
tree2 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = False
_set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)
_set_float_split(tree2.nodes.add()
.sparse_float_binary_split_default_left.split, 0, -20.0,
3, 4)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
_append_to_leaf(tree2.nodes.add().leaf, 0, 1.2)
_set_categorical_id_split(tree2.nodes.add().categorical_id_binary_split,
0, 9, 5, 6)
_append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.7)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="full_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
reduce_dim=True)
# The first example will get bias -0.4 from first tree and
# leaf 4 payload of -0.9 hence -1.3, the second example will
# get the same bias -0.4 and leaf 3 payload (sparse feature missing)
# of 1.2 hence 0.8. Note that the non-finalized tree is included.
self.assertAllClose([[-1.3], [0.8]], result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
def testMetadataMissing(self):
    # Sometimes we want to do prediction on trees that have not been added to
    # the ensemble yet and therefore have no tree_metadata.
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree.
tree1 = tree_ensemble_config.trees.add()
_append_to_leaf(tree1.nodes.add().leaf, 0, -0.4)
# Depth 3 tree.
tree2 = tree_ensemble_config.trees.add()
# We are not setting the tree_ensemble_config.tree_metadata in this test.
_set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)
_set_float_split(tree2.nodes.add()
.sparse_float_binary_split_default_left.split, 0, -20.0,
3, 4)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
_append_to_leaf(tree2.nodes.add().leaf, 0, 1.2)
_set_categorical_id_split(tree2.nodes.add().categorical_id_binary_split,
0, 9, 5, 6)
_append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.7)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="full_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
reduce_dim=True)
# The first example will get bias -0.4 from first tree and
# leaf 4 payload of -0.9 hence -1.3, the second example will
# get the same bias -0.4 and leaf 3 payload (sparse feature missing)
# of 1.2 hence 0.8.
self.assertAllClose([[-1.3], [0.8]], result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
  # For the TREE_PER_CLASS strategy, the predictions size is num_classes - 1.
def testFullEnsembleMultiClassTreePerClassStrategy(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree only for second class.
tree1 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree1.nodes.add().leaf, 1, -0.2)
# Depth 2 tree.
tree2 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_set_float_split(tree2.nodes.add()
.sparse_float_binary_split_default_right.split, 1, 4.0,
1, 2)
_set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 3, 4)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
_append_to_leaf(tree2.nodes.add().leaf, 1, 1.2)
_append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="ensemble_multi_class")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 3
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.TREE_PER_CLASS)
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
reduce_dim=True)
# The first example will get bias class 1 -0.2 from first tree and
# leaf 2 payload (sparse feature missing) of 0.5 hence [0.5, -0.2],
# the second example will get the same bias class 1 -0.2 and leaf 3
# payload of class 1 1.2 hence [0.0, 1.0].
self.assertAllClose([[0.5, -0.2], [0, 1.0]], result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
  # For all non-tree-per-class multiclass handling strategies, the predictions
  # vector will have the size of the number of classes.
  # This test covers the case where leaves store SPARSE weights (class id and
  # contribution).
def testFullEnsembleMultiNotClassTreePerClassStrategySparseVector(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree only for second class.
tree1 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree1.nodes.add().leaf, 1, -0.2)
# Depth 2 tree.
tree2 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_set_float_split(tree2.nodes.add()
.sparse_float_binary_split_default_right.split, 1, 4.0,
1, 2)
_set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 3, 4)
_append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
_append_multi_values_to_leaf(tree2.nodes.add().leaf, [1, 2], [1.2, -0.7])
_append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="ensemble_multi_class")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 3
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.FULL_HESSIAN)
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
reduce_dim=False)
      # The first example will get bias -0.2 for class 1 from the first tree
      # and leaf 2 payload (sparse feature missing) of 0.5 for class 0, hence
      # [0.5, -0.2, 0.0]; the second example will get the same bias -0.2 for
      # class 1 plus leaf 3 payload of 1.2 for class 1 and -0.7 for class 2,
      # hence [0.0, 1.0, -0.7].
self.assertAllClose([[0.5, -0.2, 0.0], [0, 1.0, -0.7]], result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
  # For all non-tree-per-class multiclass handling strategies, the predictions
  # vector will have the size of the number of classes.
  # This test covers the case where leaves store DENSE weights (a weight for
  # each class).
def testFullEnsembleMultiNotClassTreePerClassStrategyDenseVector(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Bias tree only for second class.
tree1 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_multi_values_to_dense_leaf(tree1.nodes.add().leaf, [0, -0.2, -2])
# Depth 2 tree.
tree2 = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_set_float_split(tree2.nodes.add()
.sparse_float_binary_split_default_right.split, 1, 4.0,
1, 2)
_set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 3, 4)
_append_multi_values_to_dense_leaf(tree2.nodes.add().leaf, [0.5, 0, 0])
_append_multi_values_to_dense_leaf(tree2.nodes.add().leaf, [0, 1.2, -0.7])
_append_multi_values_to_dense_leaf(tree2.nodes.add().leaf, [-0.9, 0, 0])
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="ensemble_multi_class")
resources.initialize_resources(resources.shared_resources()).run()
# Prepare learner config.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 3
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.FULL_HESSIAN)
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
reduce_dim=False)
      # The first example will get bias -0.2 for class 1 and -2 for class 2
      # from the first tree, plus leaf 2 payload (sparse feature missing) of
      # 0.5 for class 0, hence [0.5, -0.2, -2.0]; the second example will get
      # the same bias plus leaf 3 payload of 1.2 for class 1 and -0.7 for
      # class 2, hence [0.0, 1.0, -2.7].
self.assertAllClose([[0.5, -0.2, -2.0], [0, 1.0, -2.7]], result.eval())
# Empty dropout.
self.assertAllEqual([[], []], dropout_info.eval())
def testDropout(self):
with self.cached_session():
      # Empty tree ensemble.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      # Add 999 trees with some weights.
for i in range(0, 999):
tree = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
tree_ensemble_config.tree_weights.append(i + 1)
# Prepare learner/dropout config.
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.dropout.dropout_probability = 0.5
learner_config.learning_rate_tuner.dropout.learning_rate = 1.0
learner_config.num_classes = 2
# Apply dropout.
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="existing")
resources.initialize_resources(resources.shared_resources()).run()
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
apply_dropout=True,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
# We expect approx 500 trees were dropped.
dropout_info = dropout_info.eval()
self.assertIn(dropout_info[0].size, range(400, 601))
self.assertEqual(dropout_info[0].size, dropout_info[1].size)
for i in range(dropout_info[0].size):
dropped_index = dropout_info[0][i]
dropped_weight = dropout_info[1][i]
# We constructed the trees so tree number + 1 is the tree weight, so
# we can check here the weights for dropped trees.
self.assertEqual(dropped_index + 1, dropped_weight)
# Don't apply dropout.
result_no_dropout, no_dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
apply_dropout=False,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
self.assertEqual(result.eval().size, result_no_dropout.eval().size)
for i in range(result.eval().size):
self.assertNotEqual(result.eval()[i], result_no_dropout.eval()[i])
# We expect none of the trees were dropped.
self.assertAllEqual([[], []], no_dropout_info.eval())
def testDropoutCenterBiasNoGrowingMeta(self):
# This is for normal non-batch mode where ensemble does not contain the tree
# that is being built currently.
num_trees = 10
with self.cached_session():
# Empty tree ensemble.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Add 10 trees with some weights.
for i in range(0, num_trees):
tree = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
tree_ensemble_config.tree_weights.append(i + 1)
# Prepare learner/dropout config.
learner_config = learner_pb2.LearnerConfig()
# Drop all the trees.
learner_config.learning_rate_tuner.dropout.dropout_probability = 1.0
learner_config.learning_rate_tuner.dropout.learning_rate = 1.0
learner_config.num_classes = 2
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="existing")
resources.initialize_resources(resources.shared_resources()).run()
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
apply_dropout=True,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
result_center, dropout_info_center = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
apply_dropout=True,
apply_averaging=False,
center_bias=True,
reduce_dim=True)
dropout_info = dropout_info.eval()
dropout_info_center = dropout_info_center.eval()
# With centering, the bias tree is not dropped.
num_dropped = dropout_info[0].size
self.assertEqual(num_dropped, num_trees)
num_dropped_center = dropout_info_center[0].size
self.assertEqual(num_dropped_center, num_trees - 1)
result = result.eval()
result_center = result_center.eval()
for i in range(result.size):
self.assertNotEqual(result[i], result_center[i])
      # The first dropped tree is the bias tree (tree 0).
self.assertEqual(0, dropout_info[0][0])
# Last dropped tree is the last tree.
self.assertEqual(num_trees - 1, dropout_info[0][num_dropped - 1])
      # With bias centering, the first dropped tree is tree 1.
self.assertEqual(1, dropout_info_center[0][0])
# Last dropped tree is the last tree.
self.assertEqual(num_trees - 1, dropout_info_center[0][num_dropped_center
- 1])
def testDropoutCenterBiasWithGrowingMeta(self):
# This is batch mode where ensemble already contains the tree that we are
# building. This tree should never be dropped.
num_trees = 10
with self.cached_session():
# Empty tree ensemble.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Add 10 trees with some weights.
for i in range(0, num_trees):
tree = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
tree_ensemble_config.tree_weights.append(i + 1)
# Add growing metadata to indicate batch mode.
tree_ensemble_config.growing_metadata.num_trees_attempted = num_trees
tree_ensemble_config.growing_metadata.num_layers_attempted = num_trees
# Prepare learner/dropout config.
learner_config = learner_pb2.LearnerConfig()
# Drop all the trees.
learner_config.learning_rate_tuner.dropout.dropout_probability = 1.0
learner_config.learning_rate_tuner.dropout.learning_rate = 1.0
learner_config.num_classes = 2
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="existing")
resources.initialize_resources(resources.shared_resources()).run()
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
apply_dropout=True,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
result_center, dropout_info_center = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
apply_dropout=True,
apply_averaging=False,
center_bias=True,
reduce_dim=True)
dropout_info = dropout_info.eval()
dropout_info_center = dropout_info_center.eval()
      # The last tree is never dropped; the bias tree can be dropped.
num_dropped = dropout_info[0].size
self.assertEqual(num_dropped, num_trees - 1)
num_dropped_center = dropout_info_center[0].size
self.assertEqual(num_dropped_center, num_trees - 2)
result = result.eval()
result_center = result_center.eval()
for i in range(result.size):
self.assertNotEqual(result[i], result_center[i])
      # The first dropped tree is the bias tree (tree 0).
self.assertEqual(0, dropout_info[0][0])
# Last dropped tree is not the last tree (not tree num_trees-1).
self.assertNotEqual(num_trees - 1, dropout_info[0][num_dropped - 1])
      # With bias centering, the first dropped tree is tree 1.
self.assertEqual(1, dropout_info_center[0][0])
# Last dropped tree is not the last tree in ensemble.
self.assertNotEqual(num_trees - 1,
dropout_info_center[0][num_dropped_center - 1])
def testDropoutSeed(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      # Add 999 trees with some weights.
for i in range(0, 999):
tree = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
tree_ensemble_config.tree_weights.append(i + 1)
# Prepare learner/dropout config.
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.dropout.dropout_probability = 0.5
learner_config.learning_rate_tuner.dropout.learning_rate = 1.0
learner_config.num_classes = 2
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="empty")
resources.initialize_resources(resources.shared_resources()).run()
_, dropout_info_1 = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
apply_dropout=True,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
_, dropout_info_2 = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
apply_dropout=True,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
# Different seed.
_, dropout_info_3 = prediction_ops.gradient_trees_prediction(
tree_ensemble_handle,
112314, [self._dense_float_tensor1],
[self._sparse_float_indices1, self._sparse_float_indices2],
[self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1, self._sparse_float_shape2],
[self._sparse_int_indices1], [self._sparse_int_values1],
[self._sparse_int_shape1],
learner_config=learner_config.SerializeToString(),
apply_dropout=True,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
# First seed with centering bias.
_, dropout_info_4 = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
apply_dropout=True,
apply_averaging=False,
center_bias=True,
reduce_dim=True)
# The same seed returns the same results.
self.assertAllEqual(dropout_info_1.eval(), dropout_info_2.eval())
      # Different seeds give different results.
self.assertNotEqual(dropout_info_3.eval().shape,
dropout_info_2.eval().shape)
      # With centering bias, the same seed does not give the same result.
self.assertNotEqual(dropout_info_4.eval(), dropout_info_1.eval())
# With centering bias has 1 less tree dropped (bias tree is not dropped).
self.assertEqual(
len(dropout_info_4.eval()[0]) + 1, len(dropout_info_1.eval()[0]))
def testDropOutZeroProb(self):
with self.cached_session():
# Empty tree ensemble.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      # Add 999 trees with some weights.
for i in range(0, 999):
tree = tree_ensemble_config.trees.add()
tree_ensemble_config.tree_metadata.add().is_finalized = True
_append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
tree_ensemble_config.tree_weights.append(i + 1)
# Dropout with 0 probability.
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.dropout.dropout_probability = 0.0
learner_config.learning_rate_tuner.dropout.learning_rate = 1.0
learner_config.num_classes = 2
# Apply dropout, but expect nothing dropped.
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="existing")
resources.initialize_resources(resources.shared_resources()).run()
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
apply_dropout=True,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
result_no_dropout, _ = self._get_predictions(
tree_ensemble_handle,
learner_config=learner_config.SerializeToString(),
apply_dropout=False,
apply_averaging=False,
center_bias=False,
reduce_dim=True)
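# dropout_info appears to hold two rows -- the indices of the dropped trees
# and their original weights -- so a zero-probability dropout should come
# back as two empty rows, which is what the assertion below checks.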
self.assertAllEqual([[], []], dropout_info.eval())
self.assertAllClose(result.eval(), result_no_dropout.eval())
def testAveragingAllTrees(self):
with self.cached_session():
# Empty tree ensemble.
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
adjusted_tree_ensemble_config = (
tree_config_pb2.DecisionTreeEnsembleConfig())
# Add 100 trees with some weights.
# When averaging is applied, the tree weights effectively become
# 1, 99/100, 98/100, ..., 1/100, so let's create the adjusted ensemble
# with those weights too.
total_num = 100
for i in range(0, total_num):
tree = tree_ensemble_config.trees.add()
_append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
tree_ensemble_config.tree_metadata.add().is_finalized = True
tree_ensemble_config.tree_weights.append(1.0)
# This is how the weight will look after averaging
copy_tree = adjusted_tree_ensemble_config.trees.add()
_append_to_leaf(copy_tree.nodes.add().leaf, 0, -0.4)
adjusted_tree_ensemble_config.tree_metadata.add().is_finalized = True
adjusted_tree_ensemble_config.tree_weights.append(
1.0 * (total_num - i) / total_num)
# Prepare learner config WITH AVERAGING.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.averaging_config.average_last_percent_trees = 1.0
# No averaging config.
learner_config_no_averaging = learner_pb2.LearnerConfig()
learner_config_no_averaging.num_classes = 2
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="existing")
# This is how our ensemble will "look" during averaging
adjusted_tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=adjusted_tree_ensemble_config.SerializeToString(
),
name="adjusted")
resources.initialize_resources(resources.shared_resources()).run()
# Do averaging.
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config.SerializeToString(),
apply_averaging=True,
reduce_dim=True)
pattern_result, pattern_dropout_info = self._get_predictions(
adjusted_tree_ensemble_handle,
learner_config_no_averaging.SerializeToString(),
apply_averaging=False,
reduce_dim=True)
self.assertAllEqual(result.eval(), pattern_result.eval())
self.assertAllEqual(dropout_info.eval(), pattern_dropout_info.eval())
def testAveragingSomeTrees(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
adjusted_tree_ensemble_config = (
tree_config_pb2.DecisionTreeEnsembleConfig())
# Add 100 trees with some weights.
total_num = 100
num_averaged = 25
j = 0
for i in range(0, total_num):
tree = tree_ensemble_config.trees.add()
_append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
tree_ensemble_config.tree_metadata.add().is_finalized = True
tree_ensemble_config.tree_weights.append(1.0)
# This is how the weight will look after averaging - we are adjusting
# the weights of the last 25 trees
copy_tree = adjusted_tree_ensemble_config.trees.add()
_append_to_leaf(copy_tree.nodes.add().leaf, 0, -0.4)
adjusted_tree_ensemble_config.tree_metadata.add().is_finalized = True
if i >= 75:
adjusted_tree_ensemble_config.tree_weights.append(
1.0 * (num_averaged - j) / num_averaged)
j += 1
else:
adjusted_tree_ensemble_config.tree_weights.append(1.0)
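# After this loop the adjusted weights are: trees 0..74 keep weight 1.0 and
# trees 75..99 get 25/25, 24/25, ..., 1/25, mirroring what averaging over
# the last 25 trees should produce.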
# Prepare learner config WITH AVERAGING.
learner_config_1 = learner_pb2.LearnerConfig()
learner_config_1.num_classes = 2
learner_config_1.averaging_config.average_last_percent_trees = 0.25
# This is equivalent.
learner_config_2 = learner_pb2.LearnerConfig()
learner_config_2.num_classes = 2
learner_config_2.averaging_config.average_last_n_trees = 25
# No averaging config.
learner_config_no_averaging = learner_pb2.LearnerConfig()
learner_config_no_averaging.num_classes = 2
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="existing")
# This is how our ensemble will "look" during averaging
adjusted_tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=adjusted_tree_ensemble_config.SerializeToString(
),
name="adjusted")
resources.initialize_resources(resources.shared_resources()).run()
result_1, dropout_info_1 = self._get_predictions(
tree_ensemble_handle,
learner_config_1.SerializeToString(),
apply_averaging=True,
reduce_dim=True)
result_2, dropout_info_2 = self._get_predictions(
tree_ensemble_handle,
learner_config_2.SerializeToString(),
apply_averaging=True,
reduce_dim=True)
pattern_result, pattern_dropout_info = self._get_predictions(
adjusted_tree_ensemble_handle,
learner_config_no_averaging.SerializeToString(),
apply_averaging=False,
reduce_dim=True)
self.assertAllEqual(result_1.eval(), pattern_result.eval())
self.assertAllEqual(result_2.eval(), pattern_result.eval())
self.assertAllEqual(dropout_info_1.eval(), pattern_dropout_info.eval())
self.assertAllEqual(dropout_info_2.eval(), pattern_dropout_info.eval())
def testAverageMoreThanNumTreesExist(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
adjusted_tree_ensemble_config = (
tree_config_pb2.DecisionTreeEnsembleConfig())
# When we ask to average over more trees than exist, averaging is done
# across all trees.
total_num = 100
for i in range(0, total_num):
tree = tree_ensemble_config.trees.add()
_append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
tree_ensemble_config.tree_metadata.add().is_finalized = True
tree_ensemble_config.tree_weights.append(1.0)
# This is how the weight will look after averaging
copy_tree = adjusted_tree_ensemble_config.trees.add()
_append_to_leaf(copy_tree.nodes.add().leaf, 0, -0.4)
adjusted_tree_ensemble_config.tree_metadata.add().is_finalized = True
adjusted_tree_ensemble_config.tree_weights.append(
1.0 * (total_num - i) / total_num)
# Prepare learner config WITH AVERAGING.
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
# We have only 100 trees but we ask to average over 250.
learner_config.averaging_config.average_last_n_trees = 250
# No averaging config.
learner_config_no_averaging = learner_pb2.LearnerConfig()
learner_config_no_averaging.num_classes = 2
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="existing")
# This is how our ensemble will "look" during averaging
adjusted_tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=adjusted_tree_ensemble_config.SerializeToString(
),
name="adjusted")
resources.initialize_resources(resources.shared_resources()).run()
result, dropout_info = self._get_predictions(
tree_ensemble_handle,
learner_config.SerializeToString(),
apply_averaging=True,
reduce_dim=True)
pattern_result, pattern_dropout_info = self._get_predictions(
adjusted_tree_ensemble_handle,
learner_config_no_averaging.SerializeToString(),
apply_averaging=False,
reduce_dim=True)
self.assertAllEqual(result.eval(), pattern_result.eval())
self.assertAllEqual(dropout_info.eval(), pattern_dropout_info.eval())
class PartitionExamplesOpsTest(test_util.TensorFlowTestCase):
def setUp(self):
"""Sets up the prediction tests.
Create a batch of two examples having three dense float, two sparse float
and one sparse int features.
The data looks like the following:
|Instance |Dense0 |Dense1 |Dense2 |SparseF0 |SparseF1 |SparseI0 |
| 0 | 7 | 1 | 2 | -3 | | 9,1 |
| 1 | -2 | 2 | 0.5 | | 4 | |
"""
super(PartitionExamplesOpsTest, self).setUp()
self._dense_float_tensor1 = np.array([[7.0], [-2.0]])
self._dense_float_tensor2 = np.array([[1.0], [2.0]])
self._dense_float_tensor3 = np.array([[2.0], [0.5]])
self._sparse_float_indices1 = np.array([[0, 0]])
self._sparse_float_values1 = np.array([-3.0])
self._sparse_float_shape1 = np.array([2, 1])
self._sparse_float_indices2 = np.array([[1, 0]])
self._sparse_float_values2 = np.array([4.0])
self._sparse_float_shape2 = np.array([2, 1])
self._sparse_int_indices1 = np.array([[0, 0], [0, 1]])
self._sparse_int_values1 = np.array([9, 1])
self._sparse_int_shape1 = np.array([2, 2])
def testEnsembleEmpty(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="full_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
result = prediction_ops.gradient_trees_partition_examples(
tree_ensemble_handle, [self._dense_float_tensor1],
[self._sparse_float_indices1, self._sparse_float_indices2],
[self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1, self._sparse_float_shape2],
[self._sparse_int_indices1], [self._sparse_int_values1],
[self._sparse_int_shape1])
self.assertAllEqual([0, 0], result.eval())
def testTreeNonFinalized(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Depth 3 tree.
tree1 = tree_ensemble_config.trees.add()
_set_float_split(tree1.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)
_set_float_split(tree1.nodes.add()
.sparse_float_binary_split_default_left.split, 0, -20.0,
3, 4)
_append_to_leaf(tree1.nodes.add().leaf, 0, 0.2)
_append_to_leaf(tree1.nodes.add().leaf, 0, 0.3)
_set_categorical_id_split(tree1.nodes.add().categorical_id_binary_split,
0, 9, 5, 6)
_append_to_leaf(tree1.nodes.add().leaf, 0, 0.5)
_append_to_leaf(tree1.nodes.add().leaf, 0, 0.6)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_metadata.add().is_finalized = False
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="full_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
result = prediction_ops.gradient_trees_partition_examples(
tree_ensemble_handle, [self._dense_float_tensor1],
[self._sparse_float_indices1, self._sparse_float_indices2],
[self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1, self._sparse_float_shape2],
[self._sparse_int_indices1], [self._sparse_int_values1],
[self._sparse_int_shape1])
self.assertAllEqual([5, 3], result.eval())
def testTreeFinalized(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Depth 3 tree.
tree1 = tree_ensemble_config.trees.add()
_set_float_split(tree1.nodes.add().dense_float_binary_split, 0, 9.0, 1, 2)
_set_float_split(tree1.nodes.add()
.sparse_float_binary_split_default_left.split, 0, -20.0,
3, 4)
_append_to_leaf(tree1.nodes.add().leaf, 0, 0.2)
_append_to_leaf(tree1.nodes.add().leaf, 0, 0.3)
_set_categorical_id_split(tree1.nodes.add().categorical_id_binary_split,
0, 9, 5, 6)
_append_to_leaf(tree1.nodes.add().leaf, 0, 0.5)
_append_to_leaf(tree1.nodes.add().leaf, 0, 0.6)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_metadata.add().is_finalized = True
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="full_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
result = prediction_ops.gradient_trees_partition_examples(
tree_ensemble_handle, [self._dense_float_tensor1],
[self._sparse_float_indices1, self._sparse_float_indices2],
[self._sparse_float_values1, self._sparse_float_values2],
[self._sparse_float_shape1, self._sparse_float_shape2],
[self._sparse_int_indices1], [self._sparse_int_values1],
[self._sparse_int_shape1])
self.assertAllEqual([0, 0], result.eval())
def testObliviousTreeNonFinalized(self):
with self.cached_session():
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
# Depth 3 tree.
tree1 = tree_ensemble_config.trees.add()
_set_float_oblivious_split(
tree1.nodes.add().oblivious_dense_float_binary_split, 0, 5.0)
_set_float_oblivious_split(
tree1.nodes.add().oblivious_dense_float_binary_split, 1, 3.0)
_set_float_oblivious_split(
tree1.nodes.add().oblivious_dense_float_binary_split, 2, 1.0)
for i in range(1, 9):
_append_to_leaf(tree1.nodes.add().leaf, 0, i / 10.0)
tree_ensemble_config.tree_weights.append(1.0)
tree_ensemble_config.tree_metadata.add().is_finalized = False
tree_ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0,
tree_ensemble_config=tree_ensemble_config.SerializeToString(),
name="full_ensemble")
resources.initialize_resources(resources.shared_resources()).run()
result = prediction_ops.gradient_trees_partition_examples(
tree_ensemble_handle, [
self._dense_float_tensor1,
self._dense_float_tensor2,
self._dense_float_tensor3
], [], [], [], [], [], [])
# The first example goes right, left, right in the tree and the second
# example goes left, left, left. The depth-3 oblivious tree has 3 split
# nodes before its leaves, and the leaf offset is the binary number formed
# by the decisions (right=1, left=0), so the partition ids are:
# First example: 3 + 0b101 = 3 + 5 = 8
# Second example: 3 + 0b000 = 3 + 0 = 3
self.assertAllEqual([8, 3], result.eval())
if __name__ == "__main__":
googletest.main()
|
apache-2.0
|
rocknamx8/rocknamx8.github.io
|
node_modules/node-gyp/gyp/pylib/gyp/mac_tool.py
|
1569
|
23354
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions to perform Xcode-style build steps.
These functions are executed via gyp-mac-tool when using the Makefile generator.
"""
import fcntl
import fnmatch
import glob
import json
import os
import plistlib
import re
import shutil
import string
import subprocess
import sys
import tempfile
def main(args):
executor = MacTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class MacTool(object):
"""This class performs all the Mac tooling steps. The methods can either be
executed directly, or dispatched from an argument list."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
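# For example, _CommandifyName('copy-bundle-resource') returns
# 'CopyBundleResource', so Dispatch(['copy-bundle-resource', src, dest,
# 'False']) ends up calling ExecCopyBundleResource(src, dest, 'False')
# (src and dest here are hypothetical paths).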
def ExecCopyBundleResource(self, source, dest, convert_to_binary):
"""Copies a resource file to the bundle/Resources directory, performing any
necessary compilation on each resource."""
extension = os.path.splitext(source)[1].lower()
if os.path.isdir(source):
# Copy tree.
# TODO(thakis): This copies file attributes like mtime, while the
# single-file branch below doesn't. This should probably be changed to
# be consistent with the single-file branch.
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(source, dest)
elif extension == '.xib':
return self._CopyXIBFile(source, dest)
elif extension == '.storyboard':
return self._CopyXIBFile(source, dest)
elif extension == '.strings':
self._CopyStringsFile(source, dest, convert_to_binary)
else:
shutil.copy(source, dest)
def _CopyXIBFile(self, source, dest):
"""Compiles a XIB file with ibtool into a binary plist in the bundle."""
# ibtool sometimes crashes with relative paths. See crbug.com/314728.
base = os.path.dirname(os.path.realpath(__file__))
if os.path.relpath(source):
source = os.path.join(base, source)
if os.path.relpath(dest):
dest = os.path.join(base, dest)
args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices',
'--output-format', 'human-readable-text', '--compile', dest, source]
ibtool_section_re = re.compile(r'/\*.*\*/')
ibtool_re = re.compile(r'.*note:.*is clipping its content')
ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE)
current_section_header = None
for line in ibtoolout.stdout:
if ibtool_section_re.match(line):
current_section_header = line
elif not ibtool_re.match(line):
if current_section_header:
sys.stdout.write(current_section_header)
current_section_header = None
sys.stdout.write(line)
return ibtoolout.returncode
def _ConvertToBinary(self, dest):
subprocess.check_call([
'xcrun', 'plutil', '-convert', 'binary1', '-o', dest, dest])
def _CopyStringsFile(self, source, dest, convert_to_binary):
"""Copies a .strings file using iconv to reconvert the input into UTF-16."""
input_code = self._DetectInputEncoding(source) or "UTF-8"
# Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call
# CFPropertyListCreateFromXMLData() behind the scenes; at least it prints
# CFPropertyListCreateFromXMLData(): Old-style plist parser: missing
# semicolon in dictionary.
# on invalid files. Do the same kind of validation.
import CoreFoundation
s = open(source, 'rb').read()
d = CoreFoundation.CFDataCreate(None, s, len(s))
_, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None)
if error:
return
fp = open(dest, 'wb')
fp.write(s.decode(input_code).encode('UTF-16'))
fp.close()
if convert_to_binary == 'True':
self._ConvertToBinary(dest)
def _DetectInputEncoding(self, file_name):
"""Reads the first few bytes from file_name and tries to guess the text
encoding. Returns None as a guess if it can't detect it."""
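# The detection below keys off the byte-order mark: '\xFE\xFF' and
# '\xFF\xFE' are treated as UTF-16 and '\xEF\xBB\xBF' as UTF-8; anything
# else falls through to None.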
fp = open(file_name, 'rb')
try:
header = fp.read(3)
except Exception:
fp.close()
return None
fp.close()
if header.startswith("\xFE\xFF"):
return "UTF-16"
elif header.startswith("\xFF\xFE"):
return "UTF-16"
elif header.startswith("\xEF\xBB\xBF"):
return "UTF-8"
else:
return None
def ExecCopyInfoPlist(self, source, dest, convert_to_binary, *keys):
"""Copies the |source| Info.plist to the destination directory |dest|."""
# Read the source Info.plist into memory.
fd = open(source, 'r')
lines = fd.read()
fd.close()
# Insert synthesized key/value pairs (e.g. BuildMachineOSBuild).
plist = plistlib.readPlistFromString(lines)
if keys:
plist = dict(plist.items() + json.loads(keys[0]).items())
lines = plistlib.writePlistToString(plist)
# Go through all the environment variables and replace them as variables in
# the file.
IDENT_RE = re.compile(r'[/\s]')
for key in os.environ:
if key.startswith('_'):
continue
evar = '${%s}' % key
evalue = os.environ[key]
lines = string.replace(lines, evar, evalue)
# Xcode supports various suffixes on environment variables, which are
# all undocumented. :rfc1034identifier is used in the standard project
# template these days, and :identifier was used earlier. They are used to
# convert non-url characters into things that look like valid urls --
# except that the replacement character for :identifier, '_' isn't valid
# in a URL either -- oops, hence :rfc1034identifier was born.
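# As an illustration with an assumed value: if PRODUCT_NAME were
# 'My App', '${PRODUCT_NAME:identifier}' would become 'My_App' and
# '${PRODUCT_NAME:rfc1034identifier}' would become 'My-App'.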
evar = '${%s:identifier}' % key
evalue = IDENT_RE.sub('_', os.environ[key])
lines = string.replace(lines, evar, evalue)
evar = '${%s:rfc1034identifier}' % key
evalue = IDENT_RE.sub('-', os.environ[key])
lines = string.replace(lines, evar, evalue)
# Remove any keys with values that haven't been replaced.
lines = lines.split('\n')
for i in range(len(lines)):
if lines[i].strip().startswith("<string>${"):
lines[i] = None
lines[i - 1] = None
lines = '\n'.join(filter(lambda x: x is not None, lines))
# Write out the file with variables replaced.
fd = open(dest, 'w')
fd.write(lines)
fd.close()
# Now write out the PkgInfo file, since the Info.plist file has been
# "compiled".
self._WritePkgInfo(dest)
if convert_to_binary == 'True':
self._ConvertToBinary(dest)
def _WritePkgInfo(self, info_plist):
"""This writes the PkgInfo file from the data stored in Info.plist."""
plist = plistlib.readPlist(info_plist)
if not plist:
return
# Only create PkgInfo for executable types.
package_type = plist['CFBundlePackageType']
if package_type != 'APPL':
return
# The format of PkgInfo is eight characters, representing the bundle type
# and bundle signature, each four characters. If that is missing, four
# '?' characters are used instead.
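# For instance, an 'APPL' bundle whose CFBundleSignature is 'FOOB'
# (a hypothetical signature) gets a PkgInfo containing 'APPLFOOB'; a
# missing or wrong-length signature yields 'APPL????'.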
signature_code = plist.get('CFBundleSignature', '????')
if len(signature_code) != 4: # Wrong length resets everything, too.
signature_code = '?' * 4
dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo')
fp = open(dest, 'w')
fp.write('%s%s' % (package_type, signature_code))
fp.close()
def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666)
fcntl.flock(fd, fcntl.LOCK_EX)
return subprocess.call(cmd_list)
def ExecFilterLibtool(self, *cmd_list):
"""Calls libtool and filters out '/path/to/libtool: file: foo.o has no
symbols'."""
libtool_re = re.compile(r'^.*libtool: file: .* has no symbols$')
libtool_re5 = re.compile(
r'^.*libtool: warning for library: ' +
r'.* the table of contents is empty ' +
r'\(no object file members in the library define global symbols\)$')
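# These patterns match stderr lines such as
# '/usr/bin/libtool: file: foo.o has no symbols' and the empty
# table-of-contents warning; matching lines are dropped below and
# everything else is forwarded to stderr.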
env = os.environ.copy()
# Ref:
# http://www.opensource.apple.com/source/cctools/cctools-809/misc/libtool.c
# The problem with this flag is that it resets the file mtime on the file to
# epoch=0, e.g. 1970-1-1 or 1969-12-31 depending on timezone.
env['ZERO_AR_DATE'] = '1'
libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env)
_, err = libtoolout.communicate()
for line in err.splitlines():
if not libtool_re.match(line) and not libtool_re5.match(line):
print >>sys.stderr, line
# Unconditionally touch the output .a file on the command line if present
# and the command succeeded. A bit hacky.
if not libtoolout.returncode:
for i in range(len(cmd_list) - 1):
if cmd_list[i] == "-o" and cmd_list[i+1].endswith('.a'):
os.utime(cmd_list[i+1], None)
break
return libtoolout.returncode
def ExecPackageFramework(self, framework, version):
"""Takes a path to Something.framework and the Current version of that and
sets up all the symlinks."""
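# The layout produced below, sketched for a hypothetical Foo.framework
# with version 'A':
#   Foo.framework/Versions/Current -> A
#   Foo.framework/Foo              -> Versions/Current/Foo
#   Foo.framework/Resources        -> Versions/Current/Resources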
# Find the name of the binary based on the part before the ".framework".
binary = os.path.basename(framework).split('.')[0]
CURRENT = 'Current'
RESOURCES = 'Resources'
VERSIONS = 'Versions'
if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)):
# Binary-less frameworks don't seem to contain symlinks (see e.g.
# chromium's out/Debug/org.chromium.Chromium.manifest/ bundle).
return
# Move into the framework directory to set the symlinks correctly.
pwd = os.getcwd()
os.chdir(framework)
# Set up the Current version.
self._Relink(version, os.path.join(VERSIONS, CURRENT))
# Set up the root symlinks.
self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary)
self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES)
# Back to where we were before!
os.chdir(pwd)
def _Relink(self, dest, link):
"""Creates a symlink to |dest| named |link|. If |link| already exists,
it is overwritten."""
if os.path.lexists(link):
os.remove(link)
os.symlink(dest, link)
def ExecCompileXcassets(self, keys, *inputs):
"""Compiles multiple .xcassets files into a single .car file.
This invokes 'actool' to compile all the input .xcassets files. The
|keys| argument is a json-encoded dictionary of extra arguments to
pass to 'actool' when the asset catalogs contain an application icon
or a launch image.
Note that 'actool' does not create the Assets.car file if the asset
catalogs do not contain any imageset.
"""
command_line = [
'xcrun', 'actool', '--output-format', 'human-readable-text',
'--compress-pngs', '--notices', '--warnings', '--errors',
]
is_iphone_target = 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ
if is_iphone_target:
platform = os.environ['CONFIGURATION'].split('-')[-1]
if platform not in ('iphoneos', 'iphonesimulator'):
platform = 'iphonesimulator'
command_line.extend([
'--platform', platform, '--target-device', 'iphone',
'--target-device', 'ipad', '--minimum-deployment-target',
os.environ['IPHONEOS_DEPLOYMENT_TARGET'], '--compile',
os.path.abspath(os.environ['CONTENTS_FOLDER_PATH']),
])
else:
command_line.extend([
'--platform', 'macosx', '--target-device', 'mac',
'--minimum-deployment-target', os.environ['MACOSX_DEPLOYMENT_TARGET'],
'--compile',
os.path.abspath(os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH']),
])
if keys:
keys = json.loads(keys)
for key, value in keys.iteritems():
arg_name = '--' + key
if isinstance(value, bool):
if value:
command_line.append(arg_name)
elif isinstance(value, list):
for v in value:
command_line.append(arg_name)
command_line.append(str(v))
else:
command_line.append(arg_name)
command_line.append(str(value))
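# As a sketch with assumed values: keys of
# '{"app-icon": "AppIcon", "launch-image": "LaunchImage"}' would append
# ['--app-icon', 'AppIcon', '--launch-image', 'LaunchImage'] to the
# actool command line.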
# Note: actool crashes if input paths are relative, so use os.path.abspath
# to get absolute path names for the inputs.
command_line.extend(map(os.path.abspath, inputs))
subprocess.check_call(command_line)
def ExecMergeInfoPlist(self, output, *inputs):
"""Merge multiple .plist files into a single .plist file."""
merged_plist = {}
for path in inputs:
plist = self._LoadPlistMaybeBinary(path)
self._MergePlist(merged_plist, plist)
plistlib.writePlist(merged_plist, output)
def ExecCodeSignBundle(self, key, resource_rules, entitlements, provisioning):
"""Code sign a bundle.
This function tries to code sign an iOS bundle, following the same
algorithm as Xcode:
1. copy ResourceRules.plist from the user or the SDK into the bundle,
2. pick the provisioning profile that best matches the bundle identifier,
and copy it into the bundle as embedded.mobileprovision,
3. copy Entitlements.plist from the user or the SDK next to the bundle,
4. code sign the bundle.
"""
resource_rules_path = self._InstallResourceRules(resource_rules)
substitutions, overrides = self._InstallProvisioningProfile(
provisioning, self._GetCFBundleIdentifier())
entitlements_path = self._InstallEntitlements(
entitlements, substitutions, overrides)
subprocess.check_call([
'codesign', '--force', '--sign', key, '--resource-rules',
resource_rules_path, '--entitlements', entitlements_path,
os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['FULL_PRODUCT_NAME'])])
def _InstallResourceRules(self, resource_rules):
"""Installs ResourceRules.plist from user or SDK into the bundle.
Args:
resource_rules: string, optional, path to the ResourceRules.plist file
to use, default to "${SDKROOT}/ResourceRules.plist"
Returns:
Path to the copy of ResourceRules.plist into the bundle.
"""
source_path = resource_rules
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'ResourceRules.plist')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'], 'ResourceRules.plist')
shutil.copy2(source_path, target_path)
return target_path
def _InstallProvisioningProfile(self, profile, bundle_identifier):
"""Installs embedded.mobileprovision into the bundle.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple containing two dictionaries: variable substitutions and values
to override when generating the entitlements file.
"""
source_path, provisioning_data, team_id = self._FindProvisioningProfile(
profile, bundle_identifier)
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'embedded.mobileprovision')
shutil.copy2(source_path, target_path)
substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.')
return substitutions, provisioning_data['Entitlements']
def _FindProvisioningProfile(self, profile, bundle_identifier):
"""Finds the .mobileprovision file to use for signing the bundle.
Checks all the installed provisioning profiles (or, if the user specified
the PROVISIONING_PROFILE variable, only that one) and selects the most
specific one that corresponds to the bundle identifier.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple of the path to the selected provisioning profile, the data of
the embedded plist in the provisioning profile and the team identifier
to use for code signing.
Raises:
SystemExit: if no .mobileprovision can be used to sign the bundle.
"""
profiles_dir = os.path.join(
os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles')
if not os.path.isdir(profiles_dir):
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
provisioning_profiles = None
if profile:
profile_path = os.path.join(profiles_dir, profile + '.mobileprovision')
if os.path.exists(profile_path):
provisioning_profiles = [profile_path]
if not provisioning_profiles:
provisioning_profiles = glob.glob(
os.path.join(profiles_dir, '*.mobileprovision'))
valid_provisioning_profiles = {}
for profile_path in provisioning_profiles:
profile_data = self._LoadProvisioningProfile(profile_path)
app_id_pattern = profile_data.get(
'Entitlements', {}).get('application-identifier', '')
for team_identifier in profile_data.get('TeamIdentifier', []):
app_id = '%s.%s' % (team_identifier, bundle_identifier)
if fnmatch.fnmatch(app_id, app_id_pattern):
valid_provisioning_profiles[app_id_pattern] = (
profile_path, profile_data, team_identifier)
if not valid_provisioning_profiles:
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
# If the user has multiple provisioning profiles installed that can be
# used for ${bundle_identifier}, pick the most specific one (ie. the
# provisioning profile whose pattern is the longest).
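# For a hypothetical bundle id 'com.example.app', a profile whose
# application-identifier pattern is 'TEAMID.com.example.app' would win
# over 'TEAMID.com.example.*' or 'TEAMID.*', since its pattern string is
# the longest.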
selected_key = max(valid_provisioning_profiles, key=lambda v: len(v))
return valid_provisioning_profiles[selected_key]
def _LoadProvisioningProfile(self, profile_path):
"""Extracts the plist embedded in a provisioning profile.
Args:
profile_path: string, path to the .mobileprovision file
Returns:
Content of the plist embedded in the provisioning profile as a dictionary.
"""
with tempfile.NamedTemporaryFile() as temp:
subprocess.check_call([
'security', 'cms', '-D', '-i', profile_path, '-o', temp.name])
return self._LoadPlistMaybeBinary(temp.name)
def _MergePlist(self, merged_plist, plist):
"""Merge |plist| into |merged_plist|."""
for key, value in plist.iteritems():
if isinstance(value, dict):
merged_value = merged_plist.get(key, {})
if isinstance(merged_value, dict):
self._MergePlist(merged_value, value)
merged_plist[key] = merged_value
else:
merged_plist[key] = value
else:
merged_plist[key] = value
def _LoadPlistMaybeBinary(self, plist_path):
"""Loads into a memory a plist possibly encoded in binary format.
This is a wrapper around plistlib.readPlist that tries to convert the
plist to the XML format if it can't be parsed (assuming that it is in
the binary format).
Args:
plist_path: string, path to a plist file, in XML or binary format
Returns:
Content of the plist as a dictionary.
"""
try:
# First, try to read the file using plistlib that only supports XML,
# and if an exception is raised, convert a temporary copy to XML and
# load that copy.
return plistlib.readPlist(plist_path)
except:
pass
with tempfile.NamedTemporaryFile() as temp:
shutil.copy2(plist_path, temp.name)
subprocess.check_call(['plutil', '-convert', 'xml1', temp.name])
return plistlib.readPlist(temp.name)
def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix):
"""Constructs a dictionary of variable substitutions for Entitlements.plist.
Args:
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
app_identifier_prefix: string, value for AppIdentifierPrefix
Returns:
Dictionary of substitutions to apply when generating Entitlements.plist.
"""
return {
'CFBundleIdentifier': bundle_identifier,
'AppIdentifierPrefix': app_identifier_prefix,
}
def _GetCFBundleIdentifier(self):
"""Extracts CFBundleIdentifier value from Info.plist in the bundle.
Returns:
Value of CFBundleIdentifier in the Info.plist located in the bundle.
"""
info_plist_path = os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['INFOPLIST_PATH'])
info_plist_data = self._LoadPlistMaybeBinary(info_plist_path)
return info_plist_data['CFBundleIdentifier']
def _InstallEntitlements(self, entitlements, substitutions, overrides):
"""Generates and install the ${BundleName}.xcent entitlements file.
Expands variables "$(variable)" pattern in the source entitlements file,
add extra entitlements defined in the .mobileprovision file and the copy
the generated plist to "${BundlePath}.xcent".
Args:
entitlements: string, optional, path to the Entitlements.plist template
to use, defaults to "${SDKROOT}/Entitlements.plist"
substitutions: dictionary, variable substitutions
overrides: dictionary, values to add to the entitlements
Returns:
Path to the generated entitlements file.
"""
source_path = entitlements
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['PRODUCT_NAME'] + '.xcent')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'],
'Entitlements.plist')
shutil.copy2(source_path, target_path)
data = self._LoadPlistMaybeBinary(target_path)
data = self._ExpandVariables(data, substitutions)
if overrides:
for key in overrides:
if key not in data:
data[key] = overrides[key]
plistlib.writePlist(data, target_path)
return target_path
def _ExpandVariables(self, data, substitutions):
"""Expands variables "$(variable)" in data.
Args:
data: object, can be either string, list or dictionary
substitutions: dictionary, variable substitutions to perform
Returns:
Copy of data where each reference to "$(variable)" has been replaced
by the corresponding value found in substitutions, or left intact if
the key was not found.
"""
if isinstance(data, str):
for key, value in substitutions.iteritems():
data = data.replace('$(%s)' % key, value)
return data
if isinstance(data, list):
return [self._ExpandVariables(v, substitutions) for v in data]
if isinstance(data, dict):
return {k: self._ExpandVariables(data[k], substitutions) for k in data}
return data
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
apache-2.0
|