repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringlengths 1-5) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses 15 values)
---|---|---|---|---|---|
Jay-Jay-D/LeanSTP | Algorithm.Python/ScheduledUniverseSelectionModelRegressionAlgorithm.py | 2 | 5460 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Orders import *
from QuantConnect.Algorithm import *
from QuantConnect.Algorithm.Framework import *
from QuantConnect.Algorithm.Framework.Alphas import *
from QuantConnect.Algorithm.Framework.Portfolio import *
from QuantConnect.Algorithm.Framework.Selection import *
from datetime import datetime, timedelta
### <summary>
### Regression algorithm for testing ScheduledUniverseSelectionModel scheduling functions.
### </summary>
class ScheduledUniverseSelectionModelRegressionAlgorithm(QCAlgorithm):
'''Regression algorithm for testing ScheduledUniverseSelectionModel scheduling functions.'''
def Initialize(self):
self.UniverseSettings.Resolution = Resolution.Hour
self.SetStartDate(2017, 1, 1)
self.SetEndDate(2017, 2, 1)
# selection will run on mon/tues/thurs at 00:00/06:00/12:00/18:00
self.SetUniverseSelection(ScheduledUniverseSelectionModel(
self.DateRules.Every(DayOfWeek.Monday, DayOfWeek.Tuesday, DayOfWeek.Thursday),
self.TimeRules.Every(timedelta(hours = 12)),
self.SelectSymbols
))
self.SetAlpha(ConstantAlphaModel(InsightType.Price, InsightDirection.Up, timedelta(1)))
self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel())
# some days of the week have different behavior the first time -- fewer securities to remove
self.seenDays = []
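# SelectSymbols maps each scheduled selection time to one equity and one forex symbol
# based on the weekday, so OnSecuritiesChanged below can predict exactly which
# securities should be added and removed on each run.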
def SelectSymbols(self, dateTime):
symbols = []
weekday = dateTime.weekday()
if weekday == 0 or weekday == 1:
symbols.append(Symbol.Create('SPY', SecurityType.Equity, Market.USA))
elif weekday == 2:
# given the date/time rules specified in Initialize, this symbol will never be selected (not invoked on wednesdays)
symbols.append(Symbol.Create('AAPL', SecurityType.Equity, Market.USA))
else:
symbols.append(Symbol.Create('IBM', SecurityType.Equity, Market.USA))
if weekday == 1 or weekday == 3:
symbols.append(Symbol.Create('EURUSD', SecurityType.Forex, Market.FXCM))
elif weekday == 4:
# given the date/time rules specified in Initialize, this symbol will never be selected (every 6 hours never lands on hour==1)
symbols.append(Symbol.Create('EURGBP', SecurityType.Forex, Market.FXCM))
else:
symbols.append(Symbol.Create('NZDUSD', SecurityType.Forex, Market.FXCM))
return symbols
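# OnSecuritiesChanged cross-checks the actual universe changes against the
# per-weekday expectations implied by SelectSymbols above.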
def OnSecuritiesChanged(self, changes):
self.Log("{}: {}".format(self.Time, changes))
weekday = self.Time.weekday()
if weekday == 0:
self.ExpectAdditions(changes, 'SPY', 'NZDUSD')
if weekday not in self.seenDays:
self.seenDays.append(weekday)
self.ExpectRemovals(changes, None)
else:
self.ExpectRemovals(changes, 'EURUSD', 'IBM')
if weekday == 1:
self.ExpectAdditions(changes, 'EURUSD')
if weekday not in self.seenDays:
self.seenDays.append(weekday)
self.ExpectRemovals(changes, 'NZDUSD')
else:
self.ExpectRemovals(changes, 'NZDUSD')
if weekday == 2 or weekday == 4:
# selection function not invoked on wednesdays (2) or fridays (4)
self.ExpectAdditions(changes, None)
self.ExpectRemovals(changes, None)
if weekday == 3:
self.ExpectAdditions(changes, "IBM")
self.ExpectRemovals(changes, "SPY")
def OnOrderEvent(self, orderEvent):
self.Log("{}: {}".format(self.Time, orderEvent))
def ExpectAdditions(self, changes, *tickers):
if tickers == (None,) and changes.AddedSecurities.Count > 0:  # ExpectAdditions(changes, None) means "expect no additions"
raise Exception("{}: Expected no additions: {}".format(self.Time, self.Time.weekday()))
for ticker in tickers:
if ticker is not None and ticker not in [s.Symbol.Value for s in changes.AddedSecurities]:
raise Exception("{}: Expected {} to be added: {}".format(self.Time, ticker, self.Time.weekday()))
def ExpectRemovals(self, changes, *tickers):
if tickers == (None,) and changes.RemovedSecurities.Count > 0:  # ExpectRemovals(changes, None) means "expect no removals"
raise Exception("{}: Expected no removals: {}".format(self.Time, self.Time.weekday()))
for ticker in tickers:
if ticker is not None and ticker not in [s.Symbol.Value for s in changes.RemovedSecurities]:
raise Exception("{}: Expected {} to be removed: {}".format(self.Time, ticker, self.Time.weekday()))
| apache-2.0 |
RT-Thread/rt-thread | bsp/stm32/stm32f107-uc-eval/rtconfig.py | 28 | 4023 | import os
# toolchains options
ARCH='arm'
CPU='cortex-m3'
CROSS_TOOL='gcc'
# bsp lib config
BSP_LIBRARY_TYPE = None
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Users\XXYYZZ'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
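# BUILD selects the flags appended per toolchain below: 'debug' adds the -O0/-g style
# options, anything else falls through to the optimized settings (-O2 / -Oh).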
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
CXX = PREFIX + 'g++'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m3 -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rt-thread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
CXX = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M3 '
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --scatter "board\linker_scripts\link.sct" --info sizes --info totals --info unused --info veneers --list rt-thread.map --strict'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/include'
LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib'
CFLAGS += ' -D__MICROLIB '
AFLAGS += ' --pd "__MICROLIB SETA 1" '
LFLAGS += ' --library_type=microlib '
EXEC_PATH += '/ARM/ARMCC/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
CFLAGS += ' -std=c99'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
CXX = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = '-Dewarm'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M3'
CFLAGS += ' -e'
CFLAGS += ' --fpu=None'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M3'
AFLAGS += ' --fpu None'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config "board/linker_scripts/link.icf"'
LFLAGS += ' --entry __iar_program_start'
CXXFLAGS = CFLAGS
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --bin $TARGET rtthread.bin'
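# dist_handle packages the BSP: it adds the 'tools' directory beside BSP_ROOT to
# sys.path and delegates the work to dist_do_building (typically driven by the
# scons dist target).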
def dist_handle(BSP_ROOT, dist_dir):
import sys
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(BSP_ROOT), 'tools'))
from sdk_dist import dist_do_building
dist_do_building(BSP_ROOT, dist_dir)
| apache-2.0 |
kdar/Wox | PythonHome/Lib/site-packages/pip/_vendor/requests/packages/chardet/gb2312prober.py | 2994 | 1681 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import GB2312DistributionAnalysis
from .mbcssm import GB2312SMModel
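# Prober for the GB2312 (simplified Chinese) encoding: bytes are run through the
# GB2312 coding state machine and scored by the GB2312 character distribution analyser.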
class GB2312Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(GB2312SMModel)
self._mDistributionAnalyzer = GB2312DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "GB2312"
| mit |
jvoegele/picard | picard/ui/collectionmenu.py | 2 | 3181 | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2013 Michael Wiencek
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import locale
from PyQt4 import QtCore, QtGui
from picard.collection import user_collections, load_user_collections
class CollectionMenu(QtGui.QMenu):
def __init__(self, albums, *args):
QtGui.QMenu.__init__(self, *args)
self.ids = set(a.id for a in albums)
self.update_collections()
def update_collections(self):
self.clear()
for id, collection in sorted(user_collections.iteritems(),
key=lambda (k, v):
(locale.strxfrm(v.name.encode('utf-8')), k)):
action = QtGui.QWidgetAction(self)
action.setDefaultWidget(CollectionCheckBox(self, collection))
self.addAction(action)
self.addSeparator()
self.refresh_action = self.addAction(_("Refresh List"))
def refresh_list(self):
self.refresh_action.setEnabled(False)
load_user_collections(self.update_collections)
def mouseReleaseEvent(self, event):
# Not using self.refresh_action.triggered because it closes the menu
if self.actionAt(event.pos()) == self.refresh_action and self.refresh_action.isEnabled():
self.refresh_list()
class CollectionCheckBox(QtGui.QCheckBox):
def __init__(self, menu, collection):
self.menu = menu
self.collection = collection
QtGui.QCheckBox.__init__(self, self.label())
releases = collection.releases & menu.ids
if len(releases) == len(menu.ids):
self.setCheckState(QtCore.Qt.Checked)
elif not releases:
self.setCheckState(QtCore.Qt.Unchecked)
else:
self.setCheckState(QtCore.Qt.PartiallyChecked)
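# The check state reflects how many of the selected releases are already in this
# collection: all (checked), none (unchecked) or only some (partially checked).
# nextCheckState below adds the missing releases, or removes them all if none are missing.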
def nextCheckState(self):
ids = self.menu.ids
if ids & self.collection.pending:
return
diff = ids - self.collection.releases
if diff:
self.collection.add_releases(diff, self.updateText)
self.setCheckState(QtCore.Qt.Checked)
else:
self.collection.remove_releases(ids & self.collection.releases, self.updateText)
self.setCheckState(QtCore.Qt.Unchecked)
def updateText(self):
self.setText(self.label())
def label(self):
c = self.collection
return ungettext("%s (%i release)", "%s (%i releases)", c.size) % (c.name, c.size)
| gpl-2.0 |
ctb/cvxpy | cvxpy/atoms/affine/upper_tri.py | 11 | 2344 | """
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy.atoms.affine.affine_atom import AffAtom
import cvxpy.utilities as u
import cvxpy.interface as intf
import cvxpy.lin_ops.lin_utils as lu
import numpy as np
class upper_tri(AffAtom):
"""The vectorized strictly upper triagonal entries.
"""
def __init__(self, expr):
super(upper_tri, self).__init__(expr)
@AffAtom.numpy_numeric
def numeric(self, values):
"""Vectorize the upper triagonal entries.
"""
value = np.zeros(self.size[0])
count = 0
for i in range(values[0].shape[0]):
for j in range(values[0].shape[1]):
if i < j:
value[count] = values[0][i, j]
count += 1
return value
def validate_arguments(self):
"""Checks that the argument is a square matrix.
"""
if not self.args[0].size[0] == self.args[0].size[1]:
raise ValueError(
"Argument to upper_tri must be a square matrix."
)
def shape_from_args(self):
"""A vector.
"""
rows, cols = self.args[0].size
return u.Shape(rows*(cols-1)//2, 1)
@staticmethod
def graph_implementation(arg_objs, size, data=None):
"""Vectorized strictly upper triagonal entries.
Parameters
----------
arg_objs : list
LinExpr for each argument.
size : tuple
The size of the resulting expression.
data :
Additional data required by the atom.
Returns
-------
tuple
(LinOp for objective, list of constraints)
"""
return (lu.upper_tri(arg_objs[0]), [])
| gpl-3.0 |
nikhilprathapani/python-for-android | python-modules/zope/zope/interface/tests/test_interface.py | 50 | 16077 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test Interface implementation
"""
import doctest
import unittest
import sys
class InterfaceTests(unittest.TestCase):
def _makeDerivedInterface(self):
from zope.interface import Interface
from zope.interface import Attribute
class _I1(Interface):
a1 = Attribute("This is an attribute")
def f11():
pass
def f12():
pass
f12.optional = 1
class _I1_(_I1):
pass
class _I1__(_I1_):
pass
class _I2(_I1__):
def f21():
pass
def f22():
pass
f23 = f22
return _I2
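# _I2 extends _I1 through _I1_ and _I1__, so names() lists only f21/f22/f23 while
# names(all=True) also picks up a1, f11 and f12 from the base interface (exercised
# by the name-listing tests below).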
def testInterfaceSetOnAttributes(self):
from zope.interface.tests.unitfixtures import FooInterface
self.assertEqual(FooInterface['foobar'].interface,
FooInterface)
self.assertEqual(FooInterface['aMethod'].interface,
FooInterface)
def testClassImplements(self):
from zope.interface.tests.unitfixtures import A
from zope.interface.tests.unitfixtures import B
from zope.interface.tests.unitfixtures import C
from zope.interface.tests.unitfixtures import D
from zope.interface.tests.unitfixtures import E
from zope.interface.tests.unitfixtures import I1
from zope.interface.tests.unitfixtures import I2
from zope.interface.tests.unitfixtures import IC
self.assert_(IC.implementedBy(C))
self.assert_(I1.implementedBy(A))
self.assert_(I1.implementedBy(B))
self.assert_(not I1.implementedBy(C))
self.assert_(I1.implementedBy(D))
self.assert_(I1.implementedBy(E))
self.assert_(not I2.implementedBy(A))
self.assert_(I2.implementedBy(B))
self.assert_(not I2.implementedBy(C))
# No longer after interfacegeddon
# self.assert_(not I2.implementedBy(D))
self.assert_(not I2.implementedBy(E))
def testUtil(self):
from zope.interface import implementedBy
from zope.interface import providedBy
from zope.interface.tests.unitfixtures import A
from zope.interface.tests.unitfixtures import B
from zope.interface.tests.unitfixtures import C
from zope.interface.tests.unitfixtures import I1
from zope.interface.tests.unitfixtures import I2
from zope.interface.tests.unitfixtures import IC
self.assert_(IC in implementedBy(C))
self.assert_(I1 in implementedBy(A))
self.assert_(not I1 in implementedBy(C))
self.assert_(I2 in implementedBy(B))
self.assert_(not I2 in implementedBy(C))
self.assert_(IC in providedBy(C()))
self.assert_(I1 in providedBy(A()))
self.assert_(not I1 in providedBy(C()))
self.assert_(I2 in providedBy(B()))
self.assert_(not I2 in providedBy(C()))
def testObjectImplements(self):
from zope.interface.tests.unitfixtures import A
from zope.interface.tests.unitfixtures import B
from zope.interface.tests.unitfixtures import C
from zope.interface.tests.unitfixtures import D
from zope.interface.tests.unitfixtures import E
from zope.interface.tests.unitfixtures import I1
from zope.interface.tests.unitfixtures import I2
from zope.interface.tests.unitfixtures import IC
self.assert_(IC.providedBy(C()))
self.assert_(I1.providedBy(A()))
self.assert_(I1.providedBy(B()))
self.assert_(not I1.providedBy(C()))
self.assert_(I1.providedBy(D()))
self.assert_(I1.providedBy(E()))
self.assert_(not I2.providedBy(A()))
self.assert_(I2.providedBy(B()))
self.assert_(not I2.providedBy(C()))
# Not after interface geddon
# self.assert_(not I2.providedBy(D()))
self.assert_(not I2.providedBy(E()))
def testDeferredClass(self):
from zope.interface.tests.unitfixtures import A
from zope.interface.exceptions import BrokenImplementation
a = A()
self.assertRaises(BrokenImplementation, a.ma)
def testInterfaceExtendsInterface(self):
from zope.interface.tests.unitfixtures import BazInterface
from zope.interface.tests.unitfixtures import BarInterface
from zope.interface.tests.unitfixtures import BobInterface
from zope.interface.tests.unitfixtures import FunInterface
self.assert_(BazInterface.extends(BobInterface))
self.assert_(BazInterface.extends(BarInterface))
self.assert_(BazInterface.extends(FunInterface))
self.assert_(not BobInterface.extends(FunInterface))
self.assert_(not BobInterface.extends(BarInterface))
self.assert_(BarInterface.extends(FunInterface))
self.assert_(not BarInterface.extends(BazInterface))
def testVerifyImplementation(self):
from zope.interface.verify import verifyClass
from zope.interface import Interface
from zope.interface.tests.unitfixtures import Foo
from zope.interface.tests.unitfixtures import FooInterface
from zope.interface.tests.unitfixtures import I1
self.assert_(verifyClass(FooInterface, Foo))
self.assert_(Interface.providedBy(I1))
def test_names(self):
iface = self._makeDerivedInterface()
names = list(iface.names())
names.sort()
self.assertEqual(names, ['f21', 'f22', 'f23'])
all = list(iface.names(all=True))
all.sort()
self.assertEqual(all, ['a1', 'f11', 'f12', 'f21', 'f22', 'f23'])
def test_namesAndDescriptions(self):
iface = self._makeDerivedInterface()
names = [nd[0] for nd in iface.namesAndDescriptions()]
names.sort()
self.assertEqual(names, ['f21', 'f22', 'f23'])
names = [nd[0] for nd in iface.namesAndDescriptions(1)]
names.sort()
self.assertEqual(names, ['a1', 'f11', 'f12', 'f21', 'f22', 'f23'])
for name, d in iface.namesAndDescriptions(1):
self.assertEqual(name, d.__name__)
def test_getDescriptionFor(self):
iface = self._makeDerivedInterface()
self.assertEqual(iface.getDescriptionFor('f11').__name__, 'f11')
self.assertEqual(iface.getDescriptionFor('f22').__name__, 'f22')
self.assertEqual(iface.queryDescriptionFor('f33', self), self)
self.assertRaises(KeyError, iface.getDescriptionFor, 'f33')
def test___getitem__(self):
iface = self._makeDerivedInterface()
self.assertEqual(iface['f11'].__name__, 'f11')
self.assertEqual(iface['f22'].__name__, 'f22')
self.assertEqual(iface.get('f33', self), self)
self.assertRaises(KeyError, iface.__getitem__, 'f33')
def test___contains__(self):
iface = self._makeDerivedInterface()
self.failUnless('f11' in iface)
self.failIf('f33' in iface)
def test___iter__(self):
iface = self._makeDerivedInterface()
names = list(iter(iface))
names.sort()
self.assertEqual(names, ['a1', 'f11', 'f12', 'f21', 'f22', 'f23'])
def testAttr(self):
iface = self._makeDerivedInterface()
description = iface.getDescriptionFor('a1')
self.assertEqual(description.__name__, 'a1')
self.assertEqual(description.__doc__, 'This is an attribute')
def testFunctionAttributes(self):
# Make sure function attributes become tagged values.
from zope.interface import Interface
class ITest(Interface):
def method():
pass
method.optional = 1
method = ITest['method']
self.assertEqual(method.getTaggedValue('optional'), 1)
def testInvariant(self):
from zope.interface.exceptions import Invalid
from zope.interface import directlyProvides
from zope.interface.tests.unitfixtures import BarGreaterThanFoo
from zope.interface.tests.unitfixtures import ifFooThenBar
from zope.interface.tests.unitfixtures import IInvariant
from zope.interface.tests.unitfixtures import InvariantC
from zope.interface.tests.unitfixtures import ISubInvariant
# set up
o = InvariantC()
directlyProvides(o, IInvariant)
# a helper
def errorsEqual(self, o, error_len, error_msgs, iface=None):
if iface is None:
iface = IInvariant
self.assertRaises(Invalid, iface.validateInvariants, o)
e = []
try:
iface.validateInvariants(o, e)
except Invalid, error:
self.assertEquals(error.args[0], e)
else:
self._assert(0) # validateInvariants should always raise
# Invalid
self.assertEquals(len(e), error_len)
msgs = [error.args[0] for error in e]
msgs.sort()
for msg in msgs:
self.assertEquals(msg, error_msgs.pop(0))
# the tests
self.assertEquals(IInvariant.getTaggedValue('invariants'),
[ifFooThenBar])
self.assertEquals(IInvariant.validateInvariants(o), None)
o.bar = 27
self.assertEquals(IInvariant.validateInvariants(o), None)
o.foo = 42
self.assertEquals(IInvariant.validateInvariants(o), None)
del o.bar
errorsEqual(self, o, 1, ['If Foo, then Bar!'])
# nested interfaces with invariants:
self.assertEquals(ISubInvariant.getTaggedValue('invariants'),
[BarGreaterThanFoo])
o = InvariantC()
directlyProvides(o, ISubInvariant)
o.foo = 42
# even though the interface has changed, we should still only have one
# error.
errorsEqual(self, o, 1, ['If Foo, then Bar!'], ISubInvariant)
# however, if we set foo to 0 (Boolean False) and bar to a negative
# number then we'll get the new error
o.foo = 2
o.bar = 1
errorsEqual(self, o, 1, ['Please, Boo MUST be greater than Foo!'],
ISubInvariant)
# and if we set foo to a positive number and boo to 0, we'll
# get both errors!
o.foo = 1
o.bar = 0
errorsEqual(self, o, 2, ['If Foo, then Bar!',
'Please, Boo MUST be greater than Foo!'],
ISubInvariant)
# for a happy ending, we'll make the invariants happy
o.foo = 1
o.bar = 2
self.assertEquals(IInvariant.validateInvariants(o), None) # woohoo
# now we'll do two invariants on the same interface,
# just to make sure that a small
# multi-invariant interface is at least minimally tested.
o = InvariantC()
directlyProvides(o, IInvariant)
o.foo = 42
old_invariants = IInvariant.getTaggedValue('invariants')
invariants = old_invariants[:]
invariants.append(BarGreaterThanFoo) # if you really need to mutate,
# then this would be the way to do it. Probably a bad idea, though. :-)
IInvariant.setTaggedValue('invariants', invariants)
#
# even though the interface has changed, we should still only have one
# error.
errorsEqual(self, o, 1, ['If Foo, then Bar!'])
# however, if we set foo to 0 (Boolean False) and bar to a negative
# number then we'll get the new error
o.foo = 2
o.bar = 1
errorsEqual(self, o, 1, ['Please, Boo MUST be greater than Foo!'])
# and if we set foo to a positive number and boo to 0, we'll
# get both errors!
o.foo = 1
o.bar = 0
errorsEqual(self, o, 2, ['If Foo, then Bar!',
'Please, Boo MUST be greater than Foo!'])
# for another happy ending, we'll make the invariants happy again
o.foo = 1
o.bar = 2
self.assertEquals(IInvariant.validateInvariants(o), None) # bliss
# clean up
IInvariant.setTaggedValue('invariants', old_invariants)
def test___doc___element(self):
from zope.interface import Interface
from zope.interface import Attribute
class I(Interface):
"xxx"
self.assertEqual(I.__doc__, "xxx")
self.assertEqual(list(I), [])
class I(Interface):
"xxx"
__doc__ = Attribute('the doc')
self.assertEqual(I.__doc__, "")
self.assertEqual(list(I), ['__doc__'])
def testIssue228(self):
from zope.interface import Interface
# Test for http://collector.zope.org/Zope3-dev/228
if sys.version[0] == '3':
# No old style classes in Python 3, so the test becomes moot.
return
class I(Interface):
"xxx"
class Bad:
__providedBy__ = None
# Old style classes don't have a '__class__' attribute
self.failUnlessRaises(AttributeError, I.providedBy, Bad)
if sys.version_info >= (2, 4):
def test_invariant_as_decorator():
"""Invaiants can be deined in line
>>> from zope.interface.exceptions import Invalid
>>> from zope.interface import Interface
>>> from zope.interface import Attribute
>>> from zope.interface import implements
>>> from zope.interface import invariant
>>> class IRange(Interface):
... min = Attribute("Lower bound")
... max = Attribute("Upper bound")
...
... @invariant
... def range_invariant(ob):
... if ob.max < ob.min:
... raise Invalid('max < min')
>>> class Range(object):
... implements(IRange)
...
... def __init__(self, min, max):
... self.min, self.max = min, max
>>> from zope.interface.exceptions import Invalid
>>> IRange.validateInvariants(Range(1,2))
>>> IRange.validateInvariants(Range(1,1))
>>> try:
... IRange.validateInvariants(Range(2,1))
... except Invalid, e:
... str(e)
'max < min'
"""
def test_description_cache_management():
""" See https://bugs.launchpad.net/zope.interface/+bug/185974
There was a bug where the cache used by Specification.get() was not
cleared when the bases were changed.
>>> from zope.interface import Interface
>>> from zope.interface import Attribute
>>> class I1(Interface):
... a = Attribute('a')
>>> class I2(I1):
... pass
>>> class I3(I2):
... pass
>>> I3.get('a') is I1.get('a')
True
>>> I2.__bases__ = (Interface,)
>>> I3.get('a') is None
True
"""
def test_suite():
suite = unittest.makeSuite(InterfaceTests)
suite.addTest(doctest.DocTestSuite("zope.interface.interface"))
if sys.version_info >= (2, 4):
suite.addTest(doctest.DocTestSuite())
suite.addTest(doctest.DocFileSuite(
'../README.txt',
globs={'__name__': '__main__'},
optionflags=doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS,
))
suite.addTest(doctest.DocFileSuite(
'../README.ru.txt',
globs={'__name__': '__main__'},
optionflags=doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS,
))
return suite
| apache-2.0 |
Kraymer/beets | beetsplug/bucket.py | 13 | 8178 | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Fabrice Laporte.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Provides the %bucket{} function for path formatting.
"""
from __future__ import division, absolute_import, print_function
from datetime import datetime
import re
import string
from six.moves import zip
from itertools import tee
from beets import plugins, ui
ASCII_DIGITS = string.digits + string.ascii_lowercase
class BucketError(Exception):
pass
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def span_from_str(span_str):
"""Build a span dict from the span string representation.
"""
def normalize_year(d, yearfrom):
"""Convert string to a 4 digits year
"""
if yearfrom < 100:
raise BucketError(u"%d must be expressed on 4 digits" % yearfrom)
# if two digits only, pick closest year that ends by these two
# digits starting from yearfrom
if d < 100:
if (d % 100) < (yearfrom % 100):
d = (yearfrom - yearfrom % 100) + 100 + d
else:
d = (yearfrom - yearfrom % 100) + d
return d
years = [int(x) for x in re.findall(r'\d+', span_str)]
if not years:
raise ui.UserError(u"invalid range defined for year bucket '%s': no "
u"year found" % span_str)
try:
years = [normalize_year(x, years[0]) for x in years]
except BucketError as exc:
raise ui.UserError(u"invalid range defined for year bucket '%s': %s" %
(span_str, exc))
res = {'from': years[0], 'str': span_str}
if len(years) > 1:
res['to'] = years[-1]
return res
def complete_year_spans(spans):
"""Set the `to` value of spans if empty and sort them chronologically.
"""
spans.sort(key=lambda x: x['from'])
for (x, y) in pairwise(spans):
if 'to' not in x:
x['to'] = y['from'] - 1
if spans and 'to' not in spans[-1]:
spans[-1]['to'] = datetime.now().year
def extend_year_spans(spans, spanlen, start=1900, end=2014):
"""Add new spans to given spans list so that every year of [start,end]
belongs to a span.
"""
extended_spans = spans[:]
for (x, y) in pairwise(spans):
# if a gap between two spans, fill the gap with as much spans of
# spanlen length as necessary
for span_from in range(x['to'] + 1, y['from'], spanlen):
extended_spans.append({'from': span_from})
# Create spans prior to declared ones
for span_from in range(spans[0]['from'] - spanlen, start, -spanlen):
extended_spans.append({'from': span_from})
# Create spans after the declared ones
for span_from in range(spans[-1]['to'] + 1, end, spanlen):
extended_spans.append({'from': span_from})
complete_year_spans(extended_spans)
return extended_spans
def build_year_spans(year_spans_str):
"""Build a chronologically ordered list of spans dict from unordered spans
stringlist.
"""
spans = []
for elem in year_spans_str:
spans.append(span_from_str(elem))
complete_year_spans(spans)
return spans
def str2fmt(s):
"""Deduces formatting syntax from a span string.
"""
regex = re.compile(r"(?P<bef>\D*)(?P<fromyear>\d+)(?P<sep>\D*)"
r"(?P<toyear>\d*)(?P<after>\D*)")
m = re.match(regex, s)
res = {'fromnchars': len(m.group('fromyear')),
'tonchars': len(m.group('toyear'))}
res['fmt'] = "%s%%s%s%s%s" % (m.group('bef'),
m.group('sep'),
'%s' if res['tonchars'] else '',
m.group('after'))
return res
def format_span(fmt, yearfrom, yearto, fromnchars, tonchars):
"""Return a span string representation.
"""
args = (str(yearfrom)[-fromnchars:])
if tonchars:
args = (str(yearfrom)[-fromnchars:], str(yearto)[-tonchars:])
return fmt % args
def extract_modes(spans):
"""Extract the most common spans lengths and representation formats
"""
rangelen = sorted([x['to'] - x['from'] + 1 for x in spans])
deflen = sorted(rangelen, key=rangelen.count)[-1]
reprs = [str2fmt(x['str']) for x in spans]
deffmt = sorted(reprs, key=reprs.count)[-1]
return deflen, deffmt
def build_alpha_spans(alpha_spans_str, alpha_regexs):
"""Extract alphanumerics from string and return sorted list of chars
[from...to]
"""
spans = []
for elem in alpha_spans_str:
if elem in alpha_regexs:
spans.append(re.compile(alpha_regexs[elem]))
else:
bucket = sorted([x for x in elem.lower() if x.isalnum()])
if bucket:
begin_index = ASCII_DIGITS.index(bucket[0])
end_index = ASCII_DIGITS.index(bucket[-1])
else:
raise ui.UserError(u"invalid range defined for alpha bucket "
u"'%s': no alphanumeric character found" %
elem)
spans.append(
re.compile(
"^[" + ASCII_DIGITS[begin_index:end_index + 1] +
ASCII_DIGITS[begin_index:end_index + 1].upper() + "]"
)
)
return spans
class BucketPlugin(plugins.BeetsPlugin):
def __init__(self):
super(BucketPlugin, self).__init__()
self.template_funcs['bucket'] = self._tmpl_bucket
self.config.add({
'bucket_year': [],
'bucket_alpha': [],
'bucket_alpha_regex': {},
'extrapolate': False
})
self.setup()
def setup(self):
"""Setup plugin from config options
"""
self.year_spans = build_year_spans(self.config['bucket_year'].get())
if self.year_spans and self.config['extrapolate']:
[self.ys_len_mode,
self.ys_repr_mode] = extract_modes(self.year_spans)
self.year_spans = extend_year_spans(self.year_spans,
self.ys_len_mode)
self.alpha_spans = build_alpha_spans(
self.config['bucket_alpha'].get(),
self.config['bucket_alpha_regex'].get()
)
def find_bucket_year(self, year):
"""Return bucket that matches given year or return the year
if no matching bucket.
"""
for ys in self.year_spans:
if ys['from'] <= int(year) <= ys['to']:
if 'str' in ys:
return ys['str']
else:
return format_span(self.ys_repr_mode['fmt'],
ys['from'], ys['to'],
self.ys_repr_mode['fromnchars'],
self.ys_repr_mode['tonchars'])
return year
def find_bucket_alpha(self, s):
"""Return alpha-range bucket that matches given string or return the
string initial if no matching bucket.
"""
for (i, span) in enumerate(self.alpha_spans):
if span.match(s):
return self.config['bucket_alpha'].get()[i]
return s[0].upper()
def _tmpl_bucket(self, text, field=None):
if not field and len(text) == 4 and text.isdigit():
field = 'year'
if field == 'year':
func = self.find_bucket_year
else:
func = self.find_bucket_alpha
return func(text)
| mit |
jeffmurphy/cif-router | poc/cif-router.py | 1 | 21349 | #!/usr/bin/python
#
#
# cif-router proof of concept
#
# cif-router [-p pubport] [-r routerport] [-m myname] [-h]
# -p default: 5556
# -r default: 5555
# -m default: cif-router
#
# cif-router is a zmq device with the following sockets:
# XPUB
# for republishing messages
# XSUB
# for subscribing to message feeds
# ROUTER
# for routing REQ/REP messages between clients
# also for accepting REQs from clients
# locally accepted types:
# REGISTER, UNREGISTER, LIST-CLIENTS
# locally generated replies:
# UNAUTHORIZED, OK, FAILED
#
# communication between router and clients is via CIF.msg passing
# the 'ControlStruct' portion of CIF.msg is used for communication
#
# a typical use case:
#
# cif-smrt's REQ connects to ROUTER and sends a REGISTER message with dst=cif-router
# cif-router's ROUTER responds with SUCCESS (if valid) or UNAUTHORIZED (if not valid)
# the apikey will be validated during this step
# cif-router's XSUB connects to cif-smrt's XPUB
# cif-smrt begins publishing CIF messages
# cif-router re-publishes the CIF messages to clients connected to cif-router's XPUB
# clients may be: cif-correlator, cif-db
import sys
import zmq
import time
import datetime
import threading
import getopt
import json
import pprint
import struct
import hashlib  # needed by make_msg_seq below
sys.path.append('/usr/local/lib/cif-protocol/pb-python/gen-py')
import msg_pb2
import feed_pb2
import RFC5070_IODEF_v1_pb2
import MAEC_v2_pb2
import control_pb2
import cifsupport
sys.path.append('../../libcif/lib')
from CIF.RouterStats import *
from CIF.CtrlCommands.Clients import *
from CIF.CtrlCommands.Ping import *
from CIFRouter.MiniClient import *
from CIF.CtrlCommands.ThreadTracker import ThreadTracker
myname = "cif-router"
def dosubscribe(client, m):
client = m.src
if client in publishers :
print "dosubscribe: we've seen this client before. re-using old connection."
return control_pb2.ControlType.SUCCESS
elif clients.isregistered(client) == True:
if clients.apikey(client) == m.apikey:
print "dosubscribe: New publisher to connect to " + client
publishers[client] = time.time()
addr = m.iPublishRequest.ipaddress
port = m.iPublishRequest.port
print "dosubscribe: connect our xsub -> xpub on " + addr + ":" + str(port)
xsub.connect("tcp://" + addr + ":" + str(port))
return control_pb2.ControlType.SUCCESS
print "dosubscribe: iPublish from a registered client with a bad apikey: " + client + " " + m.apikey
print "dosubscribe: iPublish from a client who isnt registered: \"" + client + "\""
return control_pb2.ControlType.FAILED
def list_clients(client, apikey):
if clients.isregistered(client) == True and clients.apikey(client) == apikey:
return clients.asmessage()
return None
def make_register_reply(msgfrom, _apikey):
msg = control_pb2.ControlType()
msg.version = msg.version # required
msg.type = control_pb2.ControlType.REPLY
msg.command = control_pb2.ControlType.REGISTER
msg.dst = msgfrom
msg.src = "cif-router"
print "mrr " + _apikey
msg.apikey = _apikey
return msg
def make_unregister_reply(msgfrom, _apikey):
msg = control_pb2.ControlType()
msg.version = msg.version # required
msg.type = control_pb2.ControlType.REPLY
msg.command = control_pb2.ControlType.UNREGISTER
msg.dst = msgfrom
msg.src = "cif-router"
msg.apikey = _apikey
return msg
def make_msg_seq(msg):
_md5 = hashlib.md5()
_md5.update(msg.SerializeToString())
return _md5.digest()
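# handle_miniclient_reply drains apikey lookups completed by the embedded mini
# client and answers the REGISTER/UNREGISTER requests that were parked in
# register_wait_map / unregister_wait_map while the database was consulted.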
def handle_miniclient_reply(socket, routerport, publisherport):
pending_registers = miniclient.pending_apikey_lookups()
print "pending_apikey_lookups: ", pending_registers
for apikey in pending_registers:
if apikey in register_wait_map:
reply_to = register_wait_map[apikey]
apikey_results = miniclient.get_pending_apikey(apikey)
print " send reply to: ", reply_to
msg = make_register_reply(reply_to['msgfrom'], apikey)
msg.status = control_pb2.ControlType.FAILED
if apikey_results != None:
if apikey_results.revoked == False:
if apikey_results.expires == 0 or apikey_results.expires >= time.time():
msg.registerResponse.REQport = routerport
msg.registerResponse.PUBport = publisherport
msg.status = control_pb2.ControlType.SUCCESS
clients.register(reply_to['msgfrom'], reply_to['from_zmqid'], apikey)
print " Register succeeded."
else:
print " Register failed: key expired"
else:
print " Register failed: key revoked"
else:
print " Register failed: unknown key"
msg.seq = reply_to['msgseq']
socket.send_multipart([reply_to['from_zmqid'], '', msg.SerializeToString()])
del register_wait_map[apikey]
elif apikey in unregister_wait_map:
reply_to = unregister_wait_map[apikey]
apikey_results = miniclient.get_pending_apikey(apikey)
print " send reply to: ", reply_to
msg = make_unregister_reply(reply_to['msgfrom'], apikey)
msg.status = control_pb2.ControlType.FAILED
if apikey_results != None:
if apikey_results.revoked == False:
if apikey_results.expires == 0 or apikey_results.expires >= time.time():
msg.status = control_pb2.ControlType.SUCCESS
clients.unregister(reply_to['msgfrom'])
print " Unregister succeeded."
else:
print " Unregister failed: key expired"
else:
print " Unregister failed: key revoked"
else:
print " Unregister failed: unknown key"
msg.seq = reply_to['msgseq']
socket.send_multipart([reply_to['from_zmqid'], '', msg.SerializeToString()])
del unregister_wait_map[apikey]
miniclient.remove_pending_apikey(apikey)
def myrelay(pubport):
relaycount = 0
print "[myrelay] Create XPUB socket on " + str(pubport)
xpub = context.socket(zmq.PUB)
xpub.bind("tcp://*:" + str(pubport))
while True:
try:
relaycount = relaycount + 1
m = xsub.recv()
_m = msg_pb2.MessageType()
_m.ParseFromString(m)
if _m.type == msg_pb2.MessageType.QUERY:
mystats.setrelayed(1, 'QUERY')
elif _m.type == msg_pb2.MessageType.REPLY:
mystats.setrelayed(1, 'REPLY')
elif _m.type == msg_pb2.MessageType.SUBMISSION:
mystats.setrelayed(1, 'SUBMISSION')
for bmt in _m.submissionRequest:
mystats.setrelayed(1, bmt.baseObjectType)
print "[myrelay] total:%d got:%d bytes" % (relaycount, len(m))
#print "[myrelay] got msg on our xsub socket: " , m
xpub.send(m)
except Exception as e:
print "[myrelay] invalid message received: ", e
def usage():
print "cif-router [-r routerport] [-p pubport] [-m myid] [-a myapikey] [-dn dbname] [-dk dbkey] [-h]"
print " routerport = 5555, pubport = 5556, myid = cif-router"
print " dbkey = a8fd97c3-9f8b-477b-b45b-ba06719a0088"
print " dbname = cif-db"
try:
opts, args = getopt.getopt(sys.argv[1:], 'p:r:m:a:h', ['dbkey=', 'dbname='])  # long options: getopt cannot express multi-letter short flags such as -dk/-dn
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
global mystats
global clients
global thread_tracker
context = zmq.Context()
clients = Clients()
mystats = RouterStats()
publishers = {}
routerport = 5555
publisherport = 5556
myid = "cif-router"
dbkey = 'a8fd97c3-9f8b-477b-b45b-ba06719a0088'
dbname = 'cif-db'
global apikey
apikey = 'a1fd11c1-1f1b-477b-b45b-ba06719a0088'
miniclient = None
miniclient_id = myid + "-miniclient"
register_wait_map = {}
unregister_wait_map = {}
for o, a in opts:
if o == "-r":
routerport = a
elif o == "-p":
publisherport = a
elif o == "-m":
myid = a
elif o == "-dk":
dbkey = a
elif o == "-dn":
dbname = a
elif o == "-a":
apikey = a
elif o == "-h":
usage()
sys.exit(2)
print "Create ROUTER socket on " + str(routerport)
global socket
socket = context.socket(zmq.ROUTER)
socket.bind("tcp://*:" + str(routerport))
socket.setsockopt(zmq.IDENTITY, myname)
poller = zmq.Poller()
poller.register(socket, zmq.POLLIN)
print "Create XSUB socket"
xsub = context.socket(zmq.SUB)
xsub.setsockopt(zmq.SUBSCRIBE, '')
print "Connect XSUB<->XPUB"
thread = threading.Thread(target=myrelay, args=(publisherport,))
thread.start()
while not thread.isAlive():
print "waiting for pubsub relay thread to become alive"
time.sleep(1)
thread_tracker = ThreadTracker(False)
thread_tracker.add(id=thread.ident, user='Router', host='localhost', state='Running', info="PUBSUB Relay")
print "Entering event loop"
try:
open_for_business = False
while True:
sockets_with_data_ready = dict(poller.poll(1000))
#print "[up " + str(int(mystats.getuptime())) + "s]: Wakeup: "
if miniclient != None:
if miniclient.pending() == True:
print "\tMiniclient has replies we need to handle."
handle_miniclient_reply(socket, routerport, publisherport)
if sockets_with_data_ready and sockets_with_data_ready.get(socket) == zmq.POLLIN:
print "[up " + str(int(mystats.getuptime())) + "s]: Got an inbound message"
rawmsg = socket.recv_multipart()
#print " Got ", rawmsg
msg = control_pb2.ControlType()
try:
msg.ParseFromString(rawmsg[2])
except Exception as e:
print "Received message isn't a protobuf: ", e
mystats.setbad()
else:
from_zmqid = rawmsg[0] # save the ZMQ identity of who sent us this message
#print "Got msg: "#, msg.seq
try:
cifsupport.versionCheck(msg)
except Exception as e:
print "\tReceived message has incompatible version: ", e
mystats.setbadversion(1, msg.version)
else:
if cifsupport.isControl(msg):
msgfrom = msg.src
msgto = msg.dst
msgcommand = msg.command
msgcommandtext = control_pb2._CONTROLTYPE_COMMANDTYPE.values_by_number[msg.command].name
msgid = msg.seq
if msgfrom != '' and msg.apikey != '':
if msgto == myname and msg.type == control_pb2.ControlType.REPLY:
print "\tREPLY for me: ", msgcommand
if msgcommand == control_pb2.ControlType.APIKEY_GET:
print "\tReceived a REPLY for an APIKEY_GET"
elif msgto == myname and msg.type == control_pb2.ControlType.COMMAND:
print "\tCOMMAND for me: ", msgcommandtext
mystats.setcontrols(1, msgcommandtext)
"""
For REGISTER:
We allow only the db to register with us while we are not
open_for_business. Once the DB registers, we are open_for_business
since we can then start validating apikeys. Until that time, we can
only validate the dbkey that is specified on the command line when
you launch this program.
"""
if msgcommand == control_pb2.ControlType.REGISTER:
print "\tREGISTER from: " + msgfrom
msg.status = control_pb2.ControlType.FAILED
msg.type = control_pb2.ControlType.REPLY
msg.seq = msgid
if msgfrom == miniclient_id and msg.apikey == apikey:
clients.register(msgfrom, from_zmqid, msg.apikey)
msg.status = control_pb2.ControlType.SUCCESS
msg.registerResponse.REQport = routerport
msg.registerResponse.PUBport = publisherport
print "\tMiniClient has registered."
socket.send_multipart([from_zmqid, '', msg.SerializeToString()])
elif msgfrom == dbname and msg.apikey == dbkey:
clients.register(msgfrom, from_zmqid, msg.apikey)
msg.status = control_pb2.ControlType.SUCCESS
msg.registerResponse.REQport = routerport
msg.registerResponse.PUBport = publisherport
open_for_business = True
print "\tDB has connected successfully. Sending reply to DB."
print "\tStarting embedded client"
miniclient = MiniClient(apikey, "127.0.0.1", "127.0.0.1:" + str(routerport), 5557, miniclient_id, thread_tracker, True)
socket.send_multipart([from_zmqid, '', msg.SerializeToString()])
elif open_for_business == True:
"""
Since we need to wait for the DB to response, we note this pending request, ask the miniclient
to handle the lookup. We will poll the MC to see if the lookup has finished. Reply to client
will be sent from handle_miniclient_reply()
"""
miniclient.lookup_apikey(msg.apikey)
register_wait_map[msg.apikey] = {'msgfrom': msgfrom, 'from_zmqid': from_zmqid, 'msgseq': msg.seq}
else:
print "\tNot open_for_business yet. Go away."
elif msgcommand == control_pb2.ControlType.UNREGISTER:
"""
If the database unregisters, then we are not open_for_business any more.
"""
print "\tUNREGISTER from: " + msgfrom
if open_for_business == True:
if msgfrom == dbname and msg.apikey == dbkey:
print "\t\tDB unregistered. Closing for business."
open_for_business = False
clients.unregister(msgfrom)
msg.status = control_pb2.ControlType.SUCCESS
msg.seq = msgid
socket.send_multipart([ from_zmqid, '', msg.SerializeToString()])
else:
"""
Since we need to wait for the DB to response, we note this pending request, ask the miniclient
to handle the lookup. We will poll the MC to see if the lookup has finished. Reply to the client
will be sent from handle_miniclient_reply()
"""
miniclient.lookup_apikey(msg.apikey)
unregister_wait_map[msg.apikey] = {'msgfrom': msgfrom, 'from_zmqid': from_zmqid, 'msgseq': msg.seq}
elif msgcommand == control_pb2.ControlType.LISTCLIENTS:
print "\tLIST-CLIENTS for: " + msgfrom
if open_for_business == True:
rv = list_clients(msg.src, msg.apikey)
msg.seq = msgid
msg.status = msg.status | control_pb2.ControlType.FAILED
if rv != None:
msg.status = msg.status | control_pb2.ControlType.SUCCESS
msg.listClientsResponse.client.extend(rv.client)
msg.listClientsResponse.connectTimestamp.extend(rv.connectTimestamp)
socket.send_multipart( [ from_zmqid, '', msg.SerializeToString() ] )
elif msg.command == control_pb2.ControlType.STATS:
print "\tSTATS for: " + msgfrom
if open_for_business == True:
tmp = msg.dst
msg.dst = msg.src
msg.src = tmp
msg.status = control_pb2.ControlType.SUCCESS
msg.statsResponse.statsType = control_pb2.StatsResponse.ROUTER
msg.statsResponse.stats = mystats.asjson()
socket.send_multipart( [ from_zmqid, '', msg.SerializeToString() ] )
elif msg.command == control_pb2.ControlType.THREADS_LIST:
tmp = msg.dst
msg.dst = msg.src
msg.src = tmp
msg.status = control_pb2.ControlType.SUCCESS
thread_tracker.asmessage(msg.listThreadsResponse)
socket.send_multipart( [ from_zmqid, '', msg.SerializeToString() ] )
if msg.command == control_pb2.ControlType.PING:
c = Ping.makereply(msg)
socket.send_multipart( [ from_zmqid, '', c.SerializeToString() ] )
elif msgcommand == control_pb2.ControlType.IPUBLISH:
print "\tIPUBLISH from: " + msgfrom
if open_for_business == True:
rv = dosubscribe(from_zmqid, msg)
msg.status = rv
socket.send_multipart( [from_zmqid, '', msg.SerializeToString()] )
else:
print "\tCOMMAND for someone else: cmd=", msgcommandtext, "src=", msgfrom, " dst=", msgto
msgto_zmqid = clients.getzmqidentity(msgto)
if msgto_zmqid != None:
socket.send_multipart([msgto_zmqid, '', msg.SerializeToString()])
else:
print "\tUnknown message destination: ", msgto
else:
print "\tmsgfrom and/or msg.apikey is empty"
except KeyboardInterrupt:
print "Shut down."
if thread.isAlive():
try:
thread._Thread__stop()
except:
print(str(thread.getName()) + ' could not be terminated')
sys.exit(0)
| bsd-3-clause |
liyu1990/sklearn | sklearn/linear_model/stochastic_gradient.py | 31 | 50760 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
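# The integer codes above are handed to the Cython sgd_fast routines to select the
# learning-rate schedule and the penalty term.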
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError("alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate.")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match "
"dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
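# Hedged note (not part of the original source): each entry above maps a loss
# name to its loss class plus a default constructor argument, e.g. "hinge"
# presumably resolves to Hinge(1.0) while "perceptron" reuses Hinge with a
# 0.0 margin threshold; the actual lookup happens in _get_loss_function,
# which is defined elsewhere in this module.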
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple calls to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(self.intercept_)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights,"
" use compute_class_weight('{0}', classes, y). "
"In place of y you can use a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
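# Hedged usage sketch (not part of the original source): partial_fit targets
# out-of-core learning, so `classes` must list every label on the first call
# because later mini-batches may not contain all of them. The names `y_all`
# and `stream_of_batches` below are hypothetical placeholders.
#
# clf = SGDClassifier(loss="hinge")
# all_classes = np.unique(y_all)
# for X_chunk, y_chunk in stream_of_batches:
#     clf.partial_fit(X_chunk, y_chunk, classes=all_classes)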
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
one sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001.
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (alpha * (t + t0)) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
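# Hedged illustration (not part of the original source): for a binary problem
# with loss="modified_huber", the formula documented above means
#     p(y=1 | x) = (clip(decision_function(x), -1, 1) + 1) / 2
# so a raw score of 0.5 maps to a probability of 0.75 and any score <= -1
# maps to exactly 0.0 (a "hard zero").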
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
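# Hedged usage sketch (not part of the original source): probability
# estimates are only exposed for loss="log" or loss="modified_huber";
# X and y below are hypothetical training data.
#
# clf = SGDClassifier(loss="log").fit(X, y)
# proba = clf.predict_proba(X)          # shape (n_samples, n_classes)
# log_proba = clf.predict_log_proba(X)  # equals np.log(proba)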
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
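# Hedged usage sketch (not part of the original source): like the classifier,
# the regressor supports out-of-core learning through repeated partial_fit
# calls; `stream_of_batches` is a hypothetical iterable of (X_chunk, y_chunk)
# pairs.
#
# reg = SGDRegressor()
# for X_chunk, y_chunk in stream_of_batches:
#     reg.partial_fit(X_chunk, y_chunk)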
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple calls to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
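# Hedged note (not part of the original source): when averaging is enabled,
# the averaged weights are only exposed through coef_/intercept_ once at
# least `average` samples have been seen (self.average <= self.t_ - 1.0);
# until then the plain (standard) SGD weights are reported.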
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001.
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
| bsd-3-clause |
Timurdov/bionic | bionic/Lib/site-packages/pip-1.3.1-py2.7.egg/pip/backwardcompat/__init__.py | 63 | 3519 | """Stuff that differs in different Python versions"""
import os
import imp
import sys
import site
__all__ = ['WindowsError']
uses_pycache = hasattr(imp, 'cache_from_source')
class NeverUsedException(Exception):
"""this exception should never be raised"""
try:
WindowsError = WindowsError
except NameError:
WindowsError = NeverUsedException
try:
#new in Python 3.3
PermissionError = PermissionError
except NameError:
PermissionError = NeverUsedException
console_encoding = sys.__stdout__.encoding
if sys.version_info >= (3,):
from io import StringIO, BytesIO
from functools import reduce
from urllib.error import URLError, HTTPError
from queue import Queue, Empty
from urllib.request import url2pathname
from urllib.request import urlretrieve
from email import message as emailmessage
import urllib.parse as urllib
import urllib.request as urllib2
import configparser as ConfigParser
import xmlrpc.client as xmlrpclib
import urllib.parse as urlparse
import http.client as httplib
def cmp(a, b):
return (a > b) - (a < b)
def b(s):
return s.encode('utf-8')
def u(s):
return s.decode('utf-8')
def console_to_str(s):
try:
return s.decode(console_encoding)
except UnicodeDecodeError:
return s.decode('utf_8')
def fwrite(f, s):
f.buffer.write(b(s))
bytes = bytes
string_types = (str,)
raw_input = input
else:
from cStringIO import StringIO
from urllib2 import URLError, HTTPError
from Queue import Queue, Empty
from urllib import url2pathname, urlretrieve
from email import Message as emailmessage
import urllib
import urllib2
import urlparse
import ConfigParser
import xmlrpclib
import httplib
def b(s):
return s
def u(s):
return s
def console_to_str(s):
return s
def fwrite(f, s):
f.write(s)
bytes = str
string_types = (basestring,)
reduce = reduce
cmp = cmp
raw_input = raw_input
BytesIO = StringIO
from distutils.sysconfig import get_python_lib, get_python_version
#site.USER_SITE was created in py2.6
user_site = getattr(site, 'USER_SITE', None)
def product(*args, **kwds):
# product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x + [y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
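# Hedged usage note (not part of the original source): this pure-Python
# fallback mirrors itertools.product, e.g.
# list(product('ab', 'xy')) == [('a', 'x'), ('a', 'y'), ('b', 'x'), ('b', 'y')]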
def home_lib(home):
"""Return the lib dir under the 'home' installation scheme"""
if hasattr(sys, 'pypy_version_info'):
lib = 'site-packages'
else:
lib = os.path.join('lib', 'python')
return os.path.join(home, lib)
## py25 has no builtin ssl module
## only >=py32 has ssl.match_hostname and ssl.CertificateError
try:
import ssl
try:
from ssl import match_hostname, CertificateError
except ImportError:
from pip.backwardcompat.ssl_match_hostname import match_hostname, CertificateError
except ImportError:
ssl = None
# patch for py25 socket to work with http://pypi.python.org/pypi/ssl/
import socket
if not hasattr(socket, 'create_connection'): # for Python 2.5
# monkey-patch socket module
from pip.backwardcompat.socket_create_connection import create_connection
socket.create_connection = create_connection
| apache-2.0 |
waytai/odoo | addons/sale_journal/__openerp__.py | 262 | 2637 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Invoicing Journals',
'version': '1.0',
'category': 'Sales Management',
'description': """
The sales journal module allows you to categorise your sales and deliveries (picking lists) between different journals.
========================================================================================================================
This module is very helpful for bigger companies that work by departments.
You can use journals for different purposes, some examples:
----------------------------------------------------------
* isolate sales of different departments
* journals for deliveries by truck or by UPS
Journals have a responsible user and evolve through different statuses:
-------------------------------------------------------------------------
* draft, open, cancel, done.
Batch operations can be processed on the different journals to confirm all sales
at once, or to validate or invoice packings.
It also supports batch invoicing methods that can be configured by partners and sales orders, examples:
-------------------------------------------------------------------------------------------------------
* daily invoicing
* monthly invoicing
Some statistics by journals are provided.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/billing',
'depends': ['sale_stock'],
'data': [
'security/ir.model.access.csv',
'sale_journal_view.xml',
'sale_journal_data.xml'
],
'demo': ['sale_journal_demo.xml'],
'test': [ ],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Bismarrck/tensorflow | tensorflow/python/saved_model/saved_model_test.py | 13 | 64354 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import loader_impl
from tensorflow.python.saved_model import main_op
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import saver_test_utils
from tensorflow.python.training import training
from tensorflow.python.util import compat
SAVED_MODEL_PATH = ("cc/saved_model/testdata/half_plus_two/00000123")
def tearDownModule():
file_io.delete_recursively(test.get_temp_dir())
class SavedModelTestBase(test.TestCase):
def _get_export_dir(self, label):
return os.path.join(test.get_temp_dir(), label)
def _init_and_validate_variable(self, sess, variable_name, variable_value):
v = variables.VariableV1(variable_value, name=variable_name)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(variable_value, self.evaluate(v))
def _build_asset_collection(self, asset_file_name, asset_file_contents,
asset_file_tensor_name, asset_subdir=""):
parent_dir = os.path.join(
compat.as_bytes(test.get_temp_dir()), compat.as_bytes(asset_subdir))
file_io.recursive_create_dir(parent_dir)
asset_filepath = os.path.join(
compat.as_bytes(parent_dir), compat.as_bytes(asset_file_name))
file_io.write_string_to_file(asset_filepath, asset_file_contents)
asset_file_tensor = constant_op.constant(
asset_filepath, name=asset_file_tensor_name)
ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, asset_file_tensor)
asset_collection = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
return asset_collection
class SavedModelTest(SavedModelTestBase):
def _validate_assets(self,
export_dir,
asset_file_def,
expected_asset_file_name,
expected_asset_file_contents,
expected_asset_tensor_name,
asset_id=0):
assets_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY),
compat.as_bytes(expected_asset_file_name))
actual_asset_contents = file_io.read_file_to_string(assets_path)
self.assertEqual(expected_asset_file_contents,
compat.as_text(actual_asset_contents))
self.assertEqual(expected_asset_file_name,
asset_file_def[asset_id].filename)
self.assertEqual(expected_asset_tensor_name,
asset_file_def[asset_id].tensor_info.name)
def _validate_inputs_tensor_info_fail(self, builder, tensor_info):
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
foo_signature = signature_def_utils.build_signature_def({
"foo_inputs": tensor_info
}, dict(), "foo")
self.assertRaises(
AssertionError,
builder.add_meta_graph_and_variables,
sess, ["foo"],
signature_def_map={"foo_key": foo_signature})
def _validate_inputs_tensor_info_accept(self, builder, tensor_info):
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
foo_signature = signature_def_utils.build_signature_def({
"foo_inputs": tensor_info
}, dict(), "foo")
builder.add_meta_graph_and_variables(
sess, ["foo"],
signature_def_map={"foo_key": foo_signature})
def _validate_outputs_tensor_info_fail(self, builder, tensor_info):
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
foo_signature = signature_def_utils.build_signature_def(
dict(), {"foo_outputs": tensor_info}, "foo")
self.assertRaises(
AssertionError,
builder.add_meta_graph_and_variables,
sess, ["foo"],
signature_def_map={"foo_key": foo_signature})
def _validate_outputs_tensor_info_accept(self, builder, tensor_info):
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
foo_signature = signature_def_utils.build_signature_def(
dict(), {"foo_outputs": tensor_info}, "foo")
builder.add_meta_graph_and_variables(
sess, ["foo"],
signature_def_map={"foo_key": foo_signature})
def _validate_sig_def_keys(self, builder, valid_tensor_info, invalid_key):
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
foo_signature = signature_def_utils.build_signature_def(
dict(), {"foo_key": valid_tensor_info}, "foo")
self.assertRaises(
KeyError,
builder.add_meta_graph_and_variables,
sess, ["foo"],
signature_def_map={invalid_key: foo_signature})
def testMaybeSavedModelDir(self):
base_path = test.test_src_dir_path("/python/saved_model")
self.assertFalse(loader.maybe_saved_model_directory(base_path))
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
self.assertTrue(loader.maybe_saved_model_directory(base_path))
base_path = "complete_garbage"
self.assertFalse(loader.maybe_saved_model_directory(base_path))
def testBadSavedModelFileFormat(self):
export_dir = self._get_export_dir("test_bad_saved_model_file_format")
# Attempt to load a SavedModel from an export directory that does not exist.
with self.session(graph=ops.Graph()) as sess:
with self.assertRaisesRegexp(IOError,
"SavedModel file does not exist at: %s" %
export_dir):
loader.load(sess, ["foo"], export_dir)
os.makedirs(export_dir)
# Write an invalid binary proto to saved_model.pb.
path_to_pb = os.path.join(export_dir, constants.SAVED_MODEL_FILENAME_PB)
with open(path_to_pb, "w") as f:
f.write("invalid content")
with self.session(graph=ops.Graph()) as sess:
with self.assertRaisesRegexp(IOError, "Cannot parse file.*%s" %
constants.SAVED_MODEL_FILENAME_PB):
loader.load(sess, ["foo"], export_dir)
# Cleanup the directory and start again.
file_io.delete_recursively(export_dir)
os.makedirs(export_dir)
# Write an invalid text proto to saved_model.pbtxt
path_to_pbtxt = os.path.join(export_dir,
constants.SAVED_MODEL_FILENAME_PBTXT)
with open(path_to_pbtxt, "w") as f:
f.write("invalid content")
with self.session(graph=ops.Graph()) as sess:
with self.assertRaisesRegexp(IOError, "Cannot parse file.*%s" %
constants.SAVED_MODEL_FILENAME_PBTXT):
loader.load(sess, ["foo"], export_dir)
@test_util.run_deprecated_v1
def testVerifySessionGraphUsage(self):
export_dir = self._get_export_dir("test_verify_session_graph_usage")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING])
# Save the SavedModel to disk.
builder.save()
# Build a session and supply it to the load operation.
sess = session.Session(graph=ops.Graph())
loader.load(sess, [tag_constants.TRAINING], export_dir)
# Check the variable within the scope of the session and its graph.
with sess:
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
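# Hedged recap (not part of the original test): the pattern exercised above is
# the basic SavedModel round trip -- build a graph, add it and its variables
# under one or more tags, save, then load into a fresh session with the same
# tags.
#
# builder = saved_model_builder._SavedModelBuilder(export_dir)
# with session.Session(graph=ops.Graph()) as sess:
#   ...build the graph and initialize its variables...
#   builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING])
# builder.save()
# with session.Session(graph=ops.Graph()) as sess:
#   loader.load(sess, [tag_constants.TRAINING], export_dir)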
@test_util.run_deprecated_v1
def testSequence(self):
export_dir = self._get_export_dir("test_sequence")
builder = saved_model_builder._SavedModelBuilder(export_dir)
# Expect an assertion error since add_meta_graph_and_variables() should be
# invoked before any add_meta_graph() calls.
with self.session(graph=ops.Graph()) as sess:
self.assertRaises(AssertionError, builder.add_meta_graph, ["foo"])
# Expect an assertion error for multiple calls of
# add_meta_graph_and_variables() since weights should be saved exactly once.
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(sess, ["bar"])
self.assertRaises(AssertionError, builder.add_meta_graph_and_variables,
sess, ["baz"])
@test_util.run_deprecated_v1
def testTags(self):
export_dir = self._get_export_dir("test_tags")
builder = saved_model_builder._SavedModelBuilder(export_dir)
# Graph with a single variable. SavedModel invoked to:
# - add with weights.
# - a single tag (from predefined constants).
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING])
# Graph that updates the single variable. SavedModel invoked to:
# - simply add the model (weights are not updated).
# - a single tag (from predefined constants).
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 43)
builder.add_meta_graph([tag_constants.SERVING])
# Graph that updates the single variable. SavedModel invoked to:
# - simply add the model (weights are not updated).
# - multiple tags (from predefined constants).
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 45)
builder.add_meta_graph([tag_constants.SERVING, tag_constants.GPU])
# Graph that updates the single variable. SavedModel invoked to:
# - simply add the model (weights are not updated).
# - multiple tags (from predefined constants for serving on TPU).
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 45)
builder.add_meta_graph([tag_constants.SERVING, tag_constants.TPU])
# Graph that updates the single variable. SavedModel is invoked:
# - to add the model (weights are not updated).
# - multiple custom tags.
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 44)
builder.add_meta_graph(["foo", "bar"])
# Save the SavedModel to disk.
builder.save()
# Restore the graph with a single predefined tag whose variables were saved.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, [tag_constants.TRAINING], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# Restore the graph with a single predefined tag whose variables were not
# saved.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# Restore the graph with multiple predefined tags whose variables were not
# saved.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, [tag_constants.SERVING, tag_constants.GPU], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# Restore the graph with multiple predefined tags (for serving on TPU)
# whose variables were not saved.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, [tag_constants.SERVING, tag_constants.TPU], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# Restore the graph with multiple tags. Provide duplicate tags to test set
# semantics.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo", "bar", "foo"], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# Try restoring a graph with a non-existent tag. This should yield a runtime
# error.
with self.session(graph=ops.Graph()) as sess:
self.assertRaises(RuntimeError, loader.load, sess, ["INVALID"],
export_dir)
# Try restoring a graph where a subset of the tags match. Since tag matching
# for meta graph defs follows "all" semantics, this should yield a runtime
# error.
with self.session(graph=ops.Graph()) as sess:
self.assertRaises(RuntimeError, loader.load, sess, ["foo", "baz"],
export_dir)
@test_util.run_v1_only("b/120545219")
def testVariables(self):
export_dir = self._get_export_dir("test_variables")
builder = saved_model_builder._SavedModelBuilder(export_dir)
# Graph with two variables. SavedModel invoked to:
# - add with weights.
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v1", 1)
self._init_and_validate_variable(sess, "v2", 2)
builder.add_meta_graph_and_variables(sess, ["foo"])
# Graph with a single variable (subset of the variables from the previous
# graph whose weights were saved). SavedModel invoked to:
# - simply add the model (weights are not updated).
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v2", 3)
builder.add_meta_graph(["bar"])
# Graph with a single variable (disjoint set of variables from the previous
# graph whose weights were saved). SavedModel invoked to:
# - simply add the model (weights are not updated).
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v3", 4)
builder.add_meta_graph(["baz"])
# Save the SavedModel to disk.
builder.save()
# Restore the graph with tag "foo", whose variables were saved.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
collection_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertEqual(len(collection_vars), 2)
self.assertEqual(1, collection_vars[0].eval())
self.assertEqual(2, collection_vars[1].eval())
# Restore the graph with tag "bar", whose variables were not saved. Only the
# subset of the variables added to the graph will be restored with the
# checkpointed value.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["bar"], export_dir)
collection_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertEqual(len(collection_vars), 1)
self.assertEqual(2, collection_vars[0].eval())
# Try restoring the graph with tag "baz", whose variables were not saved.
# Since this graph has a disjoint set of variables from the set that was
# saved, this should raise an error.
with self.session(graph=ops.Graph()) as sess:
self.assertRaises(errors.NotFoundError, loader.load, sess, ["baz"],
export_dir)
@test_util.run_deprecated_v1
def testGraphWithoutVariables(self):
export_dir = self._get_export_dir("test_graph_has_variables")
builder = saved_model_builder._SavedModelBuilder(export_dir)
# Graph with no variables.
with self.session(graph=ops.Graph()) as sess:
constant_5_name = constant_op.constant(5.0).name
builder.add_meta_graph_and_variables(sess, ["foo"])
# Second graph with no variables
with self.session(graph=ops.Graph()) as sess:
constant_6_name = constant_op.constant(6.0).name
builder.add_meta_graph(["bar"])
# Save the SavedModel to disk.
builder.save()
# Restore the graph with tag "foo".
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
# Read the constant a from the graph.
a = ops.get_default_graph().get_tensor_by_name(constant_5_name)
b = constant_op.constant(6.0)
c = a * b
self.assertEqual(30.0, self.evaluate(c))
# Restore the graph with tag "bar".
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["bar"], export_dir)
# Read the constant a from the graph.
a = ops.get_default_graph().get_tensor_by_name(constant_6_name)
b = constant_op.constant(5.0)
c = a * b
self.assertEqual(30.0, self.evaluate(c))
@test_util.run_deprecated_v1
def testNoOverwrite(self):
export_dir = self._get_export_dir("test_no_overwrite")
builder = saved_model_builder._SavedModelBuilder(export_dir)
# Graph with a single variable. SavedModel invoked to:
# - add with weights.
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(sess, ["foo"])
# Save the SavedModel to disk in text format.
builder.save(as_text=True)
# Restore the graph with tag "foo", whose variables were saved.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# An attempt to create another builder with the same export directory should
# result in an assertion error.
self.assertRaises(AssertionError, saved_model_builder._SavedModelBuilder,
export_dir)
@test_util.run_deprecated_v1
def testSaveAsText(self):
export_dir = self._get_export_dir("test_astext")
builder = saved_model_builder._SavedModelBuilder(export_dir)
# Graph with a single variable. SavedModel invoked to:
# - add with weights.
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(sess, ["foo"])
# Graph with the same single variable. SavedModel invoked to:
# - simply add the model (weights are not updated).
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 43)
builder.add_meta_graph(["bar"])
# Save the SavedModel to disk in text format.
builder.save(as_text=True)
# Restore the graph with tag "foo", whose variables were saved.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# Restore the graph with tag "bar", whose variables were not saved.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["bar"], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
@test_util.run_v1_only("b/120545219")
def testCollections(self):
export_dir = self._get_export_dir("test_collections")
builder = saved_model_builder._SavedModelBuilder(export_dir)
# Graph with a single variable added to a collection. SavedModel invoked to:
# - add with weights.
with self.session(graph=ops.Graph()) as sess:
v = variables.VariableV1(42, name="v")
ops.add_to_collection("foo_vars", v)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(42, self.evaluate(v))
builder.add_meta_graph_and_variables(sess, ["foo"])
# Graph with the same single variable added to a different collection.
# SavedModel invoked to:
# - simply add the model (weights are not updated).
with self.session(graph=ops.Graph()) as sess:
v = variables.VariableV1(43, name="v")
ops.add_to_collection("bar_vars", v)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(43, self.evaluate(v))
builder.add_meta_graph(["bar"])
# Save the SavedModel to disk.
builder.save()
# Restore the graph with tag "foo", whose variables were saved. The
# collection 'foo_vars' should contain a single element. The collection
# 'bar_vars' should not be found.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
collection_foo_vars = ops.get_collection("foo_vars")
self.assertEqual(len(collection_foo_vars), 1)
self.assertEqual(42, collection_foo_vars[0].eval())
self.assertEqual(len(ops.get_collection("bar_vars")), 0)
# Restore the graph with tag "bar", whose variables were not saved. The
# collection-def exported as part of the meta graph def is updated to
# reflect the new collection. The value of the variable in the
# collection-def corresponds to the saved value (from the previous graph
# with tag "foo").
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["bar"], export_dir)
collection_bar_vars = ops.get_collection("bar_vars")
self.assertEqual(len(collection_bar_vars), 1)
self.assertEqual(42, collection_bar_vars[0].eval())
self.assertEqual(len(ops.get_collection("foo_vars")), 0)
@test_util.run_deprecated_v1
def testSignatureDefs(self):
export_dir = self._get_export_dir("test_signature_defs")
builder = saved_model_builder._SavedModelBuilder(export_dir)
# Graph with a single variable and a single entry in the signature def map.
# SavedModel is invoked to add with weights.
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
# Build and populate an empty SignatureDef for testing.
foo_signature = signature_def_utils.build_signature_def(dict(),
dict(), "foo")
builder.add_meta_graph_and_variables(
sess, ["foo"], signature_def_map={"foo_key": foo_signature})
# Graph with the same single variable and multiple entries in the signature
# def map. No weights are saved by SavedModel.
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 43)
# Build and populate a different SignatureDef for testing.
bar_signature = signature_def_utils.build_signature_def(dict(),
dict(), "bar")
# Also, build a different SignatureDef corresponding to "foo_key" defined
# in the previous graph.
foo_new_signature = signature_def_utils.build_signature_def(dict(),
dict(),
"foo_new")
builder.add_meta_graph(
["bar"],
signature_def_map={
"bar_key": bar_signature,
"foo_key": foo_new_signature
})
# Save the SavedModel to disk.
builder.save()
# Restore the graph with tag "foo". The single entry in the SignatureDef map
# corresponding to "foo_key" should exist.
with self.session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
foo_signature = foo_graph.signature_def
self.assertEqual(len(foo_signature), 1)
self.assertEqual("foo", foo_signature["foo_key"].method_name)
# Restore the graph with tag "bar". The SignatureDef map should have two
# entries. One corresponding to "bar_key" and another corresponding to the
# new value of "foo_key".
with self.session(graph=ops.Graph()) as sess:
bar_graph = loader.load(sess, ["bar"], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
bar_signature = bar_graph.signature_def
self.assertEqual(len(bar_signature), 2)
self.assertEqual("bar", bar_signature["bar_key"].method_name)
self.assertEqual("foo_new", bar_signature["foo_key"].method_name)
def testSignatureDefValidationFails(self):
export_dir = self._get_export_dir("test_signature_def_validation_fail")
builder = saved_model_builder._SavedModelBuilder(export_dir)
tensor_without_encoding = meta_graph_pb2.TensorInfo()
tensor_without_encoding.dtype = types_pb2.DT_FLOAT
self._validate_inputs_tensor_info_fail(builder, tensor_without_encoding)
self._validate_outputs_tensor_info_fail(builder, tensor_without_encoding)
tensor_without_dtype = meta_graph_pb2.TensorInfo()
tensor_without_dtype.name = "x"
self._validate_inputs_tensor_info_fail(builder, tensor_without_dtype)
self._validate_outputs_tensor_info_fail(builder, tensor_without_dtype)
tensor_empty = meta_graph_pb2.TensorInfo()
self._validate_inputs_tensor_info_fail(builder, tensor_empty)
self._validate_outputs_tensor_info_fail(builder, tensor_empty)
valid_tensor_info = meta_graph_pb2.TensorInfo()
valid_tensor_info.name = "foo"
valid_tensor_info.dtype = types_pb2.DT_FLOAT
self._validate_sig_def_keys(builder, valid_tensor_info,
constants.INIT_OP_SIGNATURE_KEY)
self._validate_sig_def_keys(builder, valid_tensor_info,
constants.TRAIN_OP_SIGNATURE_KEY)
@test_util.run_deprecated_v1
def testSignatureDefValidationSucceedsWithName(self):
tensor_with_name = meta_graph_pb2.TensorInfo()
tensor_with_name.name = "foo"
tensor_with_name.dtype = types_pb2.DT_FLOAT
export_dir = self._get_export_dir("test_signature_def_validation_name_1")
builder = saved_model_builder._SavedModelBuilder(export_dir)
self._validate_inputs_tensor_info_accept(builder, tensor_with_name)
export_dir = self._get_export_dir("test_signature_def_validation_name_2")
builder = saved_model_builder._SavedModelBuilder(export_dir)
self._validate_outputs_tensor_info_accept(builder, tensor_with_name)
@test_util.run_deprecated_v1
def testSignatureDefValidationSucceedsWithCoo(self):
tensor_with_coo = meta_graph_pb2.TensorInfo()
# TODO(soergel) test validation of each of the fields of coo_sparse
tensor_with_coo.coo_sparse.values_tensor_name = "foo"
tensor_with_coo.dtype = types_pb2.DT_FLOAT
export_dir = self._get_export_dir("test_signature_def_validation_coo_1")
builder = saved_model_builder._SavedModelBuilder(export_dir)
self._validate_inputs_tensor_info_accept(builder, tensor_with_coo)
export_dir = self._get_export_dir("test_signature_def_validation_coo_2")
builder = saved_model_builder._SavedModelBuilder(export_dir)
self._validate_outputs_tensor_info_accept(builder, tensor_with_coo)
@test_util.run_deprecated_v1
def testAssets(self):
export_dir = self._get_export_dir("test_assets")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
# Build an asset collection.
ignored_filepath = os.path.join(
compat.as_bytes(test.get_temp_dir()), compat.as_bytes("ignored.txt"))
file_io.write_string_to_file(ignored_filepath, "will be ignored")
asset_list = self._build_asset_collection("hello42.txt", "foo bar baz",
"asset_file_tensor")
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_list=asset_list)
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self._validate_assets(export_dir, foo_graph.asset_file_def, "hello42.txt",
"foo bar baz", "asset_file_tensor:0")
ignored_asset_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY),
compat.as_bytes("ignored.txt"))
self.assertFalse(file_io.file_exists(ignored_asset_path))
@test_util.run_deprecated_v1
def testAssetsNameCollisionDiffFile(self):
export_dir = self._get_export_dir("test_assets_name_collision_diff_file")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
asset_list = self._build_asset_collection(
"hello42.txt", "foo bar bak", "asset_file_tensor", asset_subdir="1")
asset_list = self._build_asset_collection(
"hello42.txt", "foo bar baz", "asset_file_tensor_1", asset_subdir="2")
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_list=asset_list)
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self._validate_assets(export_dir, foo_graph.asset_file_def, "hello42.txt",
"foo bar bak", "asset_file_tensor:0")
self._validate_assets(
export_dir,
foo_graph.asset_file_def,
"hello42.txt_1",
"foo bar baz",
"asset_file_tensor_1:0",
asset_id=1)
@test_util.run_deprecated_v1
def testAssetsNameCollisionSameFilepath(self):
export_dir = self._get_export_dir("test_assets_name_collision_same_path")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
asset_list = self._build_asset_collection("hello42.txt", "foo bar baz",
"asset_file_tensor")
asset_list = self._build_asset_collection("hello42.txt", "foo bar baz",
"asset_file_tensor_1")
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_list=asset_list)
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self._validate_assets(export_dir, foo_graph.asset_file_def, "hello42.txt",
"foo bar baz", "asset_file_tensor:0")
      # The second tensor should also be recorded, referencing the same asset
      # file as the first one.
self._validate_assets(
export_dir,
foo_graph.asset_file_def,
"hello42.txt",
"foo bar baz",
"asset_file_tensor_1:0",
asset_id=1)
ignored_asset_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY),
compat.as_bytes("hello42.txt_1"))
self.assertFalse(file_io.file_exists(ignored_asset_path))
@test_util.run_deprecated_v1
def testAssetsNameCollisionSameFile(self):
export_dir = self._get_export_dir("test_assets_name_collision_same_file")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
asset_list = self._build_asset_collection(
"hello42.txt", "foo bar baz", "asset_file_tensor", asset_subdir="1")
asset_list = self._build_asset_collection(
"hello42.txt", "foo bar baz", "asset_file_tensor_1", asset_subdir="2")
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_list=asset_list)
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self._validate_assets(export_dir, foo_graph.asset_file_def, "hello42.txt",
"foo bar baz", "asset_file_tensor:0")
      # The second tensor should also be recorded, referencing the same asset
      # file as the first one.
self._validate_assets(
export_dir,
foo_graph.asset_file_def,
"hello42.txt",
"foo bar baz",
"asset_file_tensor_1:0",
asset_id=1)
ignored_asset_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY),
compat.as_bytes("hello42.txt_1"))
self.assertFalse(file_io.file_exists(ignored_asset_path))
@test_util.run_deprecated_v1
def testAssetsNameCollisionManyFiles(self):
export_dir = self._get_export_dir("test_assets_name_collision_many_files")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
for i in range(5):
idx = str(i)
asset_list = self._build_asset_collection(
"hello42.txt",
"foo bar baz " + idx,
"asset_file_tensor_" + idx,
asset_subdir=idx)
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_list=asset_list)
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
for i in range(1, 5):
idx = str(i)
self._validate_assets(
export_dir,
foo_graph.asset_file_def,
"hello42.txt_" + idx,
"foo bar baz " + idx,
"asset_file_tensor_{}:0".format(idx),
asset_id=i)
self._validate_assets(export_dir, foo_graph.asset_file_def, "hello42.txt",
"foo bar baz 0", "asset_file_tensor_0:0")
@test_util.run_v1_only("b/120545219")
def testCustomInitOp(self):
export_dir = self._get_export_dir("test_main_op")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
# Add `v1` and `v2` variables to the graph.
v1 = variables.VariableV1(1, name="v1")
ops.add_to_collection("v", v1)
v2 = variables.VariableV1(2, name="v2")
ops.add_to_collection("v", v2)
# Initialize another variable `v3` to 42.
v3 = variables.VariableV1(42, name="v3")
ops.add_to_collection("v", v3)
# Set up an assignment op to be run as part of the main_op.
with ops.control_dependencies([main_op.main_op()]):
add_v1_v2 = math_ops.add(v1._ref(), v2._ref())
custom_init_op = control_flow_ops.group(state_ops.assign(v3, add_v1_v2))
self.evaluate(custom_init_op)
builder.add_meta_graph_and_variables(
sess, ["foo"], init_op=custom_init_op)
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
self.assertEqual(1, ops.get_collection("v")[0].eval())
self.assertEqual(2, ops.get_collection("v")[1].eval())
      # v3 evaluates to the sum of the first two variables because the custom
      # init op assigns that sum to it as part of main_op, following a restore.
self.assertEqual(3, ops.get_collection("v")[2].eval())
@test_util.run_v1_only("b/120545219")
def testTrainOp(self):
export_dir = self._get_export_dir("test_train_op")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
# Add `v1` and `v2` variables to the graph.
v1 = variables.VariableV1(1, name="v1")
ops.add_to_collection("v", v1)
v2 = variables.VariableV1(2, name="v2")
ops.add_to_collection("v", v2)
self.evaluate(variables.global_variables_initializer())
train_op = state_ops.assign_add(v1, v2)
self.evaluate(train_op)
builder.add_meta_graph_and_variables(sess, ["foo"], train_op=train_op)
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
meta_graph_def = loader.load(sess, ["foo"], export_dir)
self.assertEqual(3, ops.get_collection("v")[0].eval())
self.assertEqual(2, ops.get_collection("v")[1].eval())
self.assertIsInstance(
loader_impl.get_train_op(meta_graph_def), ops.Tensor)
@test_util.run_v1_only("b/120545219")
def testTrainOpGroup(self):
export_dir = self._get_export_dir("test_train_op_group")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
# Add `v1` and `v2` variables to the graph.
v1 = variables.VariableV1(1, name="v1")
ops.add_to_collection("v", v1)
v2 = variables.VariableV1(2, name="v2")
ops.add_to_collection("v", v2)
self.evaluate(variables.global_variables_initializer())
train_op = control_flow_ops.group()
self.evaluate(train_op)
builder.add_meta_graph_and_variables(sess, ["foo"], train_op=train_op)
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
meta_graph_def = loader.load(sess, ["foo"], export_dir)
self.assertEqual(1, ops.get_collection("v")[0].eval())
self.assertEqual(2, ops.get_collection("v")[1].eval())
self.assertIsInstance(
loader_impl.get_train_op(meta_graph_def), ops.Operation)
@test_util.run_v1_only("b/120545219")
def testTrainOpAfterVariables(self):
export_dir = self._get_export_dir("test_train_op_after_variables")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
# Add `v1` and `v2` variables to the graph.
v1 = variables.VariableV1(1, name="v1")
ops.add_to_collection("v", v1)
v2 = variables.VariableV1(2, name="v2")
ops.add_to_collection("v", v2)
self.evaluate(variables.global_variables_initializer())
builder.add_meta_graph_and_variables(sess, ["pre_foo"])
train_op = state_ops.assign_add(v1, v2)
self.evaluate(train_op)
builder.add_meta_graph(["foo"], train_op=train_op)
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
meta_graph_def = loader.load(sess, ["foo"], export_dir)
self.assertIsInstance(
loader_impl.get_train_op(meta_graph_def), ops.Tensor)
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["pre_foo"], export_dir)
self.assertFalse(ops.get_collection(constants.TRAIN_OP_KEY))
@test_util.run_deprecated_v1
def testMultipleAssets(self):
export_dir = self._get_export_dir("test_multiple_assets")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
# Build an asset collection specific to `foo` graph.
asset_list = self._build_asset_collection("foo.txt", "content_foo",
"asset_file_tensor")
# Add the asset collection as part of the graph with tag "foo".
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_list=asset_list)
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
# Build an asset collection specific to `bar` graph.
asset_list = self._build_asset_collection("bar.txt", "content_bar",
"asset_file_tensor")
# Add the asset collection as part of the graph with tag "bar".
builder.add_meta_graph(["bar"], assets_list=asset_list)
# Save the SavedModel to disk.
builder.save()
# Check assets restored for graph with tag "foo".
with self.session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self._validate_assets(export_dir, foo_graph.asset_file_def, "foo.txt",
"content_foo", "asset_file_tensor:0")
# Check assets restored for graph with tag "bar".
with self.session(graph=ops.Graph()) as sess:
bar_graph = loader.load(sess, ["bar"], export_dir)
self._validate_assets(export_dir, bar_graph.asset_file_def, "bar.txt",
"content_bar", "asset_file_tensor:0")
@test_util.run_deprecated_v1
def testDuplicateAssets(self):
export_dir = self._get_export_dir("test_duplicate_assets")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
# Build an asset collection with `foo.txt` that has `foo` specific
# content.
asset_list = self._build_asset_collection("foo.txt", "content_foo",
"asset_file_tensor")
# Add the asset collection as part of the graph with tag "foo".
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_list=asset_list)
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
# Build an asset collection with `foo.txt` that has `bar` specific
# content.
asset_list = self._build_asset_collection("foo.txt", "content_bar",
"asset_file_tensor")
# Add the asset collection as part of the graph with tag "bar".
builder.add_meta_graph(["bar"], assets_list=asset_list)
# Save the SavedModel to disk.
builder.save()
# Check assets restored for graph with tag "foo".
with self.session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self._validate_assets(export_dir, foo_graph.asset_file_def, "foo.txt",
"content_foo", "asset_file_tensor:0")
# Check assets restored for graph with tag "bar".
with self.session(graph=ops.Graph()) as sess:
bar_graph = loader.load(sess, ["bar"], export_dir)
# Validate the assets for `bar` graph. `foo.txt` should contain the
# original contents corresponding to `foo` graph since an asset with the
      # same name across multiple graphs is only stored the first time it is added.
self._validate_assets(export_dir, bar_graph.asset_file_def, "foo.txt",
"content_foo", "asset_file_tensor:0")
@test_util.run_v1_only("b/120545219")
def testOp(self):
export_dir = self._get_export_dir("test_op")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with session.Session(
graph=ops.Graph(),
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v1 = variables.VariableV1(1, name="v1")
with sess.graph.device("/cpu:1"):
v2 = variables.VariableV1(2, name="v2")
# v3 is an unsaved variable derived from v1 and v2. It is used to
# exercise the ability to run an init op when restoring a graph.
v3 = variables.VariableV1(1, name="v3", trainable=False, collections=[])
assign_v3 = state_ops.assign(v3, math_ops.add(v1, v2))
init_op = control_flow_ops.group(assign_v3, name="init_op")
ops.add_to_collection("v", v1)
ops.add_to_collection("v", v2)
ops.add_to_collection("v", v3)
ops.add_to_collection("init_op", init_op)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1, ops.get_collection("v")[0].eval())
self.assertEqual(2, ops.get_collection("v")[1].eval())
builder.add_meta_graph_and_variables(sess, ["foo"])
# Save the SavedModel to disk.
builder.save()
with session.Session(
graph=ops.Graph(),
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
loader.load(sess, ["foo"], export_dir)
# Validate variables, run the init op and verify result.
self.assertEqual(1, ops.get_collection("v")[0].eval())
self.assertEqual(2, ops.get_collection("v")[1].eval())
ops.get_collection("init_op")[0].run()
self.assertEqual(3, ops.get_collection("v")[2].eval())
def testCustomSaveable(self):
export_dir = self._get_export_dir("custom_saveable")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with session.Session(
graph=ops.Graph(),
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
# CheckpointedOp is a key-value table that can be saved across sessions.
      # The table registers itself in the SAVEABLE_OBJECTS collection.
v1 = saver_test_utils.CheckpointedOp(name="v1")
self.evaluate(variables.global_variables_initializer())
v1.insert("k1", 3.0).run()
# Once the table is restored, we can access it through this reference.
ops.add_to_collection("table_ref", v1.table_ref)
builder.add_meta_graph_and_variables(sess, ["foo"])
# Save the SavedModel to disk.
builder.save()
with session.Session(
graph=ops.Graph(),
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
loader.load(sess, ["foo"], export_dir)
# Instantiate a wrapper object from the checkpointed reference.
v1 = saver_test_utils.CheckpointedOp(
name="v1", table_ref=ops.get_collection("table_ref")[0])
self.assertEqual(b"k1", v1.keys().eval())
self.assertEqual(3.0, v1.values().eval())
@test_util.run_deprecated_v1
def testCustomSaver(self):
export_dir = self._get_export_dir("test_custom_saver")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
variables.VariableV1(1, name="v1")
self.evaluate(variables.global_variables_initializer())
custom_saver = training.Saver(name="my_saver")
builder.add_meta_graph_and_variables(sess, ["tag"], saver=custom_saver)
# Save the SavedModel to disk.
builder.save()
with ops.Graph().as_default() as graph:
with self.session(graph=graph) as sess:
saved_graph = loader.load(sess, ["tag"], export_dir)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue("my_saver/restore_all" in graph_ops)
self.assertFalse("save/restore_all" in graph_ops)
self.assertEqual(
saved_graph.saver_def.restore_op_name, "my_saver/restore_all")
@test_util.run_deprecated_v1
def testNoCustomSaver(self):
export_dir = self._get_export_dir("test_no_custom_saver")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
variables.VariableV1(1, name="v1")
self.evaluate(variables.global_variables_initializer())
training.Saver(name="my_saver")
builder.add_meta_graph_and_variables(sess, ["tag"])
# Save the SavedModel to disk.
builder.save()
with ops.Graph().as_default() as graph:
with self.session(graph=graph) as sess:
saved_graph = loader.load(sess, ["tag"], export_dir)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue("my_saver/restore_all" in graph_ops)
self.assertTrue("save/restore_all" in graph_ops)
self.assertEqual(
saved_graph.saver_def.restore_op_name, "save/restore_all")
@test_util.run_deprecated_v1
def testMultipleCustomSavers(self):
export_dir = self._get_export_dir("test_multiple_custom_savers")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
variables.VariableV1(1, name="v1")
self.evaluate(variables.global_variables_initializer())
builder.add_meta_graph_and_variables(sess, ["tag_0"])
saver_1 = training.Saver()
builder.add_meta_graph(["tag_1"], saver=saver_1)
saver_2 = training.Saver()
builder.add_meta_graph(["tag_2"], saver=saver_2)
# Save the SavedModel to disk.
builder.save()
def _validate_custom_saver(tag_name, saver_name):
with ops.Graph().as_default() as graph:
with self.session(graph=graph) as sess:
saved_graph = loader.load(sess, [tag_name], export_dir)
self.assertEqual(
saved_graph.saver_def.restore_op_name,
saver_name)
_validate_custom_saver("tag_0", "save/restore_all")
_validate_custom_saver("tag_1", "save_1/restore_all")
_validate_custom_saver("tag_2", "save_2/restore_all")
@test_util.run_deprecated_v1
def testImportScope(self):
export_dir = self._get_export_dir("test_scoped_assets")
builder = saved_model_builder._SavedModelBuilder(export_dir)
# Build a SavedModel with a variable, an asset, and a constant tensor.
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
asset_list = self._build_asset_collection("foo.txt", "content_foo",
"asset_file_tensor")
constant_op.constant("constant value", name="constant_tensor_name")
builder.add_meta_graph_and_variables(
sess, ["tag_name"], assets_list=asset_list)
# Save the asset file path for later comparison.
asset_file_path = asset_list[0].eval()
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
# Restore the SavedModel under an import_scope in a new graph/session.
graph_proto = loader.load(
sess, ["tag_name"], export_dir, import_scope="scope_name")
# The loaded variable tensor should be scoped, but its contents should be
# unchanged.
self.assertEqual(
"scope_name/v:0",
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].name)
self.assertEqual(
42,
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# The loaded asset tensor should be scoped, but the asset file path and
# contents should be unchanged.
asset_list = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
self.assertEqual(1, len(asset_list))
self.assertEqual(asset_file_path, asset_list[0].eval())
self.assertEqual("scope_name/asset_file_tensor:0", asset_list[0].name)
# The static asset data inside graph_proto.collection_def should not be
# scoped.
self._validate_assets(export_dir, graph_proto.asset_file_def, "foo.txt",
"content_foo", "asset_file_tensor:0")
# The constant tensor should be scoped, but its contents should be
# unchanged.
self.assertEqual(
compat.as_bytes("constant value"),
ops.get_default_graph().get_tensor_by_name(
"scope_name/constant_tensor_name:0").eval())
@test_util.run_deprecated_v1
def testClearDevices(self):
export_dir = self._get_export_dir("test_clear_devices")
builder = saved_model_builder._SavedModelBuilder(export_dir)
# Specify a device and save a variable.
ops.reset_default_graph()
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(
sess, [tag_constants.TRAINING], clear_devices=True)
# Save the SavedModel to disk.
builder.save()
# Restore the graph with a single predefined tag whose variables were saved
# without any device information.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, [tag_constants.TRAINING], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
  # Tests the behavior of loading SavedModels that have missing attrs or attrs
  # with incorrect types.
def testInconsistentConsumerDefaultAttrs(self):
export_dir = self._get_export_dir(
"test_strip_default_attrs_no_consumer_defaults")
builder = saved_model_builder._SavedModelBuilder(export_dir)
# Add a graph with a single variable and a test op with a defaultless
# float32 attr, "test_attr".
with session.Session(graph=ops.Graph()) as sess:
variables.VariableV1(1.0, dtype=dtypes.float64, name="var")
test_ops.test_attr(T=dtypes.float32, name="test_attr")
self.evaluate(variables.global_variables_initializer())
builder.add_meta_graph_and_variables(sess, ["foo"])
# Save the SavedModel to disk in text format.
builder.save(as_text=True)
# Rewrite the SavedModel to remove the T attr from "test_attr".
saved_model_file = os.path.join(
export_dir, constants.SAVED_MODEL_FILENAME_PBTXT)
with open(saved_model_file) as f:
original_saved_model = f.read()
no_attr_saved_model = original_saved_model.replace("""
attr {
key: "T"
value {
type: DT_FLOAT
}
}""", "")
with open(saved_model_file, "w") as f:
f.write(no_attr_saved_model)
# Loading the SavedModel via the loader must fail because the SavedModel
# does not have any attr values for the "TestAttr" node, and there is no
# default specified in the TestAttr OpDef.
sess = session.Session(graph=ops.Graph())
with self.assertRaisesRegexp(
ValueError, "NodeDef missing attr 'T' from Op<name=TestAttr"):
loader.load(sess, ["foo"], export_dir)
# Rewrite the SavedModel to change the type of the T attr in "test_attr"
bad_type_saved_model = original_saved_model.replace("""
attr {
key: "T"
value {
type: DT_FLOAT
}
}""", """
attr {
key: "T"
value {
type: DT_DOUBLE
}
}""")
with open(saved_model_file, "w") as f:
f.write(bad_type_saved_model)
# Loading the SavedModel via the loader must fail because there is no
# OpKernel registered to handle T = double.
sess = session.Session(graph=ops.Graph())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"No OpKernel was registered to support Op 'TestAttr' used by node "
"test_attr \\(defined at .*\\) with these attrs: \\[.*\\]\n"
"Registered devices:.*\n"
"Registered kernels:.*"
):
loader.load(sess, ["foo"], export_dir)
class SavedModelV1Test(SavedModelTestBase):
def _validate_asset_collection(self,
export_dir,
graph_collection_def,
expected_asset_file_name,
expected_asset_file_contents,
expected_asset_tensor_name,
asset_id=0):
assets_any = graph_collection_def[constants.ASSETS_KEY].any_list.value
asset = meta_graph_pb2.AssetFileDef()
assets_any[asset_id].Unpack(asset)
assets_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY),
compat.as_bytes(expected_asset_file_name))
actual_asset_contents = file_io.read_file_to_string(assets_path)
self.assertEqual(expected_asset_file_contents,
compat.as_text(actual_asset_contents))
self.assertEqual(expected_asset_file_name, asset.filename)
self.assertEqual(expected_asset_tensor_name, asset.tensor_info.name)
@test_util.run_deprecated_v1
def testWritingAssetsToCollection(self):
export_dir = self._get_export_dir("test_writing_assets_to_collection")
builder = saved_model_builder.SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
# Build an asset list.
ignored_filepath = os.path.join(
compat.as_bytes(test.get_temp_dir()), compat.as_bytes("ignored.txt"))
file_io.write_string_to_file(ignored_filepath, "will be ignored")
asset_collection = self._build_asset_collection(
"hello42.txt", "foo bar baz", "asset_file_tensor")
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_collection=asset_collection)
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self._validate_asset_collection(export_dir, foo_graph.collection_def,
"hello42.txt", "foo bar baz",
"asset_file_tensor:0")
ignored_asset_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY),
compat.as_bytes("ignored.txt"))
self.assertFalse(file_io.file_exists(ignored_asset_path))
@test_util.run_deprecated_v1
def testLegacyInitOpWithNonEmptyCollection(self):
export_dir = self._get_export_dir(
"test_legacy_init_op_with_non_empty_collection")
self._testInitOpsWithNonEmptyCollection(export_dir,
constants.LEGACY_INIT_OP_KEY)
@test_util.run_deprecated_v1
def testMainOpWithNonEmptyCollection(self):
export_dir = self._get_export_dir("test_main_op_with_non_empty_collection")
self._testInitOpsWithNonEmptyCollection(export_dir, constants.MAIN_OP_KEY)
def _testInitOpsWithNonEmptyCollection(self, export_dir, key):
builder = saved_model_builder.SavedModelBuilder(export_dir)
g = ops.Graph()
with self.session(graph=g) as sess:
# Initialize variable `v1` to 1.
v1 = variables.VariableV1(1, name="v1")
ops.add_to_collection("v", v1)
# Initialize another variable `v2` to 42.
v2 = variables.VariableV1(42, name="v2", trainable=False, collections=[])
ops.add_to_collection("v", v2)
# Set up an assignment op to be run as part of the init op.
assign_v2 = state_ops.assign(v2, v1)
init_op = control_flow_ops.group(assign_v2, name="init_op")
self.evaluate(variables.global_variables_initializer())
ops.add_to_collection(key, control_flow_ops.no_op())
      # ValueError should be raised since the init op collection (given by
      # `key`) already contains an op and multiple init ops are not supported.
with self.assertRaisesRegexp(ValueError, "Graph already contains"):
builder.add_meta_graph_and_variables(
sess, ["foo"], legacy_init_op=init_op)
# We shouldn't be able to add as MAIN_OP, either.
with self.assertRaisesRegexp(ValueError, "Graph already contains"):
builder.add_meta_graph_and_variables(sess, ["foo"], main_op=init_op)
def testStripDefaultAttrs(self):
export_dir = self._get_export_dir("test_strip_default_attrs")
builder = saved_model_builder.SavedModelBuilder(export_dir)
# Add a graph with two float32 variables and a Complex Op composing them
# with strip_default_attrs enabled.
with session.Session(graph=ops.Graph()) as sess:
real_num = variables.VariableV1(1.0, dtype=dtypes.float32, name="real")
imag_num = variables.VariableV1(2.0, dtype=dtypes.float32, name="imag")
math_ops.complex(real_num, imag_num, name="complex")
self.evaluate(variables.global_variables_initializer())
builder.add_meta_graph_and_variables(
sess, ["foo"], strip_default_attrs=True)
# Add a graph with the same float32 variables and a Complex Op composing
# them with strip_default_attrs disabled.
with session.Session(graph=ops.Graph()) as sess:
real_num = variables.VariableV1(1.0, dtype=dtypes.float32, name="real")
imag_num = variables.VariableV1(2.0, dtype=dtypes.float32, name="imag")
math_ops.complex(real_num, imag_num, name="complex")
self.evaluate(variables.global_variables_initializer())
builder.add_meta_graph(["bar"], strip_default_attrs=False)
# Save the SavedModel to disk in text format.
builder.save(as_text=True)
# Loading graph "foo" via the loader must restore the defaults for the
# "Complex" node based on the "Complex" OpDef in the Op registry.
sess = session.Session(graph=ops.Graph())
meta_graph_def = loader.load(sess, ["foo"], export_dir)
complex_node = test_util.get_node_def_from_graph("complex",
meta_graph_def.graph_def)
self.assertIn("T", complex_node.attr)
self.assertIn("Tout", complex_node.attr)
# Load graph "foo" from disk as-is to verify default attrs are stripped.
saved_model_pb = loader_impl.parse_saved_model(export_dir)
self.assertIsNotNone(saved_model_pb)
meta_graph_foo_def = None
meta_graph_bar_def = None
for meta_graph_def in saved_model_pb.meta_graphs:
if set(meta_graph_def.meta_info_def.tags) == set(["foo"]):
meta_graph_foo_def = meta_graph_def
elif set(meta_graph_def.meta_info_def.tags) == set(["bar"]):
meta_graph_bar_def = meta_graph_def
self.assertIsNotNone(meta_graph_foo_def)
self.assertIsNotNone(meta_graph_bar_def)
# "Complex" Op has 2 attributes with defaults:
# o "T" : float32. (input type)
# o "Tout" : complex64. (output type)
# "Complex" Op in graph "foo" shouldn't have attributes "T" and "Tout".
# Graph "foo" was saved with strip_default_attrs set to True.
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_foo_def.graph_def)
self.assertNotIn("T", node_def.attr)
self.assertNotIn("Tout", node_def.attr)
# "Complex" Op in graph "bar" must have attributes "T" and "Tout".
# Graph "bar" was saved with strip_default_attrs set to False.
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_bar_def.graph_def)
self.assertIn("T", node_def.attr)
self.assertIn("Tout", node_def.attr)
@test_util.run_v1_only("b/120545219")
def testLegacyInitOp(self):
export_dir = self._get_export_dir("test_legacy_init_op")
builder = saved_model_builder.SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
# Add `v1` and `v2` variables to the graph.
v1 = variables.VariableV1(1, name="v1")
ops.add_to_collection("v", v1)
v2 = variables.VariableV1(2, name="v2")
ops.add_to_collection("v", v2)
# Initialize another variable `v3` to 42.
v3 = variables.VariableV1(42, name="v3", trainable=False, collections=[])
ops.add_to_collection("v", v3)
# Set up an assignment op to be run as part of the init_op.
assign_v3 = state_ops.assign(v3, math_ops.add(v1, v2))
legacy_init_op = control_flow_ops.group(assign_v3, name="legacy_init_op")
self.evaluate(variables.global_variables_initializer())
builder.add_meta_graph_and_variables(
sess, ["foo"], legacy_init_op=legacy_init_op)
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
self.assertEqual(1, ops.get_collection("v")[0].eval())
self.assertEqual(2, ops.get_collection("v")[1].eval())
      # v3 evaluates to the sum of the first two variables because the
      # legacy_init_op assigns that sum to it, following a restore.
self.assertEqual(3, ops.get_collection("v")[2].eval())
if __name__ == "__main__":
test.main()
| apache-2.0 |
EmilianStankov/Viridis-Media-Player | source/playlist_tests.py | 1 | 1257 | import unittest
from playlist import Playlist, load_playlist_from_db
class TestPlaylist(unittest.TestCase):
"""Playlist tests"""
def setUp(self):
self.pl = Playlist("playlist", ["song_one", "song_two"])
self.pl.save_to_db()
def tearDown(self):
self.pl.delete_from_db()
def test_get_playlist_name(self):
self.assertEqual(self.pl.get_name(), "playlist")
def test_get_playlist_files(self):
self.assertEqual(self.pl.get_files(), ["song_one", "song_two"])
def test_add_new_file_to_playlist(self):
self.pl.add_file("song_three")
self.assertEqual(self.pl.get_files(),
["song_one", "song_two", "song_three"])
def test_remove_file_from_playlist(self):
self.pl.remove_file("song_one")
self.assertEqual(self.pl.get_files(), ["song_two"])
def test_remove_file_that_is_not_in_playlist(self):
        self.assertRaises(ValueError, self.pl.remove_file, "song_three")
def test_load_playlist_from_database(self):
pl2 = load_playlist_from_db("playlist")
self.assertEqual(pl2.get_name(), "playlist")
self.assertEqual(pl2.get_files(), ["song_one", "song_two"])
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
fdouetteau/PyBabe | pybabe/pivot.py | 1 | 2935 |
try:
from collections import OrderedDict
except:
## 2.6 Fallback
from ordereddict import OrderedDict
from base import StreamHeader, StreamFooter, BabeBase
class OrderedDefaultdict(OrderedDict):
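    """OrderedDict variant that also accepts a default_factory, mirroring
    collections.defaultdict while preserving insertion order."""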
def __init__(self, *args, **kwargs):
newdefault = None
newargs = ()
if args:
newdefault = args[0]
if not (newdefault is None or callable(newdefault)):
raise TypeError('first argument must be callable or None')
newargs = args[1:]
self.default_factory = newdefault
super(self.__class__, self).__init__(*newargs, **kwargs)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self): # optional, for pickle support
args = self.default_factory if self.default_factory else tuple()
return type(self), args, None, None, self.items()
class OrderedSet(set):
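    """Set that additionally records the order in which elements were first added."""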
def __init__(self):
self.list = []
def add(self, elt):
if elt in self:
return
else:
super(OrderedSet, self).add(elt)
self.list.append(elt)
def __iter__(self):
return self.list.__iter__()
def pivot(stream, pivot, group):
"Create a pivot around field, grouping on identical value for 'group'"
groups = OrderedDefaultdict(dict)
pivot_values = OrderedSet()
header = None
group_n = map(StreamHeader.keynormalize, group)
for row in stream:
if isinstance(row, StreamHeader):
header = row
elif isinstance(row, StreamFooter):
            # Header is: group fields + (other fields x each pivot value)
other_fields = [f for f in header.fields if not f in group and not f == pivot]
other_fields_k = map(StreamHeader.keynormalize, other_fields)
fields = group + [f + "-" + str(v)
for v in pivot_values.list for f in other_fields]
newheader = header.replace(fields=fields)
yield newheader
for _, row_dict in groups.iteritems():
## Create a line per group
mrow = row_dict.itervalues().next()
group_cols = [getattr(mrow, col) for col in group_n]
for v in pivot_values:
if v in row_dict:
mrow = row_dict[v]
group_cols.extend([getattr(mrow, col) for col in other_fields_k])
else:
group_cols.extend([None for col in other_fields])
yield group_cols
yield row
else:
kgroup = ""
for f in group_n:
kgroup = kgroup + str(getattr(row, f))
groups[kgroup][getattr(row, pivot)] = row
pivot_values.add(getattr(row, pivot))
BabeBase.register("pivot", pivot)
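# Illustrative note (not part of the original module): with an input stream
# whose header is (city, month, sales), calling the registered transform as
# pivot(stream, pivot="month", group=["city"]) yields a header such as
# (city, sales-jan, sales-feb, ...) -- one column per (other field, pivot
# value) pair -- and one output row per distinct 'city' group. The field and
# value names here are hypothetical and only meant to show the reshaping.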
| bsd-3-clause |
rbn42/stiler | config.py | 1 | 1027 | WinBorder = 2
LeftPadding = 15
BottomPadding = 15
TopPadding = BottomPadding
RightPadding = BottomPadding
NavigateAcrossWorkspaces = True  # available in Unity7
TempFile = "/dev/shm/.stiler_db"
LockFile = "/dev/shm/.stiler.lock"
# This is the configuration that works for Unity7. If you are using a
# different desktop environment, close all windows and execute "wmctrl
# -lG" to find out which applications need to be excluded.
EXCLUDE_APPLICATIONS = ['<unknown>', 'x-nautilus-desktop', 'unity-launcher',
'unity-panel', 'Hud', 'unity-dash', 'Desktop',
'Docky',
'screenkey', 'XdndCollectionWindowImp']
# An alternative method to exclude applications.
EXCLUDE_WM_CLASS = ['wesnoth-1.12']
UNRESIZABLE_APPLICATIONS = ['Screenkey']
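# Step sizes (presumably in pixels, matching wmctrl geometry) used when
# resizing or moving the focused window.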
RESIZE_STEP = 50
MOVE_STEP = 50
MIN_WINDOW_WIDTH = 50
MIN_WINDOW_HEIGHT = 50
#NOFRAME_WMCLASS = ['Wine']
# In i3-wm's window tree, only one child of a node is allowed to split.
#MAX_KD_TREE_BRANCH = 1
MAX_KD_TREE_BRANCH = 2
| mit |
MattFaus/CrowdTube-Connector | lib/gdata-2.0.18/tests/gdata_tests/apps/emailsettings/live_client_test.py | 23 | 12853 | #!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# These tests attempt to connect to Google servers.
__author__ = 'Claudio Cherubino <[email protected]>'
import unittest
import gdata.apps.emailsettings.client
import gdata.apps.emailsettings.data
import gdata.client
import gdata.data
import gdata.gauth
import gdata.test_config as conf
conf.options.register_option(conf.APPS_DOMAIN_OPTION)
conf.options.register_option(conf.TARGET_USERNAME_OPTION)
class EmailSettingsClientTest(unittest.TestCase):
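  """Tests for EmailSettingsClient against a live (or recorded) Apps domain."""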
def setUp(self):
self.client = gdata.apps.emailsettings.client.EmailSettingsClient(
domain='example.com')
if conf.options.get_value('runlive') == 'true':
self.client = gdata.apps.emailsettings.client.EmailSettingsClient(
domain=conf.options.get_value('appsdomain'))
if conf.options.get_value('ssl') == 'true':
self.client.ssl = True
conf.configure_client(self.client, 'EmailSettingsClientTest',
self.client.auth_service, True)
self.username = conf.options.get_value('appsusername').split('@')[0]
def tearDown(self):
conf.close_client(self.client)
def testClientConfiguration(self):
self.assertEqual('apps-apis.google.com', self.client.host)
self.assertEqual('2.0', self.client.api_version)
self.assertEqual('apps', self.client.auth_service)
if conf.options.get_value('runlive') == 'true':
self.assertEqual(self.client.domain, conf.options.get_value('appsdomain'))
else:
self.assertEqual(self.client.domain, 'example.com')
def testMakeEmailSettingsUri(self):
self.assertEqual('/a/feeds/emailsettings/2.0/%s/%s/%s' % (self.client.domain,
'abc', 'label'),
self.client.MakeEmailSettingsUri('abc', 'label'))
def testCreateDeleteLabel(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testCreateLabel')
new_label = self.client.CreateLabel(
username=conf.options.get_value('targetusername'),
name='status updates')
self.assert_(isinstance(new_label,
gdata.apps.emailsettings.data.EmailSettingsLabel))
self.assertEqual(new_label.name, 'status updates')
self.client.DeleteLabel(
username=conf.options.get_value('targetusername'),
label='status updates')
def testCreateFilter(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testCreateFilter')
new_filter = self.client.CreateFilter(
username=conf.options.get_value('targetusername'),
from_address='[email protected]',
has_the_word='project proposal', mark_as_read=True)
self.assert_(isinstance(new_filter,
gdata.apps.emailsettings.data.EmailSettingsFilter))
self.assertEqual(new_filter.from_address, '[email protected]')
self.assertEqual(new_filter.has_the_word, 'project proposal')
self.assertEqual(new_filter.mark_as_read, 'True')
new_filter = self.client.CreateFilter(
username=conf.options.get_value('targetusername'),
to_address='[email protected]',
label="announcements")
self.assert_(isinstance(new_filter,
gdata.apps.emailsettings.data.EmailSettingsFilter))
self.assertEqual(new_filter.to_address, '[email protected]')
self.assertEqual(new_filter.label, 'announcements')
new_filter = self.client.CreateFilter(
username=conf.options.get_value('targetusername'),
subject='urgent',
does_not_have_the_word='spam',
has_attachments=True,
archive=True)
self.assert_(isinstance(new_filter,
gdata.apps.emailsettings.data.EmailSettingsFilter))
self.assertEqual(new_filter.subject, 'urgent')
self.assertEqual(new_filter.does_not_have_the_word, 'spam')
self.assertEqual(new_filter.has_attachments, 'True')
self.assertEqual(new_filter.archive, 'True')
def testCreateSendAs(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testCreateSendAs')
new_sendas = self.client.CreateSendAs(
username=conf.options.get_value('targetusername'),
name='Sales', address=conf.options.get_value('appsusername'),
reply_to='[email protected]',
make_default=True)
self.assert_(isinstance(new_sendas,
gdata.apps.emailsettings.data.EmailSettingsSendAsAlias))
self.assertEqual(new_sendas.name, 'Sales')
self.assertEqual(new_sendas.address,
conf.options.get_value('appsusername'))
self.assertEqual(new_sendas.reply_to, '[email protected]')
self.assertEqual(new_sendas.make_default, 'True')
def testUpdateWebclip(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testUpdateWebclip')
new_webclip = self.client.UpdateWebclip(
username=conf.options.get_value('targetusername'),
enable=True)
self.assert_(isinstance(new_webclip,
gdata.apps.emailsettings.data.EmailSettingsWebClip))
self.assertEqual(new_webclip.enable, 'True')
new_webclip = self.client.UpdateWebclip(
username=conf.options.get_value('targetusername'),
enable=False)
self.assert_(isinstance(new_webclip,
gdata.apps.emailsettings.data.EmailSettingsWebClip))
self.assertEqual(new_webclip.enable, 'False')
def testUpdateForwarding(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testUpdateForwarding')
new_forwarding = self.client.UpdateForwarding(
username=conf.options.get_value('targetusername'),
enable=True,
forward_to=conf.options.get_value('appsusername'),
action='KEEP')
self.assert_(isinstance(new_forwarding,
gdata.apps.emailsettings.data.EmailSettingsForwarding))
self.assertEqual(new_forwarding.enable, 'True')
self.assertEqual(new_forwarding.forward_to,
conf.options.get_value('appsusername'))
self.assertEqual(new_forwarding.action, 'KEEP')
new_forwarding = self.client.UpdateForwarding(
username=conf.options.get_value('targetusername'),
enable=False)
self.assert_(isinstance(new_forwarding,
gdata.apps.emailsettings.data.EmailSettingsForwarding))
self.assertEqual(new_forwarding.enable, 'False')
def testUpdatePop(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testUpdatePop')
new_pop = self.client.UpdatePop(
username=conf.options.get_value('targetusername'),
enable=True, enable_for='MAIL_FROM_NOW_ON', action='KEEP')
self.assert_(isinstance(new_pop,
gdata.apps.emailsettings.data.EmailSettingsPop))
self.assertEqual(new_pop.enable, 'True')
self.assertEqual(new_pop.enable_for, 'MAIL_FROM_NOW_ON')
self.assertEqual(new_pop.action, 'KEEP')
new_pop = self.client.UpdatePop(
username=conf.options.get_value('targetusername'),
enable=False)
self.assert_(isinstance(new_pop,
gdata.apps.emailsettings.data.EmailSettingsPop))
self.assertEqual(new_pop.enable, 'False')
def testUpdateImap(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testUpdateImap')
new_imap = self.client.UpdateImap(
username=conf.options.get_value('targetusername'),
enable=True)
self.assert_(isinstance(new_imap,
gdata.apps.emailsettings.data.EmailSettingsImap))
self.assertEqual(new_imap.enable, 'True')
new_imap = self.client.UpdateImap(
username=conf.options.get_value('targetusername'),
enable=False)
self.assert_(isinstance(new_imap,
gdata.apps.emailsettings.data.EmailSettingsImap))
self.assertEqual(new_imap.enable, 'False')
def testUpdateVacation(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testUpdateVacation')
new_vacation = self.client.UpdateVacation(
username=conf.options.get_value('targetusername'),
enable=True, subject='Out of office',
message='If urgent call me at 555-5555.',
start_date='2011-12-05', end_date='2011-12-06',
contacts_only=True, domain_only=False)
self.assert_(isinstance(new_vacation,
gdata.apps.emailsettings.data.EmailSettingsVacationResponder))
self.assertEqual(new_vacation.enable, 'True')
self.assertEqual(new_vacation.subject, 'Out of office')
self.assertEqual(new_vacation.message, 'If urgent call me at 555-5555.')
self.assertEqual(new_vacation.start_date, '2011-12-05')
self.assertEqual(new_vacation.end_date, '2011-12-06')
self.assertEqual(new_vacation.contacts_only, 'True')
self.assertEqual(new_vacation.domain_only, 'False')
new_vacation = self.client.UpdateVacation(
username=conf.options.get_value('targetusername'),
enable=False)
self.assert_(isinstance(new_vacation,
gdata.apps.emailsettings.data.EmailSettingsVacationResponder))
self.assertEqual(new_vacation.enable, 'False')
def testUpdateSignature(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testUpdateSignature')
new_signature = self.client.UpdateSignature(
username=conf.options.get_value('targetusername'),
signature='Regards, Joe')
self.assert_(isinstance(new_signature,
gdata.apps.emailsettings.data.EmailSettingsSignature))
self.assertEqual(new_signature.signature_value, 'Regards, Joe')
new_signature = self.client.UpdateSignature(
username=conf.options.get_value('targetusername'),
signature='')
self.assert_(isinstance(new_signature,
gdata.apps.emailsettings.data.EmailSettingsSignature))
self.assertEqual(new_signature.signature_value, '')
def testUpdateLanguage(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testUpdateLanguage')
new_language = self.client.UpdateLanguage(
username=conf.options.get_value('targetusername'),
language='es')
self.assert_(isinstance(new_language,
gdata.apps.emailsettings.data.EmailSettingsLanguage))
self.assertEqual(new_language.language_tag, 'es')
def testUpdateGeneral(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testUpdateGeneral')
new_general = self.client.UpdateGeneralSettings(
username=conf.options.get_value('targetusername'),
page_size=25, arrows=True)
self.assert_(isinstance(new_general,
gdata.apps.emailsettings.data.EmailSettingsGeneral))
self.assertEqual(new_general.page_size, '25')
self.assertEqual(new_general.arrows, 'True')
new_general = self.client.UpdateGeneralSettings(
username=conf.options.get_value('targetusername'),
shortcuts=False, snippets=True, use_unicode=False)
self.assert_(isinstance(new_general,
gdata.apps.emailsettings.data.EmailSettingsGeneral))
self.assertEqual(new_general.shortcuts, 'False')
self.assertEqual(new_general.snippets, 'True')
self.assertEqual(new_general.use_unicode, 'False')
def suite():
return conf.build_suite([EmailSettingsClientTest])
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
| mit |
doduytrung/odoo-8.0 | addons/purchase/stock.py | 6 | 15030 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools.translate import _
class stock_move(osv.osv):
_inherit = 'stock.move'
_columns = {
'purchase_line_id': fields.many2one('purchase.order.line',
'Purchase Order Line', ondelete='set null', select=True,
readonly=True),
}
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
res = super(stock_move, self).write(cr, uid, ids, vals, context=context)
from openerp import workflow
if vals.get('state') in ['done', 'cancel']:
for move in self.browse(cr, uid, ids, context=context):
if move.purchase_line_id and move.purchase_line_id.order_id:
order_id = move.purchase_line_id.order_id.id
# update linked purchase order as superuser as the warehouse
# user may not have rights to access purchase.order
if self.pool.get('purchase.order').test_moves_done(cr, uid, [order_id], context=context):
workflow.trg_validate(SUPERUSER_ID, 'purchase.order', order_id, 'picking_done', cr)
if self.pool.get('purchase.order').test_moves_except(cr, uid, [order_id], context=context):
workflow.trg_validate(SUPERUSER_ID, 'purchase.order', order_id, 'picking_cancel', cr)
return res
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
context = context or {}
if not default.get('split_from'):
#we don't want to propagate the link to the purchase order line except in case of move split
default['purchase_line_id'] = False
return super(stock_move, self).copy(cr, uid, id, default, context)
def _create_invoice_line_from_vals(self, cr, uid, move, invoice_line_vals, context=None):
if move.purchase_line_id:
invoice_line_vals['purchase_line_id'] = move.purchase_line_id.id
invoice_line_vals['account_analytic_id'] = move.purchase_line_id.account_analytic_id.id or False
invoice_line_id = super(stock_move, self)._create_invoice_line_from_vals(cr, uid, move, invoice_line_vals, context=context)
if move.purchase_line_id:
purchase_line = move.purchase_line_id
self.pool.get('purchase.order.line').write(cr, uid, [purchase_line.id], {
'invoice_lines': [(4, invoice_line_id)]
}, context=context)
self.pool.get('purchase.order').write(cr, uid, [purchase_line.order_id.id], {
'invoice_ids': [(4, invoice_line_vals['invoice_id'])],
})
purchase_line_obj = self.pool.get('purchase.order.line')
purchase_obj = self.pool.get('purchase.order')
invoice_line_obj = self.pool.get('account.invoice.line')
purchase_id = move.purchase_line_id.order_id.id
purchase_line_ids = purchase_line_obj.search(cr, uid, [('order_id', '=', purchase_id), ('invoice_lines', '=', False), '|', ('product_id', '=', False), ('product_id.type', '=', 'service')], context=context)
if purchase_line_ids:
inv_lines = []
for po_line in purchase_line_obj.browse(cr, uid, purchase_line_ids, context=context):
acc_id = purchase_obj._choose_account_from_po_line(cr, uid, po_line, context=context)
inv_line_data = purchase_obj._prepare_inv_line(cr, uid, acc_id, po_line, context=context)
inv_line_id = invoice_line_obj.create(cr, uid, inv_line_data, context=context)
inv_lines.append(inv_line_id)
po_line.write({'invoice_lines': [(4, inv_line_id)]})
invoice_line_obj.write(cr, uid, inv_lines, {'invoice_id': invoice_line_vals['invoice_id']}, context=context)
return invoice_line_id
def _get_master_data(self, cr, uid, move, company, context=None):
if move.purchase_line_id:
purchase_order = move.purchase_line_id.order_id
return purchase_order.partner_id, purchase_order.create_uid.id, purchase_order.currency_id.id
elif move.picking_id:
# In case of an extra move, it is better to use the data from the original moves
for purchase_move in move.picking_id.move_lines:
if purchase_move.purchase_line_id:
purchase_order = purchase_move.purchase_line_id.order_id
return purchase_order.partner_id, purchase_order.create_uid.id, purchase_order.currency_id.id
partner = move.picking_id and move.picking_id.partner_id or False
code = self.get_code_from_locs(cr, uid, move, context=context)
if partner and partner.property_product_pricelist_purchase and code == 'incoming':
currency = partner.property_product_pricelist_purchase.currency_id.id
return partner, uid, currency
return super(stock_move, self)._get_master_data(cr, uid, move, company, context=context)
def _get_invoice_line_vals(self, cr, uid, move, partner, inv_type, context=None):
res = super(stock_move, self)._get_invoice_line_vals(cr, uid, move, partner, inv_type, context=context)
if move.purchase_line_id:
purchase_line = move.purchase_line_id
res['invoice_line_tax_id'] = [(6, 0, [x.id for x in purchase_line.taxes_id])]
res['price_unit'] = purchase_line.price_unit
return res
def attribute_price(self, cr, uid, move, context=None):
"""
Attribute price to move, important in inter-company moves or receipts with only one partner
"""
code = self.get_code_from_locs(cr, uid, move, context=context)
if not move.purchase_line_id and code == 'incoming' and not move.price_unit:
partner = move.picking_id and move.picking_id.partner_id or False
price = False
# If partner given, search price in its purchase pricelist
if partner and partner.property_product_pricelist_purchase:
pricelist_obj = self.pool.get("product.pricelist")
pricelist = partner.property_product_pricelist_purchase.id
price = pricelist_obj.price_get(cr, uid, [pricelist],
move.product_id.id, move.product_uom_qty, partner, {
'uom': move.product_uom.id,
'date': move.date,
})[pricelist]
if price:
return self.write(cr, uid, [move.id], {'price_unit': price}, context=context)
super(stock_move, self).attribute_price(cr, uid, move, context=context)
class stock_picking(osv.osv):
_inherit = 'stock.picking'
def _get_to_invoice(self, cr, uid, ids, name, args, context=None):
res = {}
for picking in self.browse(cr, uid, ids, context=context):
res[picking.id] = False
for move in picking.move_lines:
if move.purchase_line_id and move.purchase_line_id.order_id.invoice_method == 'picking':
if not move.move_orig_ids:
res[picking.id] = True
return res
def _get_picking_to_recompute(self, cr, uid, ids, context=None):
picking_ids = set()
for move in self.pool.get('stock.move').browse(cr, uid, ids, context=context):
if move.picking_id and move.purchase_line_id:
picking_ids.add(move.picking_id.id)
return list(picking_ids)
_columns = {
'reception_to_invoice': fields.function(_get_to_invoice, type='boolean', string='Invoiceable on incoming shipment?',
            help='Does the picking contain any moves related to a purchase order invoiceable on the receipt?',
store={
'stock.move': (_get_picking_to_recompute, ['purchase_line_id', 'picking_id'], 10),
}),
}
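    # Illustrative note (not part of the original file): the store trigger above
    # means reception_to_invoice is recomputed whenever purchase_line_id or
    # picking_id changes on a related stock.move (see _get_picking_to_recompute).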
    def _create_invoice_from_picking(self, cr, uid, picking, vals, context=None):
        invoice_id = super(stock_picking, self)._create_invoice_from_picking(cr, uid, picking, vals, context=context)
        return invoice_id
def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, move, context=None):
inv_vals = super(stock_picking, self)._get_invoice_vals(cr, uid, key, inv_type, journal_id, move, context=context)
if move.purchase_line_id and move.purchase_line_id.order_id:
purchase = move.purchase_line_id.order_id
inv_vals.update({
'fiscal_position': purchase.fiscal_position.id,
'payment_term': purchase.payment_term_id.id,
})
return inv_vals
class stock_warehouse(osv.osv):
_inherit = 'stock.warehouse'
_columns = {
'buy_to_resupply': fields.boolean('Purchase to resupply this warehouse',
help="When products are bought, they can be delivered to this warehouse"),
'buy_pull_id': fields.many2one('procurement.rule', 'BUY rule'),
}
_defaults = {
'buy_to_resupply': True,
}
def _get_buy_pull_rule(self, cr, uid, warehouse, context=None):
route_obj = self.pool.get('stock.location.route')
data_obj = self.pool.get('ir.model.data')
try:
buy_route_id = data_obj.get_object_reference(cr, uid, 'purchase', 'route_warehouse0_buy')[1]
except:
buy_route_id = route_obj.search(cr, uid, [('name', 'like', _('Buy'))], context=context)
buy_route_id = buy_route_id and buy_route_id[0] or False
if not buy_route_id:
raise osv.except_osv(_('Error!'), _('Can\'t find any generic Buy route.'))
return {
'name': self._format_routename(cr, uid, warehouse, _(' Buy'), context=context),
'location_id': warehouse.in_type_id.default_location_dest_id.id,
'route_id': buy_route_id,
'action': 'buy',
'picking_type_id': warehouse.in_type_id.id,
'warehouse_id': warehouse.id,
}
def create_routes(self, cr, uid, ids, warehouse, context=None):
pull_obj = self.pool.get('procurement.rule')
res = super(stock_warehouse, self).create_routes(cr, uid, ids, warehouse, context=context)
if warehouse.buy_to_resupply:
buy_pull_vals = self._get_buy_pull_rule(cr, uid, warehouse, context=context)
buy_pull_id = pull_obj.create(cr, uid, buy_pull_vals, context=context)
res['buy_pull_id'] = buy_pull_id
return res
def write(self, cr, uid, ids, vals, context=None):
pull_obj = self.pool.get('procurement.rule')
if isinstance(ids, (int, long)):
ids = [ids]
if 'buy_to_resupply' in vals:
if vals.get("buy_to_resupply"):
for warehouse in self.browse(cr, uid, ids, context=context):
if not warehouse.buy_pull_id:
buy_pull_vals = self._get_buy_pull_rule(cr, uid, warehouse, context=context)
buy_pull_id = pull_obj.create(cr, uid, buy_pull_vals, context=context)
vals['buy_pull_id'] = buy_pull_id
else:
for warehouse in self.browse(cr, uid, ids, context=context):
if warehouse.buy_pull_id:
buy_pull_id = pull_obj.unlink(cr, uid, warehouse.buy_pull_id.id, context=context)
        return super(stock_warehouse, self).write(cr, uid, ids, vals, context=context)
def get_all_routes_for_wh(self, cr, uid, warehouse, context=None):
all_routes = super(stock_warehouse, self).get_all_routes_for_wh(cr, uid, warehouse, context=context)
if warehouse.buy_to_resupply and warehouse.buy_pull_id and warehouse.buy_pull_id.route_id:
all_routes += [warehouse.buy_pull_id.route_id.id]
return all_routes
def _get_all_products_to_resupply(self, cr, uid, warehouse, context=None):
res = super(stock_warehouse, self)._get_all_products_to_resupply(cr, uid, warehouse, context=context)
if warehouse.buy_pull_id and warehouse.buy_pull_id.route_id:
for product_id in res:
for route in self.pool.get('product.product').browse(cr, uid, product_id, context=context).route_ids:
if route.id == warehouse.buy_pull_id.route_id.id:
res.remove(product_id)
break
return res
def _handle_renaming(self, cr, uid, warehouse, name, code, context=None):
res = super(stock_warehouse, self)._handle_renaming(cr, uid, warehouse, name, code, context=context)
pull_obj = self.pool.get('procurement.rule')
#change the buy pull rule name
if warehouse.buy_pull_id:
pull_obj.write(cr, uid, warehouse.buy_pull_id.id, {'name': warehouse.buy_pull_id.name.replace(warehouse.name, name, 1)}, context=context)
return res
def change_route(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None):
res = super(stock_warehouse, self).change_route(cr, uid, ids, warehouse, new_reception_step=new_reception_step, new_delivery_step=new_delivery_step, context=context)
if warehouse.in_type_id.default_location_dest_id != warehouse.buy_pull_id.location_id:
self.pool.get('procurement.rule').write(cr, uid, warehouse.buy_pull_id.id, {'location_id': warehouse.in_type_id.default_location_dest_id.id}, context=context)
return res
| agpl-3.0 |
janssen/kivy | kivy/garden/__init__.py | 59 | 3061 | '''
Garden
======
.. versionadded:: 1.7.0
.. versionchanged:: 1.8.0
Garden is a project to centralize addons for Kivy maintained by users. You can
find more information at `Kivy Garden <http://kivy-garden.github.io/>`_. All
the garden packages are centralized on the `kivy-garden Github
<https://github.com/kivy-garden>`_ repository.
Garden is now distributed as a separate Python module, kivy-garden. You can
install it with pip::
pip install kivy-garden
The garden module does not initially include any packages. You can download
them with the garden tool installed by the pip package::
# Installing a garden package
garden install graph
# Upgrade a garden package
garden install --upgrade graph
# Uninstall a garden package
garden uninstall graph
# List all the garden packages installed
garden list
# Search new packages
garden search
# Search all the packages that contain "graph"
garden search graph
# Show the help
garden --help
All the garden packages are installed by default in `~/.kivy/garden`.
.. Note:: In previous versions of Kivy, garden was a tool at
kivy/tools/garden. This no longer exists, but the
kivy-garden module provides exactly the same functionality.
Packaging
---------
If you want to include garden packages in your application, you can add `--app`
to the `install` command. This will create a `libs/garden` directory in your
current directory which will be used by `kivy.garden`.
For example::
cd myapp
garden install --app graph
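Once installed, packages are imported through the `kivy.garden` namespace; a
minimal sketch, assuming the `graph` package has been installed::
    from kivy.garden import graph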
'''
__path__ = 'kivy.garden'
import sys
import imp
from os.path import dirname, join, realpath, exists, abspath
from kivy import kivy_home_dir
import kivy
#: system path where garden modules can be installed
garden_system_dir = join(kivy_home_dir, 'garden')
garden_kivy_dir = abspath(join(dirname(kivy.__file__), 'garden'))
#: application path where garden modules can be installed
if getattr(sys, 'frozen', False) and getattr(sys, '_MEIPASS', False):
garden_app_dir = join(realpath(sys._MEIPASS), 'libs', 'garden')
else:
garden_app_dir = join(realpath(dirname(sys.argv[0])), 'libs', 'garden')
class GardenImporter(object):
def find_module(self, fullname, path):
if path == 'kivy.garden':
return self
def load_module(self, fullname):
assert(fullname.startswith('kivy.garden'))
moddir = join(garden_kivy_dir, fullname.split('.', 2)[-1])
if exists(moddir):
return self._load_module(fullname, moddir)
modname = fullname.split('.', 1)[-1]
for directory in (garden_app_dir, garden_system_dir):
moddir = join(directory, modname)
if exists(moddir):
return self._load_module(fullname, moddir)
def _load_module(self, fullname, moddir):
mod = imp.load_module(fullname, None, moddir,
('', '', imp.PKG_DIRECTORY))
return mod
# insert the garden importer as ultimate importer
sys.meta_path.append(GardenImporter())
| mit |
synconics/odoo | addons/account_budget/wizard/__init__.py | 444 | 1196 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_budget_crossovered_report
import account_budget_analytic
import account_budget_crossovered_summary_report
import account_budget_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
NikiStanchev/SoftUni | AngularFundamentals/Angular2/Redux/node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings.py | 1361 | 45045 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
r"""Code to validate and convert settings of the Microsoft build tools.
This file contains code to validate and convert settings of the Microsoft
build tools. The function ConvertToMSBuildSettings(), ValidateMSVSSettings(),
and ValidateMSBuildSettings() are the entry points.
This file was created by comparing the projects created by Visual Studio 2008
and Visual Studio 2010 for all available settings through the user interface.
The MSBuild schemas were also considered. They are typically found in the
MSBuild install directory, e.g. c:\Program Files (x86)\MSBuild
"""
import sys
import re
# Dictionaries of settings validators. The key is the tool name, the value is
# a dictionary mapping setting names to validation functions.
_msvs_validators = {}
_msbuild_validators = {}
# A dictionary of settings converters. The key is the tool name, the value is
# a dictionary mapping setting names to conversion functions.
_msvs_to_msbuild_converters = {}
# Tool name mapping from MSVS to MSBuild.
_msbuild_name_of_tool = {}
class _Tool(object):
"""Represents a tool used by MSVS or MSBuild.
Attributes:
msvs_name: The name of the tool in MSVS.
msbuild_name: The name of the tool in MSBuild.
"""
def __init__(self, msvs_name, msbuild_name):
self.msvs_name = msvs_name
self.msbuild_name = msbuild_name
def _AddTool(tool):
"""Adds a tool to the four dictionaries used to process settings.
This only defines the tool. Each setting also needs to be added.
Args:
tool: The _Tool object to be added.
"""
_msvs_validators[tool.msvs_name] = {}
_msbuild_validators[tool.msbuild_name] = {}
_msvs_to_msbuild_converters[tool.msvs_name] = {}
_msbuild_name_of_tool[tool.msvs_name] = tool.msbuild_name
def _GetMSBuildToolSettings(msbuild_settings, tool):
"""Returns an MSBuild tool dictionary. Creates it if needed."""
return msbuild_settings.setdefault(tool.msbuild_name, {})
class _Type(object):
"""Type of settings (Base class)."""
def ValidateMSVS(self, value):
"""Verifies that the value is legal for MSVS.
Args:
value: the value to check for this type.
Raises:
ValueError if value is not valid for MSVS.
"""
def ValidateMSBuild(self, value):
"""Verifies that the value is legal for MSBuild.
Args:
value: the value to check for this type.
Raises:
ValueError if value is not valid for MSBuild.
"""
def ConvertToMSBuild(self, value):
"""Returns the MSBuild equivalent of the MSVS value given.
Args:
value: the MSVS value to convert.
Returns:
the MSBuild equivalent.
Raises:
ValueError if value is not valid.
"""
return value
class _String(_Type):
"""A setting that's just a string."""
def ValidateMSVS(self, value):
if not isinstance(value, basestring):
raise ValueError('expected string; got %r' % value)
def ValidateMSBuild(self, value):
if not isinstance(value, basestring):
raise ValueError('expected string; got %r' % value)
def ConvertToMSBuild(self, value):
# Convert the macros
return ConvertVCMacrosToMSBuild(value)
class _StringList(_Type):
"""A settings that's a list of strings."""
def ValidateMSVS(self, value):
if not isinstance(value, basestring) and not isinstance(value, list):
raise ValueError('expected string list; got %r' % value)
def ValidateMSBuild(self, value):
if not isinstance(value, basestring) and not isinstance(value, list):
raise ValueError('expected string list; got %r' % value)
def ConvertToMSBuild(self, value):
# Convert the macros
if isinstance(value, list):
return [ConvertVCMacrosToMSBuild(i) for i in value]
else:
return ConvertVCMacrosToMSBuild(value)
class _Boolean(_Type):
"""Boolean settings, can have the values 'false' or 'true'."""
def _Validate(self, value):
if value != 'true' and value != 'false':
raise ValueError('expected bool; got %r' % value)
def ValidateMSVS(self, value):
self._Validate(value)
def ValidateMSBuild(self, value):
self._Validate(value)
def ConvertToMSBuild(self, value):
self._Validate(value)
return value
class _Integer(_Type):
"""Integer settings."""
def __init__(self, msbuild_base=10):
_Type.__init__(self)
self._msbuild_base = msbuild_base
def ValidateMSVS(self, value):
# Try to convert, this will raise ValueError if invalid.
self.ConvertToMSBuild(value)
def ValidateMSBuild(self, value):
# Try to convert, this will raise ValueError if invalid.
int(value, self._msbuild_base)
def ConvertToMSBuild(self, value):
msbuild_format = (self._msbuild_base == 10) and '%d' or '0x%04x'
return msbuild_format % int(value)
class _Enumeration(_Type):
"""Type of settings that is an enumeration.
In MSVS, the values are indexes like '0', '1', and '2'.
MSBuild uses text labels that are more representative, like 'Win32'.
Constructor args:
label_list: an array of MSBuild labels that correspond to the MSVS index.
In the rare cases where MSVS has skipped an index value, None is
used in the array to indicate the unused spot.
new: an array of labels that are new to MSBuild.
"""
def __init__(self, label_list, new=None):
_Type.__init__(self)
self._label_list = label_list
self._msbuild_values = set(value for value in label_list
if value is not None)
if new is not None:
self._msbuild_values.update(new)
def ValidateMSVS(self, value):
# Try to convert. It will raise an exception if not valid.
self.ConvertToMSBuild(value)
def ValidateMSBuild(self, value):
if value not in self._msbuild_values:
raise ValueError('unrecognized enumerated value %s' % value)
def ConvertToMSBuild(self, value):
index = int(value)
if index < 0 or index >= len(self._label_list):
raise ValueError('index value (%d) not in expected range [0, %d)' %
(index, len(self._label_list)))
label = self._label_list[index]
if label is None:
raise ValueError('converted value for %s not specified.' % value)
return label
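# Illustrative sketch (hypothetical values, not from the schema below): an
# _Enumeration(['Default', 'Speed', 'Size']) maps the MSVS index '2' to the
# MSBuild label 'Size' via ConvertToMSBuild('2'), while ValidateMSBuild() only
# accepts the non-None labels themselves.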
# Instantiate the various generic types.
_boolean = _Boolean()
_integer = _Integer()
# For now, we don't do any special validation on these types:
_string = _String()
_file_name = _String()
_folder_name = _String()
_file_list = _StringList()
_folder_list = _StringList()
_string_list = _StringList()
# Some boolean settings went from numerical values to boolean. The
# mapping is 0: default, 1: false, 2: true.
_newly_boolean = _Enumeration(['', 'false', 'true'])
def _Same(tool, name, setting_type):
"""Defines a setting that has the same name in MSVS and MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
_Renamed(tool, name, name, setting_type)
def _Renamed(tool, msvs_name, msbuild_name, setting_type):
"""Defines a setting for which the name has changed.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_name: the name of the MSVS setting.
msbuild_name: the name of the MSBuild setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
msbuild_tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
msbuild_tool_settings[msbuild_name] = setting_type.ConvertToMSBuild(value)
_msvs_validators[tool.msvs_name][msvs_name] = setting_type.ValidateMSVS
_msbuild_validators[tool.msbuild_name][msbuild_name] = (
setting_type.ValidateMSBuild)
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _Moved(tool, settings_name, msbuild_tool_name, setting_type):
_MovedAndRenamed(tool, settings_name, msbuild_tool_name, settings_name,
setting_type)
def _MovedAndRenamed(tool, msvs_settings_name, msbuild_tool_name,
msbuild_settings_name, setting_type):
"""Defines a setting that may have moved to a new section.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_settings_name: the MSVS name of the setting.
msbuild_tool_name: the name of the MSBuild tool to place the setting under.
msbuild_settings_name: the MSBuild name of the setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
tool_settings = msbuild_settings.setdefault(msbuild_tool_name, {})
tool_settings[msbuild_settings_name] = setting_type.ConvertToMSBuild(value)
_msvs_validators[tool.msvs_name][msvs_settings_name] = (
setting_type.ValidateMSVS)
validator = setting_type.ValidateMSBuild
_msbuild_validators[msbuild_tool_name][msbuild_settings_name] = validator
_msvs_to_msbuild_converters[tool.msvs_name][msvs_settings_name] = _Translate
def _MSVSOnly(tool, name, setting_type):
"""Defines a setting that is only found in MSVS.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
def _Translate(unused_value, unused_msbuild_settings):
# Since this is for MSVS only settings, no translation will happen.
pass
_msvs_validators[tool.msvs_name][name] = setting_type.ValidateMSVS
_msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate
def _MSBuildOnly(tool, name, setting_type):
"""Defines a setting that is only found in MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
# Let msbuild-only properties get translated as-is from msvs_settings.
tool_settings = msbuild_settings.setdefault(tool.msbuild_name, {})
tool_settings[name] = value
_msbuild_validators[tool.msbuild_name][name] = setting_type.ValidateMSBuild
_msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate
def _ConvertedToAdditionalOption(tool, msvs_name, flag):
"""Defines a setting that's handled via a command line option in MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_name: the name of the MSVS setting that if 'true' becomes a flag
flag: the flag to insert at the end of the AdditionalOptions
"""
def _Translate(value, msbuild_settings):
if value == 'true':
tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
if 'AdditionalOptions' in tool_settings:
new_flags = '%s %s' % (tool_settings['AdditionalOptions'], flag)
else:
new_flags = flag
tool_settings['AdditionalOptions'] = new_flags
_msvs_validators[tool.msvs_name][msvs_name] = _boolean.ValidateMSVS
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _CustomGeneratePreprocessedFile(tool, msvs_name):
def _Translate(value, msbuild_settings):
tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
if value == '0':
tool_settings['PreprocessToFile'] = 'false'
tool_settings['PreprocessSuppressLineNumbers'] = 'false'
elif value == '1': # /P
tool_settings['PreprocessToFile'] = 'true'
tool_settings['PreprocessSuppressLineNumbers'] = 'false'
elif value == '2': # /EP /P
tool_settings['PreprocessToFile'] = 'true'
tool_settings['PreprocessSuppressLineNumbers'] = 'true'
else:
raise ValueError('value must be one of [0, 1, 2]; got %s' % value)
# Create a bogus validator that looks for '0', '1', or '2'
msvs_validator = _Enumeration(['a', 'b', 'c']).ValidateMSVS
_msvs_validators[tool.msvs_name][msvs_name] = msvs_validator
msbuild_validator = _boolean.ValidateMSBuild
msbuild_tool_validators = _msbuild_validators[tool.msbuild_name]
msbuild_tool_validators['PreprocessToFile'] = msbuild_validator
msbuild_tool_validators['PreprocessSuppressLineNumbers'] = msbuild_validator
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
fix_vc_macro_slashes_regex_list = ('IntDir', 'OutDir')
fix_vc_macro_slashes_regex = re.compile(
r'(\$\((?:%s)\))(?:[\\/]+)' % "|".join(fix_vc_macro_slashes_regex_list)
)
# Regular expression to detect keys that were generated by exclusion lists
_EXCLUDED_SUFFIX_RE = re.compile('^(.*)_excluded$')
def _ValidateExclusionSetting(setting, settings, error_msg, stderr=sys.stderr):
"""Verify that 'setting' is valid if it is generated from an exclusion list.
If the setting appears to be generated from an exclusion list, the root name
is checked.
Args:
setting: A string that is the setting name to validate
settings: A dictionary where the keys are valid settings
error_msg: The message to emit in the event of error
stderr: The stream receiving the error messages.
"""
# This may be unrecognized because it's an exclusion list. If the
# setting name has the _excluded suffix, then check the root name.
unrecognized = True
m = re.match(_EXCLUDED_SUFFIX_RE, setting)
if m:
root_setting = m.group(1)
unrecognized = root_setting not in settings
if unrecognized:
# We don't know this setting. Give a warning.
print >> stderr, error_msg
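# For example (illustrative): a generated key such as 'AdditionalOptions_excluded'
# passes validation as long as 'AdditionalOptions' itself is a known setting.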
def FixVCMacroSlashes(s):
"""Replace macros which have excessive following slashes.
These macros are known to have a built-in trailing slash. Furthermore, many
scripts hiccup on processing paths with extra slashes in the middle.
This list is probably not exhaustive. Add as needed.
"""
if '$' in s:
s = fix_vc_macro_slashes_regex.sub(r'\1', s)
return s
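# Illustrative example (not part of the original file):
#   FixVCMacroSlashes('$(OutDir)\\foo.dll') -> '$(OutDir)foo.dll'
# since $(OutDir) already carries a trailing slash.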
def ConvertVCMacrosToMSBuild(s):
"""Convert the the MSVS macros found in the string to the MSBuild equivalent.
This list is probably not exhaustive. Add as needed.
"""
if '$' in s:
replace_map = {
'$(ConfigurationName)': '$(Configuration)',
'$(InputDir)': '%(RelativeDir)',
'$(InputExt)': '%(Extension)',
'$(InputFileName)': '%(Filename)%(Extension)',
'$(InputName)': '%(Filename)',
'$(InputPath)': '%(Identity)',
'$(ParentName)': '$(ProjectFileName)',
'$(PlatformName)': '$(Platform)',
'$(SafeInputName)': '%(Filename)',
}
for old, new in replace_map.iteritems():
s = s.replace(old, new)
s = FixVCMacroSlashes(s)
return s
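# Illustrative example (not part of the original file):
#   ConvertVCMacrosToMSBuild('$(InputName).obj') -> '%(Filename).obj'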
def ConvertToMSBuildSettings(msvs_settings, stderr=sys.stderr):
"""Converts MSVS settings (VS2008 and earlier) to MSBuild settings (VS2010+).
Args:
msvs_settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
Returns:
A dictionary of MSBuild settings. The key is either the MSBuild tool name
or the empty string (for the global settings). The values are themselves
dictionaries of settings and their values.
"""
msbuild_settings = {}
for msvs_tool_name, msvs_tool_settings in msvs_settings.iteritems():
if msvs_tool_name in _msvs_to_msbuild_converters:
msvs_tool = _msvs_to_msbuild_converters[msvs_tool_name]
for msvs_setting, msvs_value in msvs_tool_settings.iteritems():
if msvs_setting in msvs_tool:
# Invoke the translation function.
try:
msvs_tool[msvs_setting](msvs_value, msbuild_settings)
except ValueError, e:
print >> stderr, ('Warning: while converting %s/%s to MSBuild, '
'%s' % (msvs_tool_name, msvs_setting, e))
else:
_ValidateExclusionSetting(msvs_setting,
msvs_tool,
('Warning: unrecognized setting %s/%s '
'while converting to MSBuild.' %
(msvs_tool_name, msvs_setting)),
stderr)
else:
print >> stderr, ('Warning: unrecognized tool %s while converting to '
'MSBuild.' % msvs_tool_name)
return msbuild_settings
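# Illustrative example (hypothetical input; the converters themselves are
# defined later in this file):
#   ConvertToMSBuildSettings({'VCCLCompilerTool': {'WarningLevel': '3'}})
#   -> {'ClCompile': {'WarningLevel': 'Level3'}}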
def ValidateMSVSSettings(settings, stderr=sys.stderr):
"""Validates that the names of the settings are valid for MSVS.
Args:
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
_ValidateSettings(_msvs_validators, settings, stderr)
def ValidateMSBuildSettings(settings, stderr=sys.stderr):
"""Validates that the names of the settings are valid for MSBuild.
Args:
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
_ValidateSettings(_msbuild_validators, settings, stderr)
def _ValidateSettings(validators, settings, stderr):
"""Validates that the settings are valid for MSBuild or MSVS.
We currently only validate the names of the settings, not their values.
Args:
validators: A dictionary of tools and their validators.
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
for tool_name in settings:
if tool_name in validators:
tool_validators = validators[tool_name]
for setting, value in settings[tool_name].iteritems():
if setting in tool_validators:
try:
tool_validators[setting](value)
except ValueError, e:
print >> stderr, ('Warning: for %s/%s, %s' %
(tool_name, setting, e))
else:
_ValidateExclusionSetting(setting,
tool_validators,
('Warning: unrecognized setting %s/%s' %
(tool_name, setting)),
stderr)
else:
print >> stderr, ('Warning: unrecognized tool %s' % tool_name)
# MSVS and MSBuild names of the tools.
_compile = _Tool('VCCLCompilerTool', 'ClCompile')
_link = _Tool('VCLinkerTool', 'Link')
_midl = _Tool('VCMIDLTool', 'Midl')
_rc = _Tool('VCResourceCompilerTool', 'ResourceCompile')
_lib = _Tool('VCLibrarianTool', 'Lib')
_manifest = _Tool('VCManifestTool', 'Manifest')
_masm = _Tool('MASM', 'MASM')
_AddTool(_compile)
_AddTool(_link)
_AddTool(_midl)
_AddTool(_rc)
_AddTool(_lib)
_AddTool(_manifest)
_AddTool(_masm)
# Add sections only found in the MSBuild settings.
_msbuild_validators[''] = {}
_msbuild_validators['ProjectReference'] = {}
_msbuild_validators['ManifestResourceCompile'] = {}
# Descriptions of the compiler options, i.e. VCCLCompilerTool in MSVS and
# ClCompile in MSBuild.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\cl.xml" for
# the schema of the MSBuild ClCompile settings.
# Options that have the same name in MSVS and MSBuild
_Same(_compile, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_compile, 'AdditionalOptions', _string_list)
_Same(_compile, 'AdditionalUsingDirectories', _folder_list) # /AI
_Same(_compile, 'AssemblerListingLocation', _file_name) # /Fa
_Same(_compile, 'BrowseInformationFile', _file_name)
_Same(_compile, 'BufferSecurityCheck', _boolean) # /GS
_Same(_compile, 'DisableLanguageExtensions', _boolean) # /Za
_Same(_compile, 'DisableSpecificWarnings', _string_list) # /wd
_Same(_compile, 'EnableFiberSafeOptimizations', _boolean) # /GT
_Same(_compile, 'EnablePREfast', _boolean) # /analyze Visible='false'
_Same(_compile, 'ExpandAttributedSource', _boolean) # /Fx
_Same(_compile, 'FloatingPointExceptions', _boolean) # /fp:except
_Same(_compile, 'ForceConformanceInForLoopScope', _boolean) # /Zc:forScope
_Same(_compile, 'ForcedIncludeFiles', _file_list) # /FI
_Same(_compile, 'ForcedUsingFiles', _file_list) # /FU
_Same(_compile, 'GenerateXMLDocumentationFiles', _boolean) # /doc
_Same(_compile, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_compile, 'MinimalRebuild', _boolean) # /Gm
_Same(_compile, 'OmitDefaultLibName', _boolean) # /Zl
_Same(_compile, 'OmitFramePointers', _boolean) # /Oy
_Same(_compile, 'PreprocessorDefinitions', _string_list) # /D
_Same(_compile, 'ProgramDataBaseFileName', _file_name) # /Fd
_Same(_compile, 'RuntimeTypeInfo', _boolean) # /GR
_Same(_compile, 'ShowIncludes', _boolean) # /showIncludes
_Same(_compile, 'SmallerTypeCheck', _boolean) # /RTCc
_Same(_compile, 'StringPooling', _boolean) # /GF
_Same(_compile, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_compile, 'TreatWChar_tAsBuiltInType', _boolean) # /Zc:wchar_t
_Same(_compile, 'UndefineAllPreprocessorDefinitions', _boolean) # /u
_Same(_compile, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_compile, 'UseFullPaths', _boolean) # /FC
_Same(_compile, 'WholeProgramOptimization', _boolean) # /GL
_Same(_compile, 'XMLDocumentationFileName', _file_name)
_Same(_compile, 'AssemblerOutput',
_Enumeration(['NoListing',
'AssemblyCode', # /FA
'All', # /FAcs
'AssemblyAndMachineCode', # /FAc
'AssemblyAndSourceCode'])) # /FAs
_Same(_compile, 'BasicRuntimeChecks',
_Enumeration(['Default',
'StackFrameRuntimeCheck', # /RTCs
'UninitializedLocalUsageCheck', # /RTCu
'EnableFastChecks'])) # /RTC1
_Same(_compile, 'BrowseInformation',
_Enumeration(['false',
'true', # /FR
'true'])) # /Fr
_Same(_compile, 'CallingConvention',
_Enumeration(['Cdecl', # /Gd
'FastCall', # /Gr
'StdCall', # /Gz
'VectorCall'])) # /Gv
_Same(_compile, 'CompileAs',
_Enumeration(['Default',
'CompileAsC', # /TC
'CompileAsCpp'])) # /TP
_Same(_compile, 'DebugInformationFormat',
_Enumeration(['', # Disabled
'OldStyle', # /Z7
None,
'ProgramDatabase', # /Zi
'EditAndContinue'])) # /ZI
_Same(_compile, 'EnableEnhancedInstructionSet',
_Enumeration(['NotSet',
'StreamingSIMDExtensions', # /arch:SSE
'StreamingSIMDExtensions2', # /arch:SSE2
'AdvancedVectorExtensions', # /arch:AVX (vs2012+)
'NoExtensions', # /arch:IA32 (vs2012+)
# This one only exists in the new msbuild format.
'AdvancedVectorExtensions2', # /arch:AVX2 (vs2013r2+)
]))
_Same(_compile, 'ErrorReporting',
_Enumeration(['None', # /errorReport:none
'Prompt', # /errorReport:prompt
'Queue'], # /errorReport:queue
                   new=['Send'])) # /errorReport:send
_Same(_compile, 'ExceptionHandling',
_Enumeration(['false',
'Sync', # /EHsc
'Async'], # /EHa
new=['SyncCThrow'])) # /EHs
_Same(_compile, 'FavorSizeOrSpeed',
_Enumeration(['Neither',
'Speed', # /Ot
'Size'])) # /Os
_Same(_compile, 'FloatingPointModel',
_Enumeration(['Precise', # /fp:precise
'Strict', # /fp:strict
'Fast'])) # /fp:fast
_Same(_compile, 'InlineFunctionExpansion',
_Enumeration(['Default',
'OnlyExplicitInline', # /Ob1
'AnySuitable'], # /Ob2
new=['Disabled'])) # /Ob0
_Same(_compile, 'Optimization',
_Enumeration(['Disabled', # /Od
'MinSpace', # /O1
'MaxSpeed', # /O2
'Full'])) # /Ox
_Same(_compile, 'RuntimeLibrary',
_Enumeration(['MultiThreaded', # /MT
'MultiThreadedDebug', # /MTd
'MultiThreadedDLL', # /MD
'MultiThreadedDebugDLL'])) # /MDd
_Same(_compile, 'StructMemberAlignment',
_Enumeration(['Default',
'1Byte', # /Zp1
'2Bytes', # /Zp2
'4Bytes', # /Zp4
'8Bytes', # /Zp8
'16Bytes'])) # /Zp16
_Same(_compile, 'WarningLevel',
_Enumeration(['TurnOffAllWarnings', # /W0
'Level1', # /W1
'Level2', # /W2
'Level3', # /W3
'Level4'], # /W4
new=['EnableAllWarnings'])) # /Wall
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_compile, 'EnableFunctionLevelLinking', 'FunctionLevelLinking',
_boolean) # /Gy
_Renamed(_compile, 'EnableIntrinsicFunctions', 'IntrinsicFunctions',
_boolean) # /Oi
_Renamed(_compile, 'KeepComments', 'PreprocessKeepComments', _boolean) # /C
_Renamed(_compile, 'ObjectFile', 'ObjectFileName', _file_name) # /Fo
_Renamed(_compile, 'OpenMP', 'OpenMPSupport', _boolean) # /openmp
_Renamed(_compile, 'PrecompiledHeaderThrough', 'PrecompiledHeaderFile',
_file_name) # Used with /Yc and /Yu
_Renamed(_compile, 'PrecompiledHeaderFile', 'PrecompiledHeaderOutputFile',
_file_name) # /Fp
_Renamed(_compile, 'UsePrecompiledHeader', 'PrecompiledHeader',
_Enumeration(['NotUsing', # VS recognized '' for this value too.
'Create', # /Yc
'Use'])) # /Yu
_Renamed(_compile, 'WarnAsError', 'TreatWarningAsError', _boolean) # /WX
_ConvertedToAdditionalOption(_compile, 'DefaultCharIsUnsigned', '/J')
# MSVS options not found in MSBuild.
_MSVSOnly(_compile, 'Detect64BitPortabilityProblems', _boolean)
_MSVSOnly(_compile, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_compile, 'BuildingInIDE', _boolean)
_MSBuildOnly(_compile, 'CompileAsManaged',
_Enumeration([], new=['false',
'true'])) # /clr
_MSBuildOnly(_compile, 'CreateHotpatchableImage', _boolean) # /hotpatch
_MSBuildOnly(_compile, 'MultiProcessorCompilation', _boolean) # /MP
_MSBuildOnly(_compile, 'PreprocessOutputPath', _string) # /Fi
_MSBuildOnly(_compile, 'ProcessorNumber', _integer) # the number of processors
_MSBuildOnly(_compile, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_compile, 'TreatSpecificWarningsAsErrors', _string_list) # /we
_MSBuildOnly(_compile, 'UseUnicodeForAssemblerListing', _boolean) # /FAu
# Defines a setting that needs very customized processing
_CustomGeneratePreprocessedFile(_compile, 'GeneratePreprocessedFile')
# Directives for converting MSVS VCLinkerTool to MSBuild Link.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\link.xml" for
# the schema of the MSBuild Link settings.
# Options that have the same name in MSVS and MSBuild
_Same(_link, 'AdditionalDependencies', _file_list)
_Same(_link, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
# /MANIFESTDEPENDENCY:
_Same(_link, 'AdditionalManifestDependencies', _file_list)
_Same(_link, 'AdditionalOptions', _string_list)
_Same(_link, 'AddModuleNamesToAssembly', _file_list) # /ASSEMBLYMODULE
_Same(_link, 'AllowIsolation', _boolean) # /ALLOWISOLATION
_Same(_link, 'AssemblyLinkResource', _file_list) # /ASSEMBLYLINKRESOURCE
_Same(_link, 'BaseAddress', _string) # /BASE
_Same(_link, 'CLRUnmanagedCodeCheck', _boolean) # /CLRUNMANAGEDCODECHECK
_Same(_link, 'DelayLoadDLLs', _file_list) # /DELAYLOAD
_Same(_link, 'DelaySign', _boolean) # /DELAYSIGN
_Same(_link, 'EmbedManagedResourceFile', _file_list) # /ASSEMBLYRESOURCE
_Same(_link, 'EnableUAC', _boolean) # /MANIFESTUAC
_Same(_link, 'EntryPointSymbol', _string) # /ENTRY
_Same(_link, 'ForceSymbolReferences', _file_list) # /INCLUDE
_Same(_link, 'FunctionOrder', _file_name) # /ORDER
_Same(_link, 'GenerateDebugInformation', _boolean) # /DEBUG
_Same(_link, 'GenerateMapFile', _boolean) # /MAP
_Same(_link, 'HeapCommitSize', _string)
_Same(_link, 'HeapReserveSize', _string) # /HEAP
_Same(_link, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_link, 'IgnoreEmbeddedIDL', _boolean) # /IGNOREIDL
_Same(_link, 'ImportLibrary', _file_name) # /IMPLIB
_Same(_link, 'KeyContainer', _file_name) # /KEYCONTAINER
_Same(_link, 'KeyFile', _file_name) # /KEYFILE
_Same(_link, 'ManifestFile', _file_name) # /ManifestFile
_Same(_link, 'MapExports', _boolean) # /MAPINFO:EXPORTS
_Same(_link, 'MapFileName', _file_name)
_Same(_link, 'MergedIDLBaseFileName', _file_name) # /IDLOUT
_Same(_link, 'MergeSections', _string) # /MERGE
_Same(_link, 'MidlCommandFile', _file_name) # /MIDL
_Same(_link, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_link, 'OutputFile', _file_name) # /OUT
_Same(_link, 'PerUserRedirection', _boolean)
_Same(_link, 'Profile', _boolean) # /PROFILE
_Same(_link, 'ProfileGuidedDatabase', _file_name) # /PGD
_Same(_link, 'ProgramDatabaseFile', _file_name) # /PDB
_Same(_link, 'RegisterOutput', _boolean)
_Same(_link, 'SetChecksum', _boolean) # /RELEASE
_Same(_link, 'StackCommitSize', _string)
_Same(_link, 'StackReserveSize', _string) # /STACK
_Same(_link, 'StripPrivateSymbols', _file_name) # /PDBSTRIPPED
_Same(_link, 'SupportUnloadOfDelayLoadedDLL', _boolean) # /DELAY:UNLOAD
_Same(_link, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_link, 'SwapRunFromCD', _boolean) # /SWAPRUN:CD
_Same(_link, 'TurnOffAssemblyGeneration', _boolean) # /NOASSEMBLY
_Same(_link, 'TypeLibraryFile', _file_name) # /TLBOUT
_Same(_link, 'TypeLibraryResourceID', _integer) # /TLBID
_Same(_link, 'UACUIAccess', _boolean) # /uiAccess='true'
_Same(_link, 'Version', _string) # /VERSION
_Same(_link, 'EnableCOMDATFolding', _newly_boolean) # /OPT:ICF
_Same(_link, 'FixedBaseAddress', _newly_boolean) # /FIXED
_Same(_link, 'LargeAddressAware', _newly_boolean) # /LARGEADDRESSAWARE
_Same(_link, 'OptimizeReferences', _newly_boolean) # /OPT:REF
_Same(_link, 'RandomizedBaseAddress', _newly_boolean) # /DYNAMICBASE
_Same(_link, 'TerminalServerAware', _newly_boolean) # /TSAWARE
_subsystem_enumeration = _Enumeration(
['NotSet',
'Console', # /SUBSYSTEM:CONSOLE
'Windows', # /SUBSYSTEM:WINDOWS
'Native', # /SUBSYSTEM:NATIVE
'EFI Application', # /SUBSYSTEM:EFI_APPLICATION
'EFI Boot Service Driver', # /SUBSYSTEM:EFI_BOOT_SERVICE_DRIVER
'EFI ROM', # /SUBSYSTEM:EFI_ROM
'EFI Runtime', # /SUBSYSTEM:EFI_RUNTIME_DRIVER
'WindowsCE'], # /SUBSYSTEM:WINDOWSCE
new=['POSIX']) # /SUBSYSTEM:POSIX
_target_machine_enumeration = _Enumeration(
['NotSet',
'MachineX86', # /MACHINE:X86
None,
'MachineARM', # /MACHINE:ARM
'MachineEBC', # /MACHINE:EBC
'MachineIA64', # /MACHINE:IA64
None,
'MachineMIPS', # /MACHINE:MIPS
'MachineMIPS16', # /MACHINE:MIPS16
'MachineMIPSFPU', # /MACHINE:MIPSFPU
'MachineMIPSFPU16', # /MACHINE:MIPSFPU16
None,
None,
None,
'MachineSH4', # /MACHINE:SH4
None,
'MachineTHUMB', # /MACHINE:THUMB
'MachineX64']) # /MACHINE:X64
_Same(_link, 'AssemblyDebug',
_Enumeration(['',
'true', # /ASSEMBLYDEBUG
'false'])) # /ASSEMBLYDEBUG:DISABLE
_Same(_link, 'CLRImageType',
_Enumeration(['Default',
'ForceIJWImage', # /CLRIMAGETYPE:IJW
'ForcePureILImage', # /Switch="CLRIMAGETYPE:PURE
'ForceSafeILImage'])) # /Switch="CLRIMAGETYPE:SAFE
_Same(_link, 'CLRThreadAttribute',
_Enumeration(['DefaultThreadingAttribute', # /CLRTHREADATTRIBUTE:NONE
'MTAThreadingAttribute', # /CLRTHREADATTRIBUTE:MTA
'STAThreadingAttribute'])) # /CLRTHREADATTRIBUTE:STA
_Same(_link, 'DataExecutionPrevention',
_Enumeration(['',
'false', # /NXCOMPAT:NO
'true'])) # /NXCOMPAT
_Same(_link, 'Driver',
_Enumeration(['NotSet',
'Driver', # /Driver
'UpOnly', # /DRIVER:UPONLY
'WDM'])) # /DRIVER:WDM
_Same(_link, 'LinkTimeCodeGeneration',
_Enumeration(['Default',
'UseLinkTimeCodeGeneration', # /LTCG
'PGInstrument', # /LTCG:PGInstrument
'PGOptimization', # /LTCG:PGOptimize
'PGUpdate'])) # /LTCG:PGUpdate
_Same(_link, 'ShowProgress',
_Enumeration(['NotSet',
'LinkVerbose', # /VERBOSE
'LinkVerboseLib'], # /VERBOSE:Lib
new=['LinkVerboseICF', # /VERBOSE:ICF
'LinkVerboseREF', # /VERBOSE:REF
'LinkVerboseSAFESEH', # /VERBOSE:SAFESEH
'LinkVerboseCLR'])) # /VERBOSE:CLR
_Same(_link, 'SubSystem', _subsystem_enumeration)
_Same(_link, 'TargetMachine', _target_machine_enumeration)
_Same(_link, 'UACExecutionLevel',
_Enumeration(['AsInvoker', # /level='asInvoker'
'HighestAvailable', # /level='highestAvailable'
'RequireAdministrator'])) # /level='requireAdministrator'
_Same(_link, 'MinimumRequiredVersion', _string)
_Same(_link, 'TreatLinkerWarningAsErrors', _boolean) # /WX
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_link, 'ErrorReporting', 'LinkErrorReporting',
_Enumeration(['NoErrorReport', # /ERRORREPORT:NONE
'PromptImmediately', # /ERRORREPORT:PROMPT
'QueueForNextLogin'], # /ERRORREPORT:QUEUE
new=['SendErrorReport'])) # /ERRORREPORT:SEND
_Renamed(_link, 'IgnoreDefaultLibraryNames', 'IgnoreSpecificDefaultLibraries',
_file_list) # /NODEFAULTLIB
_Renamed(_link, 'ResourceOnlyDLL', 'NoEntryPoint', _boolean) # /NOENTRY
_Renamed(_link, 'SwapRunFromNet', 'SwapRunFromNET', _boolean) # /SWAPRUN:NET
_Moved(_link, 'GenerateManifest', '', _boolean)
_Moved(_link, 'IgnoreImportLibrary', '', _boolean)
_Moved(_link, 'LinkIncremental', '', _newly_boolean)
_Moved(_link, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
_Moved(_link, 'UseLibraryDependencyInputs', 'ProjectReference', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_link, 'OptimizeForWindows98', _newly_boolean)
_MSVSOnly(_link, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_link, 'BuildingInIDE', _boolean)
_MSBuildOnly(_link, 'ImageHasSafeExceptionHandlers', _boolean) # /SAFESEH
_MSBuildOnly(_link, 'LinkDLL', _boolean) # /DLL Visible='false'
_MSBuildOnly(_link, 'LinkStatus', _boolean) # /LTCG:STATUS
_MSBuildOnly(_link, 'PreventDllBinding', _boolean) # /ALLOWBIND
_MSBuildOnly(_link, 'SupportNobindOfDelayLoadedDLL', _boolean) # /DELAY:NOBIND
_MSBuildOnly(_link, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_link, 'MSDOSStubFileName', _file_name) # /STUB Visible='false'
_MSBuildOnly(_link, 'SectionAlignment', _integer) # /ALIGN
_MSBuildOnly(_link, 'SpecifySectionAttributes', _string) # /SECTION
_MSBuildOnly(_link, 'ForceFileOutput',
_Enumeration([], new=['Enabled', # /FORCE
# /FORCE:MULTIPLE
'MultiplyDefinedSymbolOnly',
'UndefinedSymbolOnly'])) # /FORCE:UNRESOLVED
_MSBuildOnly(_link, 'CreateHotPatchableImage',
_Enumeration([], new=['Enabled', # /FUNCTIONPADMIN
'X86Image', # /FUNCTIONPADMIN:5
'X64Image', # /FUNCTIONPADMIN:6
'ItaniumImage'])) # /FUNCTIONPADMIN:16
_MSBuildOnly(_link, 'CLRSupportLastError',
_Enumeration([], new=['Enabled', # /CLRSupportLastError
'Disabled', # /CLRSupportLastError:NO
# /CLRSupportLastError:SYSTEMDLL
'SystemDlls']))
# Directives for converting VCResourceCompilerTool to ResourceCompile.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\rc.xml" for
# the schema of the MSBuild ResourceCompile settings.
_Same(_rc, 'AdditionalOptions', _string_list)
_Same(_rc, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_rc, 'Culture', _Integer(msbuild_base=16))
_Same(_rc, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_rc, 'PreprocessorDefinitions', _string_list) # /D
_Same(_rc, 'ResourceOutputFileName', _string) # /fo
_Same(_rc, 'ShowProgress', _boolean) # /v
# There is no UI in VisualStudio 2008 to set the following properties.
# However they are found in CL and other tools. Include them here for
# completeness, as they are very likely to have the same usage pattern.
_Same(_rc, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_rc, 'UndefinePreprocessorDefinitions', _string_list) # /u
# MSBuild options not found in MSVS.
_MSBuildOnly(_rc, 'NullTerminateStrings', _boolean) # /n
_MSBuildOnly(_rc, 'TrackerLogDirectory', _folder_name)
# Directives for converting VCMIDLTool to Midl.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\midl.xml" for
# the schema of the MSBuild Midl settings.
_Same(_midl, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_midl, 'AdditionalOptions', _string_list)
_Same(_midl, 'CPreprocessOptions', _string) # /cpp_opt
_Same(_midl, 'ErrorCheckAllocations', _boolean) # /error allocation
_Same(_midl, 'ErrorCheckBounds', _boolean) # /error bounds_check
_Same(_midl, 'ErrorCheckEnumRange', _boolean) # /error enum
_Same(_midl, 'ErrorCheckRefPointers', _boolean) # /error ref
_Same(_midl, 'ErrorCheckStubData', _boolean) # /error stub_data
_Same(_midl, 'GenerateStublessProxies', _boolean) # /Oicf
_Same(_midl, 'GenerateTypeLibrary', _boolean)
_Same(_midl, 'HeaderFileName', _file_name) # /h
_Same(_midl, 'IgnoreStandardIncludePath', _boolean) # /no_def_idir
_Same(_midl, 'InterfaceIdentifierFileName', _file_name) # /iid
_Same(_midl, 'MkTypLibCompatible', _boolean) # /mktyplib203
_Same(_midl, 'OutputDirectory', _string) # /out
_Same(_midl, 'PreprocessorDefinitions', _string_list) # /D
_Same(_midl, 'ProxyFileName', _file_name) # /proxy
_Same(_midl, 'RedirectOutputAndErrors', _file_name) # /o
_Same(_midl, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_midl, 'TypeLibraryName', _file_name) # /tlb
_Same(_midl, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_midl, 'WarnAsError', _boolean) # /WX
_Same(_midl, 'DefaultCharType',
_Enumeration(['Unsigned', # /char unsigned
'Signed', # /char signed
'Ascii'])) # /char ascii7
_Same(_midl, 'TargetEnvironment',
_Enumeration(['NotSet',
'Win32', # /env win32
'Itanium', # /env ia64
'X64'])) # /env x64
_Same(_midl, 'EnableErrorChecks',
_Enumeration(['EnableCustom',
'None', # /error none
'All'])) # /error all
_Same(_midl, 'StructMemberAlignment',
_Enumeration(['NotSet',
'1', # Zp1
'2', # Zp2
'4', # Zp4
'8'])) # Zp8
_Same(_midl, 'WarningLevel',
_Enumeration(['0', # /W0
'1', # /W1
'2', # /W2
'3', # /W3
'4'])) # /W4
_Renamed(_midl, 'DLLDataFileName', 'DllDataFileName', _file_name) # /dlldata
_Renamed(_midl, 'ValidateParameters', 'ValidateAllParameters',
_boolean) # /robust
# MSBuild options not found in MSVS.
_MSBuildOnly(_midl, 'ApplicationConfigurationMode', _boolean) # /app_config
_MSBuildOnly(_midl, 'ClientStubFile', _file_name) # /cstub
_MSBuildOnly(_midl, 'GenerateClientFiles',
_Enumeration([], new=['Stub', # /client stub
'None'])) # /client none
_MSBuildOnly(_midl, 'GenerateServerFiles',
_Enumeration([], new=['Stub', # /client stub
'None'])) # /client none
_MSBuildOnly(_midl, 'LocaleID', _integer) # /lcid DECIMAL
_MSBuildOnly(_midl, 'ServerStubFile', _file_name) # /sstub
_MSBuildOnly(_midl, 'SuppressCompilerWarnings', _boolean) # /no_warn
_MSBuildOnly(_midl, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_midl, 'TypeLibFormat',
_Enumeration([], new=['NewFormat', # /newtlb
'OldFormat'])) # /oldtlb
# Directives for converting VCLibrarianTool to Lib.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\lib.xml" for
# the schema of the MSBuild Lib settings.
_Same(_lib, 'AdditionalDependencies', _file_list)
_Same(_lib, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
_Same(_lib, 'AdditionalOptions', _string_list)
_Same(_lib, 'ExportNamedFunctions', _string_list) # /EXPORT
_Same(_lib, 'ForceSymbolReferences', _string) # /INCLUDE
_Same(_lib, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_lib, 'IgnoreSpecificDefaultLibraries', _file_list) # /NODEFAULTLIB
_Same(_lib, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_lib, 'OutputFile', _file_name) # /OUT
_Same(_lib, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_lib, 'UseUnicodeResponseFiles', _boolean)
_Same(_lib, 'LinkTimeCodeGeneration', _boolean) # /LTCG
_Same(_lib, 'TargetMachine', _target_machine_enumeration)
# TODO(jeanluc) _link defines the same value that gets moved to
# ProjectReference. We may want to validate that they are consistent.
_Moved(_lib, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
_MSBuildOnly(_lib, 'DisplayLibrary', _string) # /LIST Visible='false'
_MSBuildOnly(_lib, 'ErrorReporting',
_Enumeration([], new=['PromptImmediately', # /ERRORREPORT:PROMPT
'QueueForNextLogin', # /ERRORREPORT:QUEUE
'SendErrorReport', # /ERRORREPORT:SEND
'NoErrorReport'])) # /ERRORREPORT:NONE
_MSBuildOnly(_lib, 'MinimumRequiredVersion', _string)
_MSBuildOnly(_lib, 'Name', _file_name) # /NAME
_MSBuildOnly(_lib, 'RemoveObjects', _file_list) # /REMOVE
_MSBuildOnly(_lib, 'SubSystem', _subsystem_enumeration)
_MSBuildOnly(_lib, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_lib, 'TreatLibWarningAsErrors', _boolean) # /WX
_MSBuildOnly(_lib, 'Verbose', _boolean)
# Directives for converting VCManifestTool to Mt.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\mt.xml" for
# the schema of the MSBuild Lib settings.
# Options that have the same name in MSVS and MSBuild
_Same(_manifest, 'AdditionalManifestFiles', _file_list) # /manifest
_Same(_manifest, 'AdditionalOptions', _string_list)
_Same(_manifest, 'AssemblyIdentity', _string) # /identity:
_Same(_manifest, 'ComponentFileName', _file_name) # /dll
_Same(_manifest, 'GenerateCatalogFiles', _boolean) # /makecdfs
_Same(_manifest, 'InputResourceManifests', _string) # /inputresource
_Same(_manifest, 'OutputManifestFile', _file_name) # /out
_Same(_manifest, 'RegistrarScriptFile', _file_name) # /rgs
_Same(_manifest, 'ReplacementsFile', _file_name) # /replacements
_Same(_manifest, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_manifest, 'TypeLibraryFile', _file_name) # /tlb:
_Same(_manifest, 'UpdateFileHashes', _boolean) # /hashupdate
_Same(_manifest, 'UpdateFileHashesSearchPath', _file_name)
_Same(_manifest, 'VerboseOutput', _boolean) # /verbose
# Options that have moved location.
_MovedAndRenamed(_manifest, 'ManifestResourceFile',
'ManifestResourceCompile',
'ResourceOutputFileName',
_file_name)
_Moved(_manifest, 'EmbedManifest', '', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_manifest, 'DependencyInformationFile', _file_name)
_MSVSOnly(_manifest, 'UseFAT32Workaround', _boolean)
_MSVSOnly(_manifest, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_manifest, 'EnableDPIAwareness', _boolean)
_MSBuildOnly(_manifest, 'GenerateCategoryTags', _boolean) # /category
_MSBuildOnly(_manifest, 'ManifestFromManagedAssembly',
_file_name) # /managedassemblyname
_MSBuildOnly(_manifest, 'OutputResourceManifests', _string) # /outputresource
_MSBuildOnly(_manifest, 'SuppressDependencyElement', _boolean) # /nodependency
_MSBuildOnly(_manifest, 'TrackerLogDirectory', _folder_name)
# Directives for MASM.
# See "$(VCTargetsPath)\BuildCustomizations\masm.xml" for the schema of the
# MSBuild MASM settings.
# Options that have the same name in MSVS and MSBuild.
_Same(_masm, 'UseSafeExceptionHandlers', _boolean) # /safeseh
| mit |
ltilve/ChromiumGStreamerBackend | chromecast/tools/build/generate_test_lists.py | 32 | 4698 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper script to generate unit test lists for the Chromecast build scripts.
"""
import glob
import optparse
import sys
def CombineList(test_files_dir, list_output_file, include_filters,
additional_runtime_options):
"""Writes a unit test file in a format compatible for Chromecast scripts.
If include_filters is True, uses filters to create a test runner list
  and also includes additional options, if any.
Otherwise, creates a list only of the tests to build.
Args:
test_files_dir: Path to the intermediate directory containing tests/filters.
list_output_file: Path to write the unit test file out to.
include_filters: Whether or not to include the filters when generating
        the test list.
    additional_runtime_options: Extra options appended to each test entry,
        if given.
  """
# GYP targets may provide a numbered priority for the filename. Sort to
# use that priority.
test_files = sorted(glob.glob(test_files_dir + "/*.tests"))
filter_files = sorted(glob.glob(test_files_dir + "/*.filters"))
test_bin_set = set()
for test_filename in test_files:
with open(test_filename, "r") as test_file:
for test_file_line in test_file:
# Binary name may be a simple test target (cast_net_unittests) or be a
# qualified gyp path (../base.gyp:base_unittests).
test_binary_name = test_file_line.split(":")[-1].strip()
test_bin_set.add(test_binary_name)
test_filters = {}
if include_filters:
for filter_filename in filter_files:
with open(filter_filename, "r") as filter_file:
for filter_line in filter_file:
filter = filter_line.strip()
test_binary_name = filter.split(" ", 1)[0]
if test_binary_name not in test_bin_set:
raise Exception("Filter found for unknown target: " +
test_binary_name)
# Note: This may overwrite a previous rule. This is okay, since higher
# priority files are evaluated after lower priority files.
test_filters[test_binary_name] = filter
test_binaries = (
list(test_bin_set - set(test_filters.keys())) +
test_filters.values())
if additional_runtime_options:
lines = [
binary + " " + additional_runtime_options
for binary in test_binaries
]
else:
lines = test_binaries
with open(list_output_file, "w") as f:
f.write("\n".join(sorted(lines)))
def CreateList(inputs, list_output_file):
with open(list_output_file, "w") as f:
f.write("\n".join(inputs))
def DoMain(argv):
"""Main method. Runs helper commands for generating unit test lists."""
parser = optparse.OptionParser(
"""usage: %prog [<options>] <command> [<test names>]
Valid commands:
create_list prints all given test names/args to a file, one line
per string
pack_build packs all test files from the given output directory
into a single test list file
pack_run packs all test and filter files from the given
output directory into a single test list file
""")
parser.add_option("-o", action="store", dest="list_output_file",
help="Output path in which to write the test list.")
parser.add_option("-t", action="store", dest="test_files_dir",
help="Intermediate test list directory.")
parser.add_option("-a", action="store", dest="additional_runtime_options",
help="Additional options applied to all tests.")
options, inputs = parser.parse_args(argv)
list_output_file = options.list_output_file
test_files_dir = options.test_files_dir
additional_runtime_options = options.additional_runtime_options
if len(inputs) < 1:
parser.error("No command given.\n")
command = inputs[0]
test_names = inputs[1:]
if not list_output_file:
parser.error("Output path (-o) is required.\n")
if command == "create_list":
return CreateList(test_names, list_output_file)
if command == "pack_build":
if not test_files_dir:
parser.error("pack_build require a test files directory (-t).\n")
return CombineList(test_files_dir, list_output_file, False, None)
if command == "pack_run":
if not test_files_dir:
parser.error("pack_run require a test files directory (-t).\n")
return CombineList(test_files_dir, list_output_file, True,
additional_runtime_options)
parser.error("Invalid command specified.")
if __name__ == "__main__":
DoMain(sys.argv[1:])
| bsd-3-clause |
ojii/sandlib | lib/lib_pypy/_ctypes/primitive.py | 1 | 11496 | import _ffi
import _rawffi
import weakref
import sys
SIMPLE_TYPE_CHARS = "cbBhHiIlLdfguzZqQPXOv?"
from _ctypes.basics import _CData, _CDataMeta, cdata_from_address,\
CArgObject
from _ctypes.builtin import ConvMode
from _ctypes.array import Array
from _ctypes.pointer import _Pointer, as_ffi_pointer
#from _ctypes.function import CFuncPtr # this import is done at the bottom
# because otherwise it would be circular
class NULL(object):
pass
NULL = NULL()
TP_TO_DEFAULT = {
'c': 0,
'u': 0,
'b': 0,
'B': 0,
'h': 0,
'H': 0,
'i': 0,
'I': 0,
'l': 0,
'L': 0,
'q': 0,
'Q': 0,
'f': 0.0,
'd': 0.0,
'g': 0.0,
'P': None,
# not part of struct
'O': NULL,
'z': None,
'Z': None,
'?': False,
}
if sys.platform == 'win32':
TP_TO_DEFAULT['X'] = NULL
TP_TO_DEFAULT['v'] = 0
DEFAULT_VALUE = object()
class GlobalPyobjContainer(object):
def __init__(self):
self.objs = []
def add(self, obj):
num = len(self.objs)
self.objs.append(weakref.ref(obj))
return num
def get(self, num):
return self.objs[num]()
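# Process-wide registry used by the 'O' (py_object) type below: objects are held
# as weakrefs and only their index is stored in the raw buffer, so get() may
# return None once the original object has been collected.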
pyobj_container = GlobalPyobjContainer()
def generic_xxx_p_from_param(cls, value):
if value is None:
return cls(None)
if isinstance(value, basestring):
return cls(value)
if isinstance(value, _SimpleCData) and \
type(value)._type_ in 'zZP':
return value
return None # eventually raise
def from_param_char_p(cls, value):
"used by c_char_p and c_wchar_p subclasses"
res = generic_xxx_p_from_param(cls, value)
if res is not None:
return res
if isinstance(value, (Array, _Pointer)):
from ctypes import c_char, c_byte, c_wchar
if type(value)._type_ in [c_char, c_byte, c_wchar]:
return value
def from_param_void_p(cls, value):
"used by c_void_p subclasses"
res = generic_xxx_p_from_param(cls, value)
if res is not None:
return res
if isinstance(value, Array):
return value
if isinstance(value, (_Pointer, CFuncPtr)):
return cls.from_address(value._buffer.buffer)
if isinstance(value, (int, long)):
return cls(value)
FROM_PARAM_BY_TYPE = {
'z': from_param_char_p,
'Z': from_param_char_p,
'P': from_param_void_p,
}
class SimpleType(_CDataMeta):
def __new__(self, name, bases, dct):
try:
tp = dct['_type_']
except KeyError:
for base in bases:
if hasattr(base, '_type_'):
tp = base._type_
break
else:
raise AttributeError("cannot find _type_ attribute")
if (not isinstance(tp, str) or
not len(tp) == 1 or
tp not in SIMPLE_TYPE_CHARS):
raise ValueError('%s is not a type character' % (tp))
default = TP_TO_DEFAULT[tp]
ffiarray = _rawffi.Array(tp)
result = type.__new__(self, name, bases, dct)
result._ffiargshape = tp
result._ffishape = tp
result._fficompositesize = None
result._ffiarray = ffiarray
if tp == 'z':
# c_char_p
def _getvalue(self):
addr = self._buffer[0]
if addr == 0:
return None
else:
return _rawffi.charp2string(addr)
def _setvalue(self, value):
if isinstance(value, basestring):
if isinstance(value, unicode):
value = value.encode(ConvMode.encoding,
ConvMode.errors)
#self._objects = value
array = _rawffi.Array('c')(len(value)+1, value)
self._objects = CArgObject(value, array)
value = array.buffer
elif value is None:
value = 0
self._buffer[0] = value
result.value = property(_getvalue, _setvalue)
result._ffiargtype = _ffi.types.Pointer(_ffi.types.char)
elif tp == 'Z':
# c_wchar_p
def _getvalue(self):
addr = self._buffer[0]
if addr == 0:
return None
else:
return _rawffi.wcharp2unicode(addr)
def _setvalue(self, value):
if isinstance(value, basestring):
if isinstance(value, str):
value = value.decode(ConvMode.encoding,
ConvMode.errors)
#self._objects = value
array = _rawffi.Array('u')(len(value)+1, value)
self._objects = CArgObject(value, array)
value = array.buffer
elif value is None:
value = 0
self._buffer[0] = value
result.value = property(_getvalue, _setvalue)
result._ffiargtype = _ffi.types.Pointer(_ffi.types.unichar)
elif tp == 'P':
# c_void_p
def _getvalue(self):
addr = self._buffer[0]
if addr == 0:
return None
return addr
def _setvalue(self, value):
if isinstance(value, str):
array = _rawffi.Array('c')(len(value)+1, value)
self._objects = CArgObject(value, array)
value = array.buffer
elif value is None:
value = 0
self._buffer[0] = value
result.value = property(_getvalue, _setvalue)
elif tp == 'u':
def _setvalue(self, val):
if isinstance(val, str):
val = val.decode(ConvMode.encoding, ConvMode.errors)
# possible if we use 'ignore'
if val:
self._buffer[0] = val
def _getvalue(self):
return self._buffer[0]
result.value = property(_getvalue, _setvalue)
elif tp == 'c':
def _setvalue(self, val):
if isinstance(val, unicode):
val = val.encode(ConvMode.encoding, ConvMode.errors)
if val:
self._buffer[0] = val
def _getvalue(self):
return self._buffer[0]
result.value = property(_getvalue, _setvalue)
elif tp == 'O':
def _setvalue(self, val):
num = pyobj_container.add(val)
self._buffer[0] = num
def _getvalue(self):
return pyobj_container.get(self._buffer[0])
result.value = property(_getvalue, _setvalue)
elif tp == 'X':
from ctypes import WinDLL
# Use WinDLL("oleaut32") instead of windll.oleaut32
# because the latter is a shared (cached) object; and
            # other code may set their own restypes. We need our own
# restype here.
oleaut32 = WinDLL("oleaut32")
SysAllocStringLen = oleaut32.SysAllocStringLen
SysStringLen = oleaut32.SysStringLen
SysFreeString = oleaut32.SysFreeString
def _getvalue(self):
addr = self._buffer[0]
if addr == 0:
return None
else:
size = SysStringLen(addr)
return _rawffi.wcharp2rawunicode(addr, size)
def _setvalue(self, value):
if isinstance(value, basestring):
if isinstance(value, str):
value = value.decode(ConvMode.encoding,
ConvMode.errors)
array = _rawffi.Array('u')(len(value)+1, value)
value = SysAllocStringLen(array.buffer, len(value))
elif value is None:
value = 0
if self._buffer[0]:
SysFreeString(self._buffer[0])
self._buffer[0] = value
result.value = property(_getvalue, _setvalue)
elif tp == '?': # regular bool
def _getvalue(self):
return bool(self._buffer[0])
def _setvalue(self, value):
self._buffer[0] = bool(value)
result.value = property(_getvalue, _setvalue)
elif tp == 'v': # VARIANT_BOOL type
def _getvalue(self):
return bool(self._buffer[0])
def _setvalue(self, value):
if value:
self._buffer[0] = -1 # VARIANT_TRUE
else:
self._buffer[0] = 0 # VARIANT_FALSE
result.value = property(_getvalue, _setvalue)
# make pointer-types compatible with the _ffi fast path
if result._is_pointer_like():
def _as_ffi_pointer_(self, ffitype):
return as_ffi_pointer(self, ffitype)
result._as_ffi_pointer_ = _as_ffi_pointer_
return result
from_address = cdata_from_address
def from_param(self, value):
if isinstance(value, self):
return value
from_param_f = FROM_PARAM_BY_TYPE.get(self._type_)
if from_param_f:
res = from_param_f(self, value)
if res is not None:
return res
else:
try:
return self(value)
except (TypeError, ValueError):
pass
return super(SimpleType, self).from_param(value)
def _CData_output(self, resbuffer, base=None, index=-1):
output = super(SimpleType, self)._CData_output(resbuffer, base, index)
if self.__bases__[0] is _SimpleCData:
return output.value
return output
def _sizeofinstances(self):
return _rawffi.sizeof(self._type_)
def _alignmentofinstances(self):
return _rawffi.alignment(self._type_)
def _is_pointer_like(self):
return self._type_ in "sPzUZXO"
class _SimpleCData(_CData):
__metaclass__ = SimpleType
_type_ = 'i'
def __init__(self, value=DEFAULT_VALUE):
if not hasattr(self, '_buffer'):
self._buffer = self._ffiarray(1, autofree=True)
if value is not DEFAULT_VALUE:
self.value = value
def _ensure_objects(self):
if self._type_ not in 'zZP':
assert self._objects is None
return self._objects
def _getvalue(self):
return self._buffer[0]
def _setvalue(self, value):
self._buffer[0] = value
value = property(_getvalue, _setvalue)
del _getvalue, _setvalue
def __ctypes_from_outparam__(self):
meta = type(type(self))
if issubclass(meta, SimpleType) and meta != SimpleType:
return self
return self.value
def __repr__(self):
if type(self).__bases__[0] is _SimpleCData:
return "%s(%r)" % (type(self).__name__, self.value)
else:
return "<%s object at 0x%x>" % (type(self).__name__,
id(self))
def __nonzero__(self):
return self._buffer[0] not in (0, '\x00')
from _ctypes.function import CFuncPtr
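# ---------------------------------------------------------------------------
# Hedged example (not part of the original module): how the simple types
# defined above are usually exercised through the public ctypes API. The
# session below is illustrative only.
#
#   >>> from ctypes import c_char_p, c_void_p, c_bool
#   >>> s = c_char_p("spam")        # 'z': keeps its buffer alive via _objects
#   >>> s.value
#   'spam'
#   >>> print c_void_p(None).value  # 'P': NULL pointers read back as None
#   None
#   >>> c_bool(2).value             # '?': any truthy value normalises to True
#   True
# ---------------------------------------------------------------------------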
| bsd-3-clause |
LoHChina/nova | nova/tests/unit/virt/hyperv/test_rdpconsoleutils.py | 83 | 1101 | # Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import test
from nova.virt.hyperv import rdpconsoleutils
class RDPConsoleUtilsTestCase(test.NoDBTestCase):
def setUp(self):
self._rdpconsoleutils = rdpconsoleutils.RDPConsoleUtils()
super(RDPConsoleUtilsTestCase, self).setUp()
def test_get_rdp_console_port(self):
listener_port = self._rdpconsoleutils.get_rdp_console_port()
self.assertEqual(self._rdpconsoleutils._DEFAULT_HYPERV_RDP_PORT,
listener_port)
| apache-2.0 |
skearnes/pylearn2 | pylearn2/sandbox/cuda_convnet/tests/test_response_norm.py | 4 | 1500 | import numpy
import theano
from nose.plugins.skip import SkipTest
from theano.tests.unittest_tools import verify_grad
try:
from pylearn2.sandbox.cuda_convnet.response_norm import (
CrossMapNorm,
CrossMapNormUndo
)
from theano.sandbox.cuda import CudaNdarrayType, CudaNdarray
from theano.sandbox.cuda import gpu_from_host
except ImportError:
raise SkipTest('cuda not available')
def test_cross_map_norm_simple():
op = CrossMapNorm(16, 15. / 16., 1., True)
x = CudaNdarray(numpy.ones((16, 2, 2, 2), dtype='float32'))
x_ = theano.tensor.TensorVariable(CudaNdarrayType([False] * 4))
f = theano.function([x_], op(x_)[0])
numpy.testing.assert_allclose(f(x), 0.0625)
def test_cross_map_norm_grad_simple():
rng = numpy.random.RandomState([2013, 02, 10])
op = CrossMapNorm(16, 15/16., 1, True)
make_graph = lambda inp: op(gpu_from_host(inp))[0]
verify = lambda array: verify_grad(make_graph, [array])
inputs = [numpy.ones((16, 1, 1, 1), dtype='float32'),
rng.normal(size=(32, 5, 5, 10)).astype('float32')]
for arr in inputs:
yield verify, arr
def test_optimization():
op = CrossMapNorm(16, 15./16., 1, True)
x_ = theano.tensor.TensorVariable(CudaNdarrayType([False] * 4))
f = theano.function([x_], theano.grad(op(x_)[0].sum(), x_))
nodes = [x for x in f.maker.fgraph.apply_nodes
if type(x.op) == CrossMapNormUndo]
assert len(nodes) == 1
assert nodes[0].op.inplace
| bsd-3-clause |
joe820730/xiaomi_aries_kernel | tools/perf/scripts/python/net_dropmonitor.py | 4235 | 1554 | # Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
linecount = 0
for line in f:
linecount = linecount+1
f.seek(0)
except:
return
j = 0
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
j = j +1
if ((j % 100) == 0):
print "\r" + str(j) + "/" + str(linecount),
kallsyms.append({ 'loc': loc, 'name' : name})
print "\r" + str(j) + "/" + str(linecount)
kallsyms.sort()
return
def get_sym(sloc):
loc = int(sloc)
for i in kallsyms:
if (i['loc'] >= loc):
return (i['name'], i['loc']-loc)
return (None, 0)
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
| gpl-2.0 |
bnsgeyer/Copter3_4 | mk/VRBRAIN/Tools/genmsg/src/genmsg/gentools.py | 51 | 6819 | #! /usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Library for supporting message and service generation for all ROS
client libraries. This is mainly responsible for calculating the
md5sums and message definitions of classes.
"""
# NOTE: this should not contain any rospy-specific code. The rospy
# generator library is rospy.genpy.
import sys
import hashlib
try:
from cStringIO import StringIO # Python 2.x
except ImportError:
from io import StringIO # Python 3.x
from . import msgs
from .msgs import InvalidMsgSpec, MsgSpec, bare_msg_type, is_builtin
from .msg_loader import load_depends
from .srvs import SrvSpec
from . import names
from . import base
def compute_md5_text(msg_context, spec):
"""
Compute the text used for md5 calculation. MD5 spec states that we
    remove comments and non-meaningful whitespace. We also strip
    package names from type names. For convenience's sake, constants are
reordered ahead of other declarations, in the order that they were
originally defined.
:returns: text for ROS MD5-processing, ``str``
"""
package = spec.package
buff = StringIO()
for c in spec.constants:
buff.write("%s %s=%s\n"%(c.type, c.name, c.val_text))
for type_, name in zip(spec.types, spec.names):
msg_type = bare_msg_type(type_)
# md5 spec strips package names
if is_builtin(msg_type):
buff.write("%s %s\n"%(type_, name))
else:
# recursively generate md5 for subtype. have to build up
# dependency representation for subtype in order to
# generate md5
sub_pkg, _ = names.package_resource_name(msg_type)
sub_pkg = sub_pkg or package
sub_spec = msg_context.get_registered(msg_type)
sub_md5 = compute_md5(msg_context, sub_spec)
buff.write("%s %s\n"%(sub_md5, name))
return buff.getvalue().strip() # remove trailing new line
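# Hedged illustration (not part of the original library): for a message spec
# such as
#
#     string name
#     geometry_msgs/Point position
#     int32 CONST=5
#
# compute_md5_text() writes the constant first, keeps builtin fields verbatim
# and replaces the embedded type by the md5 of its own spec, producing e.g.
#
#     int32 CONST=5
#     string name
#     4a842b65f413084dc2b10fb484ea7f17 position
#
# (the embedded hash shown here is purely illustrative).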
def _compute_hash(msg_context, spec, hash):
"""
subroutine of compute_md5()
:param msg_context: :class:`MsgContext` instance to load dependencies into/from.
:param spec: :class:`MsgSpec` to compute hash for.
:param hash: hash instance
"""
# accumulate the hash
# - root file
if isinstance(spec, MsgSpec):
hash.update(compute_md5_text(msg_context, spec).encode())
elif isinstance(spec, SrvSpec):
hash.update(compute_md5_text(msg_context, spec.request).encode())
hash.update(compute_md5_text(msg_context, spec.response).encode())
else:
raise Exception("[%s] is not a message or service"%spec)
return hash.hexdigest()
def compute_md5(msg_context, spec):
"""
Compute md5 hash for message/service
:param msg_context: :class:`MsgContext` instance to load dependencies into/from.
:param spec: :class:`MsgSpec` to compute md5 for.
:returns: md5 hash, ``str``
"""
return _compute_hash(msg_context, spec, hashlib.md5())
## alias
compute_md5_v2 = compute_md5
def _unique_deps(dep_list):
uniques = []
for d in dep_list:
if d not in uniques:
uniques.append(d)
return uniques
def compute_full_text(msg_context, spec):
"""
Compute full text of message/service, including text of embedded
types. The text of the main msg/srv is listed first. Embedded
msg/srv files are denoted first by an 80-character '=' separator,
followed by a type declaration line,'MSG: pkg/type', followed by
the text of the embedded type.
:param msg_context: :class:`MsgContext` instance to load dependencies into/from.
:param spec: :class:`MsgSpec` to compute full text for.
:returns: concatenated text for msg/srv file and embedded msg/srv types, ``str``
"""
buff = StringIO()
sep = '='*80+'\n'
# write the text of the top-level type
buff.write(spec.text)
buff.write('\n')
# append the text of the dependencies (embedded types). Can't use set() as we have to preserve order.
for d in _unique_deps(msg_context.get_all_depends(spec.full_name)):
buff.write(sep)
buff.write("MSG: %s\n"%d)
buff.write(msg_context.get_registered(d).text)
buff.write('\n')
# #1168: remove the trailing \n separator that is added by the concatenation logic
return buff.getvalue()[:-1]
def compute_full_type_name(package_name, file_name):
"""
Compute the full type name of message/service 'pkg/type'.
:param package_name: name of package file is in, ``str``
    :param file_name: name of the msg or srv file, ``str``
:returns: typename in format 'pkg/type'
:raises: :exc:`MsgGenerationException` if file_name ends with an unknown file extension
"""
# strip extension
for ext in (base.EXT_MSG, base.EXT_SRV):
if file_name.endswith(ext):
short_name = file_name[:-len(ext)]
break
else:
raise base.MsgGenerationException("Processing file: '%s' - unknown file extension"% (file_name))
return "%s/%s"%(package_name, short_name)
| gpl-3.0 |
M4sse/chromium.src | third_party/tlslite/tlslite/utils/openssl_aes.py | 202 | 1944 | # Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""OpenSSL/M2Crypto AES implementation."""
from .cryptomath import *
from .aes import *
if m2cryptoLoaded:
def new(key, mode, IV):
return OpenSSL_AES(key, mode, IV)
class OpenSSL_AES(AES):
def __init__(self, key, mode, IV):
AES.__init__(self, key, mode, IV, "openssl")
self.key = key
self.IV = IV
def _createContext(self, encrypt):
context = m2.cipher_ctx_new()
if len(self.key)==16:
cipherType = m2.aes_128_cbc()
if len(self.key)==24:
cipherType = m2.aes_192_cbc()
if len(self.key)==32:
cipherType = m2.aes_256_cbc()
m2.cipher_init(context, cipherType, self.key, self.IV, encrypt)
return context
def encrypt(self, plaintext):
AES.encrypt(self, plaintext)
context = self._createContext(1)
ciphertext = m2.cipher_update(context, plaintext)
m2.cipher_ctx_free(context)
self.IV = ciphertext[-self.block_size:]
return bytearray(ciphertext)
def decrypt(self, ciphertext):
AES.decrypt(self, ciphertext)
context = self._createContext(0)
#I think M2Crypto has a bug - it fails to decrypt and return the last block passed in.
#To work around this, we append sixteen zeros to the string, below:
plaintext = m2.cipher_update(context, ciphertext+('\0'*16))
#If this bug is ever fixed, then plaintext will end up having a garbage
#plaintext block on the end. That's okay - the below code will discard it.
plaintext = plaintext[:len(ciphertext)]
m2.cipher_ctx_free(context)
self.IV = ciphertext[-self.block_size:]
return bytearray(plaintext)
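# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). tlslite normally
# obtains ciphers through its factory layer, but used directly the class
# above behaves roughly like this (treating mode 2 == CBC as an assumption):
#
#   key = b'\x00' * 16
#   iv = b'\x00' * 16
#   cipher = new(key, 2, iv)                  # only defined when m2cryptoLoaded
#   ct = cipher.encrypt(b'sixteen byte blk')  # length must be a multiple of 16
#   pt = new(key, 2, iv).decrypt(ct)          # fresh context with the same IV
# ---------------------------------------------------------------------------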
| bsd-3-clause |
jss-emr/openerp-7-src | openerp/addons/hr_timesheet_invoice/wizard/hr_timesheet_analytic_profit.py | 52 | 3163 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_analytic_profit(osv.osv_memory):
_name = 'hr.timesheet.analytic.profit'
_description = 'Print Timesheet Profit'
_columns = {
'date_from': fields.date('From', required=True),
'date_to': fields.date('To', required=True),
'journal_ids': fields.many2many('account.analytic.journal', 'analytic_profit_journal_rel', 'analytic_id', 'journal_id', 'Journal', required=True),
'employee_ids': fields.many2many('res.users', 'analytic_profit_emp_rel', 'analytic_id', 'emp_id', 'User', required=True),
}
def _date_from(*a):
return datetime.date.today().replace(day=1).strftime('%Y-%m-%d')
def _date_to(*a):
return datetime.date.today().strftime('%Y-%m-%d')
_defaults = {
'date_from': _date_from,
'date_to': _date_to
}
def print_report(self, cr, uid, ids, context=None):
line_obj = self.pool.get('account.analytic.line')
data = {}
data['form'] = self.read(cr, uid , ids, [], context=context)[0]
ids_chk = line_obj.search(cr, uid, [
('date', '>=', data['form']['date_from']),
('date', '<=', data['form']['date_to']),
('journal_id', 'in', data['form']['journal_ids']),
('user_id', 'in', data['form']['employee_ids']),
], context=context)
if not ids_chk:
raise osv.except_osv(_('Insufficient Data!'), _('No record(s) found for this report.'))
        data['form']['journal_ids'] = [(6, 0, data['form']['journal_ids'])] # Improve me => Change the rml/sxw so that it can support without [0][2]
data['form']['employee_ids'] = [(6, 0, data['form']['employee_ids'])]
datas = {
'ids': [],
'model': 'account.analytic.line',
'form': data['form']
}
return {
'type': 'ir.actions.report.xml',
'report_name': 'account.analytic.profit',
'datas': datas,
}
account_analytic_profit()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kubeflow/kubeflow | py/kubeflow/kubeflow/ci/common_ui_tests.py | 1 | 4563 | """Argo Workflow for running frontend unit tests"""
from kubeflow.kubeflow.ci import workflow_utils
from kubeflow.testing import argo_build_util
class Builder(workflow_utils.ArgoTestBuilder):
def __init__(self, name=None, namespace=None, bucket=None,
test_target_name=None, **kwargs):
super().__init__(name=name, namespace=namespace, bucket=bucket,
test_target_name=test_target_name, **kwargs)
def _create_install_modules_task(self, task_template):
install = argo_build_util.deep_copy(task_template)
install["name"] = "npm-modules-install"
install["container"]["image"] = "node:12.20.1-stretch-slim"
install["container"]["command"] = ["npm"]
install["container"]["args"] = ["ci"]
ui_dir = ("%s/components/crud-web-apps/common/"
"frontend/kubeflow-common-lib/") % self.src_dir
install["container"]["workingDir"] = ui_dir
return install
def _create_ui_tests_task(self, task_template):
ui_tests = argo_build_util.deep_copy(task_template)
img = "browserless/chrome:1.44-chrome-stable"
ui_tests["name"] = "common-ui-tests"
ui_tests["container"]["image"] = img
ui_tests["container"]["command"] = ["npm"]
ui_tests["container"]["args"] = ["run", "test-ci"]
ui_dir = ("%s/components/crud-web-apps/common/"
"frontend/kubeflow-common-lib/") % self.src_dir
ui_tests["container"]["workingDir"] = ui_dir
return ui_tests
def _create_ui_build_task(self, task_template):
ui_build = argo_build_util.deep_copy(task_template)
ui_build["name"] = "build-common-ui-library"
ui_build["container"]["image"] = "node:12.20.1-stretch-slim"
ui_build["container"]["command"] = ["npm"]
ui_build["container"]["args"] = ["run", "build"]
ui_dir = ("%s/components/crud-web-apps/common/"
"frontend/kubeflow-common-lib/") % self.src_dir
ui_build["container"]["workingDir"] = ui_dir
return ui_build
def _create_exit_handler(self, task_template):
ui_build = argo_build_util.deep_copy(task_template)
ui_build["name"] = "rm-node-modules"
ui_build["container"]["image"] = "node:12.20.1-stretch-slim"
ui_build["container"]["command"] = ["rm"]
ui_build["container"]["args"] = ["-r", "node_modules"]
ui_dir = ("%s/components/crud-web-apps/common/"
"frontend/kubeflow-common-lib/") % self.src_dir
ui_build["container"]["workingDir"] = ui_dir
return ui_build
def build(self):
"""Build the Argo workflow graph"""
workflow = self.build_init_workflow()
task_template = self.build_task_template()
# install npm modules
modules_install_task = self._create_install_modules_task(task_template)
argo_build_util.add_task_to_dag(workflow, workflow_utils.E2E_DAG_NAME,
modules_install_task,
[self.mkdir_task_name])
# run common ui frontend tests
ui_tests_task = self._create_ui_tests_task(task_template)
argo_build_util.add_task_to_dag(workflow, workflow_utils.E2E_DAG_NAME,
ui_tests_task,
[modules_install_task["name"]])
# build the node module from the lib source code
build_step = self._create_ui_build_task(task_template)
argo_build_util.add_task_to_dag(workflow, workflow_utils.E2E_DAG_NAME,
build_step,
[modules_install_task["name"]])
# EXIT-HANDLER: remove node_modules folder as exit handler
rm_node_modules = self._create_exit_handler(task_template)
argo_build_util.add_task_to_dag(workflow, workflow_utils.EXIT_DAG_NAME,
rm_node_modules, [])
# Set the labels on all templates
workflow = argo_build_util.set_task_template_labels(workflow)
return workflow
def create_workflow(name=None, namespace=None, bucket=None, **kwargs):
"""Create workflow returns an Argo workflow to test kfctl upgrades.
Args:
name: Name to give to the workflow. This can also be used to name
things associated with the workflow.
"""
builder = Builder(name=name, namespace=namespace, bucket=bucket, **kwargs)
return builder.build()
| apache-2.0 |
mongolab/mongoctl | mongoctl/tests/sharded_test.py | 1 | 2582 | # The MIT License
# Copyright (c) 2012 ObjectLabs Corporation
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import unittest
import time
from mongoctl.tests.test_base import MongoctlTestBase, append_user_arg
########################################################################################################################
# Servers
SHARD_TEST_SERVERS = [
"ConfigServer1",
"ConfigServer2",
"ConfigServer3",
"Mongos1",
"Mongos2",
"ShardServer1",
"ShardServer2",
"ShardServer3",
"ShardServer4",
"ShardServer5",
"ShardServer6",
"ShardArbiter"
]
########################################################################################################################
### Sharded Servers
class ShardedTest(MongoctlTestBase):
########################################################################################################################
def test_sharded(self):
# Start all sharded servers
for s_id in SHARD_TEST_SERVERS:
self.assert_start_server(s_id, start_options=["--rs-add"])
print "Sleeping for 10 seconds..."
# sleep for 10 of seconds
time.sleep(10)
conf_cmd = ["configure-shard-cluster", "ShardedCluster"]
append_user_arg(conf_cmd)
# Configure the sharded cluster
self.mongoctl_assert_cmd(conf_cmd)
###########################################################################
def get_my_test_servers(self):
return SHARD_TEST_SERVERS
# booty
if __name__ == '__main__':
unittest.main()
| mit |
jamasi/Xtal-xplore-R | gui/doublespinslider.py | 1 | 3682 | # -*- coding: utf-8 -*-
"""DoubleSpinSlider - a custom widget combining a slider with a spinbox
Copyright (C) 2014 Jan M. Simons <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import division, print_function, absolute_import
from decimal import Decimal
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import pyqtSlot
class DoubleSpinSlider(QtGui.QWidget):
"""This is a QWidget containing a QSlider and a QDoubleSpinBox"""
def __init__(self, parent=None, width=50, height=100, dpi=100):
#super(DoubleSpinSlider, self).__init__(parent)
QtGui.QWidget.__init__(self, parent)
self._vLayout = QtGui.QVBoxLayout()
self._label = QtGui.QLabel(parent)
self._label.setAlignment(QtCore.Qt.AlignCenter)
self._vLayout.addWidget(self._label)
self._dSBox = QtGui.QDoubleSpinBox(parent)
self._dSBox.setWrapping(True)
self._dSBox.setDecimals(4)
self._dSBox.setMaximum(1.00000000)
self._dSBox.setSingleStep(0.1000000000)
self._vLayout.addWidget(self._dSBox)
self._hLayout = QtGui.QHBoxLayout()
self._vSlider = QtGui.QSlider(parent)
self._vSlider.setMinimum(0)
self._vSlider.setMaximum(10000)
self._vSlider.setPageStep(1000)
self._vSlider.setOrientation(QtCore.Qt.Vertical)
self._vSlider.setTickPosition(QtGui.QSlider.TicksBothSides)
self._vSlider.setTickInterval(0)
self._hLayout.addWidget(self._vSlider)
self._vLayout.addLayout(self._hLayout)
self.setLayout(self._vLayout)
self.setParent(parent)
# map functions
self.setText = self._label.setText
self.text = self._label.text
self.setValue = self._dSBox.setValue
self.value = self._dSBox.value
self._vSlider.valueChanged.connect(self.ChangeSpinBox)
self._dSBox.valueChanged.connect(self.ChangeSlider)
def _multiplier(self):
return 10.000000 ** self._dSBox.decimals()
@pyqtSlot(int)
def ChangeSpinBox(self, slidervalue):
#print("sv: {}".format(slidervalue))
newvalue = round(slidervalue / (self._multiplier()),4)
#print("nv: {}".format(newvalue))
if newvalue != self._dSBox.value():
self._dSBox.setValue(newvalue)
@pyqtSlot('double')
def ChangeSlider(self, spinboxvalue):
newvalue = spinboxvalue * self._multiplier()
#print("sb: {sb} mult: {mult} prod: {prod}".format(
# sb=spinboxvalue,
# mult=int(10.00000000 ** self._dSBox.decimals()),
# prod=newvalue))
self._vSlider.setValue(newvalue)
@pyqtSlot('double')
def setMaximum(self, maximum):
self._dSBox.setMaximum(maximum)
self._vSlider.setMaximum(maximum * self._multiplier())
@pyqtSlot('double')
def setMinimum(self, minimum):
self._dSBox.setMinimum(minimum)
self._vSlider.setMinimum(minimum * self._multiplier())
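# Hedged demo (not part of the original widget): runs only when this module is
# executed directly and PyQt4 is available.
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    widget = DoubleSpinSlider()
    widget.setText("weight")  # arbitrary label, purely for the demo
    widget.show()
    sys.exit(app.exec_())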
| agpl-3.0 |
rbharath/deepchem | examples/qm9/qm9_tf_model.py | 2 | 1512 | """
Script that trains Tensorflow multitask models on QM9 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import deepchem as dc
import numpy as np
from qm9_datasets import load_qm9
np.random.seed(123)
qm9_tasks, datasets, transformers = load_qm9()
train_dataset, valid_dataset, test_dataset = datasets
fit_transformers = [dc.trans.CoulombFitTransformer(train_dataset)]
regression_metric = [
dc.metrics.Metric(dc.metrics.mean_absolute_error, mode="regression"),
dc.metrics.Metric(dc.metrics.pearson_r2_score, mode="regression")
]
model = dc.models.TensorflowMultiTaskFitTransformRegressor(
n_tasks=len(qm9_tasks),
n_features=[29, 29],
learning_rate=0.001,
momentum=.8,
batch_size=32,
weight_init_stddevs=[1 / np.sqrt(400), 1 / np.sqrt(100), 1 / np.sqrt(100)],
bias_init_consts=[0., 0., 0.],
layer_sizes=[400, 100, 100],
dropouts=[0.01, 0.01, 0.01],
fit_transformers=fit_transformers,
n_evals=10,
seed=123)
# Fit trained model
model.fit(train_dataset, nb_epoch=50)
model.save()
train_scores = model.evaluate(train_dataset, regression_metric, transformers)
print("Train scores [kcal/mol]")
print(train_scores)
valid_scores = model.evaluate(valid_dataset, regression_metric, transformers)
print("Valid scores [kcal/mol]")
print(valid_scores)
test_scores = model.evaluate(test_dataset, regression_metric, transformers)
print("Test scores [kcal/mol]")
print(test_scores)
| mit |
TheTimmy/spack | lib/spack/spack/cmd/configure.py | 2 | 3509 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import argparse
import llnl.util.tty as tty
import spack.cmd
import spack.cmd.install as inst
from spack import *
description = 'stage and configure a package but do not install'
section = "build"
level = "long"
build_system_to_phase = {
AutotoolsPackage: 'configure',
CMakePackage: 'cmake',
QMakePackage: 'qmake',
WafPackage: 'configure',
PerlPackage: 'configure',
IntelPackage: 'configure',
}
def setup_parser(subparser):
subparser.add_argument(
'package',
nargs=argparse.REMAINDER,
help="spec of the package to install"
)
subparser.add_argument(
'-v', '--verbose',
action='store_true',
help="print additional output during builds"
)
def _stop_at_phase_during_install(args, calling_fn, phase_mapping):
if not args.package:
tty.die("configure requires at least one package argument")
# TODO: to be refactored with code in install
specs = spack.cmd.parse_specs(args.package, concretize=True)
if len(specs) != 1:
tty.error('only one spec can be installed at a time.')
spec = specs.pop()
pkg = spec.package
try:
key = [cls for cls in phase_mapping if isinstance(pkg, cls)].pop()
phase = phase_mapping[key]
# Install package dependencies if needed
parser = argparse.ArgumentParser()
inst.setup_parser(parser)
tty.msg('Checking dependencies for {0}'.format(args.package))
cli_args = ['-v'] if args.verbose else []
install_args = parser.parse_args(cli_args + ['--only=dependencies'])
install_args.package = args.package
inst.install(parser, install_args)
# Install package and stop at the given phase
cli_args = ['-v'] if args.verbose else []
install_args = parser.parse_args(cli_args + ['--only=package'])
install_args.package = args.package
inst.install(parser, install_args, stop_at=phase)
except IndexError:
tty.error(
'Package {0} has no {1} phase, or its {1} phase is not separated from install'.format( # NOQA: ignore=E501
spec.name, calling_fn.__name__)
)
def configure(parser, args):
_stop_at_phase_during_install(args, configure, build_system_to_phase)
| lgpl-2.1 |
agileblaze/OpenStackTwoFactorAuthentication | horizon/openstack_dashboard/dashboards/project/instances/audit_tables.py | 59 | 2391 | # Copyright 2013 Metacloud, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from horizon.utils import filters
class AuditTable(tables.DataTable):
ACTION_DISPLAY_CHOICES = (
("create", pgettext_lazy("Action log of an instance", u"Create")),
("pause", pgettext_lazy("Action log of an instance", u"Pause")),
("unpause", pgettext_lazy("Action log of an instance", u"Unpause")),
("rebuild", pgettext_lazy("Action log of an instance", u"Rebuild")),
("resize", pgettext_lazy("Action log of an instance", u"Resize")),
("confirmresize", pgettext_lazy("Action log of an instance",
u"Confirm Resize")),
("suspend", pgettext_lazy("Action log of an instance", u"Suspend")),
("resume", pgettext_lazy("Action log of an instance", u"Resume")),
("reboot", pgettext_lazy("Action log of an instance", u"Reboot")),
("stop", pgettext_lazy("Action log of an instance", u"Stop")),
("start", pgettext_lazy("Action log of an instance", u"Start")),
)
request_id = tables.Column('request_id',
verbose_name=_('Request ID'))
action = tables.Column('action', verbose_name=_('Action'),
display_choices=ACTION_DISPLAY_CHOICES)
start_time = tables.Column('start_time', verbose_name=_('Start Time'),
filters=[filters.parse_isotime])
user_id = tables.Column('user_id', verbose_name=_('User ID'))
message = tables.Column('message', verbose_name=_('Message'))
class Meta(object):
name = 'audit'
verbose_name = _('Instance Action List')
def get_object_id(self, datum):
return datum.request_id
| apache-2.0 |
pepetreshere/odoo | addons/account/tests/test_reconciliation_matching_rules.py | 1 | 42618 | # -*- coding: utf-8 -*-
from freezegun import freeze_time
from odoo.addons.account.tests.common import AccountTestInvoicingCommon
from odoo.tests.common import Form
from odoo.tests import tagged
@tagged('post_install', '-at_install')
class TestReconciliationMatchingRules(AccountTestInvoicingCommon):
@classmethod
def setUpClass(cls, chart_template_ref=None):
super().setUpClass(chart_template_ref=chart_template_ref)
#################
# Company setup #
#################
cls.currency_data_2 = cls.setup_multi_currency_data({
'name': 'Dark Chocolate Coin',
'symbol': '🍫',
'currency_unit_label': 'Dark Choco',
'currency_subunit_label': 'Dark Cacao Powder',
}, rate2016=10.0, rate2017=20.0)
cls.company = cls.company_data['company']
cls.account_pay = cls.company_data['default_account_payable']
cls.current_assets_account = cls.env['account.account'].search([
('user_type_id', '=', cls.env.ref('account.data_account_type_current_assets').id),
('company_id', '=', cls.company.id)], limit=1)
cls.bank_journal = cls.env['account.journal'].search([('type', '=', 'bank'), ('company_id', '=', cls.company.id)], limit=1)
cls.cash_journal = cls.env['account.journal'].search([('type', '=', 'cash'), ('company_id', '=', cls.company.id)], limit=1)
cls.tax21 = cls.env['account.tax'].create({
'name': '21%',
'type_tax_use': 'purchase',
'amount': 21,
})
cls.tax12 = cls.env['account.tax'].create({
'name': '12%',
'type_tax_use': 'purchase',
'amount': 12,
})
cls.partner_1 = cls.env['res.partner'].create({'name': 'partner_1', 'company_id': cls.company.id})
cls.partner_2 = cls.env['res.partner'].create({'name': 'partner_2', 'company_id': cls.company.id})
cls.partner_3 = cls.env['res.partner'].create({'name': 'partner_3', 'company_id': cls.company.id})
###############
# Rules setup #
###############
cls.rule_1 = cls.env['account.reconcile.model'].create({
'name': 'Invoices Matching Rule',
'sequence': '1',
'rule_type': 'invoice_matching',
'auto_reconcile': False,
'match_nature': 'both',
'match_same_currency': True,
'match_total_amount': True,
'match_total_amount_param': 100,
'match_partner': True,
'match_partner_ids': [(6, 0, (cls.partner_1 + cls.partner_2 + cls.partner_3).ids)],
'company_id': cls.company.id,
'line_ids': [(0, 0, {'account_id': cls.current_assets_account.id})],
})
cls.rule_2 = cls.env['account.reconcile.model'].create({
'name': 'write-off model',
'rule_type': 'writeoff_suggestion',
'match_partner': True,
'match_partner_ids': [],
'line_ids': [(0, 0, {'account_id': cls.current_assets_account.id})],
})
##################
# Invoices setup #
##################
cls.invoice_line_1 = cls._create_invoice_line(100, cls.partner_1, 'out_invoice')
cls.invoice_line_2 = cls._create_invoice_line(200, cls.partner_1, 'out_invoice')
cls.invoice_line_3 = cls._create_invoice_line(300, cls.partner_1, 'in_refund', name="RBILL/2019/09/0013")
cls.invoice_line_4 = cls._create_invoice_line(1000, cls.partner_2, 'in_invoice')
cls.invoice_line_5 = cls._create_invoice_line(600, cls.partner_3, 'out_invoice')
cls.invoice_line_6 = cls._create_invoice_line(600, cls.partner_3, 'out_invoice', ref="RF12 3456")
cls.invoice_line_7 = cls._create_invoice_line(200, cls.partner_3, 'out_invoice', pay_reference="RF12 3456")
####################
# Statements setup #
####################
# TODO : account_number, partner_name, transaction_type, narration
invoice_number = cls.invoice_line_1.move_id.name
cls.bank_st, cls.bank_st_2, cls.cash_st = cls.env['account.bank.statement'].create([
{
'name': 'test bank journal',
'journal_id': cls.bank_journal.id,
'line_ids': [
(0, 0, {
'payment_ref': 'invoice %s-%s-%s' % tuple(invoice_number.split('/')[1:]),
'partner_id': cls.partner_1.id,
'amount': 100,
'sequence': 1,
}),
(0, 0, {
'payment_ref': 'xxxxx',
'partner_id': cls.partner_1.id,
'amount': 600,
'sequence': 2,
}),
],
}, {
'name': 'second test bank journal',
'journal_id': cls.bank_journal.id,
'line_ids': [
(0, 0, {
'payment_ref': 'nawak',
'narration': 'Communication: RF12 3456',
'partner_id': cls.partner_3.id,
'amount': 600,
'sequence': 1,
}),
(0, 0, {
'payment_ref': 'RF12 3456',
'partner_id': cls.partner_3.id,
'amount': 600,
'sequence': 2,
}),
(0, 0, {
'payment_ref': 'baaaaah',
'ref': 'RF12 3456',
'partner_id': cls.partner_3.id,
'amount': 600,
'sequence': 2,
}),
],
}, {
'name': 'test cash journal',
'journal_id': cls.cash_journal.id,
'line_ids': [
(0, 0, {
'payment_ref': 'yyyyy',
'partner_id': cls.partner_2.id,
'amount': -1000,
'sequence': 1,
}),
],
}
])
cls.bank_line_1, cls.bank_line_2 = cls.bank_st.line_ids
cls.bank_line_3, cls.bank_line_4, cls.bank_line_5 = cls.bank_st_2.line_ids
cls.cash_line_1 = cls.cash_st.line_ids
cls._post_statements(cls)
@classmethod
def _create_invoice_line(cls, amount, partner, type, currency=None, pay_reference=None, ref=None, name=None):
''' Create an invoice on the fly.'''
invoice_form = Form(cls.env['account.move'].with_context(default_move_type=type, default_invoice_date='2019-09-01', default_date='2019-09-01'))
invoice_form.partner_id = partner
if currency:
invoice_form.currency_id = currency
if pay_reference:
invoice_form.payment_reference = pay_reference
if ref:
invoice_form.ref = ref
if name:
invoice_form.name = name
with invoice_form.invoice_line_ids.new() as invoice_line_form:
invoice_line_form.name = 'xxxx'
invoice_line_form.quantity = 1
invoice_line_form.price_unit = amount
invoice_line_form.tax_ids.clear()
invoice = invoice_form.save()
invoice.action_post()
lines = invoice.line_ids
return lines.filtered(lambda l: l.account_id.user_type_id.type in ('receivable', 'payable'))
def _post_statements(self):
self.bank_st.balance_end_real = self.bank_st.balance_end
self.bank_st_2.balance_end_real = self.bank_st_2.balance_end
self.cash_st.balance_end_real = self.cash_st.balance_end
(self.bank_st + self.bank_st_2 + self.cash_st).button_post()
def _check_statement_matching(self, rules, expected_values, statements=None):
if statements is None:
statements = self.bank_st + self.cash_st
statement_lines = statements.mapped('line_ids').sorted()
matching_values = rules._apply_rules(statement_lines, None)
for st_line_id, values in matching_values.items():
values.pop('reconciled_lines', None)
values.pop('write_off_vals', None)
self.assertDictEqual(values, expected_values[st_line_id])
def test_matching_fields(self):
# Check without restriction.
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': [
self.invoice_line_2.id,
self.invoice_line_3.id,
self.invoice_line_1.id,
], 'model': self.rule_1,
'partner': self.bank_line_2.partner_id},
self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id},
})
def test_matching_fields_match_text_location(self):
self.rule_1.match_text_location_label = True
self.rule_1.match_text_location_reference = False
self.rule_1.match_text_location_note = False
self._check_statement_matching(self.rule_1, {
self.bank_line_3.id: {'aml_ids': [self.invoice_line_5.id], 'model': self.rule_1, 'partner': self.bank_line_3.partner_id},
self.bank_line_4.id: {'aml_ids': [self.invoice_line_7.id], 'model': self.rule_1, 'partner': self.bank_line_4.partner_id},
self.bank_line_5.id: {'aml_ids': [self.invoice_line_6.id], 'model': self.rule_1, 'partner': self.bank_line_5.partner_id},
}, statements=self.bank_st_2)
self.rule_1.match_text_location_label = True
self.rule_1.match_text_location_reference = False
self.rule_1.match_text_location_note = True
self._check_statement_matching(self.rule_1, {
self.bank_line_3.id: {'aml_ids': [self.invoice_line_6.id], 'model': self.rule_1, 'partner': self.bank_line_3.partner_id},
self.bank_line_4.id: {'aml_ids': [self.invoice_line_7.id], 'model': self.rule_1, 'partner': self.bank_line_4.partner_id},
self.bank_line_5.id: {'aml_ids': [self.invoice_line_5.id], 'model': self.rule_1, 'partner': self.bank_line_5.partner_id},
}, statements=self.bank_st_2)
self.rule_1.match_text_location_label = True
self.rule_1.match_text_location_reference = True
self.rule_1.match_text_location_note = False
self._check_statement_matching(self.rule_1, {
self.bank_line_3.id: {'aml_ids': [self.invoice_line_5.id], 'model': self.rule_1, 'partner': self.bank_line_3.partner_id},
self.bank_line_4.id: {'aml_ids': [self.invoice_line_7.id], 'model': self.rule_1, 'partner': self.bank_line_4.partner_id},
self.bank_line_5.id: {'aml_ids': [self.invoice_line_7.id], 'model': self.rule_1, 'partner': self.bank_line_5.partner_id},
}, statements=self.bank_st_2)
self.rule_1.match_text_location_label = True
self.rule_1.match_text_location_reference = True
self.rule_1.match_text_location_note = True
self._check_statement_matching(self.rule_1, {
self.bank_line_3.id: {'aml_ids': [self.invoice_line_6.id], 'model': self.rule_1, 'partner': self.bank_line_3.partner_id},
self.bank_line_4.id: {'aml_ids': [self.invoice_line_7.id], 'model': self.rule_1, 'partner': self.bank_line_4.partner_id},
self.bank_line_5.id: {'aml_ids': [self.invoice_line_7.id], 'model': self.rule_1, 'partner': self.bank_line_5.partner_id},
}, statements=self.bank_st_2)
self.rule_1.match_text_location_label = False
self.rule_1.match_text_location_reference = False
self.rule_1.match_text_location_note = False
self._check_statement_matching(self.rule_1, {
self.bank_line_3.id: {'aml_ids': [self.invoice_line_5.id], 'model': self.rule_1, 'partner': self.bank_line_3.partner_id},
self.bank_line_4.id: {'aml_ids': [self.invoice_line_5.id], 'model': self.rule_1, 'partner': self.bank_line_4.partner_id},
self.bank_line_5.id: {'aml_ids': [self.invoice_line_6.id], 'model': self.rule_1, 'partner': self.bank_line_5.partner_id},
}, statements=self.bank_st_2)
def test_matching_fields_match_journal_ids(self):
self.rule_1.match_journal_ids |= self.cash_st.journal_id
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': []},
self.bank_line_2.id: {'aml_ids': []},
self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id},
})
self.rule_1.match_journal_ids |= self.bank_st.journal_id + self.cash_st.journal_id
def test_matching_fields_match_nature(self):
self.rule_1.match_nature = 'amount_received'
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': [
self.invoice_line_2.id,
self.invoice_line_3.id,
self.invoice_line_1.id,
], 'model': self.rule_1, 'partner': self.bank_line_2.partner_id},
self.cash_line_1.id: {'aml_ids': []},
})
self.rule_1.match_nature = 'amount_paid'
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': []},
self.bank_line_2.id: {'aml_ids': []},
self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id},
})
self.rule_1.match_nature = 'both'
def test_matching_fields_match_amount(self):
self.rule_1.match_amount = 'lower'
self.rule_1.match_amount_max = 150
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': []},
self.cash_line_1.id: {'aml_ids': []},
})
self.rule_1.match_amount = 'greater'
self.rule_1.match_amount_min = 200
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': []},
self.bank_line_2.id: {'aml_ids': [
self.invoice_line_1.id,
self.invoice_line_2.id,
self.invoice_line_3.id,
], 'model': self.rule_1, 'partner': self.bank_line_2.partner_id},
self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id},
})
self.rule_1.match_amount = 'between'
self.rule_1.match_amount_min = 200
self.rule_1.match_amount_max = 800
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': []},
self.bank_line_2.id: {'aml_ids': [
self.invoice_line_1.id,
self.invoice_line_2.id,
self.invoice_line_3.id,
], 'model': self.rule_1, 'partner': self.bank_line_2.partner_id},
self.cash_line_1.id: {'aml_ids': []},
})
self.rule_1.match_amount = False
def test_matching_fields_match_label(self):
self.rule_1.match_label = 'contains'
self.rule_1.match_label_param = 'yyyyy'
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': []},
self.bank_line_2.id: {'aml_ids': []},
self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id},
})
self.rule_1.match_label = 'not_contains'
self.rule_1.match_label_param = 'xxxxx'
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': []},
self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id},
})
self.rule_1.match_label = 'match_regex'
self.rule_1.match_label_param = 'xxxxx|yyyyy'
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': []},
self.bank_line_2.id: {'aml_ids': [
self.invoice_line_1.id,
self.invoice_line_2.id,
self.invoice_line_3.id,
], 'model': self.rule_1, 'partner': self.bank_line_2.partner_id},
self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id},
})
self.rule_1.match_label = False
def test_matching_fields_match_total_amount(self):
# Check match_total_amount: line amount >= total residual amount.
self.rule_1.match_total_amount_param = 90.0
self.bank_line_1.amount += 5
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'status': 'write_off', 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': [
self.invoice_line_2.id,
self.invoice_line_3.id,
self.invoice_line_1.id,
], 'model': self.rule_1, 'partner': self.bank_line_2.partner_id},
self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id},
})
self.rule_1.match_total_amount_param = 100.0
self.bank_line_1.amount -= 5
# Check match_total_amount: line amount <= total residual amount.
self.rule_1.match_total_amount_param = 90.0
self.bank_line_1.amount -= 5
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'status': 'write_off', 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': [
self.invoice_line_2.id,
self.invoice_line_3.id,
self.invoice_line_1.id,
], 'model': self.rule_1, 'partner': self.bank_line_2.partner_id},
self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id},
})
self.rule_1.match_total_amount_param = 100.0
self.bank_line_1.amount += 5
def test_matching_fields_match_partner_category_ids(self):
test_category = self.env['res.partner.category'].create({'name': 'Consulting Services'})
self.partner_2.category_id = test_category
self.rule_1.match_partner_category_ids |= test_category
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': []},
self.bank_line_2.id: {'aml_ids': []},
self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id},
})
self.rule_1.match_partner_category_ids = False
def test_mixin_rules(self):
''' Test usage of rules together.'''
# rule_1 is used before rule_2.
self.rule_1.sequence = 1
self.rule_2.sequence = 2
self._check_statement_matching(self.rule_1 + self.rule_2, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': [
self.invoice_line_2.id,
self.invoice_line_3.id,
self.invoice_line_1.id,
], 'model': self.rule_1, 'partner': self.bank_line_2.partner_id},
self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id},
})
# rule_2 is used before rule_1.
self.rule_1.sequence = 2
self.rule_2.sequence = 1
self._check_statement_matching(self.rule_1 + self.rule_2, {
self.bank_line_1.id: {'aml_ids': [], 'model': self.rule_2, 'status': 'write_off', 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': [], 'model': self.rule_2, 'status': 'write_off', 'partner': self.bank_line_2.partner_id},
self.cash_line_1.id: {'aml_ids': [], 'model': self.rule_2, 'status': 'write_off', 'partner': self.cash_line_1.partner_id},
})
# rule_2 is used before rule_1 but only on partner_1.
self.rule_2.match_partner_ids |= self.partner_1
self._check_statement_matching(self.rule_1 + self.rule_2, {
self.bank_line_1.id: {'aml_ids': [], 'model': self.rule_2, 'status': 'write_off', 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': [], 'model': self.rule_2, 'status': 'write_off', 'partner': self.bank_line_2.partner_id},
self.cash_line_1.id: {'aml_ids': [self.invoice_line_4.id], 'model': self.rule_1, 'partner': self.cash_line_1.partner_id},
})
def test_auto_reconcile(self):
''' Test auto reconciliation.'''
self.bank_line_1.amount += 5
self.rule_1.sequence = 2
self.rule_1.auto_reconcile = True
self.rule_1.match_total_amount_param = 90
self.rule_2.sequence = 1
self.rule_2.match_partner_ids |= self.partner_2
self.rule_2.auto_reconcile = True
self._check_statement_matching(self.rule_1 + self.rule_2, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'status': 'reconciled', 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': []},
self.cash_line_1.id: {'aml_ids': [], 'model': self.rule_2, 'status': 'reconciled', 'partner': self.cash_line_1.partner_id},
})
# Check first line has been well reconciled.
self.assertRecordValues(self.bank_line_1.line_ids, [
{'partner_id': self.partner_1.id, 'debit': 105.0, 'credit': 0.0},
{'partner_id': self.partner_1.id, 'debit': 0.0, 'credit': 5.0},
{'partner_id': self.partner_1.id, 'debit': 0.0, 'credit': 100.0},
])
# Check second line has been well reconciled.
self.assertRecordValues(self.cash_line_1.line_ids, [
{'partner_id': self.partner_2.id, 'debit': 0.0, 'credit': 1000.0},
{'partner_id': self.partner_2.id, 'debit': 1000.0, 'credit': 0.0},
])
def test_larger_invoice_auto_reconcile(self):
''' Test auto reconciliation with an invoice with larger amount than the
statement line's, for rules without write-offs.'''
self.bank_line_1.amount = 40
self.invoice_line_1.move_id.payment_reference = self.bank_line_1.payment_ref
self.rule_1.sequence = 2
self.rule_1.auto_reconcile = True
self.rule_1.line_ids = [(5, 0, 0)]
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'status': 'reconciled', 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': []},
}, statements=self.bank_st)
# Check first line has been well reconciled.
self.assertRecordValues(self.bank_line_1.line_ids, [
{'partner_id': self.partner_1.id, 'debit': 40.0, 'credit': 0.0},
{'partner_id': self.partner_1.id, 'debit': 0.0, 'credit': 40.0},
])
self.assertEqual(self.invoice_line_1.amount_residual, 60.0, "The invoice should have been partially reconciled")
def test_auto_reconcile_with_tax(self):
''' Test auto reconciliation with a tax amount included in the bank statement line'''
self.rule_1.write({
'auto_reconcile': True,
'rule_type': 'writeoff_suggestion',
'line_ids': [(1, self.rule_1.line_ids.id, {
'amount': 50,
'force_tax_included': True,
'tax_ids': [(6, 0, self.tax21.ids)],
}), (0, 0, {
'amount': 100,
'force_tax_included': False,
'tax_ids': [(6, 0, self.tax12.ids)],
'account_id': self.current_assets_account.id,
})]
})
self.bank_line_1.amount = -121
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [], 'model': self.rule_1, 'status': 'reconciled', 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': [], 'model': self.rule_1, 'status': 'reconciled', 'partner': self.bank_line_2.partner_id},
}, statements=self.bank_st)
# Check first line has been well reconciled.
self.assertRecordValues(self.bank_line_1.line_ids, [
{'partner_id': self.partner_1.id, 'debit': 0.0, 'credit': 121.0, 'tax_ids': [], 'tax_line_id': False},
{'partner_id': self.partner_1.id, 'debit': 0.0, 'credit': 7.26, 'tax_ids': [], 'tax_line_id': False},
{'partner_id': self.partner_1.id, 'debit': 50.0, 'credit': 0.0, 'tax_ids': [self.tax21.id], 'tax_line_id': False},
{'partner_id': self.partner_1.id, 'debit': 10.5, 'credit': 0.0, 'tax_ids': [], 'tax_line_id': self.tax21.id},
{'partner_id': self.partner_1.id, 'debit': 60.5, 'credit': 0.0, 'tax_ids': [self.tax12.id], 'tax_line_id': False},
{'partner_id': self.partner_1.id, 'debit': 7.26, 'credit': 0.0, 'tax_ids': [], 'tax_line_id': self.tax12.id},
])
def test_reverted_move_matching(self):
partner = self.partner_1
AccountMove = self.env['account.move']
move = AccountMove.create({
'journal_id': self.bank_journal.id,
'line_ids': [
(0, 0, {
'account_id': self.account_pay.id,
'partner_id': partner.id,
'name': 'One of these days',
'debit': 10,
}),
(0, 0, {
'account_id': self.bank_journal.payment_credit_account_id.id,
'partner_id': partner.id,
'name': 'I\'m gonna cut you into little pieces',
'credit': 10,
})
],
})
payment_bnk_line = move.line_ids.filtered(lambda l: l.account_id == self.bank_journal.payment_credit_account_id)
move.action_post()
move_reversed = move._reverse_moves()
self.assertTrue(move_reversed.exists())
self.bank_line_1.write({
'payment_ref': '8',
'partner_id': partner.id,
'amount': -10,
})
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [payment_bnk_line.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': []},
}, statements=self.bank_st)
def test_match_different_currencies(self):
partner = self.env['res.partner'].create({'name': 'Bernard Gagnant'})
self.rule_1.write({'match_partner_ids': [(6, 0, partner.ids)], 'match_same_currency': False})
currency_inv = self.env.ref('base.EUR')
currency_statement = self.env.ref('base.JPY')
currency_statement.active = True
invoice_line = self._create_invoice_line(100, partner, 'out_invoice', currency=currency_inv)
self.bank_line_1.write({'partner_id': partner.id, 'foreign_currency_id': currency_statement.id, 'amount_currency': 100, 'payment_ref': 'test'})
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': invoice_line.ids, 'model': self.rule_1, 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': []},
}, statements=self.bank_st)
def test_invoice_matching_rule_no_partner(self):
""" Tests that a statement line without any partner can be matched to the
right invoice if they have the same payment reference.
"""
self.invoice_line_1.move_id.write({'payment_reference': 'Tournicoti66'})
self.bank_line_1.write({
'payment_ref': 'Tournicoti66',
'partner_id': None,
'amount': 95,
})
self.rule_1.write({
'line_ids': [(5, 0, 0)],
'match_partner': False,
'match_label': 'contains',
'match_label_param': 'Tournicoti', # So that we only match what we want to test
})
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': []},
}, self.bank_st)
def test_inv_matching_rule_auto_rec_no_partner_with_writeoff(self):
self.invoice_line_1.move_id.write({'payment_reference': 'doudlidou355'})
self.bank_line_1.write({
'payment_ref': 'doudlidou355',
'partner_id': None,
'amount': 95,
})
self.rule_1.write({
'match_partner': False,
'match_label': 'contains',
'match_label_param': 'doudlidou', # So that we only match what we want to test
'match_total_amount_param': 90,
'auto_reconcile': True,
})
# Check bank reconciliation
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id, 'status': 'reconciled'},
self.bank_line_2.id: {'aml_ids': []},
}, self.bank_st)
# Check invoice line has been fully reconciled, with a write-off.
self.assertRecordValues(self.bank_line_1.line_ids, [
{'partner_id': self.partner_1.id, 'debit': 95.0, 'credit': 0.0, 'account_id': self.bank_journal.default_account_id.id, 'reconciled': False},
{'partner_id': self.partner_1.id, 'debit': 5.0, 'credit': 0.0, 'account_id': self.current_assets_account.id, 'reconciled': False},
{'partner_id': self.partner_1.id, 'debit': 0.0, 'credit': 100.0, 'account_id': self.invoice_line_1.account_id.id, 'reconciled': True},
])
self.assertEqual(self.invoice_line_1.amount_residual, 0.0, "The invoice should have been fully reconciled")
def test_partner_mapping_rule(self):
self.bank_line_1.write({'partner_id': None, 'payment_ref': 'toto42', 'narration': None})
self.bank_line_2.write({'partner_id': None})
# Do the test for both rule 1 and 2, so that we check invoice matching and write-off rules
for rule in (self.rule_1 + self.rule_2):
            # To cope with minor differences in rule results
matching_amls = rule.rule_type == 'invoice_matching' and self.invoice_line_1.ids or []
result_status = rule.rule_type == 'writeoff_suggestion' and {'status': 'write_off'} or {}
match_result = {**result_status, 'aml_ids': matching_amls, 'model': rule, 'partner': self.partner_1}
no_match_result = {'aml_ids': []}
# Without mapping, there should be no match
self._check_statement_matching(rule, {
self.bank_line_1.id: no_match_result,
self.bank_line_2.id: no_match_result,
}, self.bank_st)
# We add some mapping for payment reference to rule_1
rule.write({
'partner_mapping_line_ids': [(0, 0, {
'partner_id': self.partner_1.id,
'payment_ref_regex': 'toto.*',
})]
})
# bank_line_1 should now match
self._check_statement_matching(rule, {
self.bank_line_1.id: match_result,
self.bank_line_2.id: no_match_result,
}, self.bank_st)
# If we now add a narration regex to the same mapping line, nothing should match
rule.partner_mapping_line_ids.write({'narration_regex': ".*coincoin"})
self.bank_line_1.write({'narration': None}) # Reset from possible previous iteration
self._check_statement_matching(rule, {
self.bank_line_1.id: no_match_result,
self.bank_line_2.id: no_match_result,
}, self.bank_st)
            # If we set the narration so that it matches the new mapping criterion, line_1 matches
self.bank_line_1.write({'narration': "42coincoin"})
self._check_statement_matching(rule, {
self.bank_line_1.id: match_result,
self.bank_line_2.id: no_match_result,
}, self.bank_st)
def test_partner_name_in_communication(self):
self.invoice_line_1.partner_id.write({'name': "Archibald Haddock"})
self.bank_line_1.write({'partner_id': None, 'payment_ref': '1234//HADDOCK-Archibald'})
self.bank_line_2.write({'partner_id': None})
self.rule_1.write({'match_partner': False})
# bank_line_1 should match, as its communication contains the invoice's partner name
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': []},
}, self.bank_st)
def test_partner_name_with_regexp_chars(self):
self.invoice_line_1.partner_id.write({'name': "Archibald + Haddock"})
self.bank_line_1.write({'partner_id': None, 'payment_ref': '1234//HADDOCK+Archibald'})
self.bank_line_2.write({'partner_id': None})
self.rule_1.write({'match_partner': False})
# The query should still work
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': []},
}, self.bank_st)
def test_match_multi_currencies(self):
''' Ensure the matching of candidates is made using the right statement line currency.
In this test, the value of the statement line is 100 USD = 300 GOL = 900 DAR and we want to match two journal
items of:
- 100 USD = 200 GOL (= 600 DAR from the statement line point of view)
- 14 USD = 280 DAR
        Both journal items should be suggested to the user because they represent 98% of the statement line amount
(DAR).
'''
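        # Worked figures behind the docstring (informal sketch, derived from the
        # amounts set up below): the statement line is 100 USD, booked as 300 GOL
        # and 900 DAR, i.e. 3 DAR per GOL from the line's point of view. The first
        # candidate (200 GOL) is therefore seen as 200 * 3 = 600 DAR and the second
        # one adds 280 DAR, so together they cover (600 + 280) / 900 ~= 98%, which
        # is above the 95% match_total_amount_param threshold.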
partner = self.env['res.partner'].create({'name': 'Bernard Perdant'})
journal = self.env['account.journal'].create({
'name': 'test_match_multi_currencies',
'code': 'xxxx',
'type': 'bank',
'currency_id': self.currency_data['currency'].id,
})
matching_rule = self.env['account.reconcile.model'].create({
'name': 'test_match_multi_currencies',
'rule_type': 'invoice_matching',
'match_partner': True,
'match_partner_ids': [(6, 0, partner.ids)],
'match_total_amount': True,
'match_total_amount_param': 95.0,
'match_same_currency': False,
'company_id': self.company_data['company'].id,
})
statement = self.env['account.bank.statement'].create({
'name': 'test_match_multi_currencies',
'journal_id': journal.id,
'line_ids': [
(0, 0, {
'journal_id': journal.id,
'date': '2016-01-01',
'payment_ref': 'line',
'partner_id': partner.id,
'foreign_currency_id': self.currency_data_2['currency'].id,
'amount': 300.0, # Rate is 3 GOL = 1 USD in 2016.
'amount_currency': 900.0, # Rate is 10 DAR = 1 USD in 2016 but the rate used by the bank is 9:1.
}),
],
})
statement_line = statement.line_ids
statement.button_post()
move = self.env['account.move'].create({
'move_type': 'entry',
'date': '2017-01-01',
'journal_id': self.company_data['default_journal_sale'].id,
'line_ids': [
# Rate is 2 GOL = 1 USD in 2017.
# The statement line will consider this line equivalent to 600 DAR.
(0, 0, {
'account_id': self.company_data['default_account_receivable'].id,
'partner_id': partner.id,
'currency_id': self.currency_data['currency'].id,
'debit': 100.0,
'credit': 0.0,
'amount_currency': 200.0,
}),
# Rate is 20 GOL = 1 USD in 2017.
(0, 0, {
'account_id': self.company_data['default_account_receivable'].id,
'partner_id': partner.id,
'currency_id': self.currency_data_2['currency'].id,
'debit': 14.0,
'credit': 0.0,
'amount_currency': 280.0,
}),
# Line to balance the journal entry:
(0, 0, {
'account_id': self.company_data['default_account_revenue'].id,
'debit': 0.0,
'credit': 114.0,
}),
],
})
move.action_post()
move_line_1 = move.line_ids.filtered(lambda line: line.debit == 100.0)
move_line_2 = move.line_ids.filtered(lambda line: line.debit == 14.0)
with freeze_time('2017-01-01'):
self._check_statement_matching(matching_rule, {
statement_line.id: {'aml_ids': (move_line_1 + move_line_2).ids, 'model': matching_rule, 'partner': statement_line.partner_id}
}, statements=statement)
def test_inv_matching_with_write_off(self):
self.rule_1.match_total_amount_param = 90
self.bank_st.line_ids[1].unlink() # We don't need this one here
statement_line = self.bank_st.line_ids[0]
statement_line.write({
'payment_ref': self.invoice_line_1.move_id.payment_reference,
'amount': 90,
})
# Test the invoice-matching part
self._check_statement_matching(self.rule_1, {
statement_line.id: {'aml_ids': self.invoice_line_1.ids, 'model': self.rule_1, 'partner': self.invoice_line_1.partner_id, 'status': 'write_off'},
}, self.bank_st)
# Test the write-off part
expected_write_off = {
'balance': 10,
'currency_id': False,
'reconcile_model_id': self.rule_1.id,
'account_id': self.current_assets_account.id,
}
matching_result = self.rule_1._apply_rules(statement_line)
self.assertEqual(len(matching_result[statement_line.id].get('write_off_vals', [])), 1, "Exactly one write-off line should be proposed.")
full_write_off_dict = matching_result[statement_line.id]['write_off_vals'][0]
to_compare = {
key: full_write_off_dict[key]
for key in expected_write_off.keys()
}
self.assertDictEqual(expected_write_off, to_compare)
def test_inv_matching_with_write_off_autoreconcile(self):
self.bank_line_1.amount = 95
self.rule_1.sequence = 2
self.rule_1.auto_reconcile = True
self.rule_1.match_total_amount_param = 90
self._check_statement_matching(self.rule_1, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'status': 'reconciled', 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': []},
}, statements=self.bank_st)
# Check first line has been properly reconciled.
self.assertRecordValues(self.bank_line_1.line_ids, [
{'partner_id': self.partner_1.id, 'debit': 95.0, 'credit': 0.0, 'account_id': self.bank_journal.default_account_id.id, 'reconciled': False},
{'partner_id': self.partner_1.id, 'debit': 5.0, 'credit': 0.0, 'account_id': self.current_assets_account.id, 'reconciled': False},
{'partner_id': self.partner_1.id, 'debit': 0.0, 'credit': 100.0, 'account_id': self.invoice_line_1.account_id.id, 'reconciled': True},
])
self.assertEqual(self.invoice_line_1.amount_residual, 0.0, "The invoice should have been fully reconciled")
def test_avoid_amount_matching_bypass(self):
""" By the default, if the label of statement lines exactly matches a payment reference, it bypasses any kind of amount verification.
This is annoying in some setups, so a config parameter was introduced to handle that.
"""
self.env['ir.config_parameter'].set_param('account.disable_rec_models_bypass', '1')
self.rule_1.match_total_amount_param = 90
second_inv_matching_rule = self.env['account.reconcile.model'].create({
'name': 'Invoices Matching Rule',
'sequence': 2,
'rule_type': 'invoice_matching',
'auto_reconcile': False,
'match_nature': 'both',
'match_same_currency': False,
'match_total_amount': False,
'match_partner': True,
'company_id': self.company.id,
})
self.bank_line_1.write({
'payment_ref': self.invoice_line_1.move_id.payment_reference,
'amount': 99,
})
self.bank_line_2.write({
'payment_ref': self.invoice_line_2.move_id.payment_reference,
'amount': 1,
})
self._check_statement_matching(self.rule_1 + second_inv_matching_rule, {
self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'status': 'write_off', 'partner': self.bank_line_1.partner_id},
self.bank_line_2.id: {'aml_ids': [self.invoice_line_2.id], 'model': second_inv_matching_rule, 'partner': self.bank_line_2.partner_id}
}, statements=self.bank_st)
| agpl-3.0 |
bourreauEric/or-tools | examples/python/max_flow_taha.py | 34 | 3443 | # Copyright 2010 Hakan Kjellerstrand [email protected]
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Max flow problem in Google CP Solver.
From Taha 'Introduction to Operations Research', Example 6.4-2
Translated from the AMPL code at
http://taha.ineg.uark.edu/maxflo.txt
Compare with the following model:
* MiniZinc: http://www.hakank.org/minizinc/max_flow_taha.mzn
This model was created by Hakan Kjellerstrand ([email protected])
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from ortools.constraint_solver import pywrapcp
def main():
# Create the solver.
solver = pywrapcp.Solver('Max flow problem, Taha')
#
# data
#
n = 5
start = 0
end = n - 1
nodes = range(n)
  # capacity matrix (maximum flow allowed on each arc)
c = [
[0, 20, 30, 10, 0],
[0, 0, 40, 0, 30],
[0, 0, 0, 10, 20],
[0, 0, 5, 0, 20],
[0, 0, 0, 0, 0]
]
#
# declare variables
#
x = {}
for i in nodes:
for j in nodes:
x[i, j] = solver.IntVar(0, c[i][j], 'x[%i,%i]' % (i, j))
x_flat = [x[i, j] for i in nodes for j in nodes]
out_flow = [solver.IntVar(0, 10000, 'out_flow[%i]' % i) for i in nodes]
in_flow = [solver.IntVar(0, 10000, 'in_flow[%i]' % i) for i in nodes]
total = solver.IntVar(0, 10000, 'z')
#
# constraints
#
cost_sum = solver.Sum([x[start, j] for j in nodes if c[start][j] > 0])
solver.Add(total == cost_sum)
for i in nodes:
in_flow_sum = solver.Sum([x[j, i] for j in nodes if c[j][i] > 0])
solver.Add(in_flow[i] == in_flow_sum)
out_flow_sum = solver.Sum([x[i, j] for j in nodes if c[i][j] > 0])
solver.Add(out_flow[i] == out_flow_sum)
# in_flow == out_flow
for i in nodes:
if i != start and i != end:
solver.Add(out_flow[i] - in_flow[i] == 0)
s1 = [x[i, start] for i in nodes if c[i][start] > 0]
if len(s1) > 0:
solver.Add(solver.Sum([x[i, start]
                           for i in nodes if c[i][start] > 0]) == 0)
s2 = [x[end, j] for j in nodes if c[end][j] > 0]
if len(s2) > 0:
solver.Add(solver.Sum([x[end, j]
for j in nodes if c[end][j] > 0]) == 0)
  # objective: maximize the total flow out of the source
objective = solver.Maximize(total, 1)
#
# solution and search
#
db = solver.Phase(x_flat,
solver.INT_VAR_DEFAULT,
solver.ASSIGN_MAX_VALUE)
solver.NewSearch(db, [objective])
num_solutions = 0
while solver.NextSolution():
num_solutions += 1
print 'total:', total.Value()
print 'in_flow:', [in_flow[i].Value() for i in nodes]
print 'out_flow:', [out_flow[i].Value() for i in nodes]
for i in nodes:
for j in nodes:
print '%2i' % x[i, j].Value(),
print
print
print 'num_solutions:', num_solutions
print 'failures:', solver.Failures()
print 'branches:', solver.Branches()
print 'WallTime:', solver.WallTime(), 'ms'
if __name__ == '__main__':
main()
| apache-2.0 |
epssy/hue | desktop/core/ext-py/elementtree/selftest.py | 45 | 28405 | # $Id: selftest.py 2326 2005-03-17 07:45:21Z fredrik $
# -*- coding: iso-8859-1 -*-
# elementtree selftest program
# this test script uses Python's "doctest" module to check that the
# *test script* works as expected.
# TODO: add more elementtree method tests
# TODO: add xml/html parsing tests
# TODO: etc
import sys, string, StringIO
from elementtree import ElementTree
from elementtree import ElementPath
from elementtree import ElementInclude
from elementtree import HTMLTreeBuilder
from elementtree import SimpleXMLWriter
def serialize(elem, encoding=None):
import StringIO
file = StringIO.StringIO()
tree = ElementTree.ElementTree(elem)
if encoding:
tree.write(file, encoding)
else:
tree.write(file)
return file.getvalue()
def summarize(elem):
return elem.tag
def summarize_list(seq):
return map(summarize, seq)
def normalize_crlf(tree):
for elem in tree.getiterator():
if elem.text: elem.text = string.replace(elem.text, "\r\n", "\n")
if elem.tail: elem.tail = string.replace(elem.tail, "\r\n", "\n")
SAMPLE_XML = ElementTree.XML("""
<body>
<tag>text</tag>
<tag />
<section>
<tag>subtext</tag>
</section>
</body>
""")
#
# interface tests
def check_string(string):
len(string)
for char in string:
if len(char) != 1:
print "expected one-character string, got %r" % char
new_string = string + ""
new_string = string + " "
string[:0]
def check_string_or_none(value):
if value is None:
return
return check_string(value)
def check_mapping(mapping):
len(mapping)
keys = mapping.keys()
items = mapping.items()
for key in keys:
item = mapping[key]
mapping["key"] = "value"
if mapping["key"] != "value":
print "expected value string, got %r" % mapping["key"]
def check_element(element):
if not hasattr(element, "tag"):
print "no tag member"
if not hasattr(element, "attrib"):
print "no attrib member"
if not hasattr(element, "text"):
print "no text member"
if not hasattr(element, "tail"):
print "no tail member"
check_string(element.tag)
check_mapping(element.attrib)
check_string_or_none(element.text)
check_string_or_none(element.tail)
for elem in element:
check_element(elem)
def check_element_tree(tree):
check_element(tree.getroot())
# --------------------------------------------------------------------
# element tree tests
def sanity():
"""
>>> from elementtree.ElementTree import *
>>> from elementtree.ElementInclude import *
>>> from elementtree.ElementPath import *
>>> from elementtree.HTMLTreeBuilder import *
>>> from elementtree.SimpleXMLTreeBuilder import *
>>> from elementtree.SimpleXMLWriter import *
>>> from elementtree.TidyTools import *
>>> from elementtree.XMLTreeBuilder import *
"""
def version():
"""
>>> ElementTree.VERSION
'1.2.6'
"""
def interface():
"""
Test element tree interface.
>>> element = ElementTree.Element("tag")
>>> check_element(element)
>>> tree = ElementTree.ElementTree(element)
>>> check_element_tree(tree)
"""
def simplefind():
"""
Test find methods using the elementpath fallback.
>>> CurrentElementPath = ElementTree.ElementPath
>>> ElementTree.ElementPath = ElementTree._SimpleElementPath()
>>> elem = SAMPLE_XML
>>> elem.find("tag").tag
'tag'
>>> ElementTree.ElementTree(elem).find("tag").tag
'tag'
>>> elem.findtext("tag")
'text'
>>> elem.findtext("tog")
>>> elem.findtext("tog", "default")
'default'
>>> ElementTree.ElementTree(elem).findtext("tag")
'text'
>>> summarize_list(elem.findall("tag"))
['tag', 'tag']
>>> summarize_list(elem.findall(".//tag"))
['tag', 'tag', 'tag']
Path syntax doesn't work in this case.
>>> elem.find("section/tag")
>>> elem.findtext("section/tag")
>>> elem.findall("section/tag")
[]
>>> ElementTree.ElementPath = CurrentElementPath
"""
def find():
"""
Test find methods (including xpath syntax).
>>> elem = SAMPLE_XML
>>> elem.find("tag").tag
'tag'
>>> ElementTree.ElementTree(elem).find("tag").tag
'tag'
>>> elem.find("section/tag").tag
'tag'
>>> ElementTree.ElementTree(elem).find("section/tag").tag
'tag'
>>> elem.findtext("tag")
'text'
>>> elem.findtext("tog")
>>> elem.findtext("tog", "default")
'default'
>>> ElementTree.ElementTree(elem).findtext("tag")
'text'
>>> elem.findtext("section/tag")
'subtext'
>>> ElementTree.ElementTree(elem).findtext("section/tag")
'subtext'
>>> summarize_list(elem.findall("tag"))
['tag', 'tag']
>>> summarize_list(elem.findall("*"))
['tag', 'tag', 'section']
>>> summarize_list(elem.findall(".//tag"))
['tag', 'tag', 'tag']
>>> summarize_list(elem.findall("section/tag"))
['tag']
>>> summarize_list(elem.findall("section//tag"))
['tag']
>>> summarize_list(elem.findall("section/*"))
['tag']
>>> summarize_list(elem.findall("section//*"))
['tag']
>>> summarize_list(elem.findall("section/.//*"))
['tag']
>>> summarize_list(elem.findall("*/*"))
['tag']
>>> summarize_list(elem.findall("*//*"))
['tag']
>>> summarize_list(elem.findall("*/tag"))
['tag']
>>> summarize_list(elem.findall("*/./tag"))
['tag']
>>> summarize_list(elem.findall("./tag"))
['tag', 'tag']
>>> summarize_list(elem.findall(".//tag"))
['tag', 'tag', 'tag']
>>> summarize_list(elem.findall("././tag"))
['tag', 'tag']
>>> summarize_list(ElementTree.ElementTree(elem).findall("/tag"))
['tag', 'tag']
>>> summarize_list(ElementTree.ElementTree(elem).findall("./tag"))
['tag', 'tag']
"""
def bad_find():
"""
Check bad or unsupported path expressions.
>>> elem = SAMPLE_XML
>>> elem.findall("/tag")
Traceback (most recent call last):
SyntaxError: cannot use absolute path on element
>>> elem.findall("../tag")
Traceback (most recent call last):
SyntaxError: unsupported path syntax (..)
>>> elem.findall("section//")
Traceback (most recent call last):
SyntaxError: path cannot end with //
>>> elem.findall("tag[tag]")
Traceback (most recent call last):
SyntaxError: expected path separator ([)
"""
def parsefile():
"""
Test parsing from file.
>>> tree = ElementTree.parse("samples/simple.xml")
>>> normalize_crlf(tree)
>>> tree.write(sys.stdout)
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
>>> tree = ElementTree.parse("samples/simple-ns.xml")
>>> normalize_crlf(tree)
>>> tree.write(sys.stdout)
<ns0:root xmlns:ns0="namespace">
<ns0:element key="value">text</ns0:element>
<ns0:element>text</ns0:element>tail
<ns0:empty-element />
</ns0:root>
"""
def parsehtml():
"""
Test HTML parsing.
>>> p = HTMLTreeBuilder.TreeBuilder()
>>> p.feed("<p><p>spam<b>egg</b></p>")
>>> serialize(p.close())
'<p>spam<b>egg</b></p>'
"""
def parseliteral():
r"""
>>> element = ElementTree.XML("<html><body>text</body></html>")
>>> ElementTree.ElementTree(element).write(sys.stdout)
<html><body>text</body></html>
>>> element = ElementTree.fromstring("<html><body>text</body></html>")
>>> ElementTree.ElementTree(element).write(sys.stdout)
<html><body>text</body></html>
>>> print ElementTree.tostring(element)
<html><body>text</body></html>
>>> print ElementTree.tostring(element, "ascii")
<?xml version='1.0' encoding='ascii'?>
<html><body>text</body></html>
>>> _, ids = ElementTree.XMLID("<html><body>text</body></html>")
>>> len(ids)
0
>>> _, ids = ElementTree.XMLID("<html><body id='body'>text</body></html>")
>>> len(ids)
1
>>> ids["body"].tag
'body'
"""
def simpleparsefile():
"""
Test the xmllib-based parser.
>>> from elementtree import SimpleXMLTreeBuilder
>>> parser = SimpleXMLTreeBuilder.TreeBuilder()
>>> tree = ElementTree.parse("samples/simple.xml", parser)
>>> normalize_crlf(tree)
>>> tree.write(sys.stdout)
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
"""
def iterparse():
"""
Test iterparse interface.
>>> iterparse = ElementTree.iterparse
>>> context = iterparse("samples/simple.xml")
>>> for action, elem in context:
... print action, elem.tag
end element
end element
end empty-element
end root
>>> context.root.tag
'root'
>>> context = iterparse("samples/simple-ns.xml")
>>> for action, elem in context:
... print action, elem.tag
end {namespace}element
end {namespace}element
end {namespace}empty-element
end {namespace}root
>>> events = ()
>>> context = iterparse("samples/simple.xml", events)
>>> for action, elem in context:
... print action, elem.tag
>>> events = ()
>>> context = iterparse("samples/simple.xml", events=events)
>>> for action, elem in context:
... print action, elem.tag
>>> events = ("start", "end")
>>> context = iterparse("samples/simple.xml", events)
>>> for action, elem in context:
... print action, elem.tag
start root
start element
end element
start element
end element
start empty-element
end empty-element
end root
>>> events = ("start", "end", "start-ns", "end-ns")
>>> context = iterparse("samples/simple-ns.xml", events)
>>> for action, elem in context:
... if action in ("start", "end"):
... print action, elem.tag
... else:
... print action, elem
start-ns ('', 'namespace')
start {namespace}root
start {namespace}element
end {namespace}element
start {namespace}element
end {namespace}element
start {namespace}empty-element
end {namespace}empty-element
end {namespace}root
end-ns None
"""
def fancyparsefile():
"""
Test the "fancy" parser.
Sanity check.
>>> from elementtree import XMLTreeBuilder
>>> parser = XMLTreeBuilder.FancyTreeBuilder()
>>> tree = ElementTree.parse("samples/simple.xml", parser)
>>> normalize_crlf(tree)
>>> tree.write(sys.stdout)
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
Callback check.
>>> class MyFancyParser(XMLTreeBuilder.FancyTreeBuilder):
... def start(self, elem):
... print "START", elem.tag
... def end(self, elem):
... print "END", elem.tag
>>> parser = MyFancyParser()
>>> tree = ElementTree.parse("samples/simple.xml", parser)
START root
START element
END element
START element
END element
START empty-element
END empty-element
END root
"""
def writefile():
"""
>>> elem = ElementTree.Element("tag")
>>> elem.text = "text"
>>> serialize(elem)
'<tag>text</tag>'
>>> ElementTree.SubElement(elem, "subtag").text = "subtext"
>>> serialize(elem)
'<tag>text<subtag>subtext</subtag></tag>'
"""
def writestring():
"""
>>> elem = ElementTree.XML("<html><body>text</body></html>")
>>> ElementTree.tostring(elem)
'<html><body>text</body></html>'
>>> elem = ElementTree.fromstring("<html><body>text</body></html>")
>>> ElementTree.tostring(elem)
'<html><body>text</body></html>'
"""
def encoding():
r"""
Test encoding issues.
>>> elem = ElementTree.Element("tag")
>>> elem.text = u"abc"
>>> serialize(elem)
'<tag>abc</tag>'
>>> serialize(elem, "utf-8")
'<tag>abc</tag>'
>>> serialize(elem, "us-ascii")
'<tag>abc</tag>'
>>> serialize(elem, "iso-8859-1")
"<?xml version='1.0' encoding='iso-8859-1'?>\n<tag>abc</tag>"
>>> elem.text = "<&\"\'>"
>>> serialize(elem)
'<tag><&"\'></tag>'
>>> serialize(elem, "utf-8")
'<tag><&"\'></tag>'
>>> serialize(elem, "us-ascii") # cdata characters
'<tag><&"\'></tag>'
>>> serialize(elem, "iso-8859-1")
'<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag><&"\'></tag>'
>>> elem.attrib["key"] = "<&\"\'>"
>>> elem.text = None
>>> serialize(elem)
'<tag key="<&"'>" />'
>>> serialize(elem, "utf-8")
'<tag key="<&"'>" />'
>>> serialize(elem, "us-ascii")
'<tag key="<&"'>" />'
>>> serialize(elem, "iso-8859-1")
'<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag key="<&"'>" />'
>>> elem.text = u'\xe5\xf6\xf6<>'
>>> elem.attrib.clear()
>>> serialize(elem)
'<tag>åöö<></tag>'
>>> serialize(elem, "utf-8")
'<tag>\xc3\xa5\xc3\xb6\xc3\xb6<></tag>'
>>> serialize(elem, "us-ascii")
'<tag>åöö<></tag>'
>>> serialize(elem, "iso-8859-1")
"<?xml version='1.0' encoding='iso-8859-1'?>\n<tag>\xe5\xf6\xf6<></tag>"
>>> elem.attrib["key"] = u'\xe5\xf6\xf6<>'
>>> elem.text = None
>>> serialize(elem)
'<tag key="åöö<>" />'
>>> serialize(elem, "utf-8")
'<tag key="\xc3\xa5\xc3\xb6\xc3\xb6<>" />'
>>> serialize(elem, "us-ascii")
'<tag key="åöö<>" />'
>>> serialize(elem, "iso-8859-1")
'<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag key="\xe5\xf6\xf6<>" />'
"""
ENTITY_XML = """\
<!DOCTYPE points [
<!ENTITY % user-entities SYSTEM 'user-entities.xml'>
%user-entities;
]>
<document>&entity;</document>
"""
def entity():
"""
Test entity handling.
1) bad entities
>>> ElementTree.XML("<document>&entity;</document>")
Traceback (most recent call last):
ExpatError: undefined entity: line 1, column 10
>>> ElementTree.XML(ENTITY_XML)
Traceback (most recent call last):
ExpatError: undefined entity &entity;: line 5, column 10
(add more tests here)
"""
def namespace():
"""
Test namespace issues.
1) xml namespace
>>> elem = ElementTree.XML("<tag xml:lang='en' />")
>>> serialize(elem) # 1.1
'<tag xml:lang="en" />'
2) other "well-known" namespaces
>>> elem = ElementTree.XML("<rdf:RDF xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#' />")
>>> serialize(elem) # 2.1
'<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" />'
>>> elem = ElementTree.XML("<html:html xmlns:html='http://www.w3.org/1999/xhtml' />")
>>> serialize(elem) # 2.2
'<html:html xmlns:html="http://www.w3.org/1999/xhtml" />'
>>> elem = ElementTree.XML("<soap:Envelope xmlns:soap='http://schemas.xmlsoap.org/soap/envelope' />")
>>> serialize(elem) # 2.3
'<ns0:Envelope xmlns:ns0="http://schemas.xmlsoap.org/soap/envelope" />'
3) unknown namespaces
"""
def qname():
"""
Test QName handling.
1) decorated tags
>>> elem = ElementTree.Element("{uri}tag")
>>> serialize(elem) # 1.1
'<ns0:tag xmlns:ns0="uri" />'
>>> elem = ElementTree.Element(ElementTree.QName("{uri}tag"))
>>> serialize(elem) # 1.2
'<ns0:tag xmlns:ns0="uri" />'
>>> elem = ElementTree.Element(ElementTree.QName("uri", "tag"))
>>> serialize(elem) # 1.3
'<ns0:tag xmlns:ns0="uri" />'
2) decorated attributes
>>> elem.clear()
>>> elem.attrib["{uri}key"] = "value"
>>> serialize(elem) # 2.1
'<ns0:tag ns0:key="value" xmlns:ns0="uri" />'
>>> elem.clear()
>>> elem.attrib[ElementTree.QName("{uri}key")] = "value"
>>> serialize(elem) # 2.2
'<ns0:tag ns0:key="value" xmlns:ns0="uri" />'
3) decorated values are not converted by default, but the
QName wrapper can be used for values
>>> elem.clear()
>>> elem.attrib["{uri}key"] = "{uri}value"
>>> serialize(elem) # 3.1
'<ns0:tag ns0:key="{uri}value" xmlns:ns0="uri" />'
>>> elem.clear()
>>> elem.attrib["{uri}key"] = ElementTree.QName("{uri}value")
>>> serialize(elem) # 3.2
'<ns0:tag ns0:key="ns0:value" xmlns:ns0="uri" />'
>>> elem.clear()
>>> subelem = ElementTree.Element("tag")
>>> subelem.attrib["{uri1}key"] = ElementTree.QName("{uri2}value")
>>> elem.append(subelem)
>>> elem.append(subelem)
>>> serialize(elem) # 3.3
'<ns0:tag xmlns:ns0="uri"><tag ns1:key="ns2:value" xmlns:ns1="uri1" xmlns:ns2="uri2" /><tag ns1:key="ns2:value" xmlns:ns1="uri1" xmlns:ns2="uri2" /></ns0:tag>'
"""
def xpath_tokenizer(p):
"""
Test the XPath tokenizer.
>>> # tests from the xml specification
>>> xpath_tokenizer("*")
['*']
>>> xpath_tokenizer("text()")
['text', '()']
>>> xpath_tokenizer("@name")
['@', 'name']
>>> xpath_tokenizer("@*")
['@', '*']
>>> xpath_tokenizer("para[1]")
['para', '[', '1', ']']
>>> xpath_tokenizer("para[last()]")
['para', '[', 'last', '()', ']']
>>> xpath_tokenizer("*/para")
['*', '/', 'para']
>>> xpath_tokenizer("/doc/chapter[5]/section[2]")
['/', 'doc', '/', 'chapter', '[', '5', ']', '/', 'section', '[', '2', ']']
>>> xpath_tokenizer("chapter//para")
['chapter', '/', '/', 'para']
>>> xpath_tokenizer("//para")
['/', '/', 'para']
>>> xpath_tokenizer("//olist/item")
['/', '/', 'olist', '/', 'item']
>>> xpath_tokenizer(".")
['.']
>>> xpath_tokenizer(".//para")
['.', '/', '/', 'para']
>>> xpath_tokenizer("..")
['..']
>>> xpath_tokenizer("../@lang")
['..', '/', '@', 'lang']
>>> xpath_tokenizer("chapter[title]")
['chapter', '[', 'title', ']']
>>> xpath_tokenizer("employee[@secretary and @assistant]")
['employee', '[', '@', 'secretary', '', 'and', '', '@', 'assistant', ']']
>>> # additional tests
>>> xpath_tokenizer("{http://spam}egg")
['{http://spam}egg']
>>> xpath_tokenizer("./spam.egg")
['.', '/', 'spam.egg']
>>> xpath_tokenizer(".//{http://spam}egg")
['.', '/', '/', '{http://spam}egg']
"""
out = []
for op, tag in ElementPath.xpath_tokenizer(p):
out.append(op or tag)
return out
#
# xinclude tests (samples from appendix C of the xinclude specification)
XINCLUDE = {}
XINCLUDE["C1.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>120 Mz is adequate for an average home user.</p>
<xi:include href="disclaimer.xml"/>
</document>
"""
XINCLUDE["disclaimer.xml"] = """\
<?xml version='1.0'?>
<disclaimer>
<p>The opinions represented herein represent those of the individual
and should not be interpreted as official policy endorsed by this
organization.</p>
</disclaimer>
"""
XINCLUDE["C2.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>This document has been accessed
<xi:include href="count.txt" parse="text"/> times.</p>
</document>
"""
XINCLUDE["count.txt"] = "324387"
XINCLUDE["C3.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>The following is the source of the "data.xml" resource:</p>
<example><xi:include href="data.xml" parse="text"/></example>
</document>
"""
XINCLUDE["data.xml"] = """\
<?xml version='1.0'?>
<data>
<item><![CDATA[Brooks & Shields]]></item>
</data>
"""
XINCLUDE["C5.xml"] = """\
<?xml version='1.0'?>
<div xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="example.txt" parse="text">
<xi:fallback>
<xi:include href="fallback-example.txt" parse="text">
<xi:fallback><a href="mailto:[email protected]">Report error</a></xi:fallback>
</xi:include>
</xi:fallback>
</xi:include>
</div>
"""
XINCLUDE["default.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>Example.</p>
<xi:include href="samples/simple.xml"/>
</document>
"""
def xinclude_loader(href, parse="xml", encoding=None):
try:
data = XINCLUDE[href]
except KeyError:
raise IOError("resource not found")
if parse == "xml":
return ElementTree.XML(data)
return data
def xinclude():
r"""
Basic inclusion example (XInclude C.1)
>>> document = xinclude_loader("C1.xml")
>>> ElementInclude.include(document, xinclude_loader)
>>> print serialize(document) # C1
<document>
<p>120 Mz is adequate for an average home user.</p>
<disclaimer>
<p>The opinions represented herein represent those of the individual
and should not be interpreted as official policy endorsed by this
organization.</p>
</disclaimer>
</document>
Textual inclusion example (XInclude C.2)
>>> document = xinclude_loader("C2.xml")
>>> ElementInclude.include(document, xinclude_loader)
>>> print serialize(document) # C2
<document>
<p>This document has been accessed
324387 times.</p>
</document>
Textual inclusion of XML example (XInclude C.3)
>>> document = xinclude_loader("C3.xml")
>>> ElementInclude.include(document, xinclude_loader)
>>> print serialize(document) # C3
<document>
<p>The following is the source of the "data.xml" resource:</p>
<example><?xml version='1.0'?>
<data>
<item><![CDATA[Brooks & Shields]]></item>
</data>
</example>
</document>
Fallback example (XInclude C.5)
Note! Fallback support is not yet implemented
>>> document = xinclude_loader("C5.xml")
>>> ElementInclude.include(document, xinclude_loader)
Traceback (most recent call last):
IOError: resource not found
>>> # print serialize(document) # C5
"""
def xinclude_default():
"""
>>> document = xinclude_loader("default.xml")
>>> ElementInclude.include(document)
>>> print serialize(document) # default
<document>
<p>Example.</p>
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
</document>
"""
#
# xmlwriter
def xmlwriter():
r"""
>>> file = StringIO.StringIO()
>>> w = SimpleXMLWriter.XMLWriter(file)
>>> html = w.start("html")
>>> x = w.start("head")
>>> w.element("title", "my document")
>>> w.data("\n")
>>> w.element("meta", name="hello", value="goodbye")
>>> w.data("\n")
>>> w.end()
>>> x = w.start("body")
>>> w.element("h1", "this is a heading")
>>> w.data("\n")
>>> w.element("p", u"this is a paragraph")
>>> w.data("\n")
>>> w.element("p", u"reserved characters: <&>")
>>> w.data("\n")
>>> w.element("p", u"detta är också ett stycke")
>>> w.data("\n")
>>> w.close(html)
>>> print file.getvalue()
<html><head><title>my document</title>
<meta name="hello" value="goodbye" />
</head><body><h1>this is a heading</h1>
<p>this is a paragraph</p>
<p>reserved characters: <&></p>
<p>detta är också ett stycke</p>
</body></html>
"""
# --------------------------------------------------------------------
# reported bugs
def bug_xmltoolkit21():
"""
marshaller gives obscure errors for non-string values
>>> elem = ElementTree.Element(123)
>>> serialize(elem) # tag
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
>>> elem = ElementTree.Element("elem")
>>> elem.text = 123
>>> serialize(elem) # text
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
>>> elem = ElementTree.Element("elem")
>>> elem.tail = 123
>>> serialize(elem) # tail
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
>>> elem = ElementTree.Element("elem")
>>> elem.set(123, "123")
>>> serialize(elem) # attribute key
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
>>> elem = ElementTree.Element("elem")
>>> elem.set("123", 123)
>>> serialize(elem) # attribute value
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
"""
def bug_xmltoolkit25():
"""
typo in ElementTree.findtext
>>> tree = ElementTree.ElementTree(SAMPLE_XML)
>>> tree.findtext("tag")
'text'
>>> tree.findtext("section/tag")
'subtext'
"""
def bug_xmltoolkit28():
"""
.//tag causes exceptions
>>> tree = ElementTree.XML("<doc><table><tbody/></table></doc>")
>>> summarize_list(tree.findall(".//thead"))
[]
>>> summarize_list(tree.findall(".//tbody"))
['tbody']
"""
def bug_xmltoolkitX1():
"""
dump() doesn't flush the output buffer
>>> tree = ElementTree.XML("<doc><table><tbody/></table></doc>")
>>> ElementTree.dump(tree); sys.stdout.write("tail")
<doc><table><tbody /></table></doc>
tail
"""
def bug_xmltoolkit39():
"""
    non-ascii element and attribute names don't work
>>> tree = ElementTree.XML("<?xml version='1.0' encoding='iso-8859-1'?><täg />")
>>> ElementTree.tostring(tree, "utf-8")
'<t\\xc3\\xa4g />'
>>> tree = ElementTree.XML("<?xml version='1.0' encoding='iso-8859-1'?><tag ättr='välue' />")
>>> tree.attrib
{u'\\xe4ttr': u'v\\xe4lue'}
>>> ElementTree.tostring(tree, "utf-8")
'<tag \\xc3\\xa4ttr="v\\xc3\\xa4lue" />'
>>> tree = ElementTree.XML("<?xml version='1.0' encoding='iso-8859-1'?><täg>text</täg>")
>>> ElementTree.tostring(tree, "utf-8")
'<t\\xc3\\xa4g>text</t\\xc3\\xa4g>'
>>> tree = ElementTree.Element(u"täg")
>>> ElementTree.tostring(tree, "utf-8")
'<t\\xc3\\xa4g />'
>>> tree = ElementTree.Element("tag")
>>> tree.set(u"ättr", u"välue")
>>> ElementTree.tostring(tree, "utf-8")
'<tag \\xc3\\xa4ttr="v\\xc3\\xa4lue" />'
"""
def bug_xmltoolkit45():
"""
problems parsing mixed unicode/non-ascii html documents
latin-1 text
>>> p = HTMLTreeBuilder.TreeBuilder()
>>> p.feed("<p>välue</p>")
>>> serialize(p.close())
'<p>välue</p>'
utf-8 text
>>> p = HTMLTreeBuilder.TreeBuilder(encoding="utf-8")
>>> p.feed("<p>v\xc3\xa4lue</p>")
>>> serialize(p.close())
'<p>välue</p>'
utf-8 text using meta tag
>>> p = HTMLTreeBuilder.TreeBuilder()
>>> p.feed("<html><meta http-equiv='Content-Type' content='text/html; charset=utf-8'><p>v\xc3\xa4lue</p></html>")
>>> serialize(p.close().find("p"))
'<p>välue</p>'
latin-1 character references
>>> p = HTMLTreeBuilder.TreeBuilder()
>>> p.feed("<p>välue</p>")
>>> serialize(p.close())
'<p>välue</p>'
latin-1 character entities
>>> p = HTMLTreeBuilder.TreeBuilder()
>>> p.feed("<p>välue</p>")
>>> serialize(p.close())
'<p>välue</p>'
mixed latin-1 text and unicode entities
>>> p = HTMLTreeBuilder.TreeBuilder()
>>> p.feed("<p>”välue”</p>")
>>> serialize(p.close())
'<p>”välue”</p>'
mixed unicode and latin-1 entities
>>> p = HTMLTreeBuilder.TreeBuilder()
>>> p.feed("<p>”välue”</p>")
>>> serialize(p.close())
'<p>”välue”</p>'
"""
def bug_xmltoolkit46():
"""
problems parsing open BR tags
>>> p = HTMLTreeBuilder.TreeBuilder()
>>> p.feed("<p>key<br>value</p>")
>>> serialize(p.close())
'<p>key<br />value</p>'
"""
def bug_xmltoolkit54():
"""
problems handling internally defined entities
>>> e = ElementTree.XML("<!DOCTYPE doc [<!ENTITY ldots '舰'>]><doc>&ldots;</doc>")
>>> serialize(e)
'<doc>舰</doc>'
"""
def bug_xmltoolkit55():
"""
make sure we're reporting the first error, not the last
>>> e = ElementTree.XML("<!DOCTYPE doc SYSTEM 'doc.dtd'><doc>&ldots;&ndots;&rdots;</doc>")
Traceback (most recent call last):
ExpatError: undefined entity &ldots;: line 1, column 36
"""
# --------------------------------------------------------------------
if __name__ == "__main__":
import doctest, selftest
failed, tested = doctest.testmod(selftest)
print tested - failed, "tests ok."
| apache-2.0 |
denisenkom/django-sqlserver | tests/pagination/tests.py | 1 | 15383 | from __future__ import unicode_literals
import unittest
import warnings
from datetime import datetime
import django
from django.core.paginator import (
EmptyPage, InvalidPage, PageNotAnInteger, Paginator,
)
if django.VERSION >= (1, 11, 0):
from django.core.paginator import UnorderedObjectListWarning
from django.test import TestCase
from django.utils import six
from .custom import ValidAdjacentNumsPaginator
from .models import Article
class PaginationTests(unittest.TestCase):
"""
Tests for the Paginator and Page classes.
"""
def check_paginator(self, params, output):
"""
Helper method that instantiates a Paginator object from the passed
params and then checks that its attributes match the passed output.
"""
count, num_pages, page_range = output
paginator = Paginator(*params)
self.check_attribute('count', paginator, count, params)
self.check_attribute('num_pages', paginator, num_pages, params)
self.check_attribute('page_range', paginator, page_range, params, coerce=list)
def check_attribute(self, name, paginator, expected, params, coerce=None):
"""
Helper method that checks a single attribute and gives a nice error
message upon test failure.
"""
got = getattr(paginator, name)
if coerce is not None:
got = coerce(got)
self.assertEqual(
expected, got,
"For '%s', expected %s but got %s. Paginator parameters were: %s"
% (name, expected, got, params)
)
def test_paginator(self):
"""
Tests the paginator attributes using varying inputs.
"""
nine = [1, 2, 3, 4, 5, 6, 7, 8, 9]
ten = nine + [10]
eleven = ten + [11]
tests = (
# Each item is two tuples:
# First tuple is Paginator parameters - object_list, per_page,
# orphans, and allow_empty_first_page.
# Second tuple is resulting Paginator attributes - count,
# num_pages, and page_range.
# Ten items, varying orphans, no empty first page.
((ten, 4, 0, False), (10, 3, [1, 2, 3])),
((ten, 4, 1, False), (10, 3, [1, 2, 3])),
((ten, 4, 2, False), (10, 2, [1, 2])),
((ten, 4, 5, False), (10, 2, [1, 2])),
((ten, 4, 6, False), (10, 1, [1])),
# Ten items, varying orphans, allow empty first page.
((ten, 4, 0, True), (10, 3, [1, 2, 3])),
((ten, 4, 1, True), (10, 3, [1, 2, 3])),
((ten, 4, 2, True), (10, 2, [1, 2])),
((ten, 4, 5, True), (10, 2, [1, 2])),
((ten, 4, 6, True), (10, 1, [1])),
# One item, varying orphans, no empty first page.
(([1], 4, 0, False), (1, 1, [1])),
(([1], 4, 1, False), (1, 1, [1])),
(([1], 4, 2, False), (1, 1, [1])),
# One item, varying orphans, allow empty first page.
(([1], 4, 0, True), (1, 1, [1])),
(([1], 4, 1, True), (1, 1, [1])),
(([1], 4, 2, True), (1, 1, [1])),
# Zero items, varying orphans, no empty first page.
(([], 4, 0, False), (0, 0, [])),
(([], 4, 1, False), (0, 0, [])),
(([], 4, 2, False), (0, 0, [])),
# Zero items, varying orphans, allow empty first page.
(([], 4, 0, True), (0, 1, [1])),
(([], 4, 1, True), (0, 1, [1])),
(([], 4, 2, True), (0, 1, [1])),
            # Number of items one less than per_page.
(([], 1, 0, True), (0, 1, [1])),
(([], 1, 0, False), (0, 0, [])),
(([1], 2, 0, True), (1, 1, [1])),
((nine, 10, 0, True), (9, 1, [1])),
            # Number of items equal to per_page.
(([1], 1, 0, True), (1, 1, [1])),
(([1, 2], 2, 0, True), (2, 1, [1])),
((ten, 10, 0, True), (10, 1, [1])),
            # Number of items one more than per_page.
(([1, 2], 1, 0, True), (2, 2, [1, 2])),
(([1, 2, 3], 2, 0, True), (3, 2, [1, 2])),
((eleven, 10, 0, True), (11, 2, [1, 2])),
            # Number of items one more than per_page with one orphan.
(([1, 2], 1, 1, True), (2, 1, [1])),
(([1, 2, 3], 2, 1, True), (3, 1, [1])),
((eleven, 10, 1, True), (11, 1, [1])),
# Non-integer inputs
((ten, '4', 1, False), (10, 3, [1, 2, 3])),
((ten, '4', 1, False), (10, 3, [1, 2, 3])),
((ten, 4, '1', False), (10, 3, [1, 2, 3])),
((ten, 4, '1', False), (10, 3, [1, 2, 3])),
)
for params, output in tests:
self.check_paginator(params, output)
def test_invalid_page_number(self):
"""
Invalid page numbers result in the correct exception being raised.
"""
paginator = Paginator([1, 2, 3], 2)
with self.assertRaises(InvalidPage):
paginator.page(3)
with self.assertRaises(PageNotAnInteger):
paginator.validate_number(None)
with self.assertRaises(PageNotAnInteger):
paginator.validate_number('x')
# With no content and allow_empty_first_page=True, 1 is a valid page number
paginator = Paginator([], 2)
self.assertEqual(paginator.validate_number(1), 1)
def test_paginate_misc_classes(self):
class CountContainer(object):
def count(self):
return 42
# Paginator can be passed other objects with a count() method.
paginator = Paginator(CountContainer(), 10)
self.assertEqual(42, paginator.count)
self.assertEqual(5, paginator.num_pages)
self.assertEqual([1, 2, 3, 4, 5], list(paginator.page_range))
# Paginator can be passed other objects that implement __len__.
class LenContainer(object):
def __len__(self):
return 42
paginator = Paginator(LenContainer(), 10)
self.assertEqual(42, paginator.count)
self.assertEqual(5, paginator.num_pages)
self.assertEqual([1, 2, 3, 4, 5], list(paginator.page_range))
def check_indexes(self, params, page_num, indexes):
"""
Helper method that instantiates a Paginator object from the passed
params and then checks that the start and end indexes of the passed
page_num match those given as a 2-tuple in indexes.
"""
paginator = Paginator(*params)
if page_num == 'first':
page_num = 1
elif page_num == 'last':
page_num = paginator.num_pages
page = paginator.page(page_num)
start, end = indexes
msg = ("For %s of page %s, expected %s but got %s. Paginator parameters were: %s")
self.assertEqual(start, page.start_index(), msg % ('start index', page_num, start, page.start_index(), params))
self.assertEqual(end, page.end_index(), msg % ('end index', page_num, end, page.end_index(), params))
def test_page_indexes(self):
"""
Paginator pages have the correct start and end indexes.
"""
ten = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
tests = (
# Each item is three tuples:
# First tuple is Paginator parameters - object_list, per_page,
# orphans, and allow_empty_first_page.
# Second tuple is the start and end indexes of the first page.
# Third tuple is the start and end indexes of the last page.
# Ten items, varying per_page, no orphans.
((ten, 1, 0, True), (1, 1), (10, 10)),
((ten, 2, 0, True), (1, 2), (9, 10)),
((ten, 3, 0, True), (1, 3), (10, 10)),
((ten, 5, 0, True), (1, 5), (6, 10)),
# Ten items, varying per_page, with orphans.
((ten, 1, 1, True), (1, 1), (9, 10)),
((ten, 1, 2, True), (1, 1), (8, 10)),
((ten, 3, 1, True), (1, 3), (7, 10)),
((ten, 3, 2, True), (1, 3), (7, 10)),
((ten, 3, 4, True), (1, 3), (4, 10)),
((ten, 5, 1, True), (1, 5), (6, 10)),
((ten, 5, 2, True), (1, 5), (6, 10)),
((ten, 5, 5, True), (1, 10), (1, 10)),
# One item, varying orphans, no empty first page.
(([1], 4, 0, False), (1, 1), (1, 1)),
(([1], 4, 1, False), (1, 1), (1, 1)),
(([1], 4, 2, False), (1, 1), (1, 1)),
# One item, varying orphans, allow empty first page.
(([1], 4, 0, True), (1, 1), (1, 1)),
(([1], 4, 1, True), (1, 1), (1, 1)),
(([1], 4, 2, True), (1, 1), (1, 1)),
# Zero items, varying orphans, allow empty first page.
(([], 4, 0, True), (0, 0), (0, 0)),
(([], 4, 1, True), (0, 0), (0, 0)),
(([], 4, 2, True), (0, 0), (0, 0)),
)
for params, first, last in tests:
self.check_indexes(params, 'first', first)
self.check_indexes(params, 'last', last)
# When no items and no empty first page, we should get EmptyPage error.
with self.assertRaises(EmptyPage):
self.check_indexes(([], 4, 0, False), 1, None)
with self.assertRaises(EmptyPage):
self.check_indexes(([], 4, 1, False), 1, None)
with self.assertRaises(EmptyPage):
self.check_indexes(([], 4, 2, False), 1, None)
def test_page_sequence(self):
"""
A paginator page acts like a standard sequence.
"""
eleven = 'abcdefghijk'
page2 = Paginator(eleven, per_page=5, orphans=1).page(2)
self.assertEqual(len(page2), 6)
self.assertIn('k', page2)
self.assertNotIn('a', page2)
self.assertEqual(''.join(page2), 'fghijk')
self.assertEqual(''.join(reversed(page2)), 'kjihgf')
def test_get_page_hook(self):
"""
A Paginator subclass can use the ``_get_page`` hook to
return an alternative to the standard Page class.
"""
eleven = 'abcdefghijk'
paginator = ValidAdjacentNumsPaginator(eleven, per_page=6)
page1 = paginator.page(1)
page2 = paginator.page(2)
self.assertIsNone(page1.previous_page_number())
self.assertEqual(page1.next_page_number(), 2)
self.assertEqual(page2.previous_page_number(), 1)
self.assertIsNone(page2.next_page_number())
def test_page_range_iterator(self):
"""
Paginator.page_range should be an iterator.
"""
self.assertIsInstance(Paginator([1, 2, 3], 2).page_range, type(six.moves.range(0)))
class ModelPaginationTests(TestCase):
"""
Test pagination with Django model instances
"""
def setUp(self):
# Prepare a list of objects for pagination.
for x in range(1, 10):
a = Article(headline='Article %s' % x, pub_date=datetime(2005, 7, 29))
a.save()
def test_first_page(self):
paginator = Paginator(Article.objects.order_by('id'), 5)
p = paginator.page(1)
self.assertEqual("<Page 1 of 2>", six.text_type(p))
self.assertQuerysetEqual(p.object_list, [
"<Article: Article 1>",
"<Article: Article 2>",
"<Article: Article 3>",
"<Article: Article 4>",
"<Article: Article 5>"
])
self.assertTrue(p.has_next())
self.assertFalse(p.has_previous())
self.assertTrue(p.has_other_pages())
self.assertEqual(2, p.next_page_number())
with self.assertRaises(InvalidPage):
p.previous_page_number()
self.assertEqual(1, p.start_index())
self.assertEqual(5, p.end_index())
def test_last_page(self):
paginator = Paginator(Article.objects.order_by('id'), 5)
p = paginator.page(2)
self.assertEqual("<Page 2 of 2>", six.text_type(p))
self.assertQuerysetEqual(p.object_list, [
"<Article: Article 6>",
"<Article: Article 7>",
"<Article: Article 8>",
"<Article: Article 9>"
])
self.assertFalse(p.has_next())
self.assertTrue(p.has_previous())
self.assertTrue(p.has_other_pages())
with self.assertRaises(InvalidPage):
p.next_page_number()
self.assertEqual(1, p.previous_page_number())
self.assertEqual(6, p.start_index())
self.assertEqual(9, p.end_index())
def test_page_getitem(self):
"""
Tests proper behavior of a paginator page __getitem__ (queryset
evaluation, slicing, exception raised).
"""
paginator = Paginator(Article.objects.order_by('id'), 5)
p = paginator.page(1)
# Make sure object_list queryset is not evaluated by an invalid __getitem__ call.
# (this happens from the template engine when using eg: {% page_obj.has_previous %})
self.assertIsNone(p.object_list._result_cache)
with self.assertRaises(TypeError):
p['has_previous']
self.assertIsNone(p.object_list._result_cache)
self.assertNotIsInstance(p.object_list, list)
# Make sure slicing the Page object with numbers and slice objects work.
self.assertEqual(p[0], Article.objects.get(headline='Article 1'))
self.assertQuerysetEqual(p[slice(2)], [
"<Article: Article 1>",
"<Article: Article 2>",
]
)
# After __getitem__ is called, object_list is a list
self.assertIsInstance(p.object_list, list)
def test_paginating_unordered_queryset_raises_warning(self):
if django.VERSION < (1, 11, 0):
self.skipTest("does not work on older version of Django")
with warnings.catch_warnings(record=True) as warns:
# Prevent the RuntimeWarning subclass from appearing as an
# exception due to the warnings.simplefilter() in runtests.py.
warnings.filterwarnings('always', category=UnorderedObjectListWarning)
Paginator(Article.objects.all(), 5)
self.assertEqual(len(warns), 1)
warning = warns[0]
self.assertEqual(str(warning.message), (
"Pagination may yield inconsistent results with an unordered "
"object_list: <class 'pagination.models.Article'> QuerySet."
))
# The warning points at the Paginator caller (i.e. the stacklevel
# is appropriate).
self.assertEqual(warning.filename, __file__)
def test_paginating_unordered_object_list_raises_warning(self):
"""
        Unordered object list warning with an object that has an ordered
attribute but not a model attribute.
"""
if django.VERSION < (1, 11, 0):
self.skipTest("does not work on older version of Django")
class ObjectList():
ordered = False
object_list = ObjectList()
with warnings.catch_warnings(record=True) as warns:
warnings.filterwarnings('always', category=UnorderedObjectListWarning)
Paginator(object_list, 5)
self.assertEqual(len(warns), 1)
self.assertEqual(str(warns[0].message), (
"Pagination may yield inconsistent results with an unordered "
"object_list: {!r}.".format(object_list)
))
| mit |
adw0rd/lettuce-py3 | lettuce/__init__.py | 1 | 6767 | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__version__ = version = '0.2.22'
release = 'kryptonite'
import os
import sys
import traceback
import warnings
try:
from imp import reload
except ImportError:
# python 2.5 fallback
pass
import random
from lettuce.core import Feature, TotalResult
from lettuce.terrain import after
from lettuce.terrain import before
from lettuce.terrain import world
from lettuce.decorators import step, steps
from lettuce.registry import call_hook
from lettuce.registry import STEP_REGISTRY
from lettuce.registry import CALLBACK_REGISTRY
from lettuce.exceptions import StepLoadingError
from lettuce.plugins import (
xunit_output,
subunit_output,
autopdb,
smtp_mail_queue,
)
from lettuce import fs
from lettuce import exceptions
try:
from colorama import init as ms_windows_workaround
ms_windows_workaround()
except ImportError:
pass
__all__ = [
'after',
'before',
'step',
'steps',
'world',
'STEP_REGISTRY',
'CALLBACK_REGISTRY',
'call_hook',
]
try:
terrain = fs.FileSystem._import("terrain")
reload(terrain)
except Exception as e:
if not "No module named 'terrain'" in str(e):
string = 'Lettuce has tried to load the conventional environment ' \
'module "terrain"\nbut it has errors, check its contents and ' \
'try to run lettuce again.\n\nOriginal traceback below:\n\n'
sys.stderr.write(string)
sys.stderr.write(exceptions.traceback.format_exc())
raise SystemExit(1)
class Runner(object):
""" Main lettuce's test runner
Takes a base path as parameter (string), so that it can look for
features and step definitions on there.
"""
def __init__(self, base_path, scenarios=None,
verbosity=0, no_color=False, random=False,
enable_xunit=False, xunit_filename=None,
enable_subunit=False, subunit_filename=None,
tags=None, failfast=False, auto_pdb=False,
smtp_queue=None, root_dir=None, **kwargs):
""" lettuce.Runner will try to find a terrain.py file and
import it from within `base_path`
"""
self.tags = tags
self.single_feature = None
if os.path.isfile(base_path) and os.path.exists(base_path):
self.single_feature = base_path
base_path = os.path.dirname(base_path)
sys.path.insert(0, base_path)
self.loader = fs.FeatureLoader(base_path, root_dir)
self.verbosity = verbosity
self.scenarios = scenarios and list(map(int, scenarios.split(","))) or None
self.failfast = failfast
if auto_pdb:
autopdb.enable(self)
sys.path.remove(base_path)
if verbosity == 0:
from lettuce.plugins import non_verbose as output
elif verbosity == 1:
from lettuce.plugins import dots as output
elif verbosity == 2:
from lettuce.plugins import scenario_names as output
else:
if verbosity == 4:
from lettuce.plugins import colored_shell_output as output
msg = ('Deprecated in lettuce 2.2.21. Use verbosity 3 without '
'--no-color flag instead of verbosity 4')
warnings.warn(msg, DeprecationWarning)
elif verbosity == 3:
if no_color:
from lettuce.plugins import shell_output as output
else:
from lettuce.plugins import colored_shell_output as output
self.random = random
if enable_xunit:
xunit_output.enable(filename=xunit_filename)
if smtp_queue:
smtp_mail_queue.enable()
if enable_subunit:
subunit_output.enable(filename=subunit_filename)
reload(output)
self.output = output
def run(self):
""" Find and load step definitions, and them find and load
features under `base_path` specified on constructor
"""
results = []
if self.single_feature:
features_files = [self.single_feature]
else:
features_files = self.loader.find_feature_files()
if self.random:
random.shuffle(features_files)
if not features_files:
self.output.print_no_features_found(self.loader.base_dir)
return
# only load steps if we've located some features.
# this prevents stupid bugs when loading django modules
# that we don't even want to test.
try:
self.loader.find_and_load_step_definitions()
except StepLoadingError as e:
print("Error loading step definitions:\n", e)
return
call_hook('before', 'all')
failed = False
try:
for filename in features_files:
feature = Feature.from_file(filename)
results.append(
feature.run(self.scenarios,
tags=self.tags,
random=self.random,
failfast=self.failfast))
except exceptions.LettuceSyntaxError as e:
sys.stderr.write(e.msg)
failed = True
except exceptions.NoDefinitionFound as e:
sys.stderr.write(e.msg)
failed = True
except:
if not self.failfast:
e = sys.exc_info()[1]
print("Died with %s" % str(e))
traceback.print_exc()
else:
print()
print ("Lettuce aborted running any more tests "
"because was called with the `--failfast` option")
failed = True
finally:
total = TotalResult(results)
total.output_format()
call_hook('after', 'all', total)
if failed:
raise SystemExit(2)
return total
| gpl-3.0 |
CristianBB/SickRage | lib/imdb/parser/http/utils.py | 59 | 34532 | """
parser.http.utils module (imdb package).
This module provides miscellaneous utilities used by
the imdb.parser.http classes.
Copyright 2004-2012 Davide Alberani <[email protected]>
2008 H. Turgut Uyar <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
import logging
import warnings
from imdb._exceptions import IMDbError
from imdb.utils import flatten, _Container
from imdb.Movie import Movie
from imdb.Person import Person
from imdb.Character import Character
# Year, imdbIndex and kind.
re_yearKind_index = re.compile(r'(\([0-9\?]{4}(?:/[IVXLCDM]+)?\)(?: \(mini\)| \(TV\)| \(V\)| \(VG\))?)')
# Match imdb ids in href tags
re_imdbid = re.compile(r'(title/tt|name/nm|character/ch|company/co)([0-9]+)')
def analyze_imdbid(href):
"""Return an imdbID from an URL."""
if not href:
return None
match = re_imdbid.search(href)
if not match:
return None
return str(match.group(2))
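# Illustrative example (not from the original sources): given the re_imdbid
# pattern above, analyze_imdbid('/title/tt0094226/') returns '0094226' and
# analyze_imdbid('/name/nm0000134/') returns '0000134'.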
_modify_keys = list(Movie.keys_tomodify_list) + list(Person.keys_tomodify_list)
def _putRefs(d, re_titles, re_names, re_characters, lastKey=None):
"""Iterate over the strings inside list items or dictionary values,
substitutes movie titles and person names with the (qv) references."""
if isinstance(d, list):
for i in xrange(len(d)):
if isinstance(d[i], (unicode, str)):
if lastKey in _modify_keys:
if re_names:
d[i] = re_names.sub(ur"'\1' (qv)", d[i])
if re_titles:
d[i] = re_titles.sub(ur'_\1_ (qv)', d[i])
if re_characters:
d[i] = re_characters.sub(ur'#\1# (qv)', d[i])
elif isinstance(d[i], (list, dict)):
_putRefs(d[i], re_titles, re_names, re_characters,
lastKey=lastKey)
elif isinstance(d, dict):
for k, v in d.items():
lastKey = k
if isinstance(v, (unicode, str)):
if lastKey in _modify_keys:
if re_names:
d[k] = re_names.sub(ur"'\1' (qv)", v)
if re_titles:
d[k] = re_titles.sub(ur'_\1_ (qv)', v)
if re_characters:
d[k] = re_characters.sub(ur'#\1# (qv)', v)
elif isinstance(v, (list, dict)):
_putRefs(d[k], re_titles, re_names, re_characters,
lastKey=lastKey)
# Handle HTML/XML/SGML entities.
from htmlentitydefs import entitydefs
entitydefs = entitydefs.copy()
entitydefsget = entitydefs.get
entitydefs['nbsp'] = ' '
sgmlentity = {'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\'', 'ndash': '-'}
sgmlentityget = sgmlentity.get
_sgmlentkeys = sgmlentity.keys()
entcharrefs = {}
entcharrefsget = entcharrefs.get
for _k, _v in entitydefs.items():
if _k in _sgmlentkeys: continue
if _v[0:2] == '&#':
dec_code = _v[1:-1]
_v = unichr(int(_v[2:-1]))
entcharrefs[dec_code] = _v
else:
dec_code = '#' + str(ord(_v))
_v = unicode(_v, 'latin_1', 'replace')
entcharrefs[dec_code] = _v
entcharrefs[_k] = _v
del _sgmlentkeys, _k, _v
entcharrefs['#160'] = u' '
entcharrefs['#xA0'] = u' '
entcharrefs['#xa0'] = u' '
entcharrefs['#XA0'] = u' '
entcharrefs['#x22'] = u'"'
entcharrefs['#X22'] = u'"'
# convert &x26; to &, to make BeautifulSoup happy; beware that this
# leaves lone '&' in the html broken, but I assume this is better than
# the contrary...
entcharrefs['#38'] = u'&'
entcharrefs['#x26'] = u'&'
entcharrefs['#x26'] = u'&'
re_entcharrefs = re.compile('&(%s|\#160|\#\d{1,5}|\#x[0-9a-f]{1,4});' %
'|'.join(map(re.escape, entcharrefs)), re.I)
re_entcharrefssub = re_entcharrefs.sub
sgmlentity.update(dict([('#34', u'"'), ('#38', u'&'),
('#60', u'<'), ('#62', u'>'), ('#39', u"'")]))
re_sgmlref = re.compile('&(%s);' % '|'.join(map(re.escape, sgmlentity)))
re_sgmlrefsub = re_sgmlref.sub
# Matches XML-only single tags, like <br/> ; they are invalid in HTML,
# but widely used by IMDb web site. :-/
re_xmltags = re.compile('<([a-zA-Z]+)/>')
def _replXMLRef(match):
"""Replace the matched XML/HTML entities and references;
replace everything except sgml entities like <, >, ..."""
ref = match.group(1)
value = entcharrefsget(ref)
if value is None:
if ref[0] == '#':
ref_code = ref[1:]
if ref_code in ('34', '38', '60', '62', '39'):
return match.group(0)
elif ref_code[0].lower() == 'x':
#if ref[2:] == '26':
# # Don't convert &x26; to &, to make BeautifulSoup happy.
# return '&'
return unichr(int(ref[2:], 16))
else:
return unichr(int(ref[1:]))
else:
return ref
return value
def subXMLRefs(s):
"""Return the given html string with entity and char references
replaced."""
return re_entcharrefssub(_replXMLRef, s)
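# Hedged illustration (hypothetical input): subXMLRefs(u'Fran&ccedil;ois &amp; C.')
# decodes the &ccedil; character reference to u'\xe7' but leaves the SGML-only
# &amp; untouched, since 'amp' is excluded from entcharrefs above.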
# XXX: no more used here; move it to mobile (they are imported by helpers, too)?
def _replSGMLRefs(match):
"""Replace the matched SGML entity."""
ref = match.group(1)
return sgmlentityget(ref, ref)
def subSGMLRefs(s):
"""Return the given html string with sgml entity and char references
replaced."""
return re_sgmlrefsub(_replSGMLRefs, s)
_b_p_logger = logging.getLogger('imdbpy.parser.http.build_person')
def build_person(txt, personID=None, billingPos=None,
roleID=None, accessSystem='http', modFunct=None):
"""Return a Person instance from the tipical <tr>...</tr> strings
found in the IMDb's web site."""
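    # Hedged illustration (hypothetical input, not actual IMDb markup): given
    # txt=u'Robert De Niro .... Travis Bickle' and a non-None roleID, the
    # '....' separator yields name=u'Robert De Niro' and role=u'Travis Bickle';
    # a trailing parenthesised part such as u'(uncredited)' would go to notes.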
#if personID is None
# _b_p_logger.debug('empty name or personID for "%s"', txt)
notes = u''
role = u''
# Search the (optional) separator between name and role/notes.
if txt.find('....') != -1:
sep = '....'
elif txt.find('...') != -1:
sep = '...'
else:
sep = '...'
# Replace the first parenthesis, assuming there are only
# notes, after.
# Rationale: no imdbIndex is (ever?) showed on the web site.
txt = txt.replace('(', '...(', 1)
txt_split = txt.split(sep, 1)
name = txt_split[0].strip()
if len(txt_split) == 2:
role_comment = txt_split[1].strip()
# Strip common endings.
if role_comment[-4:] == ' and':
role_comment = role_comment[:-4].rstrip()
elif role_comment[-2:] == ' &':
role_comment = role_comment[:-2].rstrip()
elif role_comment[-6:] == '& ....':
role_comment = role_comment[:-6].rstrip()
# Get the notes.
if roleID is not None:
if not isinstance(roleID, list):
cmt_idx = role_comment.find('(')
if cmt_idx != -1:
role = role_comment[:cmt_idx].rstrip()
notes = role_comment[cmt_idx:]
else:
# Just a role, without notes.
role = role_comment
else:
role = role_comment
else:
            # We're managing something that doesn't have a 'role', so
            # everything is notes.
notes = role_comment
if role == '....': role = u''
roleNotes = []
# Manages multiple roleIDs.
if isinstance(roleID, list):
rolesplit = role.split('/')
role = []
for r in rolesplit:
nidx = r.find('(')
if nidx != -1:
role.append(r[:nidx].rstrip())
roleNotes.append(r[nidx:])
else:
role.append(r)
roleNotes.append(None)
lr = len(role)
lrid = len(roleID)
if lr > lrid:
            roleID += [None] * (lr - lrid)
elif lr < lrid:
roleID = roleID[:lr]
for i, rid in enumerate(roleID):
if rid is not None:
roleID[i] = str(rid)
if lr == 1:
role = role[0]
roleID = roleID[0]
notes = roleNotes[0] or u''
elif roleID is not None:
roleID = str(roleID)
if personID is not None:
personID = str(personID)
if (not name) or (personID is None):
# Set to 'debug', since build_person is expected to receive some crap.
_b_p_logger.debug('empty name or personID for "%s"', txt)
# XXX: return None if something strange is detected?
person = Person(name=name, personID=personID, currentRole=role,
roleID=roleID, notes=notes, billingPos=billingPos,
modFunct=modFunct, accessSystem=accessSystem)
if roleNotes and len(roleNotes) == len(roleID):
for idx, role in enumerate(person.currentRole):
if roleNotes[idx]:
role.notes = roleNotes[idx]
return person
_re_chrIDs = re.compile('[0-9]{7}')
_b_m_logger = logging.getLogger('imdbpy.parser.http.build_movie')
# To shrink spaces.
re_spaces = re.compile(r'\s+')
def build_movie(txt, movieID=None, roleID=None, status=None,
accessSystem='http', modFunct=None, _parsingCharacter=False,
_parsingCompany=False, year=None, chrRoles=None,
rolesNoChar=None, additionalNotes=None):
"""Given a string as normally seen on the "categorized" page of
a person on the IMDb's web site, returns a Movie instance."""
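    # Hedged illustration (hypothetical input): with the default ' .... '
    # separator, txt=u'The Godfather (1972) .... Don Vito Corleone' splits
    # into title=u'The Godfather (1972)' and a role/notes half handled much
    # like in build_person above.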
# FIXME: Oook, lets face it: build_movie and build_person are now
# two horrible sets of patches to support the new IMDb design. They
# must be rewritten from scratch.
if _parsingCharacter:
_defSep = ' Played by '
elif _parsingCompany:
_defSep = ' ... '
else:
_defSep = ' .... '
title = re_spaces.sub(' ', txt).strip()
# Split the role/notes from the movie title.
tsplit = title.split(_defSep, 1)
role = u''
notes = u''
roleNotes = []
if len(tsplit) == 2:
title = tsplit[0].rstrip()
role = tsplit[1].lstrip()
if title[-9:] == 'TV Series':
title = title[:-9].rstrip()
#elif title[-7:] == '(short)':
# title = title[:-7].rstrip()
#elif title[-11:] == '(TV series)':
# title = title[:-11].rstrip()
#elif title[-10:] == '(TV movie)':
# title = title[:-10].rstrip()
elif title[-14:] == 'TV mini-series':
title = title[:-14] + ' (mini)'
if title and title.endswith(_defSep.rstrip()):
title = title[:-len(_defSep)+1]
# Try to understand where the movie title ends.
while True:
if year:
break
if title[-1:] != ')':
# Ignore the silly "TV Series" notice.
if title[-9:] == 'TV Series':
title = title[:-9].rstrip()
continue
else:
# Just a title: stop here.
break
# Try to match paired parentheses; yes: sometimes there are
# parentheses inside comments...
nidx = title.rfind('(')
while (nidx != -1 and \
title[nidx:].count('(') != title[nidx:].count(')')):
nidx = title[:nidx].rfind('(')
# Unbalanced parentheses: stop here.
if nidx == -1: break
# The last item in parentheses seems to be a year: stop here.
first4 = title[nidx+1:nidx+5]
if (first4.isdigit() or first4 == '????') and \
title[nidx+5:nidx+6] in (')', '/'): break
# The last item in parentheses is a known kind: stop here.
if title[nidx+1:-1] in ('TV', 'V', 'mini', 'VG', 'TV movie',
'TV series', 'short'): break
# Else, in parentheses there are some notes.
# XXX: should the notes in the role half be kept separated
# from the notes in the movie title half?
if notes: notes = '%s %s' % (title[nidx:], notes)
else: notes = title[nidx:]
title = title[:nidx].rstrip()
if year:
year = year.strip()
if title[-1:] == ')':
fpIdx = title.rfind('(')
if fpIdx != -1:
if notes: notes = '%s %s' % (title[fpIdx:], notes)
else: notes = title[fpIdx:]
title = title[:fpIdx].rstrip()
title = u'%s (%s)' % (title, year)
if _parsingCharacter and roleID and not role:
roleID = None
if not roleID:
roleID = None
elif len(roleID) == 1:
roleID = roleID[0]
if not role and chrRoles and isinstance(roleID, (str, unicode)):
roleID = _re_chrIDs.findall(roleID)
role = ' / '.join(filter(None, chrRoles.split('@@')))
# Manages multiple roleIDs.
if isinstance(roleID, list):
tmprole = role.split('/')
role = []
for r in tmprole:
nidx = r.find('(')
if nidx != -1:
role.append(r[:nidx].rstrip())
roleNotes.append(r[nidx:])
else:
role.append(r)
roleNotes.append(None)
lr = len(role)
lrid = len(roleID)
if lr > lrid:
            roleID += [None] * (lr - lrid)
elif lr < lrid:
roleID = roleID[:lr]
for i, rid in enumerate(roleID):
if rid is not None:
roleID[i] = str(rid)
if lr == 1:
role = role[0]
roleID = roleID[0]
elif roleID is not None:
roleID = str(roleID)
if movieID is not None:
movieID = str(movieID)
if (not title) or (movieID is None):
_b_m_logger.error('empty title or movieID for "%s"', txt)
if rolesNoChar:
rolesNoChar = filter(None, [x.strip() for x in rolesNoChar.split('/')])
if not role:
role = []
elif not isinstance(role, list):
role = [role]
role += rolesNoChar
notes = notes.strip()
if additionalNotes:
additionalNotes = re_spaces.sub(' ', additionalNotes).strip()
if notes:
notes += u' '
notes += additionalNotes
if role and isinstance(role, list) and notes.endswith(role[-1].replace('\n', ' ')):
role = role[:-1]
m = Movie(title=title, movieID=movieID, notes=notes, currentRole=role,
roleID=roleID, roleIsPerson=_parsingCharacter,
modFunct=modFunct, accessSystem=accessSystem)
if roleNotes and len(roleNotes) == len(roleID):
for idx, role in enumerate(m.currentRole):
try:
if roleNotes[idx]:
role.notes = roleNotes[idx]
except IndexError:
break
# Status can't be checked here, and must be detected by the parser.
if status:
m['status'] = status
return m
class DOMParserBase(object):
"""Base parser to handle HTML data from the IMDb's web server."""
_defGetRefs = False
_containsObjects = False
preprocessors = []
extractors = []
usingModule = None
_logger = logging.getLogger('imdbpy.parser.http.domparser')
def __init__(self, useModule=None):
"""Initialize the parser. useModule can be used to force it
to use 'BeautifulSoup' or 'lxml'; by default, it's auto-detected,
using 'lxml' if available and falling back to 'BeautifulSoup'
otherwise."""
# Module to use.
if useModule is None:
useModule = ('lxml', 'BeautifulSoup')
if not isinstance(useModule, (tuple, list)):
useModule = [useModule]
self._useModule = useModule
nrMods = len(useModule)
_gotError = False
for idx, mod in enumerate(useModule):
mod = mod.strip().lower()
try:
if mod == 'lxml':
from lxml.html import fromstring
from lxml.etree import tostring
self._is_xml_unicode = False
self.usingModule = 'lxml'
elif mod == 'beautifulsoup':
from bsouplxml.html import fromstring
from bsouplxml.etree import tostring
self._is_xml_unicode = True
self.usingModule = 'beautifulsoup'
else:
self._logger.warn('unknown module "%s"' % mod)
continue
self.fromstring = fromstring
self._tostring = tostring
if _gotError:
warnings.warn('falling back to "%s"' % mod)
break
except ImportError, e:
if idx+1 >= nrMods:
# Raise the exception, if we don't have any more
# options to try.
raise IMDbError('unable to use any parser in %s: %s' % \
(str(useModule), str(e)))
else:
warnings.warn('unable to use "%s": %s' % (mod, str(e)))
_gotError = True
continue
else:
raise IMDbError('unable to use parsers in %s' % str(useModule))
# Fall-back defaults.
self._modFunct = None
self._as = 'http'
self._cname = self.__class__.__name__
self._init()
self.reset()
def reset(self):
"""Reset the parser."""
# Names and titles references.
self._namesRefs = {}
self._titlesRefs = {}
self._charactersRefs = {}
self._reset()
def _init(self):
"""Subclasses can override this method, if needed."""
pass
def _reset(self):
"""Subclasses can override this method, if needed."""
pass
def parse(self, html_string, getRefs=None, **kwds):
"""Return the dictionary generated from the given html string;
getRefs can be used to force the gathering of movies/persons/characters
references."""
self.reset()
if getRefs is not None:
self.getRefs = getRefs
else:
self.getRefs = self._defGetRefs
# Useful only for the testsuite.
if not isinstance(html_string, unicode):
html_string = unicode(html_string, 'latin_1', 'replace')
html_string = subXMLRefs(html_string)
# Temporary fix: self.parse_dom must work even for empty strings.
html_string = self.preprocess_string(html_string)
html_string = html_string.strip()
if self.usingModule == 'beautifulsoup':
# tag attributes like title=""Family Guy"" will be
# converted to title=""Family Guy"" and this confuses BeautifulSoup.
html_string = html_string.replace('""', '"')
# Browser-specific escapes create problems to BeautifulSoup.
html_string = html_string.replace('<!--[if IE]>', '"')
html_string = html_string.replace('<![endif]-->', '"')
#print html_string.encode('utf8')
if html_string:
dom = self.get_dom(html_string)
#print self.tostring(dom).encode('utf8')
try:
dom = self.preprocess_dom(dom)
except Exception, e:
self._logger.error('%s: caught exception preprocessing DOM',
self._cname, exc_info=True)
if self.getRefs:
try:
self.gather_refs(dom)
except Exception, e:
self._logger.warn('%s: unable to gather refs: %s',
self._cname, exc_info=True)
data = self.parse_dom(dom)
else:
data = {}
try:
data = self.postprocess_data(data)
except Exception, e:
self._logger.error('%s: caught exception postprocessing data',
self._cname, exc_info=True)
if self._containsObjects:
self.set_objects_params(data)
data = self.add_refs(data)
return data
def _build_empty_dom(self):
from bsouplxml import _bsoup
return _bsoup.BeautifulSoup('')
def get_dom(self, html_string):
"""Return a dom object, from the given string."""
try:
dom = self.fromstring(html_string)
if dom is None:
dom = self._build_empty_dom()
self._logger.error('%s: using a fake empty DOM', self._cname)
return dom
except Exception, e:
self._logger.error('%s: caught exception parsing DOM',
self._cname, exc_info=True)
return self._build_empty_dom()
def xpath(self, element, path):
"""Return elements matching the given XPath."""
try:
xpath_result = element.xpath(path)
if self._is_xml_unicode:
return xpath_result
result = []
for item in xpath_result:
if isinstance(item, str):
item = unicode(item)
result.append(item)
return result
except Exception, e:
self._logger.error('%s: caught exception extracting XPath "%s"',
self._cname, path, exc_info=True)
return []
def tostring(self, element):
"""Convert the element to a string."""
if isinstance(element, (unicode, str)):
return unicode(element)
else:
try:
return self._tostring(element, encoding=unicode)
except Exception, e:
self._logger.error('%s: unable to convert to string',
self._cname, exc_info=True)
return u''
def clone(self, element):
"""Clone an element."""
return self.fromstring(self.tostring(element))
def preprocess_string(self, html_string):
"""Here we can modify the text, before it's parsed."""
if not html_string:
return html_string
# Remove silly » and – chars.
html_string = html_string.replace(u' \xbb', u'')
html_string = html_string.replace(u'–', u'-')
try:
preprocessors = self.preprocessors
except AttributeError:
return html_string
for src, sub in preprocessors:
# re._pattern_type is present only since Python 2.5.
if callable(getattr(src, 'sub', None)):
html_string = src.sub(sub, html_string)
elif isinstance(src, str):
html_string = html_string.replace(src, sub)
elif callable(src):
try:
html_string = src(html_string)
except Exception, e:
_msg = '%s: caught exception preprocessing html'
self._logger.error(_msg, self._cname, exc_info=True)
continue
##print html_string.encode('utf8')
return html_string
def gather_refs(self, dom):
"""Collect references."""
grParser = GatherRefs(useModule=self._useModule)
grParser._as = self._as
grParser._modFunct = self._modFunct
refs = grParser.parse_dom(dom)
refs = grParser.postprocess_data(refs)
self._namesRefs = refs['names refs']
self._titlesRefs = refs['titles refs']
self._charactersRefs = refs['characters refs']
def preprocess_dom(self, dom):
"""Last chance to modify the dom, before the rules in self.extractors
are applied by the parse_dom method."""
return dom
def parse_dom(self, dom):
"""Parse the given dom according to the rules specified
in self.extractors."""
result = {}
for extractor in self.extractors:
##print extractor.label
if extractor.group is None:
elements = [(extractor.label, element)
for element in self.xpath(dom, extractor.path)]
else:
groups = self.xpath(dom, extractor.group)
elements = []
for group in groups:
group_key = self.xpath(group, extractor.group_key)
if not group_key: continue
group_key = group_key[0]
# XXX: always tries the conversion to unicode:
# BeautifulSoup.NavigableString is a subclass
# of unicode, and so it's never converted.
group_key = self.tostring(group_key)
normalizer = extractor.group_key_normalize
if normalizer is not None:
if callable(normalizer):
try:
group_key = normalizer(group_key)
except Exception, e:
_m = '%s: unable to apply group_key normalizer'
self._logger.error(_m, self._cname,
exc_info=True)
group_elements = self.xpath(group, extractor.path)
elements.extend([(group_key, element)
for element in group_elements])
for group_key, element in elements:
for attr in extractor.attrs:
if isinstance(attr.path, dict):
data = {}
for field in attr.path.keys():
path = attr.path[field]
value = self.xpath(element, path)
if not value:
data[field] = None
else:
# XXX: use u'' , to join?
data[field] = ''.join(value)
else:
data = self.xpath(element, attr.path)
if not data:
data = None
else:
data = attr.joiner.join(data)
if not data:
continue
attr_postprocess = attr.postprocess
if callable(attr_postprocess):
try:
data = attr_postprocess(data)
except Exception, e:
_m = '%s: unable to apply attr postprocess'
self._logger.error(_m, self._cname, exc_info=True)
key = attr.key
if key is None:
key = group_key
elif key.startswith('.'):
# assuming this is an xpath
try:
key = self.xpath(element, key)[0]
except IndexError:
self._logger.error('%s: XPath returned no items',
self._cname, exc_info=True)
elif key.startswith('self.'):
key = getattr(self, key[5:])
if attr.multi:
if key not in result:
result[key] = []
result[key].append(data)
else:
if isinstance(data, dict):
result.update(data)
else:
result[key] = data
return result
def postprocess_data(self, data):
"""Here we can modify the data."""
return data
def set_objects_params(self, data):
"""Set parameters of Movie/Person/... instances, since they are
not always set in the parser's code."""
for obj in flatten(data, yieldDictKeys=True, scalar=_Container):
obj.accessSystem = self._as
obj.modFunct = self._modFunct
def add_refs(self, data):
"""Modify data according to the expected output."""
if self.getRefs:
titl_re = ur'(%s)' % '|'.join([re.escape(x) for x
in self._titlesRefs.keys()])
if titl_re != ur'()': re_titles = re.compile(titl_re, re.U)
else: re_titles = None
nam_re = ur'(%s)' % '|'.join([re.escape(x) for x
in self._namesRefs.keys()])
if nam_re != ur'()': re_names = re.compile(nam_re, re.U)
else: re_names = None
chr_re = ur'(%s)' % '|'.join([re.escape(x) for x
in self._charactersRefs.keys()])
if chr_re != ur'()': re_characters = re.compile(chr_re, re.U)
else: re_characters = None
_putRefs(data, re_titles, re_names, re_characters)
return {'data': data, 'titlesRefs': self._titlesRefs,
'namesRefs': self._namesRefs,
'charactersRefs': self._charactersRefs}
class Extractor(object):
"""Instruct the DOM parser about how to parse a document."""
def __init__(self, label, path, attrs, group=None, group_key=None,
group_key_normalize=None):
"""Initialize an Extractor object, used to instruct the DOM parser
about how to parse a document."""
# rarely (never?) used, mostly for debugging purposes.
self.label = label
self.group = group
if group_key is None:
self.group_key = ".//text()"
else:
self.group_key = group_key
self.group_key_normalize = group_key_normalize
self.path = path
# A list of attributes to fetch.
if isinstance(attrs, Attribute):
attrs = [attrs]
self.attrs = attrs
def __repr__(self):
"""String representation of an Extractor object."""
r = '<Extractor id:%s (label=%s, path=%s, attrs=%s, group=%s, ' \
'group_key=%s group_key_normalize=%s)>' % (id(self),
self.label, self.path, repr(self.attrs), self.group,
self.group_key, self.group_key_normalize)
return r
class Attribute(object):
"""The attribute to consider, for a given node."""
def __init__(self, key, multi=False, path=None, joiner=None,
postprocess=None):
"""Initialize an Attribute object, used to specify the
attribute to consider, for a given node."""
# The key under which information will be saved; can be a string or an
# XPath. If None, the label of the containing extractor will be used.
self.key = key
self.multi = multi
self.path = path
if joiner is None:
joiner = ''
self.joiner = joiner
# Post-process this set of information.
self.postprocess = postprocess
def __repr__(self):
"""String representation of an Attribute object."""
r = '<Attribute id:%s (key=%s, multi=%s, path=%s, joiner=%s, ' \
'postprocess=%s)>' % (id(self), self.key,
self.multi, repr(self.path),
self.joiner, repr(self.postprocess))
return r
def _parse_ref(text, link, info):
"""Manage links to references."""
if link.find('/title/tt') != -1:
yearK = re_yearKind_index.match(info)
if yearK and yearK.start() == 0:
text += ' %s' % info[:yearK.end()]
return (text.replace('\n', ' '), link)
class GatherRefs(DOMParserBase):
"""Parser used to gather references to movies, persons and characters."""
_attrs = [Attribute(key=None, multi=True,
path={
'text': './text()',
'link': './@href',
'info': './following::text()[1]'
},
postprocess=lambda x: _parse_ref(x.get('text') or u'', x.get('link') or '',
(x.get('info') or u'').strip()))]
extractors = [
Extractor(label='names refs',
path="//a[starts-with(@href, '/name/nm')][string-length(@href)=16]",
attrs=_attrs),
Extractor(label='titles refs',
path="//a[starts-with(@href, '/title/tt')]" \
"[string-length(@href)=17]",
attrs=_attrs),
Extractor(label='characters refs',
path="//a[starts-with(@href, '/character/ch')]" \
"[string-length(@href)=21]",
attrs=_attrs),
]
def postprocess_data(self, data):
result = {}
for item in ('names refs', 'titles refs', 'characters refs'):
result[item] = {}
for k, v in data.get(item, []):
k = k.strip()
v = v.strip()
if not (k and v):
continue
if not v.endswith('/'): continue
imdbID = analyze_imdbid(v)
if item == 'names refs':
obj = Person(personID=imdbID, name=k,
accessSystem=self._as, modFunct=self._modFunct)
elif item == 'titles refs':
obj = Movie(movieID=imdbID, title=k,
accessSystem=self._as, modFunct=self._modFunct)
else:
obj = Character(characterID=imdbID, name=k,
accessSystem=self._as, modFunct=self._modFunct)
# XXX: companies aren't handled: are they ever found in text,
# as links to their page?
result[item][k] = obj
return result
def add_refs(self, data):
return data
| gpl-3.0 |
arne-cl/pattern | pattern/text/en/modality.py | 21 | 21985 | #### PATTERN | EN | MOOD & MODALITY ################################################################
# -*- coding: utf-8 -*-
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <[email protected]>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
### LIST FUNCTIONS #################################################################################
def find(function, list):
""" Returns the first item in the list for which function(item) is True, None otherwise.
"""
for item in list:
if function(item) == True:
return item
### MOOD ###########################################################################################
# Functions take Sentence objects, see pattern.text.tree.Sentence and pattern.text.parsetree().
INDICATIVE = "indicative" # They went for a walk.
IMPERATIVE = "imperative" # Let's go for a walk!
CONDITIONAL = "conditional" # It might be nice to go for a walk when it stops raining.
SUBJUNCTIVE = "subjunctive" # It would be nice to go for a walk sometime.
def s(word):
return word.string.lower()
def join(words):
return " ".join([w.string.lower() for w in words])
def question(sentence):
return len(sentence) > 0 and sentence[-1].string == "?"
def verb(word):
return word.type.startswith(("VB","MD")) and (word.chunk is None or word.chunk.type.endswith("VP"))
def verbs(sentence, i=0, j=None):
return [w for w in sentence[i:j or len(sentence)] if verb(w)]
def imperative(sentence, **kwargs):
""" The imperative mood is used to give orders, commands, warnings, instructions,
or to make requests (if used with "please").
It is marked by the infinitive form of the verb, without "to":
"For goodness sake, just stop it!"
"""
S = sentence
if not (hasattr(S, "words") and hasattr(S, "parse_token")):
raise TypeError("%s object is not a parsed Sentence" % repr(S.__class__.__name__))
if question(S):
return False
if S.subjects and s(S.subjects[0]) not in ("you", "yourself"):
# The subject can only identify as "you" (2sg): "Control yourself!".
return False
r = s(S).rstrip(" .!")
for cc in ("if", "assuming", "provided that", "given that"):
# A conjunction can also indicate conditional mood.
if cc+" " in r:
return False
for i, w in enumerate(S):
if verb(w):
if s(w) in ("do", "let") and w == verbs(S)[0]:
# "Do your homework!"
return True
if s(w) in ("do", "let"):
# "Let's not argue."
continue
if s(w) in ("would", "should", "'d", "could", "can", "may", "might"):
# "You should leave." => conditional.
return False
if s(w) in ("will", "shall") and i > 0 and s(S[i-1]) == "you" and not verbs(S,0,i):
# "You will eat your dinner."
continue
if w.type == "VB" and (i == 0 or s(S[i-1]) != "to"):
# "Come here!"
return True
# Break on any other verb form.
return False
return False
#from __init__ import parse, Sentence
#
#for str in (
# "Do your homework!", # True
# "Do whatever you want.", # True
# "Do not listen to me.", # True
# "Do it if you think it is necessary.", # False
# "Turn that off, will you.", # True
# "Let's help him.", # True
# "Help me!", # True
# "You will help me.", # True
# "I hope you will help me.", # False
# "I can help you.", # False
# "I can help you if you let me."): # False
# print str
# print parse(str)
# print imperative(Sentence(parse(str)))
# print
def conditional(sentence, predictive=True, **kwargs):
""" The conditional mood is used to talk about possible or imaginary situations.
It is marked by the infinitive form of the verb, preceded by would/could/should:
"we should be going", "we could have stayed longer".
With predictive=False, sentences with will/shall need an explicit if/when/once-clause:
- "I will help you" => predictive.
- "I will help you if you pay me" => speculative.
Sentences with can/may always need an explicit if-clause.
"""
S = sentence
if not (hasattr(S, "words") and hasattr(S, "parse_token")):
raise TypeError("%s object is not a parsed Sentence" % repr(S.__class__.__name__))
if question(S):
return False
i = find(lambda w: s(w) == "were", S)
i = i and i.index or 0
if i > 0 and (s(S[i-1]) in ("i", "it", "he", "she") or S[i-1].type == "NN"):
# "As if it were summer already." => subjunctive (wish).
return False
for i, w in enumerate(S):
if w.type == "MD":
if s(w) == "ought" and i < len(S) and s(S[i+1]) == "to":
# "I ought to help you."
return True
if s(w) in ("would", "should", "'d", "could", "might"):
# "I could help you."
return True
if s(w) in ("will", "shall", "'ll") and i > 0 and s(S[i-1]) == "you" and not verbs(S,0,i):
# "You will help me." => imperative.
return False
if s(w) in ("will", "shall", "'ll") and predictive:
# "I will help you." => predictive.
return True
if s(w) in ("will", "shall", "'ll", "can", "may"):
# "I will help you when I get back." => speculative.
r = s(S).rstrip(" .!")
for cc in ("if", "when", "once", "as soon as", "assuming", "provided that", "given that"):
if cc+" " in r:
return True
return False
#from __init__ import parse, Sentence
#
#for str in (
# "We ought to help him.", # True
# "We could help him.", # True
# "I will help you.", # True
# "You will help me.", # False (imperative)
# "I hope you will help me.", # True (predictive)
# "I can help you.", # False
# "I can help you if you let me."): # True
# print str
# print parse(str)
# print conditional(Sentence(parse(str)))
# print
subjunctive1 = [
"advise", "ask", "command", "demand", "desire", "insist",
"propose", "recommend", "request", "suggest", "urge"]
subjunctive2 = [
"best", "crucial", "desirable", "essential", "imperative",
"important", "recommended", "urgent", "vital"]
for w in list(subjunctive1): # Inflect.
subjunctive1.append(w+"s")
subjunctive1.append(w.rstrip("e")+"ed")
def subjunctive(sentence, classical=True, **kwargs):
""" The subjunctive mood is a classical mood used to express a wish, judgment or opinion.
It is marked by the verb wish/were, or infinitive form of a verb
preceded by an "it is"-statement:
"It is recommended that he bring his own computer."
"""
S = sentence
if not (hasattr(S, "words") and hasattr(S, "parse_token")):
raise TypeError("%s object is not a parsed Sentence" % repr(S.__class__.__name__))
if question(S):
return False
for i, w in enumerate(S):
b = False
if w.type.startswith("VB"):
if s(w).startswith("wish"):
# "I wish I knew."
return True
if s(w) == "hope" and i > 0 and s(S[i-1]) in ("i", "we"):
# "I hope ..."
return True
if s(w) == "were" and i > 0 and (s(S[i-1]) in ("i", "it", "he", "she") or S[i-1].type == "NN"):
# "It is as though she were here." => counterfactual.
return True
if s(w) in subjunctive1:
# "I propose that you be on time."
b = True
elif s(w) == "is" and 0 < i < len(S)-1 and s(S[i-1]) == "it" \
and s(S[i+1]) in subjunctive2:
# "It is important that you be there." => but you aren't (yet).
b = True
elif s(w) == "is" and 0 < i < len(S)-3 and s(S[i-1]) == "it" \
and s(S[i+2]) in ("good", "bad") and s(S[i+3]) == "idea":
# "It is a good idea that you be there."
b = True
if b:
# With classical=False, "It is important that you are there." passes.
# This is actually an informal error: it states a fact, not a wish.
v = find(lambda w: w.type.startswith("VB"), S[i+1:])
if v and classical is True and v and v.type == "VB":
return True
if v and classical is False:
return True
return False
#from __init__ import parse, Sentence
#
#for str in (
# "I wouldn't do that if I were you.", # True
# "I wish I knew.", # True
# "I propose that you be on time.", # True
# "It is a bad idea to be late.", # True
# "I will be dead."): # False, predictive
# print str
# print parse(str)
# print subjunctive(Sentence(parse(str)))
# print
def negated(sentence, negative=("not", "n't", "never")):
if hasattr(sentence, "string"):
# Sentence object => string.
sentence = sentence.string
S = " %s " % (sentence).strip(".?!").lower()
for w in negative:
if " %s " % w in S:
return True
return False
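# Illustrative values (not from the original test-suite):
# negated("He will not come.") is True, negated("He will come.") is False.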
def mood(sentence, **kwargs):
""" Returns IMPERATIVE (command), CONDITIONAL (possibility), SUBJUNCTIVE (wish) or INDICATIVE (fact).
"""
if isinstance(sentence, basestring):
try:
# A Sentence is expected but a string given.
# Attempt to parse the string on-the-fly.
from pattern.en import parse, Sentence
sentence = Sentence(parse(sentence))
except ImportError:
pass
if imperative(sentence, **kwargs):
return IMPERATIVE
if conditional(sentence, **kwargs):
return CONDITIONAL
if subjunctive(sentence, **kwargs):
return SUBJUNCTIVE
else:
return INDICATIVE
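#from __init__ import parse, Sentence
#
# Hedged sketch (illustrative, reusing sentences from the examples above):
#for str in (
#    "Do your homework!",    # IMPERATIVE
#    "We could help him.",   # CONDITIONAL
#    "I wish I knew.",       # SUBJUNCTIVE
#    "It is raining."):      # INDICATIVE
#    print str
#    print mood(Sentence(parse(str)))
#    print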
### MODALITY #######################################################################################
# Functions take Sentence objects, see pattern.text.tree.Sentence and pattern.text.parsetree().
def d(*args):
return dict.fromkeys(args, True)
AUXILLARY = {
"be": ["be", "am", "m", "are", "is", "being", "was", "were" "been"],
"can": ["can", "ca", "could"],
"dare": ["dare", "dares", "daring", "dared"],
"do": ["do", "does", "doing", "did", "done"],
"have": ["have", "ve", "has", "having", "had"],
"may": ["may", "might"],
"must": ["must"],
"need": ["need", "needs", "needing", "needed"],
"ought": ["ought"],
"shall": ["shall", "sha"],
"will": ["will", "ll", "wo", "willing", "would", "d"]
}
MODIFIERS = ("fully", "highly", "most", "much", "strongly", "very")
EPISTEMIC = "epistemic" # Expresses degree of possiblity.
# -1.00 = NEGATIVE
# -0.75 = NEGATIVE, with slight doubts
# -0.50 = NEGATIVE, with doubts
# -0.25 = NEUTRAL, slightly negative
# +0.00 = NEUTRAL
# +0.25 = NEUTRAL, slightly positive
# +0.50 = POSITIVE, with doubts
# +0.75 = POSITIVE, with slight doubts
# +1.00 = POSITIVE
epistemic_MD = { # would => could => can => should => shall => will => must
-1.00: d(),
-0.75: d(),
-0.50: d("would"),
-0.25: d("could", "dare", "might"),
0.00: d("can", "ca", "may"),
+0.25: d("ought", "should"),
+0.50: d("shall", "sha"),
+0.75: d("will", "'ll", "wo"),
+1.00: d("have", "has", "must", "need"),
}
epistemic_VB = { # wish => feel => believe => seem => think => know => prove + THAT
-1.00: d(),
-0.75: d(),
-0.50: d("dispute", "disputed", "doubt", "question"),
-0.25: d("hope", "want", "wish"),
0.00: d("guess", "imagine", "seek"),
+0.25: d("appear", "bet", "feel", "hear", "rumor", "rumour", "say", "said", "seem", "seemed",
"sense", "speculate", "suspect", "suppose", "wager"),
+0.50: d("allude", "anticipate", "assume", "claim", "claimed", "believe", "believed",
"conjecture", "consider", "considered", "decide", "expect", "find", "found",
"hypothesize", "imply", "indicate", "infer", "postulate", "predict", "presume",
"propose", "report", "reported", "suggest", "suggested", "tend",
"think", "thought"),
+0.75: d("know", "known", "look", "see", "show", "shown"),
+1.00: d("certify", "demonstrate", "prove", "proven", "verify"),
}
epistemic_RB = { # unlikely => supposedly => maybe => probably => usually => clearly => definitely
-1.00: d("impossibly"),
-0.75: d("hardly"),
-0.50: d("presumptively", "rarely", "scarcely", "seldomly", "uncertainly", "unlikely"),
-0.25: d("almost", "allegedly", "debatably", "nearly", "presumably", "purportedly", "reportedly",
"reputedly", "rumoredly", "rumouredly", "supposedly"),
0.00: d("barely", "hypothetically", "maybe", "occasionally", "perhaps", "possibly", "putatively",
"sometimes", "sporadically", "traditionally", "widely"),
+0.25: d("admittedly", "apparently", "arguably", "believably", "conceivably", "feasibly", "fairly",
"hopefully", "likely", "ostensibly", "potentially", "probably", "quite", "seemingly"),
+0.50: d("commonly", "credibly", "defendably", "defensibly", "effectively", "frequently",
"generally", "largely", "mostly", "normally", "noticeably", "often", "plausibly",
"reasonably", "regularly", "relatively", "typically", "usually"),
+0.75: d("assuredly", "certainly", "clearly", "doubtless", "evidently", "evitably", "manifestly",
"necessarily", "nevertheless", "observably", "ostensively", "patently", "plainly",
"positively", "really", "surely", "truly", "undoubtably", "undoubtedly", "verifiably"),
+1.00: d("absolutely", "always", "definitely", "incontestably", "indisputably", "indubitably",
"ineluctably", "inescapably", "inevitably", "invariably", "obviously", "unarguably",
"unavoidably", "undeniably", "unquestionably")
}
epistemic_JJ = {
-1.00: d("absurd", "prepostoreous", "ridiculous"),
-0.75: d("inconceivable", "unthinkable"),
-0.50: d("misleading", "scant", "unlikely", "unreliable"),
-0.25: d("customer-centric", "doubtful", "ever", "ill-defined, ""inadequate", "late",
"uncertain", "unclear", "unrealistic", "unspecified", "unsure", "wild"),
0.00: d("dynamic", "possible", "unknown"),
+0.25: d("according", "creative", "likely", "local", "innovative", "interesting",
"potential", "probable", "several", "some", "talented", "viable"),
+0.50: d("certain", "generally", "many", "notable", "numerous", "performance-oriented",
"promising", "putative", "well-known"),
+0.75: d("concrete", "credible", "famous", "important", "major", "necessary", "original",
"positive", "significant", "real", "robust", "substantial", "sure"),
+1.00: d("confirmed", "definite", "prime", "undisputable"),
}
epistemic_NN = {
-1.00: d("fantasy", "fiction", "lie", "myth", "nonsense"),
-0.75: d("controversy"),
-0.50: d("criticism", "debate", "doubt"),
-0.25: d("belief", "chance", "faith", "luck", "perception", "speculation"),
0.00: d("challenge", "guess", "feeling", "hunch", "opinion", "possibility", "question"),
+0.25: d("assumption", "expectation", "hypothesis", "notion", "others", "team"),
+0.50: d("example", "proces", "theory"),
+0.75: d("conclusion", "data", "evidence", "majority", "proof", "symptom", "symptoms"),
+1.00: d("fact", "truth", "power"),
}
epistemic_CC_DT_IN = {
0.00: d("either", "whether"),
+0.25: d("however", "some"),
+1.00: d("despite")
}
epistemic_PRP = {
+0.25: d("I", "my"),
+0.50: d("our"),
+0.75: d("we")
}
epistemic_weaseling = {
-0.75: d("popular belief"),
-0.50: d("but that", "but this", "have sought", "might have", "seems to"),
-0.25: d("may also", "may be", "may have", "may have been", "some have", "sort of"),
+0.00: d("been argued", "believed to", "considered to", "claimed to", "is considered", "is possible",
"overall solutions", "regarded as", "said to"),
+0.25: d("a number of", "in some", "one of", "some of",
"many modern", "many people", "most people", "some people", "some cases", "some studies",
"scientists", "researchers"),
+0.50: d("in several", "is likely", "many of", "many other", "of many", "of the most", "such as",
"several reasons", "several studies", "several universities", "wide range"),
+0.75: d("almost always", "and many", "and some", "around the world", "by many", "in many", "in order to",
"most likely"),
+1.00: d("i.e.", "'s most", "of course", "There are", "without doubt"),
}
def modality(sentence, type=EPISTEMIC):
""" Returns the sentence's modality as a weight between -1.0 and +1.0.
Currently, the only type implemented is EPISTEMIC.
Epistemic modality is used to express possibility (i.e. how truthful is what is being said).
"""
if isinstance(sentence, basestring):
try:
# A Sentence is expected but a string given.
# Attempt to parse the string on-the-fly.
from pattern.en import parse, Sentence
sentence = Sentence(parse(sentence))
except ImportError:
pass
S, n, m = sentence, 0.0, 0
if not (hasattr(S, "words") and hasattr(S, "parse_token")):
raise TypeError("%s object is not a parsed Sentence" % repr(S.__class__.__name__))
if type == EPISTEMIC:
r = S.string.rstrip(" .!")
for k, v in epistemic_weaseling.items():
for phrase in v:
if phrase in r:
n += k
m += 2
for i, w in enumerate(S.words):
for type, dict, weight in (
( "MD", epistemic_MD, 4),
( "VB", epistemic_VB, 2),
( "RB", epistemic_RB, 2),
( "JJ", epistemic_JJ, 1),
( "NN", epistemic_NN, 1),
( "CC", epistemic_CC_DT_IN, 1),
( "DT", epistemic_CC_DT_IN, 1),
( "IN", epistemic_CC_DT_IN, 1),
("PRP" , epistemic_PRP, 1),
("PRP$", epistemic_PRP, 1),
( "WP" , epistemic_PRP, 1)):
# "likely" => weight 1, "very likely" => weight 2
if i > 0 and s(S[i-1]) in MODIFIERS:
weight += 1
# likely" => score 0.25 (neutral inclining towards positive).
if w.type and w.type.startswith(type):
for k, v in dict.items():
# Prefer lemmata.
if (w.lemma or s(w)) in v:
# Reverse score for negated terms.
if i > 0 and s(S[i-1]) in ("not", "n't", "never", "without"):
k = -k * 0.5
n += weight * k
m += weight
break
# Numbers, citations, explanations make the sentence more factual.
if w.type in ("CD", "\"", "'", ":", "("):
n += 0.75
m += 1
if m == 0:
return 1.0 # No modal verbs/adverbs used, so statement must be true.
return max(-1.0, min(n / (m or 1), +1.0))
def uncertain(sentence, threshold=0.5):
return modality(sentence) <= threshold
#from __init__ import parse, Sentence
#
#for str in (
# "I wish it would stop raining.",
# "It will surely stop raining soon."):
# print str
# print parse(str)
# print modality(Sentence(parse(str)))
# print
#---------------------------------------------------------------------------------------------------
# Celle, A. (2009). Hearsay adverbs and modality, in: Modality in English, Mouton.
# Allegedly, presumably, purportedly, ... are in the negative range because
# they introduce a fictitious point of view by referring to an unclear source.
#---------------------------------------------------------------------------------------------------
# Tseronis, A. (2009). Qualifying standpoints. LOT Dissertation Series: 233.
# Following adverbs are not epistemic but indicate the way in which things are said.
# 1) actually, admittedly, avowedly, basically, bluntly, briefly, broadly, candidly,
# confidentially, factually, figuratively, frankly, generally, honestly, hypothetically,
# in effect, in fact, in reality, indeed, literally, metaphorically, naturally,
# of course, objectively, personally, really, roughly, seriously, simply, sincerely,
# strictly, truly, truthfully.
# 2) bizarrely, commendably, conveniently, curiously, disappointingly, fortunately, funnily,
# happily, hopefully, illogically, interestingly, ironically, justifiably, justly, luckily,
# oddly, paradoxically, preferably, regretfully, regrettably, sadly, significantly,
# strangely, surprisingly, tragically, unaccountably, unfortunately, unhappily unreasonably
#---------------------------------------------------------------------------------------------------
# The modality() function was tested with BioScope and Wikipedia training data from CoNLL2010 Shared Task 1.
# See for example Morante, R., Van Asch, V., Daelemans, W. (2010):
# Memory-Based Resolution of In-Sentence Scopes of Hedge Cues
# http://www.aclweb.org/anthology/W/W10/W10-3006.pdf
# Sentences in the training corpus are labelled as "certain" or "uncertain".
# For Wikipedia sentences, 2000 "certain" and 2000 "uncertain":
# modality(sentence) > 0.5 => A 0.70 P 0.73 R 0.64 F1 0.68 | bsd-3-clause |
dennybaa/st2 | st2client/st2client/utils/date.py | 7 | 1586 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dateutil.tz
import dateutil.parser
__all__ = [
'parse',
'format_isodate'
]
def add_utc_tz(dt):
return dt.replace(tzinfo=dateutil.tz.tzutc())
def parse(value):
dt = dateutil.parser.parse(str(value))
# pylint: disable=no-member
# For some reason pylint thinks it returns a tuple but it returns a datetime object
return dt if dt.tzinfo else add_utc_tz(dt)
def format_isodate(value):
"""
Make a ISO date time string human friendly.
:type value: ``str``
:rtype: ``str``
"""
if not value:
return ''
# pylint: disable=no-member
# For some reason pylint thinks it returns a tuple but it returns a datetime object
date = dateutil.parser.parse(str(value))
value = date.strftime('%a, %d %b %Y %H:%M:%S %Z')
return value
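# Hedged illustration (not part of the original module): given the strftime
# pattern above, format_isodate('2015-06-01T12:30:00Z') renders roughly as
# 'Mon, 01 Jun 2015 12:30:00 UTC'; the trailing zone name depends on how
# dateutil labels the parsed timezone.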
| apache-2.0 |
sipwise/repoapi | repoapi/wsgi.py | 1 | 1088 | # Copyright (C) 2015 The Sipwise Team - http://sipwise.com
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""
WSGI config for repoapi project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "repoapi.settings.prod")
application = get_wsgi_application()
| gpl-3.0 |
mindnervestech/mnrp | addons/hr_timesheet_invoice/report/report_analytic.py | 299 | 5164 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp import tools
from openerp.addons.decimal_precision import decimal_precision as dp
class report_analytic_account_close(osv.osv):
_name = "report.analytic.account.close"
_description = "Analytic account to close"
_auto = False
_columns = {
'name': fields.many2one('account.analytic.account', 'Analytic account', readonly=True),
'state': fields.char('Status', readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
'quantity': fields.float('Quantity', readonly=True),
'quantity_max': fields.float('Max. Quantity', readonly=True),
'balance': fields.float('Balance', readonly=True),
'date_deadline': fields.date('Deadline', readonly=True),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'report_analytic_account_close')
cr.execute("""
create or replace view report_analytic_account_close as (
select
a.id as id,
a.id as name,
a.state as state,
sum(l.unit_amount) as quantity,
sum(l.amount) as balance,
a.partner_id as partner_id,
a.quantity_max as quantity_max,
a.date as date_deadline
from
account_analytic_line l
right join
account_analytic_account a on (l.account_id=a.id)
group by
a.id,a.state, a.quantity_max,a.date,a.partner_id
having
(a.quantity_max>0 and (sum(l.unit_amount)>=a.quantity_max)) or
a.date <= current_date
)""")
class report_account_analytic_line_to_invoice(osv.osv):
_name = "report.account.analytic.line.to.invoice"
_description = "Analytic lines to invoice report"
_auto = False
_columns = {
'name': fields.char('Year', required=False, readonly=True),
'product_id':fields.many2one('product.product', 'Product', readonly=True),
'account_id':fields.many2one('account.analytic.account', 'Analytic account', readonly=True),
'product_uom_id':fields.many2one('product.uom', 'Unit of Measure', readonly=True),
'unit_amount': fields.float('Units', readonly=True),
'sale_price': fields.float('Sale price', readonly=True, digits_compute=dp.get_precision('Product Price')),
'amount': fields.float('Amount', readonly=True, digits_compute=dp.get_precision('Account')),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month', readonly=True),
}
_order = 'name desc, product_id asc, account_id asc'
def init(self, cr):
tools.drop_view_if_exists(cr, 'report_account_analytic_line_to_invoice')
cr.execute("""
CREATE OR REPLACE VIEW report_account_analytic_line_to_invoice AS (
SELECT
DISTINCT(to_char(l.date,'MM')) as month,
to_char(l.date, 'YYYY') as name,
MIN(l.id) AS id,
l.product_id,
l.account_id,
SUM(l.amount) AS amount,
SUM(l.unit_amount*t.list_price) AS sale_price,
SUM(l.unit_amount) AS unit_amount,
l.product_uom_id
FROM
account_analytic_line l
left join
product_product p on (l.product_id=p.id)
left join
product_template t on (p.product_tmpl_id=t.id)
WHERE
(invoice_id IS NULL) and (to_invoice IS NOT NULL)
GROUP BY
to_char(l.date, 'YYYY'), to_char(l.date,'MM'), product_id, product_uom_id, account_id
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ozburo/youtube-dl | youtube_dl/extractor/tf1.py | 3 | 3073 | # coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_iso8601,
try_get,
)
class TF1IE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?tf1\.fr/[^/]+/(?P<program_slug>[^/]+)/videos/(?P<id>[^/?&#]+)\.html'
_TESTS = [{
'url': 'https://www.tf1.fr/tmc/quotidien-avec-yann-barthes/videos/quotidien-premiere-partie-11-juin-2019.html',
'info_dict': {
'id': '13641379',
'ext': 'mp4',
'title': 'md5:f392bc52245dc5ad43771650c96fb620',
'description': 'md5:a02cdb217141fb2d469d6216339b052f',
'upload_date': '20190611',
'timestamp': 1560273989,
'duration': 1738,
'series': 'Quotidien avec Yann Barthès',
'tags': ['intégrale', 'quotidien', 'Replay'],
},
'params': {
# Sometimes wat serves the whole file with the --test option
'skip_download': True,
'format': 'bestvideo',
},
}, {
'url': 'http://www.tf1.fr/tf1/koh-lanta/videos/replay-koh-lanta-22-mai-2015.html',
'only_matching': True,
}, {
'url': 'http://www.tf1.fr/hd1/documentaire/videos/mylene-farmer-d-une-icone.html',
'only_matching': True,
}]
def _real_extract(self, url):
program_slug, slug = re.match(self._VALID_URL, url).groups()
video = self._download_json(
'https://www.tf1.fr/graphql/web', slug, query={
'id': '9b80783950b85247541dd1d851f9cc7fa36574af015621f853ab111a679ce26f',
'variables': json.dumps({
'programSlug': program_slug,
'slug': slug,
})
})['data']['videoBySlug']
wat_id = video['streamId']
tags = []
for tag in (video.get('tags') or []):
label = tag.get('label')
if not label:
continue
tags.append(label)
decoration = video.get('decoration') or {}
thumbnails = []
for source in (try_get(decoration, lambda x: x['image']['sources'], list) or []):
source_url = source.get('url')
if not source_url:
continue
thumbnails.append({
'url': source_url,
'width': int_or_none(source.get('width')),
})
return {
'_type': 'url_transparent',
'id': wat_id,
'url': 'wat:' + wat_id,
'title': video.get('title'),
'thumbnails': thumbnails,
'description': decoration.get('description'),
'timestamp': parse_iso8601(video.get('date')),
'duration': int_or_none(try_get(video, lambda x: x['publicPlayingInfos']['duration'])),
'tags': tags,
'series': decoration.get('programLabel'),
'season_number': int_or_none(video.get('season')),
'episode_number': int_or_none(video.get('episode')),
}
| unlicense |
sam-roth/Keypad | keypad/plugins/shell/bourne_model.py | 1 | 4068 | import subprocess
import shlex
from keypad.api import (Plugin,
register_plugin,
Filetype,
Cursor)
from keypad.abstract.code import IndentRetainingCodeModel, AbstractCompletionResults
from keypad.core.syntaxlib import SyntaxHighlighter, lazy
from keypad.core.processmgr.client import AsyncServerProxy
from keypad.core.fuzzy import FuzzyMatcher
from keypad.core.executors import future_wrap
from keypad.core.attributed_string import AttributedString
@lazy
def lexer():
from . import bourne_lexer
return bourne_lexer.Shell
class GetManPage:
def __init__(self, cmd):
self.cmd = cmd
def __call__(self, ns):
with subprocess.Popen(['man', self.cmd], stdout=subprocess.PIPE) as proc:
out, _ = proc.communicate()
import re
return [re.subn('.\x08', '', out.decode())[0]]
class ShellCompletionResults(AbstractCompletionResults):
def __init__(self, token_start, results, prox):
'''
token_start - the (line, col) position at which the token being completed starts
'''
super().__init__(token_start)
self.results = [(AttributedString(x.decode()),) for x in results]
self._prox = prox
def doc_async(self, index):
'''
Return a Future for the documentation for a given completion result as a list of
AttributedString.
'''
return self._prox.submit(GetManPage(self.text(index)))
@property
def rows(self):
'''
Return a list of tuples of AttributedString containing the contents of
each column for each row in the completion results.
'''
return self._filtered.rows
def text(self, index):
'''
Return the text that should be inserted for the given completion.
'''
return self._filtered.rows[index][0].text
def filter(self, text=''):
'''
Filter the completion results using the given text.
'''
self._filtered = FuzzyMatcher(text).filter(self.results, key=lambda x: x[0].text)
self._filtered.sort(lambda item: len(item[0].text))
def dispose(self):
pass
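# Hypothetical usage sketch (the token position and candidate list are assumed
# values). Results are filtered against the user's partial input before rows
# are displayed:
#
#   results = ShellCompletionResults((0, 0), [b'grep', b'git', b'gzip'], prox=None)
#   results.filter('g')       # fuzzy-match and rank the candidates
#   best = results.text(0)    # text inserted for the top-ranked completion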
class GetPathItems:
def __init__(self, prefix):
self.prefix = prefix
def __call__(self, ns):
with subprocess.Popen(['bash',
'-c',
'compgen -c ' + shlex.quote(self.prefix)],
stdout=subprocess.PIPE) as proc:
out, _ = proc.communicate()
return [l.strip() for l in out.splitlines()]
class BourneCodeModel(IndentRetainingCodeModel):
completion_triggers = []
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self._prox = AsyncServerProxy()
self._prox.start()
def dispose(self):
self._prox.shutdown()
super().dispose()
def highlight(self):
'''
Rehighlight the buffer.
'''
highlighter = SyntaxHighlighter(
'keypad.plugins.shell.syntax',
lexer(),
dict(lexcat=None)
)
highlighter.highlight_buffer(self.buffer)
def completions_async(self, pos):
'''
Return a future to the completions available at the given position in the document.
Raise NotImplementedError if not implemented.
'''
c = Cursor(self.buffer).move(pos)
text_to_pos = c.line.text[:c.x]
for x, ch in reversed(list(enumerate(text_to_pos))):
if ch.isspace():
x += 1
break
else:
x = 0
print('text_to_pos', text_to_pos[x:], pos)
return self._prox.submit(GetPathItems(text_to_pos[x:]),
transform=lambda r: ShellCompletionResults((pos[0], x), r,
self._prox))
| gpl-3.0 |
ehashman/oh-mainline | vendor/packages/docutils/docutils/transforms/peps.py | 122 | 11065 | # $Id: peps.py 6433 2010-09-28 08:21:25Z milde $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Transforms for PEP processing.
- `Headers`: Used to transform a PEP's initial RFC-2822 header. It remains a
field list, but some entries get processed.
- `Contents`: Auto-inserts a table of contents.
- `PEPZero`: Special processing for PEP 0.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import time
from docutils import nodes, utils, languages
from docutils import ApplicationError, DataError
from docutils.transforms import Transform, TransformError
from docutils.transforms import parts, references, misc
class Headers(Transform):
"""
Process fields in a PEP's initial RFC-2822 header.
"""
default_priority = 360
pep_url = 'pep-%04d'
pep_cvs_url = ('http://svn.python.org/view/*checkout*'
'/peps/trunk/pep-%04d.txt')
rcs_keyword_substitutions = (
(re.compile(r'\$' r'RCSfile: (.+),v \$$', re.IGNORECASE), r'\1'),
(re.compile(r'\$[a-zA-Z]+: (.+) \$$'), r'\1'),)
def apply(self):
if not len(self.document):
# @@@ replace these DataErrors with proper system messages
raise DataError('Document tree is empty.')
header = self.document[0]
if not isinstance(header, nodes.field_list) or \
'rfc2822' not in header['classes']:
raise DataError('Document does not begin with an RFC-2822 '
'header; it is not a PEP.')
pep = None
for field in header:
if field[0].astext().lower() == 'pep': # should be the first field
value = field[1].astext()
try:
pep = int(value)
cvs_url = self.pep_cvs_url % pep
except ValueError:
pep = value
cvs_url = None
msg = self.document.reporter.warning(
'"PEP" header must contain an integer; "%s" is an '
'invalid value.' % pep, base_node=field)
msgid = self.document.set_id(msg)
prb = nodes.problematic(value, value or '(none)',
refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
if len(field[1]):
field[1][0][:] = [prb]
else:
field[1] += nodes.paragraph('', '', prb)
break
if pep is None:
raise DataError('Document does not contain an RFC-2822 "PEP" '
'header.')
if pep == 0:
# Special processing for PEP 0.
pending = nodes.pending(PEPZero)
self.document.insert(1, pending)
self.document.note_pending(pending)
if len(header) < 2 or header[1][0].astext().lower() != 'title':
raise DataError('No title!')
for field in header:
name = field[0].astext().lower()
body = field[1]
if len(body) > 1:
raise DataError('PEP header field body contains multiple '
'elements:\n%s' % field.pformat(level=1))
elif len(body) == 1:
if not isinstance(body[0], nodes.paragraph):
raise DataError('PEP header field body may only contain '
'a single paragraph:\n%s'
% field.pformat(level=1))
elif name == 'last-modified':
date = time.strftime(
'%d-%b-%Y',
time.localtime(os.stat(self.document['source'])[8]))
if cvs_url:
body += nodes.paragraph(
'', '', nodes.reference('', date, refuri=cvs_url))
else:
# empty
continue
para = body[0]
if name == 'author':
for node in para:
if isinstance(node, nodes.reference):
node.replace_self(mask_email(node))
elif name == 'discussions-to':
for node in para:
if isinstance(node, nodes.reference):
node.replace_self(mask_email(node, pep))
elif name in ('replaces', 'replaced-by', 'requires'):
newbody = []
space = nodes.Text(' ')
for refpep in re.split(',?\s+', body.astext()):
pepno = int(refpep)
newbody.append(nodes.reference(
refpep, refpep,
refuri=(self.document.settings.pep_base_url
+ self.pep_url % pepno)))
newbody.append(space)
para[:] = newbody[:-1] # drop trailing space
elif name == 'last-modified':
utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
if cvs_url:
date = para.astext()
para[:] = [nodes.reference('', date, refuri=cvs_url)]
elif name == 'content-type':
pep_type = para.astext()
uri = self.document.settings.pep_base_url + self.pep_url % 12
para[:] = [nodes.reference('', pep_type, refuri=uri)]
elif name == 'version' and len(body):
utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
class Contents(Transform):
"""
Insert an empty table of contents topic and a transform placeholder into
the document after the RFC 2822 header.
"""
default_priority = 380
def apply(self):
language = languages.get_language(self.document.settings.language_code,
self.document.reporter)
name = language.labels['contents']
title = nodes.title('', name)
topic = nodes.topic('', title, classes=['contents'])
name = nodes.fully_normalize_name(name)
if not self.document.has_name(name):
topic['names'].append(name)
self.document.note_implicit_target(topic)
pending = nodes.pending(parts.Contents)
topic += pending
self.document.insert(1, topic)
self.document.note_pending(pending)
class TargetNotes(Transform):
"""
Locate the "References" section, insert a placeholder for an external
target footnote insertion transform at the end, and schedule the
transform to run immediately.
"""
default_priority = 520
def apply(self):
doc = self.document
i = len(doc) - 1
refsect = copyright = None
while i >= 0 and isinstance(doc[i], nodes.section):
title_words = doc[i][0].astext().lower().split()
if 'references' in title_words:
refsect = doc[i]
break
elif 'copyright' in title_words:
copyright = i
i -= 1
if not refsect:
refsect = nodes.section()
refsect += nodes.title('', 'References')
doc.set_id(refsect)
if copyright:
# Put the new "References" section before "Copyright":
doc.insert(copyright, refsect)
else:
# Put the new "References" section at end of doc:
doc.append(refsect)
pending = nodes.pending(references.TargetNotes)
refsect.append(pending)
self.document.note_pending(pending, 0)
pending = nodes.pending(misc.CallBack,
details={'callback': self.cleanup_callback})
refsect.append(pending)
self.document.note_pending(pending, 1)
def cleanup_callback(self, pending):
"""
Remove an empty "References" section.
Called after the `references.TargetNotes` transform is complete.
"""
if len(pending.parent) == 2: # <title> and <pending>
pending.parent.parent.remove(pending.parent)
class PEPZero(Transform):
"""
Special processing for PEP 0.
"""
    default_priority = 760
def apply(self):
visitor = PEPZeroSpecial(self.document)
self.document.walk(visitor)
self.startnode.parent.remove(self.startnode)
class PEPZeroSpecial(nodes.SparseNodeVisitor):
"""
Perform the special processing needed by PEP 0:
- Mask email addresses.
- Link PEP numbers in the second column of 4-column tables to the PEPs
themselves.
"""
pep_url = Headers.pep_url
def unknown_visit(self, node):
pass
def visit_reference(self, node):
node.replace_self(mask_email(node))
def visit_field_list(self, node):
if 'rfc2822' in node['classes']:
raise nodes.SkipNode
def visit_tgroup(self, node):
self.pep_table = node['cols'] == 4
self.entry = 0
def visit_colspec(self, node):
self.entry += 1
if self.pep_table and self.entry == 2:
node['classes'].append('num')
def visit_row(self, node):
self.entry = 0
def visit_entry(self, node):
self.entry += 1
if self.pep_table and self.entry == 2 and len(node) == 1:
node['classes'].append('num')
p = node[0]
if isinstance(p, nodes.paragraph) and len(p) == 1:
text = p.astext()
try:
pep = int(text)
ref = (self.document.settings.pep_base_url
+ self.pep_url % pep)
p[0] = nodes.reference(text, text, refuri=ref)
except ValueError:
pass
non_masked_addresses = ('[email protected]',
'[email protected]',
'[email protected]')
def mask_email(ref, pepno=None):
"""
Mask the email address in `ref` and return a replacement node.
`ref` is returned unchanged if it contains no email address.
For email addresses such as "user@host", mask the address as "user at
host" (text) to thwart simple email address harvesters (except for those
listed in `non_masked_addresses`). If a PEP number (`pepno`) is given,
return a reference including a default email subject.
"""
if ref.hasattr('refuri') and ref['refuri'].startswith('mailto:'):
if ref['refuri'][8:] in non_masked_addresses:
replacement = ref[0]
else:
replacement_text = ref.astext().replace('@', ' at ')
replacement = nodes.raw('', replacement_text, format='html')
if pepno is None:
return replacement
else:
ref['refuri'] += '?subject=PEP%%20%s' % pepno
ref[:] = [replacement]
return ref
else:
return ref
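# Illustration (hypothetical addresses): for a reference whose refuri is
# "mailto:user@host", mask_email() swaps the link text for a raw HTML node
# reading "user at host"; when a PEP number is supplied, the refuri instead
# gains a default subject, e.g. "mailto:user@host?subject=PEP%2042", and the
# masked text is kept as the reference's child.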
| agpl-3.0 |
dmvieira/P.O.D. | func.py | 1 | 5799 | from mergesort import *
def comeca(sequencia,entrada,entrada2,entrada3):
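    # Descriptive note: this routine appears to read a FASTA-like file (entrada),
    # keep only the characters valid for the chosen alphabet ('r' = RNA,
    # 'p' = protein, anything else = DNA), write the cleaned records to entrada2,
    # and pass per-sequence multiplicity counts to ordena() so that a diversity
    # report is written to entrada3.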
div=open(entrada3,'w')
t=open(entrada,'r')
saida=open(entrada2,'w')
x=t.readlines()
if (x[-1][-1])<>'\n':
comp=x[-1][-1]
comp=comp+'\n'
x.insert(-1,comp)
comp=x[-1]
comp=comp+'\n'
del(x[-1])
x.insert(-1,comp)
del(x[-1])
l=[]
b=0
t.close()
if sequencia=='r':
for j in range(0,len(x)):
k=len(x[j])
if x[j][0]=='>':
if b==1:
l.append(c)
l.append(x[j][:k-1])
c=""
b=1
else:
y=""
for i in range(0,k-1):
if x[j][i] == 'a' or x[j][i] == 'A' or x[j][i] == 'c' or x[j][i] == 'C' or x[j][i] == 'g' or x[j][i] == 'G' or x[j][i] == 'u' or x[j][i] == 'U' or x[j][i] == 'r' or x[j][i] == 'R' or x[j][i] == 'y' or x[j][i] == 'Y' or x[j][i] == 'k' or x[j][i] == 'K' or x[j][i] == 'm' or x[j][i] == 'M' or x[j][i] == 's' or x[j][i] == 'S' or x[j][i] == 'w' or x[j][i] == 'W' or x[j][i] == 'b' or x[j][i] == 'B' or x[j][i] == 'd' or x[j][i] == 'D' or x[j][i] == 'h' or x[j][i] == 'H' or x[j][i] == 'v' or x[j][i] == 'V' or x[j][i] == 'n' or x[j][i] == 'N':
y=y+x[j][i]
c=c+y
l.append(c)
elif sequencia=='p':
for j in range(0,len(x)):
k=len(x[j])
if x[j][0]=='>':
if b==1:
l.append(c)
l.append(x[j][:k-1])
c=""
b=1
else:
y=""
for i in range(0,k-1):
if x[j][i] == 'a' or x[j][i] == 'A' or x[j][i] == 'c' or x[j][i] == 'C' or x[j][i] == 'g' or x[j][i] == 'G' or x[j][i] == 'v' or x[j][i] == 'V' or x[j][i] == 'L' or x[j][i] == 'l' or x[j][i] == 'I' or x[j][i] == 'i' or x[j][i] == 'S' or x[j][i] == 's' or x[j][i] == 'T' or x[j][i] == 't' or x[j][i] == 'Y' or x[j][i] == 'y' or x[j][i] == 'M' or x[j][i] == 'm' or x[j][i] == 'd' or x[j][i] == 'D' or x[j][i] == 'n' or x[j][i] == 'N' or x[j][i] == 'E' or x[j][i] == 'e' or x[j][i] == 'Q' or x[j][i] == 'q' or x[j][i] == 'R' or x[j][i] == 'r' or x[j][i] == 'K' or x[j][i] == 'k' or x[j][i] == 'H' or x[j][i] == 'h' or x[j][i] == 'F' or x[j][i] == 'f' or x[j][i] == 'W' or x[j][i] == 'w' or x[j][i] == 'P' or x[j][i] == 'p' or x[j][i] == 'b' or x[j][i] == 'B' or x[j][i] == 'z' or x[j][i] == 'Z' or x[j][i] == 'x' or x[j][i] == 'X' or x[j][i] == 'u' or x[j][i] == 'U':
y=y+x[j][i]
c=c+y
l.append(c)
else:
for j in range(0,len(x)):
k=len(x[j])
if x[j][0]=='>':
if b==1:
l.append(c)
l.append(x[j][:k-1])
c=""
b=1
else:
y=""
for i in range(0,k-1):
if x[j][i] == 'a' or x[j][i] == 'A' or x[j][i] == 'c' or x[j][i] == 'C' or x[j][i] == 'g' or x[j][i] == 'G' or x[j][i] == 't' or x[j][i] == 'T' or x[j][i] == 'r' or x[j][i] == 'R' or x[j][i] == 'y' or x[j][i] == 'Y' or x[j][i] == 'k' or x[j][i] == 'K' or x[j][i] == 'm' or x[j][i] == 'M' or x[j][i] == 's' or x[j][i] == 'S' or x[j][i] == 'w' or x[j][i] == 'W' or x[j][i] == 'b' or x[j][i] == 'B' or x[j][i] == 'd' or x[j][i] == 'D' or x[j][i] == 'h' or x[j][i] == 'H' or x[j][i] == 'v' or x[j][i] == 'V' or x[j][i] == 'n' or x[j][i] == 'N':
y=y+x[j][i]
c=c+y
l.append(c)
dec,dic={},{}
for j in range(0,len(l),2):
alta=(l[j+1]).upper()
del(l[j+1])
l.insert(j+1,alta)
if (dic.has_key((l[j+1][::-1])))==True:
del(l[j+1])
l.insert((j+1),alta[::-1])
d={l[j]:l[j+1]}
dec.update(d)
d={l[j+1]:l[j]}
dic.update(d)
vou=dic.keys()
v=dec.values()
diversidade=[]
dic={}
for j in range(0,len(l),2):
alta=(l[j+1])
divo=(len(alta))/65
if divo > 0:
alta2=''
for h in range(1,divo+1):
alta2=alta2+alta[(65*(h-1)):(65*h)]+'\n'
alta=alta2+alta[65*divo:]
del(l[j+1])
l.insert(j+1,alta)
d= {alta:l[j]}
dic.update(d)
key=dic.keys()
value=dic.values()
for j in range(len(key)):
saida.write(value[j]+'\n'+key[j]+'\n')
diversidade.append((v.count(vou[j])))
saida.close()
ordena(diversidade, value, key, div)
div.close()
| gpl-3.0 |
tectronics/huhamhire-hosts | doc/dev/conf.py | 23 | 8175 | # -*- coding: utf-8 -*-
#
# huhamhire-hosts documentation build configuration file, created by
# sphinx-quickstart on Tue Jan 14 10:49:55 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
sys.path.insert(0, os.path.abspath('../../'))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
'sphinx.ext.graphviz',
'sphinx.ext.inheritance_diagram',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Hosts Setup Utility'
copyright = u'2011-2014, huhamhire-hosts team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.9.8'
# The full version, including alpha/beta/rc tags.
release = '1.9.8 beta'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
#exclude_patterns = []
#unused_docs = ["gpl"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
html_theme_options = {
"stickysidebar": True,
"collapsiblesidebar": False,
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'huhamhire-hostsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
'inputenc': '',
'utf8extra': '',
'preamble': '''
\\hypersetup{unicode=true}
\\usepackage{CJKutf8}
\\AtBeginDocument{\\begin{CJK}{UTF8}{}}
\\AtEndDocument{\\end{CJK}}
''',
'papersize': 'a4paper',
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'huhamhire-hosts.tex', u'Hosts Setup Utility Documentation',
u'huhamhire-hosts team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
latex_show_pagerefs = True
# If true, show URL addresses after external links.
latex_show_urls = True
# Documents to append as an appendix to all manuals.
latex_appendices = ['gpl']
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'huhamhire-hosts', u'Hosts Setup Utility Documentation',
[u'huhamhire-hosts team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'huhamhire-hosts', u'Hosts Setup Utility Documentation',
u'huhamhire-hosts team', 'huhamhire-hosts',
'Easy managing hosts file.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
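# With this configuration in place, the documentation is typically built with the
# standard Sphinx CLI (output directories below are conventional examples, not
# values taken from this file):
#
#   sphinx-build -b html . _build/html
#   sphinx-build -b latex . _build/latex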
| gpl-3.0 |
cdriehuys/chmvh-website | chmvh_website/contact/forms.py | 1 | 2333 | import logging
from smtplib import SMTPException
from captcha.fields import ReCaptchaField
from django import forms
from django.conf import settings
from django.core import mail
from django.template import loader
logger = logging.getLogger("chmvh_website.{0}".format(__name__))
class ContactForm(forms.Form):
captcha = ReCaptchaField()
name = forms.CharField()
email = forms.EmailField()
message = forms.CharField(widget=forms.Textarea(attrs={"rows": 5}))
street_address = forms.CharField(required=False)
city = forms.CharField(required=False)
zipcode = forms.CharField(required=False)
template = loader.get_template("contact/email/message.txt")
def clean_city(self):
"""
If no city was provided, use a default string.
"""
if not self.cleaned_data["city"]:
return "<No City Given>"
return self.cleaned_data["city"]
def send_email(self):
assert self.is_valid(), self.errors
subject = "[CHMVH Website] Message from {}".format(
self.cleaned_data["name"]
)
address_line_2_parts = [self.cleaned_data["city"], "North Carolina"]
if self.cleaned_data["zipcode"]:
address_line_2_parts.append(self.cleaned_data["zipcode"])
address_line_1 = self.cleaned_data["street_address"]
address_line_2 = ", ".join(address_line_2_parts)
address = ""
if address_line_1:
address = "\n".join([address_line_1, address_line_2])
context = {
"name": self.cleaned_data["name"],
"email": self.cleaned_data["email"],
"message": self.cleaned_data["message"],
"address": address,
}
logger.debug("Preparing to send email")
try:
emails_sent = mail.send_mail(
subject,
self.template.render(context),
settings.DEFAULT_FROM_EMAIL,
["[email protected]"],
)
logger.info(
"Succesfully sent email from {0}".format(
self.cleaned_data["email"]
)
)
except SMTPException as e:
emails_sent = 0
logger.exception("Failed to send email.", exc_info=e)
return emails_sent == 1
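# Hypothetical usage sketch (request handling and variable names are assumed):
# a view would typically validate the form and then ask it to send the message:
#
#   form = ContactForm(data=request.POST)
#   if form.is_valid():          # requires the captcha field to validate too
#       sent = form.send_email() # True when exactly one message was sent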
| mit |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/bx_python-0.7.1-py2.7-linux-x86_64.egg/bx/intervals/operations/concat.py | 7 | 2623 | """
Concatenate sets of intervals.
Preserves format of the first input -- it is possible to concat two files that
have different column orders. Of course, the meta-data of the second will be
lost (and filled with a "."). If all of the files (GenomicInteralReaders) are
the same format, sameformat=True will preserve all columns of the first input,
cuts extra columns on subsequent input, and pads missing columns. If
sameformat=False then extra columns are filled with ".".
"""
import psyco_full
import traceback
import fileinput
from warnings import warn
from bx.intervals.io import *
from bx.intervals.operations import *
def concat(readers, comments=True, header=True, sameformat=True):
# Save columns from the first input
chrom_col = readers[0].chrom_col
start_col = readers[0].start_col
end_col = readers[0].end_col
strand_col = readers[0].strand_col
nfields = None
firstdataset = True
output = False
for intervals in readers:
for interval in intervals:
if isinstance(interval, GenomicInterval):
if not nfields: nfields = interval.nfields
out_interval = interval.copy()
if sameformat or firstdataset:
# everything except the first input has to be
# trimmed or padded to match the first input
if len(out_interval.fields) > nfields:
out_interval.fields = out_interval.fields[0:nfields]
while len(out_interval.fields) < nfields:
out_interval.fields.append(".")
output = True
yield out_interval
else:
chrom = out_interval.chrom
start = out_interval.start
end = out_interval.end
strand = out_interval.strand
out_interval.fields = ["." for col in range(nfields)]
out_interval.fields[chrom_col] = chrom
out_interval.fields[start_col] = str(start)
out_interval.fields[end_col] = str(end)
# Strand is optional, might not exist in output
if strand_col < len( out_interval.fields ):
out_interval.fields[strand_col] = strand
yield out_interval
elif isinstance(interval, Header) and header:
yield interval
elif isinstance(interval, Comment) and comments:
yield interval
if output and firstdataset: firstdataset = False
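# Hypothetical usage sketch (file names are placeholders): concat() consumes
# GenomicIntervalReader-like iterators and yields intervals using the column
# layout of the first reader:
#
#   from bx.intervals.io import GenomicIntervalReader
#   readers = [GenomicIntervalReader(open(path)) for path in ("a.bed", "b.bed")]
#   for interval in concat(readers, sameformat=False):
#       print(interval)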
| apache-2.0 |
blindpenguin/blackboard | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/flock_tool.py | 1835 | 1748 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""These functions are executed via gyp-flock-tool when using the Makefile
generator. Used on systems that don't have a built-in flock."""
import fcntl
import os
import struct
import subprocess
import sys
def main(args):
executor = FlockTool()
executor.Dispatch(args)
class FlockTool(object):
"""This class emulates the 'flock' command."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
# Note that the stock python on SunOS has a bug
# where fcntl.flock(fd, LOCK_EX) always fails
# with EBADF, that's why we use this F_SETLK
# hack instead.
fd = os.open(lockfile, os.O_WRONLY|os.O_NOCTTY|os.O_CREAT, 0666)
if sys.platform.startswith('aix'):
# Python on AIX is compiled with LARGEFILE support, which changes the
# struct size.
op = struct.pack('hhIllqq', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
else:
op = struct.pack('hhllhhl', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
fcntl.fcntl(fd, fcntl.F_SETLK, op)
return subprocess.call(cmd_list)
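# A hypothetical manual invocation equivalent to ExecFlock above (lock file and
# wrapped command are illustrative):
#
#   python flock_tool.py flock /tmp/build.lock make -j4
#
# This holds an exclusive lock on /tmp/build.lock while the wrapped command runs.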
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| gpl-3.0 |
Kevindeving/android_kernel_lge_gee | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network devices.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
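# Worked example: diff_msec(1000000, 4500000) == 3.5, i.e. an event occurring
# 3.5 msec after the base timestamp (both arguments are nanoseconds).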
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a correspoinding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
	# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |
mvaled/OpenUpgrade | openerp/addons/base/ir/ir_sequence.py | 83 | 14810 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import time
import openerp
from openerp.osv import osv
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class ir_sequence_type(openerp.osv.osv.osv):
_name = 'ir.sequence.type'
_order = 'name'
_columns = {
'name': openerp.osv.fields.char('Name', required=True),
'code': openerp.osv.fields.char('Code', size=32, required=True),
}
_sql_constraints = [
('code_unique', 'unique(code)', '`code` must be unique.'),
]
def _code_get(self, cr, uid, context=None):
cr.execute('select code, name from ir_sequence_type')
return cr.fetchall()
class ir_sequence(openerp.osv.osv.osv):
""" Sequence model.
The sequence model allows to define and use so-called sequence objects.
Such objects are used to generate unique identifiers in a transaction-safe
way.
"""
_name = 'ir.sequence'
_order = 'name'
def _get_number_next_actual(self, cr, user, ids, field_name, arg, context=None):
'''Return number from ir_sequence row when no_gap implementation,
and number from postgres sequence when standard implementation.'''
res = dict.fromkeys(ids)
for element in self.browse(cr, user, ids, context=context):
if element.implementation != 'standard':
res[element.id] = element.number_next
else:
# get number from postgres sequence. Cannot use
# currval, because that might give an error when
# not having used nextval before.
statement = (
"SELECT last_value, increment_by, is_called"
" FROM ir_sequence_%03d"
% element.id)
cr.execute(statement)
(last_value, increment_by, is_called) = cr.fetchone()
if is_called:
res[element.id] = last_value + increment_by
else:
res[element.id] = last_value
return res
def _set_number_next_actual(self, cr, uid, id, name, value, args=None, context=None):
return self.write(cr, uid, id, {'number_next': value or 0}, context=context)
_columns = {
'name': openerp.osv.fields.char('Name', size=64, required=True),
'code': openerp.osv.fields.selection(_code_get, 'Sequence Type', size=64),
'implementation': openerp.osv.fields.selection( # TODO update the view
[('standard', 'Standard'), ('no_gap', 'No gap')],
'Implementation', required=True,
help="Two sequence object implementations are offered: Standard "
"and 'No gap'. The later is slower than the former but forbids any"
" gap in the sequence (while they are possible in the former)."),
'active': openerp.osv.fields.boolean('Active'),
'prefix': openerp.osv.fields.char('Prefix', help="Prefix value of the record for the sequence"),
'suffix': openerp.osv.fields.char('Suffix', help="Suffix value of the record for the sequence"),
'number_next': openerp.osv.fields.integer('Next Number', required=True, help="Next number of this sequence"),
'number_next_actual': openerp.osv.fields.function(_get_number_next_actual, fnct_inv=_set_number_next_actual, type='integer', required=True, string='Next Number', help='Next number that will be used. This number can be incremented frequently so the displayed value might already be obsolete'),
'number_increment': openerp.osv.fields.integer('Increment Number', required=True, help="The next number of the sequence will be incremented by this number"),
        'padding' : openerp.osv.fields.integer('Number Padding', required=True, help="Odoo will automatically add some '0' on the left of the 'Next Number' to get the required padding size."),
'company_id': openerp.osv.fields.many2one('res.company', 'Company'),
}
_defaults = {
'implementation': 'standard',
'active': True,
'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'ir.sequence', context=c),
'number_increment': 1,
'number_next': 1,
'number_next_actual': 1,
'padding' : 0,
}
def init(self, cr):
return # Don't do the following index yet.
# CONSTRAINT/UNIQUE INDEX on (code, company_id)
# /!\ The unique constraint 'unique_name_company_id' is not sufficient, because SQL92
# only support field names in constraint definitions, and we need a function here:
# we need to special-case company_id to treat all NULL company_id as equal, otherwise
# we would allow duplicate (code, NULL) ir_sequences.
cr.execute("""
SELECT indexname FROM pg_indexes WHERE indexname =
'ir_sequence_unique_code_company_id_idx'""")
if not cr.fetchone():
cr.execute("""
CREATE UNIQUE INDEX ir_sequence_unique_code_company_id_idx
ON ir_sequence (code, (COALESCE(company_id,-1)))""")
def _create_sequence(self, cr, id, number_increment, number_next):
""" Create a PostreSQL sequence.
There is no access rights check.
"""
if number_increment == 0:
raise osv.except_osv(_('Warning!'),_("Increment number must not be zero."))
assert isinstance(id, (int, long))
sql = "CREATE SEQUENCE ir_sequence_%03d INCREMENT BY %%s START WITH %%s" % id
cr.execute(sql, (number_increment, number_next))
def _drop_sequence(self, cr, ids):
""" Drop the PostreSQL sequence if it exists.
There is no access rights check.
"""
ids = ids if isinstance(ids, (list, tuple)) else [ids]
assert all(isinstance(i, (int, long)) for i in ids), \
"Only ids in (int, long) allowed."
names = ','.join('ir_sequence_%03d' % i for i in ids)
# RESTRICT is the default; it prevents dropping the sequence if an
# object depends on it.
cr.execute("DROP SEQUENCE IF EXISTS %s RESTRICT " % names)
def _alter_sequence(self, cr, id, number_increment, number_next=None):
""" Alter a PostreSQL sequence.
There is no access rights check.
"""
if number_increment == 0:
raise osv.except_osv(_('Warning!'),_("Increment number must not be zero."))
assert isinstance(id, (int, long))
seq_name = 'ir_sequence_%03d' % (id,)
cr.execute("SELECT relname FROM pg_class WHERE relkind = %s AND relname=%s", ('S', seq_name))
if not cr.fetchone():
# sequence is not created yet, we're inside create() so ignore it, will be set later
return
statement = "ALTER SEQUENCE %s INCREMENT BY %d" % (seq_name, number_increment)
if number_next is not None:
statement += " RESTART WITH %d" % (number_next, )
cr.execute(statement)
def create(self, cr, uid, values, context=None):
""" Create a sequence, in implementation == standard a fast gaps-allowed PostgreSQL sequence is used.
"""
values = self._add_missing_default_values(cr, uid, values, context)
values['id'] = super(ir_sequence, self).create(cr, uid, values, context)
if values['implementation'] == 'standard':
self._create_sequence(cr, values['id'], values['number_increment'], values['number_next'])
return values['id']
def unlink(self, cr, uid, ids, context=None):
super(ir_sequence, self).unlink(cr, uid, ids, context)
self._drop_sequence(cr, ids)
return True
def write(self, cr, uid, ids, values, context=None):
if not isinstance(ids, (list, tuple)):
ids = [ids]
new_implementation = values.get('implementation')
rows = self.read(cr, uid, ids, ['implementation', 'number_increment', 'number_next'], context)
super(ir_sequence, self).write(cr, uid, ids, values, context)
for row in rows:
# 4 cases: we test the previous impl. against the new one.
i = values.get('number_increment', row['number_increment'])
n = values.get('number_next', row['number_next'])
if row['implementation'] == 'standard':
if new_implementation in ('standard', None):
# Implementation has NOT changed.
# Only change sequence if really requested.
if row['number_next'] != n:
self._alter_sequence(cr, row['id'], i, n)
else:
# Just in case only increment changed
self._alter_sequence(cr, row['id'], i)
else:
self._drop_sequence(cr, row['id'])
else:
if new_implementation in ('no_gap', None):
pass
else:
self._create_sequence(cr, row['id'], i, n)
return True
def _interpolate(self, s, d):
if s:
return s % d
return ''
def _interpolation_dict(self):
t = time.localtime() # Actually, the server is always in UTC.
return {
'year': time.strftime('%Y', t),
'month': time.strftime('%m', t),
'day': time.strftime('%d', t),
'y': time.strftime('%y', t),
'doy': time.strftime('%j', t),
'woy': time.strftime('%W', t),
'weekday': time.strftime('%w', t),
'h24': time.strftime('%H', t),
'h12': time.strftime('%I', t),
'min': time.strftime('%M', t),
'sec': time.strftime('%S', t),
}
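    # Illustration (values are hypothetical): with prefix 'INV/%(year)s/%(month)s/',
    # padding 4 and number_next 42, _next() below would render something like
    # 'INV/2014/06/0042' -- the prefix/suffix are interpolated with the dictionary
    # built above and the number is zero-padded to the configured width.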
def _next(self, cr, uid, ids, context=None):
if not ids:
return False
if context is None:
context = {}
force_company = context.get('force_company')
if not force_company:
force_company = self.pool.get('res.users').browse(cr, uid, uid).company_id.id
sequences = self.read(cr, uid, ids, ['name','company_id','implementation','number_next','prefix','suffix','padding'])
preferred_sequences = [s for s in sequences if s['company_id'] and s['company_id'][0] == force_company ]
seq = preferred_sequences[0] if preferred_sequences else sequences[0]
if seq['implementation'] == 'standard':
cr.execute("SELECT nextval('ir_sequence_%03d')" % seq['id'])
seq['number_next'] = cr.fetchone()
else:
cr.execute("SELECT number_next FROM ir_sequence WHERE id=%s FOR UPDATE NOWAIT", (seq['id'],))
cr.execute("UPDATE ir_sequence SET number_next=number_next+number_increment WHERE id=%s ", (seq['id'],))
self.invalidate_cache(cr, uid, ['number_next'], [seq['id']], context=context)
d = self._interpolation_dict()
try:
interpolated_prefix = self._interpolate(seq['prefix'], d)
interpolated_suffix = self._interpolate(seq['suffix'], d)
except ValueError:
raise osv.except_osv(_('Warning'), _('Invalid prefix or suffix for sequence \'%s\'') % (seq.get('name')))
return interpolated_prefix + '%%0%sd' % seq['padding'] % seq['number_next'] + interpolated_suffix
def next_by_id(self, cr, uid, sequence_id, context=None):
""" Draw an interpolated string using the specified sequence."""
self.check_access_rights(cr, uid, 'read')
company_ids = self.pool.get('res.company').search(cr, uid, [], context=context) + [False]
ids = self.search(cr, uid, ['&',('id','=', sequence_id),('company_id','in',company_ids)])
return self._next(cr, uid, ids, context)
def next_by_code(self, cr, uid, sequence_code, context=None):
""" Draw an interpolated string using a sequence with the requested code.
If several sequences with the correct code are available to the user
(multi-company cases), the one from the user's current company will
be used.
:param dict context: context dictionary may contain a
``force_company`` key with the ID of the company to
use instead of the user's current company for the
sequence selection. A matching sequence for that
specific company will get higher priority.
"""
self.check_access_rights(cr, uid, 'read')
company_ids = self.pool.get('res.company').search(cr, uid, [], context=context) + [False]
ids = self.search(cr, uid, ['&', ('code', '=', sequence_code), ('company_id', 'in', company_ids)])
return self._next(cr, uid, ids, context)
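    # Hypothetical caller sketch ('sale.order' is an example sequence code):
    #
    #   seq_obj = self.pool.get('ir.sequence')
    #   number = seq_obj.next_by_code(cr, uid, 'sale.order', context=context)
    #
    # A 'force_company' key in the context selects the matching company's
    # sequence when several sequences share the same code.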
def get_id(self, cr, uid, sequence_code_or_id, code_or_id='id', context=None):
""" Draw an interpolated string using the specified sequence.
The sequence to use is specified by the ``sequence_code_or_id``
argument, which can be a code or an id (as controlled by the
``code_or_id`` argument. This method is deprecated.
"""
# TODO: bump up to warning after 6.1 release
_logger.debug("ir_sequence.get() and ir_sequence.get_id() are deprecated. "
"Please use ir_sequence.next_by_code() or ir_sequence.next_by_id().")
if code_or_id == 'id':
return self.next_by_id(cr, uid, sequence_code_or_id, context)
else:
return self.next_by_code(cr, uid, sequence_code_or_id, context)
def get(self, cr, uid, code, context=None):
""" Draw an interpolated string using the specified sequence.
The sequence to use is specified by its code. This method is
deprecated.
"""
return self.get_id(cr, uid, code, 'code', context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
isra17/DIE | DIE/UI/AboutScreen.py | 7 | 1977 |
from idaapi import Form
import os
import DIE.Lib.DieConfig
from sark.qt import QtGui, QtCore
class AboutWindow(QtGui.QDialog):
def __init__(self):
super(AboutWindow, self).__init__()
self.initUI()
def initUI(self):
config = DIE.Lib.DieConfig.get_config()
self.setFixedSize(400, 330)
self.setWindowTitle("About DIE")
image = QtGui.QImage(os.path.join(config.icons_path, "logo.png"))
pixmap = QtGui.QPixmap.fromImage(image)
logo = QtGui.QLabel(self)
logo.setFixedSize(pixmap.size())
logo.move(0.5*(self.width() - logo.width()), 20)
logo.setPixmap(pixmap)
title = QtGui.QLabel("DIE",self)
title.setAlignment(QtCore.Qt.AlignCenter)
font = title.font()
font.setPointSize(16)
font.setBold(True)
title.setFont(font)
title.setFixedWidth(400)
title.move(0, logo.height() + logo.y() + 20)
subtitle = QtGui.QLabel("Dynamic IDA Enrichment framework",self)
font = subtitle.font()
font.setPointSize(14)
subtitle.setFont(font)
subtitle.setAlignment(QtCore.Qt.AlignCenter)
subtitle.setFixedWidth(400)
subtitle.move(0, title.height() + title.y() + 10)
version = QtGui.QLabel("Version 0.1",self)
font = subtitle.font()
font.setPointSize(12)
version.setFont(font)
version.setAlignment(QtCore.Qt.AlignCenter)
version.setFixedWidth(400)
version.move(0, subtitle.height() + subtitle.y() + 30)
author = QtGui.QLabel("Written by Yaniv Balmas @ynvb - Check Point Software Technologies",self)
font = subtitle.font()
font.setPointSize(12)
author.setFont(font)
author.setAlignment(QtCore.Qt.AlignCenter)
author.setFixedWidth(400)
author.move(0, version.height() + version.y())
self.show()
| mit |
stevekuznetsov/ansible | lib/ansible/modules/cloud/cloudstack/cs_affinitygroup.py | 48 | 7633 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cs_affinitygroup
short_description: Manages affinity groups on Apache CloudStack based clouds.
description:
- Create and remove affinity groups.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the affinity group.
required: true
affinty_type:
description:
- Type of the affinity group. If not specified, first found affinity type is used.
required: false
default: null
description:
description:
- Description of the affinity group.
required: false
default: null
state:
description:
- State of the affinity group.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
domain:
description:
- Domain the affinity group is related to.
required: false
default: null
account:
description:
- Account the affinity group is related to.
required: false
default: null
project:
description:
- Name of the project the affinity group is related to.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Create an affinity group
- local_action:
module: cs_affinitygroup
name: haproxy
affinty_type: host anti-affinity
# Remove an affinity group
- local_action:
module: cs_affinitygroup
name: haproxy
state: absent
'''
RETURN = '''
---
id:
description: UUID of the affinity group.
returned: success
type: string
sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
name:
description: Name of affinity group.
returned: success
type: string
sample: app
description:
description: Description of affinity group.
returned: success
type: string
sample: application affinity group
affinity_type:
description: Type of affinity group.
returned: success
type: string
sample: host anti-affinity
project:
description: Name of project the affinity group is related to.
returned: success
type: string
sample: Production
domain:
description: Domain the affinity group is related to.
returned: success
type: string
sample: example domain
account:
description: Account the affinity group is related to.
returned: success
type: string
sample: example account
'''
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackAffinityGroup(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackAffinityGroup, self).__init__(module)
self.returns = {
'type': 'affinity_type',
}
self.affinity_group = None
def get_affinity_group(self):
if not self.affinity_group:
args = {
'projectid': self.get_project(key='id'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'name': self.module.params.get('name'),
}
affinity_groups = self.cs.listAffinityGroups(**args)
if affinity_groups:
self.affinity_group = affinity_groups['affinitygroup'][0]
return self.affinity_group
def get_affinity_type(self):
affinity_type = self.module.params.get('affinty_type')
affinity_types = self.cs.listAffinityGroupTypes()
if affinity_types:
if not affinity_type:
return affinity_types['affinityGroupType'][0]['type']
for a in affinity_types['affinityGroupType']:
if a['type'] == affinity_type:
return a['type']
self.module.fail_json(msg="affinity group type '%s' not found" % affinity_type)
def create_affinity_group(self):
affinity_group = self.get_affinity_group()
if not affinity_group:
self.result['changed'] = True
args = {
'name': self.module.params.get('name'),
'type': self.get_affinity_type(),
'description': self.module.params.get('description'),
'projectid': self.get_project(key='id'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
}
if not self.module.check_mode:
res = self.cs.createAffinityGroup(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if res and poll_async:
affinity_group = self.poll_job(res, 'affinitygroup')
return affinity_group
def remove_affinity_group(self):
affinity_group = self.get_affinity_group()
if affinity_group:
self.result['changed'] = True
args = {
'name': self.module.params.get('name'),
'projectid': self.get_project(key='id'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
}
if not self.module.check_mode:
res = self.cs.deleteAffinityGroup(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if res and poll_async:
self.poll_job(res, 'affinitygroup')
return affinity_group
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
affinty_type=dict(default=None),
description=dict(default=None),
state=dict(choices=['present', 'absent'], default='present'),
domain=dict(default=None),
account=dict(default=None),
project=dict(default=None),
poll_async=dict(type='bool', default=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
try:
acs_ag = AnsibleCloudStackAffinityGroup(module)
state = module.params.get('state')
if state in ['absent']:
affinity_group = acs_ag.remove_affinity_group()
else:
affinity_group = acs_ag.create_affinity_group()
result = acs_ag.get_result(affinity_group)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
ghedsouza/snakebite | test/effective_user_test.py | 7 | 1151 | from minicluster_testbase import MiniClusterTestBase
from snakebite.client import Client
import os
class EffectiveUserTest(MiniClusterTestBase):
ERR_MSG_TOUCH = "org.apache.hadoop.security.AccessControlException\nPermission denied: user=__foobar"
ERR_MSG_STAT = "`/foobar2': No such file or directory"
VALID_FILE = '/foobar'
INVALID_FILE = '/foobar2'
def setUp(self):
self.custom_client = Client(self.cluster.host, self.cluster.port)
self.custom_foobar_client = Client(host=self.cluster.host,
port=self.cluster.port,
effective_user='__foobar')
def test_touch(self):
print tuple(self.custom_client.touchz([self.VALID_FILE]))
try:
tuple(self.custom_foobar_client.touchz([self.INVALID_FILE]))
except Exception, e:
self.assertTrue(e.message.startswith(self.ERR_MSG_TOUCH))
self.custom_client.stat([self.VALID_FILE])
try:
self.custom_client.stat([self.INVALID_FILE])
except Exception, e:
self.assertEquals(e.message, self.ERR_MSG_STAT)
| apache-2.0 |
gdsfactory/gdsfactory | pp/components/coupler.py | 1 | 2755 | import pp
from pp.component import Component
from pp.components.coupler_straight import coupler_straight
from pp.components.coupler_symmetric import coupler_symmetric
from pp.cross_section import get_waveguide_settings
from pp.snap import assert_on_1nm_grid
from pp.types import ComponentFactory
@pp.cell_with_validator
def coupler(
gap: float = 0.236,
length: float = 20.0,
coupler_symmetric_factory: ComponentFactory = coupler_symmetric,
coupler_straight_factory: ComponentFactory = coupler_straight,
dy: float = 5.0,
dx: float = 10.0,
waveguide: str = "strip",
**kwargs
) -> Component:
r"""Symmetric coupler.
Args:
        gap: gap between the coupled straights
        length: length of the coupling region
        coupler_symmetric_factory: factory for the S-bend sections on either side
        coupler_straight_factory: factory for the straight coupling section
        dy: port to port vertical spacing
        dx: length of bend in x direction
        waveguide: waveguide name from tech.waveguide
        kwargs: overwrites waveguide_settings
.. code::
dx dx
|------| |------|
W1 ________ _______E1
\ / |
\ length / |
======================= gap | dy
/ \ |
________/ \_______ |
W0 E0
coupler_straight_factory coupler_symmetric_factory
"""
assert_on_1nm_grid(length)
assert_on_1nm_grid(gap)
c = Component()
waveguide_settings = get_waveguide_settings(waveguide, **kwargs)
sbend = coupler_symmetric_factory(gap=gap, dy=dy, dx=dx, **waveguide_settings)
sr = c << sbend
sl = c << sbend
cs = c << coupler_straight_factory(length=length, gap=gap, **waveguide_settings)
sl.connect("W1", destination=cs.ports["W0"])
sr.connect("W0", destination=cs.ports["E0"])
c.add_port("W1", port=sl.ports["E0"])
c.add_port("W0", port=sl.ports["E1"])
c.add_port("E0", port=sr.ports["E0"])
c.add_port("E1", port=sr.ports["E1"])
c.absorb(sl)
c.absorb(sr)
c.absorb(cs)
c.length = sbend.length
c.min_bend_radius = sbend.min_bend_radius
return c
if __name__ == "__main__":
# c = pp.Component()
# cp1 = c << coupler(gap=0.2)
# cp2 = c << coupler(gap=0.5)
# cp1.ymin = 0
# cp2.ymin = 0
# c = coupler(gap=0.2, waveguide="nitride")
# c = coupler(width=0.9, length=1, dy=2, gap=0.2)
# print(c.settings_changed)
c = coupler(gap=0.2, waveguide="nitride")
# c = coupler(gap=0.2, waveguide="strip_heater")
c.show()
| mit |
40223104/2015cd_w17 | static/Brython3.1.1-20150328-091302/Lib/urllib/parse.py | 735 | 35170 | """Parse (absolute and relative) URLs.
urlparse module is based upon the following RFC specifications.
RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding
and L. Masinter, January 2005.
RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter
and L.Masinter, December 1999.
RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T.
Berners-Lee, R. Fielding, and L. Masinter, August 1998.
RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zawinski, July 1998.
RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June
1995.
RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M.
McCahill, December 1994
RFC 3986 is considered the current standard and any future changes to
urlparse module should conform with it. The urlparse module is
currently not entirely compliant with this RFC due to de facto
scenarios for parsing, and for backward compatibility purposes, some
parsing quirks from older RFCs are retained. The testcases in
test_urlparse.py provide a good indicator of parsing behavior.
"""
import re
import sys
import collections
__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
"urlsplit", "urlunsplit", "urlencode", "parse_qs",
"parse_qsl", "quote", "quote_plus", "quote_from_bytes",
"unquote", "unquote_plus", "unquote_to_bytes"]
# A classification of schemes ('' means apply by default)
uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',
'wais', 'file', 'https', 'shttp', 'mms',
'prospero', 'rtsp', 'rtspu', '', 'sftp',
'svn', 'svn+ssh']
uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
'imap', 'wais', 'file', 'mms', 'https', 'shttp',
'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh']
uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
'mms', '', 'sftp', 'tel']
# These are not actually used anymore, but should stay for backwards
# compatibility. (They are undocumented, but have a public-looking name.)
non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
'nntp', 'wais', 'https', 'shttp', 'snews',
'file', 'prospero', '']
# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789'
'+-.')
# XXX: Consider replacing with functools.lru_cache
MAX_CACHE_SIZE = 20
_parse_cache = {}
def clear_cache():
"""Clear the parse cache and the quoters cache."""
_parse_cache.clear()
_safe_quoters.clear()
# Helpers for bytes handling
# For 3.2, we deliberately require applications that
# handle improperly quoted URLs to do their own
# decoding and encoding. If valid use cases are
# presented, we may relax this by using latin-1
# decoding internally for 3.3
_implicit_encoding = 'ascii'
_implicit_errors = 'strict'
def _noop(obj):
return obj
def _encode_result(obj, encoding=_implicit_encoding,
errors=_implicit_errors):
return obj.encode(encoding, errors)
def _decode_args(args, encoding=_implicit_encoding,
errors=_implicit_errors):
return tuple(x.decode(encoding, errors) if x else '' for x in args)
def _coerce_args(*args):
# Invokes decode if necessary to create str args
# and returns the coerced inputs along with
# an appropriate result coercion function
# - noop for str inputs
# - encoding function otherwise
str_input = isinstance(args[0], str)
for arg in args[1:]:
# We special-case the empty string to support the
# "scheme=''" default argument to some functions
if arg and isinstance(arg, str) != str_input:
raise TypeError("Cannot mix str and non-str arguments")
if str_input:
return args + (_noop,)
return _decode_args(args) + (_encode_result,)
# Result objects are more helpful than simple tuples
class _ResultMixinStr(object):
"""Standard approach to encoding parsed results from str to bytes"""
__slots__ = ()
def encode(self, encoding='ascii', errors='strict'):
return self._encoded_counterpart(*(x.encode(encoding, errors) for x in self))
class _ResultMixinBytes(object):
"""Standard approach to decoding parsed results from bytes to str"""
__slots__ = ()
def decode(self, encoding='ascii', errors='strict'):
return self._decoded_counterpart(*(x.decode(encoding, errors) for x in self))
class _NetlocResultMixinBase(object):
"""Shared methods for the parsed result objects containing a netloc element"""
__slots__ = ()
@property
def username(self):
return self._userinfo[0]
@property
def password(self):
return self._userinfo[1]
@property
def hostname(self):
hostname = self._hostinfo[0]
if not hostname:
hostname = None
elif hostname is not None:
hostname = hostname.lower()
return hostname
@property
def port(self):
port = self._hostinfo[1]
if port is not None:
port = int(port, 10)
# Return None on an illegal port
if not ( 0 <= port <= 65535):
return None
return port
class _NetlocResultMixinStr(_NetlocResultMixinBase, _ResultMixinStr):
__slots__ = ()
@property
def _userinfo(self):
netloc = self.netloc
userinfo, have_info, hostinfo = netloc.rpartition('@')
if have_info:
username, have_password, password = userinfo.partition(':')
if not have_password:
password = None
else:
username = password = None
return username, password
@property
def _hostinfo(self):
netloc = self.netloc
_, _, hostinfo = netloc.rpartition('@')
_, have_open_br, bracketed = hostinfo.partition('[')
if have_open_br:
hostname, _, port = bracketed.partition(']')
_, have_port, port = port.partition(':')
else:
hostname, have_port, port = hostinfo.partition(':')
if not have_port:
port = None
return hostname, port
class _NetlocResultMixinBytes(_NetlocResultMixinBase, _ResultMixinBytes):
__slots__ = ()
@property
def _userinfo(self):
netloc = self.netloc
userinfo, have_info, hostinfo = netloc.rpartition(b'@')
if have_info:
username, have_password, password = userinfo.partition(b':')
if not have_password:
password = None
else:
username = password = None
return username, password
@property
def _hostinfo(self):
netloc = self.netloc
_, _, hostinfo = netloc.rpartition(b'@')
_, have_open_br, bracketed = hostinfo.partition(b'[')
if have_open_br:
hostname, _, port = bracketed.partition(b']')
_, have_port, port = port.partition(b':')
else:
hostname, have_port, port = hostinfo.partition(b':')
if not have_port:
port = None
return hostname, port
from collections import namedtuple
_DefragResultBase = namedtuple('DefragResult', 'url fragment')
_SplitResultBase = namedtuple('SplitResult', 'scheme netloc path query fragment')
_ParseResultBase = namedtuple('ParseResult', 'scheme netloc path params query fragment')
# For backwards compatibility, alias _NetlocResultMixinStr
# ResultBase is no longer part of the documented API, but it is
# retained since deprecating it isn't worth the hassle
ResultBase = _NetlocResultMixinStr
# Structured result objects for string data
class DefragResult(_DefragResultBase, _ResultMixinStr):
__slots__ = ()
def geturl(self):
if self.fragment:
return self.url + '#' + self.fragment
else:
return self.url
class SplitResult(_SplitResultBase, _NetlocResultMixinStr):
__slots__ = ()
def geturl(self):
return urlunsplit(self)
class ParseResult(_ParseResultBase, _NetlocResultMixinStr):
__slots__ = ()
def geturl(self):
return urlunparse(self)
# Structured result objects for bytes data
class DefragResultBytes(_DefragResultBase, _ResultMixinBytes):
__slots__ = ()
def geturl(self):
if self.fragment:
return self.url + b'#' + self.fragment
else:
return self.url
class SplitResultBytes(_SplitResultBase, _NetlocResultMixinBytes):
__slots__ = ()
def geturl(self):
return urlunsplit(self)
class ParseResultBytes(_ParseResultBase, _NetlocResultMixinBytes):
__slots__ = ()
def geturl(self):
return urlunparse(self)
# Set up the encode/decode result pairs
def _fix_result_transcoding():
_result_pairs = (
(DefragResult, DefragResultBytes),
(SplitResult, SplitResultBytes),
(ParseResult, ParseResultBytes),
)
for _decoded, _encoded in _result_pairs:
_decoded._encoded_counterpart = _encoded
_encoded._decoded_counterpart = _decoded
_fix_result_transcoding()
del _fix_result_transcoding
def urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
splitresult = urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = splitresult
if scheme in uses_params and ';' in url:
url, params = _splitparams(url)
else:
params = ''
result = ParseResult(scheme, netloc, url, params, query, fragment)
return _coerce_result(result)
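# Hedged example (not part of the original module): a small, import-safe
# self-check showing the 6-tuple and derived attributes returned by
# urlparse(). Wrapped in a function so importing this module has no side
# effects; the URL is a made-up placeholder.
def _example_urlparse_usage():
    parts = urlparse('http://user:[email protected]:8042/over/there;type=a?name=ferret#nose')
    assert parts.scheme == 'http'
    assert parts.netloc == 'user:[email protected]:8042'
    assert parts.path == '/over/there'
    assert parts.params == 'type=a'
    assert parts.query == 'name=ferret'
    assert parts.fragment == 'nose'
    assert (parts.hostname, parts.port) == ('host.example.com', 8042)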
def _splitparams(url):
if '/' in url:
i = url.find(';', url.rfind('/'))
if i < 0:
return url, ''
else:
i = url.find(';')
return url[:i], url[i+1:]
def _splitnetloc(url, start=0):
delim = len(url) # position of end of domain part of url, default is end
for c in '/?#': # look for delimiters; the order is NOT important
wdelim = url.find(c, start) # find first of this delim
if wdelim >= 0: # if found
delim = min(delim, wdelim) # use earliest delim position
return url[start:delim], url[delim:] # return (domain, rest)
def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
allow_fragments = bool(allow_fragments)
key = url, scheme, allow_fragments, type(url), type(scheme)
cached = _parse_cache.get(key, None)
if cached:
return _coerce_result(cached)
if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
clear_cache()
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
if url[:i] == 'http': # optimize the common case
scheme = url[:i].lower()
url = url[i+1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return _coerce_result(v)
for c in url[:i]:
if c not in scheme_chars:
break
else:
# make sure "url" is not actually a port number (in which case
# "scheme" is really part of the path)
rest = url[i+1:]
if not rest or any(c not in '0123456789' for c in rest):
# not a port number
scheme, url = url[:i].lower(), rest
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return _coerce_result(v)
def urlunparse(components):
"""Put a parsed URL back together again. This may result in a
slightly different, but equivalent URL, if the URL that was parsed
originally had redundant delimiters, e.g. a ? with an empty query
(the draft states that these are equivalent)."""
scheme, netloc, url, params, query, fragment, _coerce_result = (
_coerce_args(*components))
if params:
url = "%s;%s" % (url, params)
return _coerce_result(urlunsplit((scheme, netloc, url, query, fragment)))
def urlunsplit(components):
"""Combine the elements of a tuple as returned by urlsplit() into a
complete URL as a string. The data argument can be any five-item iterable.
This may result in a slightly different, but equivalent URL, if the URL that
was parsed originally had unnecessary delimiters (for example, a ? with an
empty query; the RFC states that these are equivalent)."""
scheme, netloc, url, query, fragment, _coerce_result = (
_coerce_args(*components))
if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
if url and url[:1] != '/': url = '/' + url
url = '//' + (netloc or '') + url
if scheme:
url = scheme + ':' + url
if query:
url = url + '?' + query
if fragment:
url = url + '#' + fragment
return _coerce_result(url)
def urljoin(base, url, allow_fragments=True):
"""Join a base URL and a possibly relative URL to form an absolute
interpretation of the latter."""
if not base:
return url
if not url:
return base
base, url, _coerce_result = _coerce_args(base, url)
bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
urlparse(base, '', allow_fragments)
scheme, netloc, path, params, query, fragment = \
urlparse(url, bscheme, allow_fragments)
if scheme != bscheme or scheme not in uses_relative:
return _coerce_result(url)
if scheme in uses_netloc:
if netloc:
return _coerce_result(urlunparse((scheme, netloc, path,
params, query, fragment)))
netloc = bnetloc
if path[:1] == '/':
return _coerce_result(urlunparse((scheme, netloc, path,
params, query, fragment)))
if not path and not params:
path = bpath
params = bparams
if not query:
query = bquery
return _coerce_result(urlunparse((scheme, netloc, path,
params, query, fragment)))
segments = bpath.split('/')[:-1] + path.split('/')
# XXX The stuff below is bogus in various ways...
if segments[-1] == '.':
segments[-1] = ''
while '.' in segments:
segments.remove('.')
while 1:
i = 1
n = len(segments) - 1
while i < n:
if (segments[i] == '..'
and segments[i-1] not in ('', '..')):
del segments[i-1:i+1]
break
i = i+1
else:
break
if segments == ['', '..']:
segments[-1] = ''
elif len(segments) >= 2 and segments[-1] == '..':
segments[-2:] = ['']
return _coerce_result(urlunparse((scheme, netloc, '/'.join(segments),
params, query, fragment)))
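# Hedged example (not part of the original module): relative reference
# resolution as performed by urljoin() above, using RFC 3986-style cases.
def _example_urljoin_usage():
    assert urljoin('http://a/b/c/d;p?q', 'g') == 'http://a/b/c/g'
    assert urljoin('http://a/b/c/d;p?q', '../g') == 'http://a/b/g'
    assert urljoin('http://a/b/c/d;p?q', '//other/x') == 'http://other/x'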
def urldefrag(url):
"""Removes any existing fragment from URL.
Returns a tuple of the defragmented URL and the fragment. If
the URL contained no fragments, the second element is the
empty string.
"""
url, _coerce_result = _coerce_args(url)
if '#' in url:
s, n, p, a, q, frag = urlparse(url)
defrag = urlunparse((s, n, p, a, q, ''))
else:
frag = ''
defrag = url
return _coerce_result(DefragResult(defrag, frag))
_hexdig = '0123456789ABCDEFabcdef'
_hextobyte = {(a + b).encode(): bytes([int(a + b, 16)])
for a in _hexdig for b in _hexdig}
def unquote_to_bytes(string):
"""unquote_to_bytes('abc%20def') -> b'abc def'."""
# Note: strings are encoded as UTF-8. This is only an issue if it contains
# unescaped non-ASCII characters, which URIs should not.
if not string:
# Is it a string-like object?
string.split
return b''
if isinstance(string, str):
string = string.encode('utf-8')
bits = string.split(b'%')
if len(bits) == 1:
return string
res = [bits[0]]
append = res.append
for item in bits[1:]:
try:
append(_hextobyte[item[:2]])
append(item[2:])
except KeyError:
append(b'%')
append(item)
return b''.join(res)
_asciire = re.compile('([\x00-\x7f]+)')
def unquote(string, encoding='utf-8', errors='replace'):
"""Replace %xx escapes by their single-character equivalent. The optional
encoding and errors parameters specify how to decode percent-encoded
sequences into Unicode characters, as accepted by the bytes.decode()
method.
By default, percent-encoded sequences are decoded with UTF-8, and invalid
sequences are replaced by a placeholder character.
unquote('abc%20def') -> 'abc def'.
"""
if '%' not in string:
string.split
return string
if encoding is None:
encoding = 'utf-8'
if errors is None:
errors = 'replace'
bits = _asciire.split(string)
res = [bits[0]]
append = res.append
for i in range(1, len(bits), 2):
append(unquote_to_bytes(bits[i]).decode(encoding, errors))
append(bits[i + 1])
return ''.join(res)
def parse_qs(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
"""Parse a query given as a string argument.
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
"""
parsed_result = {}
pairs = parse_qsl(qs, keep_blank_values, strict_parsing,
encoding=encoding, errors=errors)
for name, value in pairs:
if name in parsed_result:
parsed_result[name].append(value)
else:
parsed_result[name] = [value]
return parsed_result
def parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
"""Parse a query given as a string argument.
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
strict_parsing: flag indicating what to do with parsing errors. If
false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
Returns a list, as G-d intended.
"""
qs, _coerce_result = _coerce_args(qs)
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError("bad query field: %r" % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = unquote(name, encoding=encoding, errors=errors)
name = _coerce_result(name)
value = nv[1].replace('+', ' ')
value = unquote(value, encoding=encoding, errors=errors)
value = _coerce_result(value)
r.append((name, value))
return r
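# Hedged example (not part of the original module): how parse_qs() and
# parse_qsl() treat repeated names, percent-escapes and blank values.
def _example_parse_qs_usage():
    assert parse_qsl('a=1;b=2') == [('a', '1'), ('b', '2')]
    assert parse_qs('a=1&a=2&b=%20') == {'a': ['1', '2'], 'b': [' ']}
    # blank values are dropped unless keep_blank_values is true
    assert parse_qs('a=&b=1') == {'b': ['1']}
    assert parse_qs('a=&b=1', keep_blank_values=True) == {'a': [''], 'b': ['1']}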
def unquote_plus(string, encoding='utf-8', errors='replace'):
"""Like unquote(), but also replace plus signs by spaces, as required for
unquoting HTML form values.
unquote_plus('%7e/abc+def') -> '~/abc def'
"""
string = string.replace('+', ' ')
return unquote(string, encoding, errors)
_ALWAYS_SAFE = frozenset(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
b'abcdefghijklmnopqrstuvwxyz'
b'0123456789'
b'_.-')
_ALWAYS_SAFE_BYTES = bytes(_ALWAYS_SAFE)
_safe_quoters = {}
class Quoter(collections.defaultdict):
"""A mapping from bytes (in range(0,256)) to strings.
String values are percent-encoded byte values, unless the key < 128, and
in the "safe" set (either the specified safe set, or default set).
"""
# Keeps a cache internally, using defaultdict, for efficiency (lookups
# of cached keys don't call Python code at all).
def __init__(self, safe):
"""safe: bytes object."""
self.safe = _ALWAYS_SAFE.union(safe)
def __repr__(self):
# Without this, will just display as a defaultdict
return "<Quoter %r>" % dict(self)
def __missing__(self, b):
# Handle a cache miss. Store quoted string in cache and return.
res = chr(b) if b in self.safe else '%{:02X}'.format(b)
self[b] = res
return res
def quote(string, safe='/', encoding=None, errors=None):
"""quote('abc def') -> 'abc%20def'
Each part of a URL, e.g. the path info, the query, etc., has a
different set of reserved characters that must be quoted.
RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists
the following reserved characters.
reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" |
"$" | ","
Each of these characters is reserved in some component of a URL,
but not necessarily in all of them.
By default, the quote function is intended for quoting the path
section of a URL. Thus, it will not encode '/'. This character
is reserved, but in typical usage the quote function is being
called on a path where the existing slash characters are used as
reserved characters.
string and safe may be either str or bytes objects. encoding must
not be specified if string is a str.
The optional encoding and errors parameters specify how to deal with
non-ASCII characters, as accepted by the str.encode method.
By default, encoding='utf-8' (characters are encoded with UTF-8), and
errors='strict' (unsupported characters raise a UnicodeEncodeError).
"""
if isinstance(string, str):
if not string:
return string
if encoding is None:
encoding = 'utf-8'
if errors is None:
errors = 'strict'
string = string.encode(encoding, errors)
else:
if encoding is not None:
raise TypeError("quote() doesn't support 'encoding' for bytes")
if errors is not None:
raise TypeError("quote() doesn't support 'errors' for bytes")
return quote_from_bytes(string, safe)
def quote_plus(string, safe='', encoding=None, errors=None):
"""Like quote(), but also replace ' ' with '+', as required for quoting
HTML form values. Plus signs in the original string are escaped unless
they are included in safe. It also does not have safe default to '/'.
"""
# Check if ' ' in string, where string may either be a str or bytes. If
# there are no spaces, the regular quote will produce the right answer.
if ((isinstance(string, str) and ' ' not in string) or
(isinstance(string, bytes) and b' ' not in string)):
return quote(string, safe, encoding, errors)
if isinstance(safe, str):
space = ' '
else:
space = b' '
string = quote(string, safe + space, encoding, errors)
return string.replace(' ', '+')
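# Hedged example (not part of the original module): the difference between
# quote(), quote_plus() and their unquoting counterparts defined above.
def _example_quote_unquote_usage():
    assert quote('abc def/ghi') == 'abc%20def/ghi'            # '/' is safe by default
    assert quote('abc def/ghi', safe='') == 'abc%20def%2Fghi'
    assert quote_plus('abc def+') == 'abc+def%2B'             # ' ' -> '+', '+' escaped
    assert unquote('abc%20def') == 'abc def'
    assert unquote_plus('abc+def%2B') == 'abc def+'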
def quote_from_bytes(bs, safe='/'):
"""Like quote(), but accepts a bytes object rather than a str, and does
not perform string-to-bytes encoding. It always returns an ASCII string.
quote_from_bytes(b'abc def\x3f') -> 'abc%20def%3f'
"""
if not isinstance(bs, (bytes, bytearray)):
raise TypeError("quote_from_bytes() expected bytes")
if not bs:
return ''
if isinstance(safe, str):
# Normalize 'safe' by converting to bytes and removing non-ASCII chars
safe = safe.encode('ascii', 'ignore')
else:
safe = bytes([c for c in safe if c < 128])
if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe):
return bs.decode()
try:
quoter = _safe_quoters[safe]
except KeyError:
_safe_quoters[safe] = quoter = Quoter(safe).__getitem__
return ''.join([quoter(char) for char in bs])
def urlencode(query, doseq=False, safe='', encoding=None, errors=None):
"""Encode a sequence of two-element tuples or dictionary into a URL query string.
If any values in the query arg are sequences and doseq is true, each
sequence element is converted to a separate parameter.
If the query arg is a sequence of two-element tuples, the order of the
parameters in the output will match the order of parameters in the
input.
The query arg may be either a string or a bytes type. When query arg is a
string, the safe, encoding and error parameters are sent the quote_plus for
encoding.
"""
if hasattr(query, "items"):
query = query.items()
else:
# It's a bother at times that strings and string-like objects are
# sequences.
try:
# non-sequence items should not work with len()
# non-empty strings will fail this
if len(query) and not isinstance(query[0], tuple):
raise TypeError
# Zero-length sequences of all types will get here and succeed,
# but that's a minor nit. Since the original implementation
# allowed empty dicts that type of behavior probably should be
# preserved for consistency
except TypeError:
ty, va, tb = sys.exc_info()
raise TypeError("not a valid non-string sequence "
"or mapping object").with_traceback(tb)
l = []
if not doseq:
for k, v in query:
if isinstance(k, bytes):
k = quote_plus(k, safe)
else:
k = quote_plus(str(k), safe, encoding, errors)
if isinstance(v, bytes):
v = quote_plus(v, safe)
else:
v = quote_plus(str(v), safe, encoding, errors)
l.append(k + '=' + v)
else:
for k, v in query:
if isinstance(k, bytes):
k = quote_plus(k, safe)
else:
k = quote_plus(str(k), safe, encoding, errors)
if isinstance(v, bytes):
v = quote_plus(v, safe)
l.append(k + '=' + v)
elif isinstance(v, str):
v = quote_plus(v, safe, encoding, errors)
l.append(k + '=' + v)
else:
try:
# Is this a sufficient test for sequence-ness?
x = len(v)
except TypeError:
# not a sequence
v = quote_plus(str(v), safe, encoding, errors)
l.append(k + '=' + v)
else:
# loop over the sequence
for elt in v:
if isinstance(elt, bytes):
elt = quote_plus(elt, safe)
else:
elt = quote_plus(str(elt), safe, encoding, errors)
l.append(k + '=' + elt)
return '&'.join(l)
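# Hedged example (not part of the original module): urlencode() with a list of
# pairs (order preserved) and with doseq=True for sequence values.
def _example_urlencode_usage():
    assert urlencode([('q', 'a b'), ('n', 1)]) == 'q=a+b&n=1'
    assert urlencode([('k', [1, 2])], doseq=True) == 'k=1&k=2'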
# Utilities to parse URLs (most of these return None for missing parts):
# unwrap('<URL:type://host/path>') --> 'type://host/path'
# splittype('type:opaquestring') --> 'type', 'opaquestring'
# splithost('//host[:port]/path') --> 'host[:port]', '/path'
# splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'
# splitpasswd('user:passwd') -> 'user', 'passwd'
# splitport('host:port') --> 'host', 'port'
# splitquery('/path?query') --> '/path', 'query'
# splittag('/path#tag') --> '/path', 'tag'
# splitattr('/path;attr1=value1;attr2=value2;...') ->
# '/path', ['attr1=value1', 'attr2=value2', ...]
# splitvalue('attr=value') --> 'attr', 'value'
# urllib.parse.unquote('abc%20def') -> 'abc def'
# quote('abc def') -> 'abc%20def')
def to_bytes(url):
"""to_bytes(u"URL") --> 'URL'."""
# Most URL schemes require ASCII. If that changes, the conversion
# can be relaxed.
# XXX get rid of to_bytes()
if isinstance(url, str):
try:
url = url.encode("ASCII").decode()
except UnicodeError:
raise UnicodeError("URL " + repr(url) +
" contains non-ASCII characters")
return url
def unwrap(url):
"""unwrap('<URL:type://host/path>') --> 'type://host/path'."""
url = str(url).strip()
if url[:1] == '<' and url[-1:] == '>':
url = url[1:-1].strip()
if url[:4] == 'URL:': url = url[4:].strip()
return url
_typeprog = None
def splittype(url):
"""splittype('type:opaquestring') --> 'type', 'opaquestring'."""
global _typeprog
if _typeprog is None:
import re
_typeprog = re.compile('^([^/:]+):')
match = _typeprog.match(url)
if match:
scheme = match.group(1)
return scheme.lower(), url[len(scheme) + 1:]
return None, url
_hostprog = None
def splithost(url):
"""splithost('//host[:port]/path') --> 'host[:port]', '/path'."""
global _hostprog
if _hostprog is None:
import re
_hostprog = re.compile('^//([^/?]*)(.*)$')
match = _hostprog.match(url)
if match:
host_port = match.group(1)
path = match.group(2)
if path and not path.startswith('/'):
path = '/' + path
return host_port, path
return None, url
_userprog = None
def splituser(host):
"""splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
global _userprog
if _userprog is None:
import re
_userprog = re.compile('^(.*)@(.*)$')
match = _userprog.match(host)
if match: return match.group(1, 2)
return None, host
_passwdprog = None
def splitpasswd(user):
"""splitpasswd('user:passwd') -> 'user', 'passwd'."""
global _passwdprog
if _passwdprog is None:
import re
_passwdprog = re.compile('^([^:]*):(.*)$',re.S)
match = _passwdprog.match(user)
if match: return match.group(1, 2)
return user, None
# splittag('/path#tag') --> '/path', 'tag'
_portprog = None
def splitport(host):
"""splitport('host:port') --> 'host', 'port'."""
global _portprog
if _portprog is None:
import re
_portprog = re.compile('^(.*):([0-9]+)$')
match = _portprog.match(host)
if match: return match.group(1, 2)
return host, None
_nportprog = None
def splitnport(host, defport=-1):
"""Split host and port, returning numeric port.
Return given default port if no ':' found; defaults to -1.
    Return numerical port if a valid number is found after ':'.
Return None if ':' but not a valid number."""
global _nportprog
if _nportprog is None:
import re
_nportprog = re.compile('^(.*):(.*)$')
match = _nportprog.match(host)
if match:
host, port = match.group(1, 2)
try:
if not port: raise ValueError("no digits")
nport = int(port)
except ValueError:
nport = None
return host, nport
return host, defport
_queryprog = None
def splitquery(url):
"""splitquery('/path?query') --> '/path', 'query'."""
global _queryprog
if _queryprog is None:
import re
_queryprog = re.compile('^(.*)\?([^?]*)$')
match = _queryprog.match(url)
if match: return match.group(1, 2)
return url, None
_tagprog = None
def splittag(url):
"""splittag('/path#tag') --> '/path', 'tag'."""
global _tagprog
if _tagprog is None:
import re
_tagprog = re.compile('^(.*)#([^#]*)$')
match = _tagprog.match(url)
if match: return match.group(1, 2)
return url, None
def splitattr(url):
"""splitattr('/path;attr1=value1;attr2=value2;...') ->
'/path', ['attr1=value1', 'attr2=value2', ...]."""
words = url.split(';')
return words[0], words[1:]
_valueprog = None
def splitvalue(attr):
"""splitvalue('attr=value') --> 'attr', 'value'."""
global _valueprog
if _valueprog is None:
import re
_valueprog = re.compile('^([^=]*)=(.*)$')
match = _valueprog.match(attr)
if match: return match.group(1, 2)
return attr, None
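# Hedged example (not part of the original module): behaviour of the legacy
# split* helpers above on a few representative inputs.
def _example_split_helpers_usage():
    assert splittype('http://example.com/a') == ('http', '//example.com/a')
    assert splithost('//example.com:8080/a?x=1') == ('example.com:8080', '/a?x=1')
    assert splitport('example.com:8080') == ('example.com', '8080')
    assert splitnport('example.com:notaport') == ('example.com', None)
    assert splitquery('/a?x=1') == ('/a', 'x=1')
    assert splitvalue('attr=value') == ('attr', 'value')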
| gpl-3.0 |
Murali-group/GraphSpace | applications/uniprot/models.py | 1 | 1246 | from __future__ import unicode_literals
from sqlalchemy import ForeignKeyConstraint, text
from applications.users.models import *
from django.conf import settings
from graphspace.mixins import *
Base = settings.BASE
# ================== Table Definitions =================== #
class UniprotAlias(IDMixin, TimeStampMixin, Base):
__tablename__ = 'uniprot_alias'
accession_number = Column(String, nullable=False)
alias_source = Column(String, nullable=False)
alias_name = Column(String, nullable=False)
constraints = (
UniqueConstraint('accession_number', 'alias_source', 'alias_name', name='_uniprot_alias_uc_accession_number_alias_source_alias_name'),
)
indices = (
Index('uniprot_alias_idx_accession_number', text("accession_number gin_trgm_ops"), postgresql_using="gin"),
Index('uniprot_alias_idx_alias_name', text("alias_name gin_trgm_ops"), postgresql_using="gin"),
)
@declared_attr
def __table_args__(cls):
args = cls.constraints + cls.indices
return args
def serialize(cls, **kwargs):
return {
# 'id': cls.id,
'id': cls.accession_number,
'alias_source': cls.alias_source,
'alias_name': cls.alias_name,
'created_at': cls.created_at.isoformat(),
'updated_at': cls.updated_at.isoformat()
}
| gpl-2.0 |
yg257/Pangea | lib/boto-2.34.0/boto/gs/connection.py | 157 | 5478 | # Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.gs.bucket import Bucket
from boto.s3.connection import S3Connection
from boto.s3.connection import SubdomainCallingFormat
from boto.s3.connection import check_lowercase_bucketname
from boto.utils import get_utf8_value
class Location(object):
DEFAULT = 'US'
EU = 'EU'
class GSConnection(S3Connection):
DefaultHost = 'storage.googleapis.com'
QueryString = 'Signature=%s&Expires=%d&GoogleAccessId=%s'
def __init__(self, gs_access_key_id=None, gs_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None,
host=DefaultHost, debug=0, https_connection_factory=None,
calling_format=SubdomainCallingFormat(), path='/',
suppress_consec_slashes=True):
super(GSConnection, self).__init__(gs_access_key_id, gs_secret_access_key,
is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
host, debug, https_connection_factory, calling_format, path,
"google", Bucket,
suppress_consec_slashes=suppress_consec_slashes)
def create_bucket(self, bucket_name, headers=None,
location=Location.DEFAULT, policy=None,
storage_class='STANDARD'):
"""
Creates a new bucket. By default it's located in the USA. You can
pass Location.EU to create bucket in the EU. You can also pass
a LocationConstraint for where the bucket should be located, and
a StorageClass describing how the data should be stored.
:type bucket_name: string
:param bucket_name: The name of the new bucket.
:type headers: dict
:param headers: Additional headers to pass along with the request to GCS.
:type location: :class:`boto.gs.connection.Location`
:param location: The location of the new bucket.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GCS.
:type storage_class: string
:param storage_class: Either 'STANDARD' or 'DURABLE_REDUCED_AVAILABILITY'.
"""
check_lowercase_bucketname(bucket_name)
if policy:
if headers:
headers[self.provider.acl_header] = policy
else:
headers = {self.provider.acl_header : policy}
if not location:
location = Location.DEFAULT
location_elem = ('<LocationConstraint>%s</LocationConstraint>'
% location)
if storage_class:
storage_class_elem = ('<StorageClass>%s</StorageClass>'
% storage_class)
else:
storage_class_elem = ''
data = ('<CreateBucketConfiguration>%s%s</CreateBucketConfiguration>'
% (location_elem, storage_class_elem))
response = self.make_request(
'PUT', get_utf8_value(bucket_name), headers=headers,
data=get_utf8_value(data))
body = response.read()
if response.status == 409:
raise self.provider.storage_create_error(
response.status, response.reason, body)
if response.status == 200:
return self.bucket_class(self, bucket_name)
else:
raise self.provider.storage_response_error(
response.status, response.reason, body)
def get_bucket(self, bucket_name, validate=True, headers=None):
"""
Retrieves a bucket by name.
If the bucket does not exist, an ``S3ResponseError`` will be raised. If
you are unsure if the bucket exists or not, you can use the
``S3Connection.lookup`` method, which will either return a valid bucket
or ``None``.
:type bucket_name: string
:param bucket_name: The name of the bucket
:type headers: dict
:param headers: Additional headers to pass along with the request to
AWS.
:type validate: boolean
:param validate: If ``True``, it will try to fetch all keys within the
given bucket. (Default: ``True``)
"""
bucket = self.bucket_class(self, bucket_name)
if validate:
bucket.get_all_keys(headers, maxkeys=0)
return bucket
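# Hedged usage sketch (not part of boto): how the connection and bucket calls
# above are typically combined. The bucket name and canned ACL are made-up
# placeholders; GSConnection() with no arguments assumes credentials are
# available from the boto configuration.
def _example_gs_connection_usage():
    conn = GSConnection()
    bucket = conn.create_bucket('example-bucket-name',
                                location=Location.EU,
                                policy='private',
                                storage_class='STANDARD')
    return conn.get_bucket(bucket.name, validate=False)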
| apache-2.0 |
GodBlessPP/2015cdb | static/Brython3.1.1-20150328-091302/Lib/heapq.py | 628 | 18065 | """Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
__about__ = """Heap queues
[explanation by François Pinard]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
a usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all time, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedules other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, whose size is usually related to the amount of CPU memory),
followed by merging passes for these runs; the merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to do that. If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for input fuzzily ordered.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
'nlargest', 'nsmallest', 'heappushpop']
from itertools import islice, count, tee, chain
def heappush(heap, item):
"""Push item onto heap, maintaining the heap invariant."""
heap.append(item)
_siftdown(heap, 0, len(heap)-1)
def heappop(heap):
"""Pop the smallest item off the heap, maintaining the heap invariant."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup(heap, 0)
else:
returnitem = lastelt
return returnitem
def heapreplace(heap, item):
"""Pop and return the current smallest value, and add the new item.
This is more efficient than heappop() followed by heappush(), and can be
more appropriate when using a fixed-size heap. Note that the value
returned may be larger than item! That constrains reasonable uses of
this routine unless written as part of a conditional replacement:
if item > heap[0]:
item = heapreplace(heap, item)
"""
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup(heap, 0)
return returnitem
def heappushpop(heap, item):
"""Fast version of a heappush followed by a heappop."""
if heap and heap[0] < item:
item, heap[0] = heap[0], item
_siftup(heap, 0)
return item
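# Hedged example (not part of the original module): the push/pop cycle from the
# module docstring, wrapped in a function so nothing runs at import time.
def _example_heap_usage():
    h = []
    for value in [5, 1, 4, 2, 3]:
        heappush(h, value)
    assert h[0] == 1                  # the smallest item is always at index 0
    out = []
    while h:
        out.append(heappop(h))
    assert out == [1, 2, 3, 4, 5]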
def heapify(x):
"""Transform list into a heap, in-place, in O(len(x)) time."""
n = len(x)
# Transform bottom-up. The largest index there's any point to looking at
# is the largest with a child index in-range, so must have 2*i + 1 < n,
# or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
# j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is
# (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
for i in reversed(range(n//2)):
_siftup(x, i)
def _heappushpop_max(heap, item):
"""Maxheap version of a heappush followed by a heappop."""
if heap and item < heap[0]:
item, heap[0] = heap[0], item
_siftup_max(heap, 0)
return item
def _heapify_max(x):
"""Transform list into a maxheap, in-place, in O(len(x)) time."""
n = len(x)
for i in reversed(range(n//2)):
_siftup_max(x, i)
def nlargest(n, iterable):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, reverse=True)[:n]
"""
if n < 0:
return []
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
heapify(result)
_heappushpop = heappushpop
for elem in it:
_heappushpop(result, elem)
result.sort(reverse=True)
return result
def nsmallest(n, iterable):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable)[:n]
"""
if n < 0:
return []
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
_heapify_max(result)
_heappushpop = _heappushpop_max
for elem in it:
_heappushpop(result, elem)
result.sort()
return result
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
def _siftdown(heap, startpos, pos):
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if newitem < parent:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but turns out that's not a good idea, and despite that
# many books write the algorithm that way. During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom comparison methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
def _siftup(heap, pos):
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the smaller child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of smaller child.
rightpos = childpos + 1
if rightpos < endpos and not heap[childpos] < heap[rightpos]:
childpos = rightpos
# Move the smaller child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown(heap, startpos, pos)
def _siftdown_max(heap, startpos, pos):
'Maxheap variant of _siftdown'
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if parent < newitem:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
def _siftup_max(heap, pos):
'Maxheap variant of _siftup'
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the larger child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of larger child.
rightpos = childpos + 1
if rightpos < endpos and not heap[rightpos] < heap[childpos]:
childpos = rightpos
# Move the larger child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown_max(heap, startpos, pos)
# If available, use C implementation
# _heapq does not exist in Brython, so let's just comment it out.
#try:
# from _heapq import *
#except ImportError:
# pass
def merge(*iterables):
'''Merge multiple sorted inputs into a single sorted output.
Similar to sorted(itertools.chain(*iterables)) but returns a generator,
does not pull the data into memory all at once, and assumes that each of
the input streams is already sorted (smallest to largest).
>>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
[0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
'''
_heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration
_len = len
h = []
h_append = h.append
for itnum, it in enumerate(map(iter, iterables)):
try:
next = it.__next__
h_append([next(), itnum, next])
except _StopIteration:
pass
heapify(h)
while _len(h) > 1:
try:
while True:
v, itnum, next = s = h[0]
yield v
s[0] = next() # raises StopIteration when exhausted
_heapreplace(h, s) # restore heap condition
except _StopIteration:
_heappop(h) # remove empty iterator
if h:
# fast case when only a single iterator remains
v, itnum, next = h[0]
yield v
yield from next.__self__
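# Illustrative note (added; not part of the original module): each heap entry
# is the mutable list [current_value, itnum, next_method]. The itnum field only
# exists to break ties between equal values, so list comparison never has to
# compare the bound next methods, and the merge stays stable:
#
#   >>> list(merge([1, 3], [1, 2]))     # both iterators start with 1
#   [1, 1, 2, 3]
#
# The first 1 comes from the first iterable because its itnum (0) is smaller.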
# Extend the implementations of nsmallest and nlargest to use a key= argument
_nsmallest = nsmallest
def nsmallest(n, iterable, key=None):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable, key=key)[:n]
"""
# Short-cut for n==1 is to use min() when len(iterable)>0
if n == 1:
it = iter(iterable)
head = list(islice(it, 1))
if not head:
return []
if key is None:
return [min(chain(head, it))]
return [min(chain(head, it), key=key)]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key)[:n]
# When key is None, use simpler decoration
if key is None:
it = zip(iterable, count()) # decorate
result = _nsmallest(n, it)
return [r[0] for r in result] # undecorate
# General case, slowest method
in1, in2 = tee(iterable)
it = zip(map(key, in1), count(), in2) # decorate
result = _nsmallest(n, it)
return [r[2] for r in result] # undecorate
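# Illustrative sketch (added for clarity): the decorate/undecorate dance above
# pairs each key with an increasing counter, so ties are broken by original
# position and the payload itself is never compared. Usage looks like:
#
#   >>> rows = [('b', 2), ('a', 5), ('c', 1)]
#   >>> nsmallest(2, rows, key=lambda r: r[1])
#   [('c', 1), ('b', 2)]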
_nlargest = nlargest
def nlargest(n, iterable, key=None):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
"""
# Short-cut for n==1 is to use max() when len(iterable)>0
if n == 1:
it = iter(iterable)
head = list(islice(it, 1))
if not head:
return []
if key is None:
return [max(chain(head, it))]
return [max(chain(head, it), key=key)]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key, reverse=True)[:n]
# When key is None, use simpler decoration
if key is None:
it = zip(iterable, count(0,-1)) # decorate
result = _nlargest(n, it)
return [r[0] for r in result] # undecorate
# General case, slowest method
in1, in2 = tee(iterable)
it = zip(map(key, in1), count(0,-1), in2) # decorate
result = _nlargest(n, it)
return [r[2] for r in result] # undecorate
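# Illustrative sketch (added for clarity): nlargest() decorates with
# count(0, -1) -- a *decreasing* counter -- so that among equal keys the
# element seen first compares larger and is kept, mirroring
# sorted(..., key=key, reverse=True):
#
#   >>> nlargest(2, [('a', 5), ('b', 5), ('c', 1)], key=lambda r: r[1])
#   [('a', 5), ('b', 5)]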
if __name__ == "__main__":
# Simple sanity test
heap = []
data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
for item in data:
heappush(heap, item)
sort = []
while heap:
sort.append(heappop(heap))
print(sort)
import doctest
doctest.testmod()
| gpl-3.0 |
redhat-openstack/nova | nova/tests/api/openstack/compute/plugins/v3/test_extended_volumes.py | 12 | 16239 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import webob
from nova.api.openstack.compute.plugins.v3 import extended_volumes
from nova import compute
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.objects import instance as instance_obj
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_block_device
from nova.tests import fake_instance
from nova import volume
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'
def fake_compute_get(*args, **kwargs):
inst = fakes.stub_instance(1, uuid=UUID1)
return fake_instance.fake_instance_obj(args[1], **inst)
def fake_compute_get_not_found(*args, **kwargs):
raise exception.InstanceNotFound(instance_id=UUID1)
def fake_compute_get_all(*args, **kwargs):
db_list = [fakes.stub_instance(1), fakes.stub_instance(2)]
fields = instance_obj.INSTANCE_DEFAULT_FIELDS
return instance_obj._make_instance_list(args[1],
objects.InstanceList(),
db_list, fields)
def fake_bdms_get_all_by_instance(*args, **kwargs):
return [fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': UUID1, 'source_type': 'volume',
'destination_type': 'volume', 'id': 1}),
fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': UUID2, 'source_type': 'volume',
'destination_type': 'volume', 'id': 2})]
def fake_attach_volume(self, context, instance, volume_id,
device, disk_bus, device_type):
pass
def fake_attach_volume_not_found_vol(self, context, instance, volume_id,
device, disk_bus, device_type):
raise exception.VolumeNotFound(volume_id=volume_id)
def fake_attach_volume_invalid_device_path(self, context, instance,
volume_id, device, disk_bus,
device_type):
raise exception.InvalidDevicePath(path=device)
def fake_attach_volume_instance_invalid_state(self, context, instance,
volume_id, device, disk_bus,
device_type):
raise exception.InstanceInvalidState(instance_uuid=UUID1, state='',
method='', attr='')
def fake_attach_volume_invalid_volume(self, context, instance,
volume_id, device, disk_bus,
device_type):
raise exception.InvalidVolume(reason='')
def fake_detach_volume(self, context, instance, volume):
pass
def fake_swap_volume(self, context, instance,
old_volume_id, new_volume_id):
pass
def fake_swap_volume_invalid_volume(self, context, instance,
volume_id, device):
raise exception.InvalidVolume(reason='', volume_id=volume_id)
def fake_swap_volume_unattached_volume(self, context, instance,
volume_id, device):
raise exception.VolumeUnattached(reason='', volume_id=volume_id)
def fake_detach_volume_invalid_volume(self, context, instance, volume):
raise exception.InvalidVolume(reason='')
def fake_swap_volume_instance_invalid_state(self, context, instance,
volume_id, device):
raise exception.InstanceInvalidState(instance_uuid=UUID1, state='',
method='', attr='')
def fake_volume_get(*args, **kwargs):
pass
def fake_volume_get_not_found(*args, **kwargs):
raise exception.VolumeNotFound(volume_id=UUID1)
class ExtendedVolumesTest(test.TestCase):
content_type = 'application/json'
prefix = 'os-extended-volumes:'
def setUp(self):
super(ExtendedVolumesTest, self).setUp()
self.Controller = extended_volumes.ExtendedVolumesController()
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_bdms_get_all_by_instance)
self.stubs.Set(volume.cinder.API, 'get', fake_volume_get)
self.stubs.Set(compute.api.API, 'detach_volume', fake_detach_volume)
self.stubs.Set(compute.api.API, 'attach_volume', fake_attach_volume)
self.app = fakes.wsgi_app_v3(init_only=('os-extended-volumes',
'servers'))
return_server = fakes.fake_instance_get()
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
def _make_request(self, url, body=None):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
if body:
req.body = jsonutils.dumps(body)
req.method = 'POST'
req.content_type = 'application/json'
res = req.get_response(self.app)
return res
def _get_server(self, body):
return jsonutils.loads(body).get('server')
def _get_servers(self, body):
return jsonutils.loads(body).get('servers')
def test_show(self):
url = '/v3/servers/%s' % UUID1
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
server = self._get_server(res.body)
exp_volumes = [{'id': UUID1}, {'id': UUID2}]
if self.content_type == 'application/json':
actual = server.get('%svolumes_attached' % self.prefix)
self.assertEqual(exp_volumes, actual)
def test_detail(self):
url = '/v3/servers/detail'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
exp_volumes = [{'id': UUID1}, {'id': UUID2}]
for i, server in enumerate(self._get_servers(res.body)):
if self.content_type == 'application/json':
actual = server.get('%svolumes_attached' % self.prefix)
self.assertEqual(exp_volumes, actual)
def test_detach(self):
url = "/v3/servers/%s/action" % UUID1
res = self._make_request(url, {"detach": {"volume_id": UUID1}})
self.assertEqual(res.status_int, 202)
def test_detach_volume_from_locked_server(self):
url = "/v3/servers/%s/action" % UUID1
self.stubs.Set(compute.api.API, 'detach_volume',
fakes.fake_actions_to_locked_server)
res = self._make_request(url, {"detach": {"volume_id": UUID1}})
self.assertEqual(res.status_int, 409)
def test_detach_with_non_existed_vol(self):
url = "/v3/servers/%s/action" % UUID1
self.stubs.Set(volume.cinder.API, 'get', fake_volume_get_not_found)
res = self._make_request(url, {"detach": {"volume_id": UUID2}})
self.assertEqual(res.status_int, 404)
def test_detach_with_non_existed_instance(self):
url = "/v3/servers/%s/action" % UUID1
self.stubs.Set(compute.api.API, 'get', fake_compute_get_not_found)
res = self._make_request(url, {"detach": {"volume_id": UUID2}})
self.assertEqual(res.status_int, 404)
def test_detach_with_invalid_vol(self):
url = "/v3/servers/%s/action" % UUID1
self.stubs.Set(compute.api.API, 'detach_volume',
fake_detach_volume_invalid_volume)
res = self._make_request(url, {"detach": {"volume_id": UUID2}})
self.assertEqual(res.status_int, 400)
def test_detach_with_bad_id(self):
url = "/v3/servers/%s/action" % UUID1
res = self._make_request(url, {"detach": {"volume_id": 'xxx'}})
self.assertEqual(res.status_int, 400)
def test_detach_without_id(self):
url = "/v3/servers/%s/action" % UUID1
res = self._make_request(url, {"detach": {}})
self.assertEqual(res.status_int, 400)
def test_detach_volume_with_invalid_request(self):
url = "/v3/servers/%s/action" % UUID1
res = self._make_request(url, {"detach": None})
self.assertEqual(res.status_int, 400)
@mock.patch('nova.objects.BlockDeviceMapping.is_root',
new_callable=mock.PropertyMock)
def test_detach_volume_root(self, mock_isroot):
url = "/v3/servers/%s/action" % UUID1
mock_isroot.return_value = True
res = self._make_request(url, {"detach": {"volume_id": UUID1}})
self.assertEqual(res.status_int, 403)
def test_attach_volume(self):
url = "/v3/servers/%s/action" % UUID1
res = self._make_request(url, {"attach": {"volume_id": UUID1}})
self.assertEqual(res.status_int, 202)
def test_attach_volume_to_locked_server(self):
url = "/v3/servers/%s/action" % UUID1
self.stubs.Set(compute.api.API, 'attach_volume',
fakes.fake_actions_to_locked_server)
res = self._make_request(url, {"attach": {"volume_id": UUID1}})
self.assertEqual(res.status_int, 409)
def test_attach_volume_disk_bus_and_disk_dev(self):
url = "/v3/servers/%s/action" % UUID1
self._make_request(url, {"attach": {"volume_id": UUID1,
"device": "/dev/vdb",
"disk_bus": "ide",
"device_type": "cdrom"}})
def test_attach_volume_with_bad_id(self):
url = "/v3/servers/%s/action" % UUID1
res = self._make_request(url, {"attach": {"volume_id": 'xxx'}})
self.assertEqual(res.status_int, 400)
def test_attach_volume_without_id(self):
url = "/v3/servers/%s/action" % UUID1
res = self._make_request(url, {"attach": {}})
self.assertEqual(res.status_int, 400)
def test_attach_volume_with_invalid_request(self):
url = "/v3/servers/%s/action" % UUID1
res = self._make_request(url, {"attach": None})
self.assertEqual(res.status_int, 400)
def test_attach_volume_with_non_existe_vol(self):
url = "/v3/servers/%s/action" % UUID1
self.stubs.Set(compute.api.API, 'attach_volume',
fake_attach_volume_not_found_vol)
res = self._make_request(url, {"attach": {"volume_id": UUID1}})
self.assertEqual(res.status_int, 404)
def test_attach_volume_with_non_existed_instance(self):
url = "/v3/servers/%s/action" % UUID1
self.stubs.Set(compute.api.API, 'get', fake_compute_get_not_found)
res = self._make_request(url, {"attach": {"volume_id": UUID1}})
self.assertEqual(res.status_int, 404)
def test_attach_volume_with_invalid_device_path(self):
url = "/v3/servers/%s/action" % UUID1
self.stubs.Set(compute.api.API, 'attach_volume',
fake_attach_volume_invalid_device_path)
res = self._make_request(url, {"attach": {"volume_id": UUID1,
'device': 'xxx'}})
self.assertEqual(res.status_int, 400)
def test_attach_volume_with_instance_invalid_state(self):
url = "/v3/servers/%s/action" % UUID1
self.stubs.Set(compute.api.API, 'attach_volume',
fake_attach_volume_instance_invalid_state)
res = self._make_request(url, {"attach": {"volume_id": UUID1}})
self.assertEqual(res.status_int, 409)
def test_attach_volume_with_invalid_volume(self):
url = "/v3/servers/%s/action" % UUID1
self.stubs.Set(compute.api.API, 'attach_volume',
fake_attach_volume_invalid_volume)
res = self._make_request(url, {"attach": {"volume_id": UUID1}})
self.assertEqual(res.status_int, 400)
def test_attach_volume_with_invalid_request_body(self):
url = "/v3/servers/%s/action" % UUID1
self.stubs.Set(compute.api.API, 'attach_volume',
fake_attach_volume_invalid_volume)
res = self._make_request(url, {"attach": None})
self.assertEqual(res.status_int, 400)
def _test_swap(self, uuid=UUID1, body=None):
body = body or {'swap_volume_attachment': {'old_volume_id': uuid,
'new_volume_id': UUID2}}
req = webob.Request.blank('/v3/servers/%s/action' % UUID1)
req.method = 'PUT'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = context.get_admin_context()
return self.Controller.swap(req, UUID1, body=body)
def test_swap_volume(self):
self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume)
result = self._test_swap()
self.assertEqual('202 Accepted', result.status)
def test_swap_volume_for_locked_server(self):
def fake_swap_volume_for_locked_server(self, context, instance,
old_volume, new_volume):
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
self.stubs.Set(compute.api.API, 'swap_volume',
fake_swap_volume_for_locked_server)
self.assertRaises(webob.exc.HTTPConflict, self._test_swap)
def test_swap_volume_for_locked_server_new(self):
self.stubs.Set(compute.api.API, 'swap_volume',
fakes.fake_actions_to_locked_server)
self.assertRaises(webob.exc.HTTPConflict, self._test_swap)
def test_swap_volume_instance_not_found(self):
self.stubs.Set(compute.api.API, 'get', fake_compute_get_not_found)
self.assertRaises(webob.exc.HTTPNotFound, self._test_swap)
def test_swap_volume_with_bad_action(self):
self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume)
body = {'swap_volume_attachment_bad_action': None}
self.assertRaises(exception.ValidationError, self._test_swap,
body=body)
def test_swap_volume_with_invalid_body(self):
self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume)
body = {'swap_volume_attachment': {'bad_volume_id_body': UUID1,
'new_volume_id': UUID2}}
self.assertRaises(exception.ValidationError, self._test_swap,
body=body)
def test_swap_volume_with_invalid_volume(self):
self.stubs.Set(compute.api.API, 'swap_volume',
fake_swap_volume_invalid_volume)
self.assertRaises(webob.exc.HTTPBadRequest, self._test_swap)
def test_swap_volume_with_unattached_volume(self):
self.stubs.Set(compute.api.API, 'swap_volume',
fake_swap_volume_unattached_volume)
self.assertRaises(webob.exc.HTTPNotFound, self._test_swap)
def test_swap_volume_with_bad_state_instance(self):
self.stubs.Set(compute.api.API, 'swap_volume',
fake_swap_volume_instance_invalid_state)
self.assertRaises(webob.exc.HTTPConflict, self._test_swap)
def test_swap_volume_no_attachment(self):
self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume)
self.assertRaises(webob.exc.HTTPNotFound, self._test_swap, UUID3)
def test_swap_volume_not_found(self):
self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume)
self.stubs.Set(volume.cinder.API, 'get', fake_volume_get_not_found)
self.assertRaises(webob.exc.HTTPNotFound, self._test_swap)
| apache-2.0 |
tms/node-gyp | gyp/test/lib/TestCmd.py | 330 | 52544 | """
TestCmd.py: a testing framework for commands and scripts.
The TestCmd module provides a framework for portable automated testing
of executable commands and scripts (in any language, not just Python),
especially commands and scripts that require file system interaction.
In addition to running tests and evaluating conditions, the TestCmd
module manages and cleans up one or more temporary workspace
directories, and provides methods for creating files and directories in
those workspace directories from in-line data (here-documents), allowing
tests to be completely self-contained.
A TestCmd environment object is created via the usual invocation:
import TestCmd
test = TestCmd.TestCmd()
There are a bunch of keyword arguments available at instantiation:
test = TestCmd.TestCmd(description = 'string',
program = 'program_or_script_to_test',
interpreter = 'script_interpreter',
workdir = 'prefix',
subdir = 'subdir',
verbose = Boolean,
match = default_match_function,
diff = default_diff_function,
combine = Boolean)
There are a bunch of methods that let you do different things:
test.verbose_set(1)
test.description_set('string')
test.program_set('program_or_script_to_test')
test.interpreter_set('script_interpreter')
test.interpreter_set(['script_interpreter', 'arg'])
test.workdir_set('prefix')
test.workdir_set('')
test.workpath('file')
test.workpath('subdir', 'file')
test.subdir('subdir', ...)
test.rmdir('subdir', ...)
test.write('file', "contents\n")
test.write(['subdir', 'file'], "contents\n")
test.read('file')
test.read(['subdir', 'file'])
test.read('file', mode)
test.read(['subdir', 'file'], mode)
test.writable('dir', 1)
test.writable('dir', None)
test.preserve(condition, ...)
test.cleanup(condition)
test.command_args(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program')
test.run(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program',
chdir = 'directory_to_chdir_to',
stdin = 'input to feed to the program\n',
universal_newlines = True)
p = test.start(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program',
universal_newlines = None)
test.finish(self, p)
test.pass_test()
test.pass_test(condition)
test.pass_test(condition, function)
test.fail_test()
test.fail_test(condition)
test.fail_test(condition, function)
test.fail_test(condition, function, skip)
test.no_result()
test.no_result(condition)
test.no_result(condition, function)
test.no_result(condition, function, skip)
test.stdout()
test.stdout(run)
test.stderr()
test.stderr(run)
test.symlink(target, link)
test.banner(string)
test.banner(string, width)
test.diff(actual, expected)
test.match(actual, expected)
test.match_exact("actual 1\nactual 2\n", "expected 1\nexpected 2\n")
test.match_exact(["actual 1\n", "actual 2\n"],
["expected 1\n", "expected 2\n"])
test.match_re("actual 1\nactual 2\n", regex_string)
test.match_re(["actual 1\n", "actual 2\n"], list_of_regexes)
test.match_re_dotall("actual 1\nactual 2\n", regex_string)
test.match_re_dotall(["actual 1\n", "actual 2\n"], list_of_regexes)
test.tempdir()
test.tempdir('temporary-directory')
test.sleep()
test.sleep(seconds)
test.where_is('foo')
test.where_is('foo', 'PATH1:PATH2')
test.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
test.unlink('file')
test.unlink('subdir', 'file')
The TestCmd module provides pass_test(), fail_test(), and no_result()
unbound functions that report test results for use with the Aegis change
management system. These methods terminate the test immediately,
reporting PASSED, FAILED, or NO RESULT respectively, and exiting with
status 0 (success), 1 or 2 respectively. This allows for a distinction
between an actual failed test and a test that could not be properly
evaluated because of an external condition (such as a full file system
or incorrect permissions).
import TestCmd
TestCmd.pass_test()
TestCmd.pass_test(condition)
TestCmd.pass_test(condition, function)
TestCmd.fail_test()
TestCmd.fail_test(condition)
TestCmd.fail_test(condition, function)
TestCmd.fail_test(condition, function, skip)
TestCmd.no_result()
TestCmd.no_result(condition)
TestCmd.no_result(condition, function)
TestCmd.no_result(condition, function, skip)
The TestCmd module also provides unbound functions that handle matching
in the same way as the match_*() methods described above.
import TestCmd
test = TestCmd.TestCmd(match = TestCmd.match_exact)
test = TestCmd.TestCmd(match = TestCmd.match_re)
test = TestCmd.TestCmd(match = TestCmd.match_re_dotall)
The TestCmd module provides unbound functions that can be used for the
"diff" argument to TestCmd.TestCmd instantiation:
import TestCmd
test = TestCmd.TestCmd(match = TestCmd.match_re,
diff = TestCmd.diff_re)
test = TestCmd.TestCmd(diff = TestCmd.simple_diff)
The "diff" argument can also be used with standard difflib functions:
import difflib
test = TestCmd.TestCmd(diff = difflib.context_diff)
test = TestCmd.TestCmd(diff = difflib.unified_diff)
Lastly, the where_is() method also exists in an unbound function
version.
import TestCmd
TestCmd.where_is('foo')
TestCmd.where_is('foo', 'PATH1:PATH2')
TestCmd.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
"""
# Copyright 2000-2010 Steven Knight
# This module is free software, and you may redistribute it and/or modify
# it under the same terms as Python itself, so long as this copyright message
# and disclaimer are retained in their original form.
#
# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
# THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
__author__ = "Steven Knight <knight at baldmt dot com>"
__revision__ = "TestCmd.py 0.37.D001 2010/01/11 16:55:50 knight"
__version__ = "0.37"
import errno
import os
import os.path
import re
import shutil
import stat
import string
import sys
import tempfile
import time
import traceback
import types
import UserList
__all__ = [
'diff_re',
'fail_test',
'no_result',
'pass_test',
'match_exact',
'match_re',
'match_re_dotall',
'python_executable',
'TestCmd'
]
try:
import difflib
except ImportError:
__all__.append('simple_diff')
def is_List(e):
return type(e) is types.ListType \
or isinstance(e, UserList.UserList)
try:
from UserString import UserString
except ImportError:
class UserString:
pass
if hasattr(types, 'UnicodeType'):
def is_String(e):
return type(e) is types.StringType \
or type(e) is types.UnicodeType \
or isinstance(e, UserString)
else:
def is_String(e):
return type(e) is types.StringType or isinstance(e, UserString)
tempfile.template = 'testcmd.'
if os.name in ('posix', 'nt'):
tempfile.template = 'testcmd.' + str(os.getpid()) + '.'
else:
tempfile.template = 'testcmd.'
re_space = re.compile('\s')
_Cleanup = []
_chain_to_exitfunc = None
def _clean():
global _Cleanup
cleanlist = filter(None, _Cleanup)
del _Cleanup[:]
cleanlist.reverse()
for test in cleanlist:
test.cleanup()
if _chain_to_exitfunc:
_chain_to_exitfunc()
try:
import atexit
except ImportError:
# TODO(1.5): atexit requires python 2.0, so chain sys.exitfunc
try:
_chain_to_exitfunc = sys.exitfunc
except AttributeError:
pass
sys.exitfunc = _clean
else:
atexit.register(_clean)
try:
zip
except NameError:
def zip(*lists):
result = []
for i in xrange(min(map(len, lists))):
result.append(tuple(map(lambda l, i=i: l[i], lists)))
return result
class Collector:
def __init__(self, top):
self.entries = [top]
def __call__(self, arg, dirname, names):
pathjoin = lambda n, d=dirname: os.path.join(d, n)
self.entries.extend(map(pathjoin, names))
def _caller(tblist, skip):
string = ""
arr = []
for file, line, name, text in tblist:
if file[-10:] == "TestCmd.py":
break
arr = [(file, line, name, text)] + arr
atfrom = "at"
for file, line, name, text in arr[skip:]:
if name in ("?", "<module>"):
name = ""
else:
name = " (" + name + ")"
string = string + ("%s line %d of %s%s\n" % (atfrom, line, file, name))
atfrom = "\tfrom"
return string
def fail_test(self = None, condition = 1, function = None, skip = 0):
"""Cause the test to fail.
By default, the fail_test() method reports that the test FAILED
and exits with a status of 1. If a condition argument is supplied,
the test fails only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
of = ""
desc = ""
sep = " "
if not self is None:
if self.program:
of = " of " + self.program
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
at = _caller(traceback.extract_stack(), skip)
sys.stderr.write("FAILED test" + of + desc + sep + at)
sys.exit(1)
def no_result(self = None, condition = 1, function = None, skip = 0):
"""Causes a test to exit with no valid result.
By default, the no_result() method reports NO RESULT for the test
and exits with a status of 2. If a condition argument is supplied,
the test fails only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
of = ""
desc = ""
sep = " "
if not self is None:
if self.program:
of = " of " + self.program
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
if os.environ.get('TESTCMD_DEBUG_SKIPS'):
at = _caller(traceback.extract_stack(), skip)
sys.stderr.write("NO RESULT for test" + of + desc + sep + at)
else:
sys.stderr.write("NO RESULT\n")
sys.exit(2)
def pass_test(self = None, condition = 1, function = None):
"""Causes a test to pass.
By default, the pass_test() method reports PASSED for the test
and exits with a status of 0. If a condition argument is supplied,
the test passes only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
sys.stderr.write("PASSED\n")
sys.exit(0)
def match_exact(lines = None, matches = None):
"""
"""
if not is_List(lines):
lines = string.split(lines, "\n")
if not is_List(matches):
matches = string.split(matches, "\n")
if len(lines) != len(matches):
return
for i in range(len(lines)):
if lines[i] != matches[i]:
return
return 1
def match_re(lines = None, res = None):
"""
"""
if not is_List(lines):
lines = string.split(lines, "\n")
if not is_List(res):
res = string.split(res, "\n")
if len(lines) != len(res):
return
for i in range(len(lines)):
s = "^" + res[i] + "$"
try:
expr = re.compile(s)
except re.error, e:
msg = "Regular expression error in %s: %s"
raise re.error, msg % (repr(s), e[0])
if not expr.search(lines[i]):
return
return 1
def match_re_dotall(lines = None, res = None):
"""
"""
if not type(lines) is type(""):
lines = string.join(lines, "\n")
if not type(res) is type(""):
res = string.join(res, "\n")
s = "^" + res + "$"
try:
expr = re.compile(s, re.DOTALL)
except re.error, e:
msg = "Regular expression error in %s: %s"
raise re.error, msg % (repr(s), e[0])
if expr.match(lines):
return 1
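# Illustrative sketch (added; not part of the original module): the match_*()
# helpers above all return 1 on success and None on failure, so any of them can
# be passed as the match= argument when instantiating TestCmd. For example:
#
#   match_exact("hello\n", "hello\n")                 -> 1
#   match_re(["abc", "123"], ["a.c", r"\d+"])         -> 1 (each line anchored as ^...$)
#   match_re_dotall("line1\nline2", r"line1.*")       -> 1 (single regex, re.DOTALL)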
try:
import difflib
except ImportError:
pass
else:
def simple_diff(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
"""
A function with the same calling signature as difflib.context_diff
(diff -c) and difflib.unified_diff (diff -u) but which prints
output like the simple, unadorned 'diff' command.
"""
sm = difflib.SequenceMatcher(None, a, b)
def comma(x1, x2):
return x1+1 == x2 and str(x2) or '%s,%s' % (x1+1, x2)
result = []
for op, a1, a2, b1, b2 in sm.get_opcodes():
if op == 'delete':
result.append("%sd%d" % (comma(a1, a2), b1))
result.extend(map(lambda l: '< ' + l, a[a1:a2]))
elif op == 'insert':
result.append("%da%s" % (a1, comma(b1, b2)))
result.extend(map(lambda l: '> ' + l, b[b1:b2]))
elif op == 'replace':
result.append("%sc%s" % (comma(a1, a2), comma(b1, b2)))
result.extend(map(lambda l: '< ' + l, a[a1:a2]))
result.append('---')
result.extend(map(lambda l: '> ' + l, b[b1:b2]))
return result
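# Illustrative sketch (added for clarity): simple_diff() emits ed-style hunks
# like the plain "diff" command rather than context/unified headers. Comparing
# a = ['1\n', '2\n', '3\n'] with b = ['1\n', '9\n', '3\n'] would yield roughly:
#
#   2c2
#   < 2
#   ---
#   > 9
#
# (the '<'/'>' lines carry the original line endings, trimmed here for display).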
def diff_re(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
"""
A simple "diff" of two sets of lines when the expected lines
are regular expressions. This is a really dumb thing that
just compares each line in turn, so it doesn't look for
chunks of matching lines and the like--but at least it lets
you know exactly which line first didn't compare correctly...
"""
result = []
diff = len(a) - len(b)
if diff < 0:
a = a + ['']*(-diff)
elif diff > 0:
b = b + ['']*diff
i = 0
for aline, bline in zip(a, b):
s = "^" + aline + "$"
try:
expr = re.compile(s)
except re.error, e:
msg = "Regular expression error in %s: %s"
raise re.error, msg % (repr(s), e[0])
if not expr.search(bline):
result.append("%sc%s" % (i+1, i+1))
result.append('< ' + repr(a[i]))
result.append('---')
result.append('> ' + repr(b[i]))
i = i+1
return result
if os.name == 'java':
python_executable = os.path.join(sys.prefix, 'jython')
else:
python_executable = sys.executable
if sys.platform == 'win32':
default_sleep_seconds = 2
def where_is(file, path=None, pathext=None):
if path is None:
path = os.environ['PATH']
if is_String(path):
path = string.split(path, os.pathsep)
if pathext is None:
pathext = os.environ['PATHEXT']
if is_String(pathext):
pathext = string.split(pathext, os.pathsep)
for ext in pathext:
if string.lower(ext) == string.lower(file[-len(ext):]):
pathext = ['']
break
for dir in path:
f = os.path.join(dir, file)
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
return fext
return None
else:
def where_is(file, path=None, pathext=None):
if path is None:
path = os.environ['PATH']
if is_String(path):
path = string.split(path, os.pathsep)
for dir in path:
f = os.path.join(dir, file)
if os.path.isfile(f):
try:
st = os.stat(f)
except OSError:
continue
if stat.S_IMODE(st[stat.ST_MODE]) & 0111:
return f
return None
default_sleep_seconds = 1
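# Illustrative sketch (added for clarity): where_is() mirrors the shell's PATH
# lookup; the Windows variant also tries each PATHEXT suffix. On a POSIX box
# with a typical PATH, for example:
#
#   where_is('sh')              # -> '/bin/sh' (or wherever the first match is)
#   where_is('sh', '/opt/bin')  # -> None unless /opt/bin/sh exists and is executable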
try:
import subprocess
except ImportError:
# The subprocess module doesn't exist in this version of Python,
# so we're going to cobble up something that looks just enough
# like its API for our purposes below.
import new
subprocess = new.module('subprocess')
subprocess.PIPE = 'PIPE'
subprocess.STDOUT = 'STDOUT'
subprocess.mswindows = (sys.platform == 'win32')
try:
import popen2
popen2.Popen3
except AttributeError:
class Popen3:
universal_newlines = 1
def __init__(self, command, **kw):
if sys.platform == 'win32' and command[0] == '"':
command = '"' + command + '"'
(stdin, stdout, stderr) = os.popen3(' ' + command)
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
def close_output(self):
self.stdout.close()
self.resultcode = self.stderr.close()
def wait(self):
resultcode = self.resultcode
if os.WIFEXITED(resultcode):
return os.WEXITSTATUS(resultcode)
elif os.WIFSIGNALED(resultcode):
return os.WTERMSIG(resultcode)
else:
return None
else:
try:
popen2.Popen4
except AttributeError:
# A cribbed Popen4 class, with some retrofitted code from
# the Python 1.5 Popen3 class methods to do certain things
# by hand.
class Popen4(popen2.Popen3):
childerr = None
def __init__(self, cmd, bufsize=-1):
p2cread, p2cwrite = os.pipe()
c2pread, c2pwrite = os.pipe()
self.pid = os.fork()
if self.pid == 0:
# Child
os.dup2(p2cread, 0)
os.dup2(c2pwrite, 1)
os.dup2(c2pwrite, 2)
for i in range(3, popen2.MAXFD):
try:
os.close(i)
except: pass
try:
os.execvp(cmd[0], cmd)
finally:
os._exit(1)
# Shouldn't come here, I guess
os._exit(1)
os.close(p2cread)
self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
os.close(c2pwrite)
self.fromchild = os.fdopen(c2pread, 'r', bufsize)
popen2._active.append(self)
popen2.Popen4 = Popen4
class Popen3(popen2.Popen3, popen2.Popen4):
universal_newlines = 1
def __init__(self, command, **kw):
if kw.get('stderr') == 'STDOUT':
apply(popen2.Popen4.__init__, (self, command, 1))
else:
apply(popen2.Popen3.__init__, (self, command, 1))
self.stdin = self.tochild
self.stdout = self.fromchild
self.stderr = self.childerr
def wait(self, *args, **kw):
resultcode = apply(popen2.Popen3.wait, (self,)+args, kw)
if os.WIFEXITED(resultcode):
return os.WEXITSTATUS(resultcode)
elif os.WIFSIGNALED(resultcode):
return os.WTERMSIG(resultcode)
else:
return None
subprocess.Popen = Popen3
# From Josiah Carlson,
# ASPN : Python Cookbook : Module to allow Asynchronous subprocess use on Windows and Posix platforms
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/440554
PIPE = subprocess.PIPE
if subprocess.mswindows:
from win32file import ReadFile, WriteFile
from win32pipe import PeekNamedPipe
import msvcrt
else:
import select
import fcntl
try: fcntl.F_GETFL
except AttributeError: fcntl.F_GETFL = 3
try: fcntl.F_SETFL
except AttributeError: fcntl.F_SETFL = 4
class Popen(subprocess.Popen):
def recv(self, maxsize=None):
return self._recv('stdout', maxsize)
def recv_err(self, maxsize=None):
return self._recv('stderr', maxsize)
def send_recv(self, input='', maxsize=None):
return self.send(input), self.recv(maxsize), self.recv_err(maxsize)
def get_conn_maxsize(self, which, maxsize):
if maxsize is None:
maxsize = 1024
elif maxsize < 1:
maxsize = 1
return getattr(self, which), maxsize
def _close(self, which):
getattr(self, which).close()
setattr(self, which, None)
if subprocess.mswindows:
def send(self, input):
if not self.stdin:
return None
try:
x = msvcrt.get_osfhandle(self.stdin.fileno())
(errCode, written) = WriteFile(x, input)
except ValueError:
return self._close('stdin')
except (subprocess.pywintypes.error, Exception), why:
if why[0] in (109, errno.ESHUTDOWN):
return self._close('stdin')
raise
return written
def _recv(self, which, maxsize):
conn, maxsize = self.get_conn_maxsize(which, maxsize)
if conn is None:
return None
try:
x = msvcrt.get_osfhandle(conn.fileno())
(read, nAvail, nMessage) = PeekNamedPipe(x, 0)
if maxsize < nAvail:
nAvail = maxsize
if nAvail > 0:
(errCode, read) = ReadFile(x, nAvail, None)
except ValueError:
return self._close(which)
except (subprocess.pywintypes.error, Exception), why:
if why[0] in (109, errno.ESHUTDOWN):
return self._close(which)
raise
#if self.universal_newlines:
# read = self._translate_newlines(read)
return read
else:
def send(self, input):
if not self.stdin:
return None
if not select.select([], [self.stdin], [], 0)[1]:
return 0
try:
written = os.write(self.stdin.fileno(), input)
except OSError, why:
if why[0] == errno.EPIPE: #broken pipe
return self._close('stdin')
raise
return written
def _recv(self, which, maxsize):
conn, maxsize = self.get_conn_maxsize(which, maxsize)
if conn is None:
return None
try:
flags = fcntl.fcntl(conn, fcntl.F_GETFL)
except TypeError:
flags = None
else:
if not conn.closed:
fcntl.fcntl(conn, fcntl.F_SETFL, flags| os.O_NONBLOCK)
try:
if not select.select([conn], [], [], 0)[0]:
return ''
r = conn.read(maxsize)
if not r:
return self._close(which)
#if self.universal_newlines:
# r = self._translate_newlines(r)
return r
finally:
if not conn.closed and not flags is None:
fcntl.fcntl(conn, fcntl.F_SETFL, flags)
disconnect_message = "Other end disconnected!"
def recv_some(p, t=.1, e=1, tr=5, stderr=0):
if tr < 1:
tr = 1
x = time.time()+t
y = []
r = ''
pr = p.recv
if stderr:
pr = p.recv_err
while time.time() < x or r:
r = pr()
if r is None:
if e:
raise Exception(disconnect_message)
else:
break
elif r:
y.append(r)
else:
time.sleep(max((x-time.time())/tr, 0))
return ''.join(y)
# TODO(3.0): rewrite to use memoryview()
def send_all(p, data):
while len(data):
sent = p.send(data)
if sent is None:
raise Exception(disconnect_message)
data = buffer(data, sent)
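# Illustrative sketch (added; not part of the original recipe): recv_some() and
# send_all() are meant to be used with the Popen subclass above for
# non-blocking interaction, along the lines of:
#
#   p = Popen(['cat'], stdin=PIPE, stdout=PIPE)
#   send_all(p, 'hello\n')
#   reply = recv_some(p)        # polls p.recv() for up to t seconds
#
# recv_some() raises Exception(disconnect_message) once the pipe closes,
# unless it is called with e=0.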
try:
object
except NameError:
class object:
pass
class TestCmd(object):
"""Class TestCmd
"""
def __init__(self, description = None,
program = None,
interpreter = None,
workdir = None,
subdir = None,
verbose = None,
match = None,
diff = None,
combine = 0,
universal_newlines = 1):
self._cwd = os.getcwd()
self.description_set(description)
self.program_set(program)
self.interpreter_set(interpreter)
if verbose is None:
try:
verbose = max( 0, int(os.environ.get('TESTCMD_VERBOSE', 0)) )
except ValueError:
verbose = 0
self.verbose_set(verbose)
self.combine = combine
self.universal_newlines = universal_newlines
if match is not None:
self.match_function = match
else:
self.match_function = match_re
if diff is not None:
self.diff_function = diff
else:
try:
difflib
except NameError:
pass
else:
self.diff_function = simple_diff
#self.diff_function = difflib.context_diff
#self.diff_function = difflib.unified_diff
self._dirlist = []
self._preserve = {'pass_test': 0, 'fail_test': 0, 'no_result': 0}
if os.environ.has_key('PRESERVE') and not os.environ['PRESERVE'] is '':
self._preserve['pass_test'] = os.environ['PRESERVE']
self._preserve['fail_test'] = os.environ['PRESERVE']
self._preserve['no_result'] = os.environ['PRESERVE']
else:
try:
self._preserve['pass_test'] = os.environ['PRESERVE_PASS']
except KeyError:
pass
try:
self._preserve['fail_test'] = os.environ['PRESERVE_FAIL']
except KeyError:
pass
try:
self._preserve['no_result'] = os.environ['PRESERVE_NO_RESULT']
except KeyError:
pass
self._stdout = []
self._stderr = []
self.status = None
self.condition = 'no_result'
self.workdir_set(workdir)
self.subdir(subdir)
def __del__(self):
self.cleanup()
def __repr__(self):
return "%x" % id(self)
banner_char = '='
banner_width = 80
def banner(self, s, width=None):
if width is None:
width = self.banner_width
return s + self.banner_char * (width - len(s))
if os.name == 'posix':
def escape(self, arg):
"escape shell special characters"
slash = '\\'
special = '"$'
arg = string.replace(arg, slash, slash+slash)
for c in special:
arg = string.replace(arg, c, slash+c)
if re_space.search(arg):
arg = '"' + arg + '"'
return arg
else:
# Windows does not allow special characters in file names
# anyway, so no need for an escape function, we will just quote
# the arg.
def escape(self, arg):
if re_space.search(arg):
arg = '"' + arg + '"'
return arg
def canonicalize(self, path):
if is_List(path):
path = apply(os.path.join, tuple(path))
if not os.path.isabs(path):
path = os.path.join(self.workdir, path)
return path
def chmod(self, path, mode):
"""Changes permissions on the specified file or directory
path name."""
path = self.canonicalize(path)
os.chmod(path, mode)
def cleanup(self, condition = None):
"""Removes any temporary working directories for the specified
TestCmd environment. If the environment variable PRESERVE was
set when the TestCmd environment was created, temporary working
directories are not removed. If any of the environment variables
PRESERVE_PASS, PRESERVE_FAIL, or PRESERVE_NO_RESULT were set
when the TestCmd environment was created, then temporary working
directories are not removed if the test passed, failed, or had
no result, respectively. Temporary working directories are also
preserved for conditions specified via the preserve method.
Typically, this method is not called directly, but is used when
the script exits to clean up temporary working directories as
appropriate for the exit status.
"""
if not self._dirlist:
return
os.chdir(self._cwd)
self.workdir = None
if condition is None:
condition = self.condition
if self._preserve[condition]:
for dir in self._dirlist:
print "Preserved directory", dir
else:
list = self._dirlist[:]
list.reverse()
for dir in list:
self.writable(dir, 1)
shutil.rmtree(dir, ignore_errors = 1)
self._dirlist = []
try:
global _Cleanup
_Cleanup.remove(self)
except (AttributeError, ValueError):
pass
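# Illustrative note (added for clarity): the PRESERVE environment variables
# consulted in __init__() drive this cleanup. For example, running a test as
#
#   PRESERVE=1 python my_test.py
#
# keeps every temporary working directory regardless of outcome, while
# PRESERVE_FAIL=1 keeps them only when the test ends via fail_test().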
def command_args(self, program = None,
interpreter = None,
arguments = None):
if program:
if type(program) == type('') and not os.path.isabs(program):
program = os.path.join(self._cwd, program)
else:
program = self.program
if not interpreter:
interpreter = self.interpreter
if not type(program) in [type([]), type(())]:
program = [program]
cmd = list(program)
if interpreter:
if not type(interpreter) in [type([]), type(())]:
interpreter = [interpreter]
cmd = list(interpreter) + cmd
if arguments:
if type(arguments) == type(''):
arguments = string.split(arguments)
cmd.extend(arguments)
return cmd
def description_set(self, description):
"""Set the description of the functionality being tested.
"""
self.description = description
try:
difflib
except NameError:
def diff(self, a, b, name, *args, **kw):
print self.banner('Expected %s' % name)
print a
print self.banner('Actual %s' % name)
print b
else:
def diff(self, a, b, name, *args, **kw):
print self.banner(name)
args = (a.splitlines(), b.splitlines()) + args
lines = apply(self.diff_function, args, kw)
for l in lines:
print l
def fail_test(self, condition = 1, function = None, skip = 0):
"""Cause the test to fail.
"""
if not condition:
return
self.condition = 'fail_test'
fail_test(self = self,
condition = condition,
function = function,
skip = skip)
def interpreter_set(self, interpreter):
"""Set the program to be used to interpret the program
under test as a script.
"""
self.interpreter = interpreter
def match(self, lines, matches):
"""Compare actual and expected file contents.
"""
return self.match_function(lines, matches)
def match_exact(self, lines, matches):
"""Compare actual and expected file contents.
"""
return match_exact(lines, matches)
def match_re(self, lines, res):
"""Compare actual and expected file contents.
"""
return match_re(lines, res)
def match_re_dotall(self, lines, res):
"""Compare actual and expected file contents.
"""
return match_re_dotall(lines, res)
def no_result(self, condition = 1, function = None, skip = 0):
"""Report that the test could not be run.
"""
if not condition:
return
self.condition = 'no_result'
no_result(self = self,
condition = condition,
function = function,
skip = skip)
def pass_test(self, condition = 1, function = None):
"""Cause the test to pass.
"""
if not condition:
return
self.condition = 'pass_test'
pass_test(self = self, condition = condition, function = function)
def preserve(self, *conditions):
"""Arrange for the temporary working directories for the
specified TestCmd environment to be preserved for one or more
conditions. If no conditions are specified, arranges for
the temporary working directories to be preserved for all
conditions.
"""
if conditions is ():
conditions = ('pass_test', 'fail_test', 'no_result')
for cond in conditions:
self._preserve[cond] = 1
def program_set(self, program):
"""Set the executable program or script to be tested.
"""
if program and not os.path.isabs(program):
program = os.path.join(self._cwd, program)
self.program = program
def read(self, file, mode = 'rb'):
"""Reads and returns the contents of the specified file name.
The file name may be a list, in which case the elements are
concatenated with the os.path.join() method. The file is
assumed to be under the temporary working directory unless it
is an absolute path name. The I/O mode for the file may
be specified; it must begin with an 'r'. The default is
'rb' (binary read).
"""
file = self.canonicalize(file)
if mode[0] != 'r':
raise ValueError, "mode must begin with 'r'"
with open(file, mode) as f:
result = f.read()
return result
def rmdir(self, dir):
"""Removes the specified dir name.
The dir name may be a list, in which case the elements are
concatenated with the os.path.join() method. The dir is
assumed to be under the temporary working directory unless it
is an absolute path name.
The dir must be empty.
"""
dir = self.canonicalize(dir)
os.rmdir(dir)
def start(self, program = None,
interpreter = None,
arguments = None,
universal_newlines = None,
**kw):
"""
Starts a program or script for the test environment.
The specified program will have the original directory
prepended unless it is enclosed in a [list].
"""
cmd = self.command_args(program, interpreter, arguments)
cmd_string = string.join(map(self.escape, cmd), ' ')
if self.verbose:
sys.stderr.write(cmd_string + "\n")
if universal_newlines is None:
universal_newlines = self.universal_newlines
# On Windows, if we make stdin a pipe when we plan to send
# no input, and the test program exits before
# Popen calls msvcrt.open_osfhandle, that call will fail.
# So don't use a pipe for stdin if we don't need one.
stdin = kw.get('stdin', None)
if stdin is not None:
stdin = subprocess.PIPE
combine = kw.get('combine', self.combine)
if combine:
stderr_value = subprocess.STDOUT
else:
stderr_value = subprocess.PIPE
return Popen(cmd,
stdin=stdin,
stdout=subprocess.PIPE,
stderr=stderr_value,
universal_newlines=universal_newlines)
def finish(self, popen, **kw):
"""
Finishes and waits for the process being run under control of
the specified popen argument, recording the exit status,
standard output and error output.
"""
popen.stdin.close()
self.status = popen.wait()
if not self.status:
self.status = 0
self._stdout.append(popen.stdout.read())
if popen.stderr:
stderr = popen.stderr.read()
else:
stderr = ''
self._stderr.append(stderr)
def run(self, program = None,
interpreter = None,
arguments = None,
chdir = None,
stdin = None,
universal_newlines = None):
"""Runs a test of the program or script for the test
environment. Standard output and error output are saved for
future retrieval via the stdout() and stderr() methods.
The specified program will have the original directory
prepended unless it is enclosed in a [list].
"""
if chdir:
oldcwd = os.getcwd()
if not os.path.isabs(chdir):
chdir = os.path.join(self.workpath(chdir))
if self.verbose:
sys.stderr.write("chdir(" + chdir + ")\n")
os.chdir(chdir)
p = self.start(program,
interpreter,
arguments,
universal_newlines,
stdin=stdin)
if stdin:
if is_List(stdin):
for line in stdin:
p.stdin.write(line)
else:
p.stdin.write(stdin)
p.stdin.close()
out = p.stdout.read()
if p.stderr is None:
err = ''
else:
err = p.stderr.read()
try:
close_output = p.close_output
except AttributeError:
p.stdout.close()
if not p.stderr is None:
p.stderr.close()
else:
close_output()
self._stdout.append(out)
self._stderr.append(err)
self.status = p.wait()
if not self.status:
self.status = 0
if chdir:
os.chdir(oldcwd)
if self.verbose >= 2:
write = sys.stdout.write
write('============ STATUS: %d\n' % self.status)
out = self.stdout()
if out or self.verbose >= 3:
write('============ BEGIN STDOUT (len=%d):\n' % len(out))
write(out)
write('============ END STDOUT\n')
err = self.stderr()
if err or self.verbose >= 3:
write('============ BEGIN STDERR (len=%d)\n' % len(err))
write(err)
write('============ END STDERR\n')
def sleep(self, seconds = default_sleep_seconds):
"""Sleeps at least the specified number of seconds. If no
number is specified, sleeps at least the minimum number of
seconds necessary to advance file time stamps on the current
system. Sleeping more seconds is all right.
"""
time.sleep(seconds)
def stderr(self, run = None):
"""Returns the error output from the specified run number.
If there is no specified run number, then returns the error
output of the last run. If the run number is less than zero,
then returns the error output from that many runs back from the
current run.
"""
if not run:
run = len(self._stderr)
elif run < 0:
run = len(self._stderr) + run
run = run - 1
return self._stderr[run]
def stdout(self, run = None):
"""Returns the standard output from the specified run number.
If there is no specified run number, then returns the standard
output of the last run. If the run number is less than zero,
then returns the standard output from that many runs back from
the current run.
"""
if not run:
run = len(self._stdout)
elif run < 0:
run = len(self._stdout) + run
run = run - 1
return self._stdout[run]
def subdir(self, *subdirs):
"""Create new subdirectories under the temporary working
directory, one for each argument. An argument may be a list,
in which case the list elements are concatenated using the
os.path.join() method. Subdirectories multiple levels deep
must be created using a separate argument for each level:
test.subdir('sub', ['sub', 'dir'], ['sub', 'dir', 'ectory'])
Returns the number of subdirectories actually created.
"""
count = 0
for sub in subdirs:
if sub is None:
continue
if is_List(sub):
sub = apply(os.path.join, tuple(sub))
new = os.path.join(self.workdir, sub)
try:
os.mkdir(new)
except OSError:
pass
else:
count = count + 1
return count
def symlink(self, target, link):
"""Creates a symlink to the specified target.
The link name may be a list, in which case the elements are
concatenated with the os.path.join() method. The link is
assumed to be under the temporary working directory unless it
is an absolute path name. The target is *not* assumed to be
under the temporary working directory.
"""
link = self.canonicalize(link)
os.symlink(target, link)
def tempdir(self, path=None):
"""Creates a temporary directory.
A unique directory name is generated if no path name is specified.
The directory is created, and will be removed when the TestCmd
object is destroyed.
"""
if path is None:
try:
path = tempfile.mktemp(prefix=tempfile.template)
except TypeError:
path = tempfile.mktemp()
os.mkdir(path)
# Symlinks in the path will report things
# differently from os.getcwd(), so chdir there
# and back to fetch the canonical path.
cwd = os.getcwd()
try:
os.chdir(path)
path = os.getcwd()
finally:
os.chdir(cwd)
# Uppercase the drive letter since the case of drive
# letters is pretty much random on win32:
drive,rest = os.path.splitdrive(path)
if drive:
path = string.upper(drive) + rest
#
self._dirlist.append(path)
global _Cleanup
try:
_Cleanup.index(self)
except ValueError:
_Cleanup.append(self)
return path
def touch(self, path, mtime=None):
"""Updates the modification time on the specified file or
directory path name. The default is to update to the
current time if no explicit modification time is specified.
"""
path = self.canonicalize(path)
atime = os.path.getatime(path)
if mtime is None:
mtime = time.time()
os.utime(path, (atime, mtime))
def unlink(self, file):
"""Unlinks the specified file name.
The file name may be a list, in which case the elements are
concatenated with the os.path.join() method. The file is
assumed to be under the temporary working directory unless it
is an absolute path name.
"""
file = self.canonicalize(file)
os.unlink(file)
def verbose_set(self, verbose):
"""Set the verbose level.
"""
self.verbose = verbose
def where_is(self, file, path=None, pathext=None):
"""Find an executable file.
"""
if is_List(file):
file = apply(os.path.join, tuple(file))
if not os.path.isabs(file):
file = where_is(file, path, pathext)
return file
def workdir_set(self, path):
"""Creates a temporary working directory with the specified
path name. If the path is a null string (''), a unique
directory name is created.
"""
if (path != None):
if path == '':
path = None
path = self.tempdir(path)
self.workdir = path
def workpath(self, *args):
"""Returns the absolute path name to a subdirectory or file
within the current temporary working directory. Concatenates
the temporary working directory name with the specified
arguments using the os.path.join() method.
"""
return apply(os.path.join, (self.workdir,) + tuple(args))
def readable(self, top, read=1):
"""Make the specified directory tree readable (read == 1)
or not (read == None).
This method has no effect on Windows systems, which use a
completely different mechanism to control file readability.
"""
if sys.platform == 'win32':
return
if read:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|stat.S_IREAD))
else:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~stat.S_IREAD))
if os.path.isfile(top):
# If it's a file, that's easy, just chmod it.
do_chmod(top)
elif read:
# It's a directory and we're trying to turn on read
# permission, so it's also pretty easy, just chmod the
# directory and then chmod every entry on our walk down the
# tree. Because os.path.walk() is top-down, we'll enable
# read permission on any directories that have it disabled
# before os.path.walk() tries to list their contents.
do_chmod(top)
def chmod_entries(arg, dirname, names, do_chmod=do_chmod):
for n in names:
do_chmod(os.path.join(dirname, n))
os.path.walk(top, chmod_entries, None)
else:
# It's a directory and we're trying to turn off read
# permission, which means we have to chmod the directories
# in the tree bottom-up, lest disabling read permission from
# the top down get in the way of being able to get at lower
# parts of the tree. But os.path.walk() visits things top
# down, so we just use an object to collect a list of all
# of the entries in the tree, reverse the list, and then
# chmod the reversed (bottom-up) list.
col = Collector(top)
os.path.walk(top, col, None)
col.entries.reverse()
for d in col.entries: do_chmod(d)
def writable(self, top, write=1):
"""Make the specified directory tree writable (write == 1)
or not (write == None).
"""
if sys.platform == 'win32':
if write:
def do_chmod(fname):
try: os.chmod(fname, stat.S_IWRITE)
except OSError: pass
else:
def do_chmod(fname):
try: os.chmod(fname, stat.S_IREAD)
except OSError: pass
else:
if write:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|0200))
else:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~0200))
if os.path.isfile(top):
do_chmod(top)
else:
col = Collector(top)
os.path.walk(top, col, None)
for d in col.entries: do_chmod(d)
def executable(self, top, execute=1):
"""Make the specified directory tree executable (execute == 1)
or not (execute == None).
This method has no effect on Windows systems, which use a
completely different mechanism to control file executability.
"""
if sys.platform == 'win32':
return
if execute:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|stat.S_IEXEC))
else:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~stat.S_IEXEC))
if os.path.isfile(top):
# If it's a file, that's easy, just chmod it.
do_chmod(top)
elif execute:
# It's a directory and we're trying to turn on execute
# permission, so it's also pretty easy, just chmod the
# directory and then chmod every entry on our walk down the
# tree. Because os.path.walk() is top-down, we'll enable
# execute permission on any directories that have it disabled
# before os.path.walk() tries to list their contents.
do_chmod(top)
def chmod_entries(arg, dirname, names, do_chmod=do_chmod):
for n in names:
do_chmod(os.path.join(dirname, n))
os.path.walk(top, chmod_entries, None)
else:
# It's a directory and we're trying to turn off execute
# permission, which means we have to chmod the directories
# in the tree bottom-up, lest disabling execute permission from
# the top down get in the way of being able to get at lower
# parts of the tree. But os.path.walk() visits things top
# down, so we just use an object to collect a list of all
# of the entries in the tree, reverse the list, and then
# chmod the reversed (bottom-up) list.
col = Collector(top)
os.path.walk(top, col, None)
col.entries.reverse()
for d in col.entries: do_chmod(d)
def write(self, file, content, mode = 'wb'):
"""Writes the specified content text (second argument) to the
specified file name (first argument). The file name may be
a list, in which case the elements are concatenated with the
os.path.join() method. The file is created under the temporary
working directory. Any subdirectories in the path must already
exist. The I/O mode for the file may be specified; it must
begin with a 'w'. The default is 'wb' (binary write).
"""
file = self.canonicalize(file)
if mode[0] != 'w':
raise ValueError, "mode must begin with 'w'"
with open(file, mode) as f:
f.write(content)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
setjet/spark | examples/src/main/python/ml/kmeans_example.py | 69 | 1828 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.clustering import KMeans
# $example off$
from pyspark.sql import SparkSession
"""
An example demonstrating k-means clustering.
Run with:
bin/spark-submit examples/src/main/python/ml/kmeans_example.py
This example requires NumPy (http://www.numpy.org/).
"""
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("KMeansExample")\
.getOrCreate()
# $example on$
# Loads data.
dataset = spark.read.format("libsvm").load("data/mllib/sample_kmeans_data.txt")
# Trains a k-means model.
kmeans = KMeans().setK(2).setSeed(1)
model = kmeans.fit(dataset)
# Evaluate clustering by computing Within Set Sum of Squared Errors.
wssse = model.computeCost(dataset)
print("Within Set Sum of Squared Errors = " + str(wssse))
# Shows the result.
centers = model.clusterCenters()
print("Cluster Centers: ")
for center in centers:
print(center)
# $example off$
spark.stop()
| apache-2.0 |
CodyTXR0KR/cyanideBot | cyanide_bot/bot_logic.py | 2 | 9045 | # -*- coding: utf-8 -*-
### cyanide_bot
### GNU/GPL v2
### Author: Cody Rocker
### Author_email: [email protected]
### 2016
#-----------------------------------
# Requires:
#   - Python 3
#   - imgurpython
#-----------------------------------
import sys
import urllib.request
import urllib.error
import re
import smtplib
import keyring
from socket import gethostname
# Email dependencies
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from time import strftime
from .imgur_api_client import Client
class ImgurBot():
def __init__(self, static):
self.static = static
self.debug = static.debugger()
self.load_settings()
self.client = Client(static).login()
self.debug.log('ImgurBot successfuly initialized.')
def load_settings(self):
self.debug.log('ImgurBot.load_settings()')
config = self.static.get_bot_settings()
self.botmail = config.get('messaging', 'botmail')
self.devmail = config.get('messaging', 'devmail')
self.messaging_enabled = config.getboolean('messaging', 'enabled')
# Email message to developer
def send_message(self, MODE, message):
self.debug.log('ImgurBot.send_message() :: sending {0}...'.format(MODE))
if self.messaging_enabled:
msg = MIMEMultipart()
msg['From'] = self.botmail
msg['To'] = self.devmail
if MODE == "message":
msg['Subject'] = '{0}.cyanideBot -- message'.format(gethostname())
text = MIMEText(
'cyanideBot.explosmdotnet posted an image to Imgur\n'
'%s' % message)
elif MODE == "error":
msg['Subject'] = '{0}.cyanideBot -- error'.format(gethostname())
text = MIMEText(
'cyanideBot.explosmdotnet failed with error:\n'
'%s' % str(message))
msg.attach(text)
try:
server = smtplib.SMTP('smtp.gmail.com:587')
server.ehlo()
server.starttls()
server.ehlo()
password = keyring.get_password('cyanide_bot', 'botmail')
server.login(self.botmail, password)
password = None
server.sendmail(self.botmail, self.devmail, msg.as_string())
server.quit()
except Exception as error:
self.debug.log_error('ImgurBot.send_message() :: Failed.', error)
sys.exit()
self.debug.log('ImgurBot.send_message() :: {0} sent.'.format(MODE))
else:
self.debug.log('ImgurBot.send_message() :: messaging disabled.')
###==========================================================###
### -- CYANIDE-BOT functions ###
### -- This is where specific behaviors should be modified. ###
###==========================================================###
def get_urls(self, random=False):
self.debug.log('ImgurBot.get_urls()')
self.urls = {}  # reset to an empty dict on every call
## test if current comic is an animated episode
if random == False:
response = urllib.request.urlopen('http://explosm.net')
html = response.read()
random = self.isAnimation(html)
try:
if random:
response = urllib.request.urlopen('http://explosm.net/comics/random')
html = response.read()
self.urls['imgUrl'] = 'http://{0}'.format(re.findall(
b'<img id="main-comic" src="//(.*?)"/>', html)[0].decode('utf-8'))
self.debug.log('Image Url: {0}'.format(self.urls['imgUrl']))
self.urls['permalinkUrl'] = re.findall(
b'<input id="permalink" type="text" value="(.*?)" onclick=',
html)[0].decode('utf-8')
self.debug.log('Permalink Url: {0}'.format(self.urls['permalinkUrl']))
else:
# response = urllib.request.urlopen('http://explosm.net')
# html = response.read()
self.urls['imgUrl'] = 'http://{0}'.format(re.findall(
b'<img id="featured-comic" src="//(.*?)"/></a>',
html)[0].decode('utf-8'))
self.debug.log('Image Url: {0}'.format(self.urls['imgUrl']))
self.urls['permalinkUrl'] = re.findall(
b'<input id="permalink" type="text" value="(.*?)" onclick=',
html)[0].decode('utf-8')
self.debug.log('Permalink Url: {0}'.format(self.urls['permalinkUrl']))
# self.urls['hotlinkUrl'] = 'http://explosm.net{0}'.format(re.findall(
# b'<a href="(.*?)"><img id="featured-comic" src="',
# html)[0].decode('utf-8'))
# self.debug.log('Hotlink Url: {0}'.format(self.urls['hotlinkUrl']))
except Exception as error:
self.send_message('error', error)
self.debug.log_error('ImgurBot.get_urls() :: Failed.', error)
sys.exit()
self.debug.log('ImgurBot.get_urls() :: Complete.')
return self.urls
def isAnimation(self, html):
isAnimation = False
current_comic = ''  # default so the membership test below is safe if the regex fails
try:
current_comic = re.findall(
b'<a href="(.*?)"><img id="featured-comic" src="//files.explosm.net/comics/.*"/></a>',
html)[0].decode('utf-8')
except Exception as e:
self.debug.log_error('ImgurBot.isAnimation threw an exception', e)
if '//explosm.net/show/episode/' in current_comic:
self.debug.log('ImgurBot.isAnimation :: animation found, switching to random')
self.urls['hotlinkUrl'] = current_comic
isAnimation = True
return isAnimation
def make_post(self, publish=False, tag_image=False, tag='', random=False):
self.debug.log('ImgurBot.make_post()')
# Fetch image and build post metadata
urls = self.get_urls(random=random)
meta = {}
meta['album'] = None
meta['name'] = None
meta['title'] = 'Daily dose of Cyanide for ' + get_date()
meta['description'] = (
'Permalink -- %s\nFind more at -- http://explosm.net' % (
urls['permalinkUrl']))
try: # imgur_api functionality
# Perform upload action from provided url
upload_response = self.upload_from_url(urls['imgUrl'], meta)
if publish:
# Publish image to gallery if -p, --publish is true
self.publish_to_gallery(
upload_response['item_id'], upload_response['title'])
if tag_image:
if tag != '':
# Tag the image with user defined tag
self.tag_image(tag, upload_response['item_id'])
except Exception as error:
self.send_message('error', error)
self.debug.log_error('ImgurBot.make_post() :: Failed.', error)
sys.exit()
self.send_message('message', upload_response['link'])
self.debug.log('ImgurBot.make_post() :: Complete.')
def upload_from_url(self, url, meta):
self.debug.log('ImgurBot.upload_from_url()')
try:
upload_response = self.client.upload_from_url(url, meta, anon=False)
response = {}
response['item_id'] = upload_response['id']
response['title'] = upload_response['title']
response['link'] = upload_response['link']
except Exception as error:
self.debug.log_error('ImgurBot.upload_from_url() :: Failed', error)
sys.exit()
self.debug.log('ImgurBot.upload_from_url() :: Complete.')
return response
def publish_to_gallery(self, item_id, title):
self.debug.log('ImgurBot.publish_to_gallery()')
try:
# publish_response will be True if operation successful
publish_response = self.client.share_on_imgur(item_id, title)
except Exception as error:
self.debug.log_error('ImgurBot.publish_to_gallery() :: Failed.', error)
sys.exit()
self.debug.log('ImgurBot.publish_to_gallery() :: Complete.')
return publish_response
def tag_image(self, tag, item_id):
self.debug.log('ImgurBot.tag_image()')
try:
tag_response = self.client.gallery_tag_image(tag, item_id)
self.debug.log('tag_response=%s' % tag_response)
except Exception as error:
self.debug.log_error('ImgurBot.tag_image() :: Failed', error)
sys.exit()
self.debug.log('ImgurBot.tag_image() :: Image tagged with: [{0}]'.format(tag))
return tag_response
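# Illustrative call sequence (hedged: "static" is the settings helper passed to
# __init__ elsewhere in this package; the tag value is only an example):
#   bot = ImgurBot(static)
#   bot.make_post(publish=True, tag_image=True, tag='comics')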
# Return formatted date string
def get_date():
return strftime('%b %d %Y') | gpl-2.0 |
denisshockwave/image_processing_ocr_server | venv/lib/python2.7/site-packages/flask_wtf/i18n.py | 117 | 1720 | # coding: utf-8
"""
flask_wtf.i18n
~~~~~~~~~~~~~~
Internationalization support for Flask WTF.
:copyright: (c) 2013 by Hsiaoming Yang.
"""
from flask import _request_ctx_stack
from flask_babel import get_locale
from babel import support
try:
from wtforms.i18n import messages_path
except ImportError:
from wtforms.ext.i18n.utils import messages_path
__all__ = ('Translations', 'translations')
def _get_translations():
"""Returns the correct gettext translations.
Copy from flask-babel with some modifications.
"""
ctx = _request_ctx_stack.top
if ctx is None:
return None
# babel should be in extensions for get_locale
if 'babel' not in ctx.app.extensions:
return None
translations = getattr(ctx, 'wtforms_translations', None)
if translations is None:
dirname = messages_path()
translations = support.Translations.load(
dirname, [get_locale()], domain='wtforms'
)
ctx.wtforms_translations = translations
return translations
class Translations(object):
def gettext(self, string):
t = _get_translations()
if t is None:
return string
if hasattr(t, 'ugettext'):
return t.ugettext(string)
# Python 3 has no ugettext
return t.gettext(string)
def ngettext(self, singular, plural, n):
t = _get_translations()
if t is None:
if n == 1:
return singular
return plural
if hasattr(t, 'ungettext'):
return t.ungettext(singular, plural, n)
# Python 3 has no ungettext
return t.ngettext(singular, plural, n)
translations = Translations()
| gpl-3.0 |
TobbeTripitaka/src | book/rsf/school/pydemo.py | 3 | 1693 | # ------------------------------------------------------------
# strings
a='StPetersburg'
len(a)
a[0]
a[4:7]
b=a+' '+'workshop'
print b
c=b+2014            # raises TypeError: cannot concatenate 'str' and 'int' objects
c=b+' '+str(2014)
print c
# ------------------------------------------------------------
# lists
d = ['StPetersburg', 'workshop']
len(d)
print d[0]
print d[1]
d.append('2014')
print d
# ------------------------------------------------------------
# tuple = an immutable sequence of Python objects.
t = ('StPetersburg', 'workshop')
t = t + (2014,)
print t
# ------------------------------------------------------------
# dictionaries
e={'what':'workshop','where':'StPetersburg','when':2014}
print e
print e['where']+' '+e['what']+' '+str(e['when'])
f=dict(what='workshop',where='Melbourne',when=2013)
print f
print f['where']+' '+f['what']+' '+str(f['when'])
# ------------------------------------------------------------
# loops
for i in range(len(a)):
print a[i]
for i in range(len(d)):
print d[i]
for i in t:
print i
for key in e.keys():
print key,e[key]
# ------------------------------------------------------------
# conditional statements
for k in range(5):
if k < 2:
print k,'<2'
else:
print k,'>=2'
try:
b+2014
except:
print "error!"
# ------------------------------------------------------------
# functions
def m8rschool(year):
workshops=dict(StPetersburg=2014,Melbourne=2013)
for key in workshops.keys():
if workshops[key]==year:
return key
print m8rschool(2014)
def increment(a,b=5):
return a+b
# ------------------------------------------------------------
# modules
import math
x=math.sqrt(increment(4))
print x
| gpl-2.0 |
rohithredd94/Computer-Vision-using-OpenCV | Particle-Filter-Tracking/PF_Tracker.py | 1 | 4110 | import cv2
import numpy as np
from similarity import *
from hist import *
class PF_Tracker:
def __init__(self, model, search_space, num_particles=100, state_dims=2,
control_std=10, sim_std=20, alpha=0.0):
self.model = model
self.search_space = search_space[::-1]
self.num_particles = num_particles
self.state_dims = state_dims
self.control_std = control_std
self.sim_std = sim_std
self.alpha = alpha
#Initialize particles using a uniform distribution
self.particles = np.array([np.random.uniform(0, self.search_space[i],self.num_particles) for i in range(self.state_dims)]).T
self.weights = np.ones(len(self.particles)) / len(self.particles)
self.idxs = np.arange(num_particles)
self.estimate_state()
def update(self, frame):
self.displace()
self.observe(frame)
self.resample()
self.estimate_state()
if self.alpha > 0:
self.update_model(frame)
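# Illustrative per-frame driver sketch (hedged: frame capture and the template
# patch "model_patch" come from the caller, e.g. via OpenCV's cv2.VideoCapture):
#   tracker = PF_Tracker(model_patch, frame.shape[:2], num_particles=200)
#   while frames_remain:
#       tracker.update(frame)            # displace -> observe -> resample -> estimate
#       tracker.visualize_filter(frame)  # draw particles, tracking window and spread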
def displace(self):
#Displace particles using a normal distribution centered around 0
self.particles += np.random.normal(0, self.control_std,
self.particles.shape)
def observe(self, img):
#Get patches corresponding to each particle
mh, mw = self.model.shape[:2]
minx = (self.particles[:,0] - mw/2).astype(np.int)
miny = (self.particles[:,1] - mh/2).astype(np.int)
candidates = [img[miny[i]:miny[i]+mh, minx[i]:minx[i]+mw]
for i in range(self.num_particles)]
#Compute importance weight - similarity of each patch to the model
self.weights = np.array([similarity(cand, self.model, self.sim_std) for cand in candidates])
self.weights /= np.sum(self.weights)
def resample(self):
sw, sh = self.search_space[:2]
mh, mw = self.model.shape[:2]
j = np.random.choice(self.idxs, self.num_particles, True,
p=self.weights.T) #Sample new particle indices using the distribution of the weights
control = np.random.normal(0, self.control_std, self.particles.shape)  # Random control input (currently unused; displacement noise is applied in displace())
self.particles = np.array(self.particles[j])
self.particles[:,0] = np.clip(self.particles[:,0], 0, sw - 1)
self.particles[:,1] = np.clip(self.particles[:,1], 0, sh - 1)
def estimate_state(self):
state_idx = np.random.choice(self.idxs, 1, p=self.weights)
self.state = self.particles[state_idx][0]
def update_model(self, frame):
#Get current model based on belief
mh, mw = self.model.shape[:2]
minx = int(self.state[0] - mw/2)
miny = int(self.state[1] - mh/2)
best_model = frame[miny:miny+mh, minx:minx+mw]
#Apply appearance model update if new model shape is unchanged
if best_model.shape == self.model.shape:
self.model = self.alpha * best_model + (1-self.alpha) * self.model
self.model = self.model.astype(np.uint8)
def visualize_filter(self, img):
self.draw_particles(img)
self.draw_window(img)
self.draw_std(img)
def draw_particles(self, img):
for p in self.particles:
cv2.circle(img, tuple(p.astype(int)), 2, (180,255,0), -1)
def draw_window(self, img):
best_idx = cv2.minMaxLoc(self.weights)[3][1]
best_state = self.particles[best_idx]
pt1 = (best_state - np.array(self.model.shape[::-1])/2).astype(np.int)
pt2 = pt1 + np.array(self.model.shape[::-1])
cv2.rectangle(img, tuple(pt1), tuple(pt2), (0,255,0), 2)
def draw_std(self, img):
weighted_sum = 0
dist = np.linalg.norm(self.particles - self.state)
weighted_sum = np.sum(dist * self.weights.reshape((-1,1)))
cv2.circle(img, tuple(self.state.astype(np.int)),
int(weighted_sum), (255,255,255), 1) | mit |
albertomurillo/ansible | test/integration/targets/module_precedence/lib_with_extension/ping.py | 320 | 2144 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>
# (c) 2016, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ping
version_added: historical
short_description: Try to connect to host, verify a usable python and return C(pong) on success.
description:
- A trivial test module, this module always returns C(pong) on successful
contact. It does not make sense in playbooks, but it is useful from
C(/usr/bin/ansible) to verify the ability to login and that a usable python is configured.
- This is NOT ICMP ping, this is just a trivial test module.
options: {}
author:
- "Ansible Core Team"
- "Michael DeHaan"
'''
EXAMPLES = '''
# Test we can logon to 'webservers' and execute python with json lib.
ansible webservers -m ping
'''
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
data=dict(required=False, default=None),
),
supports_check_mode=True
)
result = dict(ping='pong')
if module.params['data']:
if module.params['data'] == 'crash':
raise Exception("boom")
result['ping'] = module.params['data']
result['location'] = 'library'
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
weso/CWR-DataApi | tests/grammar/factory/record/test_publisher_territory.py | 1 | 4905 | # -*- coding: utf-8 -*-
import unittest
from pyparsing import ParseException
from tests.utils.grammar import get_record_grammar
"""
CWR Publisher Territory of Control (SPT) grammar tests.
The following cases are tested:
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestPublisherTerritoryGrammar(unittest.TestCase):
"""
Tests that the NPN grammar decodes correctly formatted strings
"""
def setUp(self):
self.grammar = get_record_grammar('publisher_territory')
def test_valid_common(self):
"""
Tests that Publisher Territory of Control grammar decodes correctly formatted record prefixes.
This test contains all the optional fields.
"""
record = 'SPT000001790000054770 013330133301333I0484Y001'
result = self.grammar.parseString(record)[0]
self.assertEqual('SPT', result.record_type)
self.assertEqual(179, result.transaction_sequence_n)
self.assertEqual(547, result.record_sequence_n)
self.assertEqual('70', result.ip_n)
self.assertEqual(13.33, result.pr_collection_share)
self.assertEqual(13.33, result.mr_collection_share)
self.assertEqual(13.33, result.sr_collection_share)
self.assertEqual('I', result.inclusion_exclusion_indicator)
self.assertEqual(484, result.tis_numeric_code)
self.assertEqual(True, result.shares_change)
self.assertEqual(1, result.sequence_n)
def test_valid_common_short(self):
"""
Tests that Publisher Territory of Control grammar decodes correctly formatted record prefixes.
This test contains all the optional fields.
"""
record = 'SPT0000000100000002160694172 050000500000000I0484N01'
result = self.grammar.parseString(record)[0]
self.assertEqual('SPT', result.record_type)
self.assertEqual(1, result.transaction_sequence_n)
self.assertEqual(2, result.record_sequence_n)
self.assertEqual('160694172', result.ip_n)
self.assertEqual(50, result.pr_collection_share)
self.assertEqual(50, result.mr_collection_share)
self.assertEqual(0, result.sr_collection_share)
self.assertEqual('I', result.inclusion_exclusion_indicator)
self.assertEqual(484, result.tis_numeric_code)
self.assertEqual(False, result.shares_change)
self.assertEqual(1, result.sequence_n)
def test_valid_full(self):
"""
Tests that Publisher Territory of Control grammar decodes correctly formatted record prefixes.
This test contains all the optional fields.
"""
record = 'SPT0000123400000023A12345678 010120500002520I0008Y012'
result = self.grammar.parseString(record)[0]
self.assertEqual('SPT', result.record_type)
self.assertEqual(1234, result.transaction_sequence_n)
self.assertEqual(23, result.record_sequence_n)
self.assertEqual('A12345678', result.ip_n)
self.assertEqual(10.12, result.pr_collection_share)
self.assertEqual(50, result.mr_collection_share)
self.assertEqual(25.2, result.sr_collection_share)
self.assertEqual('I', result.inclusion_exclusion_indicator)
self.assertEqual(8, result.tis_numeric_code)
self.assertEqual(True, result.shares_change)
self.assertEqual(12, result.sequence_n)
def test_valid_min(self):
"""
Tests that Publisher Territory of Control grammar decodes correctly formatted record prefixes.
This test omits the optional fields.
"""
record = 'SPT0000000100000001 000000000000000I0008Y012'
result = self.grammar.parseString(record)[0]
self.assertEqual('SPT', result.record_type)
self.assertEqual(1, result.transaction_sequence_n)
self.assertEqual(1, result.record_sequence_n)
self.assertEqual(None, result.ip_n)
self.assertEqual(0, result.pr_collection_share)
self.assertEqual(0, result.mr_collection_share)
self.assertEqual(0, result.sr_collection_share)
self.assertEqual('I', result.inclusion_exclusion_indicator)
self.assertEqual(8, result.tis_numeric_code)
self.assertEqual(True, result.shares_change)
self.assertEqual(12, result.sequence_n)
class TestPublisherTerritoryGrammarException(unittest.TestCase):
def setUp(self):
self.grammar = get_record_grammar('publisher_territory')
def test_empty(self):
"""
Tests that an exception is thrown when the record is an empty string.
"""
record = ''
self.assertRaises(ParseException, self.grammar.parseString, record)
def test_invalid(self):
record = 'This is an invalid string'
self.assertRaises(ParseException, self.grammar.parseString, record)
| mit |
2014c2g19/2014c2g19 | w2/static/Brython2.0.0-20140209-164925/Lib/gc.py | 743 | 3548 | """This module provides access to the garbage collector for reference cycles.
enable() -- Enable automatic garbage collection.
disable() -- Disable automatic garbage collection.
isenabled() -- Returns true if automatic collection is enabled.
collect() -- Do a full collection right now.
get_count() -- Return the current collection counts.
set_debug() -- Set debugging flags.
get_debug() -- Get debugging flags.
set_threshold() -- Set the collection thresholds.
get_threshold() -- Return the current the collection thresholds.
get_objects() -- Return a list of all objects tracked by the collector.
is_tracked() -- Returns true if a given object is tracked.
get_referrers() -- Return the list of objects that refer to an object.
get_referents() -- Return the list of objects that an object refers to.
"""
DEBUG_COLLECTABLE = 2
DEBUG_LEAK = 38
DEBUG_SAVEALL = 32
DEBUG_STATS = 1
DEBUG_UNCOLLECTABLE = 4
class __loader__:
pass
callbacks = []
def collect(*args,**kw):
"""collect([generation]) -> n
With no arguments, run a full collection. The optional argument
may be an integer specifying which generation to collect. A ValueError
is raised if the generation number is invalid.
The number of unreachable objects is returned.
"""
pass
def disable(*args,**kw):
"""disable() -> None
Disable automatic garbage collection.
"""
pass
def enable(*args,**kw):
"""enable() -> None
Enable automatic garbage collection.
"""
pass
garbage = []
def get_count(*args,**kw):
"""get_count() -> (count0, count1, count2)
Return the current collection counts
"""
pass
def get_debug(*args,**kw):
"""get_debug() -> flags
Get the garbage collection debugging flags.
"""
pass
def get_objects(*args,**kw):
"""get_objects() -> [...]
Return a list of objects tracked by the collector (excluding the list
returned).
"""
pass
def get_referents(*args,**kw):
"""get_referents(*objs) -> list Return the list of objects that are directly referred to by objs."""
pass
def get_referrers(*args,**kw):
"""get_referrers(*objs) -> list Return the list of objects that directly refer to any of objs."""
pass
def get_threshold(*args,**kw):
"""get_threshold() -> (threshold0, threshold1, threshold2)
Return the current collection thresholds
"""
pass
def is_tracked(*args,**kw):
"""is_tracked(obj) -> bool
Returns true if the object is tracked by the garbage collector.
Simple atomic objects will return false.
"""
pass
def isenabled(*args,**kw):
"""isenabled() -> status
Returns true if automatic garbage collection is enabled.
"""
pass
def set_debug(*args,**kw):
"""set_debug(flags) -> None
Set the garbage collection debugging flags. Debugging information is
written to sys.stderr.
flags is an integer and can have the following bits turned on:
DEBUG_STATS - Print statistics during collection.
DEBUG_COLLECTABLE - Print collectable objects found.
DEBUG_UNCOLLECTABLE - Print unreachable but uncollectable objects found.
DEBUG_SAVEALL - Save objects to gc.garbage rather than freeing them.
DEBUG_LEAK - Debug leaking programs (everything but STATS).
"""
pass
def set_threshold(*args,**kw):
"""set_threshold(threshold0, [threshold1, threshold2]) -> None
Sets the collection thresholds. Setting threshold0 to zero disables
collection.
"""
pass
| gpl-2.0 |
marcydoty/geraldo | site/newsite/site-geraldo/django/contrib/admindocs/views.py | 15 | 15909 | from django import template, templatetags
from django.template import RequestContext
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.db import models
from django.shortcuts import render_to_response
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import Http404
from django.core import urlresolvers
from django.contrib.admindocs import utils
from django.contrib.sites.models import Site
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
import inspect, os, re
# Exclude methods starting with these strings from documentation
MODEL_METHODS_EXCLUDE = ('_', 'add_', 'delete', 'save', 'set_')
class GenericSite(object):
domain = 'example.com'
name = 'my site'
def get_root_path():
from django.contrib import admin
try:
return urlresolvers.reverse(admin.site.root, args=[''])
except urlresolvers.NoReverseMatch:
return getattr(settings, "ADMIN_SITE_ROOT_URL", "/admin/")
def doc_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
return render_to_response('admin_doc/index.html', {
'root_path': get_root_path(),
}, context_instance=RequestContext(request))
doc_index = staff_member_required(doc_index)
def bookmarklets(request):
admin_root = get_root_path()
return render_to_response('admin_doc/bookmarklets.html', {
'root_path': admin_root,
'admin_url': mark_safe("%s://%s%s" % (request.is_secure() and 'https' or 'http', request.get_host(), admin_root)),
}, context_instance=RequestContext(request))
bookmarklets = staff_member_required(bookmarklets)
def template_tag_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
load_all_installed_template_libraries()
tags = []
for module_name, library in template.libraries.items():
for tag_name, tag_func in library.tags.items():
title, body, metadata = utils.parse_docstring(tag_func.__doc__)
if title:
title = utils.parse_rst(title, 'tag', _('tag:') + tag_name)
if body:
body = utils.parse_rst(body, 'tag', _('tag:') + tag_name)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'tag', _('tag:') + tag_name)
if library in template.builtins:
tag_library = None
else:
tag_library = module_name.split('.')[-1]
tags.append({
'name': tag_name,
'title': title,
'body': body,
'meta': metadata,
'library': tag_library,
})
return render_to_response('admin_doc/template_tag_index.html', {
'root_path': get_root_path(),
'tags': tags
}, context_instance=RequestContext(request))
template_tag_index = staff_member_required(template_tag_index)
def template_filter_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
load_all_installed_template_libraries()
filters = []
for module_name, library in template.libraries.items():
for filter_name, filter_func in library.filters.items():
title, body, metadata = utils.parse_docstring(filter_func.__doc__)
if title:
title = utils.parse_rst(title, 'filter', _('filter:') + filter_name)
if body:
body = utils.parse_rst(body, 'filter', _('filter:') + filter_name)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'filter', _('filter:') + filter_name)
if library in template.builtins:
tag_library = None
else:
tag_library = module_name.split('.')[-1]
filters.append({
'name': filter_name,
'title': title,
'body': body,
'meta': metadata,
'library': tag_library,
})
return render_to_response('admin_doc/template_filter_index.html', {
'root_path': get_root_path(),
'filters': filters
}, context_instance=RequestContext(request))
template_filter_index = staff_member_required(template_filter_index)
def view_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
if settings.ADMIN_FOR:
settings_modules = [__import__(m, {}, {}, ['']) for m in settings.ADMIN_FOR]
else:
settings_modules = [settings]
views = []
for settings_mod in settings_modules:
urlconf = __import__(settings_mod.ROOT_URLCONF, {}, {}, [''])
view_functions = extract_views_from_urlpatterns(urlconf.urlpatterns)
if Site._meta.installed:
site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
else:
site_obj = GenericSite()
for (func, regex) in view_functions:
views.append({
'name': func.__name__,
'module': func.__module__,
'site_id': settings_mod.SITE_ID,
'site': site_obj,
'url': simplify_regex(regex),
})
return render_to_response('admin_doc/view_index.html', {
'root_path': get_root_path(),
'views': views
}, context_instance=RequestContext(request))
view_index = staff_member_required(view_index)
def view_detail(request, view):
if not utils.docutils_is_available:
return missing_docutils_page(request)
mod, func = urlresolvers.get_mod_func(view)
try:
view_func = getattr(__import__(mod, {}, {}, ['']), func)
except (ImportError, AttributeError):
raise Http404
title, body, metadata = utils.parse_docstring(view_func.__doc__)
if title:
title = utils.parse_rst(title, 'view', _('view:') + view)
if body:
body = utils.parse_rst(body, 'view', _('view:') + view)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'model', _('view:') + view)
return render_to_response('admin_doc/view_detail.html', {
'root_path': get_root_path(),
'name': view,
'summary': title,
'body': body,
'meta': metadata,
}, context_instance=RequestContext(request))
view_detail = staff_member_required(view_detail)
def model_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
m_list = [m._meta for m in models.get_models()]
return render_to_response('admin_doc/model_index.html', {
'root_path': get_root_path(),
'models': m_list
}, context_instance=RequestContext(request))
model_index = staff_member_required(model_index)
def model_detail(request, app_label, model_name):
if not utils.docutils_is_available:
return missing_docutils_page(request)
# Get the model class.
try:
app_mod = models.get_app(app_label)
except ImproperlyConfigured:
raise Http404, _("App %r not found") % app_label
model = None
for m in models.get_models(app_mod):
if m._meta.object_name.lower() == model_name:
model = m
break
if model is None:
raise Http404, _("Model %(model_name)r not found in app %(app_label)r") % {'model_name': model_name, 'app_label': app_label}
opts = model._meta
# Gather fields/field descriptions.
fields = []
for field in opts.fields:
# ForeignKey is a special case since the field will actually be a
# descriptor that returns the other object
if isinstance(field, models.ForeignKey):
data_type = related_object_name = field.rel.to.__name__
app_label = field.rel.to._meta.app_label
verbose = utils.parse_rst((_("the related `%(app_label)s.%(data_type)s` object") % {'app_label': app_label, 'data_type': data_type}), 'model', _('model:') + data_type)
else:
data_type = get_readable_field_data_type(field)
verbose = field.verbose_name
fields.append({
'name': field.name,
'data_type': data_type,
'verbose': verbose,
'help_text': field.help_text,
})
# Gather model methods.
for func_name, func in model.__dict__.items():
if (inspect.isfunction(func) and len(inspect.getargspec(func)[0]) == 1):
try:
for exclude in MODEL_METHODS_EXCLUDE:
if func_name.startswith(exclude):
raise StopIteration
except StopIteration:
continue
verbose = func.__doc__
if verbose:
verbose = utils.parse_rst(utils.trim_docstring(verbose), 'model', _('model:') + opts.module_name)
fields.append({
'name': func_name,
'data_type': get_return_data_type(func_name),
'verbose': verbose,
})
# Gather related objects
for rel in opts.get_all_related_objects():
verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': rel.opts.app_label, 'object_name': rel.opts.object_name}
accessor = rel.get_accessor_name()
fields.append({
'name' : "%s.all" % accessor,
'data_type' : 'List',
'verbose' : utils.parse_rst(_("all %s") % verbose , 'model', _('model:') + opts.module_name),
})
fields.append({
'name' : "%s.count" % accessor,
'data_type' : 'Integer',
'verbose' : utils.parse_rst(_("number of %s") % verbose , 'model', _('model:') + opts.module_name),
})
return render_to_response('admin_doc/model_detail.html', {
'root_path': get_root_path(),
'name': '%s.%s' % (opts.app_label, opts.object_name),
'summary': _("Fields on %s objects") % opts.object_name,
'description': model.__doc__,
'fields': fields,
}, context_instance=RequestContext(request))
model_detail = staff_member_required(model_detail)
def template_detail(request, template):
templates = []
for site_settings_module in settings.ADMIN_FOR:
settings_mod = __import__(site_settings_module, {}, {}, [''])
if Site._meta.installed:
site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
else:
site_obj = GenericSite()
for dir in settings_mod.TEMPLATE_DIRS:
template_file = os.path.join(dir, "%s.html" % template)
templates.append({
'file': template_file,
'exists': os.path.exists(template_file),
'contents': lambda: os.path.exists(template_file) and open(template_file).read() or '',
'site_id': settings_mod.SITE_ID,
'site': site_obj,
'order': list(settings_mod.TEMPLATE_DIRS).index(dir),
})
return render_to_response('admin_doc/template_detail.html', {
'root_path': get_root_path(),
'name': template,
'templates': templates,
}, context_instance=RequestContext(request))
template_detail = staff_member_required(template_detail)
####################
# Helper functions #
####################
def missing_docutils_page(request):
"""Display an error message for people without docutils"""
return render_to_response('admin_doc/missing_docutils.html')
def load_all_installed_template_libraries():
# Load/register all template tag libraries from installed apps.
for e in templatetags.__path__:
libraries = [os.path.splitext(p)[0] for p in os.listdir(e) if p.endswith('.py') and p[0].isalpha()]
for library_name in libraries:
try:
lib = template.get_library("django.templatetags.%s" % library_name.split('.')[-1])
except template.InvalidTemplateLibrary:
pass
def get_return_data_type(func_name):
"""Return a somewhat-helpful data type given a function name"""
if func_name.startswith('get_'):
if func_name.endswith('_list'):
return 'List'
elif func_name.endswith('_count'):
return 'Integer'
return ''
# Maps Field objects to their human-readable data types, as strings.
# Column-type strings can contain format strings; they'll be interpolated
# against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
DATA_TYPE_MAPPING = {
'AutoField' : _('Integer'),
'BooleanField' : _('Boolean (Either True or False)'),
'CharField' : _('String (up to %(max_length)s)'),
'CommaSeparatedIntegerField': _('Comma-separated integers'),
'DateField' : _('Date (without time)'),
'DateTimeField' : _('Date (with time)'),
'DecimalField' : _('Decimal number'),
'EmailField' : _('E-mail address'),
'FileField' : _('File path'),
'FilePathField' : _('File path'),
'FloatField' : _('Floating point number'),
'ForeignKey' : _('Integer'),
'ImageField' : _('File path'),
'IntegerField' : _('Integer'),
'IPAddressField' : _('IP address'),
'ManyToManyField' : '',
'NullBooleanField' : _('Boolean (Either True, False or None)'),
'OneToOneField' : _('Relation to parent model'),
'PhoneNumberField' : _('Phone number'),
'PositiveIntegerField' : _('Integer'),
'PositiveSmallIntegerField' : _('Integer'),
'SlugField' : _('String (up to %(max_length)s)'),
'SmallIntegerField' : _('Integer'),
'TextField' : _('Text'),
'TimeField' : _('Time'),
'URLField' : _('URL'),
'USStateField' : _('U.S. state (two uppercase letters)'),
'XMLField' : _('XML text'),
}
def get_readable_field_data_type(field):
return DATA_TYPE_MAPPING[field.get_internal_type()] % field.__dict__
def extract_views_from_urlpatterns(urlpatterns, base=''):
"""
Return a list of views from a list of urlpatterns.
Each object in the returned list is a two-tuple: (view_func, regex)
"""
views = []
for p in urlpatterns:
if hasattr(p, '_get_callback'):
try:
views.append((p._get_callback(), base + p.regex.pattern))
except ViewDoesNotExist:
continue
elif hasattr(p, '_get_url_patterns'):
try:
patterns = p.url_patterns
except ImportError:
continue
views.extend(extract_views_from_urlpatterns(patterns, base + p.regex.pattern))
else:
raise TypeError, _("%s does not appear to be a urlpattern object") % p
return views
named_group_matcher = re.compile(r'\(\?P(<\w+>).+?\)')
non_named_group_matcher = re.compile(r'\(.*?\)')
def simplify_regex(pattern):
"""
Clean up urlpattern regexes into something somewhat readable by Mere Humans:
turns something like "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
into "<sport_slug>/athletes/<athlete_slug>/"
"""
# handle named groups first
pattern = named_group_matcher.sub(lambda m: m.group(1), pattern)
# handle non-named groups
pattern = non_named_group_matcher.sub("<var>", pattern)
# clean up any outstanding regex-y characters.
pattern = pattern.replace('^', '').replace('$', '').replace('?', '').replace('//', '/').replace('\\', '')
if not pattern.startswith('/'):
pattern = '/' + pattern
return pattern
| lgpl-3.0 |
nwswanson/checkin | server/vendor/wtforms/utils.py | 194 | 1504 |
class UnsetValue(object):
"""
An unset value.
This is used in situations where a blank value like `None` is acceptable
usually as the default value of a class variable or function parameter
(iow, usually when `None` is a valid value.)
"""
def __str__(self):
return '<unset value>'
def __repr__(self):
return '<unset value>'
def __bool__(self):
return False
def __nonzero__(self):
return False
unset_value = UnsetValue()
class WebobInputWrapper(object):
"""
Wrap a webob MultiDict for use as passing as `formdata` to Field.
Since for consistency, we have decided in WTForms to support as input a
small subset of the API provided in common between cgi.FieldStorage,
Django's QueryDict, and Werkzeug's MultiDict, we need to wrap Webob, the
only supported framework whose multidict does not fit this API, but is
nevertheless used by a lot of frameworks.
While we could write a full wrapper to support all the methods, this will
undoubtedly result in bugs due to some subtle differences between the
various wrappers. So we will keep it simple.
"""
def __init__(self, multidict):
self._wrapped = multidict
def __iter__(self):
return iter(self._wrapped)
def __len__(self):
return len(self._wrapped)
def __contains__(self, name):
return (name in self._wrapped)
def getlist(self, name):
return self._wrapped.getall(name)
| mit |
akarki15/mozillians | vendor-local/lib/python/tablib/packages/openpyxl3/writer/strings.py | 55 | 2928 | # file openpyxl/writer/strings.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
"""Write the shared string table."""
# Python stdlib imports
from io import StringIO
# package imports
from ..shared.xmltools import start_tag, end_tag, tag, XMLGenerator
def create_string_table(workbook):
"""Compile the string table for a workbook."""
strings = set()
for sheet in workbook.worksheets:
for cell in sheet.get_cell_collection():
if cell.data_type == cell.TYPE_STRING and cell._value is not None:
strings.add(cell.value)
return dict((key, i) for i, key in enumerate(strings))
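# Illustrative flow when serializing a workbook (hedged sketch, names are local):
#   table = create_string_table(workbook)   # {string: index}
#   xml = write_string_table(table)          # sharedStrings XML payload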
def write_string_table(string_table):
"""Write the string table xml."""
temp_buffer = StringIO()
doc = XMLGenerator(temp_buffer, 'utf-8')
start_tag(doc, 'sst', {'xmlns':
'http://schemas.openxmlformats.org/spreadsheetml/2006/main',
'uniqueCount': '%d' % len(string_table)})
strings_to_write = sorted(iter(string_table.items()),
key=lambda pair: pair[1])
for key in [pair[0] for pair in strings_to_write]:
start_tag(doc, 'si')
if key.strip() != key:
attr = {'xml:space': 'preserve'}
else:
attr = {}
tag(doc, 't', attr, key)
end_tag(doc, 'si')
end_tag(doc, 'sst')
string_table_xml = temp_buffer.getvalue()
temp_buffer.close()
return string_table_xml
class StringTableBuilder(object):
def __init__(self):
self.counter = 0
self.dct = {}
def add(self, key):
key = key.strip()
try:
return self.dct[key]
except KeyError:
res = self.dct[key] = self.counter
self.counter += 1
return res
def get_table(self):
return self.dct
| bsd-3-clause |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Lib/test/test_coercion.py | 121 | 11399 | import copy
import unittest
from test.test_support import run_unittest, TestFailed, check_warnings
# Fake a number that implements numeric methods through __coerce__
class CoerceNumber:
def __init__(self, arg):
self.arg = arg
def __repr__(self):
return '<CoerceNumber %s>' % repr(self.arg)
def __coerce__(self, other):
if isinstance(other, CoerceNumber):
return self.arg, other.arg
else:
return (self.arg, other)
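# For example, CoerceNumber(2) + 10 coerces to the pair (2, 10) and therefore
# evaluates to 12 under Python 2's classic coercion rules.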
# New-style class version of CoerceNumber
class CoerceTo(object):
def __init__(self, arg):
self.arg = arg
def __coerce__(self, other):
if isinstance(other, CoerceTo):
return self.arg, other.arg
else:
return self.arg, other
# Fake a number that implements numeric ops through methods.
class MethodNumber:
def __init__(self,arg):
self.arg = arg
def __repr__(self):
return '<MethodNumber %s>' % repr(self.arg)
def __add__(self,other):
return self.arg + other
def __radd__(self,other):
return other + self.arg
def __sub__(self,other):
return self.arg - other
def __rsub__(self,other):
return other - self.arg
def __mul__(self,other):
return self.arg * other
def __rmul__(self,other):
return other * self.arg
def __div__(self,other):
return self.arg / other
def __rdiv__(self,other):
return other / self.arg
def __truediv__(self,other):
return self.arg / other
def __rtruediv__(self,other):
return other / self.arg
def __floordiv__(self,other):
return self.arg // other
def __rfloordiv__(self,other):
return other // self.arg
def __pow__(self,other):
return self.arg ** other
def __rpow__(self,other):
return other ** self.arg
def __mod__(self,other):
return self.arg % other
def __rmod__(self,other):
return other % self.arg
def __cmp__(self, other):
return cmp(self.arg, other)
candidates = [2, 2L, 4.0, 2+0j, [1], (2,), None,
MethodNumber(2), CoerceNumber(2)]
infix_binops = [ '+', '-', '*', '**', '%', '//', '/' ]
TE = TypeError
# b = both normal and augmented give same result list
# s = single result lists for normal and augmented
# e = equals other results
# result lists: ['+', '-', '*', '**', '%', '//', ('classic /', 'new /')]
# ^^^^^^^^^^^^^^^^^^^^^^
# 2-tuple if results differ
# else only one value
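# Worked example: the (0,2) entry below pairs candidate 0 (the int 2) with
# candidate 2 (the float 4.0), so its list reads 2+4.0==6.0, 2-4.0==-2.0,
# 2*4.0==8.0, 2**4.0==16.0, 2%4.0==2.0, 2//4.0==0.0 and 2/4.0==0.5 (a single
# value, since classic and new division agree for these operands).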
infix_results = {
# 2
(0,0): ('b', [4, 0, 4, 4, 0, 1, (1, 1.0)]),
(0,1): ('e', (0,0)),
(0,2): ('b', [6.0, -2.0, 8.0, 16.0, 2.0, 0.0, 0.5]),
(0,3): ('b', [4+0j, 0+0j, 4+0j, 4+0j, 0+0j, 1+0j, 1+0j]),
(0,4): ('b', [TE, TE, [1, 1], TE, TE, TE, TE]),
(0,5): ('b', [TE, TE, (2, 2), TE, TE, TE, TE]),
(0,6): ('b', [TE, TE, TE, TE, TE, TE, TE]),
(0,7): ('e', (0,0)),
(0,8): ('e', (0,0)),
# 2L
(1,0): ('e', (0,0)),
(1,1): ('e', (0,1)),
(1,2): ('e', (0,2)),
(1,3): ('e', (0,3)),
(1,4): ('e', (0,4)),
(1,5): ('e', (0,5)),
(1,6): ('e', (0,6)),
(1,7): ('e', (0,7)),
(1,8): ('e', (0,8)),
# 4.0
(2,0): ('b', [6.0, 2.0, 8.0, 16.0, 0.0, 2.0, 2.0]),
(2,1): ('e', (2,0)),
(2,2): ('b', [8.0, 0.0, 16.0, 256.0, 0.0, 1.0, 1.0]),
(2,3): ('b', [6+0j, 2+0j, 8+0j, 16+0j, 0+0j, 2+0j, 2+0j]),
(2,4): ('b', [TE, TE, TE, TE, TE, TE, TE]),
(2,5): ('e', (2,4)),
(2,6): ('e', (2,4)),
(2,7): ('e', (2,0)),
(2,8): ('e', (2,0)),
# (2+0j)
(3,0): ('b', [4+0j, 0+0j, 4+0j, 4+0j, 0+0j, 1+0j, 1+0j]),
(3,1): ('e', (3,0)),
(3,2): ('b', [6+0j, -2+0j, 8+0j, 16+0j, 2+0j, 0+0j, 0.5+0j]),
(3,3): ('b', [4+0j, 0+0j, 4+0j, 4+0j, 0+0j, 1+0j, 1+0j]),
(3,4): ('b', [TE, TE, TE, TE, TE, TE, TE]),
(3,5): ('e', (3,4)),
(3,6): ('e', (3,4)),
(3,7): ('e', (3,0)),
(3,8): ('e', (3,0)),
# [1]
(4,0): ('b', [TE, TE, [1, 1], TE, TE, TE, TE]),
(4,1): ('e', (4,0)),
(4,2): ('b', [TE, TE, TE, TE, TE, TE, TE]),
(4,3): ('b', [TE, TE, TE, TE, TE, TE, TE]),
(4,4): ('b', [[1, 1], TE, TE, TE, TE, TE, TE]),
(4,5): ('s', [TE, TE, TE, TE, TE, TE, TE], [[1, 2], TE, TE, TE, TE, TE, TE]),
(4,6): ('b', [TE, TE, TE, TE, TE, TE, TE]),
(4,7): ('e', (4,0)),
(4,8): ('e', (4,0)),
# (2,)
(5,0): ('b', [TE, TE, (2, 2), TE, TE, TE, TE]),
(5,1): ('e', (5,0)),
(5,2): ('b', [TE, TE, TE, TE, TE, TE, TE]),
(5,3): ('e', (5,2)),
(5,4): ('e', (5,2)),
(5,5): ('b', [(2, 2), TE, TE, TE, TE, TE, TE]),
(5,6): ('b', [TE, TE, TE, TE, TE, TE, TE]),
(5,7): ('e', (5,0)),
(5,8): ('e', (5,0)),
# None
(6,0): ('b', [TE, TE, TE, TE, TE, TE, TE]),
(6,1): ('e', (6,0)),
(6,2): ('e', (6,0)),
(6,3): ('e', (6,0)),
(6,4): ('e', (6,0)),
(6,5): ('e', (6,0)),
(6,6): ('e', (6,0)),
(6,7): ('e', (6,0)),
(6,8): ('e', (6,0)),
# MethodNumber(2)
(7,0): ('e', (0,0)),
(7,1): ('e', (0,1)),
(7,2): ('e', (0,2)),
(7,3): ('e', (0,3)),
(7,4): ('e', (0,4)),
(7,5): ('e', (0,5)),
(7,6): ('e', (0,6)),
(7,7): ('e', (0,7)),
(7,8): ('e', (0,8)),
# CoerceNumber(2)
(8,0): ('e', (0,0)),
(8,1): ('e', (0,1)),
(8,2): ('e', (0,2)),
(8,3): ('e', (0,3)),
(8,4): ('e', (0,4)),
(8,5): ('e', (0,5)),
(8,6): ('e', (0,6)),
(8,7): ('e', (0,7)),
(8,8): ('e', (0,8)),
}
def process_infix_results():
for key in sorted(infix_results):
val = infix_results[key]
if val[0] == 'e':
infix_results[key] = infix_results[val[1]]
else:
if val[0] == 's':
res = (val[1], val[2])
elif val[0] == 'b':
res = (val[1], val[1])
for i in range(1):
if isinstance(res[i][6], tuple):
if 1/2 == 0:
# testing with classic (floor) division
res[i][6] = res[i][6][0]
else:
# testing with -Qnew
res[i][6] = res[i][6][1]
infix_results[key] = res
with check_warnings(("classic (int|long) division", DeprecationWarning),
quiet=True):
process_infix_results()
# now infix_results has two lists of results for every pairing.
prefix_binops = [ 'divmod' ]
prefix_results = [
[(1,0), (1L,0L), (0.0,2.0), ((1+0j),0j), TE, TE, TE, TE, (1,0)],
[(1L,0L), (1L,0L), (0.0,2.0), ((1+0j),0j), TE, TE, TE, TE, (1L,0L)],
[(2.0,0.0), (2.0,0.0), (1.0,0.0), ((2+0j),0j), TE, TE, TE, TE, (2.0,0.0)],
[((1+0j),0j), ((1+0j),0j), (0j,(2+0j)), ((1+0j),0j), TE, TE, TE, TE, ((1+0j),0j)],
[TE, TE, TE, TE, TE, TE, TE, TE, TE],
[TE, TE, TE, TE, TE, TE, TE, TE, TE],
[TE, TE, TE, TE, TE, TE, TE, TE, TE],
[TE, TE, TE, TE, TE, TE, TE, TE, TE],
[(1,0), (1L,0L), (0.0,2.0), ((1+0j),0j), TE, TE, TE, TE, (1,0)]
]
def format_float(value):
if abs(value) < 0.01:
return '0.0'
else:
return '%.1f' % value
# avoid testing platform fp quirks
def format_result(value):
if isinstance(value, complex):
return '(%s + %sj)' % (format_float(value.real),
format_float(value.imag))
elif isinstance(value, float):
return format_float(value)
return str(value)
class CoercionTest(unittest.TestCase):
def test_infix_binops(self):
for ia, a in enumerate(candidates):
for ib, b in enumerate(candidates):
results = infix_results[(ia, ib)]
for op, res, ires in zip(infix_binops, results[0], results[1]):
if res is TE:
self.assertRaises(TypeError, eval,
'a %s b' % op, {'a': a, 'b': b})
else:
self.assertEqual(format_result(res),
format_result(eval('a %s b' % op)),
'%s %s %s == %s failed' % (a, op, b, res))
try:
z = copy.copy(a)
except copy.Error:
z = a # assume it has no inplace ops
if ires is TE:
try:
exec 'z %s= b' % op
except TypeError:
pass
else:
self.fail("TypeError not raised")
else:
exec('z %s= b' % op)
self.assertEqual(ires, z)
def test_prefix_binops(self):
for ia, a in enumerate(candidates):
for ib, b in enumerate(candidates):
for op in prefix_binops:
res = prefix_results[ia][ib]
if res is TE:
self.assertRaises(TypeError, eval,
'%s(a, b)' % op, {'a': a, 'b': b})
else:
self.assertEqual(format_result(res),
format_result(eval('%s(a, b)' % op)),
'%s(%s, %s) == %s failed' % (op, a, b, res))
def test_cmptypes(self):
# Built-in tp_compare slots expect their arguments to have the
# same type, but a user-defined __coerce__ doesn't have to obey.
# SF #980352
evil_coercer = CoerceTo(42)
# Make sure these don't crash any more
self.assertNotEqual(cmp(u'fish', evil_coercer), 0)
self.assertNotEqual(cmp(slice(1), evil_coercer), 0)
# ...but that this still works
class WackyComparer(object):
def __cmp__(slf, other):
self.assertTrue(other == 42, 'expected evil_coercer, got %r' % other)
return 0
__hash__ = None # Invalid cmp makes this unhashable
self.assertEqual(cmp(WackyComparer(), evil_coercer), 0)
# ...and classic classes too, since that code path is a little different
class ClassicWackyComparer:
def __cmp__(slf, other):
self.assertTrue(other == 42, 'expected evil_coercer, got %r' % other)
return 0
self.assertEqual(cmp(ClassicWackyComparer(), evil_coercer), 0)
def test_infinite_rec_classic_classes(self):
# if __coerce__() returns its arguments reversed it causes an infinite
# recursion for classic classes.
class Tester:
def __coerce__(self, other):
return other, self
exc = TestFailed("__coerce__() returning its arguments reverse "
"should raise RuntimeError")
try:
Tester() + 1
except (RuntimeError, TypeError):
return
except:
raise exc
else:
raise exc
def test_main():
with check_warnings(("complex divmod.., // and % are deprecated",
DeprecationWarning),
("classic (int|long) division", DeprecationWarning),
quiet=True):
run_unittest(CoercionTest)
if __name__ == "__main__":
test_main()
| mit |
garg10may/youtube-dl | youtube_dl/extractor/dcn.py | 34 | 2915 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
int_or_none,
parse_iso8601,
)
class DCNIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?dcndigital\.ae/(?:#/)?(?:video/.+|show/\d+/.+?)/(?P<id>\d+)'
_TEST = {
'url': 'http://www.dcndigital.ae/#/show/199074/%D8%B1%D8%AD%D9%84%D8%A9-%D8%A7%D9%84%D8%B9%D9%85%D8%B1-%D8%A7%D9%84%D8%AD%D9%84%D9%82%D8%A9-1/17375/6887',
'info_dict':
{
'id': '17375',
'ext': 'mp4',
'title': 'رحلة العمر : الحلقة 1',
'description': 'md5:0156e935d870acb8ef0a66d24070c6d6',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 2041,
'timestamp': 1227504126,
'upload_date': '20081124',
},
'params': {
# m3u8 download
'skip_download': True,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
request = compat_urllib_request.Request(
'http://admin.mangomolo.com/analytics/index.php/plus/video?id=%s' % video_id,
headers={'Origin': 'http://www.dcndigital.ae'})
video = self._download_json(request, video_id)
title = video.get('title_en') or video['title_ar']
webpage = self._download_webpage(
'http://admin.mangomolo.com/analytics/index.php/customers/embed/video?'
+ compat_urllib_parse.urlencode({
'id': video['id'],
'user_id': video['user_id'],
'signature': video['signature'],
'countries': 'Q0M=',
'filter': 'DENY',
}), video_id)
m3u8_url = self._html_search_regex(r'file:\s*"([^"]+)', webpage, 'm3u8 url')
formats = self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls')
rtsp_url = self._search_regex(
r'<a[^>]+href="(rtsp://[^"]+)"', webpage, 'rtsp url', fatal=False)
if rtsp_url:
formats.append({
'url': rtsp_url,
'format_id': 'rtsp',
})
self._sort_formats(formats)
img = video.get('img')
thumbnail = 'http://admin.mangomolo.com/analytics/%s' % img if img else None
duration = int_or_none(video.get('duration'))
description = video.get('description_en') or video.get('description_ar')
timestamp = parse_iso8601(video.get('create_time') or video.get('update_time'), ' ')
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'timestamp': timestamp,
'formats': formats,
}
| unlicense |
ingadhoc/odoo | openerp/osv/orm.py | 199 | 6222 | import simplejson
from lxml import etree
from ..exceptions import except_orm
from ..models import (
MetaModel,
BaseModel,
Model, TransientModel, AbstractModel,
MAGIC_COLUMNS,
LOG_ACCESS_COLUMNS,
)
from openerp.tools.safe_eval import safe_eval as eval
# extra definitions for backward compatibility
browse_record_list = BaseModel
class browse_record(object):
""" Pseudo-class for testing record instances """
class __metaclass__(type):
def __instancecheck__(self, inst):
return isinstance(inst, BaseModel) and len(inst) <= 1
class browse_null(object):
""" Pseudo-class for testing null instances """
class __metaclass__(type):
def __instancecheck__(self, inst):
return isinstance(inst, BaseModel) and not inst
def transfer_field_to_modifiers(field, modifiers):
default_values = {}
state_exceptions = {}
for attr in ('invisible', 'readonly', 'required'):
state_exceptions[attr] = []
default_values[attr] = bool(field.get(attr))
for state, modifs in (field.get("states",{})).items():
for modif in modifs:
if default_values[modif[0]] != modif[1]:
state_exceptions[modif[0]].append(state)
for attr, default_value in default_values.items():
if state_exceptions[attr]:
modifiers[attr] = [("state", "not in" if default_value else "in", state_exceptions[attr])]
else:
modifiers[attr] = default_value
# Don't deal with groups, it is done by check_group().
# Need the context to evaluate the invisible attribute on tree views.
# For non-tree views, the context shouldn't be given.
def transfer_node_to_modifiers(node, modifiers, context=None, in_tree_view=False):
if node.get('attrs'):
modifiers.update(eval(node.get('attrs')))
if node.get('states'):
if 'invisible' in modifiers and isinstance(modifiers['invisible'], list):
# TODO combine with AND or OR, use implicit AND for now.
modifiers['invisible'].append(('state', 'not in', node.get('states').split(',')))
else:
modifiers['invisible'] = [('state', 'not in', node.get('states').split(','))]
for a in ('invisible', 'readonly', 'required'):
if node.get(a):
v = bool(eval(node.get(a), {'context': context or {}}))
if in_tree_view and a == 'invisible':
# Invisible in a tree view has a specific meaning, make it a
# new key in the modifiers attribute.
modifiers['tree_invisible'] = v
elif v or (a not in modifiers or not isinstance(modifiers[a], list)):
# Don't set the attribute to False if a dynamic value was
# provided (i.e. a domain from attrs or states).
modifiers[a] = v
def simplify_modifiers(modifiers):
for a in ('invisible', 'readonly', 'required'):
if a in modifiers and not modifiers[a]:
del modifiers[a]
def transfer_modifiers_to_node(modifiers, node):
if modifiers:
simplify_modifiers(modifiers)
node.set('modifiers', simplejson.dumps(modifiers))
def setup_modifiers(node, field=None, context=None, in_tree_view=False):
""" Processes node attributes and field descriptors to generate
the ``modifiers`` node attribute and set it on the provided node.
Alters its first argument in-place.
:param node: ``field`` node from an OpenERP view
:type node: lxml.etree._Element
:param dict field: field descriptor corresponding to the provided node
:param dict context: execution context used to evaluate node attributes
:param bool in_tree_view: triggers the ``tree_invisible`` code
path (separate from ``invisible``): in
tree view there are two levels of
invisibility, cell content (a column is
present but the cell itself is not
displayed) with ``invisible`` and column
invisibility (the whole column is
hidden) with ``tree_invisible``.
:returns: nothing
"""
modifiers = {}
if field is not None:
transfer_field_to_modifiers(field, modifiers)
transfer_node_to_modifiers(
node, modifiers, context=context, in_tree_view=in_tree_view)
transfer_modifiers_to_node(modifiers, node)
def test_modifiers(what, expected):
modifiers = {}
if isinstance(what, basestring):
node = etree.fromstring(what)
transfer_node_to_modifiers(node, modifiers)
simplify_modifiers(modifiers)
json = simplejson.dumps(modifiers)
assert json == expected, "%s != %s" % (json, expected)
elif isinstance(what, dict):
transfer_field_to_modifiers(what, modifiers)
simplify_modifiers(modifiers)
json = simplejson.dumps(modifiers)
assert json == expected, "%s != %s" % (json, expected)
# To use this test:
# import openerp
# openerp.osv.orm.modifiers_tests()
def modifiers_tests():
test_modifiers('<field name="a"/>', '{}')
test_modifiers('<field name="a" invisible="1"/>', '{"invisible": true}')
test_modifiers('<field name="a" readonly="1"/>', '{"readonly": true}')
test_modifiers('<field name="a" required="1"/>', '{"required": true}')
test_modifiers('<field name="a" invisible="0"/>', '{}')
test_modifiers('<field name="a" readonly="0"/>', '{}')
test_modifiers('<field name="a" required="0"/>', '{}')
test_modifiers('<field name="a" invisible="1" required="1"/>', '{"invisible": true, "required": true}') # TODO order is not guaranteed
test_modifiers('<field name="a" invisible="1" required="0"/>', '{"invisible": true}')
test_modifiers('<field name="a" invisible="0" required="1"/>', '{"required": true}')
test_modifiers("""<field name="a" attrs="{'invisible': [('b', '=', 'c')]}"/>""", '{"invisible": [["b", "=", "c"]]}')
# The dictionary is supposed to be the result of fields_get().
test_modifiers({}, '{}')
test_modifiers({"invisible": True}, '{"invisible": true}')
test_modifiers({"invisible": False}, '{}')
| agpl-3.0 |
ychfan/tensorflow | tensorflow/contrib/labeled_tensor/__init__.py | 144 | 4001 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Labels for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.labeled_tensor.python.ops import core as _core
from tensorflow.contrib.labeled_tensor.python.ops import io_ops as _io_ops
from tensorflow.contrib.labeled_tensor.python.ops import nn
from tensorflow.contrib.labeled_tensor.python.ops import ops as _ops
from tensorflow.contrib.labeled_tensor.python.ops import sugar as _sugar
# pylint: disable=invalid-name
# Core types.
Axis = _core.Axis
Axes = _core.Axes
LabeledTensor = _core.LabeledTensor
as_axis = _core.as_axis
convert_to_labeled_tensor = _core.convert_to_labeled_tensor
identity = _core.identity
slice = _core.slice_function # pylint: disable=redefined-builtin
transpose = _core.transpose
expand_dims = _core.expand_dims
align = _core.align
axis_order_scope = _core.axis_order_scope
check_axis_order = _core.check_axis_order
impose_axis_order = _core.impose_axis_order
AxisOrderError = _core.AxisOrderError
define_unary_op = _core.define_unary_op
define_binary_op = _core.define_binary_op
define_reduce_op = _ops.define_reduce_op
abs = _core.abs_function # pylint: disable=redefined-builtin
neg = _core.neg
sign = _core.sign
reciprocal = _core.reciprocal
square = _core.square
round = _core.round_function # pylint: disable=redefined-builtin
sqrt = _core.sqrt
rsqrt = _core.rsqrt
exp = _core.exp
log = _core.log
ceil = _core.ceil
floor = _core.floor
cos = _core.cos
sin = _core.sin
tan = _core.tan
acos = _core.acos
asin = _core.asin
atan = _core.atan
lgamma = _core.lgamma
digamma = _core.digamma
erf = _core.erf
erfc = _core.erfc
logical_not = _core.logical_not
tanh = _core.tanh
sigmoid = _core.sigmoid
add = _core.add
sub = _core.sub
mul = _core.mul
div = _core.div
mod = _core.mod
pow = _core.pow_function # pylint: disable=redefined-builtin
equal = _core.equal
greater = _core.greater
greater_equal = _core.greater_equal
not_equal = _core.not_equal
less = _core.less
less_equal = _core.less_equal
logical_and = _core.logical_and
logical_or = _core.logical_or
logical_xor = _core.logical_xor
maximum = _core.maximum
minimum = _core.minimum
squared_difference = _core.squared_difference
igamma = _core.igamma
igammac = _core.igammac
zeta = _core.zeta
polygamma = _core.polygamma
select = _ops.select
concat = _ops.concat
pack = _ops.pack
unpack = _ops.unpack
reshape = _ops.reshape
rename_axis = _ops.rename_axis
random_crop = _ops.random_crop
map_fn = _ops.map_fn
foldl = _ops.foldl
squeeze = _ops.squeeze
matmul = _ops.matmul
tile = _ops.tile
pad = _ops.pad
constant = _ops.constant
zeros_like = _ops.zeros_like
ones_like = _ops.ones_like
cast = _ops.cast
verify_tensor_all_finite = _ops.verify_tensor_all_finite
boolean_mask = _ops.boolean_mask
where = _ops.where
reduce_all = _ops.reduce_all
reduce_any = _ops.reduce_any
reduce_logsumexp = _ops.reduce_logsumexp
reduce_max = _ops.reduce_max
reduce_mean = _ops.reduce_mean
reduce_min = _ops.reduce_min
reduce_prod = _ops.reduce_prod
reduce_sum = _ops.reduce_sum
batch = _ops.batch
shuffle_batch = _ops.shuffle_batch
FixedLenFeature = _io_ops.FixedLenFeature
parse_example = _io_ops.parse_example
parse_single_example = _io_ops.parse_single_example
placeholder = _io_ops.placeholder
ReshapeCoder = _sugar.ReshapeCoder
| apache-2.0 |
jwlawson/tensorflow | tensorflow/contrib/py2tf/api_test.py | 3 | 2115 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for api module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.py2tf import api
from tensorflow.contrib.py2tf import config
from tensorflow.contrib.py2tf.pyct import parser
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ApiTest(test.TestCase):
def test_to_graph_basic(self):
def test_fn(x, s):
while math_ops.reduce_sum(x) > s:
x //= 2
return x
config.DEFAULT_UNCOMPILED_MODULES.add((math_ops.__name__,))
config.COMPILED_IMPORT_STATEMENTS = (
'from tensorflow.python.ops '
'import control_flow_ops as tf',
)
compiled_fn = api.to_graph(test_fn)
with self.test_session() as sess:
x = compiled_fn(constant_op.constant([4, 8]), 4)
self.assertListEqual([1, 2], sess.run(x).tolist())
def test_to_code_basic(self):
def test_fn(x, s):
while math_ops.reduce_sum(x) > s:
x /= 2
return x
config.DEFAULT_UNCOMPILED_MODULES.add((math_ops.__name__,))
compiled_code = api.to_code(test_fn)
# Just check for some key words and that it is parseable Python code.
self.assertRegexpMatches(compiled_code, 'tf\\.while_loop')
self.assertIsNotNone(parser.parse_str(compiled_code))
if __name__ == '__main__':
test.main()
| apache-2.0 |
timduru/platform-external-chromium_org | tools/symsrc/pefile.py | 187 | 139621 | # -*- coding: Latin-1 -*-
"""pefile, Portable Executable reader module
All the PE file basic structures are available with their default names
as attributes of the instance returned.
Processed elements such as the import table are made available with lowercase
names, to differentiate them from the upper case basic structure names.
pefile has been tested against the limits of valid PE headers, that is, malware.
Lots of packed malware attempt to abuse the format way beyond its standard use.
To the best of my knowledge most of the abuses are handled gracefully.
Copyright (c) 2005, 2006, 2007, 2008 Ero Carrera <[email protected]>
All rights reserved.
For detailed copyright information see the file COPYING in
the root of the distribution archive.
"""
__author__ = 'Ero Carrera'
__version__ = '1.2.9.1'
__contact__ = '[email protected]'
import os
import struct
import time
import math
import re
import exceptions
import string
import array
sha1, sha256, sha512, md5 = None, None, None, None
try:
import hashlib
sha1 = hashlib.sha1
sha256 = hashlib.sha256
sha512 = hashlib.sha512
md5 = hashlib.md5
except ImportError:
try:
import sha
sha1 = sha.new
except ImportError:
pass
try:
import md5
md5 = md5.new
except ImportError:
pass
fast_load = False
IMAGE_DOS_SIGNATURE = 0x5A4D
IMAGE_OS2_SIGNATURE = 0x454E
IMAGE_OS2_SIGNATURE_LE = 0x454C
IMAGE_VXD_SIGNATURE = 0x454C
IMAGE_NT_SIGNATURE = 0x00004550
IMAGE_NUMBEROF_DIRECTORY_ENTRIES= 16
IMAGE_ORDINAL_FLAG = 0x80000000L
IMAGE_ORDINAL_FLAG64 = 0x8000000000000000L
OPTIONAL_HEADER_MAGIC_PE = 0x10b
OPTIONAL_HEADER_MAGIC_PE_PLUS = 0x20b
directory_entry_types = [
('IMAGE_DIRECTORY_ENTRY_EXPORT', 0),
('IMAGE_DIRECTORY_ENTRY_IMPORT', 1),
('IMAGE_DIRECTORY_ENTRY_RESOURCE', 2),
('IMAGE_DIRECTORY_ENTRY_EXCEPTION', 3),
('IMAGE_DIRECTORY_ENTRY_SECURITY', 4),
('IMAGE_DIRECTORY_ENTRY_BASERELOC', 5),
('IMAGE_DIRECTORY_ENTRY_DEBUG', 6),
('IMAGE_DIRECTORY_ENTRY_COPYRIGHT', 7),
('IMAGE_DIRECTORY_ENTRY_GLOBALPTR', 8),
('IMAGE_DIRECTORY_ENTRY_TLS', 9),
('IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG', 10),
('IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT', 11),
('IMAGE_DIRECTORY_ENTRY_IAT', 12),
('IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT', 13),
('IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR',14),
('IMAGE_DIRECTORY_ENTRY_RESERVED', 15) ]
DIRECTORY_ENTRY = dict([(e[1], e[0]) for e in directory_entry_types]+directory_entry_types)
image_characteristics = [
('IMAGE_FILE_RELOCS_STRIPPED', 0x0001),
('IMAGE_FILE_EXECUTABLE_IMAGE', 0x0002),
('IMAGE_FILE_LINE_NUMS_STRIPPED', 0x0004),
('IMAGE_FILE_LOCAL_SYMS_STRIPPED', 0x0008),
('IMAGE_FILE_AGGRESIVE_WS_TRIM', 0x0010),
('IMAGE_FILE_LARGE_ADDRESS_AWARE', 0x0020),
('IMAGE_FILE_16BIT_MACHINE', 0x0040),
('IMAGE_FILE_BYTES_REVERSED_LO', 0x0080),
('IMAGE_FILE_32BIT_MACHINE', 0x0100),
('IMAGE_FILE_DEBUG_STRIPPED', 0x0200),
('IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP', 0x0400),
('IMAGE_FILE_NET_RUN_FROM_SWAP', 0x0800),
('IMAGE_FILE_SYSTEM', 0x1000),
('IMAGE_FILE_DLL', 0x2000),
('IMAGE_FILE_UP_SYSTEM_ONLY', 0x4000),
('IMAGE_FILE_BYTES_REVERSED_HI', 0x8000) ]
IMAGE_CHARACTERISTICS = dict([(e[1], e[0]) for e in
image_characteristics]+image_characteristics)
section_characteristics = [
('IMAGE_SCN_CNT_CODE', 0x00000020),
('IMAGE_SCN_CNT_INITIALIZED_DATA', 0x00000040),
('IMAGE_SCN_CNT_UNINITIALIZED_DATA', 0x00000080),
('IMAGE_SCN_LNK_OTHER', 0x00000100),
('IMAGE_SCN_LNK_INFO', 0x00000200),
('IMAGE_SCN_LNK_REMOVE', 0x00000800),
('IMAGE_SCN_LNK_COMDAT', 0x00001000),
('IMAGE_SCN_MEM_FARDATA', 0x00008000),
('IMAGE_SCN_MEM_PURGEABLE', 0x00020000),
('IMAGE_SCN_MEM_16BIT', 0x00020000),
('IMAGE_SCN_MEM_LOCKED', 0x00040000),
('IMAGE_SCN_MEM_PRELOAD', 0x00080000),
('IMAGE_SCN_ALIGN_1BYTES', 0x00100000),
('IMAGE_SCN_ALIGN_2BYTES', 0x00200000),
('IMAGE_SCN_ALIGN_4BYTES', 0x00300000),
('IMAGE_SCN_ALIGN_8BYTES', 0x00400000),
('IMAGE_SCN_ALIGN_16BYTES', 0x00500000),
('IMAGE_SCN_ALIGN_32BYTES', 0x00600000),
('IMAGE_SCN_ALIGN_64BYTES', 0x00700000),
('IMAGE_SCN_ALIGN_128BYTES', 0x00800000),
('IMAGE_SCN_ALIGN_256BYTES', 0x00900000),
('IMAGE_SCN_ALIGN_512BYTES', 0x00A00000),
('IMAGE_SCN_ALIGN_1024BYTES', 0x00B00000),
('IMAGE_SCN_ALIGN_2048BYTES', 0x00C00000),
('IMAGE_SCN_ALIGN_4096BYTES', 0x00D00000),
('IMAGE_SCN_ALIGN_8192BYTES', 0x00E00000),
('IMAGE_SCN_ALIGN_MASK', 0x00F00000),
('IMAGE_SCN_LNK_NRELOC_OVFL', 0x01000000),
('IMAGE_SCN_MEM_DISCARDABLE', 0x02000000),
('IMAGE_SCN_MEM_NOT_CACHED', 0x04000000),
('IMAGE_SCN_MEM_NOT_PAGED', 0x08000000),
('IMAGE_SCN_MEM_SHARED', 0x10000000),
('IMAGE_SCN_MEM_EXECUTE', 0x20000000),
('IMAGE_SCN_MEM_READ', 0x40000000),
('IMAGE_SCN_MEM_WRITE', 0x80000000L) ]
SECTION_CHARACTERISTICS = dict([(e[1], e[0]) for e in
section_characteristics]+section_characteristics)
debug_types = [
('IMAGE_DEBUG_TYPE_UNKNOWN', 0),
('IMAGE_DEBUG_TYPE_COFF', 1),
('IMAGE_DEBUG_TYPE_CODEVIEW', 2),
('IMAGE_DEBUG_TYPE_FPO', 3),
('IMAGE_DEBUG_TYPE_MISC', 4),
('IMAGE_DEBUG_TYPE_EXCEPTION', 5),
('IMAGE_DEBUG_TYPE_FIXUP', 6),
('IMAGE_DEBUG_TYPE_OMAP_TO_SRC', 7),
('IMAGE_DEBUG_TYPE_OMAP_FROM_SRC', 8),
('IMAGE_DEBUG_TYPE_BORLAND', 9),
('IMAGE_DEBUG_TYPE_RESERVED10', 10) ]
DEBUG_TYPE = dict([(e[1], e[0]) for e in debug_types]+debug_types)
subsystem_types = [
('IMAGE_SUBSYSTEM_UNKNOWN', 0),
('IMAGE_SUBSYSTEM_NATIVE', 1),
('IMAGE_SUBSYSTEM_WINDOWS_GUI', 2),
('IMAGE_SUBSYSTEM_WINDOWS_CUI', 3),
('IMAGE_SUBSYSTEM_OS2_CUI', 5),
('IMAGE_SUBSYSTEM_POSIX_CUI', 7),
('IMAGE_SUBSYSTEM_WINDOWS_CE_GUI', 9),
('IMAGE_SUBSYSTEM_EFI_APPLICATION', 10),
('IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER', 11),
('IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER', 12),
('IMAGE_SUBSYSTEM_EFI_ROM', 13),
('IMAGE_SUBSYSTEM_XBOX', 14)]
SUBSYSTEM_TYPE = dict([(e[1], e[0]) for e in subsystem_types]+subsystem_types)
machine_types = [
('IMAGE_FILE_MACHINE_UNKNOWN', 0),
('IMAGE_FILE_MACHINE_AM33', 0x1d3),
('IMAGE_FILE_MACHINE_AMD64', 0x8664),
('IMAGE_FILE_MACHINE_ARM', 0x1c0),
('IMAGE_FILE_MACHINE_EBC', 0xebc),
('IMAGE_FILE_MACHINE_I386', 0x14c),
('IMAGE_FILE_MACHINE_IA64', 0x200),
('IMAGE_FILE_MACHINE_MR32', 0x9041),
('IMAGE_FILE_MACHINE_MIPS16', 0x266),
('IMAGE_FILE_MACHINE_MIPSFPU', 0x366),
('IMAGE_FILE_MACHINE_MIPSFPU16',0x466),
('IMAGE_FILE_MACHINE_POWERPC', 0x1f0),
('IMAGE_FILE_MACHINE_POWERPCFP',0x1f1),
('IMAGE_FILE_MACHINE_R4000', 0x166),
('IMAGE_FILE_MACHINE_SH3', 0x1a2),
('IMAGE_FILE_MACHINE_SH3DSP', 0x1a3),
('IMAGE_FILE_MACHINE_SH4', 0x1a6),
('IMAGE_FILE_MACHINE_SH5', 0x1a8),
('IMAGE_FILE_MACHINE_THUMB', 0x1c2),
('IMAGE_FILE_MACHINE_WCEMIPSV2',0x169),
]
MACHINE_TYPE = dict([(e[1], e[0]) for e in machine_types]+machine_types)
relocation_types = [
('IMAGE_REL_BASED_ABSOLUTE', 0),
('IMAGE_REL_BASED_HIGH', 1),
('IMAGE_REL_BASED_LOW', 2),
('IMAGE_REL_BASED_HIGHLOW', 3),
('IMAGE_REL_BASED_HIGHADJ', 4),
('IMAGE_REL_BASED_MIPS_JMPADDR', 5),
('IMAGE_REL_BASED_SECTION', 6),
('IMAGE_REL_BASED_REL', 7),
('IMAGE_REL_BASED_MIPS_JMPADDR16', 9),
('IMAGE_REL_BASED_IA64_IMM64', 9),
('IMAGE_REL_BASED_DIR64', 10),
('IMAGE_REL_BASED_HIGH3ADJ', 11) ]
RELOCATION_TYPE = dict([(e[1], e[0]) for e in relocation_types]+relocation_types)
dll_characteristics = [
('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x0001', 0x0001),
('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x0002', 0x0002),
('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x0004', 0x0004),
('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x0008', 0x0008),
('IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE', 0x0040),
('IMAGE_DLL_CHARACTERISTICS_FORCE_INTEGRITY', 0x0080),
('IMAGE_DLL_CHARACTERISTICS_NX_COMPAT', 0x0100),
('IMAGE_DLL_CHARACTERISTICS_NO_ISOLATION', 0x0200),
('IMAGE_DLL_CHARACTERISTICS_NO_SEH', 0x0400),
('IMAGE_DLL_CHARACTERISTICS_NO_BIND', 0x0800),
('IMAGE_DLL_CHARACTERISTICS_RESERVED_0x1000', 0x1000),
('IMAGE_DLL_CHARACTERISTICS_WDM_DRIVER', 0x2000),
('IMAGE_DLL_CHARACTERISTICS_TERMINAL_SERVER_AWARE', 0x8000) ]
DLL_CHARACTERISTICS = dict([(e[1], e[0]) for e in dll_characteristics]+dll_characteristics)
# Resource types
resource_type = [
('RT_CURSOR', 1),
('RT_BITMAP', 2),
('RT_ICON', 3),
('RT_MENU', 4),
('RT_DIALOG', 5),
('RT_STRING', 6),
('RT_FONTDIR', 7),
('RT_FONT', 8),
('RT_ACCELERATOR', 9),
('RT_RCDATA', 10),
('RT_MESSAGETABLE', 11),
('RT_GROUP_CURSOR', 12),
('RT_GROUP_ICON', 14),
('RT_VERSION', 16),
('RT_DLGINCLUDE', 17),
('RT_PLUGPLAY', 19),
('RT_VXD', 20),
('RT_ANICURSOR', 21),
('RT_ANIICON', 22),
('RT_HTML', 23),
('RT_MANIFEST', 24) ]
RESOURCE_TYPE = dict([(e[1], e[0]) for e in resource_type]+resource_type)
# Language definitions
lang = [
('LANG_NEUTRAL', 0x00),
('LANG_INVARIANT', 0x7f),
('LANG_AFRIKAANS', 0x36),
('LANG_ALBANIAN', 0x1c),
('LANG_ARABIC', 0x01),
('LANG_ARMENIAN', 0x2b),
('LANG_ASSAMESE', 0x4d),
('LANG_AZERI', 0x2c),
('LANG_BASQUE', 0x2d),
('LANG_BELARUSIAN', 0x23),
('LANG_BENGALI', 0x45),
('LANG_BULGARIAN', 0x02),
('LANG_CATALAN', 0x03),
('LANG_CHINESE', 0x04),
('LANG_CROATIAN', 0x1a),
('LANG_CZECH', 0x05),
('LANG_DANISH', 0x06),
('LANG_DIVEHI', 0x65),
('LANG_DUTCH', 0x13),
('LANG_ENGLISH', 0x09),
('LANG_ESTONIAN', 0x25),
('LANG_FAEROESE', 0x38),
('LANG_FARSI', 0x29),
('LANG_FINNISH', 0x0b),
('LANG_FRENCH', 0x0c),
('LANG_GALICIAN', 0x56),
('LANG_GEORGIAN', 0x37),
('LANG_GERMAN', 0x07),
('LANG_GREEK', 0x08),
('LANG_GUJARATI', 0x47),
('LANG_HEBREW', 0x0d),
('LANG_HINDI', 0x39),
('LANG_HUNGARIAN', 0x0e),
('LANG_ICELANDIC', 0x0f),
('LANG_INDONESIAN', 0x21),
('LANG_ITALIAN', 0x10),
('LANG_JAPANESE', 0x11),
('LANG_KANNADA', 0x4b),
('LANG_KASHMIRI', 0x60),
('LANG_KAZAK', 0x3f),
('LANG_KONKANI', 0x57),
('LANG_KOREAN', 0x12),
('LANG_KYRGYZ', 0x40),
('LANG_LATVIAN', 0x26),
('LANG_LITHUANIAN', 0x27),
('LANG_MACEDONIAN', 0x2f),
('LANG_MALAY', 0x3e),
('LANG_MALAYALAM', 0x4c),
('LANG_MANIPURI', 0x58),
('LANG_MARATHI', 0x4e),
('LANG_MONGOLIAN', 0x50),
('LANG_NEPALI', 0x61),
('LANG_NORWEGIAN', 0x14),
('LANG_ORIYA', 0x48),
('LANG_POLISH', 0x15),
('LANG_PORTUGUESE', 0x16),
('LANG_PUNJABI', 0x46),
('LANG_ROMANIAN', 0x18),
('LANG_RUSSIAN', 0x19),
('LANG_SANSKRIT', 0x4f),
('LANG_SERBIAN', 0x1a),
('LANG_SINDHI', 0x59),
('LANG_SLOVAK', 0x1b),
('LANG_SLOVENIAN', 0x24),
('LANG_SPANISH', 0x0a),
('LANG_SWAHILI', 0x41),
('LANG_SWEDISH', 0x1d),
('LANG_SYRIAC', 0x5a),
('LANG_TAMIL', 0x49),
('LANG_TATAR', 0x44),
('LANG_TELUGU', 0x4a),
('LANG_THAI', 0x1e),
('LANG_TURKISH', 0x1f),
('LANG_UKRAINIAN', 0x22),
('LANG_URDU', 0x20),
('LANG_UZBEK', 0x43),
('LANG_VIETNAMESE', 0x2a),
('LANG_GAELIC', 0x3c),
('LANG_MALTESE', 0x3a),
('LANG_MAORI', 0x28),
('LANG_RHAETO_ROMANCE',0x17),
('LANG_SAAMI', 0x3b),
('LANG_SORBIAN', 0x2e),
('LANG_SUTU', 0x30),
('LANG_TSONGA', 0x31),
('LANG_TSWANA', 0x32),
('LANG_VENDA', 0x33),
('LANG_XHOSA', 0x34),
('LANG_ZULU', 0x35),
('LANG_ESPERANTO', 0x8f),
('LANG_WALON', 0x90),
('LANG_CORNISH', 0x91),
('LANG_WELSH', 0x92),
('LANG_BRETON', 0x93) ]
LANG = dict(lang+[(e[1], e[0]) for e in lang])
# Sublanguage definitions
sublang = [
('SUBLANG_NEUTRAL', 0x00),
('SUBLANG_DEFAULT', 0x01),
('SUBLANG_SYS_DEFAULT', 0x02),
('SUBLANG_ARABIC_SAUDI_ARABIA', 0x01),
('SUBLANG_ARABIC_IRAQ', 0x02),
('SUBLANG_ARABIC_EGYPT', 0x03),
('SUBLANG_ARABIC_LIBYA', 0x04),
('SUBLANG_ARABIC_ALGERIA', 0x05),
('SUBLANG_ARABIC_MOROCCO', 0x06),
('SUBLANG_ARABIC_TUNISIA', 0x07),
('SUBLANG_ARABIC_OMAN', 0x08),
('SUBLANG_ARABIC_YEMEN', 0x09),
('SUBLANG_ARABIC_SYRIA', 0x0a),
('SUBLANG_ARABIC_JORDAN', 0x0b),
('SUBLANG_ARABIC_LEBANON', 0x0c),
('SUBLANG_ARABIC_KUWAIT', 0x0d),
('SUBLANG_ARABIC_UAE', 0x0e),
('SUBLANG_ARABIC_BAHRAIN', 0x0f),
('SUBLANG_ARABIC_QATAR', 0x10),
('SUBLANG_AZERI_LATIN', 0x01),
('SUBLANG_AZERI_CYRILLIC', 0x02),
('SUBLANG_CHINESE_TRADITIONAL', 0x01),
('SUBLANG_CHINESE_SIMPLIFIED', 0x02),
('SUBLANG_CHINESE_HONGKONG', 0x03),
('SUBLANG_CHINESE_SINGAPORE', 0x04),
('SUBLANG_CHINESE_MACAU', 0x05),
('SUBLANG_DUTCH', 0x01),
('SUBLANG_DUTCH_BELGIAN', 0x02),
('SUBLANG_ENGLISH_US', 0x01),
('SUBLANG_ENGLISH_UK', 0x02),
('SUBLANG_ENGLISH_AUS', 0x03),
('SUBLANG_ENGLISH_CAN', 0x04),
('SUBLANG_ENGLISH_NZ', 0x05),
('SUBLANG_ENGLISH_EIRE', 0x06),
('SUBLANG_ENGLISH_SOUTH_AFRICA', 0x07),
('SUBLANG_ENGLISH_JAMAICA', 0x08),
('SUBLANG_ENGLISH_CARIBBEAN', 0x09),
('SUBLANG_ENGLISH_BELIZE', 0x0a),
('SUBLANG_ENGLISH_TRINIDAD', 0x0b),
('SUBLANG_ENGLISH_ZIMBABWE', 0x0c),
('SUBLANG_ENGLISH_PHILIPPINES', 0x0d),
('SUBLANG_FRENCH', 0x01),
('SUBLANG_FRENCH_BELGIAN', 0x02),
('SUBLANG_FRENCH_CANADIAN', 0x03),
('SUBLANG_FRENCH_SWISS', 0x04),
('SUBLANG_FRENCH_LUXEMBOURG', 0x05),
('SUBLANG_FRENCH_MONACO', 0x06),
('SUBLANG_GERMAN', 0x01),
('SUBLANG_GERMAN_SWISS', 0x02),
('SUBLANG_GERMAN_AUSTRIAN', 0x03),
('SUBLANG_GERMAN_LUXEMBOURG', 0x04),
('SUBLANG_GERMAN_LIECHTENSTEIN', 0x05),
('SUBLANG_ITALIAN', 0x01),
('SUBLANG_ITALIAN_SWISS', 0x02),
('SUBLANG_KASHMIRI_SASIA', 0x02),
('SUBLANG_KASHMIRI_INDIA', 0x02),
('SUBLANG_KOREAN', 0x01),
('SUBLANG_LITHUANIAN', 0x01),
('SUBLANG_MALAY_MALAYSIA', 0x01),
('SUBLANG_MALAY_BRUNEI_DARUSSALAM', 0x02),
('SUBLANG_NEPALI_INDIA', 0x02),
('SUBLANG_NORWEGIAN_BOKMAL', 0x01),
('SUBLANG_NORWEGIAN_NYNORSK', 0x02),
('SUBLANG_PORTUGUESE', 0x02),
('SUBLANG_PORTUGUESE_BRAZILIAN', 0x01),
('SUBLANG_SERBIAN_LATIN', 0x02),
('SUBLANG_SERBIAN_CYRILLIC', 0x03),
('SUBLANG_SPANISH', 0x01),
('SUBLANG_SPANISH_MEXICAN', 0x02),
('SUBLANG_SPANISH_MODERN', 0x03),
('SUBLANG_SPANISH_GUATEMALA', 0x04),
('SUBLANG_SPANISH_COSTA_RICA', 0x05),
('SUBLANG_SPANISH_PANAMA', 0x06),
('SUBLANG_SPANISH_DOMINICAN_REPUBLIC', 0x07),
('SUBLANG_SPANISH_VENEZUELA', 0x08),
('SUBLANG_SPANISH_COLOMBIA', 0x09),
('SUBLANG_SPANISH_PERU', 0x0a),
('SUBLANG_SPANISH_ARGENTINA', 0x0b),
('SUBLANG_SPANISH_ECUADOR', 0x0c),
('SUBLANG_SPANISH_CHILE', 0x0d),
('SUBLANG_SPANISH_URUGUAY', 0x0e),
('SUBLANG_SPANISH_PARAGUAY', 0x0f),
('SUBLANG_SPANISH_BOLIVIA', 0x10),
('SUBLANG_SPANISH_EL_SALVADOR', 0x11),
('SUBLANG_SPANISH_HONDURAS', 0x12),
('SUBLANG_SPANISH_NICARAGUA', 0x13),
('SUBLANG_SPANISH_PUERTO_RICO', 0x14),
('SUBLANG_SWEDISH', 0x01),
('SUBLANG_SWEDISH_FINLAND', 0x02),
('SUBLANG_URDU_PAKISTAN', 0x01),
('SUBLANG_URDU_INDIA', 0x02),
('SUBLANG_UZBEK_LATIN', 0x01),
('SUBLANG_UZBEK_CYRILLIC', 0x02),
('SUBLANG_DUTCH_SURINAM', 0x03),
('SUBLANG_ROMANIAN', 0x01),
('SUBLANG_ROMANIAN_MOLDAVIA', 0x02),
('SUBLANG_RUSSIAN', 0x01),
('SUBLANG_RUSSIAN_MOLDAVIA', 0x02),
('SUBLANG_CROATIAN', 0x01),
('SUBLANG_LITHUANIAN_CLASSIC', 0x02),
('SUBLANG_GAELIC', 0x01),
('SUBLANG_GAELIC_SCOTTISH', 0x02),
('SUBLANG_GAELIC_MANX', 0x03) ]
SUBLANG = dict(sublang+[(e[1], e[0]) for e in sublang])
class UnicodeStringWrapperPostProcessor:
"""This class attemps to help the process of identifying strings
that might be plain Unicode or Pascal. A list of strings will be
wrapped on it with the hope the overlappings will help make the
decission about their type."""
def __init__(self, pe, rva_ptr):
self.pe = pe
self.rva_ptr = rva_ptr
self.string = None
def get_rva(self):
"""Get the RVA of the string."""
return self.rva_ptr
def __str__(self):
"""Return the escaped ASCII representation of the string."""
def convert_char(char):
if char in string.printable:
return char
else:
return r'\x%02x' % ord(char)
if self.string:
return ''.join([convert_char(c) for c in self.string])
return ''
def invalidate(self):
"""Make this instance None, to express it's no known string type."""
self = None
def render_pascal_16(self):
self.string = self.pe.get_string_u_at_rva(
self.rva_ptr+2,
max_length=self.__get_pascal_16_length())
def ask_pascal_16(self, next_rva_ptr):
"""The next RVA is taken to be the one immediately following this one.
Such RVA could indicate the natural end of the string and will be checked
with the possible length contained in the first word.
"""
length = self.__get_pascal_16_length()
if length == (next_rva_ptr - (self.rva_ptr+2)) / 2:
self.length = length
return True
return False
def __get_pascal_16_length(self):
return self.__get_word_value_at_rva(self.rva_ptr)
def __get_word_value_at_rva(self, rva):
try:
            data = self.pe.get_data(rva, 2)
except PEFormatError, e:
return False
if len(data)<2:
return False
return struct.unpack('<H', data)[0]
#def render_pascal_8(self):
# """"""
def ask_unicode_16(self, next_rva_ptr):
"""The next RVA is taken to be the one immediately following this one.
Such RVA could indicate the natural end of the string and will be checked
to see if there's a Unicode NULL character there.
"""
if self.__get_word_value_at_rva(next_rva_ptr-2) == 0:
self.length = next_rva_ptr - self.rva_ptr
return True
return False
def render_unicode_16(self):
""""""
self.string = self.pe.get_string_u_at_rva(self.rva_ptr)
class PEFormatError(Exception):
"""Generic PE format error exception."""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Dump:
"""Convenience class for dumping the PE information."""
def __init__(self):
self.text = ''
def add_lines(self, txt, indent=0):
"""Adds a list of lines.
The list can be indented with the optional argument 'indent'.
"""
for line in txt:
self.add_line(line, indent)
def add_line(self, txt, indent=0):
"""Adds a line.
The line can be indented with the optional argument 'indent'.
"""
self.add(txt+'\n', indent)
def add(self, txt, indent=0):
"""Adds some text, no newline will be appended.
The text can be indented with the optional argument 'indent'.
"""
if isinstance(txt, unicode):
s = []
for c in txt:
try:
s.append(str(c))
except UnicodeEncodeError, e:
s.append(repr(c))
txt = ''.join(s)
self.text += ' '*indent+txt
def add_header(self, txt):
"""Adds a header element."""
self.add_line('-'*10+txt+'-'*10+'\n')
def add_newline(self):
"""Adds a newline."""
self.text += '\n'
def get_text(self):
"""Get the text in its current state."""
return self.text
class Structure:
"""Prepare structure object to extract members from data.
Format is a list containing definitions for the elements
of the structure.
"""
def __init__(self, format, name=None, file_offset=None):
# Format is forced little endian, for big endian non Intel platforms
self.__format__ = '<'
self.__keys__ = []
# self.values = {}
self.__format_length__ = 0
self.__set_format__(format[1])
self._all_zeroes = False
self.__unpacked_data_elms__ = None
self.__file_offset__ = file_offset
if name:
self.name = name
else:
self.name = format[0]
def __get_format__(self):
return self.__format__
def get_file_offset(self):
return self.__file_offset__
def set_file_offset(self, offset):
self.__file_offset__ = offset
def all_zeroes(self):
"""Returns true is the unpacked data is all zeroes."""
return self._all_zeroes
def __set_format__(self, format):
for elm in format:
if ',' in elm:
elm_type, elm_name = elm.split(',', 1)
self.__format__ += elm_type
elm_names = elm_name.split(',')
names = []
for elm_name in elm_names:
if elm_name in self.__keys__:
search_list = [x[:len(elm_name)] for x in self.__keys__]
occ_count = search_list.count(elm_name)
elm_name = elm_name+'_'+str(occ_count)
names.append(elm_name)
# Some PE header structures have unions on them, so a certain
# value might have different names, so each key has a list of
# all the possible members referring to the data.
self.__keys__.append(names)
self.__format_length__ = struct.calcsize(self.__format__)
def sizeof(self):
"""Return size of the structure."""
return self.__format_length__
def __unpack__(self, data):
if len(data)>self.__format_length__:
data = data[:self.__format_length__]
# OC Patch:
# Some malware have incorrect header lengths.
# Fail gracefully if this occurs
# Buggy malware: a29b0118af8b7408444df81701ad5a7f
#
elif len(data)<self.__format_length__:
raise PEFormatError('Data length less than expected header length.')
if data.count(chr(0)) == len(data):
self._all_zeroes = True
self.__unpacked_data_elms__ = struct.unpack(self.__format__, data)
for i in xrange(len(self.__unpacked_data_elms__)):
for key in self.__keys__[i]:
# self.values[key] = self.__unpacked_data_elms__[i]
setattr(self, key, self.__unpacked_data_elms__[i])
def __pack__(self):
new_values = []
for i in xrange(len(self.__unpacked_data_elms__)):
for key in self.__keys__[i]:
new_val = getattr(self, key)
old_val = self.__unpacked_data_elms__[i]
# In the case of Unions, when the first changed value
# is picked the loop is exited
if new_val != old_val:
break
new_values.append(new_val)
return struct.pack(self.__format__, *new_values)
def __str__(self):
return '\n'.join( self.dump() )
def __repr__(self):
return '<Structure: %s>' % (' '.join( [' '.join(s.split()) for s in self.dump()] ))
def dump(self, indentation=0):
"""Returns a string representation of the structure."""
dump = []
dump.append('[%s]' % self.name)
# Refer to the __set_format__ method for an explanation
# of the following construct.
for keys in self.__keys__:
for key in keys:
val = getattr(self, key)
if isinstance(val, int) or isinstance(val, long):
val_str = '0x%-8X' % (val)
if key == 'TimeDateStamp' or key == 'dwTimeStamp':
try:
val_str += ' [%s UTC]' % time.asctime(time.gmtime(val))
except exceptions.ValueError, e:
val_str += ' [INVALID TIME]'
else:
val_str = ''.join(filter(lambda c:c != '\0', str(val)))
dump.append('%-30s %s' % (key+':', val_str))
return dump
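# Illustrative sketch (not part of the original module): how a format
# definition maps to attributes once unpacked. The structure name and the
# sample values are assumptions for demonstration only.
#
#   fmt = ('EXAMPLE_HEADER', ('H,Magic', 'L,Size'))
#   hdr = Structure(fmt)
#   hdr.__unpack__(struct.pack('<HL', 0x5A4D, 1024))
#   hdr.Magic      # 0x5a4d
#   hdr.Size       # 1024
#   hdr.sizeof()   # 6, per the little-endian '<HL' format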
class SectionStructure(Structure):
"""Convenience section handling class."""
def get_data(self, start, length=None):
"""Get data chunk from a section.
Allows to query data from the section by passing the
addresses where the PE file would be loaded by default.
It is then possible to retrieve code and data by its real
addresses as it would be if loaded.
"""
offset = start - self.VirtualAddress
if length:
end = offset+length
else:
end = len(self.data)
return self.data[offset:end]
def get_rva_from_offset(self, offset):
return offset - self.PointerToRawData + self.VirtualAddress
def get_offset_from_rva(self, rva):
return (rva - self.VirtualAddress) + self.PointerToRawData
def contains_offset(self, offset):
"""Check whether the section contains the file offset provided."""
if not self.PointerToRawData:
# bss and other sections containing only uninitialized data must have 0
# and do not take space in the file
return False
        return self.PointerToRawData <= offset < self.PointerToRawData + self.SizeOfRawData
def contains_rva(self, rva):
"""Check whether the section contains the address provided."""
# PECOFF documentation v8 says:
# The total size of the section when loaded into memory.
# If this value is greater than SizeOfRawData, the section is zero-padded.
# This field is valid only for executable images and should be set to zero
# for object files.
if len(self.data) < self.SizeOfRawData:
size = self.Misc_VirtualSize
else:
size = max(self.SizeOfRawData, self.Misc_VirtualSize)
return self.VirtualAddress <= rva < self.VirtualAddress + size
def contains(self, rva):
#print "DEPRECATION WARNING: you should use contains_rva() instead of contains()"
return self.contains_rva(rva)
def set_data(self, data):
"""Set the data belonging to the section."""
self.data = data
def get_entropy(self):
"""Calculate and return the entropy for the section."""
return self.entropy_H( self.data )
def get_hash_sha1(self):
"""Get the SHA-1 hex-digest of the section's data."""
if sha1 is not None:
return sha1( self.data ).hexdigest()
def get_hash_sha256(self):
"""Get the SHA-256 hex-digest of the section's data."""
if sha256 is not None:
return sha256( self.data ).hexdigest()
def get_hash_sha512(self):
"""Get the SHA-512 hex-digest of the section's data."""
if sha512 is not None:
return sha512( self.data ).hexdigest()
def get_hash_md5(self):
"""Get the MD5 hex-digest of the section's data."""
if md5 is not None:
return md5( self.data ).hexdigest()
def entropy_H(self, data):
"""Calculate the entropy of a chunk of data."""
if len(data) == 0:
return 0.0
occurences = array.array('L', [0]*256)
for x in data:
occurences[ord(x)] += 1
entropy = 0
for x in occurences:
if x:
p_x = float(x) / len(data)
entropy -= p_x*math.log(p_x, 2)
return entropy
class DataContainer:
"""Generic data container."""
def __init__(self, **args):
for key, value in args.items():
setattr(self, key, value)
class ImportDescData(DataContainer):
"""Holds import descriptor information.
dll: name of the imported DLL
imports: list of imported symbols (ImportData instances)
    struct: IMAGE_IMPORT_DESCRIPTOR structure
"""
class ImportData(DataContainer):
"""Holds imported symbol's information.
ordinal: Ordinal of the symbol
name: Name of the symbol
bound: If the symbol is bound, this contains
the address.
"""
class ExportDirData(DataContainer):
"""Holds export directory information.
struct: IMAGE_EXPORT_DIRECTORY structure
symbols: list of exported symbols (ExportData instances)
"""
class ExportData(DataContainer):
"""Holds exported symbols' information.
ordinal: ordinal of the symbol
address: address of the symbol
name: name of the symbol (None if the symbol is
exported by ordinal only)
forwarder: if the symbol is forwarded it will
contain the name of the target symbol,
None otherwise.
"""
class ResourceDirData(DataContainer):
"""Holds resource directory information.
struct: IMAGE_RESOURCE_DIRECTORY structure
entries: list of entries (ResourceDirEntryData instances)
"""
class ResourceDirEntryData(DataContainer):
"""Holds resource directory entry data.
struct: IMAGE_RESOURCE_DIRECTORY_ENTRY structure
name: If the resource is identified by name this
attribute will contain the name string. None
otherwise. If identified by id, the id is
    available at 'struct.Id'
id: the id, also in struct.Id
directory: If this entry has a lower level directory
this attribute will point to the
ResourceDirData instance representing it.
    data: If this entry has no further lower directories
and points to the actual resource data, this
attribute will reference the corresponding
ResourceDataEntryData instance.
(Either of the 'directory' or 'data' attribute will exist,
but not both.)
"""
class ResourceDataEntryData(DataContainer):
"""Holds resource data entry information.
struct: IMAGE_RESOURCE_DATA_ENTRY structure
lang: Primary language ID
sublang: Sublanguage ID
"""
class DebugData(DataContainer):
"""Holds debug information.
struct: IMAGE_DEBUG_DIRECTORY structure
"""
class BaseRelocationData(DataContainer):
"""Holds base relocation information.
struct: IMAGE_BASE_RELOCATION structure
entries: list of relocation data (RelocationData instances)
"""
class RelocationData(DataContainer):
"""Holds relocation information.
type: Type of relocation
    The type string can be obtained by
RELOCATION_TYPE[type]
rva: RVA of the relocation
"""
class TlsData(DataContainer):
"""Holds TLS information.
struct: IMAGE_TLS_DIRECTORY structure
"""
class BoundImportDescData(DataContainer):
"""Holds bound import descriptor data.
    This directory entry will provide information on the
    DLLs this PE file has been bound to (if bound at all).
The structure will contain the name and timestamp of the
DLL at the time of binding so that the loader can know
whether it differs from the one currently present in the
system and must, therefore, re-bind the PE's imports.
struct: IMAGE_BOUND_IMPORT_DESCRIPTOR structure
name: DLL name
entries: list of entries (BoundImportRefData instances)
the entries will exist if this DLL has forwarded
symbols. If so, the destination DLL will have an
entry in this list.
"""
class BoundImportRefData(DataContainer):
"""Holds bound import forwader reference data.
Contains the same information as the bound descriptor but
for forwarded DLLs, if any.
struct: IMAGE_BOUND_FORWARDER_REF structure
name: dll name
"""
class PE:
"""A Portable Executable representation.
This class provides access to most of the information in a PE file.
It expects to be supplied the name of the file to load or PE data
to process and an optional argument 'fast_load' (False by default)
which controls whether to load all the directories information,
which can be quite time consuming.
pe = pefile.PE('module.dll')
pe = pefile.PE(name='module.dll')
would load 'module.dll' and process it. If the data would be already
available in a buffer the same could be achieved with:
pe = pefile.PE(data=module_dll_data)
The "fast_load" can be set to a default by setting its value in the
    module itself by means, for instance, of a "pefile.fast_load = True".
    That will make all the subsequent instances not load the
    whole PE structure. The "full_load" method can be used to parse
the missing data at a later stage.
Basic headers information will be available in the attributes:
DOS_HEADER
NT_HEADERS
FILE_HEADER
OPTIONAL_HEADER
    All of them will contain among their attributes the members of the
corresponding structures as defined in WINNT.H
The raw data corresponding to the header (from the beginning of the
    file up to the start of the first section) will be available in the
instance's attribute 'header' as a string.
The sections will be available as a list in the 'sections' attribute.
Each entry will contain as attributes all the structure's members.
Directory entries will be available as attributes (if they exist):
(no other entries are processed at this point)
DIRECTORY_ENTRY_IMPORT (list of ImportDescData instances)
DIRECTORY_ENTRY_EXPORT (ExportDirData instance)
DIRECTORY_ENTRY_RESOURCE (ResourceDirData instance)
DIRECTORY_ENTRY_DEBUG (list of DebugData instances)
DIRECTORY_ENTRY_BASERELOC (list of BaseRelocationData instances)
DIRECTORY_ENTRY_TLS
DIRECTORY_ENTRY_BOUND_IMPORT (list of BoundImportData instances)
The following dictionary attributes provide ways of mapping different
constants. They will accept the numeric value and return the string
representation and the opposite, feed in the string and get the
numeric constant:
DIRECTORY_ENTRY
IMAGE_CHARACTERISTICS
SECTION_CHARACTERISTICS
DEBUG_TYPE
SUBSYSTEM_TYPE
MACHINE_TYPE
RELOCATION_TYPE
RESOURCE_TYPE
LANG
SUBLANG
"""
#
# Format specifications for PE structures.
#
__IMAGE_DOS_HEADER_format__ = ('IMAGE_DOS_HEADER',
('H,e_magic', 'H,e_cblp', 'H,e_cp',
'H,e_crlc', 'H,e_cparhdr', 'H,e_minalloc',
'H,e_maxalloc', 'H,e_ss', 'H,e_sp', 'H,e_csum',
'H,e_ip', 'H,e_cs', 'H,e_lfarlc', 'H,e_ovno', '8s,e_res',
'H,e_oemid', 'H,e_oeminfo', '20s,e_res2',
'L,e_lfanew'))
__IMAGE_FILE_HEADER_format__ = ('IMAGE_FILE_HEADER',
('H,Machine', 'H,NumberOfSections',
'L,TimeDateStamp', 'L,PointerToSymbolTable',
'L,NumberOfSymbols', 'H,SizeOfOptionalHeader',
'H,Characteristics'))
__IMAGE_DATA_DIRECTORY_format__ = ('IMAGE_DATA_DIRECTORY',
('L,VirtualAddress', 'L,Size'))
__IMAGE_OPTIONAL_HEADER_format__ = ('IMAGE_OPTIONAL_HEADER',
('H,Magic', 'B,MajorLinkerVersion',
'B,MinorLinkerVersion', 'L,SizeOfCode',
'L,SizeOfInitializedData', 'L,SizeOfUninitializedData',
'L,AddressOfEntryPoint', 'L,BaseOfCode', 'L,BaseOfData',
'L,ImageBase', 'L,SectionAlignment', 'L,FileAlignment',
'H,MajorOperatingSystemVersion', 'H,MinorOperatingSystemVersion',
'H,MajorImageVersion', 'H,MinorImageVersion',
'H,MajorSubsystemVersion', 'H,MinorSubsystemVersion',
'L,Reserved1', 'L,SizeOfImage', 'L,SizeOfHeaders',
'L,CheckSum', 'H,Subsystem', 'H,DllCharacteristics',
'L,SizeOfStackReserve', 'L,SizeOfStackCommit',
'L,SizeOfHeapReserve', 'L,SizeOfHeapCommit',
'L,LoaderFlags', 'L,NumberOfRvaAndSizes' ))
__IMAGE_OPTIONAL_HEADER64_format__ = ('IMAGE_OPTIONAL_HEADER64',
('H,Magic', 'B,MajorLinkerVersion',
'B,MinorLinkerVersion', 'L,SizeOfCode',
'L,SizeOfInitializedData', 'L,SizeOfUninitializedData',
'L,AddressOfEntryPoint', 'L,BaseOfCode',
'Q,ImageBase', 'L,SectionAlignment', 'L,FileAlignment',
'H,MajorOperatingSystemVersion', 'H,MinorOperatingSystemVersion',
'H,MajorImageVersion', 'H,MinorImageVersion',
'H,MajorSubsystemVersion', 'H,MinorSubsystemVersion',
'L,Reserved1', 'L,SizeOfImage', 'L,SizeOfHeaders',
'L,CheckSum', 'H,Subsystem', 'H,DllCharacteristics',
'Q,SizeOfStackReserve', 'Q,SizeOfStackCommit',
'Q,SizeOfHeapReserve', 'Q,SizeOfHeapCommit',
'L,LoaderFlags', 'L,NumberOfRvaAndSizes' ))
__IMAGE_NT_HEADERS_format__ = ('IMAGE_NT_HEADERS', ('L,Signature',))
__IMAGE_SECTION_HEADER_format__ = ('IMAGE_SECTION_HEADER',
('8s,Name', 'L,Misc,Misc_PhysicalAddress,Misc_VirtualSize',
'L,VirtualAddress', 'L,SizeOfRawData', 'L,PointerToRawData',
'L,PointerToRelocations', 'L,PointerToLinenumbers',
'H,NumberOfRelocations', 'H,NumberOfLinenumbers',
'L,Characteristics'))
__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__ = ('IMAGE_DELAY_IMPORT_DESCRIPTOR',
('L,grAttrs', 'L,szName', 'L,phmod', 'L,pIAT', 'L,pINT',
'L,pBoundIAT', 'L,pUnloadIAT', 'L,dwTimeStamp'))
__IMAGE_IMPORT_DESCRIPTOR_format__ = ('IMAGE_IMPORT_DESCRIPTOR',
('L,OriginalFirstThunk,Characteristics',
'L,TimeDateStamp', 'L,ForwarderChain', 'L,Name', 'L,FirstThunk'))
__IMAGE_EXPORT_DIRECTORY_format__ = ('IMAGE_EXPORT_DIRECTORY',
('L,Characteristics',
'L,TimeDateStamp', 'H,MajorVersion', 'H,MinorVersion', 'L,Name',
'L,Base', 'L,NumberOfFunctions', 'L,NumberOfNames',
'L,AddressOfFunctions', 'L,AddressOfNames', 'L,AddressOfNameOrdinals'))
__IMAGE_RESOURCE_DIRECTORY_format__ = ('IMAGE_RESOURCE_DIRECTORY',
('L,Characteristics',
'L,TimeDateStamp', 'H,MajorVersion', 'H,MinorVersion',
'H,NumberOfNamedEntries', 'H,NumberOfIdEntries'))
__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__ = ('IMAGE_RESOURCE_DIRECTORY_ENTRY',
('L,Name',
'L,OffsetToData'))
__IMAGE_RESOURCE_DATA_ENTRY_format__ = ('IMAGE_RESOURCE_DATA_ENTRY',
('L,OffsetToData', 'L,Size', 'L,CodePage', 'L,Reserved'))
__VS_VERSIONINFO_format__ = ( 'VS_VERSIONINFO',
('H,Length', 'H,ValueLength', 'H,Type' ))
__VS_FIXEDFILEINFO_format__ = ( 'VS_FIXEDFILEINFO',
('L,Signature', 'L,StrucVersion', 'L,FileVersionMS', 'L,FileVersionLS',
'L,ProductVersionMS', 'L,ProductVersionLS', 'L,FileFlagsMask', 'L,FileFlags',
'L,FileOS', 'L,FileType', 'L,FileSubtype', 'L,FileDateMS', 'L,FileDateLS'))
__StringFileInfo_format__ = ( 'StringFileInfo',
('H,Length', 'H,ValueLength', 'H,Type' ))
__StringTable_format__ = ( 'StringTable',
('H,Length', 'H,ValueLength', 'H,Type' ))
__String_format__ = ( 'String',
('H,Length', 'H,ValueLength', 'H,Type' ))
__Var_format__ = ( 'Var', ('H,Length', 'H,ValueLength', 'H,Type' ))
__IMAGE_THUNK_DATA_format__ = ('IMAGE_THUNK_DATA',
('L,ForwarderString,Function,Ordinal,AddressOfData',))
__IMAGE_THUNK_DATA64_format__ = ('IMAGE_THUNK_DATA',
('Q,ForwarderString,Function,Ordinal,AddressOfData',))
__IMAGE_DEBUG_DIRECTORY_format__ = ('IMAGE_DEBUG_DIRECTORY',
('L,Characteristics', 'L,TimeDateStamp', 'H,MajorVersion',
'H,MinorVersion', 'L,Type', 'L,SizeOfData', 'L,AddressOfRawData',
'L,PointerToRawData'))
__IMAGE_BASE_RELOCATION_format__ = ('IMAGE_BASE_RELOCATION',
('L,VirtualAddress', 'L,SizeOfBlock') )
__IMAGE_TLS_DIRECTORY_format__ = ('IMAGE_TLS_DIRECTORY',
('L,StartAddressOfRawData', 'L,EndAddressOfRawData',
'L,AddressOfIndex', 'L,AddressOfCallBacks',
'L,SizeOfZeroFill', 'L,Characteristics' ) )
__IMAGE_TLS_DIRECTORY64_format__ = ('IMAGE_TLS_DIRECTORY',
('Q,StartAddressOfRawData', 'Q,EndAddressOfRawData',
'Q,AddressOfIndex', 'Q,AddressOfCallBacks',
'L,SizeOfZeroFill', 'L,Characteristics' ) )
__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__ = ('IMAGE_BOUND_IMPORT_DESCRIPTOR',
('L,TimeDateStamp', 'H,OffsetModuleName', 'H,NumberOfModuleForwarderRefs'))
__IMAGE_BOUND_FORWARDER_REF_format__ = ('IMAGE_BOUND_FORWARDER_REF',
('L,TimeDateStamp', 'H,OffsetModuleName', 'H,Reserved') )
def __init__(self, name=None, data=None, fast_load=None):
self.sections = []
self.__warnings = []
self.PE_TYPE = None
if not name and not data:
return
# This list will keep track of all the structures created.
# That will allow for an easy iteration through the list
# in order to save the modifications made
self.__structures__ = []
if not fast_load:
fast_load = globals()['fast_load']
self.__parse__(name, data, fast_load)
def __unpack_data__(self, format, data, file_offset):
"""Apply structure format to raw data.
        Returns an unpacked structure object if successful, None otherwise.
"""
structure = Structure(format, file_offset=file_offset)
#if len(data) < structure.sizeof():
# return None
try:
structure.__unpack__(data)
except PEFormatError, err:
self.__warnings.append(
'Corrupt header "%s" at file offset %d. Exception: %s' % (
format[0], file_offset, str(err)) )
return None
self.__structures__.append(structure)
return structure
def __parse__(self, fname, data, fast_load):
"""Parse a Portable Executable file.
Loads a PE file, parsing all its structures and making them available
through the instance's attributes.
"""
if fname:
fd = file(fname, 'rb')
self.__data__ = fd.read()
fd.close()
elif data:
self.__data__ = data
self.DOS_HEADER = self.__unpack_data__(
self.__IMAGE_DOS_HEADER_format__,
self.__data__, file_offset=0)
if not self.DOS_HEADER or self.DOS_HEADER.e_magic != IMAGE_DOS_SIGNATURE:
raise PEFormatError('DOS Header magic not found.')
# OC Patch:
# Check for sane value in e_lfanew
#
if self.DOS_HEADER.e_lfanew > len(self.__data__):
raise PEFormatError('Invalid e_lfanew value, probably not a PE file')
nt_headers_offset = self.DOS_HEADER.e_lfanew
self.NT_HEADERS = self.__unpack_data__(
self.__IMAGE_NT_HEADERS_format__,
self.__data__[nt_headers_offset:],
file_offset = nt_headers_offset)
# We better check the signature right here, before the file screws
# around with sections:
# OC Patch:
# Some malware will cause the Signature value to not exist at all
if not self.NT_HEADERS or not self.NT_HEADERS.Signature:
raise PEFormatError('NT Headers not found.')
if self.NT_HEADERS.Signature != IMAGE_NT_SIGNATURE:
raise PEFormatError('Invalid NT Headers signature.')
self.FILE_HEADER = self.__unpack_data__(
self.__IMAGE_FILE_HEADER_format__,
self.__data__[nt_headers_offset+4:],
file_offset = nt_headers_offset+4)
image_flags = self.retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_')
if not self.FILE_HEADER:
raise PEFormatError('File Header missing')
        # Set the image's flags according to the Characteristics member
self.set_flags(self.FILE_HEADER, self.FILE_HEADER.Characteristics, image_flags)
optional_header_offset = \
nt_headers_offset+4+self.FILE_HEADER.sizeof()
# Note: location of sections can be controlled from PE header:
sections_offset = optional_header_offset + self.FILE_HEADER.SizeOfOptionalHeader
self.OPTIONAL_HEADER = self.__unpack_data__(
self.__IMAGE_OPTIONAL_HEADER_format__,
self.__data__[optional_header_offset:],
file_offset = optional_header_offset)
# According to solardesigner's findings for his
# Tiny PE project, the optional header does not
# need fields beyond "Subsystem" in order to be
# loadable by the Windows loader (given that zeroes
# are acceptable values and the header is loaded
# in a zeroed memory page)
# If trying to parse a full Optional Header fails
# we try to parse it again with some 0 padding
#
MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE = 69
if ( self.OPTIONAL_HEADER is None and
len(self.__data__[optional_header_offset:])
>= MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE ):
# Add enough zeroes to make up for the unused fields
#
padding_length = 128
# Create padding
#
padded_data = self.__data__[optional_header_offset:] + (
'\0' * padding_length)
self.OPTIONAL_HEADER = self.__unpack_data__(
self.__IMAGE_OPTIONAL_HEADER_format__,
padded_data,
file_offset = optional_header_offset)
# Check the Magic in the OPTIONAL_HEADER and set the PE file
# type accordingly
#
if self.OPTIONAL_HEADER is not None:
if self.OPTIONAL_HEADER.Magic == OPTIONAL_HEADER_MAGIC_PE:
self.PE_TYPE = OPTIONAL_HEADER_MAGIC_PE
elif self.OPTIONAL_HEADER.Magic == OPTIONAL_HEADER_MAGIC_PE_PLUS:
self.PE_TYPE = OPTIONAL_HEADER_MAGIC_PE_PLUS
self.OPTIONAL_HEADER = self.__unpack_data__(
self.__IMAGE_OPTIONAL_HEADER64_format__,
self.__data__[optional_header_offset:],
file_offset = optional_header_offset)
# Again, as explained above, we try to parse
# a reduced form of the Optional Header which
# is still valid despite not including all
# structure members
#
MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE = 69+4
if ( self.OPTIONAL_HEADER is None and
len(self.__data__[optional_header_offset:])
>= MINIMUM_VALID_OPTIONAL_HEADER_RAW_SIZE ):
padding_length = 128
padded_data = self.__data__[optional_header_offset:] + (
'\0' * padding_length)
self.OPTIONAL_HEADER = self.__unpack_data__(
self.__IMAGE_OPTIONAL_HEADER64_format__,
padded_data,
file_offset = optional_header_offset)
if not self.FILE_HEADER:
raise PEFormatError('File Header missing')
# OC Patch:
# Die gracefully if there is no OPTIONAL_HEADER field
# 975440f5ad5e2e4a92c4d9a5f22f75c1
if self.PE_TYPE is None or self.OPTIONAL_HEADER is None:
raise PEFormatError("No Optional Header found, invalid PE32 or PE32+ file")
dll_characteristics_flags = self.retrieve_flags(DLL_CHARACTERISTICS, 'IMAGE_DLL_CHARACTERISTICS_')
        # Set the Dll Characteristics flags according to the DllCharacteristics member
self.set_flags(
self.OPTIONAL_HEADER,
self.OPTIONAL_HEADER.DllCharacteristics,
dll_characteristics_flags)
self.OPTIONAL_HEADER.DATA_DIRECTORY = []
#offset = (optional_header_offset + self.FILE_HEADER.SizeOfOptionalHeader)
offset = (optional_header_offset + self.OPTIONAL_HEADER.sizeof())
self.NT_HEADERS.FILE_HEADER = self.FILE_HEADER
self.NT_HEADERS.OPTIONAL_HEADER = self.OPTIONAL_HEADER
# The NumberOfRvaAndSizes is sanitized to stay within
        # reasonable limits so it can be cast to an int
#
if self.OPTIONAL_HEADER.NumberOfRvaAndSizes > 0x10:
self.__warnings.append(
'Suspicious NumberOfRvaAndSizes in the Optional Header. ' +
'Normal values are never larger than 0x10, the value is: 0x%x' %
self.OPTIONAL_HEADER.NumberOfRvaAndSizes )
for i in xrange(int(0x7fffffffL & self.OPTIONAL_HEADER.NumberOfRvaAndSizes)):
if len(self.__data__[offset:]) == 0:
break
if len(self.__data__[offset:]) < 8:
data = self.__data__[offset:]+'\0'*8
else:
data = self.__data__[offset:]
dir_entry = self.__unpack_data__(
self.__IMAGE_DATA_DIRECTORY_format__,
data,
file_offset = offset)
if dir_entry is None:
break
# Would fail if missing an entry
# 1d4937b2fa4d84ad1bce0309857e70ca offending sample
try:
dir_entry.name = DIRECTORY_ENTRY[i]
except (KeyError, AttributeError):
break
offset += dir_entry.sizeof()
self.OPTIONAL_HEADER.DATA_DIRECTORY.append(dir_entry)
# If the offset goes outside the optional header,
# the loop is broken, regardless of how many directories
# NumberOfRvaAndSizes says there are
#
            # We assume a normally sized optional header, hence we do
            # a sizeof() instead of reading SizeOfOptionalHeader.
            # Then we add a default number of directories times their size;
# if we go beyond that, we assume the number of directories
# is wrong and stop processing
if offset >= (optional_header_offset +
self.OPTIONAL_HEADER.sizeof() + 8*16) :
break
offset = self.parse_sections(sections_offset)
# OC Patch:
# There could be a problem if there are no raw data sections
# greater than 0
# fc91013eb72529da005110a3403541b6 example
        # Should this throw an exception if the minimum header offset
# can't be found?
#
rawDataPointers = [
s.PointerToRawData for s in self.sections if s.PointerToRawData>0]
if len(rawDataPointers) > 0:
lowest_section_offset = min(rawDataPointers)
else:
lowest_section_offset = None
if not lowest_section_offset or lowest_section_offset<offset:
self.header = self.__data__[:offset]
else:
self.header = self.__data__[:lowest_section_offset]
# Check whether the entry point lies within a section
#
if self.get_section_by_rva(self.OPTIONAL_HEADER.AddressOfEntryPoint) is not None:
# Check whether the entry point lies within the file
#
ep_offset = self.get_offset_from_rva(self.OPTIONAL_HEADER.AddressOfEntryPoint)
if ep_offset > len(self.__data__):
self.__warnings.append(
'Possibly corrupt file. AddressOfEntryPoint lies outside the file. ' +
'AddressOfEntryPoint: 0x%x' %
self.OPTIONAL_HEADER.AddressOfEntryPoint )
else:
self.__warnings.append(
'AddressOfEntryPoint lies outside the sections\' boundaries. ' +
'AddressOfEntryPoint: 0x%x' %
self.OPTIONAL_HEADER.AddressOfEntryPoint )
if not fast_load:
self.parse_data_directories()
def get_warnings(self):
"""Return the list of warnings.
Non-critical problems found when parsing the PE file are
appended to a list of warnings. This method returns the
full list.
"""
return self.__warnings
def show_warnings(self):
"""Print the list of warnings.
Non-critical problems found when parsing the PE file are
appended to a list of warnings. This method prints the
full list to standard output.
"""
for warning in self.__warnings:
print '>', warning
def full_load(self):
"""Process the data directories.
        This method will load the data directories which might not have
been loaded if the "fast_load" option was used.
"""
self.parse_data_directories()
def write(self, filename=None):
"""Write the PE file.
This function will process all headers and components
of the PE file and include all changes made (by just
assigning to attributes in the PE objects) and write
the changes back to a file whose name is provided as
an argument. The filename is optional.
The data to be written to the file will be returned
as a 'str' object.
"""
file_data = list(self.__data__)
for struct in self.__structures__:
struct_data = list(struct.__pack__())
offset = struct.get_file_offset()
file_data[offset:offset+len(struct_data)] = struct_data
if hasattr(self, 'VS_VERSIONINFO'):
if hasattr(self, 'FileInfo'):
for entry in self.FileInfo:
if hasattr(entry, 'StringTable'):
for st_entry in entry.StringTable:
for key, entry in st_entry.entries.items():
offsets = st_entry.entries_offsets[key]
lengths = st_entry.entries_lengths[key]
if len( entry ) > lengths[1]:
uc = zip(
list(entry[:lengths[1]]), ['\0'] * lengths[1] )
l = list()
map(l.extend, uc)
file_data[
offsets[1] : offsets[1] + lengths[1]*2 ] = l
else:
uc = zip(
list(entry), ['\0'] * len(entry) )
l = list()
map(l.extend, uc)
file_data[
offsets[1] : offsets[1] + len(entry)*2 ] = l
remainder = lengths[1] - len(entry)
file_data[
offsets[1] + len(entry)*2 :
offsets[1] + lengths[1]*2 ] = [
u'\0' ] * remainder*2
new_file_data = ''.join( [ chr(ord(c)) for c in file_data ] )
if filename:
f = file(filename, 'wb+')
f.write(new_file_data)
f.close()
return new_file_data
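    # Illustrative sketch (not part of the original module); the file names and
    # the patched field are assumptions for demonstration only:
    #
    #   pe = PE('example.dll')
    #   pe.OPTIONAL_HEADER.AddressOfEntryPoint = 0x1000
    #   pe.write('patched.dll')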
def parse_sections(self, offset):
"""Fetch the PE file sections.
The sections will be readily available in the "sections" attribute.
        Its attributes will contain all the section information plus "data",
a buffer containing the section's data.
The "Characteristics" member will be processed and attributes
representing the section characteristics (with the 'IMAGE_SCN_'
string trimmed from the constant's names) will be added to the
section instance.
Refer to the SectionStructure class for additional info.
"""
self.sections = []
for i in xrange(self.FILE_HEADER.NumberOfSections):
section = SectionStructure(self.__IMAGE_SECTION_HEADER_format__)
if not section:
break
section_offset = offset + section.sizeof() * i
section.set_file_offset(section_offset)
section.__unpack__(self.__data__[section_offset:])
self.__structures__.append(section)
if section.SizeOfRawData > len(self.__data__):
self.__warnings.append(
('Error parsing section %d. ' % i) +
'SizeOfRawData is larger than file.')
if section.PointerToRawData > len(self.__data__):
self.__warnings.append(
('Error parsing section %d. ' % i) +
'PointerToRawData points beyond the end of the file.')
if section.Misc_VirtualSize > 0x10000000:
self.__warnings.append(
('Suspicious value found parsing section %d. ' % i) +
'VirtualSize is extremely large > 256MiB.')
if section.VirtualAddress > 0x10000000:
self.__warnings.append(
('Suspicious value found parsing section %d. ' % i) +
'VirtualAddress is beyond 0x10000000.')
#
# Some packer used a non-aligned PointerToRawData in the sections,
# which causes several common tools not to load the section data
# properly as they blindly read from the indicated offset.
# It seems that Windows will round the offset down to the largest
# offset multiple of FileAlignment which is smaller than
# PointerToRawData. The following code will do the same.
#
#alignment = self.OPTIONAL_HEADER.FileAlignment
section_data_start = section.PointerToRawData
if ( self.OPTIONAL_HEADER.FileAlignment != 0 and
(section.PointerToRawData % self.OPTIONAL_HEADER.FileAlignment) != 0):
self.__warnings.append(
('Error parsing section %d. ' % i) +
'Suspicious value for FileAlignment in the Optional Header. ' +
'Normally the PointerToRawData entry of the sections\' structures ' +
'is a multiple of FileAlignment, this might imply the file ' +
'is trying to confuse tools which parse this incorrectly')
section_data_end = section_data_start+section.SizeOfRawData
section.set_data(self.__data__[section_data_start:section_data_end])
section_flags = self.retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
            # Set the section's flags according to the Characteristics member
self.set_flags(section, section.Characteristics, section_flags)
if ( section.__dict__.get('IMAGE_SCN_MEM_WRITE', False) and
section.__dict__.get('IMAGE_SCN_MEM_EXECUTE', False) ):
self.__warnings.append(
('Suspicious flags set for section %d. ' % i) +
                    'Both IMAGE_SCN_MEM_WRITE and IMAGE_SCN_MEM_EXECUTE are set. ' +
'This might indicate a packed executable.')
self.sections.append(section)
if self.FILE_HEADER.NumberOfSections > 0 and self.sections:
return offset + self.sections[0].sizeof()*self.FILE_HEADER.NumberOfSections
else:
return offset
def retrieve_flags(self, flag_dict, flag_filter):
"""Read the flags from a dictionary and return them in a usable form.
Will return a list of (flag, value) for all flags in "flag_dict"
matching the filter "flag_filter".
"""
return [(f[0], f[1]) for f in flag_dict.items() if
isinstance(f[0], str) and f[0].startswith(flag_filter)]
def set_flags(self, obj, flag_field, flags):
"""Will process the flags and set attributes in the object accordingly.
The object "obj" will gain attritutes named after the flags provided in
"flags" and valued True/False, matching the results of applyin each
flag value from "flags" to flag_field.
"""
for flag in flags:
if flag[1] & flag_field:
setattr(obj, flag[0], True)
else:
setattr(obj, flag[0], False)
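# Illustrative note (not part of the original module): retrieve_flags() and
# set_flags() together turn a Characteristics bitfield into boolean attributes.
# A hedged usage sketch, assuming a parsed PE instance named `pe` (parsing
# already applies this to sections and headers):
#
#   image_flags = pe.retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_')
#   pe.set_flags(pe.FILE_HEADER, pe.FILE_HEADER.Characteristics, image_flags)
#   print pe.FILE_HEADER.IMAGE_FILE_DLL    # True for DLL images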
def parse_data_directories(self):
"""Parse and process the PE file's data directories."""
directory_parsing = (
('IMAGE_DIRECTORY_ENTRY_IMPORT', self.parse_import_directory),
('IMAGE_DIRECTORY_ENTRY_EXPORT', self.parse_export_directory),
('IMAGE_DIRECTORY_ENTRY_RESOURCE', self.parse_resources_directory),
('IMAGE_DIRECTORY_ENTRY_DEBUG', self.parse_debug_directory),
('IMAGE_DIRECTORY_ENTRY_BASERELOC', self.parse_relocations_directory),
('IMAGE_DIRECTORY_ENTRY_TLS', self.parse_directory_tls),
('IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT', self.parse_delay_import_directory),
('IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT', self.parse_directory_bound_imports) )
for entry in directory_parsing:
# OC Patch:
#
try:
dir_entry = self.OPTIONAL_HEADER.DATA_DIRECTORY[
DIRECTORY_ENTRY[entry[0]]]
except IndexError:
break
if dir_entry.VirtualAddress:
value = entry[1](dir_entry.VirtualAddress, dir_entry.Size)
if value:
setattr(self, entry[0][6:], value)
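# Illustrative note (not part of the original module): entry[0][6:] strips the
# leading 'IMAGE_' from the directory constant, so a parsed import table becomes
# the attribute DIRECTORY_ENTRY_IMPORT, the export table DIRECTORY_ENTRY_EXPORT,
# and so on. A hedged sketch, assuming a parsed PE instance named `pe`:
#
#   if hasattr(pe, 'DIRECTORY_ENTRY_IMPORT'):
#       print '%d imported modules' % len(pe.DIRECTORY_ENTRY_IMPORT)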
def parse_directory_bound_imports(self, rva, size):
""""""
bnd_descr = Structure(self.__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__)
bnd_descr_size = bnd_descr.sizeof()
start = rva
bound_imports = []
while True:
bnd_descr = self.__unpack_data__(
self.__IMAGE_BOUND_IMPORT_DESCRIPTOR_format__,
self.__data__[rva:rva+bnd_descr_size],
file_offset = rva)
if bnd_descr is None:
# If the directory can't be parsed then silently return.
# This directory does not necessarily have to be valid to
# still have a valid PE file
self.__warnings.append(
'The Bound Imports directory exists but can\'t be parsed.')
return
if bnd_descr.all_zeroes():
break
rva += bnd_descr.sizeof()
forwarder_refs = []
for idx in xrange(bnd_descr.NumberOfModuleForwarderRefs):
# Both structures IMAGE_BOUND_IMPORT_DESCRIPTOR and
# IMAGE_BOUND_FORWARDER_REF have the same size.
bnd_frwd_ref = self.__unpack_data__(
self.__IMAGE_BOUND_FORWARDER_REF_format__,
self.__data__[rva:rva+bnd_descr_size],
file_offset = rva)
# OC Patch:
if not bnd_frwd_ref:
raise PEFormatError(
"IMAGE_BOUND_FORWARDER_REF cannot be read")
rva += bnd_frwd_ref.sizeof()
name_str = self.get_string_from_data(
start+bnd_frwd_ref.OffsetModuleName, self.__data__)
if not name_str:
break
forwarder_refs.append(BoundImportRefData(
struct = bnd_frwd_ref,
name = name_str))
name_str = self.get_string_from_data(
start+bnd_descr.OffsetModuleName, self.__data__)
if not name_str:
break
bound_imports.append(
BoundImportDescData(
struct = bnd_descr,
name = name_str,
entries = forwarder_refs))
return bound_imports
def parse_directory_tls(self, rva, size):
""""""
if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
format = self.__IMAGE_TLS_DIRECTORY_format__
elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
format = self.__IMAGE_TLS_DIRECTORY64_format__
tls_struct = self.__unpack_data__(
format,
self.get_data(rva),
file_offset = self.get_offset_from_rva(rva))
if not tls_struct:
return None
return TlsData( struct = tls_struct )
def parse_relocations_directory(self, rva, size):
""""""
rlc = Structure(self.__IMAGE_BASE_RELOCATION_format__)
rlc_size = rlc.sizeof()
end = rva+size
relocations = []
while rva<end:
# OC Patch:
# Malware that has bad rva entries will cause an error.
# Just continue on after an exception
#
try:
rlc = self.__unpack_data__(
self.__IMAGE_BASE_RELOCATION_format__,
self.get_data(rva, rlc_size),
file_offset = self.get_offset_from_rva(rva) )
except PEFormatError:
self.__warnings.append(
'Invalid relocation information. Can\'t read ' +
'data at RVA: 0x%x' % rva)
rlc = None
if not rlc:
break
reloc_entries = self.parse_relocations(
rva+rlc_size, rlc.VirtualAddress, rlc.SizeOfBlock-rlc_size)
relocations.append(
BaseRelocationData(
struct = rlc,
entries = reloc_entries))
if not rlc.SizeOfBlock:
break
rva += rlc.SizeOfBlock
return relocations
def parse_relocations(self, data_rva, rva, size):
""""""
data = self.get_data(data_rva, size)
entries = []
for idx in xrange(len(data)/2):
word = struct.unpack('<H', data[idx*2:(idx+1)*2])[0]
reloc_type = (word>>12)
reloc_offset = (word&0x0fff)
entries.append(
RelocationData(
type = reloc_type,
rva = reloc_offset+rva))
return entries
def parse_debug_directory(self, rva, size):
""""""
dbg = Structure(self.__IMAGE_DEBUG_DIRECTORY_format__)
dbg_size = dbg.sizeof()
debug = []
for idx in xrange(size/dbg_size):
try:
data = self.get_data(rva+dbg_size*idx, dbg_size)
except PEFormatError, e:
self.__warnings.append(
'Invalid debug information. Can\'t read ' +
'data at RVA: 0x%x' % rva)
return None
dbg = self.__unpack_data__(
self.__IMAGE_DEBUG_DIRECTORY_format__,
data, file_offset = self.get_offset_from_rva(rva+dbg_size*idx))
if not dbg:
return None
debug.append(
DebugData(
struct = dbg))
return debug
def parse_resources_directory(self, rva, size=0, base_rva = None, level = 0):
"""Parse the resources directory.
Given the rva of the resources directory, it will process all
its entries.
The root will have the corresponding member of its structure,
IMAGE_RESOURCE_DIRECTORY plus 'entries', a list of all the
entries in the directory.
Those entries will have, correspondingly, all the structure's
members (IMAGE_RESOURCE_DIRECTORY_ENTRY) and an additional one,
"directory", pointing to the IMAGE_RESOURCE_DIRECTORY structure
representing upper layers of the tree. This one will also have
an 'entries' attribute, pointing to the 3rd, and last, level.
Another directory with more entries. Those last entries will
have a new attribute (either 'leaf' or 'data_entry' can be used to
access it). This structure finally points to the resource data.
All the members of this structure, IMAGE_RESOURCE_DATA_ENTRY,
are available as its attributes.
"""
# OC Patch:
original_rva = rva
if base_rva is None:
base_rva = rva
resources_section = self.get_section_by_rva(rva)
try:
# If the RVA is invalid all would blow up. Some EXEs seem to be
# specially nasty and have an invalid RVA.
data = self.get_data(rva)
except PEFormatError, e:
self.__warnings.append(
'Invalid resources directory. Can\'t read ' +
'directory data at RVA: 0x%x' % rva)
return None
# Get the resource directory structure, that is, the header
# of the table preceding the actual entries
#
resource_dir = self.__unpack_data__(
self.__IMAGE_RESOURCE_DIRECTORY_format__, data,
file_offset = self.get_offset_from_rva(rva) )
if resource_dir is None:
# If the resources directory can't be parsed then silently return.
# This directory does not necessarily have to be valid to
# still have a valid PE file
self.__warnings.append(
'Invalid resources directory. Can\'t parse ' +
'directory data at RVA: 0x%x' % rva)
return None
dir_entries = []
# Advance the rva to the position immediately following the directory
# table header and pointing to the first entry in the table
#
rva += resource_dir.sizeof()
number_of_entries = (
resource_dir.NumberOfNamedEntries +
resource_dir.NumberOfIdEntries )
strings_to_postprocess = list()
for idx in xrange(number_of_entries):
res = self.parse_resource_entry(rva)
if res is None:
self.__warnings.append(
'Error parsing the resources directory, ' +
'Entry %d is invalid, RVA = 0x%x. ' %
(idx, rva) )
break
entry_name = None
entry_id = None
# If all named entries have been processed, only Id ones
# remain
if idx >= resource_dir.NumberOfNamedEntries:
entry_id = res.Name
else:
ustr_offset = base_rva+res.NameOffset
try:
#entry_name = self.get_string_u_at_rva(ustr_offset, max_length=16)
entry_name = UnicodeStringWrapperPostProcessor(self, ustr_offset)
strings_to_postprocess.append(entry_name)
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the resources directory, ' +
'attempting to read entry name. ' +
'Can\'t read unicode string at offset 0x%x' %
(ustr_offset) )
if res.DataIsDirectory:
# OC Patch:
#
# One trick malware can do is to recursively reference
# the next directory. This causes hilarity to ensue when
# trying to parse everything correctly.
# If the original RVA given to this function is equal to
# the next one to parse, we assume that it's a trick.
# Instead of raising a PEFormatError this would skip some
# reasonable data so we just break.
#
# 9ee4d0a0caf095314fd7041a3e4404dc is the offending sample
if original_rva == (base_rva + res.OffsetToDirectory):
break
else:
entry_directory = self.parse_resources_directory(
base_rva+res.OffsetToDirectory,
base_rva=base_rva, level = level+1)
if not entry_directory:
break
dir_entries.append(
ResourceDirEntryData(
struct = res,
name = entry_name,
id = entry_id,
directory = entry_directory))
else:
struct = self.parse_resource_data_entry(
base_rva + res.OffsetToDirectory)
if struct:
entry_data = ResourceDataEntryData(
struct = struct,
lang = res.Name & 0xff,
sublang = (res.Name>>8) & 0xff)
dir_entries.append(
ResourceDirEntryData(
struct = res,
name = entry_name,
id = entry_id,
data = entry_data))
else:
break
# Check if this entry contains version information
#
if level == 0 and res.Id == RESOURCE_TYPE['RT_VERSION']:
if len(dir_entries)>0:
last_entry = dir_entries[-1]
rt_version_struct = None
try:
rt_version_struct = last_entry.directory.entries[0].directory.entries[0].data.struct
except:
# Maybe a malformed directory structure...?
# Lets ignore it
pass
if rt_version_struct is not None:
self.parse_version_information(rt_version_struct)
rva += res.sizeof()
string_rvas = [s.get_rva() for s in strings_to_postprocess]
string_rvas.sort()
for idx, s in enumerate(strings_to_postprocess):
s.render_pascal_16()
resource_directory_data = ResourceDirData(
struct = resource_dir,
entries = dir_entries)
return resource_directory_data
def parse_resource_data_entry(self, rva):
"""Parse a data entry from the resources directory."""
try:
# If the RVA is invalid all would blow up. Some EXEs seem to be
# specially nasty and have an invalid RVA.
data = self.get_data(rva)
except PEFormatError, excp:
self.__warnings.append(
'Error parsing a resource directory data entry, ' +
'the RVA is invalid: 0x%x' % ( rva ) )
return None
data_entry = self.__unpack_data__(
self.__IMAGE_RESOURCE_DATA_ENTRY_format__, data,
file_offset = self.get_offset_from_rva(rva) )
return data_entry
def parse_resource_entry(self, rva):
"""Parse a directory entry from the resources directory."""
resource = self.__unpack_data__(
self.__IMAGE_RESOURCE_DIRECTORY_ENTRY_format__, self.get_data(rva),
file_offset = self.get_offset_from_rva(rva) )
if resource is None:
return None
#resource.NameIsString = (resource.Name & 0x80000000L) >> 31
resource.NameOffset = resource.Name & 0x7FFFFFFFL
resource.__pad = resource.Name & 0xFFFF0000L
resource.Id = resource.Name & 0x0000FFFFL
resource.DataIsDirectory = (resource.OffsetToData & 0x80000000L) >> 31
resource.OffsetToDirectory = resource.OffsetToData & 0x7FFFFFFFL
return resource
def parse_version_information(self, version_struct):
"""Parse version information structure.
The data will be made available in three attributes of the PE object.
VS_VERSIONINFO will contain the first three fields of the main structure:
'Length', 'ValueLength', and 'Type'
VS_FIXEDFILEINFO will hold the rest of the fields, accessible as sub-attributes:
'Signature', 'StrucVersion', 'FileVersionMS', 'FileVersionLS',
'ProductVersionMS', 'ProductVersionLS', 'FileFlagsMask', 'FileFlags',
'FileOS', 'FileType', 'FileSubtype', 'FileDateMS', 'FileDateLS'
FileInfo is a list of all StringFileInfo and VarFileInfo structures.
StringFileInfo structures will have a list as an attribute named 'StringTable'
containing all the StringTable structures. Each of those structures contains a
dictionary 'entries' with all the key/value version information string pairs.
VarFileInfo structures will have a list as an attribute named 'Var' containing
all Var structures. Each Var structure will have a dictionary as an attribute
named 'entry' which will contain the name and value of the Var.
"""
# Retrieve the data for the version info resource
#
start_offset = self.get_offset_from_rva( version_struct.OffsetToData )
raw_data = self.__data__[ start_offset : start_offset+version_struct.Size ]
# Map the main structure and the subsequent string
#
versioninfo_struct = self.__unpack_data__(
self.__VS_VERSIONINFO_format__, raw_data,
file_offset = start_offset )
if versioninfo_struct is None:
return
ustr_offset = version_struct.OffsetToData + versioninfo_struct.sizeof()
try:
versioninfo_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read VS_VERSION_INFO string. Can\'t ' +
'read unicode string at offset 0x%x' % (
ustr_offset ) )
versioninfo_string = None
# If the structure does not contain the expected name, it's assumed to be invalid
#
if versioninfo_string != u'VS_VERSION_INFO':
self.__warnings.append('Invalid VS_VERSION_INFO block')
return
# Set the PE object's VS_VERSIONINFO to this one
#
self.VS_VERSIONINFO = versioninfo_struct
# Set the Key attribute to point to the unicode string identifying the structure
#
self.VS_VERSIONINFO.Key = versioninfo_string
# Process the fixed version information, get the offset and structure
#
fixedfileinfo_offset = self.dword_align(
versioninfo_struct.sizeof() + 2 * (len(versioninfo_string) + 1),
version_struct.OffsetToData)
fixedfileinfo_struct = self.__unpack_data__(
self.__VS_FIXEDFILEINFO_format__,
raw_data[fixedfileinfo_offset:],
file_offset = start_offset+fixedfileinfo_offset )
if not fixedfileinfo_struct:
return
# Set the PE object's VS_FIXEDFILEINFO to this one
#
self.VS_FIXEDFILEINFO = fixedfileinfo_struct
# Start parsing all the StringFileInfo and VarFileInfo structures
#
# Get the first one
#
stringfileinfo_offset = self.dword_align(
fixedfileinfo_offset + fixedfileinfo_struct.sizeof(),
version_struct.OffsetToData)
original_stringfileinfo_offset = stringfileinfo_offset
# Set the PE object's attribute that will contain them all.
#
self.FileInfo = list()
while True:
# Process the StringFileInfo/VarFileInfo struct
#
stringfileinfo_struct = self.__unpack_data__(
self.__StringFileInfo_format__,
raw_data[stringfileinfo_offset:],
file_offset = start_offset+stringfileinfo_offset )
if stringfileinfo_struct is None:
self.__warnings.append(
'Error parsing StringFileInfo/VarFileInfo struct' )
return None
# Get the subsequent string defining the structure.
#
ustr_offset = ( version_struct.OffsetToData +
stringfileinfo_offset + versioninfo_struct.sizeof() )
try:
stringfileinfo_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringFileInfo string. Can\'t ' +
'read unicode string at offset 0x%x' % ( ustr_offset ) )
break
# Set such string as the Key attribute
#
stringfileinfo_struct.Key = stringfileinfo_string
# Append the structure to the PE object's list
#
self.FileInfo.append(stringfileinfo_struct)
# Parse a StringFileInfo entry
#
if stringfileinfo_string == u'StringFileInfo':
if stringfileinfo_struct.Type == 1 and stringfileinfo_struct.ValueLength == 0:
stringtable_offset = self.dword_align(
stringfileinfo_offset + stringfileinfo_struct.sizeof() +
2*(len(stringfileinfo_string)+1),
version_struct.OffsetToData)
stringfileinfo_struct.StringTable = list()
# Process the String Table entries
#
while True:
stringtable_struct = self.__unpack_data__(
self.__StringTable_format__,
raw_data[stringtable_offset:],
file_offset = start_offset+stringtable_offset )
if not stringtable_struct:
break
ustr_offset = ( version_struct.OffsetToData + stringtable_offset +
stringtable_struct.sizeof() )
try:
stringtable_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringTable string. Can\'t ' +
'read unicode string at offset 0x%x' % ( ustr_offset ) )
break
stringtable_struct.LangID = stringtable_string
stringtable_struct.entries = dict()
stringtable_struct.entries_offsets = dict()
stringtable_struct.entries_lengths = dict()
stringfileinfo_struct.StringTable.append(stringtable_struct)
entry_offset = self.dword_align(
stringtable_offset + stringtable_struct.sizeof() +
2*(len(stringtable_string)+1),
version_struct.OffsetToData)
# Process all entries in the string table
#
while entry_offset < stringtable_offset + stringtable_struct.Length:
string_struct = self.__unpack_data__(
self.__String_format__, raw_data[entry_offset:],
file_offset = start_offset+entry_offset )
if not string_struct:
break
ustr_offset = ( version_struct.OffsetToData + entry_offset +
string_struct.sizeof() )
try:
key = self.get_string_u_at_rva( ustr_offset )
key_offset = self.get_offset_from_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringTable Key string. Can\'t ' +
'read unicode string at offset 0x%x' % ( ustr_offset ) )
break
value_offset = self.dword_align(
2*(len(key)+1) + entry_offset + string_struct.sizeof(),
version_struct.OffsetToData)
ustr_offset = version_struct.OffsetToData + value_offset
try:
value = self.get_string_u_at_rva( ustr_offset,
max_length = string_struct.ValueLength )
value_offset = self.get_offset_from_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringTable Value string. ' +
'Can\'t read unicode string at offset 0x%x' % (
ustr_offset ) )
break
if string_struct.Length == 0:
entry_offset = stringtable_offset + stringtable_struct.Length
else:
entry_offset = self.dword_align(
string_struct.Length+entry_offset, version_struct.OffsetToData)
key_as_char = []
for c in key:
if ord(c)>128:
key_as_char.append('\\x%02x' %ord(c))
else:
key_as_char.append(c)
key_as_char = ''.join(key_as_char)
setattr(stringtable_struct, key_as_char, value)
stringtable_struct.entries[key] = value
stringtable_struct.entries_offsets[key] = (key_offset, value_offset)
stringtable_struct.entries_lengths[key] = (len(key), len(value))
stringtable_offset = self.dword_align(
stringtable_struct.Length + stringtable_offset,
version_struct.OffsetToData)
if stringtable_offset >= stringfileinfo_struct.Length:
break
# Parse a VarFileInfo entry
#
elif stringfileinfo_string == u'VarFileInfo':
varfileinfo_struct = stringfileinfo_struct
varfileinfo_struct.name = 'VarFileInfo'
if varfileinfo_struct.Type == 1 and varfileinfo_struct.ValueLength == 0:
var_offset = self.dword_align(
stringfileinfo_offset + varfileinfo_struct.sizeof() +
2*(len(stringfileinfo_string)+1),
version_struct.OffsetToData)
varfileinfo_struct.Var = list()
# Process all entries
#
while True:
var_struct = self.__unpack_data__(
self.__Var_format__,
raw_data[var_offset:],
file_offset = start_offset+var_offset )
if not var_struct:
break
ustr_offset = ( version_struct.OffsetToData + var_offset +
var_struct.sizeof() )
try:
var_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read VarFileInfo Var string. ' +
'Can\'t read unicode string at offset 0x%x' % (ustr_offset))
break
varfileinfo_struct.Var.append(var_struct)
varword_offset = self.dword_align(
2*(len(var_string)+1) + var_offset + var_struct.sizeof(),
version_struct.OffsetToData)
orig_varword_offset = varword_offset
while varword_offset < orig_varword_offset + var_struct.ValueLength:
word1 = self.get_word_from_data(
raw_data[varword_offset:varword_offset+2], 0)
word2 = self.get_word_from_data(
raw_data[varword_offset+2:varword_offset+4], 0)
varword_offset += 4
var_struct.entry = {var_string: '0x%04x 0x%04x' % (word1, word2)}
var_offset = self.dword_align(
var_offset+var_struct.Length, version_struct.OffsetToData)
if var_offset <= var_offset+var_struct.Length:
break
# Increment and align the offset
#
stringfileinfo_offset = self.dword_align(
stringfileinfo_struct.Length+stringfileinfo_offset,
version_struct.OffsetToData)
# Check if all the StringFileInfo and VarFileInfo items have been processed
#
if stringfileinfo_struct.Length == 0 or stringfileinfo_offset >= versioninfo_struct.Length:
break
def parse_export_directory(self, rva, size):
"""Parse the export directory.
Given the rva of the export directory, it will process all
its entries.
The exports will be made available through a list "exports"
containing a tuple with the following elements:
(ordinal, symbol_address, symbol_name)
And also through a dictionary "exports_by_ordinal" whose keys
will be the ordinals and the values tuples of the form:
(symbol_address, symbol_name)
The symbol addresses are relative, not absolute.
"""
try:
export_dir = self.__unpack_data__(
self.__IMAGE_EXPORT_DIRECTORY_format__, self.get_data(rva),
file_offset = self.get_offset_from_rva(rva) )
except PEFormatError:
self.__warnings.append(
'Error parsing export directory at RVA: 0x%x' % ( rva ) )
return
if not export_dir:
return
try:
address_of_names = self.get_data(
export_dir.AddressOfNames, export_dir.NumberOfNames*4)
address_of_name_ordinals = self.get_data(
export_dir.AddressOfNameOrdinals, export_dir.NumberOfNames*4)
address_of_functions = self.get_data(
export_dir.AddressOfFunctions, export_dir.NumberOfFunctions*4)
except PEFormatError:
self.__warnings.append(
'Error parsing export directory at RVA: 0x%x' % ( rva ) )
return
exports = []
for i in xrange(export_dir.NumberOfNames):
symbol_name = self.get_string_at_rva(
self.get_dword_from_data(address_of_names, i))
symbol_ordinal = self.get_word_from_data(
address_of_name_ordinals, i)
if symbol_ordinal*4<len(address_of_functions):
symbol_address = self.get_dword_from_data(
address_of_functions, symbol_ordinal)
else:
# Corrupt? a bad pointer... we assume it's all
# useless, no exports
return None
# If the function's rva points within the export directory
# it will point to a string with the forwarded symbol's name
# instead of pointing to the function start address.
if symbol_address>=rva and symbol_address<rva+size:
forwarder_str = self.get_string_at_rva(symbol_address)
else:
forwarder_str = None
exports.append(
ExportData(
ordinal = export_dir.Base+symbol_ordinal,
address = symbol_address,
name = symbol_name,
forwarder = forwarder_str))
ordinals = [exp.ordinal for exp in exports]
for idx in xrange(export_dir.NumberOfFunctions):
if not idx+export_dir.Base in ordinals:
symbol_address = self.get_dword_from_data(
address_of_functions,
idx)
#
# Checking for forwarder again.
#
if symbol_address>=rva and symbol_address<rva+size:
forwarder_str = self.get_string_at_rva(symbol_address)
else:
forwarder_str = None
exports.append(
ExportData(
ordinal = export_dir.Base+idx,
address = symbol_address,
name = None,
forwarder = forwarder_str))
return ExportDirData(
struct = export_dir,
symbols = exports)
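# Illustrative note (not part of the original module): a hedged sketch of
# walking the parsed exports, assuming a parsed PE instance named `pe` for
# which the export directory was found (addresses are RVAs, hence ImageBase):
#
#   for exp in pe.DIRECTORY_ENTRY_EXPORT.symbols:
#       print exp.ordinal, hex(pe.OPTIONAL_HEADER.ImageBase + exp.address), exp.name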
def dword_align(self, offset, base):
offset += base
return (offset+3) - ((offset+3)%4) - base
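# Illustrative note (not part of the original module): dword_align() rounds
# (offset + base) up to the next multiple of four and then subtracts the base
# again, e.g.:
#
#   self.dword_align(5, 0)    # -> 8
#   self.dword_align(5, 3)    # -> 5, since 5 + 3 = 8 is already 4-byte aligned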
def parse_delay_import_directory(self, rva, size):
"""Walk and parse the delay import directory."""
import_descs = []
while True:
try:
# If the RVA is invalid all would blow up. Some PEs seem to be
# specially nasty and have an invalid RVA.
data = self.get_data(rva)
except PEFormatError, e:
self.__warnings.append(
'Error parsing the Delay import directory at RVA: 0x%x' % ( rva ) )
break
import_desc = self.__unpack_data__(
self.__IMAGE_DELAY_IMPORT_DESCRIPTOR_format__,
data, file_offset = self.get_offset_from_rva(rva) )
# If the structure is all zeroes, we reached the end of the list
if not import_desc or import_desc.all_zeroes():
break
rva += import_desc.sizeof()
try:
import_data = self.parse_imports(
import_desc.pINT,
import_desc.pIAT,
None)
except PEFormatError, e:
self.__warnings.append(
'Error parsing the Delay import directory. ' +
'Invalid import data at RVA: 0x%x' % ( rva ) )
break
if not import_data:
continue
dll = self.get_string_at_rva(import_desc.szName)
if dll:
import_descs.append(
ImportDescData(
struct = import_desc,
imports = import_data,
dll = dll))
return import_descs
def parse_import_directory(self, rva, size):
"""Walk and parse the import directory."""
import_descs = []
while True:
try:
# If the RVA is invalid all would blow up. Some EXEs seem to be
# specially nasty and have an invalid RVA.
data = self.get_data(rva)
except PEFormatError, e:
self.__warnings.append(
'Error parsing the Import directory at RVA: 0x%x' % ( rva ) )
break
import_desc = self.__unpack_data__(
self.__IMAGE_IMPORT_DESCRIPTOR_format__,
data, file_offset = self.get_offset_from_rva(rva) )
# If the structure is all zeroes, we reached the end of the list
if not import_desc or import_desc.all_zeroes():
break
rva += import_desc.sizeof()
try:
import_data = self.parse_imports(
import_desc.OriginalFirstThunk,
import_desc.FirstThunk,
import_desc.ForwarderChain)
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the Import directory. ' +
'Invalid Import data at RVA: 0x%x' % ( rva ) )
break
#raise excp
if not import_data:
continue
dll = self.get_string_at_rva(import_desc.Name)
if dll:
import_descs.append(
ImportDescData(
struct = import_desc,
imports = import_data,
dll = dll))
return import_descs
def parse_imports(self, original_first_thunk, first_thunk, forwarder_chain):
"""Parse the imported symbols.
It will fill a list, which will be available as the dictionary
attribute "imports". Its keys will be the DLL names and the values
all the symbols imported from that object.
"""
imported_symbols = []
imports_section = self.get_section_by_rva(first_thunk)
if not imports_section:
raise PEFormatError, 'Invalid/corrupt imports.'
# Import Lookup Table. Contains ordinals or pointers to strings.
ilt = self.get_import_table(original_first_thunk)
# Import Address Table. May have identical content to ILT if
# PE file is not bound. Will contain the address of the
# imported symbols once the binary is loaded or if it is already
# bound.
iat = self.get_import_table(first_thunk)
# OC Patch:
# Would crash if iat or ilt had None type
if not iat and not ilt:
raise PEFormatError(
'Invalid Import Table information. ' +
'Both ILT and IAT appear to be broken.')
if not iat and ilt:
table = ilt
elif iat and not ilt:
table = iat
elif ilt and ((len(ilt) and len(iat)==0) or (len(ilt) == len(iat))):
table = ilt
elif (ilt and len(ilt))==0 and (iat and len(iat)):
table = iat
else:
return None
for idx in xrange(len(table)):
imp_ord = None
imp_hint = None
imp_name = None
hint_name_table_rva = None
if table[idx].AddressOfData:
if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
ordinal_flag = IMAGE_ORDINAL_FLAG
elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
ordinal_flag = IMAGE_ORDINAL_FLAG64
# If imported by ordinal, we will append the ordinal number
#
if table[idx].AddressOfData & ordinal_flag:
import_by_ordinal = True
imp_ord = table[idx].AddressOfData & 0xffff
imp_name = None
else:
import_by_ordinal = False
try:
hint_name_table_rva = table[idx].AddressOfData & 0x7fffffff
data = self.get_data(hint_name_table_rva, 2)
# Get the Hint
imp_hint = self.get_word_from_data(data, 0)
imp_name = self.get_string_at_rva(table[idx].AddressOfData+2)
except PEFormatError, e:
pass
imp_address = first_thunk+self.OPTIONAL_HEADER.ImageBase+idx*4
if iat and ilt and ilt[idx].AddressOfData != iat[idx].AddressOfData:
imp_bound = iat[idx].AddressOfData
else:
imp_bound = None
if imp_name != '' and (imp_ord or imp_name):
imported_symbols.append(
ImportData(
import_by_ordinal = import_by_ordinal,
ordinal = imp_ord,
hint = imp_hint,
name = imp_name,
bound = imp_bound,
address = imp_address,
hint_name_table_rva = hint_name_table_rva))
return imported_symbols
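# Illustrative note (not part of the original module): a hedged sketch of
# listing imported symbols once DIRECTORY_ENTRY_IMPORT has been populated,
# assuming a parsed PE instance named `pe`:
#
#   for module in pe.DIRECTORY_ENTRY_IMPORT:
#       print module.dll
#       for imp in module.imports:
#           print '  ', hex(imp.address), imp.name or imp.ordinal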
def get_import_table(self, rva):
table = []
while True and rva:
try:
data = self.get_data(rva)
except PEFormatError, e:
self.__warnings.append(
'Error parsing the import table. ' +
'Invalid data at RVA: 0x%x' % ( rva ) )
return None
if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
format = self.__IMAGE_THUNK_DATA_format__
elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
format = self.__IMAGE_THUNK_DATA64_format__
thunk_data = self.__unpack_data__(
format, data, file_offset=self.get_offset_from_rva(rva) )
if not thunk_data or thunk_data.all_zeroes():
break
rva += thunk_data.sizeof()
table.append(thunk_data)
return table
def get_memory_mapped_image(self, max_virtual_address=0x10000000, ImageBase=None):
"""Returns the data corresponding to the memory layout of the PE file.
The data includes the PE header and the sections loaded at offsets
corresponding to their relative virtual addresses. (the VirtualAddress
section header member).
Any offset in this data corresponds to the absolute memory address
ImageBase+offset.
The optional argument 'max_virtual_address' provides a means of limiting
which sections are processed.
Any section whose VirtualAddress lies beyond this value will be skipped.
Normally, sections with values beyond this range are just there to confuse
tools. It's a common trick to see in packed executables.
If the 'ImageBase' optional argument is supplied, the file's relocations
will be applied to the image by calling the 'relocate_image()' method.
"""
# Collect all sections in one code block
data = self.header
for section in self.sections:
# Miscellaneous integrity tests.
# Some packers will set these to bogus values to
# make tools go nuts.
#
if section.Misc_VirtualSize == 0 or section.SizeOfRawData == 0:
continue
if section.SizeOfRawData > len(self.__data__):
continue
if section.PointerToRawData > len(self.__data__):
continue
if section.VirtualAddress >= max_virtual_address:
continue
padding_length = section.VirtualAddress - len(data)
if padding_length>0:
data += '\0'*padding_length
elif padding_length<0:
data = data[:padding_length]
data += section.data
return data
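# Illustrative note (not part of the original module): the returned buffer is
# indexed by RVA, so directory offsets can be used directly. A hedged sketch,
# assuming a parsed PE instance named `pe`:
#
#   image = pe.get_memory_mapped_image()
#   rva = pe.OPTIONAL_HEADER.DATA_DIRECTORY[0].VirtualAddress
#   chunk = image[rva:rva+16]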
def get_data(self, rva, length=None):
"""Get data regardless of the section where it lies on.
Given a rva and the size of the chunk to retrieve, this method
will find the section where the data lies and return the data.
"""
s = self.get_section_by_rva(rva)
if not s:
if rva<len(self.header):
if length:
end = rva+length
else:
end = None
return self.header[rva:end]
raise PEFormatError, 'data at RVA can\'t be fetched. Corrupt header?'
return s.get_data(rva, length)
def get_rva_from_offset(self, offset):
"""Get the rva corresponding to this file offset. """
s = self.get_section_by_offset(offset)
if not s:
raise PEFormatError("specified offset (0x%x) doesn't belong to any section." % offset)
return s.get_rva_from_offset(offset)
def get_offset_from_rva(self, rva):
"""Get the file offset corresponding to this rva.
Given an rva, this method will find the section where the
data lies and return the offset within the file.
"""
s = self.get_section_by_rva(rva)
if not s:
raise PEFormatError, 'data at RVA can\'t be fetched. Corrupt header?'
return s.get_offset_from_rva(rva)
def get_string_at_rva(self, rva):
"""Get an ASCII string located at the given address."""
s = self.get_section_by_rva(rva)
if not s:
if rva<len(self.header):
return self.get_string_from_data(rva, self.header)
return None
return self.get_string_from_data(rva-s.VirtualAddress, s.data)
def get_string_from_data(self, offset, data):
"""Get an ASCII string from within the data."""
# OC Patch
b = None
try:
b = data[offset]
except IndexError:
return ''
s = ''
while ord(b):
s += b
offset += 1
try:
b = data[offset]
except IndexError:
break
return s
def get_string_u_at_rva(self, rva, max_length = 2**16):
"""Get an Unicode string located at the given address."""
try:
# If the RVA is invalid all would blow up. Some EXEs seem to be
# specially nasty and have an invalid RVA.
data = self.get_data(rva, 2)
except PEFormatError, e:
return None
#length = struct.unpack('<H', data)[0]
s = u''
for idx in xrange(max_length):
try:
uchr = struct.unpack('<H', self.get_data(rva+2*idx, 2))[0]
except struct.error:
break
if unichr(uchr) == u'\0':
break
s += unichr(uchr)
return s
def get_section_by_offset(self, offset):
"""Get the section containing the given file offset."""
sections = [s for s in self.sections if s.contains_offset(offset)]
if sections:
return sections[0]
return None
def get_section_by_rva(self, rva):
"""Get the section containing the given address."""
sections = [s for s in self.sections if s.contains_rva(rva)]
if sections:
return sections[0]
return None
def __str__(self):
return self.dump_info()
def print_info(self):
"""Print all the PE header information in a human readable from."""
print self.dump_info()
def dump_info(self, dump=None):
"""Dump all the PE header information into human readable string."""
if dump is None:
dump = Dump()
warnings = self.get_warnings()
if warnings:
dump.add_header('Parsing Warnings')
for warning in warnings:
dump.add_line(warning)
dump.add_newline()
dump.add_header('DOS_HEADER')
dump.add_lines(self.DOS_HEADER.dump())
dump.add_newline()
dump.add_header('NT_HEADERS')
dump.add_lines(self.NT_HEADERS.dump())
dump.add_newline()
dump.add_header('FILE_HEADER')
dump.add_lines(self.FILE_HEADER.dump())
image_flags = self.retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_')
dump.add('Flags: ')
flags = []
for flag in image_flags:
if getattr(self.FILE_HEADER, flag[0]):
flags.append(flag[0])
dump.add_line(', '.join(flags))
dump.add_newline()
if hasattr(self, 'OPTIONAL_HEADER') and self.OPTIONAL_HEADER is not None:
dump.add_header('OPTIONAL_HEADER')
dump.add_lines(self.OPTIONAL_HEADER.dump())
dll_characteristics_flags = self.retrieve_flags(DLL_CHARACTERISTICS, 'IMAGE_DLL_CHARACTERISTICS_')
dump.add('DllCharacteristics: ')
flags = []
for flag in dll_characteristics_flags:
if getattr(self.OPTIONAL_HEADER, flag[0]):
flags.append(flag[0])
dump.add_line(', '.join(flags))
dump.add_newline()
dump.add_header('PE Sections')
section_flags = self.retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
for section in self.sections:
dump.add_lines(section.dump())
dump.add('Flags: ')
flags = []
for flag in section_flags:
if getattr(section, flag[0]):
flags.append(flag[0])
dump.add_line(', '.join(flags))
dump.add_line('Entropy: %f (Min=0.0, Max=8.0)' % section.get_entropy() )
if md5 is not None:
dump.add_line('MD5 hash: %s' % section.get_hash_md5() )
if sha1 is not None:
dump.add_line('SHA-1 hash: %s' % section.get_hash_sha1() )
if sha256 is not None:
dump.add_line('SHA-256 hash: %s' % section.get_hash_sha256() )
if sha512 is not None:
dump.add_line('SHA-512 hash: %s' % section.get_hash_sha512() )
dump.add_newline()
if (hasattr(self, 'OPTIONAL_HEADER') and
hasattr(self.OPTIONAL_HEADER, 'DATA_DIRECTORY') ):
dump.add_header('Directories')
for idx in xrange(len(self.OPTIONAL_HEADER.DATA_DIRECTORY)):
directory = self.OPTIONAL_HEADER.DATA_DIRECTORY[idx]
dump.add_lines(directory.dump())
dump.add_newline()
if hasattr(self, 'VS_VERSIONINFO'):
dump.add_header('Version Information')
dump.add_lines(self.VS_VERSIONINFO.dump())
dump.add_newline()
if hasattr(self, 'VS_FIXEDFILEINFO'):
dump.add_lines(self.VS_FIXEDFILEINFO.dump())
dump.add_newline()
if hasattr(self, 'FileInfo'):
for entry in self.FileInfo:
dump.add_lines(entry.dump())
dump.add_newline()
if hasattr(entry, 'StringTable'):
for st_entry in entry.StringTable:
[dump.add_line(' '+line) for line in st_entry.dump()]
dump.add_line(' LangID: '+st_entry.LangID)
dump.add_newline()
for str_entry in st_entry.entries.items():
dump.add_line(' '+str_entry[0]+': '+str_entry[1])
dump.add_newline()
elif hasattr(entry, 'Var'):
for var_entry in entry.Var:
if hasattr(var_entry, 'entry'):
[dump.add_line(' '+line) for line in var_entry.dump()]
dump.add_line(
' ' + var_entry.entry.keys()[0] +
': ' + var_entry.entry.values()[0])
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_EXPORT'):
dump.add_header('Exported symbols')
dump.add_lines(self.DIRECTORY_ENTRY_EXPORT.struct.dump())
dump.add_newline()
dump.add_line('%-10s %-10s %s' % ('Ordinal', 'RVA', 'Name'))
for export in self.DIRECTORY_ENTRY_EXPORT.symbols:
dump.add('%-10d 0x%08Xh %s' % (
export.ordinal, export.address, export.name))
if export.forwarder:
dump.add_line(' forwarder: %s' % export.forwarder)
else:
dump.add_newline()
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):
dump.add_header('Imported symbols')
for module in self.DIRECTORY_ENTRY_IMPORT:
dump.add_lines(module.struct.dump())
dump.add_newline()
for symbol in module.imports:
if symbol.import_by_ordinal is True:
dump.add('%s Ordinal[%s] (Imported by Ordinal)' % (
module.dll, str(symbol.ordinal)))
else:
dump.add('%s.%s Hint[%s]' % (
module.dll, symbol.name, str(symbol.hint)))
if symbol.bound:
dump.add_line(' Bound: 0x%08X' % (symbol.bound))
else:
dump.add_newline()
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_BOUND_IMPORT'):
dump.add_header('Bound imports')
for bound_imp_desc in self.DIRECTORY_ENTRY_BOUND_IMPORT:
dump.add_lines(bound_imp_desc.struct.dump())
dump.add_line('DLL: %s' % bound_imp_desc.name)
dump.add_newline()
for bound_imp_ref in bound_imp_desc.entries:
dump.add_lines(bound_imp_ref.struct.dump(), 4)
dump.add_line('DLL: %s' % bound_imp_ref.name, 4)
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_DELAY_IMPORT'):
dump.add_header('Delay Imported symbols')
for module in self.DIRECTORY_ENTRY_DELAY_IMPORT:
dump.add_lines(module.struct.dump())
dump.add_newline()
for symbol in module.imports:
if symbol.import_by_ordinal is True:
dump.add('%s Ordinal[%s] (Imported by Ordinal)' % (
module.dll, str(symbol.ordinal)))
else:
dump.add('%s.%s Hint[%s]' % (
module.dll, symbol.name, str(symbol.hint)))
if symbol.bound:
dump.add_line(' Bound: 0x%08X' % (symbol.bound))
else:
dump.add_newline()
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_RESOURCE'):
dump.add_header('Resource directory')
dump.add_lines(self.DIRECTORY_ENTRY_RESOURCE.struct.dump())
for resource_type in self.DIRECTORY_ENTRY_RESOURCE.entries:
if resource_type.name is not None:
dump.add_line('Name: [%s]' % resource_type.name, 2)
else:
dump.add_line('Id: [0x%X] (%s)' % (
resource_type.struct.Id, RESOURCE_TYPE.get(
resource_type.struct.Id, '-')),
2)
dump.add_lines(resource_type.struct.dump(), 2)
if hasattr(resource_type, 'directory'):
dump.add_lines(resource_type.directory.struct.dump(), 4)
for resource_id in resource_type.directory.entries:
if resource_id.name is not None:
dump.add_line('Name: [%s]' % resource_id.name, 6)
else:
dump.add_line('Id: [0x%X]' % resource_id.struct.Id, 6)
dump.add_lines(resource_id.struct.dump(), 6)
if hasattr(resource_id, 'directory'):
dump.add_lines(resource_id.directory.struct.dump(), 8)
for resource_lang in resource_id.directory.entries:
# dump.add_line('\\--- LANG [%d,%d][%s]' % (
# resource_lang.data.lang,
# resource_lang.data.sublang,
# LANG[resource_lang.data.lang]), 8)
dump.add_lines(resource_lang.struct.dump(), 10)
dump.add_lines(resource_lang.data.struct.dump(), 12)
dump.add_newline()
dump.add_newline()
if ( hasattr(self, 'DIRECTORY_ENTRY_TLS') and
self.DIRECTORY_ENTRY_TLS and
self.DIRECTORY_ENTRY_TLS.struct ):
dump.add_header('TLS')
dump.add_lines(self.DIRECTORY_ENTRY_TLS.struct.dump())
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_DEBUG'):
dump.add_header('Debug information')
for dbg in self.DIRECTORY_ENTRY_DEBUG:
dump.add_lines(dbg.struct.dump())
try:
dump.add_line('Type: '+DEBUG_TYPE[dbg.struct.Type])
except KeyError:
dump.add_line('Type: 0x%x(Unknown)' % dbg.struct.Type)
dump.add_newline()
if hasattr(self, 'DIRECTORY_ENTRY_BASERELOC'):
dump.add_header('Base relocations')
for base_reloc in self.DIRECTORY_ENTRY_BASERELOC:
dump.add_lines(base_reloc.struct.dump())
for reloc in base_reloc.entries:
try:
dump.add_line('%08Xh %s' % (
reloc.rva, RELOCATION_TYPE[reloc.type][16:]), 4)
except KeyError:
dump.add_line('0x%08X 0x%x(Unknown)' % (
reloc.rva, reloc.type), 4)
dump.add_newline()
return dump.get_text()
# OC Patch
def get_physical_by_rva(self, rva):
"""Gets the physical address in the PE file from an RVA value."""
try:
return self.get_offset_from_rva(rva)
except Exception:
return None
##
# Double-Word get/set
##
def get_data_from_dword(self, dword):
"""Return a four byte string representing the double word value. (little endian)."""
return struct.pack('<L', dword)
def get_dword_from_data(self, data, offset):
"""Convert four bytes of data to a double word (little endian)
'offset' is assumed to index into a dword array. So setting it to
N will return a dword out of the data starting at offset N*4.
Returns None if the data can't be turned into a double word.
"""
if (offset+1)*4 > len(data):
return None
return struct.unpack('<L', data[offset*4:(offset+1)*4])[0]
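# Illustrative note (not part of the original module): a hedged example of the
# little-endian decoding done above, assuming a parsed PE instance named `pe`:
#
#   pe.get_dword_from_data('\x78\x56\x34\x12', 0)    # -> 0x12345678
#   pe.get_dword_from_data('\x78\x56\x34\x12', 1)    # -> None, out of range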
def get_dword_at_rva(self, rva):
"""Return the double word value at the given RVA.
Returns None if the value can't be read, i.e. the RVA can't be mapped
to a file offset.
"""
try:
return self.get_dword_from_data(self.get_data(rva)[:4], 0)
except PEFormatError:
return None
def get_dword_from_offset(self, offset):
"""Return the double word value at the given file offset. (little endian)"""
if offset+4 > len(self.__data__):
return None
return self.get_dword_from_data(self.__data__[offset:offset+4], 0)
def set_dword_at_rva(self, rva, dword):
"""Set the double word value at the file offset corresponding to the given RVA."""
return self.set_bytes_at_rva(rva, self.get_data_from_dword(dword))
def set_dword_at_offset(self, offset, dword):
"""Set the double word value at the given file offset."""
return self.set_bytes_at_offset(offset, self.get_data_from_dword(dword))
##
# Word get/set
##
def get_data_from_word(self, word):
"""Return a two byte string representing the word value. (little endian)."""
return struct.pack('<H', word)
def get_word_from_data(self, data, offset):
"""Convert two bytes of data to a word (little endian)
'offset' is assumed to index into a word array. So setting it to
N will return a word out of the data starting at offset N*2.
Returns None if the data can't be turned into a word.
"""
if (offset+1)*2 > len(data):
return None
return struct.unpack('<H', data[offset*2:(offset+1)*2])[0]
def get_word_at_rva(self, rva):
"""Return the word value at the given RVA.
Returns None if the value can't be read, i.e. the RVA can't be mapped
to a file offset.
"""
try:
return self.get_word_from_data(self.get_data(rva)[:2], 0)
except PEFormatError:
return None
def get_word_from_offset(self, offset):
"""Return the word value at the given file offset. (little endian)"""
if offset+2 > len(self.__data__):
return None
return self.get_word_from_data(self.__data__[offset:offset+2], 0)
def set_word_at_rva(self, rva, word):
"""Set the word value at the file offset corresponding to the given RVA."""
return self.set_bytes_at_rva(rva, self.get_data_from_word(word))
def set_word_at_offset(self, offset, word):
"""Set the word value at the given file offset."""
return self.set_bytes_at_offset(offset, self.get_data_from_word(word))
##
# Quad-Word get/set
##
def get_data_from_qword(self, word):
"""Return a eight byte string representing the quad-word value. (little endian)."""
return struct.pack('<Q', word)
def get_qword_from_data(self, data, offset):
"""Convert eight bytes of data to a word (little endian)
'offset' is assumed to index into a word array. So setting it to
N will return a dword out of the data sarting at offset N*8.
Returns None if the data can't be turned into a quad word.
"""
if (offset+1)*8 > len(data):
return None
return struct.unpack('<Q', data[offset*8:(offset+1)*8])[0]
def get_qword_at_rva(self, rva):
"""Return the quad-word value at the given RVA.
Returns None if the value can't be read, i.e. the RVA can't be mapped
to a file offset.
"""
try:
return self.get_qword_from_data(self.get_data(rva)[:8], 0)
except PEFormatError:
return None
def get_qword_from_offset(self, offset):
"""Return the quad-word value at the given file offset. (little endian)"""
if offset+8 > len(self.__data__):
return None
return self.get_qword_from_data(self.__data__[offset:offset+8], 0)
def set_qword_at_rva(self, rva, qword):
"""Set the quad-word value at the file offset corresponding to the given RVA."""
return self.set_bytes_at_rva(rva, self.get_data_from_qword(qword))
def set_qword_at_offset(self, offset, qword):
"""Set the quad-word value at the given file offset."""
return self.set_bytes_at_offset(offset, self.get_data_from_qword(qword))
##
# Set bytes
##
def set_bytes_at_rva(self, rva, data):
"""Overwrite, with the given string, the bytes at the file offset corresponding to the given RVA.
Return True if successful, False otherwise. It can fail if the
offset is outside the file's boundaries.
"""
offset = self.get_physical_by_rva(rva)
if offset is None:
return False
return self.set_bytes_at_offset(offset, data)
def set_bytes_at_offset(self, offset, data):
"""Overwrite the bytes at the given file offset with the given string.
Return True if successful, False otherwise. It can fail if the
offset is outside the file's boundaries.
"""
if not isinstance(data, str):
raise TypeError('data should be of type: str')
if offset >= 0 and offset < len(self.__data__):
self.__data__ = ( self.__data__[:offset] +
data +
self.__data__[offset+len(data):] )
else:
return False
# Refresh the section's data with the modified information
#
for section in self.sections:
section_data_start = section.PointerToRawData
section_data_end = section_data_start+section.SizeOfRawData
section.data = self.__data__[section_data_start:section_data_end]
return True
def relocate_image(self, new_ImageBase):
"""Apply the relocation information to the image using the provided new image base.
This method will apply the relocation information to the image. Given the new base,
all the relocations will be processed and both the raw data and the section's data
will be fixed accordingly.
The resulting image can then be retrieved through the method:
get_memory_mapped_image()
in order to get something that more closely matches what could be found
in memory once the Windows loader has finished its work.
"""
relocation_difference = new_ImageBase - self.OPTIONAL_HEADER.ImageBase
for reloc in self.DIRECTORY_ENTRY_BASERELOC:
virtual_address = reloc.struct.VirtualAddress
size_of_block = reloc.struct.SizeOfBlock
# We iterate with an index because if the relocation is of type
# IMAGE_REL_BASED_HIGHADJ we need to also process the next entry
# at once and skip it for the next iteration
#
entry_idx = 0
while entry_idx<len(reloc.entries):
entry = reloc.entries[entry_idx]
entry_idx += 1
if entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_ABSOLUTE']:
# Nothing to do for this type of relocation
pass
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGH']:
# Fix the high 16bits of a relocation
#
# Add high 16bits of relocation_difference to the
# 16bit value at RVA=entry.rva
self.set_word_at_rva(
entry.rva,
( self.get_word_at_rva(entry.rva) + relocation_difference>>16)&0xffff )
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_LOW']:
# Fix the low 16bits of a relocation
#
# Add low 16 bits of relocation_difference to the 16bit value
# at RVA=entry.rva
self.set_word_at_rva(
entry.rva,
( self.get_word_at_rva(entry.rva) + relocation_difference)&0xffff)
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGHLOW']:
# Handle all high and low parts of a 32bit relocation
#
# Add relocation_difference to the value at RVA=entry.rva
self.set_dword_at_rva(
entry.rva,
self.get_dword_at_rva(entry.rva)+relocation_difference)
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_HIGHADJ']:
# Fix the high 16bits of a relocation and adjust
#
# Add high 16bits of relocation_difference to the 32bit value
# composed from the (16bit value at RVA=entry.rva)<<16 plus
# the 16bit value at the next relocation entry.
#
# If the next entry is beyond the array's limits,
# abort... the table is corrupt
#
if entry_idx == len(reloc.entries):
break
next_entry = reloc.entries[entry_idx]
entry_idx += 1
self.set_word_at_rva( entry.rva,
((self.get_word_at_rva(entry.rva)<<16) + next_entry.rva +
relocation_difference & 0xffff0000) >> 16 )
elif entry.type == RELOCATION_TYPE['IMAGE_REL_BASED_DIR64']:
# Apply the difference to the 64bit value at the offset
# RVA=entry.rva
self.set_qword_at_rva(
entry.rva,
self.get_qword_at_rva(entry.rva) + relocation_difference)
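# Illustrative note (not part of the original module): a hedged usage sketch,
# assuming a parsed PE instance named `pe` whose base relocations were parsed:
#
#   pe.relocate_image(0x500000)             # rebase the raw and section data
#   image = pe.get_memory_mapped_image()    # image as it would look at 0x500000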
def verify_checksum(self):
return self.OPTIONAL_HEADER.CheckSum == self.generate_checksum()
def generate_checksum(self):
# Get the offset to the CheckSum field in the OptionalHeader
#
checksum_offset = self.OPTIONAL_HEADER.__file_offset__ + 0x40 # 64
checksum = 0
for i in range( len(self.__data__) / 4 ):
# Skip the checksum field
#
if i == checksum_offset / 4:
continue
dword = struct.unpack('<L', self.__data__[ i*4 : i*4+4 ])[0]
checksum = (checksum & 0xffffffff) + dword + (checksum>>32)
if checksum > 2**32:
checksum = (checksum & 0xffffffff) + (checksum >> 32)
checksum = (checksum & 0xffff) + (checksum >> 16)
checksum = (checksum) + (checksum >> 16)
checksum = checksum & 0xffff
return checksum + len(self.__data__)
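# Illustrative note (not part of the original module): a hedged sketch of the
# typical round trip, assuming a parsed PE instance named `pe`:
#
#   if not pe.verify_checksum():
#       pe.OPTIONAL_HEADER.CheckSum = pe.generate_checksum()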
| bsd-3-clause |
pekeler/arangodb | 3rdParty/V8-4.3.61/build/gyp/test/generator-output/gyptest-depth.py | 232 | 1561 | #!/usr/bin/env python
# Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a project hierarchy created when the --generator-output=
and --depth= options are used to put the build configuration files in a separate
directory tree.
"""
import TestGyp
import os
# This is a regression test for the make generator only.
test = TestGyp.TestGyp(formats=['make'])
test.writable(test.workpath('src'), False)
toplevel_dir = os.path.basename(test.workpath())
test.run_gyp(os.path.join(toplevel_dir, 'src', 'prog1.gyp'),
'-Dset_symroot=1',
'--generator-output=gypfiles',
depth=toplevel_dir,
chdir='..')
test.writable(test.workpath('src/build'), True)
test.writable(test.workpath('src/subdir2/build'), True)
test.writable(test.workpath('src/subdir3/build'), True)
test.build('prog1.gyp', test.ALL, chdir='gypfiles')
chdir = 'gypfiles'
expect = """\
Hello from %s
Hello from inc.h
Hello from inc1/include1.h
Hello from inc2/include2.h
Hello from inc3/include3.h
Hello from subdir2/deeper/deeper.h
"""
if test.format == 'xcode':
chdir = 'src'
test.run_built_executable('prog1', chdir=chdir, stdout=expect % 'prog1.c')
if test.format == 'xcode':
chdir = 'src/subdir2'
test.run_built_executable('prog2', chdir=chdir, stdout=expect % 'prog2.c')
if test.format == 'xcode':
chdir = 'src/subdir3'
test.run_built_executable('prog3', chdir=chdir, stdout=expect % 'prog3.c')
test.pass_test()
| apache-2.0 |
joelddiaz/openshift-tools | openshift/installer/vendored/openshift-ansible-3.4.40/filter_plugins/oo_filters.py | 9 | 43540 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
"""
Custom filters for use in openshift-ansible
"""
from ansible import errors
from collections import Mapping
from distutils.util import strtobool
from distutils.version import LooseVersion
from operator import itemgetter
import OpenSSL.crypto
import os
import pdb
import pkg_resources
import re
import json
import yaml
from ansible.parsing.yaml.dumper import AnsibleDumper
from urlparse import urlparse
try:
# ansible-2.2
# ansible.utils.unicode.to_unicode is deprecated in ansible-2.2,
# ansible.module_utils._text.to_text should be used instead.
from ansible.module_utils._text import to_text
except ImportError:
# ansible-2.1
from ansible.utils.unicode import to_unicode as to_text
# Disabling too-many-public-methods, since filter methods are necessarily
# public
# pylint: disable=too-many-public-methods
class FilterModule(object):
""" Custom ansible filters """
@staticmethod
def oo_pdb(arg):
""" This pops you into a pdb instance where arg is the data passed in
from the filter.
Ex: "{{ hostvars | oo_pdb }}"
"""
pdb.set_trace()
return arg
@staticmethod
def get_attr(data, attribute=None):
""" This looks up dictionary attributes of the form a.b.c and returns
the value.
If the key isn't present, None is returned.
Ex: data = {'a': {'b': {'c': 5}}}
attribute = "a.b.c"
returns 5
"""
if not attribute:
raise errors.AnsibleFilterError("|failed expects attribute to be set")
ptr = data
for attr in attribute.split('.'):
if attr in ptr:
ptr = ptr[attr]
else:
ptr = None
break
return ptr
@staticmethod
def oo_flatten(data):
""" This filter plugin will flatten a list of lists
"""
if not isinstance(data, list):
raise errors.AnsibleFilterError("|failed expects to flatten a List")
return [item for sublist in data for item in sublist]
@staticmethod
def oo_merge_dicts(first_dict, second_dict):
""" Merge two dictionaries where second_dict values take precedence.
Ex: first_dict={'a': 1, 'b': 2}
second_dict={'b': 3, 'c': 4}
returns {'a': 1, 'b': 3, 'c': 4}
"""
if not isinstance(first_dict, dict) or not isinstance(second_dict, dict):
raise errors.AnsibleFilterError("|failed expects to merge two dicts")
merged = first_dict.copy()
merged.update(second_dict)
return merged
@staticmethod
def oo_merge_hostvars(hostvars, variables, inventory_hostname):
""" Merge host and play variables.
When ansible version is greater than or equal to 2.0.0,
merge hostvars[inventory_hostname] with variables (ansible vars)
otherwise merge hostvars with hostvars['inventory_hostname'].
Ex: hostvars={'master1.example.com': {'openshift_variable': '3'},
'openshift_other_variable': '7'}
variables={'openshift_other_variable': '6'}
inventory_hostname='master1.example.com'
returns {'openshift_variable': '3', 'openshift_other_variable': '7'}
hostvars=<ansible.vars.hostvars.HostVars object> (Mapping)
variables={'openshift_other_variable': '6'}
inventory_hostname='master1.example.com'
returns {'openshift_variable': '3', 'openshift_other_variable': '6'}
"""
if not isinstance(hostvars, Mapping):
raise errors.AnsibleFilterError("|failed expects hostvars is dictionary or object")
if not isinstance(variables, dict):
raise errors.AnsibleFilterError("|failed expects variables is a dictionary")
if not isinstance(inventory_hostname, basestring):
raise errors.AnsibleFilterError("|failed expects inventory_hostname is a string")
# pylint: disable=no-member
ansible_version = pkg_resources.get_distribution("ansible").version
merged_hostvars = {}
if LooseVersion(ansible_version) >= LooseVersion('2.0.0'):
merged_hostvars = FilterModule.oo_merge_dicts(hostvars[inventory_hostname],
variables)
else:
merged_hostvars = FilterModule.oo_merge_dicts(hostvars[inventory_hostname],
hostvars)
return merged_hostvars
@staticmethod
def oo_collect(data, attribute=None, filters=None):
""" This takes a list of dict and collects all attributes specified into a
list. If filter is specified then we will include all items that
match _ALL_ of filters. If a dict entry is missing the key in a
filter it will be excluded from the match.
Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
{'a':2, 'z': 'z'}, # True, return
{'a':3, 'z': 'z'}, # True, return
{'a':4, 'z': 'b'}, # FAILED, obj['z'] != filters['z']
]
attribute = 'a'
filters = {'z': 'z'}
returns [1, 2, 3]
"""
if not isinstance(data, list):
raise errors.AnsibleFilterError("|failed expects to filter on a List")
if not attribute:
raise errors.AnsibleFilterError("|failed expects attribute to be set")
if filters is not None:
if not isinstance(filters, dict):
raise errors.AnsibleFilterError("|failed expects filter to be a"
" dict")
retval = [FilterModule.get_attr(d, attribute) for d in data if (
all([d.get(key, None) == filters[key] for key in filters]))]
else:
retval = [FilterModule.get_attr(d, attribute) for d in data]
retval = [val for val in retval if val != None]
return retval
@staticmethod
def oo_select_keys_from_list(data, keys):
""" This returns a list, which contains the value portions for the keys
Ex: data = [ { 'a':1, 'b':2, 'c':3 }, { 'a':4, 'c':5 } ]
keys = ['a', 'c']
returns [1, 3, 4, 5]
"""
if not isinstance(data, list):
raise errors.AnsibleFilterError("|failed expects to filter on a list")
if not isinstance(keys, list):
raise errors.AnsibleFilterError("|failed expects first param is a list")
# Gather up the values for the list of keys passed in
retval = [FilterModule.oo_select_keys(item, keys) for item in data]
return FilterModule.oo_flatten(retval)
@staticmethod
def oo_select_keys(data, keys):
""" This returns a list, which contains the value portions for the keys
Ex: data = { 'a':1, 'b':2, 'c':3 }
keys = ['a', 'c']
returns [1, 3]
"""
if not isinstance(data, Mapping):
raise errors.AnsibleFilterError("|failed expects to filter on a dict or object")
if not isinstance(keys, list):
raise errors.AnsibleFilterError("|failed expects first param is a list")
# Gather up the values for the list of keys passed in
retval = [data[key] for key in keys if key in data]
return retval
@staticmethod
def oo_prepend_strings_in_list(data, prepend):
""" This takes a list of strings and prepends a string to each item in the
list
Ex: data = ['cart', 'tree']
prepend = 'apple-'
returns ['apple-cart', 'apple-tree']
"""
if not isinstance(data, list):
raise errors.AnsibleFilterError("|failed expects first param is a list")
if not all(isinstance(x, basestring) for x in data):
raise errors.AnsibleFilterError("|failed expects first param is a list"
" of strings")
retval = [prepend + s for s in data]
return retval
@staticmethod
def oo_combine_key_value(data, joiner='='):
"""Take a list of dict in the form of { 'key': 'value'} and
arrange them as a list of strings ['key=value']
"""
if not isinstance(data, list):
raise errors.AnsibleFilterError("|failed expects first param is a list")
rval = []
for item in data:
rval.append("%s%s%s" % (item['key'], joiner, item['value']))
return rval
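# Illustrative sketch (added comment; sample data is hypothetical): each list
# entry must carry explicit 'key' and 'value' fields.
#   FilterModule.oo_combine_key_value(
#       [{'key': 'region', 'value': 'infra'}, {'key': 'zone', 'value': 'default'}])
#   => ['region=infra', 'zone=default']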
@staticmethod
def oo_combine_dict(data, in_joiner='=', out_joiner=' '):
"""Take a dict in the form of { 'key': 'value', 'key': 'value' } and
arrange them as a string 'key=value key=value'
"""
if not isinstance(data, dict):
raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_combine_dict]. Got %s. Type: %s" % (str(data), str(type(data))))
return out_joiner.join([in_joiner.join([k, str(v)]) for k, v in data.items()])
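# Illustrative sketch (added comment; sample data is hypothetical): the dict is
# flattened into a single joiner-delimited string, e.g. for node label options.
#   FilterModule.oo_combine_dict({'region': 'infra', 'zone': 'default'})
#   => 'region=infra zone=default'  (ordering follows dict iteration order)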
@staticmethod
def oo_ami_selector(data, image_name):
""" This takes a list of amis and an image name and attempts to return
the latest ami.
"""
if not isinstance(data, list):
raise errors.AnsibleFilterError("|failed expects first param is a list")
if not data:
return None
else:
if image_name is None or not image_name.endswith('_*'):
ami = sorted(data, key=itemgetter('name'), reverse=True)[0]
return ami['ami_id']
else:
ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data]
ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0]
return ami['ami_id']
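# Illustrative sketch (added comment; AMI data is hypothetical): with a
# wildcard image name the last '_'-separated component of each AMI name decides
# the ordering, otherwise the full AMI name does.
#   amis = [{'name': 'ami_2015_05', 'ami_id': 'ami-111'},
#           {'name': 'ami_2015_07', 'ami_id': 'ami-222'}]
#   FilterModule.oo_ami_selector(amis, 'ami_*')  => 'ami-222'
#   FilterModule.oo_ami_selector(amis, None)     => 'ami-222'  (newest by name)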
@staticmethod
def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False):
""" This takes a dictionary of volume definitions and returns a valid ec2
volume definition based on the host_type and the values in the
dictionary.
The dictionary should look similar to this:
{ 'master':
{ 'root':
{ 'volume_size': 10, 'device_type': 'gp2',
'iops': 500
},
'docker':
{ 'volume_size': 40, 'device_type': 'gp2',
'iops': 500, 'ephemeral': 'true'
}
},
'node':
{ 'root':
{ 'volume_size': 10, 'device_type': 'io1',
'iops': 1000
},
'docker':
{ 'volume_size': 40, 'device_type': 'gp2',
'iops': 500, 'ephemeral': 'true'
}
}
}
"""
if not isinstance(data, dict):
raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_ec2_volume_def]. Got %s. Type: %s" % (str(data), str(type(data))))
if host_type not in ['master', 'node', 'etcd']:
raise errors.AnsibleFilterError("|failed expects etcd, master or node"
" as the host type")
root_vol = data[host_type]['root']
root_vol['device_name'] = '/dev/sda1'
root_vol['delete_on_termination'] = True
if root_vol['device_type'] != 'io1':
root_vol.pop('iops', None)
if host_type in ['master', 'node'] and 'docker' in data[host_type]:
docker_vol = data[host_type]['docker']
docker_vol['device_name'] = '/dev/xvdb'
docker_vol['delete_on_termination'] = True
if docker_vol['device_type'] != 'io1':
docker_vol.pop('iops', None)
if docker_ephemeral:
docker_vol.pop('device_type', None)
docker_vol.pop('delete_on_termination', None)
docker_vol['ephemeral'] = 'ephemeral0'
return [root_vol, docker_vol]
elif host_type == 'etcd' and 'etcd' in data[host_type]:
etcd_vol = data[host_type]['etcd']
etcd_vol['device_name'] = '/dev/xvdb'
etcd_vol['delete_on_termination'] = True
if etcd_vol['device_type'] != 'io1':
etcd_vol.pop('iops', None)
return [root_vol, etcd_vol]
return [root_vol]
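# Illustrative sketch (added comment; volume spec is hypothetical): a 'node'
# host with a non-ephemeral docker volume yields the root volume on /dev/sda1
# plus the docker volume on /dev/xvdb, with 'iops' dropped for non-io1 devices.
#   volumes = {'node': {'root': {'volume_size': 10, 'device_type': 'gp2', 'iops': 500},
#                       'docker': {'volume_size': 40, 'device_type': 'gp2', 'iops': 500}}}
#   FilterModule.oo_ec2_volume_definition(volumes, 'node')
#   => [{'volume_size': 10, 'device_type': 'gp2', 'device_name': '/dev/sda1', 'delete_on_termination': True},
#       {'volume_size': 40, 'device_type': 'gp2', 'device_name': '/dev/xvdb', 'delete_on_termination': True}]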
@staticmethod
def oo_split(string, separator=','):
""" This splits the input string into a list. If the input string is
already a list we will return it as is.
"""
if isinstance(string, list):
return string
return string.split(separator)
@staticmethod
def oo_haproxy_backend_masters(hosts, port):
""" This takes an array of dicts and returns an array of dicts
to be used as a backend for the haproxy role
"""
servers = []
for idx, host_info in enumerate(hosts):
server = dict(name="master%s" % idx)
server_ip = host_info['openshift']['common']['ip']
server['address'] = "%s:%s" % (server_ip, port)
server['opts'] = 'check'
servers.append(server)
return servers
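# Illustrative sketch (added comment; hostvars are hypothetical): each master
# becomes one haproxy backend server entry named master<index>.
#   hosts = [{'openshift': {'common': {'ip': '10.0.0.1'}}},
#            {'openshift': {'common': {'ip': '10.0.0.2'}}}]
#   FilterModule.oo_haproxy_backend_masters(hosts, 8443)
#   => [{'name': 'master0', 'address': '10.0.0.1:8443', 'opts': 'check'},
#       {'name': 'master1', 'address': '10.0.0.2:8443', 'opts': 'check'}]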
@staticmethod
def oo_filter_list(data, filter_attr=None):
""" This returns a list, which contains all items where filter_attr
evaluates to true
Ex: data = [ { a: 1, b: True },
{ a: 3, b: False },
{ a: 5, b: True } ]
filter_attr = 'b'
returns [ { a: 1, b: True },
{ a: 5, b: True } ]
"""
if not isinstance(data, list):
raise errors.AnsibleFilterError("|failed expects to filter on a list")
if not isinstance(filter_attr, basestring):
raise errors.AnsibleFilterError("|failed expects filter_attr is a str or unicode")
# Gather up the values for the list of keys passed in
return [x for x in data if filter_attr in x and x[filter_attr]]
@staticmethod
def oo_nodes_with_label(nodes, label, value=None):
""" Filters a list of nodes by label and value (if provided)
It handles labels that are in the following variables by priority:
openshift_node_labels, cli_openshift_node_labels, openshift['node']['labels']
Examples:
data = [{'openshift_node_labels': {'color': 'blue', 'size': 'M'}},
{'openshift_node_labels': {'color': 'green', 'size': 'L'}},
{'openshift_node_labels': {'size': 'S'}}]
label = 'color'
returns = [{'openshift_node_labels': {'color': 'blue', 'size': 'M'}},
{'openshift_node_labels': {'color': 'green', 'size': 'L'}}]
data = [{'openshift_node_labels': {'color': 'blue', 'size': 'M'}},
{'openshift_node_labels': {'color': 'green', 'size': 'L'}},
{'openshift_node_labels': {'size': 'S'}}]
label = 'color'
value = 'green'
returns = [{'openshift_node_labels': {'color': 'green', 'size': 'L'}}]
Args:
nodes (list[dict]): list of node to node variables
label (str): label to filter `nodes` by
value (Optional[str]): value of `label` to filter by. Defaults
to None.
Returns:
list[dict]: nodes filtered by label and value (if provided)
"""
if not isinstance(nodes, list):
raise errors.AnsibleFilterError("failed expects to filter on a list")
if not isinstance(label, basestring):
raise errors.AnsibleFilterError("failed expects label to be a string")
if value is not None and not isinstance(value, basestring):
raise errors.AnsibleFilterError("failed expects value to be a string")
def label_filter(node):
""" filter function for testing if node should be returned """
if not isinstance(node, dict):
raise errors.AnsibleFilterError("failed expects to filter on a list of dicts")
if 'openshift_node_labels' in node:
labels = node['openshift_node_labels']
elif 'cli_openshift_node_labels' in node:
labels = node['cli_openshift_node_labels']
elif 'openshift' in node and 'node' in node['openshift'] and 'labels' in node['openshift']['node']:
labels = node['openshift']['node']['labels']
else:
return False
if isinstance(labels, basestring):
labels = yaml.safe_load(labels)
if not isinstance(labels, dict):
raise errors.AnsibleFilterError(
"failed expected node labels to be a dict or serializable to a dict"
)
return label in labels and (value is None or labels[label] == value)
return [n for n in nodes if label_filter(n)]
@staticmethod
def oo_parse_heat_stack_outputs(data):
""" Formats the HEAT stack output into a usable form
The goal is to transform something like this:
+---------------+-------------------------------------------------+
| Property | Value |
+---------------+-------------------------------------------------+
| capabilities | [] |
| creation_time | 2015-06-26T12:26:26Z |
| description | OpenShift cluster |
| … | … |
| outputs | [ |
| | { |
| | "output_value": "value_A" |
| | "description": "This is the value of Key_A" |
| | "output_key": "Key_A" |
| | }, |
| | { |
| | "output_value": [ |
| | "value_B1", |
| | "value_B2" |
| | ], |
| | "description": "This is the value of Key_B" |
| | "output_key": "Key_B" |
| | }, |
| | ] |
| parameters | { |
| … | … |
+---------------+-------------------------------------------------+
into something like this:
{
"Key_A": "value_A",
"Key_B": [
"value_B1",
"value_B2"
]
}
"""
# Extract the “outputs” JSON snippet from the pretty-printed array
in_outputs = False
outputs = ''
line_regex = re.compile(r'\|\s*(.*?)\s*\|\s*(.*?)\s*\|')
for line in data['stdout_lines']:
match = line_regex.match(line)
if match:
if match.group(1) == 'outputs':
in_outputs = True
elif match.group(1) != '':
in_outputs = False
if in_outputs:
outputs += match.group(2)
outputs = json.loads(outputs)
# Revamp the “outputs” to put it in the form of a “Key: value” map
revamped_outputs = {}
for output in outputs:
revamped_outputs[output['output_key']] = output['output_value']
return revamped_outputs
@staticmethod
# pylint: disable=too-many-branches
def oo_parse_named_certificates(certificates, named_certs_dir, internal_hostnames):
""" Parses names from list of certificate hashes.
Ex: certificates = [{ "certfile": "/root/custom1.crt",
"keyfile": "/root/custom1.key",
"cafile": "/root/custom-ca1.crt" },
{ "certfile": "custom2.crt",
"keyfile": "custom2.key",
"cafile": "custom-ca2.crt" }]
returns [{ "certfile": "/etc/origin/master/named_certificates/custom1.crt",
"keyfile": "/etc/origin/master/named_certificates/custom1.key",
"cafile": "/etc/origin/master/named_certificates/custom-ca1.crt",
"names": [ "public-master-host.com",
"other-master-host.com" ] },
{ "certfile": "/etc/origin/master/named_certificates/custom2.crt",
"keyfile": "/etc/origin/master/named_certificates/custom2.key",
"cafile": "/etc/origin/master/named_certificates/custom-ca-2.crt",
"names": [ "some-hostname.com" ] }]
"""
if not isinstance(named_certs_dir, basestring):
raise errors.AnsibleFilterError("|failed expects named_certs_dir is str or unicode")
if not isinstance(internal_hostnames, list):
raise errors.AnsibleFilterError("|failed expects internal_hostnames is list")
for certificate in certificates:
if 'names' in certificate.keys():
continue
else:
certificate['names'] = []
if not os.path.isfile(certificate['certfile']) or not os.path.isfile(certificate['keyfile']):
raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" %
(certificate['certfile'], certificate['keyfile']))
try:
st_cert = open(certificate['certfile'], 'rt').read()
cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert)
certificate['names'].append(str(cert.get_subject().commonName.decode()))
for i in range(cert.get_extension_count()):
if cert.get_extension(i).get_short_name() == 'subjectAltName':
for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '):
certificate['names'].append(name)
except:
raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] +
"please specify certificate names in host inventory"))
certificate['names'] = list(set(certificate['names']))
if 'cafile' not in certificate:
certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames]
if not certificate['names']:
raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] +
"detected a collision with internal hostname, please specify " +
"certificate names in host inventory"))
for certificate in certificates:
# Update paths for configuration
certificate['certfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['certfile']))
certificate['keyfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['keyfile']))
if 'cafile' in certificate:
certificate['cafile'] = os.path.join(named_certs_dir, os.path.basename(certificate['cafile']))
return certificates
@staticmethod
def oo_pretty_print_cluster(data, prefix='tag_'):
""" Read a subset of hostvars and build a summary of the cluster
in the following layout:
"c_id": {
"master": {
"default": [
{ "name": "c_id-master-12345", "public IP": "172.16.0.1", "private IP": "192.168.0.1" }
]
"node": {
"infra": [
{ "name": "c_id-node-infra-23456", "public IP": "172.16.0.2", "private IP": "192.168.0.2" }
],
"compute": [
{ "name": "c_id-node-compute-23456", "public IP": "172.16.0.3", "private IP": "192.168.0.3" },
...
]
}
"""
def _get_tag_value(tags, key):
""" Extract values of a map implemented as a set.
Ex: tags = { 'tag_foo_value1', 'tag_bar_value2', 'tag_baz_value3' }
key = 'bar'
returns 'value2'
"""
for tag in tags:
if tag[:len(prefix)+len(key)] == prefix + key:
return tag[len(prefix)+len(key)+1:]
raise KeyError(key)
def _add_host(clusters,
clusterid,
host_type,
sub_host_type,
host):
""" Add a new host in the clusters data structure """
if clusterid not in clusters:
clusters[clusterid] = {}
if host_type not in clusters[clusterid]:
clusters[clusterid][host_type] = {}
if sub_host_type not in clusters[clusterid][host_type]:
clusters[clusterid][host_type][sub_host_type] = []
clusters[clusterid][host_type][sub_host_type].append(host)
clusters = {}
for host in data:
try:
_add_host(clusters=clusters,
clusterid=_get_tag_value(host['group_names'], 'clusterid'),
host_type=_get_tag_value(host['group_names'], 'host-type'),
sub_host_type=_get_tag_value(host['group_names'], 'sub-host-type'),
host={'name': host['inventory_hostname'],
'public IP': host['ansible_ssh_host'],
'private IP': host['ansible_default_ipv4']['address']})
except KeyError:
pass
return clusters
@staticmethod
def oo_generate_secret(num_bytes):
""" generate a session secret """
if not isinstance(num_bytes, int):
raise errors.AnsibleFilterError("|failed expects num_bytes is int")
secret = os.urandom(num_bytes)
return secret.encode('base-64').strip()
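# Illustrative sketch (added comment): the secret is just base64-encoded random
# bytes, so a 24-byte request yields a 32-character string (trailing newline
# stripped); the value differs on every call.
#   FilterModule.oo_generate_secret(24)  # => e.g. 'mGh3...'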
@staticmethod
def to_padded_yaml(data, level=0, indent=2, **kw):
""" returns a yaml snippet padded to match the indent level you specify """
if data in [None, ""]:
return ""
try:
transformed = yaml.dump(data, indent=indent, allow_unicode=True,
default_flow_style=False,
Dumper=AnsibleDumper, **kw)
padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()])
return to_text("\n{0}".format(padded))
except Exception as my_e:
raise errors.AnsibleFilterError('Failed to convert: %s' % my_e)
@staticmethod
def oo_openshift_env(hostvars):
''' Return facts which begin with "openshift_" and translate
legacy facts to their openshift_env counterparts.
Ex: hostvars = {'openshift_fact': 42,
'theyre_taking_the_hobbits_to': 'isengard'}
returns = {'openshift_fact': 42}
'''
if not issubclass(type(hostvars), dict):
raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
facts = {}
regex = re.compile('^openshift_.*')
for key in hostvars:
if regex.match(key):
facts[key] = hostvars[key]
migrations = {'openshift_router_selector': 'openshift_hosted_router_selector',
'openshift_registry_selector': 'openshift_hosted_registry_selector'}
for old_fact, new_fact in migrations.iteritems():
if old_fact in facts and new_fact not in facts:
facts[new_fact] = facts[old_fact]
return facts
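# Illustrative sketch (added comment; facts are hypothetical): besides keeping
# only openshift_* keys, legacy selector facts are copied to their hosted
# counterparts when the new name is not already present.
#   FilterModule.oo_openshift_env({'openshift_router_selector': 'region=infra', 'other': 1})
#   => {'openshift_router_selector': 'region=infra',
#       'openshift_hosted_router_selector': 'region=infra'}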
@staticmethod
# pylint: disable=too-many-branches
def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
""" Generate list of persistent volumes based on oo_openshift_env
storage options set in host variables.
"""
if not issubclass(type(hostvars), dict):
raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
if not issubclass(type(groups), dict):
raise errors.AnsibleFilterError("|failed expects groups is a dict")
if persistent_volumes != None and not issubclass(type(persistent_volumes), list):
raise errors.AnsibleFilterError("|failed expects persistent_volumes is a list")
if persistent_volumes == None:
persistent_volumes = []
if 'hosted' in hostvars['openshift']:
for component in hostvars['openshift']['hosted']:
if 'storage' in hostvars['openshift']['hosted'][component]:
params = hostvars['openshift']['hosted'][component]['storage']
kind = params['kind']
create_pv = params['create_pv']
if kind != None and create_pv:
if kind == 'nfs':
host = params['host']
if host == None:
if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
host = groups['oo_nfs_to_config'][0]
else:
raise errors.AnsibleFilterError("|failed no storage host detected")
directory = params['nfs']['directory']
volume = params['volume']['name']
path = directory + '/' + volume
size = params['volume']['size']
access_modes = params['access_modes']
persistent_volume = dict(
name="{0}-volume".format(volume),
capacity=size,
access_modes=access_modes,
storage=dict(
nfs=dict(
server=host,
path=path)))
persistent_volumes.append(persistent_volume)
elif kind == 'openstack':
volume = params['volume']['name']
size = params['volume']['size']
access_modes = params['access_modes']
filesystem = params['openstack']['filesystem']
volume_id = params['openstack']['volumeID']
persistent_volume = dict(
name="{0}-volume".format(volume),
capacity=size,
access_modes=access_modes,
storage=dict(
cinder=dict(
fsType=filesystem,
volumeID=volume_id)))
persistent_volumes.append(persistent_volume)
elif not (kind == 'object' or kind == 'dynamic'):
msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
kind,
component)
raise errors.AnsibleFilterError(msg)
return persistent_volumes
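# Illustrative sketch (added comment; hostvars are hypothetical): an NFS-backed
# hosted registry with create_pv enabled produces one PV definition whose path
# is the export directory joined with the volume name.
#   hostvars = {'openshift': {'hosted': {'registry': {'storage': {
#       'kind': 'nfs', 'create_pv': True, 'host': 'nfs.example.com',
#       'access_modes': ['ReadWriteMany'],
#       'nfs': {'directory': '/exports'},
#       'volume': {'name': 'registry', 'size': '10Gi'}}}}}}
#   FilterModule.oo_persistent_volumes(hostvars, groups={})
#   => [{'name': 'registry-volume', 'capacity': '10Gi',
#        'access_modes': ['ReadWriteMany'],
#        'storage': {'nfs': {'server': 'nfs.example.com', 'path': '/exports/registry'}}}]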
@staticmethod
def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None):
""" Generate list of persistent volume claims based on oo_openshift_env
storage options set in host variables.
"""
if not issubclass(type(hostvars), dict):
raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
if persistent_volume_claims != None and not issubclass(type(persistent_volume_claims), list):
raise errors.AnsibleFilterError("|failed expects persistent_volume_claims is a list")
if persistent_volume_claims == None:
persistent_volume_claims = []
if 'hosted' in hostvars['openshift']:
for component in hostvars['openshift']['hosted']:
if 'storage' in hostvars['openshift']['hosted'][component]:
params = hostvars['openshift']['hosted'][component]['storage']
kind = params['kind']
create_pv = params['create_pv']
create_pvc = params['create_pvc']
if kind not in [None, 'object'] and create_pv and create_pvc:
volume = params['volume']['name']
size = params['volume']['size']
access_modes = params['access_modes']
persistent_volume_claim = dict(
name="{0}-claim".format(volume),
capacity=size,
access_modes=access_modes)
persistent_volume_claims.append(persistent_volume_claim)
return persistent_volume_claims
@staticmethod
def oo_31_rpm_rename_conversion(rpms, openshift_version=None):
""" Filters a list of 3.0 rpms and return the corresponding 3.1 rpms
names with proper version (if provided)
If 3.1 rpms are passed in they will only be augmented with the
correct version. This is important for hosts that are running both
Masters and Nodes.
"""
if not isinstance(rpms, list):
raise errors.AnsibleFilterError("failed expects to filter on a list")
if openshift_version is not None and not isinstance(openshift_version, basestring):
raise errors.AnsibleFilterError("failed expects openshift_version to be a string")
rpms_31 = []
for rpm in rpms:
if 'atomic' not in rpm:
rpm = rpm.replace("openshift", "atomic-openshift")
if openshift_version:
rpm = rpm + openshift_version
rpms_31.append(rpm)
return rpms_31
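# Illustrative sketch (added comment; package list is hypothetical): 3.0 names
# gain the atomic- prefix and the requested version string is appended as-is.
#   FilterModule.oo_31_rpm_rename_conversion(
#       ['openshift', 'openshift-master'], openshift_version='-3.1.1.6')
#   => ['atomic-openshift-3.1.1.6', 'atomic-openshift-master-3.1.1.6']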
@staticmethod
def oo_pods_match_component(pods, deployment_type, component):
""" Filters a list of Pods and returns the ones matching the deployment_type and component
"""
if not isinstance(pods, list):
raise errors.AnsibleFilterError("failed expects to filter on a list")
if not isinstance(deployment_type, basestring):
raise errors.AnsibleFilterError("failed expects deployment_type to be a string")
if not isinstance(component, basestring):
raise errors.AnsibleFilterError("failed expects component to be a string")
image_prefix = 'openshift/origin-'
if deployment_type in ['enterprise', 'online', 'openshift-enterprise']:
image_prefix = 'openshift3/ose-'
elif deployment_type == 'atomic-enterprise':
image_prefix = 'aep3_beta/aep-'
matching_pods = []
image_regex = image_prefix + component + r'.*'
for pod in pods:
for container in pod['spec']['containers']:
if re.search(image_regex, container['image']):
matching_pods.append(pod)
break # stop here, don't add a pod more than once
return matching_pods
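# Illustrative sketch (added comment; pod data is hypothetical): the image
# prefix follows the deployment type, so an origin deployment matches
# openshift/origin-<component> images.
#   pods = [{'spec': {'containers': [{'image': 'openshift/origin-docker-registry:v1.1'}]}}]
#   FilterModule.oo_pods_match_component(pods, 'origin', 'docker-registry')
#   => [the registry pod above]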
@staticmethod
def oo_get_hosts_from_hostvars(hostvars, hosts):
""" Return a list of hosts from hostvars """
retval = []
for host in hosts:
try:
retval.append(hostvars[host])
except errors.AnsibleError as _:
# host does not exist
pass
return retval
@staticmethod
def oo_image_tag_to_rpm_version(version, include_dash=False):
""" Convert an image tag string to an RPM version if necessary
Empty strings and strings that are already in rpm version format
are ignored. Also remove non semantic version components.
Ex. v3.2.0.10 -> -3.2.0.10
v1.2.0-rc1 -> -1.2.0
"""
if not isinstance(version, basestring):
raise errors.AnsibleFilterError("|failed expects a string or unicode")
if version.startswith("v"):
version = version[1:]
# Strip release from requested version, we no longer support this.
version = version.split('-')[0]
if include_dash and version and not version.startswith("-"):
version = "-" + version
return version
@staticmethod
def oo_hostname_from_url(url):
""" Returns the hostname contained in a URL
Ex: https://ose3-master.example.com/v1/api -> ose3-master.example.com
"""
if not isinstance(url, basestring):
raise errors.AnsibleFilterError("|failed expects a string or unicode")
parse_result = urlparse(url)
if parse_result.netloc != '':
return parse_result.netloc
else:
# netloc wasn't parsed, assume url was missing scheme and path
return parse_result.path
@staticmethod
def oo_openshift_loadbalancer_frontends(api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
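# Builds the haproxy frontend definitions for the load balancer role: always
# the atomic-openshift-api frontend on api_port, plus a nuage-monitor frontend
# when use_nuage is truthy and a REST port is given. (Descriptive comment
# added; servers_hostvars is not referenced in this function.)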
loadbalancer_frontends = [{'name': 'atomic-openshift-api',
'mode': 'tcp',
'options': ['tcplog'],
'binds': ["*:{0}".format(api_port)],
'default_backend': 'atomic-openshift-api'}]
if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None:
loadbalancer_frontends.append({'name': 'nuage-monitor',
'mode': 'tcp',
'options': ['tcplog'],
'binds': ["*:{0}".format(nuage_rest_port)],
'default_backend': 'nuage-monitor'})
return loadbalancer_frontends
@staticmethod
def oo_openshift_loadbalancer_backends(api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
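# Builds the matching haproxy backend definitions: the atomic-openshift-api
# backend balances across the masters via oo_haproxy_backend_masters, and a
# nuage-monitor backend is added under the same nuage condition as its
# frontend. (Descriptive comment added.)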
loadbalancer_backends = [{'name': 'atomic-openshift-api',
'mode': 'tcp',
'option': 'tcplog',
'balance': 'source',
'servers': FilterModule.oo_haproxy_backend_masters(servers_hostvars, api_port)}]
if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None:
loadbalancer_backends.append({'name': 'nuage-monitor',
'mode': 'tcp',
'option': 'tcplog',
'balance': 'source',
'servers': FilterModule.oo_haproxy_backend_masters(servers_hostvars, nuage_rest_port)})
return loadbalancer_backends
@staticmethod
def oo_chomp_commit_offset(version):
"""Chomp any "+git.foo" commit offset string from the given `version`
and return the modified version string.
Ex:
- chomp_commit_offset(None) => None
- chomp_commit_offset(1337) => "1337"
- chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15"
- chomp_commit_offset("v3.4.0.15") => "v3.4.0.15"
- chomp_commit_offset("v1.3.0+52492b4") => "v1.3.0"
"""
if version is None:
return version
else:
# Stringify, just in case it's a Number type. Split by '+' and
# return the first split. No concerns about strings without a
# '+', .split() returns an array of the original string.
return str(version).split('+')[0]
def filters(self):
""" returns a mapping of filters to methods """
return {
"oo_select_keys": self.oo_select_keys,
"oo_select_keys_from_list": self.oo_select_keys_from_list,
"oo_chomp_commit_offset": self.oo_chomp_commit_offset,
"oo_collect": self.oo_collect,
"oo_flatten": self.oo_flatten,
"oo_pdb": self.oo_pdb,
"oo_prepend_strings_in_list": self.oo_prepend_strings_in_list,
"oo_ami_selector": self.oo_ami_selector,
"oo_ec2_volume_definition": self.oo_ec2_volume_definition,
"oo_combine_key_value": self.oo_combine_key_value,
"oo_combine_dict": self.oo_combine_dict,
"oo_split": self.oo_split,
"oo_filter_list": self.oo_filter_list,
"oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs,
"oo_parse_named_certificates": self.oo_parse_named_certificates,
"oo_haproxy_backend_masters": self.oo_haproxy_backend_masters,
"oo_pretty_print_cluster": self.oo_pretty_print_cluster,
"oo_generate_secret": self.oo_generate_secret,
"to_padded_yaml": self.to_padded_yaml,
"oo_nodes_with_label": self.oo_nodes_with_label,
"oo_openshift_env": self.oo_openshift_env,
"oo_persistent_volumes": self.oo_persistent_volumes,
"oo_persistent_volume_claims": self.oo_persistent_volume_claims,
"oo_31_rpm_rename_conversion": self.oo_31_rpm_rename_conversion,
"oo_pods_match_component": self.oo_pods_match_component,
"oo_get_hosts_from_hostvars": self.oo_get_hosts_from_hostvars,
"oo_image_tag_to_rpm_version": self.oo_image_tag_to_rpm_version,
"oo_merge_dicts": self.oo_merge_dicts,
"oo_hostname_from_url": self.oo_hostname_from_url,
"oo_merge_hostvars": self.oo_merge_hostvars,
"oo_openshift_loadbalancer_frontends": self.oo_openshift_loadbalancer_frontends,
"oo_openshift_loadbalancer_backends": self.oo_openshift_loadbalancer_backends
}
| apache-2.0 |
dc3-plaso/plaso | tests/storage/fake_storage.py | 1 | 6205 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the fake storage."""
import unittest
from plaso.containers import errors
from plaso.containers import event_sources
from plaso.containers import reports
from plaso.containers import sessions
from plaso.containers import tasks
from plaso.lib import definitions
from plaso.storage import fake_storage
from plaso.storage import zip_file
from tests import test_lib as shared_test_lib
from tests.storage import test_lib
class FakeStorageWriterTest(test_lib.StorageTestCase):
"""Tests for the fake storage writer object."""
def testAddAnalysisReport(self):
"""Tests the AddAnalysisReport function."""
session = sessions.Session()
analysis_report = reports.AnalysisReport(
plugin_name=u'test', text=u'test report')
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.AddAnalysisReport(analysis_report)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddAnalysisReport(analysis_report)
def testAddError(self):
"""Tests the AddError function."""
session = sessions.Session()
extraction_error = errors.ExtractionError(
message=u'Test extraction error')
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.AddError(extraction_error)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddError(extraction_error)
def testAddEvent(self):
"""Tests the AddEvent function."""
session = sessions.Session()
test_events = self._CreateTestEvents()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
event = None
for event in test_events:
storage_writer.AddEvent(event)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddEvent(event)
def testAddEventSource(self):
"""Tests the AddEventSource function."""
session = sessions.Session()
event_source = event_sources.EventSource()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.AddEventSource(event_source)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddEventSource(event_source)
def testAddEventTag(self):
"""Tests the AddEventTag function."""
session = sessions.Session()
test_events = self._CreateTestEvents()
event_tags = self._CreateTestEventTags()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
for event in test_events:
storage_writer.AddEvent(event)
event_tag = None
for event_tag in event_tags:
storage_writer.AddEventTag(event_tag)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddEventTag(event_tag)
def testOpenClose(self):
"""Tests the Open and Close functions."""
session = sessions.Session()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.Close()
storage_writer.Open()
storage_writer.Close()
storage_writer = fake_storage.FakeStorageWriter(
session, storage_type=definitions.STORAGE_TYPE_TASK)
storage_writer.Open()
storage_writer.Close()
storage_writer.Open()
with self.assertRaises(IOError):
storage_writer.Open()
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.Close()
# TODO: add test for GetEvents.
# TODO: add test for GetFirstWrittenEventSource and
# GetNextWrittenEventSource.
@shared_test_lib.skipUnlessHasTestFile([u'psort_test.json.plaso'])
@shared_test_lib.skipUnlessHasTestFile([u'pinfo_test.json.plaso'])
def testMergeFromStorage(self):
"""Tests the MergeFromStorage function."""
session = sessions.Session()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
test_file = self._GetTestFilePath([u'psort_test.json.plaso'])
storage_reader = zip_file.ZIPStorageFileReader(test_file)
storage_writer.MergeFromStorage(storage_reader)
test_file = self._GetTestFilePath([u'pinfo_test.json.plaso'])
storage_reader = zip_file.ZIPStorageFileReader(test_file)
storage_writer.MergeFromStorage(storage_reader)
storage_writer.Close()
# TODO: add test for GetNextEventSource.
def testWriteSessionStartAndCompletion(self):
"""Tests the WriteSessionStart and WriteSessionCompletion functions."""
session = sessions.Session()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
storage_writer.WriteSessionStart()
storage_writer.WriteSessionCompletion()
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.WriteSessionStart()
with self.assertRaises(IOError):
storage_writer.WriteSessionCompletion()
storage_writer = fake_storage.FakeStorageWriter(
session, storage_type=definitions.STORAGE_TYPE_TASK)
storage_writer.Open()
with self.assertRaises(IOError):
storage_writer.WriteSessionStart()
with self.assertRaises(IOError):
storage_writer.WriteSessionCompletion()
storage_writer.Close()
def testWriteTaskStartAndCompletion(self):
"""Tests the WriteTaskStart and WriteTaskCompletion functions."""
session = sessions.Session()
task = tasks.Task(session_identifier=session.identifier)
storage_writer = fake_storage.FakeStorageWriter(
session, storage_type=definitions.STORAGE_TYPE_TASK, task=task)
storage_writer.Open()
storage_writer.WriteTaskStart()
storage_writer.WriteTaskCompletion()
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.WriteTaskStart()
with self.assertRaises(IOError):
storage_writer.WriteTaskCompletion()
storage_writer = fake_storage.FakeStorageWriter(session)
storage_writer.Open()
with self.assertRaises(IOError):
storage_writer.WriteTaskStart()
with self.assertRaises(IOError):
storage_writer.WriteTaskCompletion()
storage_writer.Close()
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
lastr2d2/lastchat | src/lib/web/db.py | 16 | 42213 | """
Database API
(part of web.py)
"""
__all__ = [
"UnknownParamstyle", "UnknownDB", "TransactionError",
"sqllist", "sqlors", "reparam", "sqlquote",
"SQLQuery", "SQLParam", "sqlparam",
"SQLLiteral", "sqlliteral",
"database", 'DB',
]
import time, os, urllib
try:
import datetime
except ImportError:
datetime = None
try: set
except NameError:
from sets import Set as set
from utils import threadeddict, storage, iters, iterbetter, safestr, safeunicode
try:
# db module can work independent of web.py
from webapi import debug, config
except:
import sys
debug = sys.stderr
config = storage()
class UnknownDB(Exception):
"""raised for unsupported dbms"""
pass
class _ItplError(ValueError):
def __init__(self, text, pos):
ValueError.__init__(self)
self.text = text
self.pos = pos
def __str__(self):
return "unfinished expression in %s at char %d" % (
repr(self.text), self.pos)
class TransactionError(Exception): pass
class UnknownParamstyle(Exception):
"""
raised for unsupported db paramstyles
(currently supported: qmark, numeric, format, pyformat)
"""
pass
class SQLParam(object):
"""
Parameter in SQLQuery.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam("joe")])
>>> q
<sql: "SELECT * FROM test WHERE name='joe'">
>>> q.query()
'SELECT * FROM test WHERE name=%s'
>>> q.values()
['joe']
"""
__slots__ = ["value"]
def __init__(self, value):
self.value = value
def get_marker(self, paramstyle='pyformat'):
if paramstyle == 'qmark':
return '?'
elif paramstyle == 'numeric':
return ':1'
elif paramstyle is None or paramstyle in ['format', 'pyformat']:
return '%s'
raise UnknownParamstyle, paramstyle
def sqlquery(self):
return SQLQuery([self])
def __add__(self, other):
return self.sqlquery() + other
def __radd__(self, other):
return other + self.sqlquery()
def __str__(self):
return str(self.value)
def __repr__(self):
return '<param: %s>' % repr(self.value)
sqlparam = SQLParam
class SQLQuery(object):
"""
You can pass this sort of thing as a clause in any db function.
Otherwise, you can pass a dictionary to the keyword argument `vars`
and the function will call reparam for you.
Internally, consists of `items`, which is a list of strings and
SQLParams, which get concatenated to produce the actual query.
"""
__slots__ = ["items"]
# tested in sqlquote's docstring
def __init__(self, items=None):
r"""Creates a new SQLQuery.
>>> SQLQuery("x")
<sql: 'x'>
>>> q = SQLQuery(['SELECT * FROM ', 'test', ' WHERE x=', SQLParam(1)])
>>> q
<sql: 'SELECT * FROM test WHERE x=1'>
>>> q.query(), q.values()
('SELECT * FROM test WHERE x=%s', [1])
>>> SQLQuery(SQLParam(1))
<sql: '1'>
"""
if items is None:
self.items = []
elif isinstance(items, list):
self.items = items
elif isinstance(items, SQLParam):
self.items = [items]
elif isinstance(items, SQLQuery):
self.items = list(items.items)
else:
self.items = [items]
# Take care of SQLLiterals
for i, item in enumerate(self.items):
if isinstance(item, SQLParam) and isinstance(item.value, SQLLiteral):
self.items[i] = item.value.v
def append(self, value):
self.items.append(value)
def __add__(self, other):
if isinstance(other, basestring):
items = [other]
elif isinstance(other, SQLQuery):
items = other.items
else:
return NotImplemented
return SQLQuery(self.items + items)
def __radd__(self, other):
if isinstance(other, basestring):
items = [other]
else:
return NotImplemented
return SQLQuery(items + self.items)
def __iadd__(self, other):
if isinstance(other, (basestring, SQLParam)):
self.items.append(other)
elif isinstance(other, SQLQuery):
self.items.extend(other.items)
else:
return NotImplemented
return self
def __len__(self):
return len(self.query())
def query(self, paramstyle=None):
"""
Returns the query part of the sql query.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
>>> q.query()
'SELECT * FROM test WHERE name=%s'
>>> q.query(paramstyle='qmark')
'SELECT * FROM test WHERE name=?'
"""
s = []
for x in self.items:
if isinstance(x, SQLParam):
x = x.get_marker(paramstyle)
s.append(safestr(x))
else:
x = safestr(x)
# automatically escape % characters in the query
# For backward compatibility, ignore escaping when the query already looks escaped
if paramstyle in ['format', 'pyformat']:
if '%' in x and '%%' not in x:
x = x.replace('%', '%%')
s.append(x)
return "".join(s)
def values(self):
"""
Returns the values of the parameters used in the sql query.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
>>> q.values()
['joe']
"""
return [i.value for i in self.items if isinstance(i, SQLParam)]
def join(items, sep=' ', prefix=None, suffix=None, target=None):
"""
Joins multiple queries.
>>> SQLQuery.join(['a', 'b'], ', ')
<sql: 'a, b'>
Optionally, prefix and suffix arguments can be provided.
>>> SQLQuery.join(['a', 'b'], ', ', prefix='(', suffix=')')
<sql: '(a, b)'>
If target argument is provided, the items are appended to target instead of creating a new SQLQuery.
"""
if target is None:
target = SQLQuery()
target_items = target.items
if prefix:
target_items.append(prefix)
for i, item in enumerate(items):
if i != 0:
target_items.append(sep)
if isinstance(item, SQLQuery):
target_items.extend(item.items)
else:
target_items.append(item)
if suffix:
target_items.append(suffix)
return target
join = staticmethod(join)
def _str(self):
try:
return self.query() % tuple([sqlify(x) for x in self.values()])
except (ValueError, TypeError):
return self.query()
def __str__(self):
return safestr(self._str())
def __unicode__(self):
return safeunicode(self._str())
def __repr__(self):
return '<sql: %s>' % repr(str(self))
class SQLLiteral:
"""
Protects a string from `sqlquote`.
>>> sqlquote('NOW()')
<sql: "'NOW()'">
>>> sqlquote(SQLLiteral('NOW()'))
<sql: 'NOW()'>
"""
def __init__(self, v):
self.v = v
def __repr__(self):
return self.v
sqlliteral = SQLLiteral
def _sqllist(values):
"""
>>> _sqllist([1, 2, 3])
<sql: '(1, 2, 3)'>
"""
items = []
items.append('(')
for i, v in enumerate(values):
if i != 0:
items.append(', ')
items.append(sqlparam(v))
items.append(')')
return SQLQuery(items)
def reparam(string_, dictionary):
"""
Takes a string and a dictionary and interpolates the string
using values from the dictionary. Returns an `SQLQuery` for the result.
>>> reparam("s = $s", dict(s=True))
<sql: "s = 't'">
>>> reparam("s IN $s", dict(s=[1, 2]))
<sql: 's IN (1, 2)'>
"""
dictionary = dictionary.copy() # eval mucks with it
vals = []
result = []
for live, chunk in _interpolate(string_):
if live:
v = eval(chunk, dictionary)
result.append(sqlquote(v))
else:
result.append(chunk)
return SQLQuery.join(result, '')
def sqlify(obj):
"""
converts `obj` to its proper SQL version
>>> sqlify(None)
'NULL'
>>> sqlify(True)
"'t'"
>>> sqlify(3)
'3'
"""
# because `1 == True and hash(1) == hash(True)`
# we have to do this the hard way...
if obj is None:
return 'NULL'
elif obj is True:
return "'t'"
elif obj is False:
return "'f'"
elif isinstance(obj, long):
return str(obj)
elif datetime and isinstance(obj, datetime.datetime):
return repr(obj.isoformat())
else:
if isinstance(obj, unicode): obj = obj.encode('utf8')
return repr(obj)
def sqllist(lst):
"""
Converts the arguments for use in something like a WHERE clause.
>>> sqllist(['a', 'b'])
'a, b'
>>> sqllist('a')
'a'
>>> sqllist(u'abc')
u'abc'
"""
if isinstance(lst, basestring):
return lst
else:
return ', '.join(lst)
def sqlors(left, lst):
"""
`left` is a SQL clause like `tablename.arg = `
and `lst` is a list of values. Returns a reparam-style
pair featuring the SQL that ORs together the clause
for each item in the lst.
>>> sqlors('foo = ', [])
<sql: '1=2'>
>>> sqlors('foo = ', [1])
<sql: 'foo = 1'>
>>> sqlors('foo = ', 1)
<sql: 'foo = 1'>
>>> sqlors('foo = ', [1,2,3])
<sql: '(foo = 1 OR foo = 2 OR foo = 3 OR 1=2)'>
"""
if isinstance(lst, iters):
lst = list(lst)
ln = len(lst)
if ln == 0:
return SQLQuery("1=2")
if ln == 1:
lst = lst[0]
if isinstance(lst, iters):
return SQLQuery(['('] +
sum([[left, sqlparam(x), ' OR '] for x in lst], []) +
['1=2)']
)
else:
return left + sqlparam(lst)
def sqlwhere(dictionary, grouping=' AND '):
"""
Converts a `dictionary` to an SQL WHERE clause `SQLQuery`.
>>> sqlwhere({'cust_id': 2, 'order_id':3})
<sql: 'order_id = 3 AND cust_id = 2'>
>>> sqlwhere({'cust_id': 2, 'order_id':3}, grouping=', ')
<sql: 'order_id = 3, cust_id = 2'>
>>> sqlwhere({'a': 'a', 'b': 'b'}).query()
'a = %s AND b = %s'
"""
return SQLQuery.join([k + ' = ' + sqlparam(v) for k, v in dictionary.items()], grouping)
def sqlquote(a):
"""
Ensures `a` is quoted properly for use in a SQL query.
>>> 'WHERE x = ' + sqlquote(True) + ' AND y = ' + sqlquote(3)
<sql: "WHERE x = 't' AND y = 3">
>>> 'WHERE x = ' + sqlquote(True) + ' AND y IN ' + sqlquote([2, 3])
<sql: "WHERE x = 't' AND y IN (2, 3)">
"""
if isinstance(a, list):
return _sqllist(a)
else:
return sqlparam(a).sqlquery()
class Transaction:
"""Database transaction."""
def __init__(self, ctx):
self.ctx = ctx
self.transaction_count = transaction_count = len(ctx.transactions)
class transaction_engine:
"""Transaction Engine used in top level transactions."""
def do_transact(self):
ctx.commit(unload=False)
def do_commit(self):
ctx.commit()
def do_rollback(self):
ctx.rollback()
class subtransaction_engine:
"""Transaction Engine used in sub transactions."""
def query(self, q):
db_cursor = ctx.db.cursor()
ctx.db_execute(db_cursor, SQLQuery(q % transaction_count))
def do_transact(self):
self.query('SAVEPOINT webpy_sp_%s')
def do_commit(self):
self.query('RELEASE SAVEPOINT webpy_sp_%s')
def do_rollback(self):
self.query('ROLLBACK TO SAVEPOINT webpy_sp_%s')
class dummy_engine:
"""Transaction Engine used instead of subtransaction_engine
when sub transactions are not supported."""
do_transact = do_commit = do_rollback = lambda self: None
if self.transaction_count:
# nested transactions are not supported in some databases
if self.ctx.get('ignore_nested_transactions'):
self.engine = dummy_engine()
else:
self.engine = subtransaction_engine()
else:
self.engine = transaction_engine()
self.engine.do_transact()
self.ctx.transactions.append(self)
def __enter__(self):
return self
def __exit__(self, exctype, excvalue, traceback):
if exctype is not None:
self.rollback()
else:
self.commit()
def commit(self):
if len(self.ctx.transactions) > self.transaction_count:
self.engine.do_commit()
self.ctx.transactions = self.ctx.transactions[:self.transaction_count]
def rollback(self):
if len(self.ctx.transactions) > self.transaction_count:
self.engine.do_rollback()
self.ctx.transactions = self.ctx.transactions[:self.transaction_count]
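# Illustrative sketch (added comment; connection parameters are hypothetical):
# Transaction objects are normally obtained from DB.transaction() and act as
# context managers, committing on success and rolling back when the block raises.
#   db = database(dbn='sqlite', db=':memory:')
#   with db.transaction():
#       db.insert('person', name='alice', seqname=False)
#       # (assumes a 'person' table already exists; an exception raised here
#       # would roll the INSERT back instead of committing)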
class DB:
"""Database"""
def __init__(self, db_module, keywords):
"""Creates a database.
"""
# some DB implementations take an optional parameter `driver` to use a specific driver module
# but it should not be passed to connect
keywords.pop('driver', None)
self.db_module = db_module
self.keywords = keywords
self._ctx = threadeddict()
# flag to enable/disable printing queries
self.printing = config.get('debug_sql', config.get('debug', False))
self.supports_multiple_insert = False
try:
import DBUtils
# enable pooling if DBUtils module is available.
self.has_pooling = True
except ImportError:
self.has_pooling = False
# Pooling can be disabled by passing pooling=False in the keywords.
self.has_pooling = self.keywords.pop('pooling', True) and self.has_pooling
def _getctx(self):
if not self._ctx.get('db'):
self._load_context(self._ctx)
return self._ctx
ctx = property(_getctx)
def _load_context(self, ctx):
ctx.dbq_count = 0
ctx.transactions = [] # stack of transactions
if self.has_pooling:
ctx.db = self._connect_with_pooling(self.keywords)
else:
ctx.db = self._connect(self.keywords)
ctx.db_execute = self._db_execute
if not hasattr(ctx.db, 'commit'):
ctx.db.commit = lambda: None
if not hasattr(ctx.db, 'rollback'):
ctx.db.rollback = lambda: None
def commit(unload=True):
# do db commit and release the connection if pooling is enabled.
ctx.db.commit()
if unload and self.has_pooling:
self._unload_context(self._ctx)
def rollback():
# do db rollback and release the connection if pooling is enabled.
ctx.db.rollback()
if self.has_pooling:
self._unload_context(self._ctx)
ctx.commit = commit
ctx.rollback = rollback
def _unload_context(self, ctx):
del ctx.db
def _connect(self, keywords):
return self.db_module.connect(**keywords)
def _connect_with_pooling(self, keywords):
def get_pooled_db():
from DBUtils import PooledDB
# In DBUtils 0.9.3, `dbapi` argument is renamed as `creator`
# see Bug#122112
if PooledDB.__version__.split('.') < '0.9.3'.split('.'):
return PooledDB.PooledDB(dbapi=self.db_module, **keywords)
else:
return PooledDB.PooledDB(creator=self.db_module, **keywords)
if getattr(self, '_pooleddb', None) is None:
self._pooleddb = get_pooled_db()
return self._pooleddb.connection()
def _db_cursor(self):
return self.ctx.db.cursor()
def _param_marker(self):
"""Returns parameter marker based on paramstyle attribute if this database."""
style = getattr(self, 'paramstyle', 'pyformat')
if style == 'qmark':
return '?'
elif style == 'numeric':
return ':1'
elif style in ['format', 'pyformat']:
return '%s'
raise UnknownParamstyle, style
def _db_execute(self, cur, sql_query):
"""executes an sql query"""
self.ctx.dbq_count += 1
try:
a = time.time()
query, params = self._process_query(sql_query)
out = cur.execute(query, params)
b = time.time()
except:
if self.printing:
print >> debug, 'ERR:', str(sql_query)
if self.ctx.transactions:
self.ctx.transactions[-1].rollback()
else:
self.ctx.rollback()
raise
if self.printing:
print >> debug, '%s (%s): %s' % (round(b-a, 2), self.ctx.dbq_count, str(sql_query))
return out
def _process_query(self, sql_query):
"""Takes the SQLQuery object and returns query string and parameters.
"""
paramstyle = getattr(self, 'paramstyle', 'pyformat')
query = sql_query.query(paramstyle)
params = sql_query.values()
return query, params
def _where(self, where, vars):
if isinstance(where, (int, long)):
where = "id = " + sqlparam(where)
#@@@ for backward-compatibility
elif isinstance(where, (list, tuple)) and len(where) == 2:
where = SQLQuery(where[0], where[1])
elif isinstance(where, SQLQuery):
pass
else:
where = reparam(where, vars)
return where
def query(self, sql_query, vars=None, processed=False, _test=False):
"""
Execute SQL query `sql_query` using dictionary `vars` to interpolate it.
If `processed=True`, `vars` is a `reparam`-style list to use
instead of interpolating.
>>> db = DB(None, {})
>>> db.query("SELECT * FROM foo", _test=True)
<sql: 'SELECT * FROM foo'>
>>> db.query("SELECT * FROM foo WHERE x = $x", vars=dict(x='f'), _test=True)
<sql: "SELECT * FROM foo WHERE x = 'f'">
>>> db.query("SELECT * FROM foo WHERE x = " + sqlquote('f'), _test=True)
<sql: "SELECT * FROM foo WHERE x = 'f'">
"""
if vars is None: vars = {}
if not processed and not isinstance(sql_query, SQLQuery):
sql_query = reparam(sql_query, vars)
if _test: return sql_query
db_cursor = self._db_cursor()
self._db_execute(db_cursor, sql_query)
if db_cursor.description:
names = [x[0] for x in db_cursor.description]
def iterwrapper():
row = db_cursor.fetchone()
while row:
yield storage(dict(zip(names, row)))
row = db_cursor.fetchone()
out = iterbetter(iterwrapper())
out.__len__ = lambda: int(db_cursor.rowcount)
out.list = lambda: [storage(dict(zip(names, x))) \
for x in db_cursor.fetchall()]
else:
out = db_cursor.rowcount
if not self.ctx.transactions:
self.ctx.commit()
return out
def select(self, tables, vars=None, what='*', where=None, order=None, group=None,
limit=None, offset=None, _test=False):
"""
Selects `what` from `tables` with clauses `where`, `order`,
`group`, `limit`, and `offset`. Uses vars to interpolate.
Otherwise, each clause can be a SQLQuery.
>>> db = DB(None, {})
>>> db.select('foo', _test=True)
<sql: 'SELECT * FROM foo'>
>>> db.select(['foo', 'bar'], where="foo.bar_id = bar.id", limit=5, _test=True)
<sql: 'SELECT * FROM foo, bar WHERE foo.bar_id = bar.id LIMIT 5'>
"""
if vars is None: vars = {}
sql_clauses = self.sql_clauses(what, tables, where, group, order, limit, offset)
clauses = [self.gen_clause(sql, val, vars) for sql, val in sql_clauses if val is not None]
qout = SQLQuery.join(clauses)
if _test: return qout
return self.query(qout, processed=True)
def where(self, table, what='*', order=None, group=None, limit=None,
offset=None, _test=False, **kwargs):
"""
Selects from `table` where keys are equal to values in `kwargs`.
>>> db = DB(None, {})
>>> db.where('foo', bar_id=3, _test=True)
<sql: 'SELECT * FROM foo WHERE bar_id = 3'>
>>> db.where('foo', source=2, crust='dewey', _test=True)
<sql: "SELECT * FROM foo WHERE source = 2 AND crust = 'dewey'">
>>> db.where('foo', _test=True)
<sql: 'SELECT * FROM foo'>
"""
where_clauses = []
for k, v in kwargs.iteritems():
where_clauses.append(k + ' = ' + sqlquote(v))
if where_clauses:
where = SQLQuery.join(where_clauses, " AND ")
else:
where = None
return self.select(table, what=what, order=order,
group=group, limit=limit, offset=offset, _test=_test,
where=where)
def sql_clauses(self, what, tables, where, group, order, limit, offset):
return (
('SELECT', what),
('FROM', sqllist(tables)),
('WHERE', where),
('GROUP BY', group),
('ORDER BY', order),
('LIMIT', limit),
('OFFSET', offset))
def gen_clause(self, sql, val, vars):
if isinstance(val, (int, long)):
if sql == 'WHERE':
nout = 'id = ' + sqlquote(val)
else:
nout = SQLQuery(val)
#@@@
elif isinstance(val, (list, tuple)) and len(val) == 2:
nout = SQLQuery(val[0], val[1]) # backwards-compatibility
elif isinstance(val, SQLQuery):
nout = val
else:
nout = reparam(val, vars)
def xjoin(a, b):
if a and b: return a + ' ' + b
else: return a or b
return xjoin(sql, nout)
def insert(self, tablename, seqname=None, _test=False, **values):
"""
Inserts `values` into `tablename`. Returns current sequence ID.
Set `seqname` to the ID if it's not the default, or to `False`
if there isn't one.
>>> db = DB(None, {})
>>> q = db.insert('foo', name='bob', age=2, created=SQLLiteral('NOW()'), _test=True)
>>> q
<sql: "INSERT INTO foo (age, name, created) VALUES (2, 'bob', NOW())">
>>> q.query()
'INSERT INTO foo (age, name, created) VALUES (%s, %s, NOW())'
>>> q.values()
[2, 'bob']
"""
def q(x): return "(" + x + ")"
if values:
_keys = SQLQuery.join(values.keys(), ', ')
_values = SQLQuery.join([sqlparam(v) for v in values.values()], ', ')
sql_query = "INSERT INTO %s " % tablename + q(_keys) + ' VALUES ' + q(_values)
else:
sql_query = SQLQuery(self._get_insert_default_values_query(tablename))
if _test: return sql_query
db_cursor = self._db_cursor()
if seqname is not False:
sql_query = self._process_insert_query(sql_query, tablename, seqname)
if isinstance(sql_query, tuple):
# for some databases, a separate query has to be made to find
# the id of the inserted row.
q1, q2 = sql_query
self._db_execute(db_cursor, q1)
self._db_execute(db_cursor, q2)
else:
self._db_execute(db_cursor, sql_query)
try:
out = db_cursor.fetchone()[0]
except Exception:
out = None
if not self.ctx.transactions:
self.ctx.commit()
return out
def _get_insert_default_values_query(self, table):
return "INSERT INTO %s DEFAULT VALUES" % table
def multiple_insert(self, tablename, values, seqname=None, _test=False):
"""
Inserts multiple rows into `tablename`. The `values` must be a list of dictionaries,
one for each row to be inserted, each with the same set of keys.
Returns the list of ids of the inserted rows.
Set `seqname` to the ID if it's not the default, or to `False`
if there isn't one.
>>> db = DB(None, {})
>>> db.supports_multiple_insert = True
>>> values = [{"name": "foo", "email": "[email protected]"}, {"name": "bar", "email": "[email protected]"}]
>>> db.multiple_insert('person', values=values, _test=True)
<sql: "INSERT INTO person (name, email) VALUES ('foo', '[email protected]'), ('bar', '[email protected]')">
"""
if not values:
return []
if not self.supports_multiple_insert:
out = [self.insert(tablename, seqname=seqname, _test=_test, **v) for v in values]
if seqname is False:
return None
else:
return out
keys = values[0].keys()
#@@ make sure all keys are valid
for v in values:
if v.keys() != keys:
raise ValueError, 'Not all rows have the same keys'
sql_query = SQLQuery('INSERT INTO %s (%s) VALUES ' % (tablename, ', '.join(keys)))
for i, row in enumerate(values):
if i != 0:
sql_query.append(", ")
SQLQuery.join([SQLParam(row[k]) for k in keys], sep=", ", target=sql_query, prefix="(", suffix=")")
if _test: return sql_query
db_cursor = self._db_cursor()
if seqname is not False:
sql_query = self._process_insert_query(sql_query, tablename, seqname)
if isinstance(sql_query, tuple):
# for some databases, a separate query has to be made to find
# the id of the inserted row.
q1, q2 = sql_query
self._db_execute(db_cursor, q1)
self._db_execute(db_cursor, q2)
else:
self._db_execute(db_cursor, sql_query)
try:
out = db_cursor.fetchone()[0]
out = range(out-len(values)+1, out+1)
except Exception:
out = None
if not self.ctx.transactions:
self.ctx.commit()
return out
def update(self, tables, where, vars=None, _test=False, **values):
"""
Update `tables` with clause `where` (interpolated using `vars`)
and setting `values`.
>>> db = DB(None, {})
>>> name = 'Joseph'
>>> q = db.update('foo', where='name = $name', name='bob', age=2,
... created=SQLLiteral('NOW()'), vars=locals(), _test=True)
>>> q
<sql: "UPDATE foo SET age = 2, name = 'bob', created = NOW() WHERE name = 'Joseph'">
>>> q.query()
'UPDATE foo SET age = %s, name = %s, created = NOW() WHERE name = %s'
>>> q.values()
[2, 'bob', 'Joseph']
"""
if vars is None: vars = {}
where = self._where(where, vars)
query = (
"UPDATE " + sqllist(tables) +
" SET " + sqlwhere(values, ', ') +
" WHERE " + where)
if _test: return query
db_cursor = self._db_cursor()
self._db_execute(db_cursor, query)
if not self.ctx.transactions:
self.ctx.commit()
return db_cursor.rowcount
def delete(self, table, where, using=None, vars=None, _test=False):
"""
Deletes from `table` with clauses `where` and `using`.
>>> db = DB(None, {})
>>> name = 'Joe'
>>> db.delete('foo', where='name = $name', vars=locals(), _test=True)
<sql: "DELETE FROM foo WHERE name = 'Joe'">
"""
if vars is None: vars = {}
where = self._where(where, vars)
q = 'DELETE FROM ' + table
if using: q += ' USING ' + sqllist(using)
if where: q += ' WHERE ' + where
if _test: return q
db_cursor = self._db_cursor()
self._db_execute(db_cursor, q)
if not self.ctx.transactions:
self.ctx.commit()
return db_cursor.rowcount
def _process_insert_query(self, query, tablename, seqname):
return query
def transaction(self):
"""Start a transaction."""
return Transaction(self.ctx)
class PostgresDB(DB):
"""Postgres driver."""
def __init__(self, **keywords):
if 'pw' in keywords:
keywords['password'] = keywords.pop('pw')
db_module = import_driver(["psycopg2", "psycopg", "pgdb"], preferred=keywords.pop('driver', None))
if db_module.__name__ == "psycopg2":
import psycopg2.extensions
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
if db_module.__name__ == "pgdb" and 'port' in keywords:
keywords["host"] += ":" + str(keywords.pop('port'))
# if db is not provided postgres driver will take it from PGDATABASE environment variable
if 'db' in keywords:
keywords['database'] = keywords.pop('db')
self.dbname = "postgres"
self.paramstyle = db_module.paramstyle
DB.__init__(self, db_module, keywords)
self.supports_multiple_insert = True
self._sequences = None
def _process_insert_query(self, query, tablename, seqname):
if seqname is None:
# when seqname is not provided guess the seqname and make sure it exists
seqname = tablename + "_id_seq"
if seqname not in self._get_all_sequences():
seqname = None
if seqname:
query += "; SELECT currval('%s')" % seqname
return query
def _get_all_sequences(self):
"""Query postgres to find names of all sequences used in this database."""
if self._sequences is None:
q = "SELECT c.relname FROM pg_class c WHERE c.relkind = 'S'"
self._sequences = set([c.relname for c in self.query(q)])
return self._sequences
def _connect(self, keywords):
conn = DB._connect(self, keywords)
try:
conn.set_client_encoding('UTF8')
except AttributeError:
# fallback for pgdb driver
conn.cursor().execute("set client_encoding to 'UTF-8'")
return conn
def _connect_with_pooling(self, keywords):
conn = DB._connect_with_pooling(self, keywords)
conn._con._con.set_client_encoding('UTF8')
return conn
class MySQLDB(DB):
def __init__(self, **keywords):
import MySQLdb as db
if 'pw' in keywords:
keywords['passwd'] = keywords['pw']
del keywords['pw']
if 'charset' not in keywords:
keywords['charset'] = 'utf8'
elif keywords['charset'] is None:
del keywords['charset']
self.paramstyle = db.paramstyle = 'pyformat' # it's both, like psycopg
self.dbname = "mysql"
DB.__init__(self, db, keywords)
self.supports_multiple_insert = True
def _process_insert_query(self, query, tablename, seqname):
return query, SQLQuery('SELECT last_insert_id();')
def _get_insert_default_values_query(self, table):
return "INSERT INTO %s () VALUES()" % table
def import_driver(drivers, preferred=None):
"""Import the first available driver or preferred driver.
"""
if preferred:
drivers = [preferred]
for d in drivers:
try:
return __import__(d, None, None, ['x'])
except ImportError:
pass
raise ImportError("Unable to import " + " or ".join(drivers))
class SqliteDB(DB):
def __init__(self, **keywords):
db = import_driver(["sqlite3", "pysqlite2.dbapi2", "sqlite"], preferred=keywords.pop('driver', None))
if db.__name__ in ["sqlite3", "pysqlite2.dbapi2"]:
db.paramstyle = 'qmark'
# sqlite driver doesn't create datetime objects for timestamp columns unless `detect_types` option is passed.
# It seems to be supported in sqlite3 and pysqlite2 drivers, not sure about sqlite.
keywords.setdefault('detect_types', db.PARSE_DECLTYPES)
self.paramstyle = db.paramstyle
keywords['database'] = keywords.pop('db')
keywords['pooling'] = False # sqlite doesn't allow connections to be shared by threads
self.dbname = "sqlite"
DB.__init__(self, db, keywords)
def _process_insert_query(self, query, tablename, seqname):
return query, SQLQuery('SELECT last_insert_rowid();')
def query(self, *a, **kw):
out = DB.query(self, *a, **kw)
if isinstance(out, iterbetter):
del out.__len__
return out
class FirebirdDB(DB):
"""Firebird Database.
"""
def __init__(self, **keywords):
try:
import kinterbasdb as db
except Exception:
            db = None
if 'pw' in keywords:
keywords['password'] = keywords.pop('pw')
keywords['database'] = keywords.pop('db')
self.paramstyle = db.paramstyle
DB.__init__(self, db, keywords)
def delete(self, table, where=None, using=None, vars=None, _test=False):
        # firebird doesn't support the USING clause
using=None
return DB.delete(self, table, where, using, vars, _test)
def sql_clauses(self, what, tables, where, group, order, limit, offset):
return (
('SELECT', ''),
('FIRST', limit),
('SKIP', offset),
('', what),
('FROM', sqllist(tables)),
('WHERE', where),
('GROUP BY', group),
('ORDER BY', order)
)
class MSSQLDB(DB):
def __init__(self, **keywords):
import pymssql as db
if 'pw' in keywords:
keywords['password'] = keywords.pop('pw')
keywords['database'] = keywords.pop('db')
self.dbname = "mssql"
DB.__init__(self, db, keywords)
def _process_query(self, sql_query):
"""Takes the SQLQuery object and returns query string and parameters.
"""
# MSSQLDB expects params to be a tuple.
# Overwriting the default implementation to convert params to tuple.
paramstyle = getattr(self, 'paramstyle', 'pyformat')
query = sql_query.query(paramstyle)
params = sql_query.values()
return query, tuple(params)
def sql_clauses(self, what, tables, where, group, order, limit, offset):
return (
('SELECT', what),
('TOP', limit),
('FROM', sqllist(tables)),
('WHERE', where),
('GROUP BY', group),
('ORDER BY', order),
('OFFSET', offset))
def _test(self):
"""Test LIMIT.
Fake presence of pymssql module for running tests.
>>> import sys
>>> sys.modules['pymssql'] = sys.modules['sys']
MSSQL has TOP clause instead of LIMIT clause.
>>> db = MSSQLDB(db='test', user='joe', pw='secret')
>>> db.select('foo', limit=4, _test=True)
<sql: 'SELECT * TOP 4 FROM foo'>
"""
pass
class OracleDB(DB):
def __init__(self, **keywords):
import cx_Oracle as db
if 'pw' in keywords:
keywords['password'] = keywords.pop('pw')
#@@ TODO: use db.makedsn if host, port is specified
keywords['dsn'] = keywords.pop('db')
self.dbname = 'oracle'
db.paramstyle = 'numeric'
self.paramstyle = db.paramstyle
# oracle doesn't support pooling
keywords.pop('pooling', None)
DB.__init__(self, db, keywords)
def _process_insert_query(self, query, tablename, seqname):
if seqname is None:
# It is not possible to get seq name from table name in Oracle
return query
else:
return query + "; SELECT %s.currval FROM dual" % seqname
def dburl2dict(url):
"""
Takes a URL to a database and parses it into an equivalent dictionary.
>>> dburl2dict('postgres://james:[email protected]:5432/mygreatdb')
{'pw': 'day', 'dbn': 'postgres', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': '5432'}
>>> dburl2dict('postgres://james:[email protected]/mygreatdb')
{'user': 'james', 'host': 'serverfarm.example.net', 'db': 'mygreatdb', 'pw': 'day', 'dbn': 'postgres'}
>>> dburl2dict('postgres://james:d%[email protected]/mygreatdb')
{'user': 'james', 'host': 'serverfarm.example.net', 'db': 'mygreatdb', 'pw': 'd@y', 'dbn': 'postgres'}
"""
dbn, rest = url.split('://', 1)
user, rest = rest.split(':', 1)
pw, rest = rest.split('@', 1)
if ':' in rest:
host, rest = rest.split(':', 1)
port, rest = rest.split('/', 1)
else:
host, rest = rest.split('/', 1)
port = None
db = rest
uq = urllib.unquote
out = dict(dbn=dbn, user=uq(user), pw=uq(pw), host=uq(host), db=uq(db))
if port: out['port'] = port
return out
_databases = {}
def database(dburl=None, **params):
"""Creates appropriate database using params.
    Pooling will be enabled if the DBUtils module is available.
Pooling can be disabled by passing pooling=False in params.
"""
if not dburl and not params:
dburl = os.environ['DATABASE_URL']
if dburl:
params = dburl2dict(dburl)
dbn = params.pop('dbn')
if dbn in _databases:
return _databases[dbn](**params)
else:
raise UnknownDB, dbn
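# Usage sketch (illustrative only; the connection parameters are assumptions,
# and the chosen backend must be registered below and importable):
#
#     >>> db = database(dbn='postgres', user='james', pw='day', db='mygreatdb')
#     >>> db = database('postgres://james:[email protected]/mygreatdb')
#
# The URL form is parsed by dburl2dict above; when neither a URL nor params
# are given, the URL is read from the DATABASE_URL environment variable.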
def register_database(name, clazz):
"""
Register a database.
>>> class LegacyDB(DB):
... def __init__(self, **params):
... pass
...
>>> register_database('legacy', LegacyDB)
>>> db = database(dbn='legacy', db='test', user='joe', passwd='secret')
"""
_databases[name] = clazz
register_database('mysql', MySQLDB)
register_database('postgres', PostgresDB)
register_database('sqlite', SqliteDB)
register_database('firebird', FirebirdDB)
register_database('mssql', MSSQLDB)
register_database('oracle', OracleDB)
def _interpolate(format):
"""
Takes a format string and returns a list of 2-tuples of the form
(boolean, string) where boolean says whether string should be evaled
or not.
from <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee)
"""
from tokenize import tokenprog
def matchorfail(text, pos):
match = tokenprog.match(text, pos)
if match is None:
raise _ItplError(text, pos)
return match, match.end()
namechars = "abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
chunks = []
pos = 0
while 1:
dollar = format.find("$", pos)
if dollar < 0:
break
nextchar = format[dollar + 1]
if nextchar == "{":
chunks.append((0, format[pos:dollar]))
pos, level = dollar + 2, 1
while level:
match, pos = matchorfail(format, pos)
tstart, tend = match.regs[3]
token = format[tstart:tend]
if token == "{":
level = level + 1
elif token == "}":
level = level - 1
chunks.append((1, format[dollar + 2:pos - 1]))
elif nextchar in namechars:
chunks.append((0, format[pos:dollar]))
match, pos = matchorfail(format, dollar + 1)
while pos < len(format):
if format[pos] == "." and \
pos + 1 < len(format) and format[pos + 1] in namechars:
match, pos = matchorfail(format, pos + 1)
elif format[pos] in "([":
pos, level = pos + 1, 1
while level:
match, pos = matchorfail(format, pos)
tstart, tend = match.regs[3]
token = format[tstart:tend]
if token[0] in "([":
level = level + 1
elif token[0] in ")]":
level = level - 1
else:
break
chunks.append((1, format[dollar + 1:pos]))
else:
chunks.append((0, format[pos:dollar + 1]))
pos = dollar + 1 + (nextchar == "$")
if pos < len(format):
chunks.append((0, format[pos:]))
return chunks
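# Example of the chunk structure produced above (illustrative; the boolean in
# each 2-tuple marks text that should be evaluated rather than copied verbatim):
#
#     >>> _interpolate("Hello $name, you have ${n + 1} messages")
#     [(0, 'Hello '), (1, 'name'), (0, ', you have '), (1, 'n + 1'), (0, ' messages')]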
if __name__ == "__main__":
import doctest
doctest.testmod()
| gpl-2.0 |
staticlibs/android-ndk-r9d-arm-linux-androideabi-4.8 | lib/python2.7/test/test_xdrlib.py | 94 | 1597 | from test import test_support
import unittest
import xdrlib
class XDRTest(unittest.TestCase):
def test_xdr(self):
p = xdrlib.Packer()
s = 'hello world'
a = ['what', 'is', 'hapnin', 'doctor']
p.pack_int(42)
p.pack_int(-17)
p.pack_uint(9)
p.pack_bool(True)
p.pack_bool(False)
p.pack_uhyper(45L)
p.pack_float(1.9)
p.pack_double(1.9)
p.pack_string(s)
p.pack_list(range(5), p.pack_uint)
p.pack_array(a, p.pack_string)
# now verify
data = p.get_buffer()
up = xdrlib.Unpacker(data)
self.assertEqual(up.get_position(), 0)
self.assertEqual(up.unpack_int(), 42)
self.assertEqual(up.unpack_int(), -17)
self.assertEqual(up.unpack_uint(), 9)
self.assertTrue(up.unpack_bool() is True)
# remember position
pos = up.get_position()
self.assertTrue(up.unpack_bool() is False)
# rewind and unpack again
up.set_position(pos)
self.assertTrue(up.unpack_bool() is False)
self.assertEqual(up.unpack_uhyper(), 45L)
self.assertAlmostEqual(up.unpack_float(), 1.9)
self.assertAlmostEqual(up.unpack_double(), 1.9)
self.assertEqual(up.unpack_string(), s)
self.assertEqual(up.unpack_list(up.unpack_uint), range(5))
self.assertEqual(up.unpack_array(up.unpack_string), a)
up.done()
self.assertRaises(EOFError, up.unpack_uint)
def test_main():
test_support.run_unittest(XDRTest)
if __name__ == "__main__":
test_main()
| gpl-2.0 |
zchking/bill-data-collector | lib/getWeb/packages/chardet/big5freq.py | 3133 | 82594 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Big5 frequency table
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
#
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) =2.98
# Random Distribution Ratio = 512/(5401-512)=0.105
#
# Typical Distribution Ratio is about 25% of the Ideal one, still much higher than RDR
BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
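# Quick arithmetic check of the figures above (illustrative only): the 512 most
# frequent characters cover ~74.851% of typical text, so
#   ideal ratio  = 0.74851 / (1 - 0.74851) ~= 2.98
#   random ratio = 512 / (5401 - 512)      ~= 0.105
# and the 0.75 used here is roughly 25% of the ideal ratio, as noted above.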
#Char to FreqOrder table
BIG5_TABLE_SIZE = 5376
Big5CharToFreqOrder = (
1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16
3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32
1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48
63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64
3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80
4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96
5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112
630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128
179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144
995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160
2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176
1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192
3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208
706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240
3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256
2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272
437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288
3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304
1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320
5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336
266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352
5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368
1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384
32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400
188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416
3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432
3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448
324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464
2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480
2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496
314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512
287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528
3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544
1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560
1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576
1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592
2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608
265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624
4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640
1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656
5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672
2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688
383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704
98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720
523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736
710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752
5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768
379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784
1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800
585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816
690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832
5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848
1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864
544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880
3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896
4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912
3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928
279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944
610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960
1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976
4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992
3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056
3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232
626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520
5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680
4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856
5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096
1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464
4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528
3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688
1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720
1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040
3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184
5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312
2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440
2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632
4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680
5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 #last 512
# Everything below is of no interest for detection purposes
2522,1613,4812,5799,3345,3945,2523,5800,4162,5801,1637,4163,2471,4813,3946,5802, # 5392
2500,3034,3800,5803,5804,2195,4814,5805,2163,5806,5807,5808,5809,5810,5811,5812, # 5408
5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828, # 5424
5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844, # 5440
5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856,5857,5858,5859,5860, # 5456
5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872,5873,5874,5875,5876, # 5472
5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888,5889,5890,5891,5892, # 5488
5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,5906,5907,5908, # 5504
5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,5921,5922,5923,5924, # 5520
5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936,5937,5938,5939,5940, # 5536
5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952,5953,5954,5955,5956, # 5552
5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968,5969,5970,5971,5972, # 5568
5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984,5985,5986,5987,5988, # 5584
5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004, # 5600
6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020, # 5616
6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032,6033,6034,6035,6036, # 5632
6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052, # 5648
6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068, # 5664
6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084, # 5680
6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100, # 5696
6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116, # 5712
6117,6118,6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,6132, # 5728
6133,6134,6135,6136,6137,6138,6139,6140,6141,6142,6143,6144,6145,6146,6147,6148, # 5744
6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,6164, # 5760
6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,6180, # 5776
6181,6182,6183,6184,6185,6186,6187,6188,6189,6190,6191,6192,6193,6194,6195,6196, # 5792
6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,6211,6212, # 5808
6213,6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,3670,6224,6225,6226,6227, # 5824
6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,6242,6243, # 5840
6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,6254,6255,6256,6257,6258,6259, # 5856
6260,6261,6262,6263,6264,6265,6266,6267,6268,6269,6270,6271,6272,6273,6274,6275, # 5872
6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,4815,6286,6287,6288,6289,6290, # 5888
6291,6292,4816,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,6303,6304,6305, # 5904
6306,6307,6308,6309,6310,6311,4817,4818,6312,6313,6314,6315,6316,6317,6318,4819, # 5920
6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,6334, # 5936
6335,6336,6337,4820,6338,6339,6340,6341,6342,6343,6344,6345,6346,6347,6348,6349, # 5952
6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,6364,6365, # 5968
6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,6380,6381, # 5984
6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,6396,6397, # 6000
6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,3441,6411,6412, # 6016
6413,6414,6415,6416,6417,6418,6419,6420,6421,6422,6423,6424,6425,4440,6426,6427, # 6032
6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,6439,6440,6441,6442,6443, # 6048
6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,4821,6455,6456,6457,6458, # 6064
6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,6473,6474, # 6080
6475,6476,6477,3947,3948,6478,6479,6480,6481,3272,4441,6482,6483,6484,6485,4442, # 6096
6486,6487,6488,6489,6490,6491,6492,6493,6494,6495,6496,4822,6497,6498,6499,6500, # 6112
6501,6502,6503,6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516, # 6128
6517,6518,6519,6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532, # 6144
6533,6534,6535,6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548, # 6160
6549,6550,6551,6552,6553,6554,6555,6556,2784,6557,4823,6558,6559,6560,6561,6562, # 6176
6563,6564,6565,6566,6567,6568,6569,3949,6570,6571,6572,4824,6573,6574,6575,6576, # 6192
6577,6578,6579,6580,6581,6582,6583,4825,6584,6585,6586,3950,2785,6587,6588,6589, # 6208
6590,6591,6592,6593,6594,6595,6596,6597,6598,6599,6600,6601,6602,6603,6604,6605, # 6224
6606,6607,6608,6609,6610,6611,6612,4826,6613,6614,6615,4827,6616,6617,6618,6619, # 6240
6620,6621,6622,6623,6624,6625,4164,6626,6627,6628,6629,6630,6631,6632,6633,6634, # 6256
3547,6635,4828,6636,6637,6638,6639,6640,6641,6642,3951,2984,6643,6644,6645,6646, # 6272
6647,6648,6649,4165,6650,4829,6651,6652,4830,6653,6654,6655,6656,6657,6658,6659, # 6288
6660,6661,6662,4831,6663,6664,6665,6666,6667,6668,6669,6670,6671,4166,6672,4832, # 6304
3952,6673,6674,6675,6676,4833,6677,6678,6679,4167,6680,6681,6682,3198,6683,6684, # 6320
6685,6686,6687,6688,6689,6690,6691,6692,6693,6694,6695,6696,6697,4834,6698,6699, # 6336
6700,6701,6702,6703,6704,6705,6706,6707,6708,6709,6710,6711,6712,6713,6714,6715, # 6352
6716,6717,6718,6719,6720,6721,6722,6723,6724,6725,6726,6727,6728,6729,6730,6731, # 6368
6732,6733,6734,4443,6735,6736,6737,6738,6739,6740,6741,6742,6743,6744,6745,4444, # 6384
6746,6747,6748,6749,6750,6751,6752,6753,6754,6755,6756,6757,6758,6759,6760,6761, # 6400
6762,6763,6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777, # 6416
6778,6779,6780,6781,4168,6782,6783,3442,6784,6785,6786,6787,6788,6789,6790,6791, # 6432
4169,6792,6793,6794,6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806, # 6448
6807,6808,6809,6810,6811,4835,6812,6813,6814,4445,6815,6816,4446,6817,6818,6819, # 6464
6820,6821,6822,6823,6824,6825,6826,6827,6828,6829,6830,6831,6832,6833,6834,6835, # 6480
3548,6836,6837,6838,6839,6840,6841,6842,6843,6844,6845,6846,4836,6847,6848,6849, # 6496
6850,6851,6852,6853,6854,3953,6855,6856,6857,6858,6859,6860,6861,6862,6863,6864, # 6512
6865,6866,6867,6868,6869,6870,6871,6872,6873,6874,6875,6876,6877,3199,6878,6879, # 6528
6880,6881,6882,4447,6883,6884,6885,6886,6887,6888,6889,6890,6891,6892,6893,6894, # 6544
6895,6896,6897,6898,6899,6900,6901,6902,6903,6904,4170,6905,6906,6907,6908,6909, # 6560
6910,6911,6912,6913,6914,6915,6916,6917,6918,6919,6920,6921,6922,6923,6924,6925, # 6576
6926,6927,4837,6928,6929,6930,6931,6932,6933,6934,6935,6936,3346,6937,6938,4838, # 6592
6939,6940,6941,4448,6942,6943,6944,6945,6946,4449,6947,6948,6949,6950,6951,6952, # 6608
6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,6967,6968, # 6624
6969,6970,6971,6972,6973,6974,6975,6976,6977,6978,6979,6980,6981,6982,6983,6984, # 6640
6985,6986,6987,6988,6989,6990,6991,6992,6993,6994,3671,6995,6996,6997,6998,4839, # 6656
6999,7000,7001,7002,3549,7003,7004,7005,7006,7007,7008,7009,7010,7011,7012,7013, # 6672
7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,7028,7029, # 6688
7030,4840,7031,7032,7033,7034,7035,7036,7037,7038,4841,7039,7040,7041,7042,7043, # 6704
7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,7059, # 6720
7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,2985,7071,7072,7073,7074, # 6736
7075,7076,7077,7078,7079,7080,4842,7081,7082,7083,7084,7085,7086,7087,7088,7089, # 6752
7090,7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105, # 6768
7106,7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,4450,7119,7120, # 6784
7121,7122,7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136, # 6800
7137,7138,7139,7140,7141,7142,7143,4843,7144,7145,7146,7147,7148,7149,7150,7151, # 6816
7152,7153,7154,7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167, # 6832
7168,7169,7170,7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183, # 6848
7184,7185,7186,7187,7188,4171,4172,7189,7190,7191,7192,7193,7194,7195,7196,7197, # 6864
7198,7199,7200,7201,7202,7203,7204,7205,7206,7207,7208,7209,7210,7211,7212,7213, # 6880
7214,7215,7216,7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229, # 6896
7230,7231,7232,7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245, # 6912
7246,7247,7248,7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261, # 6928
7262,7263,7264,7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277, # 6944
7278,7279,7280,7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293, # 6960
7294,7295,7296,4844,7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308, # 6976
7309,7310,7311,7312,7313,7314,7315,7316,4451,7317,7318,7319,7320,7321,7322,7323, # 6992
7324,7325,7326,7327,7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339, # 7008
7340,7341,7342,7343,7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,4173,7354, # 7024
7355,4845,7356,7357,7358,7359,7360,7361,7362,7363,7364,7365,7366,7367,7368,7369, # 7040
7370,7371,7372,7373,7374,7375,7376,7377,7378,7379,7380,7381,7382,7383,7384,7385, # 7056
7386,7387,7388,4846,7389,7390,7391,7392,7393,7394,7395,7396,7397,7398,7399,7400, # 7072
7401,7402,7403,7404,7405,3672,7406,7407,7408,7409,7410,7411,7412,7413,7414,7415, # 7088
7416,7417,7418,7419,7420,7421,7422,7423,7424,7425,7426,7427,7428,7429,7430,7431, # 7104
7432,7433,7434,7435,7436,7437,7438,7439,7440,7441,7442,7443,7444,7445,7446,7447, # 7120
7448,7449,7450,7451,7452,7453,4452,7454,3200,7455,7456,7457,7458,7459,7460,7461, # 7136
7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,7472,7473,7474,4847,7475,7476, # 7152
7477,3133,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,7488,7489,7490,7491, # 7168
7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,3347,7503,7504,7505,7506, # 7184
7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,7520,7521,4848, # 7200
7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,7536,7537, # 7216
7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,3801,4849,7550,7551, # 7232
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567, # 7248
7568,7569,3035,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582, # 7264
7583,7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598, # 7280
7599,7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614, # 7296
7615,7616,4850,7617,7618,3802,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628, # 7312
7629,7630,7631,7632,4851,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643, # 7328
7644,7645,7646,7647,7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659, # 7344
7660,7661,7662,7663,7664,7665,7666,7667,7668,7669,7670,4453,7671,7672,7673,7674, # 7360
7675,7676,7677,7678,7679,7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690, # 7376
7691,7692,7693,7694,7695,7696,7697,3443,7698,7699,7700,7701,7702,4454,7703,7704, # 7392
7705,7706,7707,7708,7709,7710,7711,7712,7713,2472,7714,7715,7716,7717,7718,7719, # 7408
7720,7721,7722,7723,7724,7725,7726,7727,7728,7729,7730,7731,3954,7732,7733,7734, # 7424
7735,7736,7737,7738,7739,7740,7741,7742,7743,7744,7745,7746,7747,7748,7749,7750, # 7440
3134,7751,7752,4852,7753,7754,7755,4853,7756,7757,7758,7759,7760,4174,7761,7762, # 7456
7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,7777,7778, # 7472
7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,7792,7793,7794, # 7488
7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,4854,7806,7807,7808,7809, # 7504
7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824,7825, # 7520
4855,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7536
7841,7842,7843,7844,7845,7846,7847,3955,7848,7849,7850,7851,7852,7853,7854,7855, # 7552
7856,7857,7858,7859,7860,3444,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870, # 7568
7871,7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886, # 7584
7887,7888,7889,7890,7891,4175,7892,7893,7894,7895,7896,4856,4857,7897,7898,7899, # 7600
7900,2598,7901,7902,7903,7904,7905,7906,7907,7908,4455,7909,7910,7911,7912,7913, # 7616
7914,3201,7915,7916,7917,7918,7919,7920,7921,4858,7922,7923,7924,7925,7926,7927, # 7632
7928,7929,7930,7931,7932,7933,7934,7935,7936,7937,7938,7939,7940,7941,7942,7943, # 7648
7944,7945,7946,7947,7948,7949,7950,7951,7952,7953,7954,7955,7956,7957,7958,7959, # 7664
7960,7961,7962,7963,7964,7965,7966,7967,7968,7969,7970,7971,7972,7973,7974,7975, # 7680
7976,7977,7978,7979,7980,7981,4859,7982,7983,7984,7985,7986,7987,7988,7989,7990, # 7696
7991,7992,7993,7994,7995,7996,4860,7997,7998,7999,8000,8001,8002,8003,8004,8005, # 7712
8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,8016,4176,8017,8018,8019,8020, # 7728
8021,8022,8023,4861,8024,8025,8026,8027,8028,8029,8030,8031,8032,8033,8034,8035, # 7744
8036,4862,4456,8037,8038,8039,8040,4863,8041,8042,8043,8044,8045,8046,8047,8048, # 7760
8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,8064, # 7776
8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,8080, # 7792
8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,8096, # 7808
8097,8098,8099,4864,4177,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110, # 7824
8111,8112,8113,8114,8115,8116,8117,8118,8119,8120,4178,8121,8122,8123,8124,8125, # 7840
8126,8127,8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141, # 7856
8142,8143,8144,8145,4865,4866,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155, # 7872
8156,8157,8158,8159,8160,8161,8162,8163,8164,8165,4179,8166,8167,8168,8169,8170, # 7888
8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181,4457,8182,8183,8184,8185, # 7904
8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197,8198,8199,8200,8201, # 7920
8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213,8214,8215,8216,8217, # 7936
8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229,8230,8231,8232,8233, # 7952
8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245,8246,8247,8248,8249, # 7968
8250,8251,8252,8253,8254,8255,8256,3445,8257,8258,8259,8260,8261,8262,4458,8263, # 7984
8264,8265,8266,8267,8268,8269,8270,8271,8272,4459,8273,8274,8275,8276,3550,8277, # 8000
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,4460,8290,8291,8292, # 8016
8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,4867, # 8032
8308,8309,8310,8311,8312,3551,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322, # 8048
8323,8324,8325,8326,4868,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337, # 8064
8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353, # 8080
8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,4869,4461,8364,8365,8366,8367, # 8096
8368,8369,8370,4870,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382, # 8112
8383,8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398, # 8128
8399,8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,4871,8411,8412,8413, # 8144
8414,8415,8416,8417,8418,8419,8420,8421,8422,4462,8423,8424,8425,8426,8427,8428, # 8160
8429,8430,8431,8432,8433,2986,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443, # 8176
8444,8445,8446,8447,8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459, # 8192
8460,8461,8462,8463,8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475, # 8208
8476,8477,8478,4180,8479,8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490, # 8224
8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506, # 8240
8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522, # 8256
8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538, # 8272
8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554, # 8288
8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,4872,8565,8566,8567,8568,8569, # 8304
8570,8571,8572,8573,4873,8574,8575,8576,8577,8578,8579,8580,8581,8582,8583,8584, # 8320
8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597,8598,8599,8600, # 8336
8601,8602,8603,8604,8605,3803,8606,8607,8608,8609,8610,8611,8612,8613,4874,3804, # 8352
8614,8615,8616,8617,8618,8619,8620,8621,3956,8622,8623,8624,8625,8626,8627,8628, # 8368
8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,2865,8639,8640,8641,8642,8643, # 8384
8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,4463,8657,8658, # 8400
8659,4875,4876,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672, # 8416
8673,8674,8675,8676,8677,8678,8679,8680,8681,4464,8682,8683,8684,8685,8686,8687, # 8432
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703, # 8448
8704,8705,8706,8707,8708,8709,2261,8710,8711,8712,8713,8714,8715,8716,8717,8718, # 8464
8719,8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,4181, # 8480
8734,8735,8736,8737,8738,8739,8740,8741,8742,8743,8744,8745,8746,8747,8748,8749, # 8496
8750,8751,8752,8753,8754,8755,8756,8757,8758,8759,8760,8761,8762,8763,4877,8764, # 8512
8765,8766,8767,8768,8769,8770,8771,8772,8773,8774,8775,8776,8777,8778,8779,8780, # 8528
8781,8782,8783,8784,8785,8786,8787,8788,4878,8789,4879,8790,8791,8792,4880,8793, # 8544
8794,8795,8796,8797,8798,8799,8800,8801,4881,8802,8803,8804,8805,8806,8807,8808, # 8560
8809,8810,8811,8812,8813,8814,8815,3957,8816,8817,8818,8819,8820,8821,8822,8823, # 8576
8824,8825,8826,8827,8828,8829,8830,8831,8832,8833,8834,8835,8836,8837,8838,8839, # 8592
8840,8841,8842,8843,8844,8845,8846,8847,4882,8848,8849,8850,8851,8852,8853,8854, # 8608
8855,8856,8857,8858,8859,8860,8861,8862,8863,8864,8865,8866,8867,8868,8869,8870, # 8624
8871,8872,8873,8874,8875,8876,8877,8878,8879,8880,8881,8882,8883,8884,3202,8885, # 8640
8886,8887,8888,8889,8890,8891,8892,8893,8894,8895,8896,8897,8898,8899,8900,8901, # 8656
8902,8903,8904,8905,8906,8907,8908,8909,8910,8911,8912,8913,8914,8915,8916,8917, # 8672
8918,8919,8920,8921,8922,8923,8924,4465,8925,8926,8927,8928,8929,8930,8931,8932, # 8688
4883,8933,8934,8935,8936,8937,8938,8939,8940,8941,8942,8943,2214,8944,8945,8946, # 8704
8947,8948,8949,8950,8951,8952,8953,8954,8955,8956,8957,8958,8959,8960,8961,8962, # 8720
8963,8964,8965,4884,8966,8967,8968,8969,8970,8971,8972,8973,8974,8975,8976,8977, # 8736
8978,8979,8980,8981,8982,8983,8984,8985,8986,8987,8988,8989,8990,8991,8992,4885, # 8752
8993,8994,8995,8996,8997,8998,8999,9000,9001,9002,9003,9004,9005,9006,9007,9008, # 8768
9009,9010,9011,9012,9013,9014,9015,9016,9017,9018,9019,9020,9021,4182,9022,9023, # 8784
9024,9025,9026,9027,9028,9029,9030,9031,9032,9033,9034,9035,9036,9037,9038,9039, # 8800
9040,9041,9042,9043,9044,9045,9046,9047,9048,9049,9050,9051,9052,9053,9054,9055, # 8816
9056,9057,9058,9059,9060,9061,9062,9063,4886,9064,9065,9066,9067,9068,9069,4887, # 8832
9070,9071,9072,9073,9074,9075,9076,9077,9078,9079,9080,9081,9082,9083,9084,9085, # 8848
9086,9087,9088,9089,9090,9091,9092,9093,9094,9095,9096,9097,9098,9099,9100,9101, # 8864
9102,9103,9104,9105,9106,9107,9108,9109,9110,9111,9112,9113,9114,9115,9116,9117, # 8880
9118,9119,9120,9121,9122,9123,9124,9125,9126,9127,9128,9129,9130,9131,9132,9133, # 8896
9134,9135,9136,9137,9138,9139,9140,9141,3958,9142,9143,9144,9145,9146,9147,9148, # 8912
9149,9150,9151,4888,9152,9153,9154,9155,9156,9157,9158,9159,9160,9161,9162,9163, # 8928
9164,9165,9166,9167,9168,9169,9170,9171,9172,9173,9174,9175,4889,9176,9177,9178, # 8944
9179,9180,9181,9182,9183,9184,9185,9186,9187,9188,9189,9190,9191,9192,9193,9194, # 8960
9195,9196,9197,9198,9199,9200,9201,9202,9203,4890,9204,9205,9206,9207,9208,9209, # 8976
9210,9211,9212,9213,9214,9215,9216,9217,9218,9219,9220,9221,9222,4466,9223,9224, # 8992
9225,9226,9227,9228,9229,9230,9231,9232,9233,9234,9235,9236,9237,9238,9239,9240, # 9008
9241,9242,9243,9244,9245,4891,9246,9247,9248,9249,9250,9251,9252,9253,9254,9255, # 9024
9256,9257,4892,9258,9259,9260,9261,4893,4894,9262,9263,9264,9265,9266,9267,9268, # 9040
9269,9270,9271,9272,9273,4467,9274,9275,9276,9277,9278,9279,9280,9281,9282,9283, # 9056
9284,9285,3673,9286,9287,9288,9289,9290,9291,9292,9293,9294,9295,9296,9297,9298, # 9072
9299,9300,9301,9302,9303,9304,9305,9306,9307,9308,9309,9310,9311,9312,9313,9314, # 9088
9315,9316,9317,9318,9319,9320,9321,9322,4895,9323,9324,9325,9326,9327,9328,9329, # 9104
9330,9331,9332,9333,9334,9335,9336,9337,9338,9339,9340,9341,9342,9343,9344,9345, # 9120
9346,9347,4468,9348,9349,9350,9351,9352,9353,9354,9355,9356,9357,9358,9359,9360, # 9136
9361,9362,9363,9364,9365,9366,9367,9368,9369,9370,9371,9372,9373,4896,9374,4469, # 9152
9375,9376,9377,9378,9379,4897,9380,9381,9382,9383,9384,9385,9386,9387,9388,9389, # 9168
9390,9391,9392,9393,9394,9395,9396,9397,9398,9399,9400,9401,9402,9403,9404,9405, # 9184
9406,4470,9407,2751,9408,9409,3674,3552,9410,9411,9412,9413,9414,9415,9416,9417, # 9200
9418,9419,9420,9421,4898,9422,9423,9424,9425,9426,9427,9428,9429,3959,9430,9431, # 9216
9432,9433,9434,9435,9436,4471,9437,9438,9439,9440,9441,9442,9443,9444,9445,9446, # 9232
9447,9448,9449,9450,3348,9451,9452,9453,9454,9455,9456,9457,9458,9459,9460,9461, # 9248
9462,9463,9464,9465,9466,9467,9468,9469,9470,9471,9472,4899,9473,9474,9475,9476, # 9264
9477,4900,9478,9479,9480,9481,9482,9483,9484,9485,9486,9487,9488,3349,9489,9490, # 9280
9491,9492,9493,9494,9495,9496,9497,9498,9499,9500,9501,9502,9503,9504,9505,9506, # 9296
9507,9508,9509,9510,9511,9512,9513,9514,9515,9516,9517,9518,9519,9520,4901,9521, # 9312
9522,9523,9524,9525,9526,4902,9527,9528,9529,9530,9531,9532,9533,9534,9535,9536, # 9328
9537,9538,9539,9540,9541,9542,9543,9544,9545,9546,9547,9548,9549,9550,9551,9552, # 9344
9553,9554,9555,9556,9557,9558,9559,9560,9561,9562,9563,9564,9565,9566,9567,9568, # 9360
9569,9570,9571,9572,9573,9574,9575,9576,9577,9578,9579,9580,9581,9582,9583,9584, # 9376
3805,9585,9586,9587,9588,9589,9590,9591,9592,9593,9594,9595,9596,9597,9598,9599, # 9392
9600,9601,9602,4903,9603,9604,9605,9606,9607,4904,9608,9609,9610,9611,9612,9613, # 9408
9614,4905,9615,9616,9617,9618,9619,9620,9621,9622,9623,9624,9625,9626,9627,9628, # 9424
9629,9630,9631,9632,4906,9633,9634,9635,9636,9637,9638,9639,9640,9641,9642,9643, # 9440
4907,9644,9645,9646,9647,9648,9649,9650,9651,9652,9653,9654,9655,9656,9657,9658, # 9456
9659,9660,9661,9662,9663,9664,9665,9666,9667,9668,9669,9670,9671,9672,4183,9673, # 9472
9674,9675,9676,9677,4908,9678,9679,9680,9681,4909,9682,9683,9684,9685,9686,9687, # 9488
9688,9689,9690,4910,9691,9692,9693,3675,9694,9695,9696,2945,9697,9698,9699,9700, # 9504
9701,9702,9703,9704,9705,4911,9706,9707,9708,9709,9710,9711,9712,9713,9714,9715, # 9520
9716,9717,9718,9719,9720,9721,9722,9723,9724,9725,9726,9727,9728,9729,9730,9731, # 9536
9732,9733,9734,9735,4912,9736,9737,9738,9739,9740,4913,9741,9742,9743,9744,9745, # 9552
9746,9747,9748,9749,9750,9751,9752,9753,9754,9755,9756,9757,9758,4914,9759,9760, # 9568
9761,9762,9763,9764,9765,9766,9767,9768,9769,9770,9771,9772,9773,9774,9775,9776, # 9584
9777,9778,9779,9780,9781,9782,4915,9783,9784,9785,9786,9787,9788,9789,9790,9791, # 9600
9792,9793,4916,9794,9795,9796,9797,9798,9799,9800,9801,9802,9803,9804,9805,9806, # 9616
9807,9808,9809,9810,9811,9812,9813,9814,9815,9816,9817,9818,9819,9820,9821,9822, # 9632
9823,9824,9825,9826,9827,9828,9829,9830,9831,9832,9833,9834,9835,9836,9837,9838, # 9648
9839,9840,9841,9842,9843,9844,9845,9846,9847,9848,9849,9850,9851,9852,9853,9854, # 9664
9855,9856,9857,9858,9859,9860,9861,9862,9863,9864,9865,9866,9867,9868,4917,9869, # 9680
9870,9871,9872,9873,9874,9875,9876,9877,9878,9879,9880,9881,9882,9883,9884,9885, # 9696
9886,9887,9888,9889,9890,9891,9892,4472,9893,9894,9895,9896,9897,3806,9898,9899, # 9712
9900,9901,9902,9903,9904,9905,9906,9907,9908,9909,9910,9911,9912,9913,9914,4918, # 9728
9915,9916,9917,4919,9918,9919,9920,9921,4184,9922,9923,9924,9925,9926,9927,9928, # 9744
9929,9930,9931,9932,9933,9934,9935,9936,9937,9938,9939,9940,9941,9942,9943,9944, # 9760
9945,9946,4920,9947,9948,9949,9950,9951,9952,9953,9954,9955,4185,9956,9957,9958, # 9776
9959,9960,9961,9962,9963,9964,9965,4921,9966,9967,9968,4473,9969,9970,9971,9972, # 9792
9973,9974,9975,9976,9977,4474,9978,9979,9980,9981,9982,9983,9984,9985,9986,9987, # 9808
9988,9989,9990,9991,9992,9993,9994,9995,9996,9997,9998,9999,10000,10001,10002,10003, # 9824
10004,10005,10006,10007,10008,10009,10010,10011,10012,10013,10014,10015,10016,10017,10018,10019, # 9840
10020,10021,4922,10022,4923,10023,10024,10025,10026,10027,10028,10029,10030,10031,10032,10033, # 9856
10034,10035,10036,10037,10038,10039,10040,10041,10042,10043,10044,10045,10046,10047,10048,4924, # 9872
10049,10050,10051,10052,10053,10054,10055,10056,10057,10058,10059,10060,10061,10062,10063,10064, # 9888
10065,10066,10067,10068,10069,10070,10071,10072,10073,10074,10075,10076,10077,10078,10079,10080, # 9904
10081,10082,10083,10084,10085,10086,10087,4475,10088,10089,10090,10091,10092,10093,10094,10095, # 9920
10096,10097,4476,10098,10099,10100,10101,10102,10103,10104,10105,10106,10107,10108,10109,10110, # 9936
10111,2174,10112,10113,10114,10115,10116,10117,10118,10119,10120,10121,10122,10123,10124,10125, # 9952
10126,10127,10128,10129,10130,10131,10132,10133,10134,10135,10136,10137,10138,10139,10140,3807, # 9968
4186,4925,10141,10142,10143,10144,10145,10146,10147,4477,4187,10148,10149,10150,10151,10152, # 9984
10153,4188,10154,10155,10156,10157,10158,10159,10160,10161,4926,10162,10163,10164,10165,10166, #10000
10167,10168,10169,10170,10171,10172,10173,10174,10175,10176,10177,10178,10179,10180,10181,10182, #10016
10183,10184,10185,10186,10187,10188,10189,10190,10191,10192,3203,10193,10194,10195,10196,10197, #10032
10198,10199,10200,4478,10201,10202,10203,10204,4479,10205,10206,10207,10208,10209,10210,10211, #10048
10212,10213,10214,10215,10216,10217,10218,10219,10220,10221,10222,10223,10224,10225,10226,10227, #10064
10228,10229,10230,10231,10232,10233,10234,4927,10235,10236,10237,10238,10239,10240,10241,10242, #10080
10243,10244,10245,10246,10247,10248,10249,10250,10251,10252,10253,10254,10255,10256,10257,10258, #10096
10259,10260,10261,10262,10263,10264,10265,10266,10267,10268,10269,10270,10271,10272,10273,4480, #10112
4928,4929,10274,10275,10276,10277,10278,10279,10280,10281,10282,10283,10284,10285,10286,10287, #10128
10288,10289,10290,10291,10292,10293,10294,10295,10296,10297,10298,10299,10300,10301,10302,10303, #10144
10304,10305,10306,10307,10308,10309,10310,10311,10312,10313,10314,10315,10316,10317,10318,10319, #10160
10320,10321,10322,10323,10324,10325,10326,10327,10328,10329,10330,10331,10332,10333,10334,4930, #10176
10335,10336,10337,10338,10339,10340,10341,10342,4931,10343,10344,10345,10346,10347,10348,10349, #10192
10350,10351,10352,10353,10354,10355,3088,10356,2786,10357,10358,10359,10360,4189,10361,10362, #10208
10363,10364,10365,10366,10367,10368,10369,10370,10371,10372,10373,10374,10375,4932,10376,10377, #10224
10378,10379,10380,10381,10382,10383,10384,10385,10386,10387,10388,10389,10390,10391,10392,4933, #10240
10393,10394,10395,4934,10396,10397,10398,10399,10400,10401,10402,10403,10404,10405,10406,10407, #10256
10408,10409,10410,10411,10412,3446,10413,10414,10415,10416,10417,10418,10419,10420,10421,10422, #10272
10423,4935,10424,10425,10426,10427,10428,10429,10430,4936,10431,10432,10433,10434,10435,10436, #10288
10437,10438,10439,10440,10441,10442,10443,4937,10444,10445,10446,10447,4481,10448,10449,10450, #10304
10451,10452,10453,10454,10455,10456,10457,10458,10459,10460,10461,10462,10463,10464,10465,10466, #10320
10467,10468,10469,10470,10471,10472,10473,10474,10475,10476,10477,10478,10479,10480,10481,10482, #10336
10483,10484,10485,10486,10487,10488,10489,10490,10491,10492,10493,10494,10495,10496,10497,10498, #10352
10499,10500,10501,10502,10503,10504,10505,4938,10506,10507,10508,10509,10510,2552,10511,10512, #10368
10513,10514,10515,10516,3447,10517,10518,10519,10520,10521,10522,10523,10524,10525,10526,10527, #10384
10528,10529,10530,10531,10532,10533,10534,10535,10536,10537,10538,10539,10540,10541,10542,10543, #10400
4482,10544,4939,10545,10546,10547,10548,10549,10550,10551,10552,10553,10554,10555,10556,10557, #10416
10558,10559,10560,10561,10562,10563,10564,10565,10566,10567,3676,4483,10568,10569,10570,10571, #10432
10572,3448,10573,10574,10575,10576,10577,10578,10579,10580,10581,10582,10583,10584,10585,10586, #10448
10587,10588,10589,10590,10591,10592,10593,10594,10595,10596,10597,10598,10599,10600,10601,10602, #10464
10603,10604,10605,10606,10607,10608,10609,10610,10611,10612,10613,10614,10615,10616,10617,10618, #10480
10619,10620,10621,10622,10623,10624,10625,10626,10627,4484,10628,10629,10630,10631,10632,4940, #10496
10633,10634,10635,10636,10637,10638,10639,10640,10641,10642,10643,10644,10645,10646,10647,10648, #10512
10649,10650,10651,10652,10653,10654,10655,10656,4941,10657,10658,10659,2599,10660,10661,10662, #10528
10663,10664,10665,10666,3089,10667,10668,10669,10670,10671,10672,10673,10674,10675,10676,10677, #10544
10678,10679,10680,4942,10681,10682,10683,10684,10685,10686,10687,10688,10689,10690,10691,10692, #10560
10693,10694,10695,10696,10697,4485,10698,10699,10700,10701,10702,10703,10704,4943,10705,3677, #10576
10706,10707,10708,10709,10710,10711,10712,4944,10713,10714,10715,10716,10717,10718,10719,10720, #10592
10721,10722,10723,10724,10725,10726,10727,10728,4945,10729,10730,10731,10732,10733,10734,10735, #10608
10736,10737,10738,10739,10740,10741,10742,10743,10744,10745,10746,10747,10748,10749,10750,10751, #10624
10752,10753,10754,10755,10756,10757,10758,10759,10760,10761,4946,10762,10763,10764,10765,10766, #10640
10767,4947,4948,10768,10769,10770,10771,10772,10773,10774,10775,10776,10777,10778,10779,10780, #10656
10781,10782,10783,10784,10785,10786,10787,10788,10789,10790,10791,10792,10793,10794,10795,10796, #10672
10797,10798,10799,10800,10801,10802,10803,10804,10805,10806,10807,10808,10809,10810,10811,10812, #10688
10813,10814,10815,10816,10817,10818,10819,10820,10821,10822,10823,10824,10825,10826,10827,10828, #10704
10829,10830,10831,10832,10833,10834,10835,10836,10837,10838,10839,10840,10841,10842,10843,10844, #10720
10845,10846,10847,10848,10849,10850,10851,10852,10853,10854,10855,10856,10857,10858,10859,10860, #10736
10861,10862,10863,10864,10865,10866,10867,10868,10869,10870,10871,10872,10873,10874,10875,10876, #10752
10877,10878,4486,10879,10880,10881,10882,10883,10884,10885,4949,10886,10887,10888,10889,10890, #10768
10891,10892,10893,10894,10895,10896,10897,10898,10899,10900,10901,10902,10903,10904,10905,10906, #10784
10907,10908,10909,10910,10911,10912,10913,10914,10915,10916,10917,10918,10919,4487,10920,10921, #10800
10922,10923,10924,10925,10926,10927,10928,10929,10930,10931,10932,4950,10933,10934,10935,10936, #10816
10937,10938,10939,10940,10941,10942,10943,10944,10945,10946,10947,10948,10949,4488,10950,10951, #10832
10952,10953,10954,10955,10956,10957,10958,10959,4190,10960,10961,10962,10963,10964,10965,10966, #10848
10967,10968,10969,10970,10971,10972,10973,10974,10975,10976,10977,10978,10979,10980,10981,10982, #10864
10983,10984,10985,10986,10987,10988,10989,10990,10991,10992,10993,10994,10995,10996,10997,10998, #10880
10999,11000,11001,11002,11003,11004,11005,11006,3960,11007,11008,11009,11010,11011,11012,11013, #10896
11014,11015,11016,11017,11018,11019,11020,11021,11022,11023,11024,11025,11026,11027,11028,11029, #10912
11030,11031,11032,4951,11033,11034,11035,11036,11037,11038,11039,11040,11041,11042,11043,11044, #10928
11045,11046,11047,4489,11048,11049,11050,11051,4952,11052,11053,11054,11055,11056,11057,11058, #10944
4953,11059,11060,11061,11062,11063,11064,11065,11066,11067,11068,11069,11070,11071,4954,11072, #10960
11073,11074,11075,11076,11077,11078,11079,11080,11081,11082,11083,11084,11085,11086,11087,11088, #10976
11089,11090,11091,11092,11093,11094,11095,11096,11097,11098,11099,11100,11101,11102,11103,11104, #10992
11105,11106,11107,11108,11109,11110,11111,11112,11113,11114,11115,3808,11116,11117,11118,11119, #11008
11120,11121,11122,11123,11124,11125,11126,11127,11128,11129,11130,11131,11132,11133,11134,4955, #11024
11135,11136,11137,11138,11139,11140,11141,11142,11143,11144,11145,11146,11147,11148,11149,11150, #11040
11151,11152,11153,11154,11155,11156,11157,11158,11159,11160,11161,4956,11162,11163,11164,11165, #11056
11166,11167,11168,11169,11170,11171,11172,11173,11174,11175,11176,11177,11178,11179,11180,4957, #11072
11181,11182,11183,11184,11185,11186,4958,11187,11188,11189,11190,11191,11192,11193,11194,11195, #11088
11196,11197,11198,11199,11200,3678,11201,11202,11203,11204,11205,11206,4191,11207,11208,11209, #11104
11210,11211,11212,11213,11214,11215,11216,11217,11218,11219,11220,11221,11222,11223,11224,11225, #11120
11226,11227,11228,11229,11230,11231,11232,11233,11234,11235,11236,11237,11238,11239,11240,11241, #11136
11242,11243,11244,11245,11246,11247,11248,11249,11250,11251,4959,11252,11253,11254,11255,11256, #11152
11257,11258,11259,11260,11261,11262,11263,11264,11265,11266,11267,11268,11269,11270,11271,11272, #11168
11273,11274,11275,11276,11277,11278,11279,11280,11281,11282,11283,11284,11285,11286,11287,11288, #11184
11289,11290,11291,11292,11293,11294,11295,11296,11297,11298,11299,11300,11301,11302,11303,11304, #11200
11305,11306,11307,11308,11309,11310,11311,11312,11313,11314,3679,11315,11316,11317,11318,4490, #11216
11319,11320,11321,11322,11323,11324,11325,11326,11327,11328,11329,11330,11331,11332,11333,11334, #11232
11335,11336,11337,11338,11339,11340,11341,11342,11343,11344,11345,11346,11347,4960,11348,11349, #11248
11350,11351,11352,11353,11354,11355,11356,11357,11358,11359,11360,11361,11362,11363,11364,11365, #11264
11366,11367,11368,11369,11370,11371,11372,11373,11374,11375,11376,11377,3961,4961,11378,11379, #11280
11380,11381,11382,11383,11384,11385,11386,11387,11388,11389,11390,11391,11392,11393,11394,11395, #11296
11396,11397,4192,11398,11399,11400,11401,11402,11403,11404,11405,11406,11407,11408,11409,11410, #11312
11411,4962,11412,11413,11414,11415,11416,11417,11418,11419,11420,11421,11422,11423,11424,11425, #11328
11426,11427,11428,11429,11430,11431,11432,11433,11434,11435,11436,11437,11438,11439,11440,11441, #11344
11442,11443,11444,11445,11446,11447,11448,11449,11450,11451,11452,11453,11454,11455,11456,11457, #11360
11458,11459,11460,11461,11462,11463,11464,11465,11466,11467,11468,11469,4963,11470,11471,4491, #11376
11472,11473,11474,11475,4964,11476,11477,11478,11479,11480,11481,11482,11483,11484,11485,11486, #11392
11487,11488,11489,11490,11491,11492,4965,11493,11494,11495,11496,11497,11498,11499,11500,11501, #11408
11502,11503,11504,11505,11506,11507,11508,11509,11510,11511,11512,11513,11514,11515,11516,11517, #11424
11518,11519,11520,11521,11522,11523,11524,11525,11526,11527,11528,11529,3962,11530,11531,11532, #11440
11533,11534,11535,11536,11537,11538,11539,11540,11541,11542,11543,11544,11545,11546,11547,11548, #11456
11549,11550,11551,11552,11553,11554,11555,11556,11557,11558,11559,11560,11561,11562,11563,11564, #11472
4193,4194,11565,11566,11567,11568,11569,11570,11571,11572,11573,11574,11575,11576,11577,11578, #11488
11579,11580,11581,11582,11583,11584,11585,11586,11587,11588,11589,11590,11591,4966,4195,11592, #11504
11593,11594,11595,11596,11597,11598,11599,11600,11601,11602,11603,11604,3090,11605,11606,11607, #11520
11608,11609,11610,4967,11611,11612,11613,11614,11615,11616,11617,11618,11619,11620,11621,11622, #11536
11623,11624,11625,11626,11627,11628,11629,11630,11631,11632,11633,11634,11635,11636,11637,11638, #11552
11639,11640,11641,11642,11643,11644,11645,11646,11647,11648,11649,11650,11651,11652,11653,11654, #11568
11655,11656,11657,11658,11659,11660,11661,11662,11663,11664,11665,11666,11667,11668,11669,11670, #11584
11671,11672,11673,11674,4968,11675,11676,11677,11678,11679,11680,11681,11682,11683,11684,11685, #11600
11686,11687,11688,11689,11690,11691,11692,11693,3809,11694,11695,11696,11697,11698,11699,11700, #11616
11701,11702,11703,11704,11705,11706,11707,11708,11709,11710,11711,11712,11713,11714,11715,11716, #11632
11717,11718,3553,11719,11720,11721,11722,11723,11724,11725,11726,11727,11728,11729,11730,4969, #11648
11731,11732,11733,11734,11735,11736,11737,11738,11739,11740,4492,11741,11742,11743,11744,11745, #11664
11746,11747,11748,11749,11750,11751,11752,4970,11753,11754,11755,11756,11757,11758,11759,11760, #11680
11761,11762,11763,11764,11765,11766,11767,11768,11769,11770,11771,11772,11773,11774,11775,11776, #11696
11777,11778,11779,11780,11781,11782,11783,11784,11785,11786,11787,11788,11789,11790,4971,11791, #11712
11792,11793,11794,11795,11796,11797,4972,11798,11799,11800,11801,11802,11803,11804,11805,11806, #11728
11807,11808,11809,11810,4973,11811,11812,11813,11814,11815,11816,11817,11818,11819,11820,11821, #11744
11822,11823,11824,11825,11826,11827,11828,11829,11830,11831,11832,11833,11834,3680,3810,11835, #11760
11836,4974,11837,11838,11839,11840,11841,11842,11843,11844,11845,11846,11847,11848,11849,11850, #11776
11851,11852,11853,11854,11855,11856,11857,11858,11859,11860,11861,11862,11863,11864,11865,11866, #11792
11867,11868,11869,11870,11871,11872,11873,11874,11875,11876,11877,11878,11879,11880,11881,11882, #11808
11883,11884,4493,11885,11886,11887,11888,11889,11890,11891,11892,11893,11894,11895,11896,11897, #11824
11898,11899,11900,11901,11902,11903,11904,11905,11906,11907,11908,11909,11910,11911,11912,11913, #11840
11914,11915,4975,11916,11917,11918,11919,11920,11921,11922,11923,11924,11925,11926,11927,11928, #11856
11929,11930,11931,11932,11933,11934,11935,11936,11937,11938,11939,11940,11941,11942,11943,11944, #11872
11945,11946,11947,11948,11949,4976,11950,11951,11952,11953,11954,11955,11956,11957,11958,11959, #11888
11960,11961,11962,11963,11964,11965,11966,11967,11968,11969,11970,11971,11972,11973,11974,11975, #11904
11976,11977,11978,11979,11980,11981,11982,11983,11984,11985,11986,11987,4196,11988,11989,11990, #11920
11991,11992,4977,11993,11994,11995,11996,11997,11998,11999,12000,12001,12002,12003,12004,12005, #11936
12006,12007,12008,12009,12010,12011,12012,12013,12014,12015,12016,12017,12018,12019,12020,12021, #11952
12022,12023,12024,12025,12026,12027,12028,12029,12030,12031,12032,12033,12034,12035,12036,12037, #11968
12038,12039,12040,12041,12042,12043,12044,12045,12046,12047,12048,12049,12050,12051,12052,12053, #11984
12054,12055,12056,12057,12058,12059,12060,12061,4978,12062,12063,12064,12065,12066,12067,12068, #12000
12069,12070,12071,12072,12073,12074,12075,12076,12077,12078,12079,12080,12081,12082,12083,12084, #12016
12085,12086,12087,12088,12089,12090,12091,12092,12093,12094,12095,12096,12097,12098,12099,12100, #12032
12101,12102,12103,12104,12105,12106,12107,12108,12109,12110,12111,12112,12113,12114,12115,12116, #12048
12117,12118,12119,12120,12121,12122,12123,4979,12124,12125,12126,12127,12128,4197,12129,12130, #12064
12131,12132,12133,12134,12135,12136,12137,12138,12139,12140,12141,12142,12143,12144,12145,12146, #12080
12147,12148,12149,12150,12151,12152,12153,12154,4980,12155,12156,12157,12158,12159,12160,4494, #12096
12161,12162,12163,12164,3811,12165,12166,12167,12168,12169,4495,12170,12171,4496,12172,12173, #12112
12174,12175,12176,3812,12177,12178,12179,12180,12181,12182,12183,12184,12185,12186,12187,12188, #12128
12189,12190,12191,12192,12193,12194,12195,12196,12197,12198,12199,12200,12201,12202,12203,12204, #12144
12205,12206,12207,12208,12209,12210,12211,12212,12213,12214,12215,12216,12217,12218,12219,12220, #12160
12221,4981,12222,12223,12224,12225,12226,12227,12228,12229,12230,12231,12232,12233,12234,12235, #12176
4982,12236,12237,12238,12239,12240,12241,12242,12243,12244,12245,4983,12246,12247,12248,12249, #12192
4984,12250,12251,12252,12253,12254,12255,12256,12257,12258,12259,12260,12261,12262,12263,12264, #12208
4985,12265,4497,12266,12267,12268,12269,12270,12271,12272,12273,12274,12275,12276,12277,12278, #12224
12279,12280,12281,12282,12283,12284,12285,12286,12287,4986,12288,12289,12290,12291,12292,12293, #12240
12294,12295,12296,2473,12297,12298,12299,12300,12301,12302,12303,12304,12305,12306,12307,12308, #12256
12309,12310,12311,12312,12313,12314,12315,12316,12317,12318,12319,3963,12320,12321,12322,12323, #12272
12324,12325,12326,12327,12328,12329,12330,12331,12332,4987,12333,12334,12335,12336,12337,12338, #12288
12339,12340,12341,12342,12343,12344,12345,12346,12347,12348,12349,12350,12351,12352,12353,12354, #12304
12355,12356,12357,12358,12359,3964,12360,12361,12362,12363,12364,12365,12366,12367,12368,12369, #12320
12370,3965,12371,12372,12373,12374,12375,12376,12377,12378,12379,12380,12381,12382,12383,12384, #12336
12385,12386,12387,12388,12389,12390,12391,12392,12393,12394,12395,12396,12397,12398,12399,12400, #12352
12401,12402,12403,12404,12405,12406,12407,12408,4988,12409,12410,12411,12412,12413,12414,12415, #12368
12416,12417,12418,12419,12420,12421,12422,12423,12424,12425,12426,12427,12428,12429,12430,12431, #12384
12432,12433,12434,12435,12436,12437,12438,3554,12439,12440,12441,12442,12443,12444,12445,12446, #12400
12447,12448,12449,12450,12451,12452,12453,12454,12455,12456,12457,12458,12459,12460,12461,12462, #12416
12463,12464,4989,12465,12466,12467,12468,12469,12470,12471,12472,12473,12474,12475,12476,12477, #12432
12478,12479,12480,4990,12481,12482,12483,12484,12485,12486,12487,12488,12489,4498,12490,12491, #12448
12492,12493,12494,12495,12496,12497,12498,12499,12500,12501,12502,12503,12504,12505,12506,12507, #12464
12508,12509,12510,12511,12512,12513,12514,12515,12516,12517,12518,12519,12520,12521,12522,12523, #12480
12524,12525,12526,12527,12528,12529,12530,12531,12532,12533,12534,12535,12536,12537,12538,12539, #12496
12540,12541,12542,12543,12544,12545,12546,12547,12548,12549,12550,12551,4991,12552,12553,12554, #12512
12555,12556,12557,12558,12559,12560,12561,12562,12563,12564,12565,12566,12567,12568,12569,12570, #12528
12571,12572,12573,12574,12575,12576,12577,12578,3036,12579,12580,12581,12582,12583,3966,12584, #12544
12585,12586,12587,12588,12589,12590,12591,12592,12593,12594,12595,12596,12597,12598,12599,12600, #12560
12601,12602,12603,12604,12605,12606,12607,12608,12609,12610,12611,12612,12613,12614,12615,12616, #12576
12617,12618,12619,12620,12621,12622,12623,12624,12625,12626,12627,12628,12629,12630,12631,12632, #12592
12633,12634,12635,12636,12637,12638,12639,12640,12641,12642,12643,12644,12645,12646,4499,12647, #12608
12648,12649,12650,12651,12652,12653,12654,12655,12656,12657,12658,12659,12660,12661,12662,12663, #12624
12664,12665,12666,12667,12668,12669,12670,12671,12672,12673,12674,12675,12676,12677,12678,12679, #12640
12680,12681,12682,12683,12684,12685,12686,12687,12688,12689,12690,12691,12692,12693,12694,12695, #12656
12696,12697,12698,4992,12699,12700,12701,12702,12703,12704,12705,12706,12707,12708,12709,12710, #12672
12711,12712,12713,12714,12715,12716,12717,12718,12719,12720,12721,12722,12723,12724,12725,12726, #12688
12727,12728,12729,12730,12731,12732,12733,12734,12735,12736,12737,12738,12739,12740,12741,12742, #12704
12743,12744,12745,12746,12747,12748,12749,12750,12751,12752,12753,12754,12755,12756,12757,12758, #12720
12759,12760,12761,12762,12763,12764,12765,12766,12767,12768,12769,12770,12771,12772,12773,12774, #12736
12775,12776,12777,12778,4993,2175,12779,12780,12781,12782,12783,12784,12785,12786,4500,12787, #12752
12788,12789,12790,12791,12792,12793,12794,12795,12796,12797,12798,12799,12800,12801,12802,12803, #12768
12804,12805,12806,12807,12808,12809,12810,12811,12812,12813,12814,12815,12816,12817,12818,12819, #12784
12820,12821,12822,12823,12824,12825,12826,4198,3967,12827,12828,12829,12830,12831,12832,12833, #12800
12834,12835,12836,12837,12838,12839,12840,12841,12842,12843,12844,12845,12846,12847,12848,12849, #12816
12850,12851,12852,12853,12854,12855,12856,12857,12858,12859,12860,12861,4199,12862,12863,12864, #12832
12865,12866,12867,12868,12869,12870,12871,12872,12873,12874,12875,12876,12877,12878,12879,12880, #12848
12881,12882,12883,12884,12885,12886,12887,4501,12888,12889,12890,12891,12892,12893,12894,12895, #12864
12896,12897,12898,12899,12900,12901,12902,12903,12904,12905,12906,12907,12908,12909,12910,12911, #12880
12912,4994,12913,12914,12915,12916,12917,12918,12919,12920,12921,12922,12923,12924,12925,12926, #12896
12927,12928,12929,12930,12931,12932,12933,12934,12935,12936,12937,12938,12939,12940,12941,12942, #12912
12943,12944,12945,12946,12947,12948,12949,12950,12951,12952,12953,12954,12955,12956,1772,12957, #12928
12958,12959,12960,12961,12962,12963,12964,12965,12966,12967,12968,12969,12970,12971,12972,12973, #12944
12974,12975,12976,12977,12978,12979,12980,12981,12982,12983,12984,12985,12986,12987,12988,12989, #12960
12990,12991,12992,12993,12994,12995,12996,12997,4502,12998,4503,12999,13000,13001,13002,13003, #12976
4504,13004,13005,13006,13007,13008,13009,13010,13011,13012,13013,13014,13015,13016,13017,13018, #12992
13019,13020,13021,13022,13023,13024,13025,13026,13027,13028,13029,3449,13030,13031,13032,13033, #13008
13034,13035,13036,13037,13038,13039,13040,13041,13042,13043,13044,13045,13046,13047,13048,13049, #13024
13050,13051,13052,13053,13054,13055,13056,13057,13058,13059,13060,13061,13062,13063,13064,13065, #13040
13066,13067,13068,13069,13070,13071,13072,13073,13074,13075,13076,13077,13078,13079,13080,13081, #13056
13082,13083,13084,13085,13086,13087,13088,13089,13090,13091,13092,13093,13094,13095,13096,13097, #13072
13098,13099,13100,13101,13102,13103,13104,13105,13106,13107,13108,13109,13110,13111,13112,13113, #13088
13114,13115,13116,13117,13118,3968,13119,4995,13120,13121,13122,13123,13124,13125,13126,13127, #13104
4505,13128,13129,13130,13131,13132,13133,13134,4996,4506,13135,13136,13137,13138,13139,4997, #13120
13140,13141,13142,13143,13144,13145,13146,13147,13148,13149,13150,13151,13152,13153,13154,13155, #13136
13156,13157,13158,13159,4998,13160,13161,13162,13163,13164,13165,13166,13167,13168,13169,13170, #13152
13171,13172,13173,13174,13175,13176,4999,13177,13178,13179,13180,13181,13182,13183,13184,13185, #13168
13186,13187,13188,13189,13190,13191,13192,13193,13194,13195,13196,13197,13198,13199,13200,13201, #13184
13202,13203,13204,13205,13206,5000,13207,13208,13209,13210,13211,13212,13213,13214,13215,13216, #13200
13217,13218,13219,13220,13221,13222,13223,13224,13225,13226,13227,4200,5001,13228,13229,13230, #13216
13231,13232,13233,13234,13235,13236,13237,13238,13239,13240,3969,13241,13242,13243,13244,3970, #13232
13245,13246,13247,13248,13249,13250,13251,13252,13253,13254,13255,13256,13257,13258,13259,13260, #13248
13261,13262,13263,13264,13265,13266,13267,13268,3450,13269,13270,13271,13272,13273,13274,13275, #13264
13276,5002,13277,13278,13279,13280,13281,13282,13283,13284,13285,13286,13287,13288,13289,13290, #13280
13291,13292,13293,13294,13295,13296,13297,13298,13299,13300,13301,13302,3813,13303,13304,13305, #13296
13306,13307,13308,13309,13310,13311,13312,13313,13314,13315,13316,13317,13318,13319,13320,13321, #13312
13322,13323,13324,13325,13326,13327,13328,4507,13329,13330,13331,13332,13333,13334,13335,13336, #13328
13337,13338,13339,13340,13341,5003,13342,13343,13344,13345,13346,13347,13348,13349,13350,13351, #13344
13352,13353,13354,13355,13356,13357,13358,13359,13360,13361,13362,13363,13364,13365,13366,13367, #13360
5004,13368,13369,13370,13371,13372,13373,13374,13375,13376,13377,13378,13379,13380,13381,13382, #13376
13383,13384,13385,13386,13387,13388,13389,13390,13391,13392,13393,13394,13395,13396,13397,13398, #13392
13399,13400,13401,13402,13403,13404,13405,13406,13407,13408,13409,13410,13411,13412,13413,13414, #13408
13415,13416,13417,13418,13419,13420,13421,13422,13423,13424,13425,13426,13427,13428,13429,13430, #13424
13431,13432,4508,13433,13434,13435,4201,13436,13437,13438,13439,13440,13441,13442,13443,13444, #13440
13445,13446,13447,13448,13449,13450,13451,13452,13453,13454,13455,13456,13457,5005,13458,13459, #13456
13460,13461,13462,13463,13464,13465,13466,13467,13468,13469,13470,4509,13471,13472,13473,13474, #13472
13475,13476,13477,13478,13479,13480,13481,13482,13483,13484,13485,13486,13487,13488,13489,13490, #13488
13491,13492,13493,13494,13495,13496,13497,13498,13499,13500,13501,13502,13503,13504,13505,13506, #13504
13507,13508,13509,13510,13511,13512,13513,13514,13515,13516,13517,13518,13519,13520,13521,13522, #13520
13523,13524,13525,13526,13527,13528,13529,13530,13531,13532,13533,13534,13535,13536,13537,13538, #13536
13539,13540,13541,13542,13543,13544,13545,13546,13547,13548,13549,13550,13551,13552,13553,13554, #13552
13555,13556,13557,13558,13559,13560,13561,13562,13563,13564,13565,13566,13567,13568,13569,13570, #13568
13571,13572,13573,13574,13575,13576,13577,13578,13579,13580,13581,13582,13583,13584,13585,13586, #13584
13587,13588,13589,13590,13591,13592,13593,13594,13595,13596,13597,13598,13599,13600,13601,13602, #13600
13603,13604,13605,13606,13607,13608,13609,13610,13611,13612,13613,13614,13615,13616,13617,13618, #13616
13619,13620,13621,13622,13623,13624,13625,13626,13627,13628,13629,13630,13631,13632,13633,13634, #13632
13635,13636,13637,13638,13639,13640,13641,13642,5006,13643,13644,13645,13646,13647,13648,13649, #13648
13650,13651,5007,13652,13653,13654,13655,13656,13657,13658,13659,13660,13661,13662,13663,13664, #13664
13665,13666,13667,13668,13669,13670,13671,13672,13673,13674,13675,13676,13677,13678,13679,13680, #13680
13681,13682,13683,13684,13685,13686,13687,13688,13689,13690,13691,13692,13693,13694,13695,13696, #13696
13697,13698,13699,13700,13701,13702,13703,13704,13705,13706,13707,13708,13709,13710,13711,13712, #13712
13713,13714,13715,13716,13717,13718,13719,13720,13721,13722,13723,13724,13725,13726,13727,13728, #13728
13729,13730,13731,13732,13733,13734,13735,13736,13737,13738,13739,13740,13741,13742,13743,13744, #13744
13745,13746,13747,13748,13749,13750,13751,13752,13753,13754,13755,13756,13757,13758,13759,13760, #13760
13761,13762,13763,13764,13765,13766,13767,13768,13769,13770,13771,13772,13773,13774,3273,13775, #13776
13776,13777,13778,13779,13780,13781,13782,13783,13784,13785,13786,13787,13788,13789,13790,13791, #13792
13792,13793,13794,13795,13796,13797,13798,13799,13800,13801,13802,13803,13804,13805,13806,13807, #13808
13808,13809,13810,13811,13812,13813,13814,13815,13816,13817,13818,13819,13820,13821,13822,13823, #13824
13824,13825,13826,13827,13828,13829,13830,13831,13832,13833,13834,13835,13836,13837,13838,13839, #13840
13840,13841,13842,13843,13844,13845,13846,13847,13848,13849,13850,13851,13852,13853,13854,13855, #13856
13856,13857,13858,13859,13860,13861,13862,13863,13864,13865,13866,13867,13868,13869,13870,13871, #13872
13872,13873,13874,13875,13876,13877,13878,13879,13880,13881,13882,13883,13884,13885,13886,13887, #13888
13888,13889,13890,13891,13892,13893,13894,13895,13896,13897,13898,13899,13900,13901,13902,13903, #13904
13904,13905,13906,13907,13908,13909,13910,13911,13912,13913,13914,13915,13916,13917,13918,13919, #13920
13920,13921,13922,13923,13924,13925,13926,13927,13928,13929,13930,13931,13932,13933,13934,13935, #13936
13936,13937,13938,13939,13940,13941,13942,13943,13944,13945,13946,13947,13948,13949,13950,13951, #13952
13952,13953,13954,13955,13956,13957,13958,13959,13960,13961,13962,13963,13964,13965,13966,13967, #13968
13968,13969,13970,13971,13972) #13973
# flake8: noqa
| apache-2.0 |
xisisu/RT-Xen | tools/python/xen/xend/MemoryPool.py | 43 | 4567 | #===========================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2009 Novell, Inc.
# Author: James (Song Wei) <[email protected]>
#============================================================================
import xen.lowlevel.xc
import XendDomain
import XendOptions
from XendLogging import log
from XendError import VmError
class MemoryPool:
    def init(self):
        xoptions = XendOptions.instance()
        self.default_reserved_memory = xoptions.get_reserved_memory() * 1024 * 1024 #KiB
        if self.default_reserved_memory <= 0:
            return
        self.enable_memory_pool = 1
        self.dom0_ballooning = xoptions.get_enable_dom0_ballooning()
        if not self.dom0_ballooning:
            return
        self.reserve_memory = 0
        self.untouched_memory = 0
        #init reserved memory
        #if not reserve_memory_size:
        xc = xen.lowlevel.xc.xc()
        physinfo = xc.physinfo()
        total_mem = physinfo['total_memory']
        if total_mem < self.reserve_memory:
            self.default_reserved_memory = total_mem
        self.reserve_memory = self.default_reserved_memory
        self.untouched_memory = self.default_reserved_memory
        log.debug("MemoryPool: init reserved_memory %d KiB" %self.reserve_memory)
    def __init__(self):
        self.reserve_memory = 0
        self.untouched_memory = 0
        self.default_reserved_memory = 0
        self.enable_memory_pool = 0
        self.dom0_ballooning = 0
    def available_memory_check(self, need_mem):
        return self.is_enabled() and self.reserve_memory > need_mem
    def decrease_memory(self, value):
        if not self.is_enabled() or value <= 4096: #4M for PV guest kernel and ramdisk unzip
            return
        elif self.reserve_memory < value:
            raise VmError(('I need %d KiB, but only have %d KiB in Memory Pool') %(value,self.reserve_memory))
        else:
            self.reserve_memory -= value
            log.debug("MemoryPool: decrease_memory: decrease: %d reserved_memory %d KiB" %(value,self.reserve_memory))
        return
    def decrease_untouched_memory(self, value):
        if not self.is_enabled():
            return
        elif self.untouched_memory < value:
            raise VmError(('I need %d KiB untouch mem, but only have %d KiB untouched mem in Memory Pool') %(value,self.reserve_memory))
        else:
            self.untouched_memory -= value
            log.debug("MemoryPool: decrease_untouched_memory: untouched_memory %d KiB" %self.untouched_memory)
        return
    def increase_memory(self, value):
        if not self.is_enabled():
            return
        else:
            self.reserve_memory += value
            if self.reserve_memory > self.default_reserved_memory:
                raise VmError(('the maxsize of memory pool is %d KiB, but current is %d KiB') %(value,self.reserve_memory))
            log.debug("MemoryPool: increase_memory:%d, reserved_memory %d KiB" %(value,self.reserve_memory))
        return
    def is_enabled(self):
        return self.enable_memory_pool and self.dom0_ballooning
    def get_pool_size(self):
        if self.is_enabled():
            return self.default_reserved_memory
        else:
            return 0
    def get_left_memory(self):
        if self.is_enabled():
            return self.reserve_memory
        else:
            return 0
    def get_untouched_memory(self):
        if self.is_enabled():
            return self.untouched_memory
        else:
            return 0
def instance():
    """Singleton constructor. Use this instead of the class constructor.
    """
    global MP_inst
    try:
        MP_inst
    except:
        MP_inst = MemoryPool()
        MP_inst.init()
    return MP_inst
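# Illustrative usage sketch (not part of the original file): callers are expected
# to go through the singleton accessor rather than the class itself. The calls
# below only assume the methods defined above.
#
#     pool = instance()
#     if pool.is_enabled():
#         log.debug("pool size %d KiB, %d KiB left",
#                   pool.get_pool_size(), pool.get_left_memory())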
| gpl-2.0 |
ella/django-ratings | django_ratings/aggregation.py | 1 | 1768 | """
This file aggregates records from the Rating and Agg tables into the Agg and TotalRate tables
"""
import logging
from datetime import datetime, timedelta
from django_ratings.models import Rating, Agg, TotalRate
logger = logging.getLogger('django_ratings')
# aggregate ratings older than 2 years by year
DELTA_TIME_YEAR = 2*365*24*60*60
# ratings older than 2 months by month
DELTA_TIME_MONTH = 2*30*24*60*60
# rest of the ratings (last 2 months) aggregate daily
DELTA_TIME_DAY = -24*60*60
TIMES_ALL = {DELTA_TIME_YEAR : 'year', DELTA_TIME_MONTH : 'month', DELTA_TIME_DAY : 'day'}
def transfer_agg_to_totalrate():
    """
    Transfer aggregation data from table Agg to table TotalRate
    """
    logger.info("transfer_agg_to_totalrate BEGIN")
    if TotalRate.objects.count() != 0:
        TotalRate.objects.all().delete()
    Agg.objects.agg_to_totalrate()
    logger.info("transfer_agg_to_totalrate END")
def transfer_agg_to_agg():
    """
    aggregation data from table Agg to table Agg
    """
    logger.info("transfer_agg_to_agg BEGIN")
    timenow = datetime.now()
    for t in TIMES_ALL:
        TIME_DELTA = t
        time_agg = timenow - timedelta(seconds=TIME_DELTA)
        Agg.objects.move_agg_to_agg(time_agg, TIMES_ALL[t])
    Agg.objects.agg_assume()
    logger.info("transfer_agg_to_agg END")
def transfer_data():
    """
    transfer data from table Rating to table Agg
    """
    logger.info("transfer_data BEGIN")
    timenow = datetime.now()
    for t in sorted(TIMES_ALL.keys(), reverse=True):
        TIME_DELTA = t
        time_agg = timenow - timedelta(seconds=TIME_DELTA)
        Rating.objects.move_rate_to_agg(time_agg, TIMES_ALL[t])
    transfer_agg_to_agg()
    transfer_agg_to_totalrate()
    logger.info("transfer_data END")
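# Illustrative note (not part of the original module): in a deployment these
# aggregation steps would typically be invoked periodically, e.g. from a cron
# job or a custom management command. A minimal sketch, assuming only the
# functions defined above:
#
#     from django_ratings.aggregation import transfer_data
#     transfer_data()   # roll Rating rows into Agg, then rebuild TotalRate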
| bsd-3-clause |
mancoast/CPythonPyc_test | fail/321_test_funcattrs.py | 56 | 10870 | from test import support
import types
import unittest
class FuncAttrsTest(unittest.TestCase):
    def setUp(self):
        class F:
            def a(self):
                pass
        def b():
            return 3
        self.fi = F()
        self.F = F
        self.b = b
    def cannot_set_attr(self, obj, name, value, exceptions):
        try:
            setattr(obj, name, value)
        except exceptions:
            pass
        else:
            self.fail("shouldn't be able to set %s to %r" % (name, value))
        try:
            delattr(obj, name)
        except exceptions:
            pass
        else:
            self.fail("shouldn't be able to del %s" % name)
class FunctionPropertiesTest(FuncAttrsTest):
    # Include the external setUp method that is common to all tests
    def test_module(self):
        self.assertEqual(self.b.__module__, __name__)
    def test_dir_includes_correct_attrs(self):
        self.b.known_attr = 7
        self.assertIn('known_attr', dir(self.b),
                      "set attributes not in dir listing of method")
        # Test on underlying function object of method
        self.F.a.known_attr = 7
        self.assertIn('known_attr', dir(self.fi.a), "set attribute on function "
                      "implementations, should show up in next dir")
    def test_duplicate_function_equality(self):
        # Body of `duplicate' is the exact same as self.b
        def duplicate():
            'my docstring'
            return 3
        self.assertNotEqual(self.b, duplicate)
    def test_copying___code__(self):
        def test(): pass
        self.assertEqual(test(), None)
        test.__code__ = self.b.__code__
        self.assertEqual(test(), 3) # self.b always returns 3, arbitrarily
    def test___globals__(self):
        self.assertIs(self.b.__globals__, globals())
        self.cannot_set_attr(self.b, '__globals__', 2,
                             (AttributeError, TypeError))
    def test___closure__(self):
        a = 12
        def f(): print(a)
        c = f.__closure__
        self.assertIsInstance(c, tuple)
        self.assertEqual(len(c), 1)
        # don't have a type object handy
        self.assertEqual(c[0].__class__.__name__, "cell")
        self.cannot_set_attr(f, "__closure__", c, AttributeError)
    def test_empty_cell(self):
        def f(): print(a)
        try:
            f.__closure__[0].cell_contents
        except ValueError:
            pass
        else:
            self.fail("shouldn't be able to read an empty cell")
        a = 12
    def test___name__(self):
        self.assertEqual(self.b.__name__, 'b')
        self.b.__name__ = 'c'
        self.assertEqual(self.b.__name__, 'c')
        self.b.__name__ = 'd'
        self.assertEqual(self.b.__name__, 'd')
        # __name__ must be a string
        self.cannot_set_attr(self.b, '__name__', 7, TypeError)
        # __name__ must be available when in restricted mode. Exec will raise
        # AttributeError if __name__ is not available on f.
        s = """def f(): pass\nf.__name__"""
        exec(s, {'__builtins__': {}})
        # Test on methods, too
        self.assertEqual(self.fi.a.__name__, 'a')
        self.cannot_set_attr(self.fi.a, "__name__", 'a', AttributeError)
    def test___code__(self):
        num_one, num_two = 7, 8
        def a(): pass
        def b(): return 12
        def c(): return num_one
        def d(): return num_two
        def e(): return num_one, num_two
        for func in [a, b, c, d, e]:
            self.assertEqual(type(func.__code__), types.CodeType)
        self.assertEqual(c(), 7)
        self.assertEqual(d(), 8)
        d.__code__ = c.__code__
        self.assertEqual(c.__code__, d.__code__)
        self.assertEqual(c(), 7)
        # self.assertEqual(d(), 7)
        try:
            b.__code__ = c.__code__
        except ValueError:
            pass
        else:
            self.fail("__code__ with different numbers of free vars should "
                      "not be possible")
        try:
            e.__code__ = d.__code__
        except ValueError:
            pass
        else:
            self.fail("__code__ with different numbers of free vars should "
                      "not be possible")
    def test_blank_func_defaults(self):
        self.assertEqual(self.b.__defaults__, None)
        del self.b.__defaults__
        self.assertEqual(self.b.__defaults__, None)
    def test_func_default_args(self):
        def first_func(a, b):
            return a+b
        def second_func(a=1, b=2):
            return a+b
        self.assertEqual(first_func.__defaults__, None)
        self.assertEqual(second_func.__defaults__, (1, 2))
        first_func.__defaults__ = (1, 2)
        self.assertEqual(first_func.__defaults__, (1, 2))
        self.assertEqual(first_func(), 3)
        self.assertEqual(first_func(3), 5)
        self.assertEqual(first_func(3, 5), 8)
        del second_func.__defaults__
        self.assertEqual(second_func.__defaults__, None)
        try:
            second_func()
        except TypeError:
            pass
        else:
            self.fail("__defaults__ does not update; deleting it does not "
                      "remove requirement")
class InstancemethodAttrTest(FuncAttrsTest):
    def test___class__(self):
        self.assertEqual(self.fi.a.__self__.__class__, self.F)
        self.cannot_set_attr(self.fi.a, "__class__", self.F, TypeError)
    def test___func__(self):
        self.assertEqual(self.fi.a.__func__, self.F.a)
        self.cannot_set_attr(self.fi.a, "__func__", self.F.a, AttributeError)
    def test___self__(self):
        self.assertEqual(self.fi.a.__self__, self.fi)
        self.cannot_set_attr(self.fi.a, "__self__", self.fi, AttributeError)
    def test___func___non_method(self):
        # Behavior should be the same when a method is added via an attr
        # assignment
        self.fi.id = types.MethodType(id, self.fi)
        self.assertEqual(self.fi.id(), id(self.fi))
        # Test usage
        try:
            self.fi.id.unknown_attr
        except AttributeError:
            pass
        else:
            self.fail("using unknown attributes should raise AttributeError")
        # Test assignment and deletion
        self.cannot_set_attr(self.fi.id, 'unknown_attr', 2, AttributeError)
class ArbitraryFunctionAttrTest(FuncAttrsTest):
    def test_set_attr(self):
        self.b.known_attr = 7
        self.assertEqual(self.b.known_attr, 7)
        try:
            self.fi.a.known_attr = 7
        except AttributeError:
            pass
        else:
            self.fail("setting attributes on methods should raise error")
    def test_delete_unknown_attr(self):
        try:
            del self.b.unknown_attr
        except AttributeError:
            pass
        else:
            self.fail("deleting unknown attribute should raise TypeError")
    def test_unset_attr(self):
        for func in [self.b, self.fi.a]:
            try:
                func.non_existent_attr
            except AttributeError:
                pass
            else:
                self.fail("using unknown attributes should raise "
                          "AttributeError")
class FunctionDictsTest(FuncAttrsTest):
    def test_setting_dict_to_invalid(self):
        self.cannot_set_attr(self.b, '__dict__', None, TypeError)
        from collections import UserDict
        d = UserDict({'known_attr': 7})
        self.cannot_set_attr(self.fi.a.__func__, '__dict__', d, TypeError)
    def test_setting_dict_to_valid(self):
        d = {'known_attr': 7}
        self.b.__dict__ = d
        # Test assignment
        self.assertIs(d, self.b.__dict__)
        # ... and on all the different ways of referencing the method's func
        self.F.a.__dict__ = d
        self.assertIs(d, self.fi.a.__func__.__dict__)
        self.assertIs(d, self.fi.a.__dict__)
        # Test value
        self.assertEqual(self.b.known_attr, 7)
        self.assertEqual(self.b.__dict__['known_attr'], 7)
        # ... and again, on all the different method's names
        self.assertEqual(self.fi.a.__func__.known_attr, 7)
        self.assertEqual(self.fi.a.known_attr, 7)
    def test_delete___dict__(self):
        try:
            del self.b.__dict__
        except TypeError:
            pass
        else:
            self.fail("deleting function dictionary should raise TypeError")
    def test_unassigned_dict(self):
        self.assertEqual(self.b.__dict__, {})
    def test_func_as_dict_key(self):
        value = "Some string"
        d = {}
        d[self.b] = value
        self.assertEqual(d[self.b], value)
class FunctionDocstringTest(FuncAttrsTest):
    def test_set_docstring_attr(self):
        self.assertEqual(self.b.__doc__, None)
        docstr = "A test method that does nothing"
        self.b.__doc__ = docstr
        self.F.a.__doc__ = docstr
        self.assertEqual(self.b.__doc__, docstr)
        self.assertEqual(self.fi.a.__doc__, docstr)
        self.cannot_set_attr(self.fi.a, "__doc__", docstr, AttributeError)
    def test_delete_docstring(self):
        self.b.__doc__ = "The docstring"
        del self.b.__doc__
        self.assertEqual(self.b.__doc__, None)
def cell(value):
    """Create a cell containing the given value."""
    def f():
        print(a)
    a = value
    return f.__closure__[0]
def empty_cell(empty=True):
    """Create an empty cell."""
    def f():
        print(a)
    # the intent of the following line is simply "if False:"; it's
    # spelt this way to avoid the danger that a future optimization
    # might simply remove an "if False:" code block.
    if not empty:
        a = 1729
    return f.__closure__[0]
class CellTest(unittest.TestCase):
    def test_comparison(self):
        # These tests are here simply to exercise the comparison code;
        # their presence should not be interpreted as providing any
        # guarantees about the semantics (or even existence) of cell
        # comparisons in future versions of CPython.
        self.assertTrue(cell(2) < cell(3))
        self.assertTrue(empty_cell() < cell('saturday'))
        self.assertTrue(empty_cell() == empty_cell())
        self.assertTrue(cell(-36) == cell(-36.0))
        self.assertTrue(cell(True) > empty_cell())
class StaticMethodAttrsTest(unittest.TestCase):
    def test_func_attribute(self):
        def f():
            pass
        c = classmethod(f)
        self.assertTrue(c.__func__ is f)
        s = staticmethod(f)
        self.assertTrue(s.__func__ is f)
def test_main():
    support.run_unittest(FunctionPropertiesTest, InstancemethodAttrTest,
                         ArbitraryFunctionAttrTest, FunctionDictsTest,
                         FunctionDocstringTest, CellTest,
                         StaticMethodAttrsTest)
if __name__ == "__main__":
    test_main()
| gpl-3.0 |
vikrant82/android_kernel_samsung_mondrianwifi | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
    flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
    flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
    # nothing to do, really
    pass
def define_symbolic_value(event_name, field_name, value, field_str):
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    string = ""
    if flag_fields[event_name][field_name]:
        print_delim = 0
        keys = flag_fields[event_name][field_name]['values'].keys()
        keys.sort()
        for idx in keys:
            if not value and not idx:
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                value &= ~idx
    return string
def symbol_str(event_name, field_name, value):
    string = ""
    if symbolic_fields[event_name][field_name]:
        keys = symbolic_fields[event_name][field_name]['values'].keys()
        keys.sort()
        for idx in keys:
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
    return string
trace_flags = { 0x00: "NONE", \
                0x01: "IRQS_OFF", \
                0x02: "IRQS_NOSUPPORT", \
                0x04: "NEED_RESCHED", \
                0x08: "HARDIRQ", \
                0x10: "SOFTIRQ" }
def trace_flag_str(value):
    string = ""
    print_delim = 0
    keys = trace_flags.keys()
    for idx in keys:
        if not value and not idx:
            string += "NONE"
            break
        if idx and (value & idx) == idx:
            if print_delim:
                string += " | ";
            string += trace_flags[idx]
            print_delim = 1
            value &= ~idx
    return string
def taskState(state):
    states = {
        0 : "R",
        1 : "S",
        2 : "D",
        64: "DEAD"
    }
    if state not in states:
        return "Unknown"
    return states[state]
class EventHeaders:
    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm
    def ts(self):
        return (self.secs * (10 ** 9)) + self.nsecs
    def ts_format(self):
        return "%d.%d" % (self.secs, int(self.nsecs / 1000))
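# Illustrative sketch (not part of the original file): a perf script could use the
# helpers above roughly as follows; the event and field names here are made up
# for the example.
#
#     define_flag_field("irq__softirq_entry", "flags", "|")
#     define_flag_value("irq__softirq_entry", "flags", 1, "PENDING")
#     print flag_str("irq__softirq_entry", "flags", 1)   # -> "PENDING"
#     print trace_flag_str(0x04)                         # -> "NEED_RESCHED"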
| gpl-2.0 |
philsch/ansible | lib/ansible/utils/module_docs_fragments/asa.py | 123 | 4186 | #
# (c) 2016, Peter Sprygada <[email protected]>
# (c) 2016, Patrick Ogenstad <@ogenstad>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    # Standard files documentation fragment
    DOCUMENTATION = """
options:
  authorize:
    description:
      - Instructs the module to enter privileged mode on the remote device
        before sending any commands. If not specified, the device will
        attempt to execute all commands in non-privileged mode. If the value
        is not specified in the task, the value of environment variable
        C(ANSIBLE_NET_AUTHORIZE) will be used instead.
    default: no
    choices: ['yes', 'no']
  context:
    description:
      - Specifies which context to target if you are running in the ASA in
        multiple context mode. Defaults to the current context you login to.
    default: null
  provider:
    description:
      - A dict object containing connection details.
    default: null
    suboptions:
      host:
        description:
          - Specifies the DNS host name or address for connecting to the remote
            device over the specified transport. The value of host is used as
            the destination address for the transport.
      port:
        description:
          - Specifies the port to use when building the connection to the remote
            device.
        default: 22
      username:
        description:
          - Configures the username to use to authenticate the connection to
            the remote device. This value is used to authenticate
            the SSH session. If the value is not specified in the task, the
            value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
      password:
        description:
          - Specifies the password to use to authenticate the connection to
            the remote device. This value is used to authenticate
            the SSH session. If the value is not specified in the task, the
            value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
        default: null
      ssh_keyfile:
        description:
          - Specifies the SSH key to use to authenticate the connection to
            the remote device. This value is the path to the
            key used to authenticate the SSH session. If the value is not specified
            in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
            will be used instead.
      authorize:
        description:
          - Instructs the module to enter privileged mode on the remote device
            before sending any commands. If not specified, the device will
            attempt to execute all commands in non-privileged mode. If the value
            is not specified in the task, the value of environment variable
            C(ANSIBLE_NET_AUTHORIZE) will be used instead.
        default: no
        choices: ['yes', 'no']
      auth_pass:
        description:
          - Specifies the password to use if required to enter privileged mode
            on the remote device. If I(authorize) is false, then this argument
            does nothing. If the value is not specified in the task, the value of
            environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
        default: none
      timeout:
        description:
          - Specifies the idle timeout (in seconds) for the connection. Useful
            if the console freezes before continuing. For example when saving
            configurations.
        default: 10
"""
| gpl-3.0 |
jjlee9/openthread | tools/harness-automation/cases/leader_5_1_1.py | 16 | 1875 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class Leader_5_1_1(HarnessCase):
    role = HarnessCase.ROLE_LEADER
    case = '5 1 1'
    golden_devices_required = 1
    def on_dialog(self, dialog, title):
        pass
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
natcoin/natcoin | contrib/bitrpc/bitrpc.py | 1 | 7836 | from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:9332")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
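# Illustrative usage (not part of the original script): the RPC command handled
# below is selected by the first command-line argument, e.g.
#
#     python bitrpc.py getinfo
#     python bitrpc.py getbalance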
if cmd == "backupwallet":
    try:
        path = raw_input("Enter destination path/filename: ")
        print access.backupwallet(path)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaccount":
    try:
        addr = raw_input("Enter a Natcoin address: ")
        print access.getaccount(addr)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaccountaddress(acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaddressesbyaccount(acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getbalance":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getbalance(acct, mc)
        except:
            print access.getbalance()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
    try:
        height = raw_input("Height: ")
        print access.getblockbycount(height)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblockcount":
    try:
        print access.getblockcount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
    try:
        print access.getblocknumber()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
    try:
        print access.getconnectioncount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
    try:
        print access.getdifficulty()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getgenerate":
    try:
        print access.getgenerate()
    except:
        print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
    try:
        print access.gethashespersec()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getinfo":
    try:
        print access.getinfo()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
    try:
        acct = raw_input("Enter an account name: ")
        try:
            print access.getnewaddress(acct)
        except:
            print access.getnewaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaccount(acct, mc)
        except:
            print access.getreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
    try:
        addr = raw_input("Enter a Natcoin address (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaddress(addr, mc)
        except:
            print access.getreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "gettransaction":
    try:
        txid = raw_input("Enter a transaction ID: ")
        print access.gettransaction(txid)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
			print access.getwork(data)
		except:
			print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| mit |
Boldie/gourmet | setup.py | 4 | 12089 | #!/bin/env python
#
# setup.py for Gourmet
import sys
import glob
import os.path
import os
import fileinput
import string
from types import StringType, ListType, TupleType
from distutils.command.build_py import build_py as _build_py
from distutils.command.build_scripts import build_scripts as _build_scripts
from distutils.util import convert_path
from DistUtilsExtra.command import build_extra, build_i18n, build_icons
# grab the version from our "version" module
# first we have to extend our path to include gourmet/
srcpath = os.path.split(__file__)[0]
sys.path.append(os.path.join(srcpath, 'gourmet'))
import version
class build_py(_build_py):
"""build_py command
    This specific build_py command will modify the module 'settings' so that it
    contains information on installation prefixes afterwards.
"""
def build_module (self, module, module_file, package):
_build_py.build_module(self, module, module_file, package)
if type(package) is StringType:
package = string.split(package, '.')
elif type(package) not in (ListType, TupleType):
raise TypeError, \
"'package' must be a string (dot-separated), list, or tuple"
if ( module == 'settings' and len(package) == 1
and package[0] == 'gourmet'
and 'install' in self.distribution.command_obj):
outfile = self.get_module_outfile(self.build_lib, package, module)
iobj = self.distribution.command_obj['install']
lib_dir = iobj.install_lib
base = iobj.install_data
if (iobj.root):
lib_dir = lib_dir[len(iobj.root):]
base = base[len(iobj.root):]
base = os.path.join(base, 'share')
data_dir = os.path.join(base, 'gourmet')
            # abuse fileinput to rewrite the installation-prefix assignments in the generated settings module
for line in fileinput.input(outfile, inplace = 1):
if "base_dir = " in line:
line = "base_dir = '%s'\n" % base
elif "lib_dir = " in line:
line = "lib_dir = '%s'\n" % lib_dir
elif "data_dir = " in line:
line = "data_dir = '%s'\n" % data_dir
elif "doc_base = " in line:
line = "doc_base = '%s'\n" % \
os.path.join(base, 'doc', 'gourmet')
elif "icon_base = " in line:
line = "icon_base = '%s'\n" % \
os.path.join(base, 'icons', 'hicolor')
elif "locale_base = " in line:
line = "locale_base = '%s'\n" % \
os.path.join(base, 'locale')
elif "plugin_base = " in line:
line = "plugin_base = data_dir\n"
print line,
class build_scripts(_build_scripts):
"""build_scripts command
This specific build_scripts command will modify the bin/gourmet script
so that it contains information on installation prefixes afterwards.
"""
def copy_scripts(self):
_build_scripts.copy_scripts(self)
if "install" in self.distribution.command_obj:
iobj = self.distribution.command_obj["install"]
lib_dir = iobj.install_lib
data_dir = iobj.install_data
if iobj.root:
lib_dir = lib_dir[len(iobj.root):]
data_dir = data_dir[len(iobj.root):]
script = convert_path("bin/gourmet")
outfile = os.path.join(self.build_dir, os.path.basename(script))
# abuse fileinput to replace two lines in bin/gourmet
for line in fileinput.input(outfile, inplace = 1):
if "lib_dir = '.'" in line:
line = "lib_dir = '%s'\n" % lib_dir
elif "data_dir = '.'" in line:
line = "data_dir = '%s'\n" % data_dir
print line,
if sys.platform == "win32":
#gtk file inclusion
import gtk
# The runtime dir is in the same directory as the module:
GTK_RUNTIME_DIR = os.path.join(
os.path.split(os.path.dirname(gtk.__file__))[0], "runtime")
assert os.path.exists(GTK_RUNTIME_DIR), "Cannot find GTK runtime data"
GTK_THEME_DEFAULT = os.path.join("share", "themes", "Default")
GTK_THEME_WINDOWS = os.path.join("share", "themes", "MS-Windows")
GTK_GTKRC_DIR = os.path.join("etc", "gtk-2.0")
GTK_GTKRC = "gtkrc"
GTK_WIMP_DIR = os.path.join("lib", "gtk-2.0", "2.10.0", "engines")
GTK_WIMP_DLL = "libwimp.dll"
#If you want the Tango icons:
GTK_ICONS = os.path.join("share", "icons")
#There is also localisation data (which I omit, but you might not want to):
GTK_LOCALE_DATA = os.path.join("share", "locale")
def data_files():
'''Build list of data files to be installed'''
data_files = []
for root, dirs, files in os.walk('data'):
if files:
files = [os.path.join(root, f) for f in files]
data_files.append((os.path.join('share','gourmet', root[len('data')+1:]), files))
# files in /usr/share/X/ (not gourmet)
files = []
base = os.path.join('share','gourmet')
files.extend(data_files)
files.extend([(os.path.join(base,'ui'), glob.glob(os.path.join('ui','*.ui')))])
files.extend([(os.path.join('share','doc','gourmet'), ['FAQ', 'LICENSE'])])
return files
if sys.platform == "win32":
from cx_Freeze import setup, Executable, build as build_cxf
import msilib
class build(build_extra.build_extra, build_cxf):
def __init__(self, dist):
build_extra.build_extra.__init__(self, dist)
build_cxf.__init__(self, dist)
def get_sub_comands(self):
build_cxf.sub_commands(self)
def initialize_options(self):
build_extra.build_extra.initialize_options(self)
build_cxf.initialize_options(self)
def finalize_options(self):
build_extra.build_extra.finalize_options(self)
build_cxf.finalize_options(self)
include_files = []
for i in data_files():
for j in i[1]:
include_files.append((j, i[0]))
icon_table = [
('GourmetIco', msilib.Binary('data/icons/gourmet.ico'))
]
property_table = [
('ARPPRODUCTICON', 'GourmetIco'),
]
msi_data = {
'Icon': icon_table,
'Property': property_table,
}
kwargs = dict(name="Gourmet Recipe Manager",
executables=[Executable(
os.path.join(srcpath, 'bin','gourmet'),
base="Win32GUI",
icon="data/icons/gourmet.ico",
shortcutName="Gourmet Recipe Manager",
shortcutDir="ProgramMenuFolder"
)
],
options={
'build_exe':
{
'packages': [
'gourmet',
'sqlalchemy',
'reportlab',
'reportlab.graphics',
'reportlab.lib',
'reportlab.pdfbase',
'reportlab.pdfgen',
'reportlab.platypus',
'xml.dom',
'lxml.etree',
'lxml._elementpath'
],
'includes': [
'cairo',
'gio',
'pango',
'pangocairo',
'atk',
'BeautifulSoup'
],
'include_files': [
('data', '.'),
('ui', 'ui'),
('LICENSE', os.path.join('doc', 'LICENSE')),
('FAQ', os.path.join('doc', 'FAQ')),
(os.path.join(GTK_RUNTIME_DIR, GTK_THEME_DEFAULT), GTK_THEME_DEFAULT),
(os.path.join(GTK_RUNTIME_DIR, GTK_THEME_WINDOWS), GTK_THEME_WINDOWS),
#(os.path.join(GTK_RUNTIME_DIR, GTK_ICONS), GTK_ICONS),
(os.path.join(GTK_RUNTIME_DIR, GTK_GTKRC_DIR, GTK_GTKRC), os.path.join(GTK_GTKRC_DIR, GTK_GTKRC)),
(os.path.join(GTK_RUNTIME_DIR, GTK_WIMP_DIR, GTK_WIMP_DLL), os.path.join(GTK_WIMP_DIR, GTK_WIMP_DLL)),
(os.path.join('build', 'mo'), 'locale'),
(os.path.join("build", "share", "gourmet"), '.'),
(os.path.join("gourmet", 'plugins'), 'plugins')
],
# We're excluding the plugins module from being added to library.zip
# and add it via include_files instead in order to faciliate
# handling *.gourmet-plugin and extra files (such as *.ui files
# and images).
'excludes': ['plugins','Tkinter','wx'],
'optimize': 2,
'compressed':1,
'include_msvcr': True,
# see http://stackoverflow.com/questions/1979486/py2exe-win32api-pyc-importerror-dll-load-failed
# libgcc_s_dw2-1.dll, if present, would crash Gourmet
'bin_excludes': ["mswsock.dll", "powrprof.dll","libgcc_s_dw2-1.dll"],
},
'bdist_msi':
{
'upgrade_code': '{D19B9EC6-DF39-4C83-BF87-A67776D087FA}',
'data': msi_data
}
}
)
else:
from distutils.core import setup
build = build_extra.build_extra
kwargs = dict(
name=version.name,
data_files=data_files(),
scripts=[os.path.join('bin','gourmet')]
)
plugins = []
def crawl (base, basename):
bdir = base
subdirs = filter(lambda x: os.path.isdir(os.path.join(bdir,x)), os.listdir(bdir))
for subd in subdirs:
name = basename + '.' + subd
plugins.append(name)
crawl(os.path.join(bdir,subd),name)
crawl('gourmet/plugins', 'gourmet.plugins')
result = setup(
version = version.version,
description = version.description,
author = version.author,
author_email = version.author_email,
url = version.website,
license = version.license,
packages = ['gourmet',
'gourmet.backends',
'gourmet.util',
'gourmet.defaults',
'gourmet.gtk_extras',
'gourmet.importers',
'gourmet.exporters',
'gourmet.plugins',
] + plugins,
package_data = {'gourmet': ['plugins/*/*.ui', 'plugins/*/images/*.png','plugins/*/*/images/*.png']},
cmdclass={'build' : build,
'build_i18n' : build_i18n.build_i18n,
'build_icons' : build_icons.build_icons,
'build_py' : build_py,
'build_scripts' : build_scripts,
},
**kwargs
)
| gpl-2.0 |
krafczyk/spack | var/spack/repos/builtin/packages/chlorop/package.py | 2 | 2274 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
class Chlorop(Package):
"""Chlorop predicts the presence of chloroplast transit peptides
in protein sequences and the location of potential cTP cleavage
sites. You will need to obtain the tarball by visiting the
URL and completing the form. You can then either run spack
install with the tarball in the directory, or add it to a
mirror. You will need to set the CHLOROTMP environment variable
to the full path of the directory you want chlorop to use as
a temporary directory."""
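    # A sketch of a typical install flow (illustrative; the temp path is an
    # assumption), run from the directory containing the downloaded tarball:
    #   export CHLOROTMP=/tmp/chlorop
    #   spack install chlorop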
homepage = "http://www.cbs.dtu.dk/services/ChloroP/"
url = "file://{0}/chlorop-1.1.Linux.tar.gz".format(os.getcwd())
version('1.1', 'eb0ba6b28dfa735163ad5fc70e30139e46e33f6ae27f87666a7167a4ac5f71d9')
depends_on('awk', type='run')
patch('chlorop.patch')
def install(self, spec, prefix):
os.rename('chlorop', 'bin/chlorop')
install_tree('.', prefix)
def setup_environment(self, spack_env, run_env):
run_env.set('CHLOROP', self.prefix)
| lgpl-2.1 |
Bysmyyr/chromium-crosswalk | third_party/protobuf/python/google/protobuf/internal/message_test.py | 224 | 22295 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests python protocol buffers against the golden message.
Note that the golden messages exercise every known field type, thus this
test ends up exercising and verifying nearly all of the parsing and
serialization code in the whole library.
TODO(kenton): Merge with wire_format_test? It doesn't make a whole lot of
sense to call this a test of the "message" module, which only declares an
abstract interface.
"""
__author__ = '[email protected] (Gregory P. Smith)'
import copy
import math
import operator
import pickle
import unittest
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_pb2
from google.protobuf.internal import api_implementation
from google.protobuf.internal import test_util
from google.protobuf import message
# Python pre-2.6 does not have isinf() or isnan() functions, so we have
# to provide our own.
def isnan(val):
# NaN is never equal to itself.
return val != val
def isinf(val):
# Infinity times zero equals NaN.
return not isnan(val) and isnan(val * 0)
def IsPosInf(val):
return isinf(val) and (val > 0)
def IsNegInf(val):
return isinf(val) and (val < 0)
class MessageTest(unittest.TestCase):
def testGoldenMessage(self):
golden_data = test_util.GoldenFile('golden_message').read()
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
test_util.ExpectAllFieldsSet(self, golden_message)
self.assertEqual(golden_data, golden_message.SerializeToString())
golden_copy = copy.deepcopy(golden_message)
self.assertEqual(golden_data, golden_copy.SerializeToString())
def testGoldenExtensions(self):
golden_data = test_util.GoldenFile('golden_message').read()
golden_message = unittest_pb2.TestAllExtensions()
golden_message.ParseFromString(golden_data)
all_set = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(all_set)
self.assertEquals(all_set, golden_message)
self.assertEqual(golden_data, golden_message.SerializeToString())
golden_copy = copy.deepcopy(golden_message)
self.assertEqual(golden_data, golden_copy.SerializeToString())
def testGoldenPackedMessage(self):
golden_data = test_util.GoldenFile('golden_packed_fields_message').read()
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
all_set = unittest_pb2.TestPackedTypes()
test_util.SetAllPackedFields(all_set)
self.assertEquals(all_set, golden_message)
self.assertEqual(golden_data, all_set.SerializeToString())
golden_copy = copy.deepcopy(golden_message)
self.assertEqual(golden_data, golden_copy.SerializeToString())
def testGoldenPackedExtensions(self):
golden_data = test_util.GoldenFile('golden_packed_fields_message').read()
golden_message = unittest_pb2.TestPackedExtensions()
golden_message.ParseFromString(golden_data)
all_set = unittest_pb2.TestPackedExtensions()
test_util.SetAllPackedExtensions(all_set)
self.assertEquals(all_set, golden_message)
self.assertEqual(golden_data, all_set.SerializeToString())
golden_copy = copy.deepcopy(golden_message)
self.assertEqual(golden_data, golden_copy.SerializeToString())
def testPickleSupport(self):
golden_data = test_util.GoldenFile('golden_message').read()
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
pickled_message = pickle.dumps(golden_message)
unpickled_message = pickle.loads(pickled_message)
self.assertEquals(unpickled_message, golden_message)
def testPickleIncompleteProto(self):
golden_message = unittest_pb2.TestRequired(a=1)
pickled_message = pickle.dumps(golden_message)
unpickled_message = pickle.loads(pickled_message)
self.assertEquals(unpickled_message, golden_message)
self.assertEquals(unpickled_message.a, 1)
# This is still an incomplete proto - so serializing should fail
self.assertRaises(message.EncodeError, unpickled_message.SerializeToString)
def testPositiveInfinity(self):
golden_data = ('\x5D\x00\x00\x80\x7F'
'\x61\x00\x00\x00\x00\x00\x00\xF0\x7F'
'\xCD\x02\x00\x00\x80\x7F'
'\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\x7F')
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsPosInf(golden_message.optional_float))
self.assertTrue(IsPosInf(golden_message.optional_double))
self.assertTrue(IsPosInf(golden_message.repeated_float[0]))
self.assertTrue(IsPosInf(golden_message.repeated_double[0]))
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNegativeInfinity(self):
golden_data = ('\x5D\x00\x00\x80\xFF'
'\x61\x00\x00\x00\x00\x00\x00\xF0\xFF'
'\xCD\x02\x00\x00\x80\xFF'
'\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\xFF')
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsNegInf(golden_message.optional_float))
self.assertTrue(IsNegInf(golden_message.optional_double))
self.assertTrue(IsNegInf(golden_message.repeated_float[0]))
self.assertTrue(IsNegInf(golden_message.repeated_double[0]))
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNotANumber(self):
golden_data = ('\x5D\x00\x00\xC0\x7F'
'\x61\x00\x00\x00\x00\x00\x00\xF8\x7F'
'\xCD\x02\x00\x00\xC0\x7F'
'\xD1\x02\x00\x00\x00\x00\x00\x00\xF8\x7F')
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(isnan(golden_message.optional_float))
self.assertTrue(isnan(golden_message.optional_double))
self.assertTrue(isnan(golden_message.repeated_float[0]))
self.assertTrue(isnan(golden_message.repeated_double[0]))
# The protocol buffer may serialize to any one of multiple different
# representations of a NaN. Rather than verify a specific representation,
# verify the serialized string can be converted into a correctly
# behaving protocol buffer.
serialized = golden_message.SerializeToString()
message = unittest_pb2.TestAllTypes()
message.ParseFromString(serialized)
self.assertTrue(isnan(message.optional_float))
self.assertTrue(isnan(message.optional_double))
self.assertTrue(isnan(message.repeated_float[0]))
self.assertTrue(isnan(message.repeated_double[0]))
def testPositiveInfinityPacked(self):
golden_data = ('\xA2\x06\x04\x00\x00\x80\x7F'
'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\x7F')
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsPosInf(golden_message.packed_float[0]))
self.assertTrue(IsPosInf(golden_message.packed_double[0]))
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNegativeInfinityPacked(self):
golden_data = ('\xA2\x06\x04\x00\x00\x80\xFF'
'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\xFF')
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsNegInf(golden_message.packed_float[0]))
self.assertTrue(IsNegInf(golden_message.packed_double[0]))
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNotANumberPacked(self):
golden_data = ('\xA2\x06\x04\x00\x00\xC0\x7F'
'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF8\x7F')
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(isnan(golden_message.packed_float[0]))
self.assertTrue(isnan(golden_message.packed_double[0]))
serialized = golden_message.SerializeToString()
message = unittest_pb2.TestPackedTypes()
message.ParseFromString(serialized)
self.assertTrue(isnan(message.packed_float[0]))
self.assertTrue(isnan(message.packed_double[0]))
def testExtremeFloatValues(self):
message = unittest_pb2.TestAllTypes()
# Most positive exponent, no significand bits set.
kMostPosExponentNoSigBits = math.pow(2, 127)
message.optional_float = kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostPosExponentNoSigBits)
# Most positive exponent, one significand bit set.
kMostPosExponentOneSigBit = 1.5 * math.pow(2, 127)
message.optional_float = kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostPosExponentOneSigBit)
# Repeat last two cases with values of same magnitude, but negative.
message.optional_float = -kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostPosExponentNoSigBits)
message.optional_float = -kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostPosExponentOneSigBit)
# Most negative exponent, no significand bits set.
kMostNegExponentNoSigBits = math.pow(2, -127)
message.optional_float = kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostNegExponentNoSigBits)
# Most negative exponent, one significand bit set.
kMostNegExponentOneSigBit = 1.5 * math.pow(2, -127)
message.optional_float = kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostNegExponentOneSigBit)
# Repeat last two cases with values of the same magnitude, but negative.
message.optional_float = -kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostNegExponentNoSigBits)
message.optional_float = -kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostNegExponentOneSigBit)
def testExtremeDoubleValues(self):
message = unittest_pb2.TestAllTypes()
# Most positive exponent, no significand bits set.
kMostPosExponentNoSigBits = math.pow(2, 1023)
message.optional_double = kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostPosExponentNoSigBits)
# Most positive exponent, one significand bit set.
kMostPosExponentOneSigBit = 1.5 * math.pow(2, 1023)
message.optional_double = kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostPosExponentOneSigBit)
# Repeat last two cases with values of same magnitude, but negative.
message.optional_double = -kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostPosExponentNoSigBits)
message.optional_double = -kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostPosExponentOneSigBit)
# Most negative exponent, no significand bits set.
kMostNegExponentNoSigBits = math.pow(2, -1023)
message.optional_double = kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostNegExponentNoSigBits)
# Most negative exponent, one significand bit set.
kMostNegExponentOneSigBit = 1.5 * math.pow(2, -1023)
message.optional_double = kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostNegExponentOneSigBit)
# Repeat last two cases with values of the same magnitude, but negative.
message.optional_double = -kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostNegExponentNoSigBits)
message.optional_double = -kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostNegExponentOneSigBit)
def testSortingRepeatedScalarFieldsDefaultComparator(self):
"""Check some different types with the default comparator."""
message = unittest_pb2.TestAllTypes()
# TODO(mattp): would testing more scalar types strengthen test?
message.repeated_int32.append(1)
message.repeated_int32.append(3)
message.repeated_int32.append(2)
message.repeated_int32.sort()
self.assertEqual(message.repeated_int32[0], 1)
self.assertEqual(message.repeated_int32[1], 2)
self.assertEqual(message.repeated_int32[2], 3)
message.repeated_float.append(1.1)
message.repeated_float.append(1.3)
message.repeated_float.append(1.2)
message.repeated_float.sort()
self.assertAlmostEqual(message.repeated_float[0], 1.1)
self.assertAlmostEqual(message.repeated_float[1], 1.2)
self.assertAlmostEqual(message.repeated_float[2], 1.3)
message.repeated_string.append('a')
message.repeated_string.append('c')
message.repeated_string.append('b')
message.repeated_string.sort()
self.assertEqual(message.repeated_string[0], 'a')
self.assertEqual(message.repeated_string[1], 'b')
self.assertEqual(message.repeated_string[2], 'c')
message.repeated_bytes.append('a')
message.repeated_bytes.append('c')
message.repeated_bytes.append('b')
message.repeated_bytes.sort()
self.assertEqual(message.repeated_bytes[0], 'a')
self.assertEqual(message.repeated_bytes[1], 'b')
self.assertEqual(message.repeated_bytes[2], 'c')
def testSortingRepeatedScalarFieldsCustomComparator(self):
"""Check some different types with custom comparator."""
message = unittest_pb2.TestAllTypes()
message.repeated_int32.append(-3)
message.repeated_int32.append(-2)
message.repeated_int32.append(-1)
message.repeated_int32.sort(lambda x,y: cmp(abs(x), abs(y)))
self.assertEqual(message.repeated_int32[0], -1)
self.assertEqual(message.repeated_int32[1], -2)
self.assertEqual(message.repeated_int32[2], -3)
message.repeated_string.append('aaa')
message.repeated_string.append('bb')
message.repeated_string.append('c')
message.repeated_string.sort(lambda x,y: cmp(len(x), len(y)))
self.assertEqual(message.repeated_string[0], 'c')
self.assertEqual(message.repeated_string[1], 'bb')
self.assertEqual(message.repeated_string[2], 'aaa')
def testSortingRepeatedCompositeFieldsCustomComparator(self):
"""Check passing a custom comparator to sort a repeated composite field."""
message = unittest_pb2.TestAllTypes()
message.repeated_nested_message.add().bb = 1
message.repeated_nested_message.add().bb = 3
message.repeated_nested_message.add().bb = 2
message.repeated_nested_message.add().bb = 6
message.repeated_nested_message.add().bb = 5
message.repeated_nested_message.add().bb = 4
message.repeated_nested_message.sort(lambda x,y: cmp(x.bb, y.bb))
self.assertEqual(message.repeated_nested_message[0].bb, 1)
self.assertEqual(message.repeated_nested_message[1].bb, 2)
self.assertEqual(message.repeated_nested_message[2].bb, 3)
self.assertEqual(message.repeated_nested_message[3].bb, 4)
self.assertEqual(message.repeated_nested_message[4].bb, 5)
self.assertEqual(message.repeated_nested_message[5].bb, 6)
def testRepeatedCompositeFieldSortArguments(self):
"""Check sorting a repeated composite field using list.sort() arguments."""
message = unittest_pb2.TestAllTypes()
get_bb = operator.attrgetter('bb')
cmp_bb = lambda a, b: cmp(a.bb, b.bb)
message.repeated_nested_message.add().bb = 1
message.repeated_nested_message.add().bb = 3
message.repeated_nested_message.add().bb = 2
message.repeated_nested_message.add().bb = 6
message.repeated_nested_message.add().bb = 5
message.repeated_nested_message.add().bb = 4
message.repeated_nested_message.sort(key=get_bb)
self.assertEqual([k.bb for k in message.repeated_nested_message],
[1, 2, 3, 4, 5, 6])
message.repeated_nested_message.sort(key=get_bb, reverse=True)
self.assertEqual([k.bb for k in message.repeated_nested_message],
[6, 5, 4, 3, 2, 1])
message.repeated_nested_message.sort(sort_function=cmp_bb)
self.assertEqual([k.bb for k in message.repeated_nested_message],
[1, 2, 3, 4, 5, 6])
message.repeated_nested_message.sort(cmp=cmp_bb, reverse=True)
self.assertEqual([k.bb for k in message.repeated_nested_message],
[6, 5, 4, 3, 2, 1])
def testRepeatedScalarFieldSortArguments(self):
"""Check sorting a scalar field using list.sort() arguments."""
message = unittest_pb2.TestAllTypes()
abs_cmp = lambda a, b: cmp(abs(a), abs(b))
message.repeated_int32.append(-3)
message.repeated_int32.append(-2)
message.repeated_int32.append(-1)
message.repeated_int32.sort(key=abs)
self.assertEqual(list(message.repeated_int32), [-1, -2, -3])
message.repeated_int32.sort(key=abs, reverse=True)
self.assertEqual(list(message.repeated_int32), [-3, -2, -1])
message.repeated_int32.sort(sort_function=abs_cmp)
self.assertEqual(list(message.repeated_int32), [-1, -2, -3])
message.repeated_int32.sort(cmp=abs_cmp, reverse=True)
self.assertEqual(list(message.repeated_int32), [-3, -2, -1])
len_cmp = lambda a, b: cmp(len(a), len(b))
message.repeated_string.append('aaa')
message.repeated_string.append('bb')
message.repeated_string.append('c')
message.repeated_string.sort(key=len)
self.assertEqual(list(message.repeated_string), ['c', 'bb', 'aaa'])
message.repeated_string.sort(key=len, reverse=True)
self.assertEqual(list(message.repeated_string), ['aaa', 'bb', 'c'])
message.repeated_string.sort(sort_function=len_cmp)
self.assertEqual(list(message.repeated_string), ['c', 'bb', 'aaa'])
message.repeated_string.sort(cmp=len_cmp, reverse=True)
self.assertEqual(list(message.repeated_string), ['aaa', 'bb', 'c'])
def testParsingMerge(self):
"""Check the merge behavior when a required or optional field appears
multiple times in the input."""
messages = [
unittest_pb2.TestAllTypes(),
unittest_pb2.TestAllTypes(),
unittest_pb2.TestAllTypes() ]
messages[0].optional_int32 = 1
messages[1].optional_int64 = 2
messages[2].optional_int32 = 3
messages[2].optional_string = 'hello'
merged_message = unittest_pb2.TestAllTypes()
merged_message.optional_int32 = 3
merged_message.optional_int64 = 2
merged_message.optional_string = 'hello'
generator = unittest_pb2.TestParsingMerge.RepeatedFieldsGenerator()
generator.field1.extend(messages)
generator.field2.extend(messages)
generator.field3.extend(messages)
generator.ext1.extend(messages)
generator.ext2.extend(messages)
generator.group1.add().field1.MergeFrom(messages[0])
generator.group1.add().field1.MergeFrom(messages[1])
generator.group1.add().field1.MergeFrom(messages[2])
generator.group2.add().field1.MergeFrom(messages[0])
generator.group2.add().field1.MergeFrom(messages[1])
generator.group2.add().field1.MergeFrom(messages[2])
data = generator.SerializeToString()
parsing_merge = unittest_pb2.TestParsingMerge()
parsing_merge.ParseFromString(data)
# Required and optional fields should be merged.
self.assertEqual(parsing_merge.required_all_types, merged_message)
self.assertEqual(parsing_merge.optional_all_types, merged_message)
self.assertEqual(parsing_merge.optionalgroup.optional_group_all_types,
merged_message)
self.assertEqual(parsing_merge.Extensions[
unittest_pb2.TestParsingMerge.optional_ext],
merged_message)
# Repeated fields should not be merged.
self.assertEqual(len(parsing_merge.repeated_all_types), 3)
self.assertEqual(len(parsing_merge.repeatedgroup), 3)
self.assertEqual(len(parsing_merge.Extensions[
unittest_pb2.TestParsingMerge.repeated_ext]), 3)
def testSortEmptyRepeatedCompositeContainer(self):
"""Exercise a scenario that has led to segfaults in the past.
"""
m = unittest_pb2.TestAllTypes()
m.repeated_nested_message.sort()
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
rogeriofalcone/libre | libre/apps/lock_manager/migrations/0001_initial.py | 2 | 1308 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Lock'
db.create_table('lock_manager_lock', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('creation_datetime', self.gf('django.db.models.fields.DateTimeField')()),
('timeout', self.gf('django.db.models.fields.IntegerField')(default=30)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=48)),
))
db.send_create_signal('lock_manager', ['Lock'])
def backwards(self, orm):
# Deleting model 'Lock'
db.delete_table('lock_manager_lock')
models = {
'lock_manager.lock': {
'Meta': {'object_name': 'Lock'},
'creation_datetime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '48'}),
'timeout': ('django.db.models.fields.IntegerField', [], {'default': '30'})
}
}
complete_apps = ['lock_manager'] | gpl-3.0 |
yebeloved/idapython | pywraps/py_expr.py | 16 | 5381 | # --------------------------------------------------------------------------
import os
import sys
import idaapi
import _idaapi
from sys import getrefcount
import gc
try:
import pywraps
pywraps_there = True
_idaapi.pyw_register_idc_func = pywraps.pyw_register_idc_func
_idaapi.pyw_unregister_idc_func = pywraps.pyw_unregister_idc_func
_idaapi.py_get_call_idc_func = pywraps.py_get_call_idc_func
_idaapi.py_set_idc_func_ex = pywraps.py_set_idc_func_ex
except Exception as e:
pywraps_there = False
print("exception: %s" % str(e))
print("Using PyWraps: %s" % pywraps_there)
# --------------------------------------------------------------------------
#<pycode(py_expr)>
try:
import types
import ctypes
# Callback for IDC func callback (On Windows, we use stdcall)
# typedef error_t idaapi idc_func_t(idc_value_t *argv,idc_value_t *r);
_IDCFUNC_CB_T = ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p)
# A trampoline function that is called from idcfunc_t that will
# call the Python callback with the argv and r properly serialized to python
call_idc_func__ = ctypes.CFUNCTYPE(ctypes.c_long)(_idaapi.py_get_call_idc_func())
except:
def call_idc_func__(*args):
warning("IDC extensions need ctypes library in order to work")
return 0
try:
_IDCFUNC_CB_T = CFUNCTYPE(c_int, c_void_p, c_void_p)
except:
_IDCFUNC_CB_T = None
# --------------------------------------------------------------------------
EXTFUN_BASE = 0x0001
"""requires open database"""
EXTFUN_NORET = 0x0002
"""does not return. the interpreter may clean up its state before calling it."""
EXTFUN_SAFE = 0x0004
"""thread safe function. may be called"""
# --------------------------------------------------------------------------
class _IdcFunction(object):
"""
Internal class that calls pyw_call_idc_func() with a context
"""
def __init__(self, ctxptr):
self.ctxptr = ctxptr
# Take a reference to the ctypes callback
# (note: this will create a circular reference)
self.cb = _IDCFUNC_CB_T(self)
fp_ptr = property(lambda self: ctypes.cast(self.cb, ctypes.c_void_p).value)
def __call__(self, args, res):
return call_idc_func__(self.ctxptr, args, res)
# --------------------------------------------------------------------------
# Dictionary to remember IDC function names along with the context pointer
# retrieved by using the internal pyw_register_idc_func()
__IDC_FUNC_CTXS = {}
# --------------------------------------------------------------------------
def set_idc_func_ex(name, fp=None, args=(), flags=0):
"""
Extends the IDC language by exposing a new IDC function that is backed up by a Python function
This function also unregisters the IDC function if 'fp' was passed as None
@param name: IDC function name to expose
@param fp: Python callable that will receive the arguments and return a tuple.
If this argument is None then the IDC function is unregistered
@param args: Arguments. A tuple of idaapi.VT_XXX constants
@param flags: IDC function flags. A combination of EXTFUN_XXX constants
@return: Boolean.
"""
global __IDC_FUNC_CTXS
# Get the context
f = __IDC_FUNC_CTXS.get(name, None)
# Unregistering?
if fp is None:
# Not registered?
if f is None:
return False
# Break circular reference
del f.cb
# Delete the name from the dictionary
del __IDC_FUNC_CTXS[name]
# Delete the context and unregister the function
return _idaapi.pyw_unregister_idc_func(f.ctxptr)
# Registering a function that is already registered?
if f is not None:
# Unregister it first
set_idc_func_ex(name, None)
# Convert the tupple argument info to a string
args = "".join([chr(x) for x in args])
# Create a context
ctxptr = _idaapi.pyw_register_idc_func(name, args, fp)
if ctxptr == 0:
return False
# Bind the context with the IdcFunc object
f = _IdcFunction(ctxptr)
# Remember the Python context
__IDC_FUNC_CTXS[name] = f
# Register IDC function with a callback
return _idaapi.py_set_idc_func_ex(
name,
f.fp_ptr,
args,
flags)
#</pycode(py_expr)>
# --------------------------------------------------------------------------
def test1():
global MY_IDC_FUNC
try:
# Already registered?
MY_IDC_FUNC
# Unregister
print("Unregistering function")
set_idc_func_ex(MY_IDC_FUNC)
except:
MY_IDC_FUNC = "pysum"
ok = set_idc_func_ex(MY_IDC_FUNC, my_idc_sum, (idaapi.VT_LONG, idaapi.VT_LONG), 0)
if not ok:
del MY_IDC_FUNC
#</pycode(ex_expr)>
# --------------------------------------------------------------------------
#<pycode(ex_expr)>
def py_power(n, e):
return n ** e
ok = set_idc_func_ex("pow", py_power, (idaapi.VT_LONG, idaapi.VT_LONG), 0)
if ok:
print("Now the pow() will be present IDC!")
else:
print("Failed to register pow() IDC function")
#</pycode(ex_expr)>
| bsd-3-clause |
KhronosGroup/COLLADA-CTS | StandardDataSets/collada/library_visual_scenes/visual_scene/extra/multiExtra/multiExtra.py | 4 | 3846 |
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verifications
# methods. The assistant buffers its checks, so that running them again
# does not incurs an unnecessary performance hint.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = ['library_visual_scenes', 'visual_scene', 'extra', 'technique']
attrName = 'profile'
attrVal = ''
dataToCheck = ''
class SimpleJudgingObject:
def __init__(self, _tagLst, _attrName, _attrVal, _data):
self.tagList = _tagLst
self.attrName = _attrName
self.attrVal = _attrVal
self.dataToCheck = _data
self.status_baseline = False
self.status_superior = False
self.status_exemplary = False
self.__assistant = JudgeAssistant.JudgeAssistant()
def JudgeBaseline(self, context):
# No step should not crash
self.__assistant.CheckCrashes(context)
# Import/export/validate must exist and pass, while Render must only exist.
self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], [])
self.status_baseline = self.__assistant.GetResults()
return self.status_baseline
# To pass intermediate you need to pass basic, this object could also include additional
# tests that were specific to the intermediate badge.
def JudgeSuperior(self, context):
if (self.status_baseline == False):
self.status_superior = self.status_baseline
return self.status_superior
# Check for preservation of element data
self.__assistant.FullPreservation(context, self.tagList, self.attrName)
self.status_superior = self.__assistant.DeferJudgement(context)
return self.status_superior
# To pass advanced you need to pass intermediate, this object could also include additional
# tests that were specific to the advanced badge
def JudgeExemplary(self, context):
self.status_exemplary = self.status_superior
return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
| mit |