repo_name (stringlengths 6–100) | path (stringlengths 4–294) | copies (stringlengths 1–5) | size (stringlengths 4–6) | content (stringlengths 606–896k) | license (stringclasses 15 values)
---|---|---|---|---|---
maljac/odoomrp-wip | product_attribute_value_image/__openerp__.py | 25 | 1514 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Product Attribute Value Image',
'version': "1.0",
'author': 'OdooMRP team,'
'AvanzOSC,'
'Serv. Tecnol. Avanzados - Pedro M. Baeza',
'website': "http://www.odoomrp.com",
"contributors": [
"Pedro M. Baeza <[email protected]",
"Ana Juaristi <[email protected]>",
"Alfredo de la Fuente <[email protected]>",
],
'category': 'Sales Management',
'depends': ['product',
'web_tree_image'
],
'data': ['views/product_attribute_value_view.xml'
],
'installable': True,
}
| agpl-3.0 |
GGoussar/scikit-image | skimage/feature/tests/test_canny.py | 19 | 4525 | import unittest
import numpy as np
from numpy.testing import assert_equal
from scipy.ndimage import binary_dilation, binary_erosion
import skimage.feature as F
from skimage import filters, data
from skimage import img_as_float
class TestCanny(unittest.TestCase):
def test_00_00_zeros(self):
'''Test that the Canny filter finds no points for a blank field'''
result = F.canny(np.zeros((20, 20)), 4, 0, 0, np.ones((20, 20), bool))
self.assertFalse(np.any(result))
def test_00_01_zeros_mask(self):
'''Test that the Canny filter finds no points in a masked image'''
result = (F.canny(np.random.uniform(size=(20, 20)), 4, 0, 0,
np.zeros((20, 20), bool)))
self.assertFalse(np.any(result))
def test_01_01_circle(self):
'''Test that the Canny filter finds the outlines of a circle'''
i, j = np.mgrid[-200:200, -200:200].astype(float) / 200
c = np.abs(np.sqrt(i * i + j * j) - .5) < .02
result = F.canny(c.astype(float), 4, 0, 0, np.ones(c.shape, bool))
#
# erode and dilate the circle to get rings that should contain the
# outlines
#
cd = binary_dilation(c, iterations=3)
ce = binary_erosion(c, iterations=3)
cde = np.logical_and(cd, np.logical_not(ce))
self.assertTrue(np.all(cde[result]))
#
# The circle has a radius of 100. There are two rings here, one
# for the inside edge and one for the outside. So that's
# 100 * 2 * 2 * 3 for those places where pi is still 3.
# The edge contains both pixels if there's a tie, so we
# bump the count a little.
point_count = np.sum(result)
self.assertTrue(point_count > 1200)
self.assertTrue(point_count < 1600)
def test_01_02_circle_with_noise(self):
'''Test that the Canny filter finds the circle outlines
in a noisy image'''
np.random.seed(0)
i, j = np.mgrid[-200:200, -200:200].astype(float) / 200
c = np.abs(np.sqrt(i * i + j * j) - .5) < .02
cf = c.astype(float) * .5 + np.random.uniform(size=c.shape) * .5
result = F.canny(cf, 4, .1, .2, np.ones(c.shape, bool))
#
# erode and dilate the circle to get rings that should contain the
# outlines
#
cd = binary_dilation(c, iterations=4)
ce = binary_erosion(c, iterations=4)
cde = np.logical_and(cd, np.logical_not(ce))
self.assertTrue(np.all(cde[result]))
point_count = np.sum(result)
self.assertTrue(point_count > 1200)
self.assertTrue(point_count < 1600)
def test_image_shape(self):
self.assertRaises(ValueError, F.canny, np.zeros((20, 20, 20)), 4, 0, 0)
def test_mask_none(self):
result1 = F.canny(np.zeros((20, 20)), 4, 0, 0, np.ones((20, 20), bool))
result2 = F.canny(np.zeros((20, 20)), 4, 0, 0)
self.assertTrue(np.all(result1 == result2))
def test_use_quantiles(self):
image = img_as_float(data.camera()[::50,::50])
# Correct output produced manually with quantiles
# of 0.8 and 0.6 for high and low respectively
correct_output = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=bool)
result = F.canny(image, low_threshold=0.6, high_threshold=0.8, use_quantiles=True)
assert_equal(result, correct_output)
def test_invalid_use_quantiles(self):
image = img_as_float(data.camera()[::50,::50])
self.assertRaises(ValueError, F.canny, image, use_quantiles=True,
low_threshold=0.5, high_threshold=3.6)
self.assertRaises(ValueError, F.canny, image, use_quantiles=True,
low_threshold=-5, high_threshold=0.5)
self.assertRaises(ValueError, F.canny, image, use_quantiles=True,
low_threshold=99, high_threshold=0.9)
self.assertRaises(ValueError, F.canny, image, use_quantiles=True,
low_threshold=0.5, high_threshold=-100)
| bsd-3-clause |
semonte/intellij-community | python/lib/Lib/token.py | 108 | 2926 | #! /usr/bin/env python
"""Token constants (from "token.h")."""
# This file is automatically generated; please don't muck it up!
#
# To update the symbols in this file, 'cd' to the top directory of
# the python source tree after building the interpreter and run:
#
# python Lib/token.py
#--start constants--
ENDMARKER = 0
NAME = 1
NUMBER = 2
STRING = 3
NEWLINE = 4
INDENT = 5
DEDENT = 6
LPAR = 7
RPAR = 8
LSQB = 9
RSQB = 10
COLON = 11
COMMA = 12
SEMI = 13
PLUS = 14
MINUS = 15
STAR = 16
SLASH = 17
VBAR = 18
AMPER = 19
LESS = 20
GREATER = 21
EQUAL = 22
DOT = 23
PERCENT = 24
BACKQUOTE = 25
LBRACE = 26
RBRACE = 27
EQEQUAL = 28
NOTEQUAL = 29
LESSEQUAL = 30
GREATEREQUAL = 31
TILDE = 32
CIRCUMFLEX = 33
LEFTSHIFT = 34
RIGHTSHIFT = 35
DOUBLESTAR = 36
PLUSEQUAL = 37
MINEQUAL = 38
STAREQUAL = 39
SLASHEQUAL = 40
PERCENTEQUAL = 41
AMPEREQUAL = 42
VBAREQUAL = 43
CIRCUMFLEXEQUAL = 44
LEFTSHIFTEQUAL = 45
RIGHTSHIFTEQUAL = 46
DOUBLESTAREQUAL = 47
DOUBLESLASH = 48
DOUBLESLASHEQUAL = 49
AT = 50
OP = 51
ERRORTOKEN = 52
N_TOKENS = 53
NT_OFFSET = 256
#--end constants--
tok_name = {}
for _name, _value in globals().items():
if type(_value) is type(0):
tok_name[_value] = _name
def ISTERMINAL(x):
return x < NT_OFFSET
def ISNONTERMINAL(x):
return x >= NT_OFFSET
def ISEOF(x):
return x == ENDMARKER
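# A quick illustration of the predicates above (values taken from the
# constants in this file): terminal tokens sit below NT_OFFSET, grammar
# nonterminals sit at or above it, and ENDMARKER doubles as the EOF marker.
# >>> ISTERMINAL(NAME), ISNONTERMINAL(NT_OFFSET), ISEOF(ENDMARKER)
# (True, True, True)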
def main():
import re
import sys
args = sys.argv[1:]
inFileName = args and args[0] or "Include/token.h"
outFileName = "Lib/token.py"
if len(args) > 1:
outFileName = args[1]
try:
fp = open(inFileName)
except IOError, err:
sys.stdout.write("I/O error: %s\n" % str(err))
sys.exit(1)
lines = fp.read().split("\n")
fp.close()
prog = re.compile(
"#define[ \t][ \t]*([A-Z0-9][A-Z0-9_]*)[ \t][ \t]*([0-9][0-9]*)",
re.IGNORECASE)
tokens = {}
for line in lines:
match = prog.match(line)
if match:
name, val = match.group(1, 2)
val = int(val)
tokens[val] = name # reverse so we can sort them...
keys = tokens.keys()
keys.sort()
# load the output skeleton from the target:
try:
fp = open(outFileName)
except IOError, err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(2)
format = fp.read().split("\n")
fp.close()
try:
start = format.index("#--start constants--") + 1
end = format.index("#--end constants--")
except ValueError:
sys.stderr.write("target does not contain format markers")
sys.exit(3)
lines = []
for val in keys:
lines.append("%s = %d" % (tokens[val], val))
format[start:end] = lines
try:
fp = open(outFileName, 'w')
except IOError, err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(4)
fp.write("\n".join(format))
fp.close()
if __name__ == "__main__":
main()
| apache-2.0 |
zofuthan/zulip | api/integrations/codebase/zulip_codebase_config.py | 124 | 2537 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2014 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Change these values to configure authentication for your codebase account
# Note that this is the Codebase API Username, found in the Settings page
# for your account
CODEBASE_API_USERNAME = "[email protected]"
CODEBASE_API_KEY = "1234561234567abcdef"
# The URL of your codebase setup
CODEBASE_ROOT_URL = "https://YOUR_COMPANY.codebasehq.com"
# When initially started, how many hours of messages to include.
# Note that the Codebase API only returns the 20 latest events,
# if you have more than 20 events that fit within this window,
# earlier ones may be lost
CODEBASE_INITIAL_HISTORY_HOURS = 12
# Change these values to configure Zulip authentication for the plugin
ZULIP_USER = "[email protected]"
ZULIP_API_KEY = "0123456789abcdef0123456789abcdef"
# The streams to send commit information and ticket information to
ZULIP_COMMITS_STREAM_NAME = "codebase"
ZULIP_TICKETS_STREAM_NAME = "tickets"
# If properly installed, the Zulip API should be in your import
# path, but if not, set a custom path below
ZULIP_API_PATH = None
# Set this to your Zulip API server URI
ZULIP_SITE = "https://api.zulip.com"
# If you wish to log to a file rather than stdout/stderr,
# please fill this in with your desired path
LOG_FILE = None
# This file is used to resume this mirror in case the script shuts down.
# It is required and needs to be writeable.
RESUME_FILE = "/var/tmp/zulip_codebase.state"
| apache-2.0 |
Jgarcia-IAS/Fidelizacion_odoo | openerp/addons/purchase_requisition/wizard/bid_line_qty.py | 374 | 1711 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
class bid_line_qty(osv.osv_memory):
_name = "bid.line.qty"
_description = "Change Bid line quantity"
_columns = {
'qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
}
def change_qty(self, cr, uid, ids, context=None):
active_ids = context and context.get('active_ids', [])
data = self.browse(cr, uid, ids, context=context)[0]
self.pool.get('purchase.order.line').write(cr, uid, active_ids, {'quantity_bid': data.qty})
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ncoden/NASM | test/gas2nasm.py | 9 | 3289 | #!/usr/bin/env python -tt
# -*- python -*-
# Convert gas testsuite file to NASM test asm file
# usage >
# python gas2nasm.py -i input_gas_file -o output_nasm_file -b bits
# e.g. python gas2nasm.py -i x86-64-avx512f-intel.d -o avx512f.asm -b 64
import sys
import os
import optparse
import re
def setup():
parser = optparse.OptionParser()
parser.add_option('-i', dest='input', action='store',
default="",
help='Name for input gas testsuite file.')
parser.add_option('-o', dest='output', action='store',
default="",
help='Name for output NASM test asm file.')
parser.add_option('-b', dest='bits', action='store',
default="",
help='Bits for output ASM file.')
parser.add_option('-r', dest='raw_output', action='store',
default="",
help='Name for raw output bytes in text')
(options, args) = parser.parse_args()
return options
def read(options):
with open(options.input, 'rb') as f:
recs = []
for line in f:
if line[0] == '[':
d = []
strr = line[16:].partition(' ')
if strr[1] == '':
strr = line[16:].partition('\t')
l = strr[0].strip()
r = strr[2].strip()
d.append(l)
d.append(r)
recs.append(d)
return recs
def commas(recs):
replace_tbl = {' PTR':'', '\\':'', 'MM':'', 'XWORD':'OWORD'}
reccommas = []
for insn in recs:
new = []
byte = '0x' + insn[0].replace(' ', ', 0x')
for rep in replace_tbl.keys():
insn[1] = insn[1].replace(rep, replace_tbl[rep])
mnemonic = insn[1]
# gas size specifier for gather and scatter instructions seems wrong. just remove them.
if 'gather' in insn[1] or 'scatter' in insn[1]:
mnemonic = mnemonic.replace('ZWORD', '')
new.append(byte)
new.append(mnemonic)
reccommas.append(new)
return reccommas
# The spaces reserved here can be adjusted according to the output string length.
# maxlen printed out at the end of the process will give a hint for it.
outstrfmt = "testcase\t{ %-70s }, { %-60s }\n"
macro = "%macro testcase 2\n %ifdef BIN\n db %1\n %endif\n %ifdef SRC\n %2\n %endif\n%endmacro\n\n\n"
def write(data, options):
if options.output:
with open(options.output, 'wb') as out:
out.write(macro)
if options.bits:
out.write('bits ' + options.bits + '\n\n')
for insn in data:
outstr = outstrfmt % tuple(insn)
out.write(outstr)
def write_rawbytes(data, options):
if options.raw_output:
with open(options.raw_output, 'wb') as out:
for insn in data:
out.write(insn[0] + '\n')
if __name__ == "__main__":
options = setup()
recs = read(options)
write_rawbytes(recs, options)
recs = commas(recs)
write(recs, options)
maxlen = [0,0,0,0,0,0,0,0]
for insn in recs:
#print insn[0] + '\t<-\t' + insn[1]
print outstrfmt[:-1] % tuple(insn)
for i, strstr in enumerate(insn):
if maxlen[i] < len(strstr): maxlen[i] = len(strstr)
print maxlen
| bsd-2-clause |
madelynfreed/rlundo | venv/lib/python2.7/site-packages/IPython/utils/frame.py | 15 | 3165 | # encoding: utf-8
"""
Utilities for working with stack frames.
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
from IPython.utils import py3compat
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
@py3compat.doctest_refactor_print
def extract_vars(*names,**kw):
"""Extract a set of variables by name from another frame.
Parameters
----------
*names : str
One or more variable names which will be extracted from the caller's
frame.
depth : integer, optional
How many frames in the stack to walk when looking for your variables.
The default is 0, which will use the frame where the call was made.
Examples
--------
::
In [2]: def func(x):
...: y = 1
...: print(sorted(extract_vars('x','y').items()))
...:
In [3]: func('hello')
[('x', 'hello'), ('y', 1)]
"""
depth = kw.get('depth',0)
callerNS = sys._getframe(depth+1).f_locals
return dict((k,callerNS[k]) for k in names)
def extract_vars_above(*names):
"""Extract a set of variables by name from another frame.
Similar to extract_vars(), but with a specified depth of 1, so that names
are extracted exactly from above the caller.
This is simply a convenience function so that the very common case (for us)
of skipping exactly 1 frame doesn't have to construct a special dict for
keyword passing."""
callerNS = sys._getframe(2).f_locals
return dict((k,callerNS[k]) for k in names)
def debugx(expr,pre_msg=''):
"""Print the value of an expression from the caller's frame.
Takes an expression, evaluates it in the caller's frame and prints both
the given expression and the resulting value (as well as a debug mark
indicating the name of the calling function. The input must be of a form
suitable for eval().
An optional message can be passed, which will be prepended to the printed
expr->value pair."""
cf = sys._getframe(1)
print('[DBG:%s] %s%s -> %r' % (cf.f_code.co_name,pre_msg,expr,
eval(expr,cf.f_globals,cf.f_locals)))
# deactivate it by uncommenting the following line, which makes it a no-op
#def debugx(expr,pre_msg=''): pass
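# A minimal usage sketch (hypothetical function, not part of this module):
#
# def _compute():
#     answer = 6 * 7
#     debugx('answer', 'checking: ')
#     # prints: [DBG:_compute] checking: answer -> 42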
def extract_module_locals(depth=0):
"""Returns (module, locals) of the funciton `depth` frames away from the caller"""
f = sys._getframe(depth + 1)
global_ns = f.f_globals
module = sys.modules[global_ns['__name__']]
return (module, f.f_locals)
| gpl-3.0 |
hradec/gaffer | python/GafferImageTest/ConstantTest.py | 7 | 7463 | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import imath
import IECore
import Gaffer
import GafferTest
import GafferImage
import GafferImageTest
class ConstantTest( GafferImageTest.ImageTestCase ) :
def testChannelData( self ) :
constant = GafferImage.Constant()
constant["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 511 ) ), 1 ) )
constant["color"].setValue( imath.Color4f( 0, 0.25, 0.5, 1 ) )
for i, channel in enumerate( [ "R", "G", "B", "A" ] ) :
channelData = constant["out"].channelData( channel, imath.V2i( 0 ) )
self.assertEqual( len( channelData ), constant["out"].tileSize() * constant["out"].tileSize() )
expectedValue = constant["color"][i].getValue()
for value in channelData :
self.assertEqual( value, expectedValue )
def testChannelDataHash( self ) :
# The hash for each individual channel should only
# be affected by that particular channel of the colour plug.
constant = GafferImage.Constant()
constant["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 511 ) ), 1 ) )
constant["color"].setValue( imath.Color4f( 0 ) )
channels = [ "R", "G", "B", "A" ]
for i, channel in enumerate( channels ) :
h1 = [ constant["out"].channelDataHash( c, imath.V2i( 0 ) ) for c in channels ]
constant["color"][i].setValue( constant["color"][i].getValue() + .1 )
h2 = [ constant["out"].channelDataHash( c, imath.V2i( 0 ) ) for c in channels ]
for j in range( 0, len( channels ) ) :
if j == i :
self.assertNotEqual( h1[j], h2[j] )
else :
self.assertEqual( h1[j], h2[j] )
def testFormatHash( self ) :
# Check that the data hash doesn't change when the format does.
c = GafferImage.Constant()
c["format"].setValue( GafferImage.Format( 2048, 1156, 1. ) )
h1 = c["out"].channelData( "R", imath.V2i( 0 ) ).hash()
c["format"].setValue( GafferImage.Format( 1920, 1080, 1. ) )
h2 = c["out"].channelData( "R", imath.V2i( 0 ) ).hash()
self.assertEqual( h1, h2 )
def testTileHashes( self ) :
# Test that two tiles within the image have the same hash.
c = GafferImage.Constant()
c["format"].setValue( GafferImage.Format( 2048, 1156, 1. ) )
c["color"][0].setValue( .5 )
self.assertEqual(
c["out"].channelDataHash( "R", imath.V2i( 0 ) ),
c["out"].channelDataHash( "R", imath.V2i( GafferImage.ImagePlug().tileSize() ) ),
)
def testTileIdentity( self ) :
c = GafferImage.Constant()
c["format"].setValue( GafferImage.Format( 2048, 1156, 1. ) )
# The channelData() binding returns a copy by default, so we wouldn't
# expect two tiles to be referencing the same object.
self.assertFalse(
c["out"].channelData( "R", imath.V2i( 0 ) ).isSame(
c["out"].channelData( "R", imath.V2i( GafferImage.ImagePlug.tileSize() ) )
)
)
# But behind the scenes we do want them to be the same, so
# check that that is the case.
self.assertTrue(
c["out"].channelData( "R", imath.V2i( 0 ), _copy = False ).isSame(
c["out"].channelData( "R", imath.V2i( GafferImage.ImagePlug.tileSize() ), _copy = False )
)
)
def testEnableBehaviour( self ) :
c = GafferImage.Constant()
self.assertTrue( c.enabledPlug().isSame( c["enabled"] ) )
self.assertEqual( c.correspondingInput( c["out"] ), None )
self.assertEqual( c.correspondingInput( c["color"] ), None )
self.assertEqual( c.correspondingInput( c["format"] ), None )
def testChannelNamesHash( self ) :
c = GafferImage.Constant()
h1 = c["out"]["channelNames"].hash()
c["color"].setValue( imath.Color4f( 1, 0.5, 0.25, 1 ) )
h2 = c["out"]["channelNames"].hash()
self.assertEqual( h1, h2 )
def testSerialisationWithZeroAlpha( self ) :
s = Gaffer.ScriptNode()
s["c"] = GafferImage.Constant()
s["c"]["color"].setValue( imath.Color4f( 0, 1, 0, 0 ) )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( s2["c"]["color"].getValue(), imath.Color4f( 0, 1, 0, 0 ) )
def testFormatDependencies( self ) :
c = GafferImage.Constant()
self.assertEqual(
c.affects( c["format"]["displayWindow"]["min"]["x"] ),
[ c["out"]["format"], c["out"]["dataWindow"] ],
)
# For the sake of simplicity when dealing with falling back to a default format from the context,
# we make all child plugs of the format affect everything that depends at all on the format
self.assertEqual(
c.affects( c["format"]["pixelAspect"] ),
[ c["out"]["format"], c["out"]["dataWindow"] ],
)
def testLayer( self ) :
c1 = GafferImage.Constant()
c2 = GafferImage.Constant()
c1["color"].setValue( imath.Color4f( 1, 0.5, 0.25, 1 ) )
c2["color"].setValue( imath.Color4f( 1, 0.5, 0.25, 1 ) )
c2["layer"].setValue( "diffuse" )
self.assertEqual(
c1["out"]["channelNames"].getValue(),
IECore.StringVectorData( [ "R", "G", "B", "A" ] )
)
self.assertEqual(
c2["out"]["channelNames"].getValue(),
IECore.StringVectorData( [ "diffuse.R", "diffuse.G", "diffuse.B", "diffuse.A" ] )
)
for channelName in ( "R", "G", "B", "A" ) :
self.assertEqual(
c1["out"].channelDataHash( channelName, imath.V2i( 0 ) ),
c2["out"].channelDataHash( "diffuse." + channelName, imath.V2i( 0 ) )
)
self.assertEqual(
c1["out"].channelData( channelName, imath.V2i( 0 ) ),
c2["out"].channelData( "diffuse." + channelName, imath.V2i( 0 ) )
)
def testLayerAffectsChannelNames( self ) :
c = GafferImage.Constant()
cs = GafferTest.CapturingSlot( c.plugDirtiedSignal() )
c["layer"].setValue( "diffuse" )
self.assertTrue( c["out"]["channelNames"] in set( [ x[0] for x in cs ] ) )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
ianatpn/nupictest | nupic/support/datafiles.py | 17 | 7952 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# TODO for NUPIC 2 -- document the interface!
# TODO for NuPIC 2 -- should this move to inferenceanalysis?
def _calculateColumnsFromLine(line):
if "," in line:
splitLine = line.strip().split(",")
n = len(splitLine)
if n:
if not splitLine[-1].strip():
return n-1
else:
return n
else:
return 0
else:
# Too flexible.
# return len([x for x in line.strip().split() if x != ","])
return len(line.strip().split())
def _isComment(strippedLine):
if strippedLine:
return strippedLine.startswith("#")
else:
return True
def _calculateColumnsFromFile(f, format, rewind):
# Calculate the number of columns.
# We will put more trust in the second line that the first, in case the
# first line includes header entries.
if format not in [0, 2, 3]:
raise RuntimeError("Supported formats are 0, 2, and 3.")
if format == 0:
line0 = f.readline()
csplit = line0.split()
if len(csplit) != 1:
raise RuntimeError("Expected first line of data file to "
"contain a single number of columns. "
" Found %d fields" % len(csplit))
try:
numColumns = int(csplit[0])
except:
raise RuntimeError("Expected first line of data file to "
"contain a single number of columns. Found '%s'" % csplit[0])
if rewind:
f.seek(0)
return numColumns
elif format == 2:
numColumns = 0
numLinesRead = 0
for line in f:
strippedLine = line.strip()
if not _isComment(strippedLine):
curColumns = _calculateColumnsFromLine(strippedLine)
numLinesRead += 1
if numColumns and (numColumns != curColumns):
raise RuntimeError("Different lines have different "
"numbers of columns.")
else:
numColumns = curColumns
if numLinesRead > 1:
break
if rewind:
f.seek(0)
return numColumns
# CSV file: we'll just check the first line
elif format == 3:
strippedLine = f.readline().strip()
numColumns = _calculateColumnsFromLine(strippedLine)
if rewind:
f.seek(0)
return numColumns
def processCategoryFile(f, format, categoryColumn=None, categoryColumns=None, count=1):
"""Read the data out of the given category file, returning a tuple
(categoryCount, listOfCategories)
@param f A file-like object containing the category info.
@param format The format of the category file. TODO: describe.
@param categoryColumn If non-None, this is the column number (zero-based)
where the category info starts in the file. If
None, indicates that the file only contains category
information (same as passing 0, but allows some
extra sanity checking).
@param categoryColumns Indicates how many categories are active per
timepoint (how many elements wide the category info
is). If 0, we'll determine this from the file. If
None (the default), means that the category info
is 1 element wide, and that the list we return
will just be a list of ints (rather than a list of
lists)
@param count Determines the size of chunks that will be aggregated
into a single entry. The default is 1, so each entry
from the file will be represented in the result. If
count > 1 then 'count' categories (all identical) will
be collapsed into a single entry. This is helpful for
aggregating explorers like EyeMovements where multiple
presentations are conceptually the same item.
@return categoryCount The number of categories (aka maxCat + 1)
@return allCategories A list of the categories read in, with one item per
time point. If 'categoryColumns' is None, each item
will be an int. Otherwise, each item will be a list
of ints. If count > 1 then the categories will be
aggregated, so that each chunk of 'count' categories
will result in only one entry (all categories in a chunk
must be identical)
"""
calculatedCategoryColumns = _calculateColumnsFromFile(f, format=format,
rewind=(format==2 or format==3))
# If the user passed categoryColumns as None, we'll return a list of ints
# directly; otherwise we'll return a list of lists...
wantListOfInts = (categoryColumns is None)
# Get arguments sanitized...
if categoryColumns == 0:
# User has told us to auto-calculate the # of categories / time point...
# If categoryColumn is not 0 or None, that's an error...
if categoryColumn:
raise RuntimeError("You can't specify an offset for category data "
"if using automatic width.")
categoryColumn = 0
categoryColumns = calculatedCategoryColumns
elif categoryColumns is None:
# User has told us that there's just one category...
if categoryColumn is None:
if calculatedCategoryColumns != 1:
raise RuntimeError("Category file must contain exactly one column.")
categoryColumn = 0
categoryColumns = 1
else:
# User specified exactly how big the category data is...
if (categoryColumns + categoryColumn) > calculatedCategoryColumns:
raise RuntimeError("Not enough categories in file")
maxCategory = 0
allCategories = []
for line in f:
strippedLine = line.strip()
if not _isComment(strippedLine):
if wantListOfInts:
category = int(strippedLine.split()[categoryColumn])
allCategories.append(category)
maxCategory = max(maxCategory, category)
else:
categories = strippedLine.split()[categoryColumn:
categoryColumn+categoryColumns]
categories = map(int, categories)
allCategories.append(categories)
maxCategory = max(maxCategory, max(categories))
categoryCount = maxCategory + 1
# Aggregate categories
result = []
if count > 1:
# Make sure there the number of categories can be aggregated
# exactly by chunks of size 'count'
assert len(allCategories) % count == 0
start = 0
for i in range(len(allCategories) / count):
end = start + count
# Make sure each chunk of size 'count' contains exactly one category
assert (min(allCategories[start:end]) == max(allCategories[start:end]))
# Add just one entry for each chunk
result.append(allCategories[start])
start = end
else:
result = allCategories
return categoryCount, result
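# A minimal usage sketch (hypothetical data, not part of the original
# module). format=2 infers the column count from the data itself, and
# count=2 collapses each pair of identical categories into one entry:
if __name__ == "__main__":
    from StringIO import StringIO
    _f = StringIO("0\n0\n1\n1\n2\n2\n")
    print processCategoryFile(_f, format=2, count=2)  # -> (3, [0, 1, 2])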
| gpl-3.0 |
markreidvfx/pyaaf | tests/test_diagnostic_output.py | 1 | 2446 | from __future__ import print_function
import aaf
import aaf.mob
import aaf.define
import aaf.iterator
import aaf.dictionary
import aaf.storage
import aaf.component
import aaf.util
import traceback
import unittest
import os
from aaf.util import AUID, MobID
cur_dir = os.path.dirname(os.path.abspath(__file__))
sandbox = os.path.join(cur_dir,'sandbox')
if not os.path.exists(sandbox):
os.makedirs(sandbox)
LOG = ""
def diagnostic_output_callback(message):
global LOG
LOG += message
class TestDiagnosticOutput(unittest.TestCase):
# This test only works on debug builds
def test_basic(self):
aaf.util.set_diagnostic_output_callback(diagnostic_output_callback)
test_file = os.path.join(sandbox, "test_DiagnosticOutput.aaf")
f = aaf.open(test_file, 'w')
timeline = f.create.TimelineMobSlot()
timeline.mark_in = 1
assert timeline.mark_in == 1
timeline.mark_in = 2
assert timeline.mark_in == 2
timeline.mark_out = 100
assert timeline.mark_out == 100
timeline.mark_out = 10
assert timeline.mark_out == 10
# File won't save unless MobSlot has a segment
# seq = f.create.Sequence("picture")
# timeline.segment = seq
mob = f.create.MasterMob()
mob.append_slot(timeline)
f.storage.add_mob(mob)
try:
f.save()
except:
print(traceback.format_exc())
global LOG
# there should be something in the log
assert len(LOG)
print("Diagnostic Log:\n")
print(LOG)
print("A stack track and a diagnostic should print out, this is corrrect!")
def test_get_library_path_name(self):
path = aaf.util.get_library_path_name()
assert os.path.exists(path)
print("com api =", path)
def test_get_static_library_version(self):
v = aaf.util.get_static_library_version()
assert v
version_string = "%i.%i.%i.%i-%s" % (v['major'], v['minor'], v['tertiary'], v['patchLevel'], v['type'])
print("static library version =", version_string)
def test_get_library_version(self):
v = aaf.util.get_library_version()
assert v
version_string = "%i.%i.%i.%i-%s" % (v['major'], v['minor'], v['tertiary'], v['patchLevel'], v['type'])
print("library version =", version_string)
if __name__ == "__main__":
unittest.main()
| mit |
hale36/SRTV | lib/imdb/parser/http/movieParser.py | 40 | 80839 | """
parser.http.movieParser module (imdb package).
This module provides the classes (and the instances), used to parse the
IMDb pages on the akas.imdb.com server about a movie.
E.g., for Brian De Palma's "The Untouchables", the referred
pages would be:
combined details: http://akas.imdb.com/title/tt0094226/combined
plot summary: http://akas.imdb.com/title/tt0094226/plotsummary
...and so on...
Copyright 2004-2013 Davide Alberani <[email protected]>
2008 H. Turgut Uyar <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
import urllib
from imdb import imdbURL_base
from imdb.Person import Person
from imdb.Movie import Movie
from imdb.Company import Company
from imdb.utils import analyze_title, split_company_name_notes, _Container
from utils import build_person, DOMParserBase, Attribute, Extractor, \
analyze_imdbid
# Dictionary used to convert some section's names.
_SECT_CONV = {
'directed': 'director',
'directed by': 'director',
'directors': 'director',
'editors': 'editor',
'writing credits': 'writer',
'writers': 'writer',
'produced': 'producer',
'cinematography': 'cinematographer',
'film editing': 'editor',
'casting': 'casting director',
'costume design': 'costume designer',
'makeup department': 'make up',
'production management': 'production manager',
'second unit director or assistant director': 'assistant director',
'costume and wardrobe department': 'costume department',
'sound department': 'sound crew',
'stunts': 'stunt performer',
'other crew': 'miscellaneous crew',
'also known as': 'akas',
'country': 'countries',
'runtime': 'runtimes',
'language': 'languages',
'certification': 'certificates',
'genre': 'genres',
'created': 'creator',
'creators': 'creator',
'color': 'color info',
'plot': 'plot outline',
'seasons': 'number of seasons',
'art directors': 'art direction',
'assistant directors': 'assistant director',
'set decorators': 'set decoration',
'visual effects department': 'visual effects',
'production managers': 'production manager',
'miscellaneous': 'miscellaneous crew',
'make up department': 'make up',
'plot summary': 'plot outline',
'cinematographers': 'cinematographer',
'camera department': 'camera and electrical department',
'costume designers': 'costume designer',
'production designers': 'production design',
'music original': 'original music',
'casting directors': 'casting director',
'other companies': 'miscellaneous companies',
'producers': 'producer',
'special effects by': 'special effects department',
'special effects': 'special effects companies'
}
def _manageRoles(mo):
"""Perform some transformation on the html, so that roleIDs can
be easily retrieved."""
firstHalf = mo.group(1)
secondHalf = mo.group(2)
newRoles = []
roles = secondHalf.split(' / ')
for role in roles:
role = role.strip()
if not role:
continue
roleID = analyze_imdbid(role)
if roleID is None:
roleID = u'/'
else:
roleID += u'/'
newRoles.append(u'<div class="_imdbpyrole" roleid="%s">%s</div>' % \
(roleID, role.strip()))
return firstHalf + u' / '.join(newRoles) + mo.group(3)
_reRolesMovie = re.compile(r'(<td class="char">)(.*?)(</td>)',
re.I | re.M | re.S)
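# For illustration (hypothetical cast cell): _reRolesMovie.sub(_manageRoles, html)
# rewrites '<td class="char">Ness / <a href="/character/ch0000001/">Capone</a></td>'
# so that each ' / '-separated role is wrapped in
# '<div class="_imdbpyrole" roleid="...">...</div>', letting the cast extractor
# read roleIDs straight from the roleid attribute.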
def _replaceBR(mo):
"""Replaces <br> tags with '::' (useful for some akas)"""
txt = mo.group(0)
return txt.replace('<br>', '::')
_reAkas = re.compile(r'<h5>also known as:</h5>.*?</div>', re.I | re.M | re.S)
def makeSplitter(lstrip=None, sep='|', comments=True,
origNotesSep=' (', newNotesSep='::(', strip=None):
"""Return a splitter function suitable for a given set of data."""
def splitter(x):
if not x: return x
x = x.strip()
if not x: return x
if lstrip is not None:
x = x.lstrip(lstrip).lstrip()
lx = x.split(sep)
lx[:] = filter(None, [j.strip() for j in lx])
if comments:
lx[:] = [j.replace(origNotesSep, newNotesSep, 1) for j in lx]
if strip:
lx[:] = [j.strip(strip) for j in lx]
return lx
return splitter
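# A minimal illustration (hypothetical input string): splitters built this way
# turn a pipe-separated field into a clean list, moving parenthesized notes
# behind a '::' marker.
# >>> makeSplitter(lstrip='Countries:')('Countries: USA | Italy (as Italia)')
# ['USA', 'Italy::(as Italia)']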
def _toInt(val, replace=()):
"""Return the value, converted to integer, or None; if present, 'replace'
must be a list of tuples of values to replace."""
for before, after in replace:
val = val.replace(before, after)
try:
return int(val)
except (TypeError, ValueError):
return None
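# For illustration: _toInt('Top 250: #42', [('Top 250: #', '')]) returns 42,
# while any value that still fails int() conversion yields None.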
class DOMHTMLMovieParser(DOMParserBase):
"""Parser for the "combined details" (and if instance.mdparse is
True also for the "main details") page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
mparser = DOMHTMLMovieParser()
result = mparser.parse(combined_details_html_string)
"""
_containsObjects = True
extractors = [Extractor(label='title',
path="//h1",
attrs=Attribute(key='title',
path=".//text()",
postprocess=analyze_title)),
Extractor(label='glossarysections',
group="//a[@class='glossary']",
group_key="./@name",
group_key_normalize=lambda x: x.replace('_', ' '),
path="../../../..//tr",
attrs=Attribute(key=None,
multi=True,
path={'person': ".//text()",
'link': "./td[1]/a[@href]/@href"},
postprocess=lambda x: \
build_person(x.get('person') or u'',
personID=analyze_imdbid(x.get('link')))
)),
Extractor(label='cast',
path="//table[@class='cast']//tr",
attrs=Attribute(key="cast",
multi=True,
path={'person': ".//text()",
'link': "td[2]/a/@href",
'roleID': \
"td[4]/div[@class='_imdbpyrole']/@roleid"},
postprocess=lambda x: \
build_person(x.get('person') or u'',
personID=analyze_imdbid(x.get('link')),
roleID=(x.get('roleID') or u'').split('/'))
)),
Extractor(label='genres',
path="//div[@class='info']//a[starts-with(@href," \
" '/Sections/Genres')]",
attrs=Attribute(key="genres",
multi=True,
path="./text()")),
Extractor(label='h5sections',
path="//div[@class='info']/h5/..",
attrs=[
Attribute(key="plot summary",
path="./h5[starts-with(text(), " \
"'Plot:')]/../div/text()",
postprocess=lambda x: \
x.strip().rstrip('|').rstrip()),
Attribute(key="aspect ratio",
path="./h5[starts-with(text()," \
" 'Aspect')]/../div/text()",
postprocess=lambda x: x.strip()),
Attribute(key="mpaa",
path="./h5/a[starts-with(text()," \
" 'MPAA')]/../../div/text()",
postprocess=lambda x: x.strip()),
Attribute(key="countries",
path="./h5[starts-with(text(), " \
"'Countr')]/../div[@class='info-content']//text()",
postprocess=makeSplitter('|')),
Attribute(key="language",
path="./h5[starts-with(text(), " \
"'Language')]/..//text()",
postprocess=makeSplitter('Language:')),
Attribute(key='color info',
path="./h5[starts-with(text(), " \
"'Color')]/..//text()",
postprocess=makeSplitter('Color:')),
Attribute(key='sound mix',
path="./h5[starts-with(text(), " \
"'Sound Mix')]/..//text()",
postprocess=makeSplitter('Sound Mix:')),
# Collects akas not encosed in <i> tags.
Attribute(key='other akas',
path="./h5[starts-with(text(), " \
"'Also Known As')]/../div//text()",
postprocess=makeSplitter(sep='::',
origNotesSep='" - ',
newNotesSep='::',
strip='"')),
Attribute(key='runtimes',
path="./h5[starts-with(text(), " \
"'Runtime')]/../div/text()",
postprocess=makeSplitter()),
Attribute(key='certificates',
path="./h5[starts-with(text(), " \
"'Certificat')]/..//text()",
postprocess=makeSplitter('Certification:')),
Attribute(key='number of seasons',
path="./h5[starts-with(text(), " \
"'Seasons')]/..//text()",
postprocess=lambda x: x.count('|') + 1),
Attribute(key='original air date',
path="./h5[starts-with(text(), " \
"'Original Air Date')]/../div/text()"),
Attribute(key='tv series link',
path="./h5[starts-with(text(), " \
"'TV Series')]/..//a/@href"),
Attribute(key='tv series title',
path="./h5[starts-with(text(), " \
"'TV Series')]/..//a/text()")
]),
Extractor(label='language codes',
path="//h5[starts-with(text(), 'Language')]/..//a[starts-with(@href, '/language/')]",
attrs=Attribute(key='language codes', multi=True,
path="./@href",
postprocess=lambda x: x.split('/')[2].strip()
)),
Extractor(label='country codes',
path="//h5[starts-with(text(), 'Country')]/..//a[starts-with(@href, '/country/')]",
attrs=Attribute(key='country codes', multi=True,
path="./@href",
postprocess=lambda x: x.split('/')[2].strip()
)),
Extractor(label='creator',
path="//h5[starts-with(text(), 'Creator')]/..//a",
attrs=Attribute(key='creator', multi=True,
path={'name': "./text()",
'link': "./@href"},
postprocess=lambda x: \
build_person(x.get('name') or u'',
personID=analyze_imdbid(x.get('link')))
)),
Extractor(label='thin writer',
path="//h5[starts-with(text(), 'Writer')]/..//a",
attrs=Attribute(key='thin writer', multi=True,
path={'name': "./text()",
'link': "./@href"},
postprocess=lambda x: \
build_person(x.get('name') or u'',
personID=analyze_imdbid(x.get('link')))
)),
Extractor(label='thin director',
path="//h5[starts-with(text(), 'Director')]/..//a",
attrs=Attribute(key='thin director', multi=True,
path={'name': "./text()",
'link': "@href"},
postprocess=lambda x: \
build_person(x.get('name') or u'',
personID=analyze_imdbid(x.get('link')))
)),
Extractor(label='top 250/bottom 100',
path="//div[@class='starbar-special']/" \
"a[starts-with(@href, '/chart/')]",
attrs=Attribute(key='top/bottom rank',
path="./text()")),
Extractor(label='series years',
path="//div[@id='tn15title']//span" \
"[starts-with(text(), 'TV series')]",
attrs=Attribute(key='series years',
path="./text()",
postprocess=lambda x: \
x.replace('TV series','').strip())),
Extractor(label='number of episodes',
path="//a[@title='Full Episode List']",
attrs=Attribute(key='number of episodes',
path="./text()",
postprocess=lambda x: \
_toInt(x, [(' Episodes', '')]))),
Extractor(label='akas',
path="//i[@class='transl']",
attrs=Attribute(key='akas', multi=True, path='text()',
postprocess=lambda x:
x.replace('  ', ' ').rstrip('-').replace('" - ',
'"::', 1).strip('"').replace('  ', ' '))),
Extractor(label='production notes/status',
path="//h5[starts-with(text(), 'Status:')]/..//div[@class='info-content']",
attrs=Attribute(key='production status',
path=".//text()",
postprocess=lambda x: x.strip().split('|')[0].strip().lower())),
Extractor(label='production notes/status updated',
path="//h5[starts-with(text(), 'Status Updated:')]/..//div[@class='info-content']",
attrs=Attribute(key='production status updated',
path=".//text()",
postprocess=lambda x: x.strip())),
Extractor(label='production notes/comments',
path="//h5[starts-with(text(), 'Comments:')]/..//div[@class='info-content']",
attrs=Attribute(key='production comments',
path=".//text()",
postprocess=lambda x: x.strip())),
Extractor(label='production notes/note',
path="//h5[starts-with(text(), 'Note:')]/..//div[@class='info-content']",
attrs=Attribute(key='production note',
path=".//text()",
postprocess=lambda x: x.strip())),
Extractor(label='blackcatheader',
group="//b[@class='blackcatheader']",
group_key="./text()",
group_key_normalize=lambda x: x.lower(),
path="../ul/li",
attrs=Attribute(key=None,
multi=True,
path={'name': "./a//text()",
'comp-link': "./a/@href",
'notes': "./text()"},
postprocess=lambda x: \
Company(name=x.get('name') or u'',
companyID=analyze_imdbid(x.get('comp-link')),
notes=(x.get('notes') or u'').strip())
)),
Extractor(label='rating',
path="//div[@class='starbar-meta']/b",
attrs=Attribute(key='rating',
path=".//text()")),
Extractor(label='votes',
path="//div[@class='starbar-meta']/a[@href]",
attrs=Attribute(key='votes',
path=".//text()")),
Extractor(label='cover url',
path="//a[@name='poster']",
attrs=Attribute(key='cover url',
path="./img/@src"))
]
preprocessors = [
(re.compile(r'(<b class="blackcatheader">.+?</b>)', re.I),
r'</div><div>\1'),
('<small>Full cast and crew for<br>', ''),
('<td> </td>', '<td>...</td>'),
('<span class="tv-extra">TV mini-series</span>',
'<span class="tv-extra">(mini)</span>'),
(_reRolesMovie, _manageRoles),
(_reAkas, _replaceBR)]
def preprocess_dom(self, dom):
# Handle series information.
xpath = self.xpath(dom, "//b[text()='Series Crew']")
if xpath:
b = xpath[-1] # In doubt, take the last one.
for a in self.xpath(b, "./following::h5/a[@class='glossary']"):
name = a.get('name')
if name:
a.set('name', 'series %s' % name)
# Remove links to IMDbPro.
for proLink in self.xpath(dom, "//span[@class='pro-link']"):
proLink.drop_tree()
# Remove some 'more' links (keep others, like the one around
# the number of votes).
for tn15more in self.xpath(dom,
"//a[@class='tn15more'][starts-with(@href, '/title/')]"):
tn15more.drop_tree()
return dom
re_space = re.compile(r'\s+')
re_airdate = re.compile(r'(.*)\s*\(season (\d+), episode (\d+)\)', re.I)
def postprocess_data(self, data):
# Convert section names.
for sect in data.keys():
if sect in _SECT_CONV:
data[_SECT_CONV[sect]] = data[sect]
del data[sect]
sect = _SECT_CONV[sect]
# Filter out fake values.
for key in data:
value = data[key]
if isinstance(value, list) and value:
if isinstance(value[0], Person):
data[key] = filter(lambda x: x.personID is not None, value)
if isinstance(value[0], _Container):
for obj in data[key]:
obj.accessSystem = self._as
obj.modFunct = self._modFunct
if 'akas' in data or 'other akas' in data:
akas = data.get('akas') or []
other_akas = data.get('other akas') or []
akas += other_akas
nakas = []
for aka in akas:
aka = aka.strip()
if aka.endswith('" -'):
aka = aka[:-3].rstrip()
nakas.append(aka)
if 'akas' in data:
del data['akas']
if 'other akas' in data:
del data['other akas']
if nakas:
data['akas'] = nakas
if 'runtimes' in data:
data['runtimes'] = [x.replace(' min', u'')
for x in data['runtimes']]
if 'original air date' in data:
oid = self.re_space.sub(' ', data['original air date']).strip()
data['original air date'] = oid
aid = self.re_airdate.findall(oid)
if aid and len(aid[0]) == 3:
date, season, episode = aid[0]
date = date.strip()
try: season = int(season)
except: pass
try: episode = int(episode)
except: pass
if date and date != '????':
data['original air date'] = date
else:
del data['original air date']
# Handle also "episode 0".
if season or type(season) is type(0):
data['season'] = season
if episode or type(season) is type(0):
data['episode'] = episode
for k in ('writer', 'director'):
t_k = 'thin %s' % k
if t_k not in data:
continue
if k not in data:
data[k] = data[t_k]
del data[t_k]
if 'top/bottom rank' in data:
tbVal = data['top/bottom rank'].lower()
if tbVal.startswith('top'):
tbKey = 'top 250 rank'
tbVal = _toInt(tbVal, [('top 250: #', '')])
else:
tbKey = 'bottom 100 rank'
tbVal = _toInt(tbVal, [('bottom 100: #', '')])
if tbVal:
data[tbKey] = tbVal
del data['top/bottom rank']
if 'year' in data and data['year'] == '????':
del data['year']
if 'tv series link' in data:
if 'tv series title' in data:
data['episode of'] = Movie(title=data['tv series title'],
movieID=analyze_imdbid(
data['tv series link']),
accessSystem=self._as,
modFunct=self._modFunct)
del data['tv series title']
del data['tv series link']
if 'rating' in data:
try:
data['rating'] = float(data['rating'].replace('/10', ''))
except (TypeError, ValueError):
pass
if 'votes' in data:
try:
votes = data['votes'].replace(',', '').replace('votes', '')
data['votes'] = int(votes)
except (TypeError, ValueError):
pass
return data
def _process_plotsummary(x):
"""Process a plot (contributed by Rdian06)."""
xauthor = x.get('author')
xplot = x.get('plot', u'').strip()
if xauthor:
xplot += u'::%s' % xauthor
return xplot
class DOMHTMLPlotParser(DOMParserBase):
"""Parser for the "plot summary" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a 'plot' key, containing a list
of string with the structure: 'summary::summary_author <author@email>'.
Example:
pparser = HTMLPlotParser()
result = pparser.parse(plot_summary_html_string)
"""
_defGetRefs = True
# Notice that recently IMDb started to put the email of the
# author only in the link, that we're not collecting, here.
extractors = [Extractor(label='plot',
path="//ul[@class='zebraList']//p",
attrs=Attribute(key='plot',
multi=True,
path={'plot': './text()[1]',
'author': './span/em/a/text()'},
postprocess=_process_plotsummary))]
def _process_award(x):
award = {}
_award = x.get('award')
if _award is not None:
_award = _award.strip()
award['award'] = _award
if not award['award']:
return {}
award['year'] = x.get('year').strip()
if award['year'] and award['year'].isdigit():
award['year'] = int(award['year'])
award['result'] = x.get('result').strip()
category = x.get('category').strip()
if category:
award['category'] = category
received_with = x.get('with')
if received_with is not None:
award['with'] = received_with.strip()
notes = x.get('notes')
if notes is not None:
notes = notes.strip()
if notes:
award['notes'] = notes
award['anchor'] = x.get('anchor')
return award
class DOMHTMLAwardsParser(DOMParserBase):
"""Parser for the "awards" page of a given person or movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
awparser = HTMLAwardsParser()
result = awparser.parse(awards_html_string)
"""
subject = 'title'
_containsObjects = True
extractors = [
Extractor(label='awards',
group="//table//big",
group_key="./a",
path="./ancestor::tr[1]/following-sibling::tr/" \
"td[last()][not(@colspan)]",
attrs=Attribute(key=None,
multi=True,
path={
'year': "../td[1]/a/text()",
'result': "../td[2]/b/text()",
'award': "../td[3]/text()",
'category': "./text()[1]",
# FIXME: takes only the first co-recipient
'with': "./small[starts-with(text()," \
" 'Shared with:')]/following-sibling::a[1]/text()",
'notes': "./small[last()]//text()",
'anchor': ".//text()"
},
postprocess=_process_award
)),
Extractor(label='recipients',
group="//table//big",
group_key="./a",
path="./ancestor::tr[1]/following-sibling::tr/" \
"td[last()]/small[1]/preceding-sibling::a",
attrs=Attribute(key=None,
multi=True,
path={
'name': "./text()",
'link': "./@href",
'anchor': "..//text()"
}
))
]
preprocessors = [
(re.compile('(<tr><td[^>]*>.*?</td></tr>\n\n</table>)', re.I),
r'\1</table>'),
(re.compile('(<tr><td[^>]*>\n\n<big>.*?</big></td></tr>)', re.I),
r'</table><table class="_imdbpy">\1'),
(re.compile('(<table[^>]*>\n\n)</table>(<table)', re.I), r'\1\2'),
(re.compile('(<small>.*?)<br>(.*?</small)', re.I), r'\1 \2'),
(re.compile('(</tr>\n\n)(<td)', re.I), r'\1<tr>\2')
]
def preprocess_dom(self, dom):
"""Repeat td elements according to their rowspan attributes
in subsequent tr elements.
"""
cols = self.xpath(dom, "//td[@rowspan]")
for col in cols:
span = int(col.get('rowspan'))
del col.attrib['rowspan']
position = len(self.xpath(col, "./preceding-sibling::td"))
row = col.getparent()
for tr in self.xpath(row, "./following-sibling::tr")[:span-1]:
# if not cloned, child will be moved to new parent
clone = self.clone(col)
# XXX: beware that here we don't use an "adapted" function,
# because both BeautifulSoup and lxml uses the same
# "insert" method.
tr.insert(position, clone)
return dom
def postprocess_data(self, data):
if len(data) == 0:
return {}
nd = []
for key in data.keys():
dom = self.get_dom(key)
assigner = self.xpath(dom, "//a/text()")[0]
for entry in data[key]:
if not entry.has_key('name'):
if not entry:
continue
# this is an award, not a recipient
entry['assigner'] = assigner.strip()
# find the recipients
matches = [p for p in data[key]
if p.has_key('name') and (entry['anchor'] ==
p['anchor'])]
if self.subject == 'title':
recipients = [Person(name=recipient['name'],
personID=analyze_imdbid(recipient['link']))
for recipient in matches]
entry['to'] = recipients
elif self.subject == 'name':
recipients = [Movie(title=recipient['name'],
movieID=analyze_imdbid(recipient['link']))
for recipient in matches]
entry['for'] = recipients
nd.append(entry)
del entry['anchor']
return {'awards': nd}
class DOMHTMLTaglinesParser(DOMParserBase):
"""Parser for the "taglines" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
tparser = DOMHTMLTaglinesParser()
result = tparser.parse(taglines_html_string)
"""
extractors = [Extractor(label='taglines',
path='//*[contains(concat(" ", normalize-space(@class), " "), " soda ")]',
attrs=Attribute(key='taglines',
multi=True,
path="./text()"))]
def postprocess_data(self, data):
if 'taglines' in data:
data['taglines'] = [tagline.strip() for tagline in data['taglines']]
return data
class DOMHTMLKeywordsParser(DOMParserBase):
"""Parser for the "keywords" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
kwparser = DOMHTMLKeywordsParser()
result = kwparser.parse(keywords_html_string)
"""
extractors = [Extractor(label='keywords',
path="//a[starts-with(@href, '/keyword/')]",
attrs=Attribute(key='keywords',
path="./text()", multi=True,
postprocess=lambda x: \
x.lower().replace(' ', '-')))]
class DOMHTMLAlternateVersionsParser(DOMParserBase):
"""Parser for the "alternate versions" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
avparser = HTMLAlternateVersionsParser()
result = avparser.parse(alternateversions_html_string)
"""
_defGetRefs = True
extractors = [Extractor(label='alternate versions',
path="//ul[@class='trivia']/li",
attrs=Attribute(key='alternate versions',
multi=True,
path=".//text()",
postprocess=lambda x: x.strip()))]
class DOMHTMLTriviaParser(DOMParserBase):
"""Parser for the "trivia" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
avparser = HTMLAlternateVersionsParser()
result = avparser.parse(alternateversions_html_string)
"""
_defGetRefs = True
extractors = [Extractor(label='alternate versions',
path="//div[@class='sodatext']",
attrs=Attribute(key='trivia',
multi=True,
path=".//text()",
postprocess=lambda x: x.strip()))]
def preprocess_dom(self, dom):
# Remove "link this quote" links.
for qLink in self.xpath(dom, "//span[@class='linksoda']"):
qLink.drop_tree()
return dom
class DOMHTMLSoundtrackParser(DOMHTMLAlternateVersionsParser):
kind = 'soundtrack'
preprocessors = [
('<br>', '\n')
]
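    # Each soundtrack entry arrives as one newline-joined block: the first
    # line is the song title (surrounding double quotes are stripped) and
    # the remaining lines are credits.  A line containing ' with ', ' by ',
    # ' from ' or ' of ', or starting with 'From ', opens a new credit;
    # any other line is treated as a continuation of the previous credit.
    # Each credit is then split into a kind ('performed by', 'written by',
    # ...) and the credited text.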
def postprocess_data(self, data):
if 'alternate versions' in data:
nd = []
for x in data['alternate versions']:
ds = x.split('\n')
title = ds[0]
if title[0] == '"' and title[-1] == '"':
title = title[1:-1]
nds = []
newData = {}
for l in ds[1:]:
if ' with ' in l or ' by ' in l or ' from ' in l \
or ' of ' in l or l.startswith('From '):
nds.append(l)
else:
if nds:
nds[-1] += l
else:
nds.append(l)
newData[title] = {}
for l in nds:
skip = False
for sep in ('From ',):
if l.startswith(sep):
fdix = len(sep)
kind = l[:fdix].rstrip().lower()
info = l[fdix:].lstrip()
newData[title][kind] = info
skip = True
if not skip:
for sep in ' with ', ' by ', ' from ', ' of ':
fdix = l.find(sep)
if fdix != -1:
fdix = fdix+len(sep)
kind = l[:fdix].rstrip().lower()
info = l[fdix:].lstrip()
newData[title][kind] = info
break
nd.append(newData)
data['soundtrack'] = nd
return data
class DOMHTMLCrazyCreditsParser(DOMParserBase):
"""Parser for the "crazy credits" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
ccparser = DOMHTMLCrazyCreditsParser()
result = ccparser.parse(crazycredits_html_string)
"""
_defGetRefs = True
extractors = [Extractor(label='crazy credits', path="//ul/li/tt",
attrs=Attribute(key='crazy credits', multi=True,
path=".//text()",
postprocess=lambda x: \
                            x.replace('\n', ' ').replace('  ', ' ')))]
def _process_goof(x):
if x['spoiler_category']:
return x['spoiler_category'].strip() + ': SPOILER: ' + x['text'].strip()
else:
return x['category'].strip() + ': ' + x['text'].strip()
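# Illustrative example for _process_goof (hypothetical input, not taken
# from a real page):
#   _process_goof({'category': 'Continuity', 'spoiler_category': None,
#                  'text': ' The glass refills itself. '})
# returns 'Continuity: The glass refills itself.'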
class DOMHTMLGoofsParser(DOMParserBase):
"""Parser for the "goofs" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
gparser = DOMHTMLGoofsParser()
result = gparser.parse(goofs_html_string)
"""
_defGetRefs = True
extractors = [Extractor(label='goofs', path="//div[@class='soda odd']",
attrs=Attribute(key='goofs', multi=True,
path={
'text':"./text()",
'category':'./preceding-sibling::h4[1]/text()',
'spoiler_category': './h4/text()'
},
postprocess=_process_goof))]
class DOMHTMLQuotesParser(DOMParserBase):
"""Parser for the "memorable quotes" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
qparser = DOMHTMLQuotesParser()
result = qparser.parse(quotes_html_string)
"""
_defGetRefs = True
extractors = [
Extractor(label='quotes_odd',
path="//div[@class='quote soda odd']",
attrs=Attribute(key='quotes_odd',
multi=True,
path=".//text()",
postprocess=lambda x: x.strip().replace(' \n',
'::').replace('::\n', '::').replace('\n', ' '))),
Extractor(label='quotes_even',
path="//div[@class='quote soda even']",
attrs=Attribute(key='quotes_even',
multi=True,
path=".//text()",
postprocess=lambda x: x.strip().replace(' \n',
'::').replace('::\n', '::').replace('\n', ' ')))
]
preprocessors = [
(re.compile('<a href="#" class="hidesoda hidden">Hide options</a><br>', re.I), '')
]
def preprocess_dom(self, dom):
# Remove "link this quote" links.
for qLink in self.xpath(dom, "//span[@class='linksoda']"):
qLink.drop_tree()
for qLink in self.xpath(dom, "//div[@class='sharesoda_pre']"):
qLink.drop_tree()
return dom
def postprocess_data(self, data):
quotes = data.get('quotes_odd', []) + data.get('quotes_even', [])
if not quotes:
return {}
quotes = [q.split('::') for q in quotes]
return {'quotes': quotes}
class DOMHTMLReleaseinfoParser(DOMParserBase):
"""Parser for the "release dates" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
rdparser = DOMHTMLReleaseinfoParser()
result = rdparser.parse(releaseinfo_html_string)
"""
extractors = [Extractor(label='release dates',
path="//table[@id='release_dates']//tr",
attrs=Attribute(key='release dates', multi=True,
path={'country': ".//td[1]//text()",
'date': ".//td[2]//text()",
'notes': ".//td[3]//text()"})),
Extractor(label='akas',
path="//table[@id='akas']//tr",
attrs=Attribute(key='akas', multi=True,
path={'title': "./td[1]/text()",
'countries': "./td[2]/text()"}))]
preprocessors = [
(re.compile('(<h5><a name="?akas"?.*</table>)', re.I | re.M | re.S),
r'<div class="_imdbpy_akas">\1</div>')]
def postprocess_data(self, data):
if not ('release dates' in data or 'akas' in data): return data
releases = data.get('release dates') or []
rl = []
for i in releases:
country = i.get('country')
date = i.get('date')
if not (country and date): continue
country = country.strip()
date = date.strip()
if not (country and date): continue
notes = i['notes']
info = u'%s::%s' % (country, date)
if notes:
info += notes
rl.append(info)
if releases:
del data['release dates']
if rl:
data['release dates'] = rl
akas = data.get('akas') or []
nakas = []
for aka in akas:
title = (aka.get('title') or '').strip()
if not title:
continue
countries = (aka.get('countries') or '').split(',')
if not countries:
nakas.append(title)
else:
for country in countries:
nakas.append('%s::%s' % (title, country.strip()))
if akas:
del data['akas']
if nakas:
data['akas from release info'] = nakas
return data
class DOMHTMLRatingsParser(DOMParserBase):
"""Parser for the "user ratings" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
rparser = DOMHTMLRatingsParser()
result = rparser.parse(userratings_html_string)
"""
    re_means = re.compile(r'mean\s*=\s*([0-9]\.[0-9])\.\s*median\s*=\s*([0-9])',
                          re.I)
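    # Parses the "Arithmetic mean = X.X.  Median = N" line of the ratings
    # page: group 1 is the one-decimal mean, group 2 the single-digit median.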
extractors = [
Extractor(label='number of votes',
path="//td[b='Percentage']/../../tr",
attrs=[Attribute(key='votes',
multi=True,
path={
'votes': "td[1]//text()",
'ordinal': "td[3]//text()"
})]),
Extractor(label='mean and median',
path="//p[starts-with(text(), 'Arithmetic mean')]",
attrs=Attribute(key='mean and median',
path="text()")),
Extractor(label='rating',
path="//a[starts-with(@href, '/search/title?user_rating=')]",
attrs=Attribute(key='rating',
path="text()")),
Extractor(label='demographic voters',
path="//td[b='Average']/../../tr",
attrs=Attribute(key='demographic voters',
multi=True,
path={
'voters': "td[1]//text()",
'votes': "td[2]//text()",
'average': "td[3]//text()"
})),
Extractor(label='top 250',
path="//a[text()='top 250']",
attrs=Attribute(key='top 250',
path="./preceding-sibling::text()[1]"))
]
def postprocess_data(self, data):
nd = {}
votes = data.get('votes', [])
if votes:
nd['number of votes'] = {}
for i in xrange(1, 11):
_ordinal = int(votes[i]['ordinal'])
_strvts = votes[i]['votes'] or '0'
nd['number of votes'][_ordinal] = \
int(_strvts.replace(',', ''))
mean = data.get('mean and median', '')
if mean:
means = self.re_means.findall(mean)
if means and len(means[0]) == 2:
am, med = means[0]
try: am = float(am)
except (ValueError, OverflowError): pass
if type(am) is type(1.0):
nd['arithmetic mean'] = am
try: med = int(med)
except (ValueError, OverflowError): pass
if type(med) is type(0):
nd['median'] = med
if 'rating' in data:
nd['rating'] = float(data['rating'])
dem_voters = data.get('demographic voters')
if dem_voters:
nd['demographic'] = {}
for i in xrange(1, len(dem_voters)):
if (dem_voters[i]['votes'] is not None) \
and (dem_voters[i]['votes'].strip()):
nd['demographic'][dem_voters[i]['voters'].strip().lower()] \
= (int(dem_voters[i]['votes'].replace(',', '')),
float(dem_voters[i]['average']))
if 'imdb users' in nd.get('demographic', {}):
nd['votes'] = nd['demographic']['imdb users'][0]
nd['demographic']['all votes'] = nd['demographic']['imdb users']
del nd['demographic']['imdb users']
top250 = data.get('top 250')
if top250:
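            # The text node just before the 'top 250' link embeds the rank
            # after a fixed 9-character prefix; keep the first
            # space-delimited token and try to parse it as an integer.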
sd = top250[9:]
i = sd.find(' ')
if i != -1:
sd = sd[:i]
try: sd = int(sd)
except (ValueError, OverflowError): pass
if type(sd) is type(0):
nd['top 250 rank'] = sd
return nd
class DOMHTMLEpisodesRatings(DOMParserBase):
"""Parser for the "episode ratings ... by date" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
erparser = DOMHTMLEpisodesRatings()
result = erparser.parse(eprating_html_string)
"""
_containsObjects = True
extractors = [Extractor(label='title', path="//title",
attrs=Attribute(key='title', path="./text()")),
Extractor(label='ep ratings',
path="//th/../..//tr",
attrs=Attribute(key='episodes', multi=True,
path={'nr': ".//td[1]/text()",
'ep title': ".//td[2]//text()",
'movieID': ".//td[2]/a/@href",
'rating': ".//td[3]/text()",
'votes': ".//td[4]/text()"}))]
def postprocess_data(self, data):
if 'title' not in data or 'episodes' not in data: return {}
nd = []
title = data['title']
for i in data['episodes']:
ept = i['ep title']
movieID = analyze_imdbid(i['movieID'])
votes = i['votes']
rating = i['rating']
if not (ept and movieID and votes and rating): continue
try:
votes = int(votes.replace(',', '').replace('.', ''))
except:
pass
try:
rating = float(rating)
except:
pass
ept = ept.strip()
ept = u'%s {%s' % (title, ept)
nr = i['nr']
if nr:
ept += u' (#%s)' % nr.strip()
ept += '}'
if movieID is not None:
movieID = str(movieID)
m = Movie(title=ept, movieID=movieID, accessSystem=self._as,
modFunct=self._modFunct)
epofdict = m.get('episode of')
if epofdict is not None:
m['episode of'] = Movie(data=epofdict, accessSystem=self._as,
modFunct=self._modFunct)
nd.append({'episode': m, 'votes': votes, 'rating': rating})
return {'episodes rating': nd}
def _normalize_href(href):
if (href is not None) and (not href.lower().startswith('http://')):
if href.startswith('/'): href = href[1:]
# TODO: imdbURL_base may be set by the user!
href = '%s%s' % (imdbURL_base, href)
return href
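# Illustrative example for _normalize_href (assuming imdbURL_base is
# 'http://akas.imdb.com/'):
#   _normalize_href('/title/tt0094226/')
# returns 'http://akas.imdb.com/title/tt0094226/'; absolute http:// URLs
# and None are returned unchanged.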
class DOMHTMLCriticReviewsParser(DOMParserBase):
"""Parser for the "critic reviews" pages of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
        crparser = DOMHTMLCriticReviewsParser()
        result = crparser.parse(criticreviews_html_string)
"""
kind = 'critic reviews'
extractors = [
Extractor(label='metascore',
path="//div[@class='metascore_wrap']/div/span",
attrs=Attribute(key='metascore',
path=".//text()")),
Extractor(label='metacritic url',
path="//div[@class='article']/div[@class='see-more']/a",
attrs=Attribute(key='metacritic url',
path="./@href")) ]
class DOMHTMLOfficialsitesParser(DOMParserBase):
"""Parser for the "official sites", "external reviews", "newsgroup
reviews", "miscellaneous links", "sound clips", "video clips" and
"photographs" pages of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
osparser = DOMHTMLOfficialsitesParser()
result = osparser.parse(officialsites_html_string)
"""
kind = 'official sites'
extractors = [
Extractor(label='site',
path="//ol/li/a",
attrs=Attribute(key='self.kind',
multi=True,
path={
'link': "./@href",
'info': "./text()"
},
postprocess=lambda x: (x.get('info').strip(),
urllib.unquote(_normalize_href(x.get('link'))))))
]
class DOMHTMLConnectionParser(DOMParserBase):
"""Parser for the "connections" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
connparser = DOMHTMLConnectionParser()
result = connparser.parse(connections_html_string)
"""
_containsObjects = True
extractors = [Extractor(label='connection',
group="//div[@class='_imdbpy']",
group_key="./h5/text()",
group_key_normalize=lambda x: x.lower(),
path="./a",
attrs=Attribute(key=None,
path={'title': "./text()",
'movieID': "./@href"},
multi=True))]
preprocessors = [
('<h5>', '</div><div class="_imdbpy"><h5>'),
# To get the movie's year.
('</a> (', ' ('),
('\n<br/>', '</a>'),
('<br/> - ', '::')
]
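    # The preprocessors wrap every <h5> section in a <div class="_imdbpy">
    # so the group XPath can address it, keep the movie's year inside the
    # link text, and turn the '<br/> - ' note separator into '::', which
    # postprocess_data() splits back off into the Movie's notes.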
def postprocess_data(self, data):
for key in data.keys():
nl = []
for v in data[key]:
title = v['title']
ts = title.split('::', 1)
title = ts[0].strip()
notes = u''
if len(ts) == 2:
notes = ts[1].strip()
m = Movie(title=title,
movieID=analyze_imdbid(v['movieID']),
accessSystem=self._as, notes=notes,
modFunct=self._modFunct)
nl.append(m)
data[key] = nl
if not data: return {}
return {'connections': data}
class DOMHTMLLocationsParser(DOMParserBase):
"""Parser for the "locations" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
lparser = DOMHTMLLocationsParser()
result = lparser.parse(locations_html_string)
"""
extractors = [Extractor(label='locations', path="//dt",
attrs=Attribute(key='locations', multi=True,
path={'place': ".//text()",
'note': "./following-sibling::dd[1]" \
"//text()"},
postprocess=lambda x: (u'%s::%s' % (
x['place'].strip(),
(x['note'] or u'').strip())).strip(':')))]
class DOMHTMLTechParser(DOMParserBase):
"""Parser for the "technical", "business", "literature",
"publicity" (for people) and "contacts (for people) pages of
a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
        tparser = DOMHTMLTechParser()
result = tparser.parse(technical_html_string)
"""
kind = 'tech'
extractors = [Extractor(label='tech',
group="//h5",
group_key="./text()",
group_key_normalize=lambda x: x.lower(),
path="./following-sibling::div[1]",
attrs=Attribute(key=None,
path=".//text()",
postprocess=lambda x: [t.strip()
for t in x.split('\n') if t.strip()]))]
preprocessors = [
(re.compile('(<h5>.*?</h5>)', re.I), r'</div>\1<div class="_imdbpy">'),
(re.compile('((<br/>|</p>|</table>))\n?<br/>(?!<a)', re.I),
r'\1</div>'),
# the ones below are for the publicity parser
(re.compile('<p>(.*?)</p>', re.I), r'\1<br/>'),
(re.compile('(</td><td valign="top">)', re.I), r'\1::'),
(re.compile('(</tr><tr>)', re.I), r'\n\1'),
# this is for splitting individual entries
(re.compile('<br/>', re.I), r'\n'),
]
def postprocess_data(self, data):
for key in data:
data[key] = filter(None, data[key])
if self.kind in ('literature', 'business', 'contacts') and data:
if 'screenplay/teleplay' in data:
data['screenplay-teleplay'] = data['screenplay/teleplay']
del data['screenplay/teleplay']
data = {self.kind: data}
else:
if self.kind == 'publicity':
if 'biography (print)' in data:
data['biography-print'] = data['biography (print)']
del data['biography (print)']
# Tech info.
for key in data.keys():
if key.startswith('film negative format'):
data['film negative format'] = data[key]
del data[key]
elif key.startswith('film length'):
data['film length'] = data[key]
del data[key]
return data
class DOMHTMLRecParser(DOMParserBase):
"""Parser for the "recommendations" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
        rparser = DOMHTMLRecParser()
result = rparser.parse(recommendations_html_string)
"""
_containsObjects = True
extractors = [Extractor(label='recommendations',
path="//td[@valign='middle'][1]",
attrs=Attribute(key='../../tr/td[1]//text()',
multi=True,
path={'title': ".//text()",
'movieID': ".//a/@href"}))]
def postprocess_data(self, data):
for key in data.keys():
n_key = key
n_keyl = n_key.lower()
if n_keyl == 'suggested by the database':
n_key = 'database'
elif n_keyl == 'imdb users recommend':
n_key = 'users'
data[n_key] = [Movie(title=x['title'],
movieID=analyze_imdbid(x['movieID']),
accessSystem=self._as, modFunct=self._modFunct)
for x in data[key]]
del data[key]
if data: return {'recommendations': data}
return data
class DOMHTMLNewsParser(DOMParserBase):
"""Parser for the "news" page of a given movie or person.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
nwparser = DOMHTMLNewsParser()
result = nwparser.parse(news_html_string)
"""
_defGetRefs = True
extractors = [
Extractor(label='news',
path="//h2",
attrs=Attribute(key='news',
multi=True,
path={
'title': "./text()",
'fromdate': "../following-sibling::p[1]/small//text()",
# FIXME: sometimes (see The Matrix (1999)) <p> is found
# inside news text.
'body': "../following-sibling::p[2]//text()",
'link': "../..//a[text()='Permalink']/@href",
'fulllink': "../..//a[starts-with(text(), " \
"'See full article at')]/@href"
},
postprocess=lambda x: {
'title': x.get('title').strip(),
'date': x.get('fromdate').split('|')[0].strip(),
'from': x.get('fromdate').split('|')[1].replace('From ',
'').strip(),
'body': (x.get('body') or u'').strip(),
'link': _normalize_href(x.get('link')),
'full article link': _normalize_href(x.get('fulllink'))
}))
]
preprocessors = [
(re.compile('(<a name=[^>]+><h2>)', re.I), r'<div class="_imdbpy">\1'),
(re.compile('(<hr/>)', re.I), r'</div>\1'),
(re.compile('<p></p>', re.I), r'')
]
def postprocess_data(self, data):
if not data.has_key('news'):
return {}
for news in data['news']:
if news.has_key('full article link'):
if news['full article link'] is None:
del news['full article link']
return data
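# Normalize one extracted review match into a dictionary with 'title',
# 'link', 'review kind', 'review' and, when an author signature is
# present, 'review author' (the first two characters of the signature,
# e.g. a leading '- ', are dropped).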
def _parse_review(x):
result = {}
title = x.get('title').strip()
if title[-1] == ':': title = title[:-1]
result['title'] = title
result['link'] = _normalize_href(x.get('link'))
kind = x.get('kind').strip()
if kind[-1] == ':': kind = kind[:-1]
result['review kind'] = kind
text = x.get('review').replace('\n\n', '||').replace('\n', ' ').split('||')
review = '\n'.join(text)
if x.get('author') is not None:
author = x.get('author').strip()
review = review.split(author)[0].strip()
result['review author'] = author[2:]
if x.get('item') is not None:
item = x.get('item').strip()
review = review[len(item):].strip()
review = "%s: %s" % (item, review)
result['review'] = review
return result
class DOMHTMLSeasonEpisodesParser(DOMParserBase):
"""Parser for the "episode list" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
sparser = DOMHTMLSeasonEpisodesParser()
result = sparser.parse(episodes_html_string)
"""
extractors = [
Extractor(label='series link',
path="//div[@class='parent']",
attrs=[Attribute(key='series link',
path=".//a/@href")]
),
Extractor(label='series title',
path="//head/meta[@property='og:title']",
attrs=[Attribute(key='series title',
path="./@content")]
),
Extractor(label='seasons list',
path="//select[@id='bySeason']//option",
attrs=[Attribute(key='_seasons',
multi=True,
path="./@value")]),
Extractor(label='selected season',
path="//select[@id='bySeason']//option[@selected]",
attrs=[Attribute(key='_current_season',
path='./@value')]),
Extractor(label='episodes',
path=".",
group="//div[@class='info']",
group_key=".//meta/@content",
group_key_normalize=lambda x: 'episode %s' % x,
attrs=[Attribute(key=None,
multi=True,
path={
"link": ".//strong//a[@href][1]/@href",
"original air date": ".//div[@class='airdate']/text()",
"title": ".//strong//text()",
"plot": ".//div[@class='item_description']//text()"
}
)]
)
]
def postprocess_data(self, data):
series_id = analyze_imdbid(data.get('series link'))
series_title = data.get('series title', '').strip()
selected_season = data.get('_current_season',
'unknown season').strip()
if not (series_id and series_title):
return {}
series = Movie(title=series_title, movieID=str(series_id),
accessSystem=self._as, modFunct=self._modFunct)
if series.get('kind') == 'movie':
series['kind'] = u'tv series'
try: selected_season = int(selected_season)
except: pass
nd = {selected_season: {}}
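        # Episodes whose number could not be read from the page are grouped
        # under 'episode -1' (the meta content was -1); give each of them
        # the first unused episode slot so they still land in the season
        # dictionary.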
if 'episode -1' in data:
counter = 1
for episode in data['episode -1']:
while 'episode %d' % counter in data:
counter += 1
k = 'episode %d' % counter
data[k] = [episode]
del data['episode -1']
for episode_nr, episode in data.iteritems():
if not (episode and episode[0] and
episode_nr.startswith('episode ')):
continue
episode = episode[0]
episode_nr = episode_nr[8:].rstrip()
try: episode_nr = int(episode_nr)
except: pass
            episode_id = analyze_imdbid(episode.get('link', ''))
episode_air_date = episode.get('original air date',
'').strip()
episode_title = episode.get('title', '').strip()
episode_plot = episode.get('plot', '')
if not (episode_nr is not None and episode_id and episode_title):
continue
ep_obj = Movie(movieID=episode_id, title=episode_title,
accessSystem=self._as, modFunct=self._modFunct)
ep_obj['kind'] = u'episode'
ep_obj['episode of'] = series
ep_obj['season'] = selected_season
ep_obj['episode'] = episode_nr
if episode_air_date:
ep_obj['original air date'] = episode_air_date
if episode_air_date[-4:].isdigit():
ep_obj['year'] = episode_air_date[-4:]
if episode_plot:
ep_obj['plot'] = episode_plot
nd[selected_season][episode_nr] = ep_obj
_seasons = data.get('_seasons') or []
for idx, season in enumerate(_seasons):
try: _seasons[idx] = int(season)
except: pass
return {'episodes': nd, '_seasons': _seasons,
'_current_season': selected_season}
def _build_episode(x):
"""Create a Movie object for a given series' episode."""
episode_id = analyze_imdbid(x.get('link'))
episode_title = x.get('title')
e = Movie(movieID=episode_id, title=episode_title)
e['kind'] = u'episode'
oad = x.get('oad')
if oad:
e['original air date'] = oad.strip()
year = x.get('year')
if year is not None:
year = year[5:]
if year == 'unknown': year = u'????'
if year and year.isdigit():
year = int(year)
e['year'] = year
else:
if oad and oad[-4:].isdigit():
e['year'] = int(oad[-4:])
epinfo = x.get('episode')
if epinfo is not None:
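        # epinfo is expected to look like 'Season 2, Episode 5: ...'; the
        # fixed-width slices below strip the 'Season ' and ' Episode '
        # prefixes before the numbers are parsed.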
season, episode = epinfo.split(':')[0].split(',')
e['season'] = int(season[7:])
e['episode'] = int(episode[8:])
else:
e['season'] = 'unknown'
e['episode'] = 'unknown'
plot = x.get('plot')
if plot:
e['plot'] = plot.strip()
return e
class DOMHTMLEpisodesParser(DOMParserBase):
"""Parser for the "episode list" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
eparser = DOMHTMLEpisodesParser()
result = eparser.parse(episodes_html_string)
"""
# XXX: no more used for the list of episodes parser,
# but only for the episodes cast parser (see below).
_containsObjects = True
kind = 'episodes list'
_episodes_path = "..//h4"
_oad_path = "./following-sibling::span/strong[1]/text()"
def _init(self):
self.extractors = [
Extractor(label='series',
path="//html",
attrs=[Attribute(key='series title',
path=".//title/text()"),
Attribute(key='series movieID',
path=".//h1/a[@class='main']/@href",
postprocess=analyze_imdbid)
]),
Extractor(label='episodes',
group="//div[@class='_imdbpy']/h3",
group_key="./a/@name",
path=self._episodes_path,
attrs=Attribute(key=None,
multi=True,
path={
'link': "./a/@href",
'title': "./a/text()",
'year': "./preceding-sibling::a[1]/@name",
'episode': "./text()[1]",
'oad': self._oad_path,
'plot': "./following-sibling::text()[1]"
},
postprocess=_build_episode))]
if self.kind == 'episodes cast':
self.extractors += [
Extractor(label='cast',
group="//h4",
group_key="./text()[1]",
group_key_normalize=lambda x: x.strip(),
path="./following-sibling::table[1]//td[@class='nm']",
attrs=Attribute(key=None,
multi=True,
path={'person': "..//text()",
'link': "./a/@href",
'roleID': \
"../td[4]/div[@class='_imdbpyrole']/@roleid"},
postprocess=lambda x: \
build_person(x.get('person') or u'',
personID=analyze_imdbid(x.get('link')),
roleID=(x.get('roleID') or u'').split('/'),
accessSystem=self._as,
modFunct=self._modFunct)))
]
preprocessors = [
(re.compile('(<hr/>\n)(<h3>)', re.I),
r'</div>\1<div class="_imdbpy">\2'),
(re.compile('(</p>\n\n)</div>', re.I), r'\1'),
(re.compile('<h3>(.*?)</h3>', re.I), r'<h4>\1</h4>'),
(_reRolesMovie, _manageRoles),
(re.compile('(<br/> <br/>\n)(<hr/>)', re.I), r'\1</div>\2')
]
def postprocess_data(self, data):
# A bit extreme?
if not 'series title' in data: return {}
if not 'series movieID' in data: return {}
stitle = data['series title'].replace('- Episode list', '')
stitle = stitle.replace('- Episodes list', '')
stitle = stitle.replace('- Episode cast', '')
stitle = stitle.replace('- Episodes cast', '')
stitle = stitle.strip()
if not stitle: return {}
seriesID = data['series movieID']
if seriesID is None: return {}
series = Movie(title=stitle, movieID=str(seriesID),
accessSystem=self._as, modFunct=self._modFunct)
nd = {}
for key in data.keys():
if key.startswith('filter-season-') or key.startswith('season-'):
season_key = key.replace('filter-season-', '').replace('season-', '')
try: season_key = int(season_key)
except: pass
nd[season_key] = {}
ep_counter = 1
for episode in data[key]:
if not episode: continue
episode_key = episode.get('episode')
if episode_key is None: continue
if not isinstance(episode_key, int):
episode_key = ep_counter
ep_counter += 1
cast_key = 'Season %s, Episode %s:' % (season_key,
episode_key)
if data.has_key(cast_key):
cast = data[cast_key]
for i in xrange(len(cast)):
cast[i].billingPos = i + 1
episode['cast'] = cast
episode['episode of'] = series
nd[season_key][episode_key] = episode
if len(nd) == 0:
return {}
return {'episodes': nd}
class DOMHTMLEpisodesCastParser(DOMHTMLEpisodesParser):
"""Parser for the "episodes cast" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
        ecparser = DOMHTMLEpisodesCastParser()
        result = ecparser.parse(episodescast_html_string)
"""
kind = 'episodes cast'
_episodes_path = "..//h4"
_oad_path = "./following-sibling::b[1]/text()"
class DOMHTMLFaqsParser(DOMParserBase):
"""Parser for the "FAQ" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
fparser = DOMHTMLFaqsParser()
result = fparser.parse(faqs_html_string)
"""
_defGetRefs = True
# XXX: bsoup and lxml don't match (looks like a minor issue, anyway).
extractors = [
Extractor(label='faqs',
path="//div[@class='section']",
attrs=Attribute(key='faqs',
multi=True,
path={
'question': "./h3/a/span/text()",
'answer': "../following-sibling::div[1]//text()"
},
postprocess=lambda x: u'%s::%s' % (x.get('question').strip(),
'\n\n'.join(x.get('answer').replace(
'\n\n', '\n').strip().split('||')))))
]
preprocessors = [
(re.compile('<br/><br/>', re.I), r'||'),
(re.compile('<h4>(.*?)</h4>\n', re.I), r'||\1--'),
(re.compile('<span class="spoiler"><span>(.*?)</span></span>', re.I),
r'[spoiler]\1[/spoiler]')
]
class DOMHTMLAiringParser(DOMParserBase):
"""Parser for the "airing" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
aparser = DOMHTMLAiringParser()
result = aparser.parse(airing_html_string)
"""
_containsObjects = True
extractors = [
Extractor(label='series title',
path="//title",
attrs=Attribute(key='series title', path="./text()",
postprocess=lambda x: \
x.replace(' - TV schedule', u''))),
Extractor(label='series id',
path="//h1/a[@href]",
attrs=Attribute(key='series id', path="./@href")),
Extractor(label='tv airings',
path="//tr[@class]",
attrs=Attribute(key='airing',
multi=True,
path={
'date': "./td[1]//text()",
'time': "./td[2]//text()",
'channel': "./td[3]//text()",
'link': "./td[4]/a[1]/@href",
'title': "./td[4]//text()",
'season': "./td[5]//text()",
},
postprocess=lambda x: {
'date': x.get('date'),
'time': x.get('time'),
'channel': x.get('channel').strip(),
'link': x.get('link'),
'title': x.get('title'),
'season': (x.get('season') or '').strip()
}
))
]
def postprocess_data(self, data):
if len(data) == 0:
return {}
seriesTitle = data['series title']
seriesID = analyze_imdbid(data['series id'])
if data.has_key('airing'):
for airing in data['airing']:
title = airing.get('title', '').strip()
if not title:
epsTitle = seriesTitle
if seriesID is None:
continue
epsID = seriesID
else:
epsTitle = '%s {%s}' % (data['series title'],
airing['title'])
epsID = analyze_imdbid(airing['link'])
e = Movie(title=epsTitle, movieID=epsID)
airing['episode'] = e
del airing['link']
del airing['title']
if not airing['season']:
del airing['season']
if 'series title' in data:
del data['series title']
if 'series id' in data:
del data['series id']
if 'airing' in data:
data['airing'] = filter(None, data['airing'])
if 'airing' not in data or not data['airing']:
return {}
return data
class DOMHTMLSynopsisParser(DOMParserBase):
"""Parser for the "synopsis" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
        sparser = DOMHTMLSynopsisParser()
result = sparser.parse(synopsis_html_string)
"""
extractors = [
Extractor(label='synopsis',
path="//div[@class='display'][not(@style)]",
attrs=Attribute(key='synopsis',
path=".//text()",
postprocess=lambda x: '\n\n'.join(x.strip().split('||'))))
]
preprocessors = [
(re.compile('<br/><br/>', re.I), r'||')
]
class DOMHTMLParentsGuideParser(DOMParserBase):
"""Parser for the "parents guide" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
        pgparser = DOMHTMLParentsGuideParser()
result = pgparser.parse(parentsguide_html_string)
"""
extractors = [
Extractor(label='parents guide',
group="//div[@class='section']",
group_key="./h3/a/span/text()",
group_key_normalize=lambda x: x.lower(),
path="../following-sibling::div[1]/p",
attrs=Attribute(key=None,
path=".//text()",
postprocess=lambda x: [t.strip().replace('\n', ' ')
for t in x.split('||') if t.strip()]))
]
preprocessors = [
(re.compile('<br/><br/>', re.I), r'||')
]
def postprocess_data(self, data):
data2 = {}
for key in data:
if data[key]:
data2[key] = data[key]
if not data2:
return {}
return {'parents guide': data2}
_OBJECTS = {
'movie_parser': ((DOMHTMLMovieParser,), None),
'plot_parser': ((DOMHTMLPlotParser,), None),
'movie_awards_parser': ((DOMHTMLAwardsParser,), None),
'taglines_parser': ((DOMHTMLTaglinesParser,), None),
'keywords_parser': ((DOMHTMLKeywordsParser,), None),
'crazycredits_parser': ((DOMHTMLCrazyCreditsParser,), None),
'goofs_parser': ((DOMHTMLGoofsParser,), None),
'alternateversions_parser': ((DOMHTMLAlternateVersionsParser,), None),
'trivia_parser': ((DOMHTMLTriviaParser,), None),
'soundtrack_parser': ((DOMHTMLSoundtrackParser,), {'kind': 'soundtrack'}),
'quotes_parser': ((DOMHTMLQuotesParser,), None),
'releasedates_parser': ((DOMHTMLReleaseinfoParser,), None),
'ratings_parser': ((DOMHTMLRatingsParser,), None),
'officialsites_parser': ((DOMHTMLOfficialsitesParser,), None),
'criticrev_parser': ((DOMHTMLCriticReviewsParser,),
{'kind': 'critic reviews'}),
'externalrev_parser': ((DOMHTMLOfficialsitesParser,),
{'kind': 'external reviews'}),
'newsgrouprev_parser': ((DOMHTMLOfficialsitesParser,),
{'kind': 'newsgroup reviews'}),
'misclinks_parser': ((DOMHTMLOfficialsitesParser,),
{'kind': 'misc links'}),
'soundclips_parser': ((DOMHTMLOfficialsitesParser,),
{'kind': 'sound clips'}),
'videoclips_parser': ((DOMHTMLOfficialsitesParser,),
{'kind': 'video clips'}),
'photosites_parser': ((DOMHTMLOfficialsitesParser,),
{'kind': 'photo sites'}),
'connections_parser': ((DOMHTMLConnectionParser,), None),
'tech_parser': ((DOMHTMLTechParser,), None),
'business_parser': ((DOMHTMLTechParser,),
{'kind': 'business', '_defGetRefs': 1}),
'literature_parser': ((DOMHTMLTechParser,), {'kind': 'literature'}),
'locations_parser': ((DOMHTMLLocationsParser,), None),
'rec_parser': ((DOMHTMLRecParser,), None),
'news_parser': ((DOMHTMLNewsParser,), None),
'episodes_parser': ((DOMHTMLEpisodesParser,), None),
'season_episodes_parser': ((DOMHTMLSeasonEpisodesParser,), None),
'episodes_cast_parser': ((DOMHTMLEpisodesCastParser,), None),
'eprating_parser': ((DOMHTMLEpisodesRatings,), None),
'movie_faqs_parser': ((DOMHTMLFaqsParser,), None),
'airing_parser': ((DOMHTMLAiringParser,), None),
'synopsis_parser': ((DOMHTMLSynopsisParser,), None),
'parentsguide_parser': ((DOMHTMLParentsGuideParser,), None)
}
| gpl-3.0 |
manishpatell/erpcustomizationssaiimpex123qwe | openerp/addons/base/tests/test_api.py | 42 | 17604 |
from openerp import models
from openerp.tools import mute_logger
from openerp.osv.orm import except_orm
from openerp.tests import common
class TestAPI(common.TransactionCase):
""" test the new API of the ORM """
def assertIsRecordset(self, value, model):
self.assertIsInstance(value, models.BaseModel)
self.assertEqual(value._name, model)
def assertIsRecord(self, value, model):
self.assertIsRecordset(value, model)
self.assertTrue(len(value) <= 1)
def assertIsNull(self, value, model):
self.assertIsRecordset(value, model)
self.assertFalse(value)
@mute_logger('openerp.models')
def test_00_query(self):
""" Build a recordset, and check its contents. """
domain = [('name', 'ilike', 'j')]
ids = self.registry('res.partner').search(self.cr, self.uid, domain)
partners = self.env['res.partner'].search(domain)
# partners is a collection of browse records corresponding to ids
self.assertTrue(ids)
self.assertTrue(partners)
# partners and its contents are instance of the model, and share its ormcache
self.assertIsRecordset(partners, 'res.partner')
self.assertIs(partners._ormcache, self.env['res.partner']._ormcache)
for p in partners:
self.assertIsRecord(p, 'res.partner')
self.assertIs(p._ormcache, self.env['res.partner']._ormcache)
self.assertEqual([p.id for p in partners], ids)
self.assertEqual(self.env['res.partner'].browse(ids), partners)
@mute_logger('openerp.models')
def test_01_query_offset(self):
""" Build a recordset with offset, and check equivalence. """
partners1 = self.env['res.partner'].search([], offset=10)
partners2 = self.env['res.partner'].search([])[10:]
self.assertIsRecordset(partners1, 'res.partner')
self.assertIsRecordset(partners2, 'res.partner')
self.assertEqual(list(partners1), list(partners2))
@mute_logger('openerp.models')
def test_02_query_limit(self):
""" Build a recordset with offset, and check equivalence. """
partners1 = self.env['res.partner'].search([], limit=10)
partners2 = self.env['res.partner'].search([])[:10]
self.assertIsRecordset(partners1, 'res.partner')
self.assertIsRecordset(partners2, 'res.partner')
self.assertEqual(list(partners1), list(partners2))
@mute_logger('openerp.models')
def test_03_query_offset_limit(self):
""" Build a recordset with offset and limit, and check equivalence. """
partners1 = self.env['res.partner'].search([], offset=3, limit=7)
partners2 = self.env['res.partner'].search([])[3:10]
self.assertIsRecordset(partners1, 'res.partner')
self.assertIsRecordset(partners2, 'res.partner')
self.assertEqual(list(partners1), list(partners2))
@mute_logger('openerp.models')
def test_05_immutable(self):
""" Check that a recordset remains the same, even after updates. """
domain = [('name', 'ilike', 'j')]
partners = self.env['res.partner'].search(domain)
self.assertTrue(partners)
ids = map(int, partners)
# modify those partners, and check that partners has not changed
self.registry('res.partner').write(self.cr, self.uid, ids, {'active': False})
self.assertEqual(ids, map(int, partners))
# redo the search, and check that the result is now empty
partners2 = self.env['res.partner'].search(domain)
self.assertFalse(partners2)
@mute_logger('openerp.models')
def test_06_fields(self):
""" Check that relation fields return records, recordsets or nulls. """
user = self.registry('res.users').browse(self.cr, self.uid, self.uid)
self.assertIsRecord(user, 'res.users')
self.assertIsRecord(user.partner_id, 'res.partner')
self.assertIsRecordset(user.groups_id, 'res.groups')
partners = self.env['res.partner'].search([])
for name, field in partners._fields.iteritems():
if field.type == 'many2one':
for p in partners:
self.assertIsRecord(p[name], field.comodel_name)
elif field.type == 'reference':
for p in partners:
if p[name]:
self.assertIsRecord(p[name], field.comodel_name)
elif field.type in ('one2many', 'many2many'):
for p in partners:
self.assertIsRecordset(p[name], field.comodel_name)
@mute_logger('openerp.models')
def test_07_null(self):
""" Check behavior of null instances. """
# select a partner without a parent
partner = self.env['res.partner'].search([('parent_id', '=', False)])[0]
# check partner and related null instances
self.assertTrue(partner)
self.assertIsRecord(partner, 'res.partner')
self.assertFalse(partner.parent_id)
self.assertIsNull(partner.parent_id, 'res.partner')
self.assertIs(partner.parent_id.id, False)
self.assertFalse(partner.parent_id.user_id)
self.assertIsNull(partner.parent_id.user_id, 'res.users')
self.assertIs(partner.parent_id.user_id.name, False)
self.assertFalse(partner.parent_id.user_id.groups_id)
self.assertIsRecordset(partner.parent_id.user_id.groups_id, 'res.groups')
@mute_logger('openerp.models')
def test_10_old_old(self):
""" Call old-style methods in the old-fashioned way. """
partners = self.env['res.partner'].search([('name', 'ilike', 'j')])
self.assertTrue(partners)
ids = map(int, partners)
# call method name_get on partners' model, and check its effect
res = partners._model.name_get(self.cr, self.uid, ids)
self.assertEqual(len(res), len(ids))
self.assertEqual(set(val[0] for val in res), set(ids))
@mute_logger('openerp.models')
def test_20_old_new(self):
""" Call old-style methods in the new API style. """
partners = self.env['res.partner'].search([('name', 'ilike', 'j')])
self.assertTrue(partners)
# call method name_get on partners itself, and check its effect
res = partners.name_get()
self.assertEqual(len(res), len(partners))
self.assertEqual(set(val[0] for val in res), set(map(int, partners)))
@mute_logger('openerp.models')
def test_25_old_new(self):
""" Call old-style methods on records (new API style). """
partners = self.env['res.partner'].search([('name', 'ilike', 'j')])
self.assertTrue(partners)
# call method name_get on partner records, and check its effect
for p in partners:
res = p.name_get()
self.assertTrue(isinstance(res, list) and len(res) == 1)
self.assertTrue(isinstance(res[0], tuple) and len(res[0]) == 2)
self.assertEqual(res[0][0], p.id)
@mute_logger('openerp.models')
def test_30_new_old(self):
""" Call new-style methods in the old-fashioned way. """
partners = self.env['res.partner'].search([('name', 'ilike', 'j')])
self.assertTrue(partners)
ids = map(int, partners)
# call method write on partners' model, and check its effect
partners._model.write(self.cr, self.uid, ids, {'active': False})
for p in partners:
self.assertFalse(p.active)
@mute_logger('openerp.models')
def test_40_new_new(self):
""" Call new-style methods in the new API style. """
partners = self.env['res.partner'].search([('name', 'ilike', 'j')])
self.assertTrue(partners)
# call method write on partners itself, and check its effect
partners.write({'active': False})
for p in partners:
self.assertFalse(p.active)
@mute_logger('openerp.models')
def test_45_new_new(self):
""" Call new-style methods on records (new API style). """
partners = self.env['res.partner'].search([('name', 'ilike', 'j')])
self.assertTrue(partners)
# call method write on partner records, and check its effects
for p in partners:
p.write({'active': False})
for p in partners:
self.assertFalse(p.active)
@mute_logger('openerp.models')
@mute_logger('openerp.addons.base.ir.ir_model')
def test_50_environment(self):
""" Test environment on records. """
# partners and reachable records are attached to self.env
partners = self.env['res.partner'].search([('name', 'ilike', 'j')])
self.assertEqual(partners.env, self.env)
for x in (partners, partners[0], partners[0].company_id):
self.assertEqual(x.env, self.env)
for p in partners:
self.assertEqual(p.env, self.env)
# check that the current user can read and modify company data
partners[0].company_id.name
partners[0].company_id.write({'name': 'Fools'})
# create an environment with the demo user
demo = self.env['res.users'].search([('login', '=', 'demo')])[0]
demo_env = self.env(user=demo)
self.assertNotEqual(demo_env, self.env)
# partners and related records are still attached to self.env
self.assertEqual(partners.env, self.env)
for x in (partners, partners[0], partners[0].company_id):
self.assertEqual(x.env, self.env)
for p in partners:
self.assertEqual(p.env, self.env)
# create record instances attached to demo_env
demo_partners = partners.sudo(demo)
self.assertEqual(demo_partners.env, demo_env)
for x in (demo_partners, demo_partners[0], demo_partners[0].company_id):
self.assertEqual(x.env, demo_env)
for p in demo_partners:
self.assertEqual(p.env, demo_env)
# demo user can read but not modify company data
demo_partners[0].company_id.name
with self.assertRaises(except_orm):
demo_partners[0].company_id.write({'name': 'Pricks'})
# remove demo user from all groups
demo.write({'groups_id': [(5,)]})
# demo user can no longer access partner data
with self.assertRaises(except_orm):
demo_partners[0].company_id.name
@mute_logger('openerp.models')
def test_55_draft(self):
""" Test draft mode nesting. """
env = self.env
self.assertFalse(env.in_draft)
with env.do_in_draft():
self.assertTrue(env.in_draft)
with env.do_in_draft():
self.assertTrue(env.in_draft)
with env.do_in_draft():
self.assertTrue(env.in_draft)
self.assertTrue(env.in_draft)
self.assertTrue(env.in_draft)
self.assertFalse(env.in_draft)
@mute_logger('openerp.models')
def test_60_cache(self):
""" Check the record cache behavior """
partners = self.env['res.partner'].search([('child_ids', '!=', False)])
partner1, partner2 = partners[0], partners[1]
children1, children2 = partner1.child_ids, partner2.child_ids
self.assertTrue(children1)
self.assertTrue(children2)
# take a child contact
child = children1[0]
self.assertEqual(child.parent_id, partner1)
self.assertIn(child, partner1.child_ids)
self.assertNotIn(child, partner2.child_ids)
# fetch data in the cache
for p in partners:
p.name, p.company_id.name, p.user_id.name, p.contact_address
self.env.check_cache()
# change its parent
child.write({'parent_id': partner2.id})
self.env.check_cache()
# check recordsets
self.assertEqual(child.parent_id, partner2)
self.assertNotIn(child, partner1.child_ids)
self.assertIn(child, partner2.child_ids)
self.assertEqual(set(partner1.child_ids + child), set(children1))
self.assertEqual(set(partner2.child_ids), set(children2 + child))
self.env.check_cache()
# delete it
child.unlink()
self.env.check_cache()
# check recordsets
self.assertEqual(set(partner1.child_ids), set(children1) - set([child]))
self.assertEqual(set(partner2.child_ids), set(children2))
self.env.check_cache()
@mute_logger('openerp.models')
def test_60_cache_prefetching(self):
""" Check the record cache prefetching """
self.env.invalidate_all()
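        # env.prefetch maps model names to the ids whose records are
        # scheduled to be fetched together on the next field access.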
# all the records of an instance already have an entry in cache
partners = self.env['res.partner'].search([])
partner_ids = self.env.prefetch['res.partner']
self.assertEqual(set(partners.ids), set(partner_ids))
# countries have not been fetched yet; their cache must be empty
countries = self.env['res.country'].browse()
self.assertFalse(self.env.prefetch['res.country'])
# reading ONE partner should fetch them ALL
countries |= partners[0].country_id
country_cache = self.env.cache[partners._fields['country_id']]
self.assertLessEqual(set(partners._ids), set(country_cache))
# read all partners, and check that the cache already contained them
country_ids = list(self.env.prefetch['res.country'])
for p in partners:
countries |= p.country_id
self.assertLessEqual(set(countries.ids), set(country_ids))
@mute_logger('openerp.models')
def test_70_one(self):
""" Check method one(). """
# check with many records
ps = self.env['res.partner'].search([('name', 'ilike', 'a')])
self.assertTrue(len(ps) > 1)
with self.assertRaises(except_orm):
ps.ensure_one()
p1 = ps[0]
self.assertEqual(len(p1), 1)
self.assertEqual(p1.ensure_one(), p1)
p0 = self.env['res.partner'].browse()
self.assertEqual(len(p0), 0)
with self.assertRaises(except_orm):
p0.ensure_one()
@mute_logger('openerp.models')
def test_80_contains(self):
""" Test membership on recordset. """
p1 = self.env['res.partner'].search([('name', 'ilike', 'a')], limit=1).ensure_one()
ps = self.env['res.partner'].search([('name', 'ilike', 'a')])
self.assertTrue(p1 in ps)
@mute_logger('openerp.models')
def test_80_set_operations(self):
""" Check set operations on recordsets. """
pa = self.env['res.partner'].search([('name', 'ilike', 'a')])
pb = self.env['res.partner'].search([('name', 'ilike', 'b')])
self.assertTrue(pa)
self.assertTrue(pb)
self.assertTrue(set(pa) & set(pb))
concat = pa + pb
self.assertEqual(list(concat), list(pa) + list(pb))
self.assertEqual(len(concat), len(pa) + len(pb))
difference = pa - pb
self.assertEqual(len(difference), len(set(difference)))
self.assertEqual(set(difference), set(pa) - set(pb))
self.assertLessEqual(difference, pa)
intersection = pa & pb
self.assertEqual(len(intersection), len(set(intersection)))
self.assertEqual(set(intersection), set(pa) & set(pb))
self.assertLessEqual(intersection, pa)
self.assertLessEqual(intersection, pb)
union = pa | pb
self.assertEqual(len(union), len(set(union)))
self.assertEqual(set(union), set(pa) | set(pb))
self.assertGreaterEqual(union, pa)
self.assertGreaterEqual(union, pb)
# one cannot mix different models with set operations
ps = pa
ms = self.env['ir.ui.menu'].search([])
self.assertNotEqual(ps._name, ms._name)
self.assertNotEqual(ps, ms)
with self.assertRaises(except_orm):
res = ps + ms
with self.assertRaises(except_orm):
res = ps - ms
with self.assertRaises(except_orm):
res = ps & ms
with self.assertRaises(except_orm):
res = ps | ms
with self.assertRaises(except_orm):
res = ps < ms
with self.assertRaises(except_orm):
res = ps <= ms
with self.assertRaises(except_orm):
res = ps > ms
with self.assertRaises(except_orm):
res = ps >= ms
@mute_logger('openerp.models')
def test_80_filter(self):
""" Check filter on recordsets. """
ps = self.env['res.partner'].search([])
customers = ps.browse([p.id for p in ps if p.customer])
# filter on a single field
self.assertEqual(ps.filtered(lambda p: p.customer), customers)
self.assertEqual(ps.filtered('customer'), customers)
# filter on a sequence of fields
self.assertEqual(
ps.filtered(lambda p: p.parent_id.customer),
ps.filtered('parent_id.customer')
)
@mute_logger('openerp.models')
def test_80_map(self):
""" Check map on recordsets. """
ps = self.env['res.partner'].search([])
parents = ps.browse()
for p in ps: parents |= p.parent_id
# map a single field
self.assertEqual(ps.mapped(lambda p: p.parent_id), parents)
self.assertEqual(ps.mapped('parent_id'), parents)
# map a sequence of fields
self.assertEqual(
ps.mapped(lambda p: p.parent_id.name),
[p.parent_id.name for p in ps]
)
self.assertEqual(
ps.mapped('parent_id.name'),
[p.name for p in parents]
)
| agpl-3.0 |
EricSchles/regulations-site | regulations/views/redirect.py | 4 | 3521 | from datetime import date
import re
from django.shortcuts import redirect
from regulations.generator.api_reader import ApiReader
from regulations.generator.versions import fetch_grouped_history
from regulations.views.error_handling import handle_generic_404
def redirect_by_date(request, label_id, year, month, day):
"""If a user requests a date as the version, find the version which was
current as of that date"""
date_versions = []
client = ApiReader()
for struct in client.regversions(label_id.split('-')[0])['versions']:
if 'by_date' in struct:
date_versions.append((struct['by_date'], struct['version']))
date_versions = sorted(date_versions)
last_version = None
date_str = '%s-%s-%s' % (year, month, day)
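    # date_versions is sorted chronologically, so this walk leaves
    # last_version pointing at the newest version whose effective date is
    # on or before the requested date.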
while date_versions and date_versions[0][0] <= date_str:
last_version = date_versions[0][1]
date_versions = date_versions[1:]
label_parts = label_id.split('-')
if last_version and len(label_parts) == 2:
return redirect('chrome_section_view', label_id, last_version)
elif last_version and label_parts[-1] == 'Interp':
return redirect('chrome_section_view', label_id, last_version)
elif last_version and len(label_parts) == 1:
return redirect('chrome_regulation_view', label_id, last_version)
elif last_version:
return redirect('chrome_paragraph_view', label_id, last_version)
else:
return handle_generic_404(request)
def redirect_by_date_get(request, label_id):
"""Handles date, etc. if they are part of the GET variable. We check for
bad data here (as we can't rely on url regex)"""
try:
year = abs(int(request.GET.get('year')))
month = abs(int(request.GET.get('month')))
day = abs(int(request.GET.get('day')))
if year < 100: # Assume two-digit years are for 2000
year = 2000 + year
return redirect_by_date(request, label_id, "%04d" % year,
"%02d" % month, "%02d" % day)
    except (TypeError, ValueError):
return handle_generic_404(request)
def order_diff_versions(label_id, version, new_version):
# Re-order if needed - History is sorted in reverse chronological order
for major_version in fetch_grouped_history(label_id.split('-')[0]):
for notice in major_version['notices']:
# Hit the "old" version first, meaning it's not actually the old
# version
if notice['document_number'] == version:
return redirect('chrome_section_diff_view', label_id,
new_version, version)
# Hit the new version first -- sort is correct
elif notice['document_number'] == new_version:
return redirect('chrome_section_diff_view', label_id,
version, new_version)
# Didn't find the versions in question. Assume this was intentional
return redirect('chrome_section_diff_view', label_id, version,
new_version)
def diff_redirect(request, label_id, version):
"""Handles constructing the diff url by pulling the new version from
GET. We check for bad data here (as we can't rely on url regex)"""
new_version = request.GET.get('new_version', '')
if not re.match(r'^[-\d\w]+$', new_version):
return handle_generic_404(request)
response = order_diff_versions(label_id, version, new_version)
response['Location'] += '?from_version=%s' % version
return response
| cc0-1.0 |
jmcarp/django | django/conf/project_template/project_name/settings.py | 271 | 3288 | """
Django settings for {{ project_name }} project.
Generated by 'django-admin startproject' using Django {{ django_version }}.
For more information on this file, see
https://docs.djangoproject.com/en/{{ docs_version }}/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/{{ docs_version }}/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '{{ secret_key }}'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = '{{ project_name }}.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
# Database
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/{{ docs_version }}/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/{{ docs_version }}/howto/static-files/
STATIC_URL = '/static/'
| bsd-3-clause |
navrasio/mxnet | example/recommenders/movielens_data.py | 19 | 2419 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""MovieLens data handling: download, parse, and expose as DataIter
"""
import os
import mxnet as mx
def load_mldata_iter(filename, batch_size):
"""Not particularly fast code to parse the text file and load it into three NDArray's
and product an NDArrayIter
"""
user = []
item = []
score = []
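    # Each line of a MovieLens u.* file is tab-separated:
    # user id, item id, rating, timestamp; malformed lines are skipped.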
with open(filename) as f:
for line in f:
tks = line.strip().split('\t')
if len(tks) != 4:
continue
user.append(int(tks[0]))
item.append(int(tks[1]))
score.append(float(tks[2]))
user = mx.nd.array(user)
item = mx.nd.array(item)
score = mx.nd.array(score)
return mx.io.NDArrayIter(data={'user':user,'item':item},label={'score':score},
batch_size=batch_size, shuffle=True)
def ensure_local_data(prefix):
if not os.path.exists("%s.zip" % prefix):
print("Downloading MovieLens data: %s" % prefix)
os.system("wget http://files.grouplens.org/datasets/movielens/%s.zip" % prefix)
os.system("unzip %s.zip" % prefix)
def get_data_iter(batch_size, prefix='ml-100k'):
"""Returns a pair of NDArrayDataIter, one for train, one for test.
"""
ensure_local_data(prefix)
return (load_mldata_iter('./%s/u1.base' % prefix, batch_size),
load_mldata_iter('./%s/u1.test' % prefix, batch_size))
def max_id(fname):
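    # Scan the ratings file and return one more than the largest user and
    # item ids seen, so the results can be used directly as 0-indexed
    # embedding table sizes.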
mu = 0
mi = 0
for line in open(fname):
tks = line.strip().split('\t')
if len(tks) != 4:
continue
mu = max(mu, int(tks[0]))
mi = max(mi, int(tks[1]))
return mu + 1, mi + 1
| apache-2.0 |
dhananjay92/servo | components/script/dom/bindings/codegen/parser/tests/test_distinguishability.py | 134 | 5560 | def firstArgType(method):
return method.signatures()[0][1][0].type
def WebIDLTest(parser, harness):
parser.parse("""
dictionary Dict {
};
callback interface Foo {
};
interface Bar {
// Bit of a pain to get things that have dictionary types
void passDict(optional Dict arg);
void passFoo(Foo arg);
void passNullableUnion((object? or DOMString) arg);
void passNullable(Foo? arg);
};
""")
results = parser.finish()
iface = results[2]
harness.ok(iface.isInterface(), "Should have interface")
dictMethod = iface.members[0]
ifaceMethod = iface.members[1]
nullableUnionMethod = iface.members[2]
nullableIfaceMethod = iface.members[3]
dictType = firstArgType(dictMethod)
ifaceType = firstArgType(ifaceMethod)
harness.ok(dictType.isDictionary(), "Should have dictionary type");
harness.ok(ifaceType.isInterface(), "Should have interface type");
harness.ok(ifaceType.isCallbackInterface(), "Should have callback interface type");
harness.ok(not dictType.isDistinguishableFrom(ifaceType),
"Dictionary not distinguishable from callback interface")
harness.ok(not ifaceType.isDistinguishableFrom(dictType),
"Callback interface not distinguishable from dictionary")
nullableUnionType = firstArgType(nullableUnionMethod)
nullableIfaceType = firstArgType(nullableIfaceMethod)
    harness.ok(nullableUnionType.isUnion(), "Should have union type")
    harness.ok(nullableIfaceType.isInterface(), "Should have interface type")
    harness.ok(nullableIfaceType.nullable(), "Should have nullable type")
harness.ok(not nullableUnionType.isDistinguishableFrom(nullableIfaceType),
"Nullable type not distinguishable from union with nullable "
"member type")
harness.ok(not nullableIfaceType.isDistinguishableFrom(nullableUnionType),
"Union with nullable member type not distinguishable from "
"nullable type")
parser = parser.reset()
parser.parse("""
interface TestIface {
void passKid(Kid arg);
void passParent(Parent arg);
void passGrandparent(Grandparent arg);
void passImplemented(Implemented arg);
void passImplementedParent(ImplementedParent arg);
void passUnrelated1(Unrelated1 arg);
void passUnrelated2(Unrelated2 arg);
void passArrayBuffer(ArrayBuffer arg);
void passArrayBuffer(ArrayBufferView arg);
};
interface Kid : Parent {};
interface Parent : Grandparent {};
interface Grandparent {};
interface Implemented : ImplementedParent {};
Parent implements Implemented;
interface ImplementedParent {};
interface Unrelated1 {};
interface Unrelated2 {};
""")
results = parser.finish()
iface = results[0]
harness.ok(iface.isInterface(), "Should have interface")
argTypes = [firstArgType(method) for method in iface.members]
unrelatedTypes = [firstArgType(method) for method in iface.members[-3:]]
for type1 in argTypes:
for type2 in argTypes:
distinguishable = (type1 is not type2 and
(type1 in unrelatedTypes or
type2 in unrelatedTypes))
harness.check(type1.isDistinguishableFrom(type2),
distinguishable,
"Type %s should %sbe distinguishable from type %s" %
(type1, "" if distinguishable else "not ", type2))
harness.check(type2.isDistinguishableFrom(type1),
distinguishable,
"Type %s should %sbe distinguishable from type %s" %
(type2, "" if distinguishable else "not ", type1))
parser = parser.reset()
parser.parse("""
interface Dummy {};
interface TestIface {
void method(long arg1, TestIface arg2);
void method(long arg1, long arg2);
void method(long arg1, Dummy arg2);
void method(DOMString arg1, DOMString arg2, DOMString arg3);
};
""")
results = parser.finish()
harness.check(len(results[1].members), 1,
"Should look like we have one method")
harness.check(len(results[1].members[0].signatures()), 4,
"Should have four signatures")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface Dummy {};
interface TestIface {
void method(long arg1, TestIface arg2);
void method(long arg1, long arg2);
void method(any arg1, Dummy arg2);
void method(DOMString arg1, DOMString arg2, DOMString arg3);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should throw when args before the distinguishing arg are not "
"all the same type")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface Dummy {};
interface TestIface {
void method(long arg1, TestIface arg2);
void method(long arg1, long arg2);
void method(any arg1, DOMString arg2);
void method(DOMString arg1, DOMString arg2, DOMString arg3);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should throw when there is no distinguishing index")
| mpl-2.0 |
levinas/assembly | lib/assembly/plugins/idba.py | 2 | 2376 | import os
import logging
import subprocess
from plugins import BaseAssembler
from yapsy.IPlugin import IPlugin
class IdbaAssembler(BaseAssembler, IPlugin):
new_version = True
def run(self):
"""
        Build the command and run it.
        Return a dict mapping 'contigs' and/or 'scaffolds' to output file lists.
"""
# Only supports one set of reads
if not len(self.data.readsets_paired) == 1:
raise Exception('IDBA assembler requires one paired-end library')
readset = self.data.readsets[0]
if self.data.readsets_single:
self.out_module.write('Warning, discarding single end files\n')
cmd_args = [self.bin_idba_ud,
'--num_threads', self.process_threads_allowed]
read_file = readset.files[0]
#Merge file if pairs are separate
        if len(readset.files) == 2:
            parts = readset.files[0].rsplit('.', 1)
## TODO move this to idba folder
merged_read = parts[0] + '.idba_merged.fa'
merge_cmd = [self.bin_fq2fa, '--merge', '--filter',
readset.files[0],
readset.files[1],
merged_read]
self.arast_popen(merge_cmd, overrides=False)
read_file = merged_read
# Convert if files are fastq
if infer_filetype(read_file) == 'fastq':
parts = read_file.rsplit('.', 1)
fa_file = '{}.fasta'.format(parts[0])
fqfa_command = [self.bin_fq2fa, read_file, fa_file]
self.arast_popen(fqfa_command, overrides=False)
read_file = fa_file
base = os.path.join(self.outpath, 'run')
cmd_args += ['-r', read_file, '-o', base, '--maxk', self.max_k]
self.arast_popen(cmd_args, cwd=self.outpath)
contigs = os.path.join(base, 'contig.fa')
scaffolds = os.path.join(base, 'scaffold.fa')
output = {}
if os.path.exists(contigs):
output['contigs'] = [contigs]
if os.path.exists(scaffolds):
output['scaffolds'] = [scaffolds]
return output
def infer_filetype(filename):
    """Guess 'fasta' or 'fastq' from the file extension; return '' if unknown."""
    filemap = {'.fa': 'fasta',
               '.fasta': 'fasta',
               '.fq': 'fastq',
               '.fastq': 'fastq'}
    for ext in filemap:
        if filename.endswith(ext):
            return filemap[ext]
    return ''
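# Example behaviour of the helper above (a sketch, not part of the plugin):
#   infer_filetype('reads.fq')    -> 'fastq'
#   infer_filetype('reads.fasta') -> 'fasta'
#   infer_filetype('reads.sam')   -> ''   (unknown extensions fall through)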
| mit |
jameslegg/boto | boto/exception.py | 9 | 15123 | # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Exception classes - Subclassing allows you to check for specific errors
"""
import base64
import xml.sax
from boto import handler
from boto.resultset import ResultSet
class BotoClientError(StandardError):
"""
General Boto Client error (error accessing AWS)
"""
def __init__(self, reason, *args):
StandardError.__init__(self, reason, *args)
self.reason = reason
def __repr__(self):
return 'BotoClientError: %s' % self.reason
def __str__(self):
return 'BotoClientError: %s' % self.reason
class SDBPersistenceError(StandardError):
pass
class StoragePermissionsError(BotoClientError):
"""
Permissions error when accessing a bucket or key on a storage service.
"""
pass
class S3PermissionsError(StoragePermissionsError):
"""
Permissions error when accessing a bucket or key on S3.
"""
pass
class GSPermissionsError(StoragePermissionsError):
"""
Permissions error when accessing a bucket or key on GS.
"""
pass
class BotoServerError(StandardError):
def __init__(self, status, reason, body=None, *args):
StandardError.__init__(self, status, reason, body, *args)
self.status = status
self.reason = reason
self.body = body or ''
self.request_id = None
self.error_code = None
self._error_message = None
self.box_usage = None
# Attempt to parse the error response. If body isn't present,
# then just ignore the error response.
if self.body:
try:
h = handler.XmlHandlerWrapper(self, self)
h.parseString(self.body)
            except (TypeError, xml.sax.SAXParseException):
# Remove unparsable message body so we don't include garbage
# in exception. But first, save self.body in self.error_message
# because occasionally we get error messages from Eucalyptus
# that are just text strings that we want to preserve.
self.message = self.body
self.body = None
def __getattr__(self, name):
if name == 'error_message':
return self.message
if name == 'code':
return self.error_code
raise AttributeError
def __setattr__(self, name, value):
if name == 'error_message':
self.message = value
else:
super(BotoServerError, self).__setattr__(name, value)
def __repr__(self):
return '%s: %s %s\n%s' % (self.__class__.__name__,
self.status, self.reason, self.body)
def __str__(self):
return '%s: %s %s\n%s' % (self.__class__.__name__,
self.status, self.reason, self.body)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name in ('RequestId', 'RequestID'):
self.request_id = value
elif name == 'Code':
self.error_code = value
elif name == 'Message':
self.message = value
elif name == 'BoxUsage':
self.box_usage = value
return None
def _cleanupParsedProperties(self):
self.request_id = None
self.error_code = None
self.message = None
self.box_usage = None
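    # Illustrative sketch (not part of boto): constructing the error with an
    # XML body lets the SAX handlers above populate the parsed attributes.
    #
    #   e = BotoServerError(400, 'Bad Request',
    #       '<Response><Errors><Error><Code>Throttling</Code>'
    #       '<Message>Rate exceeded</Message></Error></Errors>'
    #       '<RequestID>abc-123</RequestID></Response>')
    #   e.error_code  # -> 'Throttling'
    #   e.message     # -> 'Rate exceeded'
    #   e.request_id  # -> 'abc-123'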
class ConsoleOutput:
def __init__(self, parent=None):
self.parent = parent
self.instance_id = None
self.timestamp = None
self.comment = None
self.output = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'instanceId':
self.instance_id = value
elif name == 'output':
self.output = base64.b64decode(value)
else:
setattr(self, name, value)
class StorageCreateError(BotoServerError):
"""
Error creating a bucket or key on a storage service.
"""
def __init__(self, status, reason, body=None):
self.bucket = None
BotoServerError.__init__(self, status, reason, body)
def endElement(self, name, value, connection):
if name == 'BucketName':
self.bucket = value
else:
return BotoServerError.endElement(self, name, value, connection)
class S3CreateError(StorageCreateError):
"""
Error creating a bucket or key on S3.
"""
pass
class GSCreateError(StorageCreateError):
"""
Error creating a bucket or key on GS.
"""
pass
class StorageCopyError(BotoServerError):
"""
Error copying a key on a storage service.
"""
pass
class S3CopyError(StorageCopyError):
"""
Error copying a key on S3.
"""
pass
class GSCopyError(StorageCopyError):
"""
Error copying a key on GS.
"""
pass
class SQSError(BotoServerError):
"""
General Error on Simple Queue Service.
"""
def __init__(self, status, reason, body=None):
self.detail = None
self.type = None
BotoServerError.__init__(self, status, reason, body)
def startElement(self, name, attrs, connection):
return BotoServerError.startElement(self, name, attrs, connection)
def endElement(self, name, value, connection):
if name == 'Detail':
self.detail = value
elif name == 'Type':
self.type = value
else:
return BotoServerError.endElement(self, name, value, connection)
def _cleanupParsedProperties(self):
BotoServerError._cleanupParsedProperties(self)
for p in ('detail', 'type'):
setattr(self, p, None)
class SQSDecodeError(BotoClientError):
"""
Error when decoding an SQS message.
"""
def __init__(self, reason, message):
BotoClientError.__init__(self, reason, message)
self.message = message
def __repr__(self):
return 'SQSDecodeError: %s' % self.reason
def __str__(self):
return 'SQSDecodeError: %s' % self.reason
class StorageResponseError(BotoServerError):
"""
Error in response from a storage service.
"""
def __init__(self, status, reason, body=None):
self.resource = None
BotoServerError.__init__(self, status, reason, body)
def startElement(self, name, attrs, connection):
return BotoServerError.startElement(self, name, attrs, connection)
def endElement(self, name, value, connection):
if name == 'Resource':
self.resource = value
else:
return BotoServerError.endElement(self, name, value, connection)
def _cleanupParsedProperties(self):
BotoServerError._cleanupParsedProperties(self)
        for p in ('resource',):
setattr(self, p, None)
class S3ResponseError(StorageResponseError):
"""
Error in response from S3.
"""
pass
class GSResponseError(StorageResponseError):
"""
Error in response from GS.
"""
pass
class EC2ResponseError(BotoServerError):
"""
Error in response from EC2.
"""
def __init__(self, status, reason, body=None):
self.errors = None
self._errorResultSet = []
BotoServerError.__init__(self, status, reason, body)
self.errors = [ (e.error_code, e.error_message) \
for e in self._errorResultSet ]
if len(self.errors):
self.error_code, self.error_message = self.errors[0]
def startElement(self, name, attrs, connection):
if name == 'Errors':
self._errorResultSet = ResultSet([('Error', _EC2Error)])
return self._errorResultSet
else:
return None
def endElement(self, name, value, connection):
if name == 'RequestID':
self.request_id = value
else:
return None # don't call subclass here
def _cleanupParsedProperties(self):
BotoServerError._cleanupParsedProperties(self)
self._errorResultSet = []
        for p in ('errors',):
setattr(self, p, None)
class JSONResponseError(BotoServerError):
"""
This exception expects the fully parsed and decoded JSON response
body to be passed as the body parameter.
:ivar status: The HTTP status code.
:ivar reason: The HTTP reason message.
:ivar body: The Python dict that represents the decoded JSON
response body.
:ivar error_message: The full description of the AWS error encountered.
:ivar error_code: A short string that identifies the AWS error
(e.g. ConditionalCheckFailedException)
"""
def __init__(self, status, reason, body=None, *args):
self.status = status
self.reason = reason
self.body = body
if self.body:
self.error_message = self.body.get('message', None)
self.error_code = self.body.get('__type', None)
if self.error_code:
self.error_code = self.error_code.split('#')[-1]
class DynamoDBResponseError(JSONResponseError):
pass
class SWFResponseError(JSONResponseError):
pass
class EmrResponseError(BotoServerError):
"""
Error in response from EMR
"""
pass
class _EC2Error:
def __init__(self, connection=None):
self.connection = connection
self.error_code = None
self.error_message = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Code':
self.error_code = value
elif name == 'Message':
self.error_message = value
else:
return None
class SDBResponseError(BotoServerError):
"""
Error in responses from SDB.
"""
pass
class AWSConnectionError(BotoClientError):
"""
General error connecting to Amazon Web Services.
"""
pass
class StorageDataError(BotoClientError):
"""
Error receiving data from a storage service.
"""
pass
class S3DataError(StorageDataError):
"""
Error receiving data from S3.
"""
pass
class GSDataError(StorageDataError):
"""
Error receiving data from GS.
"""
pass
class InvalidUriError(Exception):
"""Exception raised when URI is invalid."""
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
class InvalidAclError(Exception):
"""Exception raised when ACL XML is invalid."""
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
class InvalidCorsError(Exception):
"""Exception raised when CORS XML is invalid."""
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
class NoAuthHandlerFound(Exception):
"""Is raised when no auth handlers were found ready to authenticate."""
pass
class InvalidLifecycleConfigError(Exception):
"""Exception raised when GCS lifecycle configuration XML is invalid."""
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
# Enum class for resumable upload failure disposition.
class ResumableTransferDisposition(object):
# START_OVER means an attempt to resume an existing transfer failed,
# and a new resumable upload should be attempted (without delay).
START_OVER = 'START_OVER'
# WAIT_BEFORE_RETRY means the resumable transfer failed but that it can
# be retried after a time delay within the current process.
WAIT_BEFORE_RETRY = 'WAIT_BEFORE_RETRY'
# ABORT_CUR_PROCESS means the resumable transfer failed and that
# delaying/retrying within the current process will not help. If
# resumable transfer included a state tracker file the upload can be
# retried again later, in another process (e.g., a later run of gsutil).
ABORT_CUR_PROCESS = 'ABORT_CUR_PROCESS'
# ABORT means the resumable transfer failed in a way that it does not
# make sense to continue in the current process, and further that the
# current tracker ID should not be preserved (in a tracker file if one
# was specified at resumable upload start time). If the user tries again
# later (e.g., a separate run of gsutil) it will get a new resumable
# upload ID.
ABORT = 'ABORT'
class ResumableUploadException(Exception):
"""
Exception raised for various resumable upload problems.
self.disposition is of type ResumableTransferDisposition.
"""
def __init__(self, message, disposition):
Exception.__init__(self, message, disposition)
self.message = message
self.disposition = disposition
def __repr__(self):
return 'ResumableUploadException("%s", %s)' % (
self.message, self.disposition)
class ResumableDownloadException(Exception):
"""
Exception raised for various resumable download problems.
self.disposition is of type ResumableTransferDisposition.
"""
def __init__(self, message, disposition):
Exception.__init__(self, message, disposition)
self.message = message
self.disposition = disposition
def __repr__(self):
return 'ResumableDownloadException("%s", %s)' % (
self.message, self.disposition)
class TooManyRecordsException(Exception):
"""
Exception raised when a search of Route53 records returns more
records than requested.
"""
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
class PleaseRetryException(Exception):
"""
Indicates a request should be retried.
"""
def __init__(self, message, response=None):
self.message = message
self.response = response
def __repr__(self):
return 'PleaseRetryException("%s", %s)' % (
self.message,
self.response
)
| mit |
cobalys/django | django/contrib/localflavor/ro/forms.py | 8 | 6772 | # -*- coding: utf-8 -*-
"""
Romanian specific form helpers.
"""
from __future__ import absolute_import, unicode_literals
from django.contrib.localflavor.ro.ro_counties import COUNTIES_CHOICES
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError, Field, RegexField, Select
from django.utils.translation import ugettext_lazy as _
class ROCIFField(RegexField):
"""
A Romanian fiscal identity code (CIF) field
For CIF validation algorithm see http://www.validari.ro/cui.html
"""
default_error_messages = {
'invalid': _("Enter a valid CIF."),
}
def __init__(self, max_length=10, min_length=2, *args, **kwargs):
super(ROCIFField, self).__init__(r'^(RO)?[0-9]{2,10}', max_length,
min_length, *args, **kwargs)
def clean(self, value):
"""
CIF validation
"""
value = super(ROCIFField, self).clean(value)
if value in EMPTY_VALUES:
return ''
# strip RO part
if value[0:2] == 'RO':
value = value[2:]
key = '753217532'[::-1]
value = value[::-1]
key_iter = iter(key)
checksum = 0
for digit in value[1:]:
checksum += int(digit) * int(next(key_iter))
checksum = checksum * 10 % 11
if checksum == 10:
checksum = 0
if checksum != int(value[0]):
raise ValidationError(self.error_messages['invalid'])
return value[::-1]
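    # Worked checksum example (a sketch; these digits are chosen to satisfy
    # the algorithm and are not a real registered CIF): for '12345674' the
    # check digit is the leading '4' after reversal, and the remaining
    # reversed digits pair with the reversed key as
    #   7*2 + 6*3 + 5*5 + 4*7 + 3*1 + 2*2 + 1*3 = 95
    # giving checksum = 95 * 10 % 11 = 4, which matches the check digit.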
class ROCNPField(RegexField):
"""
A Romanian personal identity code (CNP) field
For CNP validation algorithm see http://www.validari.ro/cnp.html
"""
default_error_messages = {
'invalid': _("Enter a valid CNP."),
}
def __init__(self, max_length=13, min_length=13, *args, **kwargs):
super(ROCNPField, self).__init__(r'^[1-9][0-9]{12}', max_length,
min_length, *args, **kwargs)
def clean(self, value):
"""
CNP validations
"""
value = super(ROCNPField, self).clean(value)
if value in EMPTY_VALUES:
return ''
# check birthdate digits
import datetime
try:
            datetime.date(int(value[1:3]), int(value[3:5]), int(value[5:7]))
        except ValueError:
raise ValidationError(self.error_messages['invalid'])
# checksum
key = '279146358279'
checksum = 0
value_iter = iter(value)
for digit in key:
checksum += int(digit) * int(next(value_iter))
checksum %= 11
if checksum == 10:
checksum = 1
if checksum != int(value[12]):
raise ValidationError(self.error_messages['invalid'])
return value
class ROCountyField(Field):
"""
A form field that validates its input is a Romanian county name or
abbreviation. It normalizes the input to the standard vehicle registration
abbreviation for the given county
WARNING: This field will only accept names written with diacritics; consider
    using ROCountySelect if this behavior is unacceptable for you
Example:
Argeş => valid
Arges => invalid
"""
default_error_messages = {
        'invalid': _('Enter a Romanian county code or name.'),
}
def clean(self, value):
super(ROCountyField, self).clean(value)
if value in EMPTY_VALUES:
return ''
try:
value = value.strip().upper()
except AttributeError:
pass
# search for county code
for entry in COUNTIES_CHOICES:
if value in entry:
return value
        # search for county name (case-insensitive)
        for code, name in COUNTIES_CHOICES:
            if name.upper() == value:
                return code
raise ValidationError(self.error_messages['invalid'])
class ROCountySelect(Select):
"""
A Select widget that uses a list of Romanian counties (judete) as its
choices.
"""
def __init__(self, attrs=None):
super(ROCountySelect, self).__init__(attrs, choices=COUNTIES_CHOICES)
class ROIBANField(RegexField):
"""
Romanian International Bank Account Number (IBAN) field
For Romanian IBAN validation algorithm see http://validari.ro/iban.html
"""
default_error_messages = {
'invalid': _('Enter a valid IBAN in ROXX-XXXX-XXXX-XXXX-XXXX-XXXX format'),
}
def __init__(self, max_length=40, min_length=24, *args, **kwargs):
super(ROIBANField, self).__init__(r'^[0-9A-Za-z\-\s]{24,40}$',
max_length, min_length, *args, **kwargs)
def clean(self, value):
"""
Strips - and spaces, performs country code and checksum validation
"""
value = super(ROIBANField, self).clean(value)
if value in EMPTY_VALUES:
return ''
value = value.replace('-','')
value = value.replace(' ','')
value = value.upper()
if value[0:2] != 'RO':
raise ValidationError(self.error_messages['invalid'])
numeric_format = ''
for char in value[4:] + value[0:4]:
if char.isalpha():
numeric_format += str(ord(char) - 55)
else:
numeric_format += char
if int(numeric_format) % 97 != 1:
raise ValidationError(self.error_messages['invalid'])
return value
class ROPhoneNumberField(RegexField):
"""Romanian phone number field"""
default_error_messages = {
'invalid': _('Phone numbers must be in XXXX-XXXXXX format.'),
}
def __init__(self, max_length=20, min_length=10, *args, **kwargs):
super(ROPhoneNumberField, self).__init__(r'^[0-9\-\(\)\s]{10,20}$',
max_length, min_length, *args, **kwargs)
def clean(self, value):
"""
Strips -, (, ) and spaces. Checks the final length.
"""
value = super(ROPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return ''
value = value.replace('-','')
value = value.replace('(','')
value = value.replace(')','')
value = value.replace(' ','')
if len(value) != 10:
raise ValidationError(self.error_messages['invalid'])
return value
class ROPostalCodeField(RegexField):
"""Romanian postal code field."""
default_error_messages = {
'invalid': _('Enter a valid postal code in the format XXXXXX'),
}
def __init__(self, max_length=6, min_length=6, *args, **kwargs):
super(ROPostalCodeField, self).__init__(r'^[0-9][0-8][0-9]{4}$',
max_length, min_length, *args, **kwargs)
| bsd-3-clause |
blue-shell/veromix | plasma/contents/code/SourceOutputUI.py | 3 | 3780 | # -*- coding: utf-8 -*-
# Copyright (C) 2009-2012 Nik Lutz <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyKDE4.kdeui import *
from PyKDE4.plasma import Plasma
import signal, os, datetime
from LabelSlider import *
from Channel import *
from MuteButton import *
from ClickableMeter import *
class SourceOutputUI( Channel ):
def __init__(self , parent):
self.mouse_pressed = False
Channel.__init__(self, parent)
self.layout.setContentsMargins(6,2,6,2)
def createMute(self):
self.mute = InputMuteButton(self)
self.mute.setSizePolicy(QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum,True) )
self.connect(self.mute, SIGNAL("clicked()"), self.on_mute_cb )
self.mute.setBigIconName("audio-input-microphone.png")
def createSlider(self):
self.slider = Label()
def context_menu_create_unlock_channels(self):
pass
def context_menu_create_mute(self):
pass
def context_menu_create_meter(self):
pass
def context_menu_create_custom(self):
move_to = QMenu(i18n("Move To"), self.popup_menu)
for widget in self.veromix.get_sinkoutput_widgets():
action = QAction(widget.name(), self.popup_menu)
move_to.addAction(action)
self.popup_menu.addMenu(move_to)
def on_contextmenu_clicked(self, action):
for widget in self.veromix.get_sinkoutput_widgets():
if widget.name() == action.text():
self.veromix.pa.move_source_output(self.index, widget.index)
def update_label(self):
text = self.pa_sink.get_nice_text()
bold = self.pa_sink.get_nice_title()
self.set_name(bold)
if self.slider:
self.slider.setText(text)
self.slider.setBoldText(bold)
iconname = self.pa_sink.get_nice_icon()
if iconname == None and "app" in self.pa_sink.props.keys():
iconname = self.veromix.query_application(self.pa_sink.props["app"])
if iconname :
self.mute.setBigIconName(iconname)
self.updateIcon()
def get_assotiated_source(self):
try:
return self.pa_sink.props["source"]
except:
return 0
def on_slider_cb(self, value):
pass
def on_update_meter(self, index, value, number_of_sinks):
        # FIXME: meter updates are currently disabled; the intended logic was
        # roughly:
        #   if self.getOutputIndex() == index:
        #       self.slider.set_meter_value(int(value))
        pass
### Drag and Drop
def mousePressEvent(self, event):
self.mouse_pressed = True
def mouseReleaseEvent(self, event):
self.mouse_pressed = False
def mouseMoveEvent(self,e):
if self.mouse_pressed :
self.startDrag(e)
def startDrag(self,event):
drag = QDrag(event.widget())
mimedata = QMimeData()
liste = []
liste.append(QUrl( "veromix://source_output_index:"+str(int(self.index)) ))
mimedata.setUrls(liste)
drag.setMimeData(mimedata)
#drag.setHotSpot(event.pos() - self.rect().topLeft())
dropAction = drag.start(Qt.MoveAction)
| gpl-3.0 |
rajsadho/django | django/core/handlers/wsgi.py | 339 | 9181 | from __future__ import unicode_literals
import cgi
import codecs
import logging
import sys
from io import BytesIO
from threading import Lock
from django import http
from django.conf import settings
from django.core import signals
from django.core.handlers import base
from django.core.urlresolvers import set_script_prefix
from django.utils import six
from django.utils.encoding import force_str, force_text
from django.utils.functional import cached_property
logger = logging.getLogger('django.request')
# encode() and decode() expect the charset to be a native string.
ISO_8859_1, UTF_8 = str('iso-8859-1'), str('utf-8')
class LimitedStream(object):
'''
LimitedStream wraps another stream in order to not allow reading from it
past specified amount of bytes.
'''
def __init__(self, stream, limit, buf_size=64 * 1024 * 1024):
self.stream = stream
self.remaining = limit
self.buffer = b''
self.buf_size = buf_size
def _read_limited(self, size=None):
if size is None or size > self.remaining:
size = self.remaining
if size == 0:
return b''
result = self.stream.read(size)
self.remaining -= len(result)
return result
def read(self, size=None):
if size is None:
result = self.buffer + self._read_limited()
self.buffer = b''
elif size < len(self.buffer):
result = self.buffer[:size]
self.buffer = self.buffer[size:]
else: # size >= len(self.buffer)
result = self.buffer + self._read_limited(size - len(self.buffer))
self.buffer = b''
return result
def readline(self, size=None):
while b'\n' not in self.buffer and \
(size is None or len(self.buffer) < size):
if size:
# since size is not None here, len(self.buffer) < size
chunk = self._read_limited(size - len(self.buffer))
else:
chunk = self._read_limited()
if not chunk:
break
self.buffer += chunk
sio = BytesIO(self.buffer)
if size:
line = sio.readline(size)
else:
line = sio.readline()
self.buffer = sio.read()
return line
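# A minimal sketch (not part of Django) of the guarantee LimitedStream
# provides: reads never go past the declared limit, even when the
# underlying stream holds more data.
#
#   from io import BytesIO
#   stream = LimitedStream(BytesIO(b'hello world'), 5)
#   stream.read()   # -> b'hello'
#   stream.read()   # -> b'' (limit exhausted)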
class WSGIRequest(http.HttpRequest):
def __init__(self, environ):
script_name = get_script_name(environ)
path_info = get_path_info(environ)
if not path_info:
# Sometimes PATH_INFO exists, but is empty (e.g. accessing
# the SCRIPT_NAME URL without a trailing slash). We really need to
# operate as if they'd requested '/'. Not amazingly nice to force
# the path like this, but should be harmless.
path_info = '/'
self.environ = environ
self.path_info = path_info
# be careful to only replace the first slash in the path because of
# http://test/something and http://test//something being different as
# stated in http://www.ietf.org/rfc/rfc2396.txt
self.path = '%s/%s' % (script_name.rstrip('/'),
path_info.replace('/', '', 1))
self.META = environ
self.META['PATH_INFO'] = path_info
self.META['SCRIPT_NAME'] = script_name
self.method = environ['REQUEST_METHOD'].upper()
_, content_params = cgi.parse_header(environ.get('CONTENT_TYPE', ''))
if 'charset' in content_params:
try:
codecs.lookup(content_params['charset'])
except LookupError:
pass
else:
self.encoding = content_params['charset']
self._post_parse_error = False
try:
content_length = int(environ.get('CONTENT_LENGTH'))
except (ValueError, TypeError):
content_length = 0
self._stream = LimitedStream(self.environ['wsgi.input'], content_length)
self._read_started = False
self.resolver_match = None
def _get_scheme(self):
return self.environ.get('wsgi.url_scheme')
@cached_property
def GET(self):
# The WSGI spec says 'QUERY_STRING' may be absent.
raw_query_string = get_bytes_from_wsgi(self.environ, 'QUERY_STRING', '')
return http.QueryDict(raw_query_string, encoding=self._encoding)
def _get_post(self):
if not hasattr(self, '_post'):
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
@cached_property
def COOKIES(self):
raw_cookie = get_str_from_wsgi(self.environ, 'HTTP_COOKIE', '')
return http.parse_cookie(raw_cookie)
def _get_files(self):
if not hasattr(self, '_files'):
self._load_post_and_files()
return self._files
POST = property(_get_post, _set_post)
FILES = property(_get_files)
class WSGIHandler(base.BaseHandler):
initLock = Lock()
request_class = WSGIRequest
def __call__(self, environ, start_response):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
with self.initLock:
try:
# Check that middleware is still uninitialized.
if self._request_middleware is None:
self.load_middleware()
except:
# Unload whatever middleware we got
self._request_middleware = None
raise
set_script_prefix(get_script_name(environ))
signals.request_started.send(sender=self.__class__, environ=environ)
try:
request = self.request_class(environ)
except UnicodeDecodeError:
logger.warning('Bad Request (UnicodeDecodeError)',
exc_info=sys.exc_info(),
extra={
'status_code': 400,
}
)
response = http.HttpResponseBadRequest()
else:
response = self.get_response(request)
response._handler_class = self.__class__
status = '%s %s' % (response.status_code, response.reason_phrase)
response_headers = [(str(k), str(v)) for k, v in response.items()]
for c in response.cookies.values():
response_headers.append((str('Set-Cookie'), str(c.output(header=''))))
start_response(force_str(status), response_headers)
if getattr(response, 'file_to_stream', None) is not None and environ.get('wsgi.file_wrapper'):
response = environ['wsgi.file_wrapper'](response.file_to_stream)
return response
def get_path_info(environ):
"""
Returns the HTTP request's PATH_INFO as a unicode string.
"""
path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '/')
return path_info.decode(UTF_8)
def get_script_name(environ):
"""
Returns the equivalent of the HTTP request's SCRIPT_NAME environment
variable. If Apache mod_rewrite has been used, returns what would have been
the script name prior to any rewriting (so it's the script name as seen
from the client's perspective), unless the FORCE_SCRIPT_NAME setting is
set (to anything).
"""
if settings.FORCE_SCRIPT_NAME is not None:
return force_text(settings.FORCE_SCRIPT_NAME)
# If Apache's mod_rewrite had a whack at the URL, Apache set either
# SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
# rewrites. Unfortunately not every Web server (lighttpd!) passes this
# information through all the time, so FORCE_SCRIPT_NAME, above, is still
# needed.
script_url = get_bytes_from_wsgi(environ, 'SCRIPT_URL', '')
if not script_url:
script_url = get_bytes_from_wsgi(environ, 'REDIRECT_URL', '')
if script_url:
path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '')
script_name = script_url[:-len(path_info)] if path_info else script_url
else:
script_name = get_bytes_from_wsgi(environ, 'SCRIPT_NAME', '')
return script_name.decode(UTF_8)
def get_bytes_from_wsgi(environ, key, default):
"""
Get a value from the WSGI environ dictionary as bytes.
key and default should be str objects. Under Python 2 they may also be
unicode objects provided they only contain ASCII characters.
"""
value = environ.get(str(key), str(default))
# Under Python 3, non-ASCII values in the WSGI environ are arbitrarily
# decoded with ISO-8859-1. This is wrong for Django websites where UTF-8
# is the default. Re-encode to recover the original bytestring.
return value.encode(ISO_8859_1) if six.PY3 else value
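# Worked example (a sketch, not part of Django): under Python 3 a WSGI server
# hands over 'café' sent as UTF-8 in the form of the ISO-8859-1 string
# 'caf\xc3\xa9'; re-encoding recovers the original bytes:
#
#   'caf\xc3\xa9'.encode('iso-8859-1')  # -> b'caf\xc3\xa9'
#   b'caf\xc3\xa9'.decode('utf-8')      # -> 'café'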
def get_str_from_wsgi(environ, key, default):
"""
Get a value from the WSGI environ dictionary as str.
key and default should be str objects. Under Python 2 they may also be
unicode objects provided they only contain ASCII characters.
"""
value = get_bytes_from_wsgi(environ, key, default)
return value.decode(UTF_8, errors='replace') if six.PY3 else value
| bsd-3-clause |
eethomas/eucalyptus | clc/eucadmin/eucadmin/registercloudformation.py | 6 | 1541 | # Copyright 2011-2014 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import eucadmin.registerrequest
class RegisterCloudFormation(eucadmin.registerrequest.RegisterRequest):
ServiceName = 'CloudFormation'
    Description = 'Register a CloudFormation service.'
| gpl-3.0 |
Spanarchie/pyRest | pyRest/lib/python2.7/site-packages/setuptools/command/install_lib.py | 454 | 2486 | from distutils.command.install_lib import install_lib as _install_lib
import os
class install_lib(_install_lib):
"""Don't add compiled flags to filenames of non-Python files"""
    def _bytecode_filenames(self, py_filenames):
bytecode_files = []
for py_file in py_filenames:
if not py_file.endswith('.py'):
continue
if self.compile:
bytecode_files.append(py_file + "c")
if self.optimize > 0:
bytecode_files.append(py_file + "o")
return bytecode_files
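    # Example (a sketch): with self.compile True and self.optimize 0,
    # ['pkg/a.py', 'pkg/data.txt'] maps to ['pkg/a.pyc'] -- the non-Python
    # file contributes no bytecode filename.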
def run(self):
self.build()
outfiles = self.install()
if outfiles is not None:
# always compile, in case we have any extension stubs to deal with
self.byte_compile(outfiles)
def get_exclusions(self):
exclude = {}
nsp = self.distribution.namespace_packages
if (nsp and self.get_finalized_command('install')
.single_version_externally_managed
):
for pkg in nsp:
parts = pkg.split('.')
while parts:
pkgdir = os.path.join(self.install_dir, *parts)
for f in '__init__.py', '__init__.pyc', '__init__.pyo':
exclude[os.path.join(pkgdir,f)] = 1
parts.pop()
return exclude
def copy_tree(
self, infile, outfile,
preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1
):
assert preserve_mode and preserve_times and not preserve_symlinks
exclude = self.get_exclusions()
if not exclude:
return _install_lib.copy_tree(self, infile, outfile)
# Exclude namespace package __init__.py* files from the output
from setuptools.archive_util import unpack_directory
from distutils import log
outfiles = []
def pf(src, dst):
if dst in exclude:
log.warn("Skipping installation of %s (namespace package)",dst)
return False
log.info("copying %s -> %s", src, os.path.dirname(dst))
outfiles.append(dst)
return dst
unpack_directory(infile, outfile, pf)
return outfiles
def get_outputs(self):
outputs = _install_lib.get_outputs(self)
exclude = self.get_exclusions()
if exclude:
return [f for f in outputs if f not in exclude]
return outputs
| unlicense |
code4futuredotorg/reeborg_tw | src/libraries/brython_old/Lib/_socket.py | 742 | 6431 | """Implementation module for socket operations.
See the socket module for documentation."""
AF_APPLETALK = 16
AF_DECnet = 12
AF_INET = 2
AF_INET6 = 23
AF_IPX = 6
AF_IRDA = 26
AF_SNA = 11
AF_UNSPEC = 0
AI_ADDRCONFIG = 1024
AI_ALL = 256
AI_CANONNAME = 2
AI_NUMERICHOST = 4
AI_NUMERICSERV = 8
AI_PASSIVE = 1
AI_V4MAPPED = 2048
CAPI = '<capsule object "_socket.CAPI" at 0x00BC4F38>'
EAI_AGAIN = 11002
EAI_BADFLAGS = 10022
EAI_FAIL = 11003
EAI_FAMILY = 10047
EAI_MEMORY = 8
EAI_NODATA = 11001
EAI_NONAME = 11001
EAI_SERVICE = 10109
EAI_SOCKTYPE = 10044
INADDR_ALLHOSTS_GROUP = -536870911
INADDR_ANY = 0
INADDR_BROADCAST = -1
INADDR_LOOPBACK = 2130706433
INADDR_MAX_LOCAL_GROUP = -536870657
INADDR_NONE = -1
INADDR_UNSPEC_GROUP = -536870912
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
IPPROTO_ICMP = 1
IPPROTO_IP = 0
IPPROTO_RAW = 255
IPPROTO_TCP = 6
IPPROTO_UDP = 17
IPV6_CHECKSUM = 26
IPV6_DONTFRAG = 14
IPV6_HOPLIMIT = 21
IPV6_HOPOPTS = 1
IPV6_JOIN_GROUP = 12
IPV6_LEAVE_GROUP = 13
IPV6_MULTICAST_HOPS = 10
IPV6_MULTICAST_IF = 9
IPV6_MULTICAST_LOOP = 11
IPV6_PKTINFO = 19
IPV6_RECVRTHDR = 38
IPV6_RECVTCLASS = 40
IPV6_RTHDR = 32
IPV6_TCLASS = 39
IPV6_UNICAST_HOPS = 4
IPV6_V6ONLY = 27
IP_ADD_MEMBERSHIP = 12
IP_DROP_MEMBERSHIP = 13
IP_HDRINCL = 2
IP_MULTICAST_IF = 9
IP_MULTICAST_LOOP = 11
IP_MULTICAST_TTL = 10
IP_OPTIONS = 1
IP_RECVDSTADDR = 25
IP_TOS = 3
IP_TTL = 4
MSG_BCAST = 1024
MSG_CTRUNC = 512
MSG_DONTROUTE = 4
MSG_MCAST = 2048
MSG_OOB = 1
MSG_PEEK = 2
MSG_TRUNC = 256
NI_DGRAM = 16
NI_MAXHOST = 1025
NI_MAXSERV = 32
NI_NAMEREQD = 4
NI_NOFQDN = 1
NI_NUMERICHOST = 2
NI_NUMERICSERV = 8
RCVALL_MAX = 3
RCVALL_OFF = 0
RCVALL_ON = 1
RCVALL_SOCKETLEVELONLY = 2
SHUT_RD = 0
SHUT_RDWR = 2
SHUT_WR = 1
SIO_KEEPALIVE_VALS = 2550136836
SIO_RCVALL = 2550136833
SOCK_DGRAM = 2
SOCK_RAW = 3
SOCK_RDM = 4
SOCK_SEQPACKET = 5
SOCK_STREAM = 1
SOL_IP = 0
SOL_SOCKET = 65535
SOL_TCP = 6
SOL_UDP = 17
SOMAXCONN = 2147483647
SO_ACCEPTCONN = 2
SO_BROADCAST = 32
SO_DEBUG = 1
SO_DONTROUTE = 16
SO_ERROR = 4103
SO_EXCLUSIVEADDRUSE = -5
SO_KEEPALIVE = 8
SO_LINGER = 128
SO_OOBINLINE = 256
SO_RCVBUF = 4098
SO_RCVLOWAT = 4100
SO_RCVTIMEO = 4102
SO_REUSEADDR = 4
SO_SNDBUF = 4097
SO_SNDLOWAT = 4099
SO_SNDTIMEO = 4101
SO_TYPE = 4104
SO_USELOOPBACK = 64
class SocketType:
pass
TCP_MAXSEG = 4
TCP_NODELAY = 1
__loader__ = '<_frozen_importlib.ExtensionFileLoader object at 0x00CA2D90>'
def dup(*args,**kw):
"""dup(integer) -> integer
Duplicate an integer socket file descriptor. This is like os.dup(), but for
sockets; on some platforms os.dup() won't work for socket file descriptors."""
pass
class error:
pass
class gaierror:
pass
def getaddrinfo(*args,**kw):
"""getaddrinfo(host, port [, family, socktype, proto, flags]) -> list of (family, socktype, proto, canonname, sockaddr)
Resolve host and port into addrinfo struct."""
pass
def getdefaulttimeout(*args,**kw):
"""getdefaulttimeout() -> timeout
Returns the default timeout in seconds (float) for new socket objects.
A value of None indicates that new socket objects have no timeout.
When the socket module is first imported, the default is None."""
pass
def gethostbyaddr(*args,**kw):
"""gethostbyaddr(host) -> (name, aliaslist, addresslist)
Return the true host name, a list of aliases, and a list of IP addresses,
for a host. The host argument is a string giving a host name or IP number."""
pass
def gethostbyname(*args,**kw):
"""gethostbyname(host) -> address
Return the IP address (a string of the form '255.255.255.255') for a host."""
pass
def gethostbyname_ex(*args,**kw):
"""gethostbyname_ex(host) -> (name, aliaslist, addresslist)
Return the true host name, a list of aliases, and a list of IP addresses,
for a host. The host argument is a string giving a host name or IP number."""
pass
def gethostname(*args,**kw):
"""gethostname() -> string
Return the current host name."""
pass
def getnameinfo(*args,**kw):
"""getnameinfo(sockaddr, flags) --> (host, port)
Get host and port for a sockaddr."""
pass
def getprotobyname(*args,**kw):
"""getprotobyname(name) -> integer
Return the protocol number for the named protocol. (Rarely used.)"""
pass
def getservbyname(*args,**kw):
"""getservbyname(servicename[, protocolname]) -> integer
Return a port number from a service name and protocol name.
The optional protocol name, if given, should be 'tcp' or 'udp',
otherwise any protocol will match."""
pass
def getservbyport(*args,**kw):
"""getservbyport(port[, protocolname]) -> string
Return the service name from a port number and protocol name.
The optional protocol name, if given, should be 'tcp' or 'udp',
otherwise any protocol will match."""
pass
has_ipv6 = True
class herror:
pass
def htonl(*args,**kw):
"""htonl(integer) -> integer
Convert a 32-bit integer from host to network byte order."""
pass
def htons(*args,**kw):
"""htons(integer) -> integer
Convert a 16-bit integer from host to network byte order."""
pass
def inet_aton(*args,**kw):
"""inet_aton(string) -> bytes giving packed 32-bit IP representation
Convert an IP address in string format (123.45.67.89) to the 32-bit packed
binary format used in low-level network functions."""
pass
def inet_ntoa(*args,**kw):
"""inet_ntoa(packed_ip) -> ip_address_string
Convert an IP address from 32-bit packed binary format to string format"""
pass
def ntohl(*args,**kw):
"""ntohl(integer) -> integer
Convert a 32-bit integer from network to host byte order."""
pass
def ntohs(*args,**kw):
"""ntohs(integer) -> integer
Convert a 16-bit integer from network to host byte order."""
pass
def setdefaulttimeout(*args,**kw):
"""setdefaulttimeout(timeout)
Set the default timeout in seconds (float) for new socket objects.
A value of None indicates that new socket objects have no timeout.
When the socket module is first imported, the default is None."""
pass
class socket:
def __init__(self,*args,**kw):
pass
def bind(self,*args,**kw):
pass
def close(self):
pass
class timeout:
pass
| agpl-3.0 |
happy56/kivy | kivy/uix/anchorlayout.py | 3 | 3069 | '''
Anchor Layout
=============
.. only:: html
.. image:: images/anchorlayout.gif
:align: right
.. only:: latex
.. image:: images/anchorlayout.png
:align: right
:class:`AnchorLayout` aligns children to a border (top, bottom, left, right)
or center.
To draw a button in the lower-right corner::
layout = AnchorLayout(
anchor_x='right', anchor_y='bottom')
btn = Button(text='Hello World')
layout.add_widget(btn)
'''
__all__ = ('AnchorLayout', )
from kivy.uix.layout import Layout
from kivy.properties import NumericProperty, OptionProperty
class AnchorLayout(Layout):
'''Anchor layout class. See module documentation for more information.
'''
padding = NumericProperty(0)
'''Padding between widget box and children, in pixels.
:data:`padding` is a :class:`~kivy.properties.NumericProperty`, default to
0.
'''
anchor_x = OptionProperty('center', options=(
'left', 'center', 'right'))
'''Horizontal anchor.
:data:`anchor_x` is an :class:`~kivy.properties.OptionProperty`, default
to 'center'. Can take a value of 'left', 'center' or 'right'.
'''
anchor_y = OptionProperty('center', options=(
'top', 'center', 'bottom'))
'''Vertical anchor.
:data:`anchor_y` is an :class:`~kivy.properties.OptionProperty`, default
to 'center'. Can take a value of 'top', 'center' or 'bottom'.
'''
def __init__(self, **kwargs):
super(AnchorLayout, self).__init__(**kwargs)
self.bind(
children=self._trigger_layout,
parent=self._trigger_layout,
padding=self._trigger_layout,
anchor_x=self._trigger_layout,
anchor_y=self._trigger_layout,
size=self._trigger_layout,
pos=self._trigger_layout)
def do_layout(self, *largs):
_x, _y = self.pos
width = self.width
height = self.height
anchor_x = self.anchor_x
anchor_y = self.anchor_y
padding = self.padding
for c in self.children:
x, y = _x, _y
w, h = c.size
if c.size_hint[0]:
w = c.size_hint[0] * width
elif not self.size_hint[0]:
width = max(width, c.width)
if c.size_hint[1]:
h = c.size_hint[1] * height
elif not self.size_hint[1]:
height = max(height, c.height)
if anchor_x == 'left':
x = x + padding
if anchor_x == 'right':
x = x + width - (w + padding)
            if anchor_x == 'center':
x = x + (width / 2) - (w / 2)
if anchor_y == 'bottom':
y = y + padding
if anchor_y == 'top':
y = y + height - (h + padding)
if anchor_y == 'center':
y = y + (height / 2) - (h / 2)
c.x = x
c.y = y
c.width = w
c.height = h
self.size = (width, height) # might have changed inside loop
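    # Worked example of the anchoring arithmetic above (a sketch with made-up
    # numbers): for a layout at pos (0, 0) with size (400, 300), a fixed-size
    # 100x50 child, padding 10 and anchors ('right', 'bottom'):
    #   x = 0 + 400 - (100 + 10) = 290
    #   y = 0 + 10 = 10
    # so the child lands at (290, 10), hugging the lower-right corner.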
| lgpl-3.0 |
atchariya/django-extensions | tests/test_encrypted_fields.py | 24 | 10268 | import tempfile
from contextlib import contextmanager
import django
import pytest
from django.conf import settings
from django.db import connection, models
from django.test import TestCase
from .testapp.models import Secret
# Only perform encrypted fields tests if keyczar is present. Resolves
# http://github.com/django-extensions/django-extensions/issues/#issue/17
try:
from django_extensions.db.fields.encrypted import EncryptedTextField, EncryptedCharField # NOQA
from keyczar import keyczar, keyczart, keyinfo # NOQA
keyczar_active = True
except ImportError:
keyczar_active = False
# Locations of both private and public keys.
KEY_LOCS = {}
@pytest.fixture(scope="class")
def keyczar_keys(request):
# If KeyCzar is available, set up the environment.
if keyczar_active:
# Create an RSA private key.
        keys_dir = tempfile.mkdtemp("django_extensions_tests_keyczar_rsa_dir")
keyczart.Create(keys_dir, "test", keyinfo.DECRYPT_AND_ENCRYPT, asymmetric=True)
keyczart.AddKey(keys_dir, "PRIMARY", size=4096)
KEY_LOCS['DECRYPT_AND_ENCRYPT'] = keys_dir
# Create an RSA public key.
        pub_dir = tempfile.mkdtemp("django_extensions_tests_keyczar_pub_dir")
keyczart.PubKey(keys_dir, pub_dir)
KEY_LOCS['ENCRYPT'] = pub_dir
# cleanup crypto key temp dirs
def cleanup():
import shutil
for name, path in KEY_LOCS.items():
shutil.rmtree(path)
request.addfinalizer(cleanup)
@contextmanager
def keys(purpose, mode=None):
"""
A context manager that sets up the correct KeyCzar environment for a test.
Arguments:
purpose: Either keyczar.keyinfo.DECRYPT_AND_ENCRYPT or
keyczar.keyinfo.ENCRYPT.
mode: If truthy, settings.ENCRYPTED_FIELD_MODE will be set to (and then
reverted from) this value. If falsy, settings.ENCRYPTED_FIELD_MODE
will not be changed. Optional. Default: None.
Yields:
A Keyczar subclass for the stated purpose. This will be keyczar.Crypter
for DECRYPT_AND_ENCRYPT or keyczar.Encrypter for ENCRYPT. In addition,
settings.ENCRYPTED_FIELD_KEYS_DIR will be set correctly, and then
reverted when the manager exits.
"""
# Store the original settings so we can restore when the manager exits.
orig_setting_dir = getattr(settings, 'ENCRYPTED_FIELD_KEYS_DIR', None)
orig_setting_mode = getattr(settings, 'ENCRYPTED_FIELD_MODE', None)
try:
if mode:
settings.ENCRYPTED_FIELD_MODE = mode
if purpose == keyinfo.DECRYPT_AND_ENCRYPT:
settings.ENCRYPTED_FIELD_KEYS_DIR = KEY_LOCS['DECRYPT_AND_ENCRYPT']
yield keyczar.Crypter.Read(settings.ENCRYPTED_FIELD_KEYS_DIR)
else:
settings.ENCRYPTED_FIELD_KEYS_DIR = KEY_LOCS['ENCRYPT']
yield keyczar.Encrypter.Read(settings.ENCRYPTED_FIELD_KEYS_DIR)
except:
raise # Reraise any exceptions.
finally:
# Restore settings.
settings.ENCRYPTED_FIELD_KEYS_DIR = orig_setting_dir
if mode:
if orig_setting_mode:
settings.ENCRYPTED_FIELD_MODE = orig_setting_mode
else:
del settings.ENCRYPTED_FIELD_MODE
@contextmanager
def secret_model():
"""
A context manager that yields a Secret model defined at runtime.
All EncryptedField init logic occurs at model class definition time, not at
object instantiation time. This means that in order to test different keys
and modes, we must generate a new class definition at runtime, after
establishing the correct KeyCzar settings. This context manager handles
that process.
See http://dynamic-models.readthedocs.org/en/latest/ and
https://docs.djangoproject.com/en/dev/topics/db/models/
#differences-between-proxy-inheritance-and-unmanaged-models
"""
try:
# Create a new class that shadows tests.models.Secret.
attrs = {
'name': EncryptedCharField("Name", max_length=Secret._meta.get_field('name').max_length),
'text': EncryptedTextField("Text"),
'__module__': 'tests.testapp.models',
'Meta': type('Meta', (object, ), {
'managed': False,
'db_table': Secret._meta.db_table
})
}
yield type('Secret', (models.Model, ), attrs)
except:
raise # Reraise any exceptions.
@pytest.mark.skipif(keyczar_active is False or django.VERSION < (1, 7),
reason="Encrypted fields needs that keyczar is installed")
@pytest.mark.usefixtures("admin_user", "keyczar_keys")
class EncryptedFieldsTestCase(TestCase):
def test_char_field_create(self):
"""
Uses a private key to encrypt data on model creation.
Verifies the data is encrypted in the database and can be decrypted.
"""
with keys(keyinfo.DECRYPT_AND_ENCRYPT) as crypt:
with secret_model() as model:
test_val = "Test Secret"
secret = model.objects.create(name=test_val)
cursor = connection.cursor()
query = "SELECT name FROM %s WHERE id = %d" % (model._meta.db_table, secret.id)
cursor.execute(query)
db_val, = cursor.fetchone()
decrypted_val = crypt.Decrypt(db_val[len(EncryptedCharField.prefix):])
self.assertEqual(test_val, decrypted_val)
def test_char_field_read(self):
"""
Uses a private key to encrypt data on model creation.
Verifies the data is decrypted when reading the value back from the
model.
"""
with keys(keyinfo.DECRYPT_AND_ENCRYPT):
with secret_model() as model:
test_val = "Test Secret"
secret = model.objects.create(name=test_val)
retrieved_secret = model.objects.get(id=secret.id)
self.assertEqual(test_val, retrieved_secret.name)
def test_text_field_create(self):
"""
Uses a private key to encrypt data on model creation.
Verifies the data is encrypted in the database and can be decrypted.
"""
with keys(keyinfo.DECRYPT_AND_ENCRYPT) as crypt:
with secret_model() as model:
test_val = "Test Secret"
secret = model.objects.create(text=test_val)
cursor = connection.cursor()
query = "SELECT text FROM %s WHERE id = %d" % (model._meta.db_table, secret.id)
cursor.execute(query)
db_val, = cursor.fetchone()
decrypted_val = crypt.Decrypt(db_val[len(EncryptedCharField.prefix):])
self.assertEqual(test_val, decrypted_val)
def test_text_field_read(self):
"""
Uses a private key to encrypt data on model creation.
Verifies the data is decrypted when reading the value back from the
model.
"""
with keys(keyinfo.DECRYPT_AND_ENCRYPT):
with secret_model() as model:
test_val = "Test Secret"
secret = model.objects.create(text=test_val)
retrieved_secret = model.objects.get(id=secret.id)
self.assertEqual(test_val, retrieved_secret.text)
def test_cannot_decrypt(self):
"""
Uses a public key to encrypt data on model creation.
Verifies that the data cannot be decrypted using the same key.
"""
with keys(keyinfo.ENCRYPT, mode=keyinfo.ENCRYPT.name):
with secret_model() as model:
test_val = "Test Secret"
secret = model.objects.create(name=test_val)
retrieved_secret = model.objects.get(id=secret.id)
self.assertNotEqual(test_val, retrieved_secret.name)
self.assertTrue(retrieved_secret.name.startswith(EncryptedCharField.prefix))
def test_unacceptable_purpose(self):
"""
Tries to create an encrypted field with a mode mismatch.
A purpose of "DECRYPT_AND_ENCRYPT" cannot be used with a public key,
since public keys cannot be used for decryption. This should raise an
exception.
"""
with self.assertRaises(keyczar.errors.KeyczarError):
with keys(keyinfo.ENCRYPT):
with secret_model():
# A KeyCzar exception should get raised during class
# definition time, so any code in here would never get run.
pass
def test_decryption_forbidden(self):
"""
Uses a private key to encrypt data, but decryption is not allowed.
ENCRYPTED_FIELD_MODE is explicitly set to ENCRYPT, meaning data should
not be decrypted, even though the key would allow for it.
"""
with keys(keyinfo.DECRYPT_AND_ENCRYPT, mode=keyinfo.ENCRYPT.name):
with secret_model() as model:
test_val = "Test Secret"
secret = model.objects.create(name=test_val)
retrieved_secret = model.objects.get(id=secret.id)
self.assertNotEqual(test_val, retrieved_secret.name)
self.assertTrue(retrieved_secret.name.startswith(EncryptedCharField.prefix))
def test_encrypt_public_decrypt_private(self):
"""
Uses a public key to encrypt, and a private key to decrypt data.
"""
test_val = "Test Secret"
# First, encrypt data with public key and save to db.
with keys(keyinfo.ENCRYPT, mode=keyinfo.ENCRYPT.name):
with secret_model() as model:
secret = model.objects.create(name=test_val)
enc_retrieved_secret = model.objects.get(id=secret.id)
self.assertNotEqual(test_val, enc_retrieved_secret.name)
self.assertTrue(enc_retrieved_secret.name.startswith(EncryptedCharField.prefix))
# Next, retrieve data from db, and decrypt with private key.
with keys(keyinfo.DECRYPT_AND_ENCRYPT):
with secret_model() as model:
retrieved_secret = model.objects.get(id=secret.id)
self.assertEqual(test_val, retrieved_secret.name)
| mit |
cmpgamer/Sprint3 | recommender.py | 2 | 9706 | import codecs
from math import sqrt
class recommender:
def __init__(self, k=1, n=5):
self.k = k
self.n = n
self.data = {}
self.username2id = {}
self.userid2name = {}
self.productid2name = {}
self.cardinality = {}
self.slopeOneDeviations = {}
        # extra caches used by the similarity- and deviation-based methods
self.artists = {}
self.users = {}
self.normalizedData = {}
self.similarity = {}
self.frequencies = {}
self.deviations = {}
def convertProductID2name(self, id):
if id in self.productid2name:
return self.productid2name[id]
else:
return id
def userRatings(self, id, n):
"""Return n top ratings for user with id"""
print ("Ratings for " + self.userid2name[id])
ratings = self.data[id]
print(len(ratings))
ratings = list(ratings.items())[:n]
ratings = [(self.convertProductID2name(k), v) for (k, v) in ratings]
        # finally sort and print
        ratings.sort(key=lambda artistTuple: artistTuple[1], reverse=True)
for rating in ratings:
print("%s\t%i" % (rating[0], rating[1]))
def showUserTopItems(self, user, n):
""" show top n items for user"""
items = list(self.data[user].items())
items.sort(key=lambda itemTuple: itemTuple[1], reverse=True)
for i in range(n):
print("%s\t%i" % (self.convertProductID2name(items[i][0]),items[i][1]))
def computeDeviations(self):
# for each person in the data:
# get their ratings
for ratings in self.data.values():
# for each item & rating in that set of ratings:
for (item, rating) in ratings.items():
self.frequencies.setdefault(item, {})
self.deviations.setdefault(item, {})
# for each item2 & rating2 in that set of ratings:
for (item2, rating2) in ratings.items():
if item != item2:
# add the difference between the ratings to our
# computation
self.frequencies[item].setdefault(item2, 0)
self.deviations[item].setdefault(item2, 0.0)
self.frequencies[item][item2] += 1
self.deviations[item][item2] += rating - rating2
for (item, ratings) in self.deviations.items():
for item2 in ratings:
ratings[item2] /= self.frequencies[item][item2]
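    # Pearson is computed below with the single-pass "computational" formula
    # over the keys the two users share:
    #   r = (sum_xy - sum_x*sum_y/n) /
    #       (sqrt(sum_x2 - sum_x**2/n) * sqrt(sum_y2 - sum_y**2/n))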
def pearson(self, rating1, rating2):
sum_xy = 0
sum_x = 0
sum_y = 0
sum_x2 = 0
sum_y2 = 0
n = 0
for key in rating1:
if key in rating2:
n += 1
x = rating1[key]
y = rating2[key]
sum_xy += x * y
sum_x += x
sum_y += y
sum_x2 += pow(x, 2)
sum_y2 += pow(y, 2)
if n == 0:
return 0
# now compute denominator
denominator = sqrt(sum_x2 - pow(sum_x, 2) / n) * sqrt(sum_y2 - pow(sum_y, 2) / n)
if denominator == 0:
return 0
else:
return (sum_xy - (sum_x * sum_y) / n) / denominator
def computeNearestNeighbor(self, username):
distances = []
for instance in self.data:
if instance != username:
distance = self.manhattan(self.data[username], self.data[instance])
distances.append((instance, distance))
        # sort based on distance -- closest (smallest) first; users with no
        # ratings in common (distance -1) are pushed to the end
        distances.sort(key=lambda artistTuple: (artistTuple[1] < 0, artistTuple[1]))
return distances
    def recommend(self, user):
        """Give list of recommendations"""
recommendations = {}
# first get list of users ordered by nearness
nearest = self.computeNearestNeighbor(user)
#
# now get the ratings for the user
#
userRatings = self.data[user]
#
# determine the total distance
totalDistance = 0.0
for i in range(self.k):
totalDistance += nearest[i][1]
# now iterate through the k nearest neighbors
# accumulating their ratings
for i in range(self.k):
# compute slice of pie
weight = nearest[i][1] / totalDistance
# get the name of the person
name = nearest[i][0]
# get the ratings for this person
neighborRatings = self.data[name]
# get the name of the person
# now find bands neighbor rated that user didn't
for artist in neighborRatings:
if not artist in userRatings:
if artist not in recommendations:
recommendations[artist] = neighborRatings[artist] * weight
else:
recommendations[artist] = recommendations[artist] + neighborRatings[artist] * weight
        # make a list from the dictionary, sort by score, then keep the top n
        recommendations = list(recommendations.items())
        recommendations = [(self.convertProductID2name(k), v) for (k, v) in recommendations]
        recommendations.sort(key=lambda artistTuple: artistTuple[1], reverse=True)
        recommendations = recommendations[:self.n]
return recommendations
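    # A hedged usage sketch (the sample data below is illustrative; `data`
    # maps user names to {item: rating} dicts, as assumed throughout):
    #
    #   r = recommender(k=1, n=5)
    #   r.data = {'Ann': {'Blues Traveler': 3.5, 'Norah Jones': 4.5},
    #             'Ben': {'Blues Traveler': 2.0, 'Norah Jones': 4.0,
    #                     'Phoenix': 5.0}}
    #   print(r.recommend('Ann'))  # -> [('Phoenix', 5.0)]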
def manhattan(self, rating1, rating2):
"""Computes the Manhattan distance. Both rating1 and rating2 are dictionaries
of the form {'The Strokes': 3.0, 'Slightly Stoopid': 2.5}"""
distance = 0
commonRatings = False
for key in rating1:
if key in rating2:
distance += abs(rating1[key] - rating2[key])
commonRatings = True
if commonRatings:
return distance
else:
return -1 #Indicates no ratings in common
def euclidean(self, rating1, rating2):
"""Computes the euclidean distance. Both rating1 and rating2 are dictionaries
of the form {'The Strokes': 3.0, 'Slightly Stoopid': 2.5}"""
totalDistance = 0
distance = 0
commonRatings = False
for key in rating1:
if key in rating2:
distance = abs(rating1[key] - rating2[key])
totalDistance += pow(distance, 2)
commonRatings = True
if commonRatings:
return pow(totalDistance, .5)
else:
return -1 #Indicates no ratings in common
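    # Adjusted cosine similarity between items i and j, summed over the
    # users u who rated both (mean_u is user u's average rating):
    #   s(i, j) = sum_u (R_ui - mean_u)(R_uj - mean_u)
    #             / (sqrt(sum_u (R_ui - mean_u)^2) * sqrt(sum_u (R_uj - mean_u)^2))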
def computeCosineSimilarity(self):
averages = {}
similarity = {}
#We need the averages for each user for the numerator
for userItem, ratings in self.data.items():
userAvg = sum(ratings.values())/len(ratings)
averages[userItem] = userAvg
for user, value in self.data.items():
#time to do the denominator
for band1 in value:
newTuple = {}
for band2 in value:
numerator = 0
denominator1 = 0
denominator2 = 0
                    if band1 == band2:
continue
for userItem, ratings in self.data.items():
if band1 in ratings and band2 in ratings:
userAvg = averages[userItem]
numerator += (ratings[band1] - userAvg) * (ratings[band2] - userAvg)
denominator1 += (ratings[band1] - userAvg)**2
denominator2 += (ratings[band2] - userAvg)**2
                    finalD = sqrt(denominator1) * sqrt(denominator2)
                    try:
                        newTuple[band2] = numerator / finalD
                    except ZeroDivisionError:
                        newTuple[band2] = 0
similarity[band1] = newTuple
self.similarity = similarity
#print(similarity)
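    # normalizeData maps a rating R on [minNum, maxNum] onto [-1, 1]:
    #   NR = (2*(R - minNum) - (maxNum - minNum)) / (maxNum - minNum)
    # e.g. on a 1-5 scale, R=3 -> 0.0 and R=5 -> 1.0. recommendCosine below
    # applies the inverse, R = 0.5*(NR + 1)*(maxNum - minNum) + minNum.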
def normalizeData(self, minNum, maxNum):
normalized = {}
listOfUsers = self.data.items()
for userItem, ratings in listOfUsers:
newTuple = {}
            # iterate over the bands this user actually rated
            for band in ratings:
                normalRate = (2*(ratings[band] - minNum) - (maxNum - minNum))/(maxNum - minNum)
                newTuple[band] = normalRate
normalized[userItem] = newTuple
self.normalizedData = normalized
#print(normalized)
def recommendCosine(self, minNum, maxNum):
normalized = self.normalizedData
similarity = self.similarity
finalRatings = {}
        for user, userRatings in normalized.items():
            finalRatings[user] = {}
            # iterate over items (the similarity matrix keys), not over users
            for artist in similarity:
                if artist not in userRatings:
                    numerator = 0
                    denominator = 0
                    for otherArtist in similarity:
                        if otherArtist in userRatings and otherArtist in similarity[artist]:
                            numerator += (userRatings[otherArtist] * similarity[artist][otherArtist])
                            denominator += abs(similarity[artist][otherArtist])
                    if denominator:
                        finalRatings[user][artist] = .5*((numerator/denominator) + 1)*(maxNum-minNum) + minNum
print(finalRatings) | mit |
mush42/mezzanine | mezzanine/blog/templatetags/blog_tags.py | 39 | 3206 | from __future__ import unicode_literals
from datetime import datetime
from django.contrib.auth import get_user_model
from django.db.models import Count, Q
from mezzanine.blog.forms import BlogPostForm
from mezzanine.blog.models import BlogPost, BlogCategory
from mezzanine.generic.models import Keyword
from mezzanine import template
User = get_user_model()
register = template.Library()
@register.as_tag
def blog_months(*args):
"""
Put a list of dates for blog posts into the template context.
"""
dates = BlogPost.objects.published().values_list("publish_date", flat=True)
date_dicts = [{"date": datetime(d.year, d.month, 1)} for d in dates]
month_dicts = []
for date_dict in date_dicts:
if date_dict not in month_dicts:
month_dicts.append(date_dict)
for i, date_dict in enumerate(month_dicts):
month_dicts[i]["post_count"] = date_dicts.count(date_dict)
return month_dicts
@register.as_tag
def blog_categories(*args):
"""
Put a list of categories for blog posts into the template context.
"""
posts = BlogPost.objects.published()
categories = BlogCategory.objects.filter(blogposts__in=posts)
return list(categories.annotate(post_count=Count("blogposts")))
@register.as_tag
def blog_authors(*args):
"""
Put a list of authors (users) for blog posts into the template context.
"""
blog_posts = BlogPost.objects.published()
authors = User.objects.filter(blogposts__in=blog_posts)
return list(authors.annotate(post_count=Count("blogposts")))
@register.as_tag
def blog_recent_posts(limit=5, tag=None, username=None, category=None):
"""
Put a list of recently published blog posts into the template
context. A tag title or slug, category title or slug or author's
username can also be specified to filter the recent posts returned.
Usage::
{% blog_recent_posts 5 as recent_posts %}
{% blog_recent_posts limit=5 tag="django" as recent_posts %}
{% blog_recent_posts limit=5 category="python" as recent_posts %}
{% blog_recent_posts 5 username=admin as recent_posts %}
"""
blog_posts = BlogPost.objects.published().select_related("user")
title_or_slug = lambda s: Q(title=s) | Q(slug=s)
if tag is not None:
try:
tag = Keyword.objects.get(title_or_slug(tag))
blog_posts = blog_posts.filter(keywords__keyword=tag)
except Keyword.DoesNotExist:
return []
if category is not None:
try:
category = BlogCategory.objects.get(title_or_slug(category))
blog_posts = blog_posts.filter(categories=category)
except BlogCategory.DoesNotExist:
return []
if username is not None:
try:
author = User.objects.get(username=username)
blog_posts = blog_posts.filter(user=author)
except User.DoesNotExist:
return []
return list(blog_posts[:limit])
@register.inclusion_tag("admin/includes/quick_blog.html", takes_context=True)
def quick_blog(context):
"""
Admin dashboard tag for the quick blog form.
"""
context["form"] = BlogPostForm()
return context
| bsd-2-clause |
goodwinnk/intellij-community | python/helpers/pydev/tests_pydevd/test_check_pydevconsole.py | 23 | 4608 | import threading
import unittest
import os
import pytest
import pydevconsole
from _pydev_bundle.pydev_imports import xmlrpclib, SimpleXMLRPCServer
from _pydev_bundle.pydev_localhost import get_localhost
try:
raw_input
raw_input_name = 'raw_input'
except NameError:
raw_input_name = 'input'
try:
from IPython import core # @UnusedImport
has_ipython = True
except:
has_ipython = False
#=======================================================================================================================
# Test
#=======================================================================================================================
@pytest.mark.skipif(os.environ.get('TRAVIS') == 'true' or not has_ipython, reason='Too flaky on Travis (and requires IPython).')
class Test(unittest.TestCase):
def start_client_thread(self, client_port):
class ClientThread(threading.Thread):
def __init__(self, client_port):
threading.Thread.__init__(self)
self.client_port = client_port
def run(self):
class HandleRequestInput:
def RequestInput(self):
client_thread.requested_input = True
return 'RequestInput: OK'
def NotifyFinished(self, *args, **kwargs):
client_thread.notified_finished += 1
return 1
handle_request_input = HandleRequestInput()
from _pydev_bundle import pydev_localhost
self.client_server = client_server = SimpleXMLRPCServer((pydev_localhost.get_localhost(), self.client_port), logRequests=False)
client_server.register_function(handle_request_input.RequestInput)
client_server.register_function(handle_request_input.NotifyFinished)
client_server.serve_forever()
def shutdown(self):
return
self.client_server.shutdown()
client_thread = ClientThread(client_port)
client_thread.requested_input = False
client_thread.notified_finished = 0
client_thread.setDaemon(True)
client_thread.start()
return client_thread
def get_free_addresses(self):
from _pydev_bundle.pydev_localhost import get_socket_names
socket_names = get_socket_names(2, close=True)
return [socket_name[1] for socket_name in socket_names]
def test_server(self):
# Just making sure that the singleton is created in this thread.
from _pydev_bundle.pydev_ipython_console_011 import get_pydev_frontend
get_pydev_frontend(get_localhost(), 0)
client_port, server_port = self.get_free_addresses()
class ServerThread(threading.Thread):
def __init__(self, client_port, server_port):
threading.Thread.__init__(self)
self.client_port = client_port
self.server_port = server_port
def run(self):
from _pydev_bundle import pydev_localhost
print('Starting server with:', pydev_localhost.get_localhost(), self.server_port, self.client_port)
pydevconsole.start_server(pydev_localhost.get_localhost(), self.server_port, self.client_port)
server_thread = ServerThread(client_port, server_port)
server_thread.setDaemon(True)
server_thread.start()
client_thread = self.start_client_thread(client_port) #@UnusedVariable
try:
import time
time.sleep(.3) #let's give it some time to start the threads
from _pydev_bundle import pydev_localhost
server = xmlrpclib.Server('http://%s:%s' % (pydev_localhost.get_localhost(), server_port))
server.execLine("import sys; print('Running with: %s %s' % (sys.executable or sys.platform, sys.version))")
server.execLine('class Foo:')
server.execLine(' pass')
server.execLine('')
server.execLine('foo = Foo()')
server.execLine('a = %s()' % raw_input_name)
initial = time.time()
while not client_thread.requested_input:
if time.time() - initial > 2:
                    raise AssertionError('Did not get the requested input before the timeout.')
time.sleep(.1)
frame_xml = server.getFrame()
            self.assertTrue('RequestInput' in frame_xml, 'Did not find RequestInput in:\n%s' % (frame_xml,))
finally:
client_thread.shutdown()
| apache-2.0 |
dbadb/2016-Stronghold | src/python/robot/commands/portcullisCmds.py | 1 | 5529 | from wpilib.command import CommandGroup
from wpilib.command import Command
# portcullis groups -------------------------------------------------
# A command group requires all of the subsystems that each member requires.
k_timeOut = 2
class MoveUp(CommandGroup):
def __init__(self, robot, name=None):
super().__init__(name)
self.addParallel(MoveUpRight(robot, "pur", k_timeOut))
self.addParallel(MoveUpLeft(robot, "pul", k_timeOut))
class MoveDown(CommandGroup):
def __init__(self, robot, name=None):
super().__init__(name)
self.addParallel(MoveDownRight(robot, "pdr", k_timeOut))
        self.addParallel(MoveDownLeft(robot, "pdl", k_timeOut))
# portcullis commands ---------------------------------------------
class MoveUpRight(Command):
def __init__(self, robot, name=None, timeout=None):
super().__init__(name, timeout)
self.robot = robot
self.portcullis = robot.portcullis
self.requires(self.portcullis)
def initialize(self): # called just before command runs the first time
pass
def execute(self): # called repeatedly while scheduled
self.portcullis.moveRightUp()
def isFinished(self):
return self.portcullis.rightAtTop()
def end(self): # called once after isFinished returns true
self.portcullis.stopRight()
def interrupted(self):
self.robot.log("portcullis.MoveUpRight interrupted")
        self.end()
class MoveDownRight(Command):
def __init__(self, robot, name=None, timeout=None):
super().__init__(name, timeout)
self.robot = robot
self.portcullis = robot.portcullis
self.requires(self.portcullis)
def initialize(self): # called just before command runs the first time
pass
def execute(self): # called repeatedly while scheduled
self.portcullis.moveRightDown()
def isFinished(self):
return self.portcullis.rightAtBottom()
def end(self): # called once after isFinished returns true
self.portcullis.stopRight()
def interrupted(self):
self.robot.log("portcullis.MoveDownRight interrupted")
        self.end()
class MoveUpLeft(Command):
def __init__(self, robot, name=None, timeout=None):
super().__init__(name, timeout)
self.robot = robot
self.portcullis = robot.portcullis
self.requires(self.portcullis)
def initialize(self): # called just before command runs the first time
pass
def execute(self): # called repeatedly while scheduled
self.portcullis.moveLeftUp()
def isFinished(self):
return self.portcullis.leftAtTop()
def end(self): # called once after isFinished returns true
self.portcullis.stopLeft()
def interrupted(self):
self.robot.log("portcullis.MoveUpLeft interrupted")
        self.end()
class MoveDownLeft(Command):
def __init__(self, robot, name=None, timeout=None):
super().__init__(name, timeout)
self.robot = robot
self.portcullis = robot.portcullis
self.requires(self.portcullis)
def initialize(self): # called just before command runs the first time
pass
def execute(self): # called repeatedly while scheduled
self.portcullis.moveLeftDown()
def isFinished(self):
        return self.portcullis.leftAtBottom()
def end(self): # called once after isFinished returns true
self.portcullis.stopLeft()
def interrupted(self):
self.robot.log("portcullis.MoveDownLeft interrupted")
        self.end()
class BarIn(Command):
def __init__(self, robot, name=None, timeout=None):
super().__init__(name, timeout)
self.robot = robot
self.portcullis = robot.portcullis
self.requires(self.portcullis)
def initialize(self): # called just before command runs the first time
pass
def execute(self): # called repeatedly while scheduled
self.portcullis.barIn()
def isFinished(self):
return False
def end(self): # called once after isFinished returns true
self.portcullis.stopBar()
def interrupted(self):
self.robot.log("portcullis.BardIn interrupted")
        self.end()
class BarOut(Command):
def __init__(self, robot, name=None, timeout=None):
super().__init__(name, timeout)
self.robot = robot
self.portcullis = robot.portcullis
self.requires(self.portcullis)
def initialize(self): # called just before command runs the first time
pass
def execute(self): # called repeatedly while scheduled
self.portcullis.barOut()
def isFinished(self):
return False
def end(self): # called once after isFinished returns true
self.portcullis.stopBar()
def interrupted(self):
self.robot.log("portcullis.BarOut interrupted")
        self.end()
class BarStop(Command):
def __init__(self, robot, name=None, timeout=None):
super().__init__(name, timeout)
self.robot = robot
self.portcullis = robot.portcullis
self.requires(self.portcullis)
def initialize(self): # called just before command runs the first time
pass
def execute(self): # called repeatedly while scheduled
self.portcullis.stopBar()
def isFinished(self):
return True
def end(self): # called once after isFinished returns true
self.portcullis.stopBar()
def interrupted(self):
self.robot.log("portcullis.BarStop interrupted")
        self.end()
| mit |
Johnetordoff/osf.io | scripts/analytics/base.py | 14 | 7997 | import time
import logging
import argparse
import importlib
from datetime import datetime, timedelta
from dateutil.parser import parse
from django.utils import timezone
from website.app import init_app
from website.settings import KEEN as keen_settings
from keen.client import KeenClient
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class BaseAnalytics(object):
@property
def collection_name(self):
raise NotImplementedError('Must specify a Keen event collection name')
@property
def analytic_type(self):
raise NotImplementedError('Must specify the analytic type for logging purposes')
def get_events(self, date):
raise NotImplementedError('You must define a get_events method to gather analytic events')
def send_events(self, events):
keen_project = keen_settings['private']['project_id']
write_key = keen_settings['private']['write_key']
if keen_project and write_key:
client = KeenClient(
project_id=keen_project,
write_key=write_key,
)
logger.info('Adding {} events to the {} collection'.format(len(events), self.collection_name))
client.add_events({self.collection_name: events})
else:
logger.info('Keen not enabled - would otherwise be adding the following {} events to the {} collection'.format(len(events), self.collection_name))
print(events)
class SnapshotAnalytics(BaseAnalytics):
@property
def analytic_type(self):
return 'snapshot'
def get_events(self, date=None):
if date:
raise AttributeError('Snapshot analytics may not be called with a date.')
logger.info('Gathering {} analytics for the {} collection'.format(self.analytic_type, self.collection_name))
class SummaryAnalytics(BaseAnalytics):
@property
def analytic_type(self):
return 'summary'
def get_events(self, date):
# Date must be specified, must be a date (not a datetime), and must not be today or in the future
if not date:
raise AttributeError('Script must be called with a date to gather analytics.')
today = timezone.now().date()
if date >= today:
raise AttributeError('Script cannot be called for the same day, or for a date in the future.')
if type(date) != type(today):
raise AttributeError('Please call the script using a date object, not a datetime object')
logger.info('Gathering {} analytics for the {} collection for {}'.format(
self.analytic_type,
self.collection_name,
date.isoformat()
))
def parse_args(self):
parser = argparse.ArgumentParser(
description='Enter the date to gather {} analytics for the {} collection'.format(
self.analytic_type,
self.collection_name
)
)
parser.add_argument('-d', '--date', dest='date')
parser.add_argument('-y', '--yesterday', dest='yesterday', action='store_true')
return parser.parse_args()
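# A hedged subclass sketch (the collection name and payload below are
# illustrative, not an actual OSF analytics script):
#
#     class UserSummary(SummaryAnalytics):
#         @property
#         def collection_name(self):
#             return 'user_summary'
#
#         def get_events(self, date):
#             super(UserSummary, self).get_events(date)  # validates `date`
#             return [{'date': date.isoformat(), 'users': {'total': 0}}]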
class EventAnalytics(SummaryAnalytics):
@property
def analytic_type(self):
return 'event'
def yield_chunked_events(self, events):
""" The keen API likes events in chunks no bigger than 5000 -
Only yield that many at a time.
"""
for i in range(0, len(events), 5000):
yield events[i:i + 5000]
def send_events(self, events):
keen_project = keen_settings['private']['project_id']
write_key = keen_settings['private']['write_key']
if keen_project and write_key:
client = KeenClient(
project_id=keen_project,
write_key=write_key,
)
logger.info('Adding {} events to the {} collection'.format(len(events), self.collection_name))
for chunk in self.yield_chunked_events(events):
client.add_events({self.collection_name: chunk})
time.sleep(1)
else:
logger.info(
'Keen not enabled - would otherwise be adding the following {} events to the {} collection'.format(
len(events), self.collection_name
)
)
print(events)
class BaseAnalyticsHarness(object):
def __init__(self):
init_app(routes=False)
@property
def analytics_classes(self):
raise NotImplementedError('Please specify a default set of classes to run with this analytics harness')
def parse_args(self):
parser = argparse.ArgumentParser(description='Populate keen analytics!')
parser.add_argument(
'-as', '--analytics_scripts', nargs='+', dest='analytics_scripts', required=False,
help='Enter the names of scripts inside scripts/analytics you would like to run separated by spaces (ex: -as user_summary node_summary)'
)
return parser.parse_args()
def try_to_import_from_args(self, entered_scripts):
imported_script_classes = []
for script in entered_scripts:
try:
script_events = importlib.import_module('scripts.analytics.{}'.format(script))
imported_script_classes.append(script_events.get_class())
except (ImportError, NameError) as e:
logger.error(e)
logger.error(
'Error importing script - make sure the script specified is inside of scripts/analytics. '
'Also make sure the main analytics class name is the same as the script name but in camel case. '
'For example, the script named scripts/analytics/addon_snapshot.py has class AddonSnapshot'
)
return imported_script_classes
def main(self, command_line=True):
analytics_classes = self.analytics_classes
if command_line:
args = self.parse_args()
if args.analytics_scripts:
analytics_classes = self.try_to_import_from_args(args.analytics_scripts)
for analytics_class in analytics_classes:
class_instance = analytics_class()
events = class_instance.get_events()
class_instance.send_events(events)
class DateAnalyticsHarness(BaseAnalyticsHarness):
def parse_args(self):
parser = argparse.ArgumentParser(description='Populate keen analytics!')
parser.add_argument(
'-as', '--analytics_scripts', nargs='+', dest='analytics_scripts', required=False,
help='Enter the names of scripts inside scripts/analytics you would like to run separated by spaces (ex: -as user_summary node_summary)'
)
parser.add_argument('-d', '--date', dest='date', required=False)
parser.add_argument('-y', '--yesterday', dest='yesterday', action='store_true')
return parser.parse_args()
def main(self, date=None, yesterday=False, command_line=True):
analytics_classes = self.analytics_classes
if yesterday:
date = (timezone.now() - timedelta(days=1)).date()
if command_line:
args = self.parse_args()
if args.yesterday:
date = (timezone.now() - timedelta(days=1)).date()
if not date:
try:
date = parse(args.date).date()
except AttributeError:
raise AttributeError('You must either specify a date or use the yesterday argument to gather analytics for yesterday.')
if args.analytics_scripts:
analytics_classes = self.try_to_import_from_args(args.analytics_scripts)
for analytics_class in analytics_classes:
class_instance = analytics_class()
events = class_instance.get_events(date)
class_instance.send_events(events)
| apache-2.0 |
martindale/elements | share/rpcuser/rpcuser.py | 115 | 1110 | #!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import hashlib
import sys
import os
from random import SystemRandom
import base64
import hmac
if len(sys.argv) < 2:
sys.stderr.write('Please include username as an argument.\n')
sys.exit(0)
username = sys.argv[1]
#This uses os.urandom() underneath
cryptogen = SystemRandom()
#Create 16 byte hex salt
salt_sequence = [cryptogen.randrange(256) for i in range(16)]
hexseq = ['{0:02x}'.format(x) for x in salt_sequence]
salt = "".join(hexseq)
#Create 32 byte b64 password
password = base64.urlsafe_b64encode(os.urandom(32))
digestmod = hashlib.sha256
if sys.version_info.major >= 3:
password = password.decode('utf-8')
digestmod = 'SHA256'
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), digestmod)
result = m.hexdigest()
print("String to be appended to bitcoin.conf:")
print("rpcauth="+username+":"+salt+"$"+result)
print("Your password:\n"+password)
| mit |
davidlmorton/spikepy | spikepy/common/errors.py | 1 | 2270 | # Copyright (C) 2012 David Morton
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from exceptions import Exception
class SpikepyError(Exception):
'''
Base class for exceptions in Spikepy.
'''
pass
class FeatureDimensionalityError(SpikepyError):
pass
class DataUnavailableError(SpikepyError):
pass
class NoClustersError(SpikepyError):
pass
class NoCurrentStrategyError(SpikepyError):
pass
class NoTasksError(SpikepyError):
pass
class ImpossibleTaskError(SpikepyError):
pass
class TaskCreationError(SpikepyError):
pass
class CannotMarkTrialError(SpikepyError):
pass
class FileInterpretationError(SpikepyError):
pass
class ConfigError(SpikepyError):
pass
class ResourceLockedError(SpikepyError):
pass
class ResourceNotLockedError(SpikepyError):
pass
class InvalidLockingKeyError(SpikepyError):
pass
class AddResourceError(SpikepyError):
pass
class MissingTrialError(SpikepyError):
pass
class MissingResourceError(SpikepyError):
pass
class InvalidValueError(SpikepyError):
pass
class UnknownStageError(SpikepyError):
pass
class MissingPluginError(SpikepyError):
pass
class PluginDefinitionError(SpikepyError):
pass
class UnknownCategoryError(SpikepyError):
pass
class DuplicateStrategyError(SpikepyError):
pass
class MissingStrategyError(SpikepyError):
pass
class ArgumentTypeError(SpikepyError):
pass
class InconsistentNameError(SpikepyError):
pass
class NameForbiddenError(SpikepyError):
pass
class SettingsNameForbiddenError(NameForbiddenError):
pass
class MethodsUsedNameForbiddenError(NameForbiddenError):
pass
| gpl-3.0 |
jbenden/ansible | lib/ansible/modules/network/nxos/nxos_vrf_interface.py | 4 | 8095 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_vrf_interface
extends_documentation_fragment: nxos
version_added: "2.1"
short_description: Manages interface specific VRF configuration.
description:
- Manages interface specific VRF configuration.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- VRF needs to be added globally with M(nxos_vrf) before
adding a VRF to an interface.
- Remove a VRF from an interface will still remove
all L3 attributes just as it does from CLI.
- VRF is not read from an interface until IP address is
configured on that interface.
options:
vrf:
description:
- Name of VRF to be managed.
required: true
interface:
description:
- Full name of interface to be managed, i.e. Ethernet1/1.
required: true
state:
description:
- Manages desired state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: Ensure vrf ntc exists on Eth1/1
nxos_vrf_interface:
vrf: ntc
interface: Ethernet1/1
state: present
- name: Ensure ntc VRF does not exist on Eth1/1
nxos_vrf_interface:
vrf: ntc
interface: Ethernet1/1
state: absent
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["interface loopback16", "vrf member ntc"]
'''
import re
from ansible.module_utils.nxos import load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module):
if 'show run' not in command:
output = 'json'
else:
output = 'text'
cmds = [{
'command': command,
'output': output,
}]
return run_commands(module, cmds)[0]
def get_interface_type(interface):
if interface.upper().startswith('ET'):
return 'ethernet'
elif interface.upper().startswith('VL'):
return 'svi'
elif interface.upper().startswith('LO'):
return 'loopback'
elif interface.upper().startswith('MG'):
return 'management'
elif interface.upper().startswith('MA'):
return 'management'
elif interface.upper().startswith('PO'):
return 'portchannel'
else:
return 'unknown'
def get_interface_mode(interface, intf_type, module):
command = 'show interface {0}'.format(interface)
    mode = 'unknown'
if intf_type in ['ethernet', 'portchannel']:
body = execute_show_command(command, module)
interface_table = body['TABLE_interface']['ROW_interface']
mode = str(interface_table.get('eth_mode', 'layer3'))
if mode == 'access' or mode == 'trunk':
mode = 'layer2'
elif intf_type == 'loopback' or intf_type == 'svi':
mode = 'layer3'
return mode
def get_vrf_list(module):
command = 'show vrf all'
vrf_list = []
body = execute_show_command(command, module)
try:
vrf_table = body['TABLE_vrf']['ROW_vrf']
except (KeyError, AttributeError):
return vrf_list
for each in vrf_table:
vrf_list.append(str(each['vrf_name']))
return vrf_list
def get_interface_info(interface, module):
if not interface.startswith('loopback'):
interface = interface.capitalize()
command = 'show run | section interface.{0}'.format(interface)
vrf_regex = ".*vrf\s+member\s+(?P<vrf>\S+).*"
try:
body = execute_show_command(command, module)
match_vrf = re.match(vrf_regex, body, re.DOTALL)
group_vrf = match_vrf.groupdict()
vrf = group_vrf["vrf"]
except (AttributeError, TypeError):
return ""
return vrf
def is_default(interface, module):
command = 'show run interface {0}'.format(interface)
try:
body = execute_show_command(command, module)
raw_list = body.split('\n')
if raw_list[-1].startswith('interface'):
return True
else:
return False
except (KeyError, IndexError):
return 'DNE'
def main():
argument_spec = dict(
vrf=dict(required=True),
interface=dict(type='str', required=True),
state=dict(default='present', choices=['present', 'absent'], required=False),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
warnings = list()
check_args(module, warnings)
results = {'changed': False, 'commands': [], 'warnings': warnings}
vrf = module.params['vrf']
interface = module.params['interface'].lower()
state = module.params['state']
current_vrfs = get_vrf_list(module)
if vrf not in current_vrfs:
warnings.append("The VRF is not present/active on the device. "
"Use nxos_vrf to fix this.")
intf_type = get_interface_type(interface)
if (intf_type != 'ethernet' and module.params['transport'] == 'cli'):
if is_default(interface, module) == 'DNE':
module.fail_json(msg="interface does not exist on switch. Verify "
"switch platform or create it first with "
"nxos_interface if it's a logical interface")
mode = get_interface_mode(interface, intf_type, module)
if mode == 'layer2':
module.fail_json(msg='Ensure interface is a Layer 3 port before '
'configuring a VRF on an interface. You can '
'use nxos_interface')
proposed = dict(interface=interface, vrf=vrf)
current_vrf = get_interface_info(interface, module)
existing = dict(interface=interface, vrf=current_vrf)
changed = False
end_state = existing
if not existing['vrf']:
pass
elif vrf != existing['vrf'] and state == 'absent':
module.fail_json(msg='The VRF you are trying to remove '
'from the interface does not exist '
'on that interface.',
interface=interface, proposed_vrf=vrf,
existing_vrf=existing['vrf'])
commands = []
if existing:
if state == 'absent':
if existing and vrf == existing['vrf']:
command = 'no vrf member {0}'.format(vrf)
commands.append(command)
elif state == 'present':
if existing['vrf'] != vrf:
command = 'vrf member {0}'.format(vrf)
commands.append(command)
if commands:
commands.insert(0, 'interface {0}'.format(interface))
if commands:
if module.check_mode:
module.exit_json(changed=True, commands=commands)
else:
load_config(module, commands)
changed = True
changed_vrf = get_interface_info(interface, module)
end_state = dict(interface=interface, vrf=changed_vrf)
if 'configure' in commands:
commands.pop(0)
results['commands'] = commands
results['changed'] = changed
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
theopak/glassface | server/glassface/urls.py | 1 | 1575 | from django.conf.urls import patterns, include, url
# Enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
url(r'^$', 'glassface.views.splash', name='home'),
url(r'^twitteradd/([a-zA-Z0-9]+)$', 'glassface.views.twitteradd'),
#url(r'^process$', 'glassface.views.'),
# url(r'^glassface/', include('glassface.foo.urls')),
url(r'^accounts/login/$', 'django.contrib.auth.views.login'),
url(r'^accounts/logout/$', 'django.contrib.auth.views.logout', {'next_page': "/"}),
url(r'^accounts/signup/$', 'glassface.views.create_user'),
url(r'^test_google/(?P<google_user_id>[^/]+)/(?P<circle_id>[^/]+)/$','glassface.views.add_to_circle'),
url(r'^accounts/logout/$', 'django.contrib.auth.views.logout_then_login'),
url(r'^facebook/add/(.+)/$', 'glassface.facebookfriender.views.add'),
url(r'^linkfacebook/$', 'glassface.views.linkfacebook'),
url(r'^destroy/(?P<backend>[^/]+)/$','glassface.views.destroy',name='destroy'),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # The admin is enabled here:
url(r'^admin/', include(admin.site.urls)),
url('', include('social.apps.django_app.urls', namespace='social')),
# inputs from the app
url(r'^app_login/$', 'glassface.views.app_login'),
url(r'^app_identify/$', 'glassface.views.app_identify'),
url(r'^app_confirm/$', 'glassface.views.app_confirm'),
)
| mit |
nuxeh/keystone | keystone/openstack/common/versionutils.py | 6 | 8989 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helpers for comparing version strings.
"""
import copy
import functools
import inspect
import logging
from oslo_config import cfg
import pkg_resources
import six
from keystone.openstack.common._i18n import _
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
deprecated_opts = [
cfg.BoolOpt('fatal_deprecations',
default=False,
help='Enables or disables fatal status of deprecations.'),
]
def list_opts():
"""Entry point for oslo.config-generator.
"""
return [(None, copy.deepcopy(deprecated_opts))]
class deprecated(object):
"""A decorator to mark callables as deprecated.
This decorator logs a deprecation message when the callable it decorates is
used. The message will include the release where the callable was
deprecated, the release where it may be removed and possibly an optional
replacement.
Examples:
1. Specifying the required deprecated release
>>> @deprecated(as_of=deprecated.ICEHOUSE)
... def a(): pass
2. Specifying a replacement:
>>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()')
... def b(): pass
3. Specifying the release where the functionality may be removed:
>>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
... def c(): pass
4. Specifying the deprecated functionality will not be removed:
>>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=0)
... def d(): pass
5. Specifying a replacement, deprecated functionality will not be removed:
>>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()', remove_in=0)
... def e(): pass
"""
# NOTE(morganfainberg): Bexar is used for unit test purposes, it is
# expected we maintain a gap between Bexar and Folsom in this list.
BEXAR = 'B'
FOLSOM = 'F'
GRIZZLY = 'G'
HAVANA = 'H'
ICEHOUSE = 'I'
JUNO = 'J'
KILO = 'K'
LIBERTY = 'L'
_RELEASES = {
# NOTE(morganfainberg): Bexar is used for unit test purposes, it is
# expected we maintain a gap between Bexar and Folsom in this list.
'B': 'Bexar',
'F': 'Folsom',
'G': 'Grizzly',
'H': 'Havana',
'I': 'Icehouse',
'J': 'Juno',
'K': 'Kilo',
'L': 'Liberty',
}
_deprecated_msg_with_alternative = _(
'%(what)s is deprecated as of %(as_of)s in favor of '
'%(in_favor_of)s and may be removed in %(remove_in)s.')
_deprecated_msg_no_alternative = _(
'%(what)s is deprecated as of %(as_of)s and may be '
'removed in %(remove_in)s. It will not be superseded.')
_deprecated_msg_with_alternative_no_removal = _(
'%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s.')
_deprecated_msg_with_no_alternative_no_removal = _(
'%(what)s is deprecated as of %(as_of)s. It will not be superseded.')
def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
"""Initialize decorator
:param as_of: the release deprecating the callable. Constants
            are defined in this class for convenience.
:param in_favor_of: the replacement for the callable (optional)
:param remove_in: an integer specifying how many releases to wait
before removing (default: 2)
:param what: name of the thing being deprecated (default: the
callable's name)
"""
self.as_of = as_of
self.in_favor_of = in_favor_of
self.remove_in = remove_in
self.what = what
def __call__(self, func_or_cls):
if not self.what:
self.what = func_or_cls.__name__ + '()'
msg, details = self._build_message()
if inspect.isfunction(func_or_cls):
@six.wraps(func_or_cls)
def wrapped(*args, **kwargs):
report_deprecated_feature(LOG, msg, details)
return func_or_cls(*args, **kwargs)
return wrapped
elif inspect.isclass(func_or_cls):
orig_init = func_or_cls.__init__
# TODO(tsufiev): change `functools` module to `six` as
# soon as six 1.7.4 (with fix for passing `assigned`
# argument to underlying `functools.wraps`) is released
            # and added to the oslo-incubator requirements
@functools.wraps(orig_init, assigned=('__name__', '__doc__'))
def new_init(self, *args, **kwargs):
report_deprecated_feature(LOG, msg, details)
orig_init(self, *args, **kwargs)
func_or_cls.__init__ = new_init
return func_or_cls
else:
raise TypeError('deprecated can be used only with functions or '
'classes')
def _get_safe_to_remove_release(self, release):
# TODO(dstanek): this method will have to be reimplemented once
# when we get to the X release because once we get to the Y
# release, what is Y+2?
new_release = chr(ord(release) + self.remove_in)
if new_release in self._RELEASES:
return self._RELEASES[new_release]
else:
return new_release
def _build_message(self):
details = dict(what=self.what,
as_of=self._RELEASES[self.as_of],
remove_in=self._get_safe_to_remove_release(self.as_of))
if self.in_favor_of:
details['in_favor_of'] = self.in_favor_of
if self.remove_in > 0:
msg = self._deprecated_msg_with_alternative
else:
# There are no plans to remove this function, but it is
# now deprecated.
msg = self._deprecated_msg_with_alternative_no_removal
else:
if self.remove_in > 0:
msg = self._deprecated_msg_no_alternative
else:
# There are no plans to remove this function, but it is
# now deprecated.
msg = self._deprecated_msg_with_no_alternative_no_removal
return msg, details
def is_compatible(requested_version, current_version, same_major=True):
"""Determine whether `requested_version` is satisfied by
`current_version`; in other words, `current_version` is >=
`requested_version`.
:param requested_version: version to check for compatibility
:param current_version: version to check against
:param same_major: if True, the major version must be identical between
`requested_version` and `current_version`. This is used when a
major-version difference indicates incompatibility between the two
versions. Since this is the common-case in practice, the default is
True.
:returns: True if compatible, False if not
"""
requested_parts = pkg_resources.parse_version(requested_version)
current_parts = pkg_resources.parse_version(current_version)
if same_major and (requested_parts[0] != current_parts[0]):
return False
return current_parts >= requested_parts
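# Illustrative examples:
#   is_compatible('2.1', '2.2') -> True  (2.2 satisfies >= 2.1)
#   is_compatible('2.1', '3.0') -> False (major versions differ)
#   is_compatible('2.1', '3.0', same_major=False) -> True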
# Track the messages we have sent already. See
# report_deprecated_feature().
_deprecated_messages_sent = {}
def report_deprecated_feature(logger, msg, *args, **kwargs):
"""Call this function when a deprecated feature is used.
If the system is configured for fatal deprecations then the message
is logged at the 'critical' level and :class:`DeprecatedConfig` will
be raised.
Otherwise, the message will be logged (once) at the 'warn' level.
:raises: :class:`DeprecatedConfig` if the system is configured for
fatal deprecations.
"""
stdmsg = _("Deprecated: %s") % msg
CONF.register_opts(deprecated_opts)
if CONF.fatal_deprecations:
logger.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
# Using a list because a tuple with dict can't be stored in a set.
sent_args = _deprecated_messages_sent.setdefault(msg, list())
if args in sent_args:
# Already logged this message, so don't log it again.
return
sent_args.append(args)
logger.warn(stdmsg, *args, **kwargs)
class DeprecatedConfig(Exception):
message = _("Fatal call to deprecated config: %(msg)s")
def __init__(self, msg):
super(Exception, self).__init__(self.message % dict(msg=msg))
| apache-2.0 |
spaceof7/QGIS | tests/src/python/test_authmanager_password_postgres.py | 21 | 8707 | # -*- coding: utf-8 -*-
"""
Tests for auth manager Password access to postgres.
This is an integration test for QGIS Desktop Auth Manager postgres provider that
checks if QGIS can use a stored auth manager auth configuration to access
a Password protected postgres.
Configuration from the environment:
* QGIS_POSTGRES_SERVER_PORT (default: 55432)
* QGIS_POSTGRES_EXECUTABLE_PATH (default: /usr/lib/postgresql/9.4/bin)
From build dir, run: ctest -R PyQgsAuthManagerPasswordPostgresTest -V
or, if your PostgreSQL path differs from the default:
QGIS_POSTGRES_EXECUTABLE_PATH=/usr/lib/postgresql/<your_version_goes_here>/bin \
ctest -R PyQgsAuthManagerPasswordPostgresTest -V
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import os
import time
import signal
import stat
import subprocess
import tempfile
from shutil import rmtree
from utilities import unitTestDataPath
from qgis.core import (
QgsApplication,
QgsAuthManager,
QgsAuthMethodConfig,
QgsVectorLayer,
QgsDataSourceUri,
QgsWkbTypes,
)
from qgis.PyQt.QtNetwork import QSslCertificate
from qgis.testing import (
start_app,
unittest,
)
__author__ = 'Alessandro Pasotti'
__date__ = '25/10/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
QGIS_POSTGRES_SERVER_PORT = os.environ.get('QGIS_POSTGRES_SERVER_PORT', '55432')
QGIS_POSTGRES_EXECUTABLE_PATH = os.environ.get('QGIS_POSTGRES_EXECUTABLE_PATH', '/usr/lib/postgresql/9.4/bin')
assert os.path.exists(QGIS_POSTGRES_EXECUTABLE_PATH)
QGIS_AUTH_DB_DIR_PATH = tempfile.mkdtemp()
# Postgres test path
QGIS_PG_TEST_PATH = tempfile.mkdtemp()
os.environ['QGIS_AUTH_DB_DIR_PATH'] = QGIS_AUTH_DB_DIR_PATH
qgis_app = start_app()
QGIS_POSTGRES_CONF_TEMPLATE = """
hba_file = '%(tempfolder)s/pg_hba.conf'
listen_addresses = '*'
port = %(port)s
max_connections = 100
unix_socket_directories = '%(tempfolder)s'
ssl = true
ssl_ciphers = 'DEFAULT:!LOW:!EXP:!MD5:@STRENGTH' # allowed SSL ciphers
ssl_cert_file = '%(server_cert)s'
ssl_key_file = '%(server_key)s'
ssl_ca_file = '%(sslrootcert_path)s'
password_encryption = on
"""
QGIS_POSTGRES_HBA_TEMPLATE = """
hostssl all all 0.0.0.0/0 md5
hostssl all all ::1/0 md5
host all all 127.0.0.1/32 trust
host all all ::1/32 trust
"""
class TestAuthManager(unittest.TestCase):
@classmethod
def setUpAuth(cls):
"""Run before all tests and set up authentication"""
authm = QgsApplication.authManager()
assert (authm.setMasterPassword('masterpassword', True))
cls.pg_conf = os.path.join(cls.tempfolder, 'postgresql.conf')
cls.pg_hba = os.path.join(cls.tempfolder, 'pg_hba.conf')
# Client side
cls.sslrootcert_path = os.path.join(cls.certsdata_path, 'chains_subissuer-issuer-root_issuer2-root2.pem')
assert os.path.isfile(cls.sslrootcert_path)
os.chmod(cls.sslrootcert_path, stat.S_IRUSR)
cls.auth_config = QgsAuthMethodConfig("Basic")
cls.auth_config.setConfig('username', cls.username)
cls.auth_config.setConfig('password', cls.password)
cls.auth_config.setName('test_password_auth_config')
cls.sslrootcert = QSslCertificate.fromPath(cls.sslrootcert_path)
assert cls.sslrootcert is not None
authm.storeCertAuthorities(cls.sslrootcert)
authm.rebuildCaCertsCache()
authm.rebuildTrustedCaCertsCache()
authm.rebuildCertTrustCache()
assert (authm.storeAuthenticationConfig(cls.auth_config)[0])
assert cls.auth_config.isValid()
# Server side
cls.server_cert = os.path.join(cls.certsdata_path, 'localhost_ssl_cert.pem')
cls.server_key = os.path.join(cls.certsdata_path, 'localhost_ssl_key.pem')
cls.server_rootcert = cls.sslrootcert_path
os.chmod(cls.server_cert, stat.S_IRUSR)
os.chmod(cls.server_key, stat.S_IRUSR)
os.chmod(cls.server_rootcert, stat.S_IRUSR)
# Place conf in the data folder
with open(cls.pg_conf, 'w+') as f:
f.write(QGIS_POSTGRES_CONF_TEMPLATE % {
'port': cls.port,
'tempfolder': cls.tempfolder,
'server_cert': cls.server_cert,
'server_key': cls.server_key,
'sslrootcert_path': cls.sslrootcert_path,
})
with open(cls.pg_hba, 'w+') as f:
f.write(QGIS_POSTGRES_HBA_TEMPLATE)
@classmethod
def setUpClass(cls):
"""Run before all tests:
Creates an auth configuration"""
cls.port = QGIS_POSTGRES_SERVER_PORT
cls.username = 'username'
cls.password = 'password'
cls.dbname = 'test_password'
cls.tempfolder = QGIS_PG_TEST_PATH
cls.certsdata_path = os.path.join(unitTestDataPath('auth_system'), 'certs_keys')
cls.hostname = 'localhost'
cls.data_path = os.path.join(cls.tempfolder, 'data')
os.mkdir(cls.data_path)
# Disable SSL verification for setup operations
env = dict(os.environ)
env['PGSSLMODE'] = 'disable'
cls.setUpAuth()
subprocess.check_call([os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH, 'initdb'), '-D', cls.data_path])
cls.server = subprocess.Popen([os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH, 'postgres'), '-D',
cls.data_path, '-c',
"config_file=%s" % cls.pg_conf],
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait max 10 secs for the server to start
end = time.time() + 10
while True:
line = cls.server.stderr.readline()
print(line)
if line.find(b"database system is ready to accept") != -1:
break
if time.time() > end:
raise Exception("Timeout connecting to PostgreSQL")
# Create a DB
subprocess.check_call([os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH, 'createdb'), '-h', 'localhost', '-p', cls.port, 'test_password'], env=env)
# Inject test SQL from test path
test_sql = os.path.join(unitTestDataPath('provider'), 'testdata_pg.sql')
subprocess.check_call([os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH, 'psql'), '-h', 'localhost', '-p', cls.port, '-f', test_sql, cls.dbname], env=env)
# Create a role
subprocess.check_call([os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH, 'psql'), '-h', 'localhost', '-p', cls.port, '-c', 'CREATE ROLE "%s" WITH SUPERUSER LOGIN PASSWORD \'%s\'' % (cls.username, cls.password), cls.dbname], env=env)
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
cls.server.terminate()
os.kill(cls.server.pid, signal.SIGABRT)
del cls.server
time.sleep(2)
rmtree(QGIS_AUTH_DB_DIR_PATH)
rmtree(cls.tempfolder)
def setUp(self):
"""Run before each test."""
pass
def tearDown(self):
"""Run after each test."""
pass
@classmethod
def _getPostGISLayer(cls, type_name, layer_name=None, authcfg=None):
"""
PG layer factory
"""
if layer_name is None:
layer_name = 'pg_' + type_name
uri = QgsDataSourceUri()
uri.setWkbType(QgsWkbTypes.Point)
uri.setConnection("localhost", cls.port, cls.dbname, "", "", QgsDataSourceUri.SslVerifyFull, authcfg)
uri.setKeyColumn('pk')
uri.setSrid('EPSG:4326')
uri.setDataSource('qgis_test', 'someData', "geom", "", "pk")
# Note: do not expand here!
layer = QgsVectorLayer(uri.uri(False), layer_name, 'postgres')
return layer
def testValidAuthAccess(self):
"""
Access the protected layer with valid credentials
"""
pg_layer = self._getPostGISLayer('testlayer_èé', authcfg=self.auth_config.id())
self.assertTrue(pg_layer.isValid())
def testInvalidAuthAccess(self):
"""
Access the protected layer with not valid credentials
"""
pg_layer = self._getPostGISLayer('testlayer_èé')
self.assertFalse(pg_layer.isValid())
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
zobe123/Plex-CS | lib/logutils/queue.py | 34 | 7547 | #
# Copyright (C) 2010-2013 Vinay Sajip. See LICENSE.txt for details.
#
"""
This module contains classes which help you work with queues. A typical
application is when you want to log from performance-critical threads, but
where the handlers you want to use are slow (for example,
:class:`~logging.handlers.SMTPHandler`). In that case, you can create a queue,
pass it to a :class:`QueueHandler` instance and use that instance with your
loggers. Elsewhere, you can instantiate a :class:`QueueListener` with the same
queue and some slow handlers, and call :meth:`~QueueListener.start` on it.
This will start monitoring the queue on a separate thread and call all the
configured handlers *on that thread*, so that your logging thread is not held
up by the slow handlers.
Note that as well as in-process queues, you can use these classes with queues
from the :mod:`multiprocessing` module.
**N.B.** This is part of the standard library since Python 3.2, so the
version here is for use with earlier Python versions.
"""
import logging
try:
import Queue as queue
except ImportError:
import queue
import threading
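# A minimal usage sketch (handler choice and queue size are illustrative):
#
#   import logging
#   q = queue.Queue(-1)  # unbounded, so logging callers never block
#   logging.getLogger().addHandler(QueueHandler(q))
#   listener = QueueListener(q, logging.StreamHandler())
#   listener.start()          # slow handlers now run on the listener thread
#   logging.warning('hello')  # returns quickly; the record goes via the queue
#   listener.stop()           # flushes remaining records and joins the thread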
class QueueHandler(logging.Handler):
"""
This handler sends events to a queue. Typically, it would be used together
with a multiprocessing Queue to centralise logging to file in one process
(in a multi-process application), so as to avoid file write contention
between processes.
:param queue: The queue to send `LogRecords` to.
"""
def __init__(self, queue):
"""
Initialise an instance, using the passed queue.
"""
logging.Handler.__init__(self)
self.queue = queue
def enqueue(self, record):
"""
Enqueue a record.
The base implementation uses :meth:`~queue.Queue.put_nowait`. You may
want to override this method if you want to use blocking, timeouts or
custom queue implementations.
:param record: The record to enqueue.
"""
self.queue.put_nowait(record)
def prepare(self, record):
"""
Prepares a record for queuing. The object returned by this method is
enqueued.
The base implementation formats the record to merge the message
and arguments, and removes unpickleable items from the record
in-place.
You might want to override this method if you want to convert
the record to a dict or JSON string, or send a modified copy
of the record while leaving the original intact.
:param record: The record to prepare.
"""
# The format operation gets traceback text into record.exc_text
# (if there's exception data), and also puts the message into
# record.message. We can then use this to replace the original
# msg + args, as these might be unpickleable. We also zap the
# exc_info attribute, as it's no longer needed and, if not None,
# will typically not be pickleable.
self.format(record)
record.msg = record.message
record.args = None
record.exc_info = None
return record
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it for pickling first.
:param record: The record to emit.
"""
try:
self.enqueue(self.prepare(record))
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class QueueListener(object):
"""
This class implements an internal threaded listener which watches for
LogRecords being added to a queue, removes them and passes them to a
list of handlers for processing.
    :param queue: The queue to listen to.
:param handlers: The handlers to invoke on everything received from
the queue.
"""
_sentinel = None
def __init__(self, queue, *handlers):
"""
Initialise an instance with the specified queue and
handlers.
"""
self.queue = queue
self.handlers = handlers
self._stop = threading.Event()
self._thread = None
def dequeue(self, block):
"""
Dequeue a record and return it, optionally blocking.
The base implementation uses :meth:`~queue.Queue.get`. You may want to
override this method if you want to use timeouts or work with custom
queue implementations.
:param block: Whether to block if the queue is empty. If `False` and
the queue is empty, an :class:`~queue.Empty` exception
will be thrown.
"""
return self.queue.get(block)
def start(self):
"""
Start the listener.
This starts up a background thread to monitor the queue for
LogRecords to process.
"""
self._thread = t = threading.Thread(target=self._monitor)
t.setDaemon(True)
t.start()
    def prepare(self, record):
"""
Prepare a record for handling.
This method just returns the passed-in record. You may want to
override this method if you need to do any custom marshalling or
manipulation of the record before passing it to the handlers.
:param record: The record to prepare.
"""
return record
def handle(self, record):
"""
Handle a record.
This just loops through the handlers offering them the record
to handle.
:param record: The record to handle.
"""
record = self.prepare(record)
for handler in self.handlers:
handler.handle(record)
def _monitor(self):
"""
Monitor the queue for records, and ask the handler
to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
q = self.queue
has_task_done = hasattr(q, 'task_done')
while not self._stop.isSet():
try:
record = self.dequeue(True)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
pass
# There might still be records in the queue.
while True:
try:
record = self.dequeue(False)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
break
def enqueue_sentinel(self):
"""
Writes a sentinel to the queue to tell the listener to quit. This
implementation uses ``put_nowait()``. You may want to override this
method if you want to use timeouts or work with custom queue
implementations.
"""
self.queue.put_nowait(self._sentinel)
def stop(self):
"""
Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
"""
self._stop.set()
self.enqueue_sentinel()
self._thread.join()
self._thread = None
| gpl-3.0 |
BT-fgarbely/account-invoicing | account_invoice_rounding/res_config.py | 30 | 3354 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Vaucher
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class AccountConfigSettings(models.TransientModel):
_inherit = 'account.config.settings'
tax_calculation_rounding = fields.Float(
related='company_id.tax_calculation_rounding',
string='Tax Rounding unit',
default=0.05)
tax_calculation_rounding_method = fields.Selection(
related='company_id.tax_calculation_rounding_method',
selection=[
('round_per_line', 'Round per line'),
('round_globally', 'Round globally'),
('swedish_round_globally', 'Swedish Round globally'),
('swedish_add_invoice_line',
'Swedish Round by adding an invoice line'),
],
string='Tax calculation rounding method',
help="If you select 'Round per line' : for each tax, the tax "
"amount will first be computed and rounded for each "
"PO/SO/invoice line and then these rounded amounts will be "
"summed, leading to the total amount for that tax. If you "
"select 'Round globally': for each tax, the tax amount will "
"be computed for each PO/SO/invoice line, then these amounts"
" will be summed and eventually this total tax amount will "
"be rounded. If you sell with tax included, you should "
"choose 'Round per line' because you certainly want the sum "
"of your tax-included line subtotals to be equal to the "
"total amount with taxes.")
tax_calculation_rounding_account_id = fields.Many2one(
related='company_id.tax_calculation_rounding_account_id',
        comodel_name='account.account',
string='Tax Rounding account',
domain=[('type', '<>', 'view')])
def onchange_company_id(self, cr, uid, ids, company_id, context=None):
res = super(AccountConfigSettings, self
).onchange_company_id(cr, uid, ids,
company_id, context=context)
company = self.pool.get('res.company').browse(cr, uid, company_id,
context=context)
res['value'][
'tax_calculation_rounding'] = company.tax_calculation_rounding
res['value']['tax_calculation_rounding_account_id'] = \
company.tax_calculation_rounding_account_id.id
return res
| agpl-3.0 |
ruschelp/cortex-vfx | test/IECore/JPEGImageWriter.py | 12 | 11591 | ##########################################################################
#
# Copyright (c) 2008-2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import glob
import sys
import os
from IECore import *
class TestJPEGImageWriter(unittest.TestCase):
def __verifyImageRGB( self, imgNew, imgOrig, maxError = 0.004 ):
self.assertEqual( type(imgNew), ImagePrimitive )
if "R" in imgOrig :
self.assert_( "R" in imgNew )
self.assert_( "G" in imgNew )
self.assert_( "B" in imgNew )
self.failIf( "Y" in imgNew )
elif "Y" in imgOrig :
self.assert_( "Y" in imgNew )
self.failIf( "R" in imgNew )
self.failIf( "G" in imgNew )
self.failIf( "B" in imgNew )
# We don't expect to find alpha in JPEGs
self.failIf( "A" in imgNew )
op = ImageDiffOp()
res = op(
imageA = imgNew,
imageB = imgOrig,
maxError = maxError,
skipMissingChannels = True
)
self.failIf( res.value )
def __makeFloatImage( self, dataWindow, displayWindow, withAlpha = False, dataType = FloatVectorData ) :
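		# Fills R with a horizontal 0..1 ramp and G with a vertical 0..1
		# ramp, leaving B at zero, so coordinate-dependent encode/decode
		# errors are easy to spot in the round-trip tests.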
img = ImagePrimitive( dataWindow, displayWindow )
w = dataWindow.max.x - dataWindow.min.x + 1
h = dataWindow.max.y - dataWindow.min.y + 1
area = w * h
R = dataType( area )
G = dataType( area )
B = dataType( area )
if withAlpha:
A = dataType( area )
offset = 0
for y in range( 0, h ) :
for x in range( 0, w ) :
R[offset] = float(x) / (w - 1)
G[offset] = float(y) / (h - 1)
B[offset] = 0.0
if withAlpha:
A[offset] = 0.5
offset = offset + 1
img["R"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, R )
img["G"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, G )
img["B"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, B )
if withAlpha:
img["A"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, A )
return img
def __makeIntImage( self, dataWindow, displayWindow, dataType = UIntVectorData, maxInt = 2**32-1 ) :
img = ImagePrimitive( dataWindow, displayWindow )
w = dataWindow.max.x - dataWindow.min.x + 1
h = dataWindow.max.y - dataWindow.min.y + 1
area = w * h
R = dataType( area )
G = dataType( area )
B = dataType( area )
offset = 0
for y in range( 0, h ) :
for x in range( 0, w ) :
R[offset] = int( maxInt * float(x) / (w - 1) )
G[offset] = int( maxInt * float(y) / (h - 1) )
B[offset] = 0
offset = offset + 1
img["R"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, R )
img["G"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, G )
img["B"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, B )
return img
def __makeGreyscaleImage( self, dataWindow, displayWindow ) :
img = ImagePrimitive( dataWindow, displayWindow )
w = dataWindow.max.x - dataWindow.min.x + 1
h = dataWindow.max.y - dataWindow.min.y + 1
area = w * h
Y = FloatVectorData( area )
offset = 0
for y in range( 0, h ) :
for x in range( 0, w ) :
Y[offset] = float(x) / (w - 1) * float(y) / (h - 1)
offset = offset + 1
img["Y"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, Y )
return img
def testConstruction( self ):
img = ImagePrimitive()
self.assert_( JPEGImageWriter.canWrite( img, "test/IECore/data/jpg/output.jpg" ) )
w = Writer.create( img, "test/IECore/data/jpg/output.jpg" )
self.assertEqual( type(w), JPEGImageWriter )
def testQuality ( self ) :
w = Box2i(
V2i( 0, 0 ),
V2i( 99, 99)
)
img = self.__makeFloatImage( w, w )
w = Writer.create( img, "test/IECore/data/jpg/output.jpg" )
self.assertEqual( type(w), JPEGImageWriter )
qualitySizeMap = {}
lastSize = None
for q in [ 0, 10, 50, 80, 100 ]:
w["quality"] = q
if os.path.exists( "test/IECore/data/jpg/output.jpg" ) :
os.remove( "test/IECore/data/jpg/output.jpg" )
w.write()
self.assert_( os.path.exists( "test/IECore/data/jpg/output.jpg" ) )
size = os.path.getsize( "test/IECore/data/jpg/output.jpg" )
qualitySizeMap[q] = size
if lastSize :
self.assert_( size >= lastSize )
lastSize = size
self.assert_( qualitySizeMap[100] > qualitySizeMap[0] )
def testWrite( self ) :
displayWindow = Box2i(
V2i( 0, 0 ),
V2i( 99, 99 )
)
dataWindow = displayWindow
# JPEG default channels are 8-bit
rawImage = self.__makeIntImage( dataWindow, displayWindow, dataType = UCharVectorData, maxInt = 2**8-1 )
for dataType in [ FloatVectorData, HalfVectorData, DoubleVectorData ] :
self.setUp()
rawMode = ( dataType != FloatVectorData )
imgOrig = self.__makeFloatImage( dataWindow, displayWindow, dataType = dataType )
w = Writer.create( imgOrig, "test/IECore/data/jpg/output.jpg" )
self.assertEqual( type(w), JPEGImageWriter )
w['rawChannels'] = rawMode
w.write()
self.assert_( os.path.exists( "test/IECore/data/jpg/output.jpg" ) )
# Now we've written the image, verify the rgb
r = Reader.create( "test/IECore/data/jpg/output.jpg" )
r['rawChannels'] = rawMode
imgNew = r.read()
if rawMode :
self.assertEqual( type(imgNew['R'].data), UCharVectorData )
self.__verifyImageRGB( rawImage, imgNew )
else :
self.assertEqual( type(imgNew['R'].data), FloatVectorData )
self.__verifyImageRGB( imgOrig, imgNew, 0.008 )
self.tearDown()
for dataType in [ ( UIntVectorData, 2**32-1), (UCharVectorData, 2**8-1 ), (UShortVectorData, 2**16-1 ) ] :
self.setUp()
imgOrig = self.__makeIntImage( dataWindow, displayWindow, dataType = dataType[0], maxInt = dataType[1] )
w = Writer.create( imgOrig, "test/IECore/data/jpg/output.jpg" )
w['rawChannels'] = True
self.assertEqual( type(w), JPEGImageWriter )
w.write()
self.assert_( os.path.exists( "test/IECore/data/jpg/output.jpg" ) )
# Now we've written the image, verify the rgb
r = Reader.create( "test/IECore/data/jpg/output.jpg" )
r['rawChannels'] = True
imgNew = r.read()
self.__verifyImageRGB( rawImage, imgNew )
self.tearDown()
def testGreyscaleWrite( self ) :
displayWindow = Box2i(
V2i( 0, 0 ),
V2i( 199, 99 )
)
dataWindow = displayWindow
imgOrig = self.__makeGreyscaleImage( dataWindow, displayWindow )
w = Writer.create( imgOrig, "test/IECore/data/jpg/output.jpg" )
self.assertEqual( type(w), JPEGImageWriter )
w.write()
self.assert_( os.path.exists( "test/IECore/data/jpg/output.jpg" ) )
r = Reader.create( "test/IECore/data/jpg/output.jpg" )
imgNew = r.read()
channelNames = r.channelNames()
self.assertEqual( len(channelNames), 1 )
self.__verifyImageRGB( imgNew, imgOrig )
def testWriteIncomplete( self ) :
displayWindow = Box2i(
V2i( 0, 0 ),
V2i( 99, 99 )
)
dataWindow = displayWindow
img = self.__makeFloatImage( dataWindow, displayWindow )
# We don't have enough data to fill this dataWindow
img.dataWindow = Box2i(
V2i( 0, 0 ),
V2i( 199, 199 )
)
self.failIf( img.arePrimitiveVariablesValid() )
w = Writer.create( img, "test/IECore/data/jpg/output.jpg" )
self.assertEqual( type(w), JPEGImageWriter )
self.assertRaises( RuntimeError, w.write )
self.failIf( os.path.exists( "test/IECore/data/jpg/output.jpg" ) )
def testErrors( self ) :
displayWindow = Box2i(
V2i( 0, 0 ),
V2i( 99, 99 )
)
dataWindow = displayWindow
# Try and write an image with the "R" channel of an unsupported type
img = self.__makeFloatImage( dataWindow, displayWindow )
img[ "R" ] = PrimitiveVariable( PrimitiveVariable.Interpolation.Constant, StringData( "hello") )
w = Writer.create( img, "test/IECore/data/jpg/output.jpg" )
self.assertEqual( type(w), JPEGImageWriter )
self.assertRaises( RuntimeError, w.write )
def testWindowWrite( self ) :
dataWindow = Box2i(
V2i( 0, 0 ),
V2i( 99, 99 )
)
img = self.__makeFloatImage( dataWindow, dataWindow )
img.displayWindow = Box2i(
V2i( -20, -20 ),
V2i( 199, 199 )
)
w = Writer.create( img, "test/IECore/data/jpg/output.jpg" )
self.assertEqual( type(w), JPEGImageWriter )
w['colorSpace'] = 'linear'
w.write()
self.assert_( os.path.exists( "test/IECore/data/jpg/output.jpg" ) )
r = Reader.create( "test/IECore/data/jpg/output.jpg" )
r['colorSpace'] = 'linear'
imgNew = r.read()
r = Reader.create( "test/IECore/data/expectedResults/windowWrite.jpg" )
r['colorSpace'] = 'linear'
imgExpected = r.read()
self.__verifyImageRGB( imgNew, imgExpected )
def testOversizeDataWindow( self ) :
r = Reader.create( "test/IECore/data/exrFiles/oversizeDataWindow.exr" )
r['colorSpace'] = 'linear'
img = r.read()
w = Writer.create( img, "test/IECore/data/jpg/output.jpg" )
self.assertEqual( type(w), JPEGImageWriter )
w['colorSpace'] = 'linear'
w.write()
r = Reader.create( "test/IECore/data/jpg/output.jpg" )
r['colorSpace'] = 'linear'
imgNew = r.read()
r = Reader.create( "test/IECore/data/expectedResults/oversizeDataWindow.jpg" )
r['colorSpace'] = 'linear'
imgExpected = r.read()
self.__verifyImageRGB( imgNew, imgExpected )
def testRegularDataWindow( self ) :
r = Reader.create( "test/IECore/data/exrFiles/uvMapWithDataWindow.100x100.exr" )
r['colorSpace'] = 'linear'
img = r.read()
w = Writer.create( img, "test/IECore/data/jpg/output.jpg" )
self.assertEqual( type(w), JPEGImageWriter )
w['colorSpace'] = 'linear'
w.write()
r = Reader.create( "test/IECore/data/jpg/output.jpg" )
r['colorSpace'] = 'linear'
imgNew = r.read()
r = Reader.create( "test/IECore/data/expectedResults/uvMapWithDataWindow.100x100.jpg" )
r['colorSpace'] = 'linear'
imgExpected = r.read()
self.__verifyImageRGB( imgNew, imgExpected )
def setUp( self ) :
if os.path.exists( "test/IECore/data/jpg/output.jpg" ) :
os.remove( "test/IECore/data/jpg/output.jpg" )
def tearDown( self ) :
if os.path.exists( "test/IECore/data/jpg/output.jpg" ) :
os.remove( "test/IECore/data/jpg/output.jpg" )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
frreiss/tensorflow-fred | tensorflow/python/keras/layers/preprocessing/normalization_test.py | 3 | 12624 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for keras.layers.preprocessing.normalization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers.preprocessing import normalization
from tensorflow.python.keras.layers.preprocessing import normalization_v1
from tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils
from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.platform import test
def get_layer_class():
if context.executing_eagerly():
return normalization.Normalization
else:
return normalization_v1.Normalization
def _get_layer_computation_test_cases():
test_cases = ({
"adapt_data": np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32),
"axis": -1,
"test_data": np.array([[1.], [2.], [3.]], np.float32),
"expected": np.array([[-1.414214], [-.707107], [0]], np.float32),
"testcase_name": "2d_single_element"
}, {
"adapt_data": np.array([[1], [2], [3], [4], [5]], dtype=np.int32),
"axis": -1,
"test_data": np.array([[1], [2], [3]], np.int32),
"expected": np.array([[-1.414214], [-.707107], [0]], np.float32),
"testcase_name": "2d_int_data"
}, {
"adapt_data": np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32),
"axis": None,
"test_data": np.array([[1.], [2.], [3.]], np.float32),
"expected": np.array([[-1.414214], [-.707107], [0]], np.float32),
"testcase_name": "2d_single_element_none_axis"
}, {
"adapt_data": np.array([[1., 2., 3., 4., 5.]], dtype=np.float32),
"axis": None,
"test_data": np.array([[1.], [2.], [3.]], np.float32),
"expected": np.array([[-1.414214], [-.707107], [0]], np.float32),
"testcase_name": "2d_single_element_none_axis_flat_data"
}, {
"adapt_data":
np.array([[[1., 2., 3.], [2., 3., 4.]], [[3., 4., 5.], [4., 5., 6.]]],
np.float32),
"axis":
1,
"test_data":
np.array([[[1., 2., 3.], [2., 3., 4.]], [[3., 4., 5.], [4., 5., 6.]]],
np.float32),
"expected":
np.array([[[-1.549193, -0.774597, 0.], [-1.549193, -0.774597, 0.]],
[[0., 0.774597, 1.549193], [0., 0.774597, 1.549193]]],
np.float32),
"testcase_name":
"3d_internal_axis"
}, {
"adapt_data":
np.array(
[[[1., 0., 3.], [2., 3., 4.]], [[3., -1., 5.], [4., 5., 8.]]],
np.float32),
"axis": (1, 2),
"test_data":
np.array(
[[[3., 1., -1.], [2., 5., 4.]], [[3., 0., 5.], [2., 5., 8.]]],
np.float32),
"expected":
np.array(
[[[1., 3., -5.], [-1., 1., -1.]], [[1., 1., 1.], [-1., 1., 1.]]],
np.float32),
"testcase_name":
"3d_multiple_axis"
}, {
"adapt_data":
np.zeros((3, 4)),
"axis": -1,
"test_data":
np.zeros((3, 4)),
"expected":
np.zeros((3, 4)),
"testcase_name":
"zero_variance"
})
crossed_test_cases = []
# Cross above test cases with use_dataset in (True, False)
for use_dataset in (True, False):
for case in test_cases:
case = case.copy()
if use_dataset:
case["testcase_name"] = case["testcase_name"] + "_with_dataset"
case["use_dataset"] = use_dataset
crossed_test_cases.append(case)
return crossed_test_cases
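# Quick sanity check for the expected values above (a sketch, not executed):
# for adapt data [1, 2, 3, 4, 5] the mean is 3 and the population standard
# deviation is sqrt(2) ~= 1.414, so a test input of 1 normalizes to
# (1 - 3) / 1.414 ~= -1.414214, matching the "2d_single_element" case.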
@keras_parameterized.run_all_keras_modes
class NormalizationTest(keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
def test_layer_api_compatibility(self):
cls = get_layer_class()
with CustomObjectScope({"Normalization": cls}):
output_data = testing_utils.layer_test(
cls,
kwargs={"axis": -1},
input_shape=(None, 3),
input_data=np.array([[3, 1, 2], [6, 5, 4]], dtype=np.float32),
validate_training=False,
adapt_data=np.array([[1, 2, 1], [2, 3, 4], [1, 2, 1], [2, 3, 4]]))
expected = np.array([[3., -3., -0.33333333], [9., 5., 1.]])
self.assertAllClose(expected, output_data)
def test_combiner_api_compatibility(self):
data = np.array([[1], [2], [3], [4], [5]])
combiner = normalization._NormalizingCombiner(axis=-1)
expected = {
"count": np.array(5.0),
"variance": np.array([2.]),
"mean": np.array([3.])
}
expected_accumulator = combiner._create_accumulator(expected["count"],
expected["mean"],
expected["variance"])
self.validate_accumulator_serialize_and_deserialize(combiner, data,
expected_accumulator)
self.validate_accumulator_uniqueness(combiner, data)
self.validate_accumulator_extract(combiner, data, expected)
self.validate_accumulator_extract_and_restore(combiner, data,
expected)
@parameterized.named_parameters(
{
"data": np.array([[1], [2], [3], [4], [5]]),
"axis": -1,
"expected": {
"count": np.array(5.0),
"variance": np.array([2.]),
"mean": np.array([3.])
},
"testcase_name": "2d_single_element"
}, {
"data": np.array([[1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]),
"axis": -1,
"expected": {
"count": np.array(5.0),
"mean": np.array([3., 4.]),
"variance": np.array([2., 2.])
},
"testcase_name": "2d_multi_element"
}, {
"data": np.array([[[1, 2]], [[2, 3]], [[3, 4]], [[4, 5]], [[5, 6]]]),
"axis": 2,
"expected": {
"count": np.array(5.0),
"mean": np.array([3., 4.]),
"variance": np.array([2., 2.])
},
"testcase_name": "3d_multi_element"
}, {
"data": np.array([[[1, 2]], [[2, 3]], [[3, 4]], [[4, 5]], [[5, 6]]]),
"axis": (1, 2),
"expected": {
"count": np.array(5.0),
"mean": np.array([[3., 4.]]),
"variance": np.array([[2., 2.]])
},
"testcase_name": "3d_multi_element_multi_axis"
}, {
"data":
np.array([[[1, 2], [2, 3]], [[3, 4], [4, 5]], [[1, 2], [2, 3]],
[[3, 4], [4, 5]]]),
"axis":
1,
"expected": {
"count": np.array(8.0),
"mean": np.array([2.5, 3.5]),
"variance": np.array([1.25, 1.25])
},
"testcase_name":
"3d_multi_element_internal_axis"
})
def test_combiner_computation_multi_value_axis(self, data, axis, expected):
combiner = normalization._NormalizingCombiner(axis=axis)
expected_accumulator = combiner._create_accumulator(**expected)
self.validate_accumulator_computation(combiner, data, expected_accumulator)
@parameterized.named_parameters(*_get_layer_computation_test_cases())
def test_layer_computation(self, adapt_data, axis, test_data, use_dataset,
expected):
input_shape = tuple([None for _ in range(test_data.ndim - 1)])
if use_dataset:
# Keras APIs expect batched datasets
adapt_data = dataset_ops.Dataset.from_tensor_slices(adapt_data).batch(
test_data.shape[0] // 2)
test_data = dataset_ops.Dataset.from_tensor_slices(test_data).batch(
test_data.shape[0] // 2)
cls = get_layer_class()
layer = cls(axis=axis)
layer.adapt(adapt_data)
input_data = keras.Input(shape=input_shape)
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
output_data = model.predict(test_data)
self.assertAllClose(expected, output_data)
def test_mean_setting_continued_adapt_failure(self):
if not context.executing_eagerly():
self.skipTest("'assign' doesn't work in V1, so don't test in V1.")
cls = get_layer_class()
layer = cls(axis=-1)
layer.build((None, 2))
layer.mean.assign([1.3, 2.0])
with self.assertRaisesRegex(RuntimeError, "without also setting 'count'"):
layer.adapt(np.array([[1, 2]]), reset_state=False)
def test_var_setting_continued_adapt_failure(self):
if not context.executing_eagerly():
self.skipTest("'assign' doesn't work in V1, so don't test in V1.")
cls = get_layer_class()
layer = cls(axis=-1)
layer.build((None, 2))
layer.variance.assign([1.3, 2.0])
with self.assertRaisesRegex(RuntimeError, "without also setting 'count'"):
layer.adapt(np.array([[1, 2]]), reset_state=False)
def test_weight_setting_continued_adapt_failure(self):
cls = get_layer_class()
layer = cls(axis=-1)
layer.build((None, 2))
layer.set_weights([np.array([1.3, 2.0]), np.array([0.0, 1.0]), np.array(0)])
with self.assertRaisesRegex(RuntimeError, "without also setting 'count'"):
layer.adapt(np.array([[1, 2]]), reset_state=False)
def test_weight_setting_no_count_continued_adapt_failure(self):
cls = get_layer_class()
layer = cls(axis=-1)
layer.build((None, 2))
layer.set_weights([np.array([1.3, 2.0]), np.array([0.0, 1.0])])
with self.assertRaisesRegex(RuntimeError, "without also setting 'count'"):
layer.adapt(np.array([[1, 2]]), reset_state=False)
def test_1d_data(self):
data = [0, 2, 0, 2]
cls = get_layer_class()
layer = cls(axis=-1)
layer.adapt(data)
output = layer(data)
self.assertListEqual(output.shape.as_list(), [4, 1])
if context.executing_eagerly():
self.assertAllClose(output.numpy(), [[-1], [1], [-1], [1]])
@parameterized.parameters(
{"axis": 0},
{"axis": (-1, 0)},
)
def test_zeros_fail_init(self, axis):
cls = get_layer_class()
with self.assertRaisesRegex(ValueError,
"The argument 'axis' may not be 0."):
cls(axis=axis)
@parameterized.parameters(
# Out of bounds
{"axis": 3},
{"axis": -3},
# In a tuple
{"axis": (1, 3)},
{"axis": (1, -3)},
)
def test_bad_axis_fail_build(self, axis):
cls = get_layer_class()
layer = cls(axis=axis)
with self.assertRaisesRegex(ValueError,
r"in the range \[1-ndim, ndim-1\]."):
layer.build([None, 2, 3])
@parameterized.parameters(
# Results should be identical no matter how the axes are specified (3d).
{"axis": (1, 2)},
{"axis": (2, 1)},
{"axis": (1, -1)},
{"axis": (-1, 1)},
)
def test_axis_permutations(self, axis):
cls = get_layer_class()
layer = cls(axis=axis)
# data.shape = [2, 2, 3]
data = np.array([[[0., 1., 2.], [0., 2., 6.]],
[[2., 3., 4.], [3., 6., 10.]]])
expect = np.array([[[-1., -1., -1.], [-1., -1., -1.]],
[[1., 1., 1.], [1., 1., 1.]]])
layer.adapt(data)
self.assertAllClose(expect, layer(data))
def test_model_summary_after_layer_adapt(self):
data = np.array([[[0., 1., 2.], [0., 2., 6.]],
[[2., 3., 4.], [3., 6., 10.]]])
cls = get_layer_class()
layer = cls(axis=-1)
layer.adapt(data)
model = keras.Sequential(
[layer,
keras.layers.Dense(64, activation="relu"),
keras.layers.Dense(1)])
model.summary()
if __name__ == "__main__":
test.main()
| apache-2.0 |
bac/horizon | horizon/forms/fields.py | 4 | 15925 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import re
import netaddr
import six
import uuid
from django.core.exceptions import ValidationError # noqa
from django.core import urlresolvers
from django.forms import fields
from django.forms import forms
from django.forms.utils import flatatt # noqa
from django.forms import widgets
from django.template import Context # noqa
from django.template.loader import get_template # noqa
from django.utils.encoding import force_text
from django.utils.functional import Promise # noqa
from django.utils import html
from django.utils.safestring import mark_safe # noqa
from django.utils.translation import ugettext_lazy as _
ip_allowed_symbols_re = re.compile(r'^[a-fA-F0-9:/\.]+$')
IPv4 = 1
IPv6 = 2
class IPField(fields.Field):
"""Form field for entering IP/range values, with validation.
Supports IPv4/IPv6 in the format:
.. xxx.xxx.xxx.xxx
.. xxx.xxx.xxx.xxx/zz
.. ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
.. ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/zz
    and all compressed forms. The short forms
    are also supported:
xxx/yy
xxx.xxx/yy
.. attribute:: version
Specifies which IP version to validate,
valid values are 1 (fields.IPv4), 2 (fields.IPv6) or
both - 3 (fields.IPv4 | fields.IPv6).
Defaults to IPv4 (1)
.. attribute:: mask
Boolean flag to validate subnet masks along with IP address.
E.g: 10.0.0.1/32
.. attribute:: mask_range_from
Subnet range limitation, e.g. 16
That means the input mask will be checked to be in the range
16:max_value. Useful to limit the subnet ranges
to A/B/C-class networks.
"""
invalid_format_message = _("Incorrect format for IP address")
invalid_version_message = _("Invalid version for IP address")
invalid_mask_message = _("Invalid subnet mask")
max_v4_mask = 32
max_v6_mask = 128
def __init__(self, *args, **kwargs):
self.mask = kwargs.pop("mask", None)
self.min_mask = kwargs.pop("mask_range_from", 0)
self.version = kwargs.pop('version', IPv4)
super(IPField, self).__init__(*args, **kwargs)
def validate(self, value):
super(IPField, self).validate(value)
if not value and not self.required:
return
try:
if self.mask:
self.ip = netaddr.IPNetwork(value)
else:
self.ip = netaddr.IPAddress(value)
except Exception:
raise ValidationError(self.invalid_format_message)
if not any([self.version & IPv4 > 0 and self.ip.version == 4,
self.version & IPv6 > 0 and self.ip.version == 6]):
raise ValidationError(self.invalid_version_message)
if self.mask:
if self.ip.version == 4 and \
not self.min_mask <= self.ip.prefixlen <= self.max_v4_mask:
raise ValidationError(self.invalid_mask_message)
if self.ip.version == 6 and \
not self.min_mask <= self.ip.prefixlen <= self.max_v6_mask:
raise ValidationError(self.invalid_mask_message)
def clean(self, value):
super(IPField, self).clean(value)
return str(getattr(self, "ip", ""))
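# A minimal usage sketch for IPField (the field name and texts are
# illustrative):
#
#     cidr = IPField(label="Network Address",
#                    required=True,
#                    initial="",
#                    help_text="Network address in CIDR format "
#                              "(e.g. 192.168.0.0/24)",
#                    version=IPv4 | IPv6,
#                    mask=True)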
class MultiIPField(IPField):
"""Extends IPField to allow comma-separated lists of addresses."""
def validate(self, value):
self.addresses = []
if value:
addresses = value.split(',')
for ip in addresses:
super(MultiIPField, self).validate(ip)
self.addresses.append(ip)
else:
super(MultiIPField, self).validate(value)
def clean(self, value):
super(MultiIPField, self).clean(value)
return str(','.join(getattr(self, "addresses", [])))
class SelectWidget(widgets.Select):
"""Customizable select widget, that allows to render
data-xxx attributes from choices. This widget also
allows user to specify additional html attributes
for choices.
.. attribute:: data_attrs
Specifies object properties to serialize as
data-xxx attribute. If passed ('id', ),
this will be rendered as:
<option data-id="123">option_value</option>
where 123 is the value of choice_value.id
.. attribute:: transform
A callable used to render the display value
from the option object.
.. attribute:: transform_html_attrs
A callable used to render additional HTML attributes
for the option object. It returns a dictionary
containing the html attributes and their values.
For example, to define a title attribute for the
choices::
helpText = { 'Apple': 'This is a fruit',
'Carrot': 'This is a vegetable' }
def get_title(data):
text = helpText.get(data, None)
if text:
return {'title': text}
else:
return {}
....
....
        widget=forms.ThemableSelect(attrs={'class': 'switchable',
                                           'data-slug': 'source'},
                                    transform_html_attrs=get_title)
self.fields[<field name>].choices =
([
('apple','Apple'),
('carrot','Carrot')
])
"""
def __init__(self, attrs=None, choices=(), data_attrs=(), transform=None,
transform_html_attrs=None):
self.data_attrs = data_attrs
self.transform = transform
self.transform_html_attrs = transform_html_attrs
super(SelectWidget, self).__init__(attrs, choices)
def render_option(self, selected_choices, option_value, option_label):
option_value = force_text(option_value)
other_html = (u' selected="selected"'
if option_value in selected_choices else '')
other_html += self.transform_option_html_attrs(option_label)
data_attr_html = self.get_data_attrs(option_label)
if data_attr_html:
other_html += ' ' + data_attr_html
option_label = self.transform_option_label(option_label)
return u'<option value="%s"%s>%s</option>' % (
html.escape(option_value), other_html, option_label)
def get_data_attrs(self, option_label):
other_html = []
if not isinstance(option_label, (six.string_types, Promise)):
for data_attr in self.data_attrs:
data_value = html.conditional_escape(
force_text(getattr(option_label,
data_attr, "")))
other_html.append('data-%s="%s"' % (data_attr, data_value))
return ' '.join(other_html)
def transform_option_label(self, option_label):
if (not isinstance(option_label, (six.string_types, Promise)) and
callable(self.transform)):
option_label = self.transform(option_label)
return html.conditional_escape(force_text(option_label))
def transform_option_html_attrs(self, option_label):
if not callable(self.transform_html_attrs):
return ''
return flatatt(self.transform_html_attrs(option_label))
class ThemableSelectWidget(SelectWidget):
"""Bootstrap base select field widget."""
def render(self, name, value, attrs=None, choices=()):
# NOTE(woodnt): Currently the "attrs" contents are being added to the
# select that's hidden. It's unclear whether this is the
# desired behavior. In some cases, the attribute should
        # remain solely on the now-hidden select. But in others,
        # it should live on the (visible) bootstrap button,
        # or on both.
new_choices = []
initial_value = value
for opt_value, opt_label in itertools.chain(self.choices, choices):
other_html = self.transform_option_html_attrs(opt_label)
data_attr_html = self.get_data_attrs(opt_label)
if data_attr_html:
other_html += ' ' + data_attr_html
opt_label = self.transform_option_label(opt_label)
# If value exists, save off its label for use
if opt_value == value:
initial_value = opt_label
if other_html:
new_choices.append((opt_value, opt_label, other_html))
else:
new_choices.append((opt_value, opt_label))
if value is None and new_choices:
initial_value = new_choices[0][1]
attrs = self.build_attrs(attrs)
id = attrs.pop('id', 'id_%s' % name)
template = get_template('horizon/common/fields/_themable_select.html')
context = Context({
'name': name,
'options': new_choices,
'id': id,
'value': value,
'initial_value': initial_value,
'select_attrs': attrs,
})
return template.render(context)
class DynamicSelectWidget(SelectWidget):
"""A subclass of the ``Select`` widget which renders extra attributes for
use in callbacks to handle dynamic changes to the available choices.
"""
_data_add_url_attr = "data-add-item-url"
def render(self, *args, **kwargs):
add_item_url = self.get_add_item_url()
if add_item_url is not None:
self.attrs[self._data_add_url_attr] = add_item_url
return super(DynamicSelectWidget, self).render(*args, **kwargs)
def get_add_item_url(self):
if callable(self.add_item_link):
return self.add_item_link()
try:
if self.add_item_link_args:
return urlresolvers.reverse(self.add_item_link,
args=self.add_item_link_args)
else:
return urlresolvers.reverse(self.add_item_link)
except urlresolvers.NoReverseMatch:
return self.add_item_link
class ThemableDynamicSelectWidget(ThemableSelectWidget, DynamicSelectWidget):
pass
class ThemableChoiceField(fields.ChoiceField):
"""Bootstrap based select field."""
widget = ThemableSelectWidget
class DynamicChoiceField(fields.ChoiceField):
"""A subclass of ``ChoiceField`` with additional properties that make
dynamically updating its elements easier.
Notably, the field declaration takes an extra argument, ``add_item_link``
which may be a string or callable defining the URL that should be used
for the "add" link associated with the field.
"""
widget = DynamicSelectWidget
def __init__(self,
add_item_link=None,
add_item_link_args=None,
*args,
**kwargs):
super(DynamicChoiceField, self).__init__(*args, **kwargs)
self.widget.add_item_link = add_item_link
self.widget.add_item_link_args = add_item_link_args
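    # A usage sketch (the label and URL pattern name are hypothetical):
    #
    #     network = DynamicChoiceField(
    #         label="Network",
    #         add_item_link="horizon:project:networks:create")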
class ThemableDynamicChoiceField(DynamicChoiceField):
widget = ThemableDynamicSelectWidget
class DynamicTypedChoiceField(DynamicChoiceField, fields.TypedChoiceField):
"""Simple mix of ``DynamicChoiceField`` and ``TypedChoiceField``."""
pass
class ThemableDynamicTypedChoiceField(ThemableDynamicChoiceField,
fields.TypedChoiceField):
"""Simple mix of ``ThemableDynamicChoiceField`` & ``TypedChoiceField``."""
pass
class ThemableCheckboxInput(widgets.CheckboxInput):
"""A subclass of the ``Checkbox`` widget which renders extra markup to
allow a custom checkbox experience.
"""
def render(self, name, value, attrs=None):
label_for = attrs.get('id', '')
if not label_for:
attrs['id'] = uuid.uuid4()
label_for = attrs['id']
return html.format_html(
u'<div class="themable-checkbox">{}<label for="{}"></label></div>',
super(ThemableCheckboxInput, self).render(name, value, attrs),
label_for
)
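    # The rendered markup looks roughly like this (a sketch; the generated
    # id and attribute order may vary):
    #
    #     <div class="themable-checkbox">
    #       <input type="checkbox" name="x" id="id_x" />
    #       <label for="id_x"></label>
    #     </div>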
class ThemableCheckboxChoiceInput(widgets.CheckboxChoiceInput):
def render(self, name=None, value=None, attrs=None, choices=()):
if self.id_for_label:
label_for = html.format_html(' for="{}"', self.id_for_label)
else:
label_for = ''
attrs = dict(self.attrs, **attrs) if attrs else self.attrs
return html.format_html(
u'<div class="themable-checkbox">{}<label{}>' +
u'<span>{}</span></label></div>',
self.tag(attrs), label_for, self.choice_label
)
class ThemableCheckboxFieldRenderer(widgets.CheckboxFieldRenderer):
choice_input_class = ThemableCheckboxChoiceInput
class ThemableCheckboxSelectMultiple(widgets.CheckboxSelectMultiple):
renderer = ThemableCheckboxFieldRenderer
_empty_value = []
class ExternalFileField(fields.FileField):
"""A special flavor of FileField which is meant to be used in cases when
instead of uploading file to Django it should be uploaded to some external
location, while the form validation is done as usual. Should be paired
with ExternalUploadMeta metaclass embedded into the Form class.
"""
def __init__(self, *args, **kwargs):
super(ExternalFileField, self).__init__(*args, **kwargs)
self.widget.attrs.update({'data-external-upload': 'true'})
class ExternalUploadMeta(forms.DeclarativeFieldsMetaclass):
"""Set this class as the metaclass of a form that contains
ExternalFileField in order to process ExternalFileField fields in a
    specific way. A hidden CharField twin of the FileField is created which
    contains just the filename (if any file was selected on the browser side)
    and a special `clean` method for the FileField is defined which extracts
    just the file name. This makes it possible to avoid an actual file upload
    to the Django server, yet process the form clean() phase as usual. The
    actual file upload happens entirely on the client side.
"""
def __new__(mcs, name, bases, attrs):
def get_double_name(name):
suffix = '__hidden'
slen = len(suffix)
return name[:-slen] if name.endswith(suffix) else name + suffix
def make_clean_method(field_name):
def _clean_method(self):
value = self.cleaned_data[field_name]
if value:
self.cleaned_data[get_double_name(field_name)] = value
return value
return _clean_method
new_attrs = {}
for attr_name, attr in attrs.items():
new_attrs[attr_name] = attr
if isinstance(attr, ExternalFileField):
hidden_field = fields.CharField(widget=fields.HiddenInput,
required=False)
hidden_field.creation_counter = attr.creation_counter + 1000
new_attr_name = get_double_name(attr_name)
new_attrs[new_attr_name] = hidden_field
meth_name = 'clean_' + new_attr_name
new_attrs[meth_name] = make_clean_method(new_attr_name)
return super(ExternalUploadMeta, mcs).__new__(
mcs, name, bases, new_attrs)
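# A minimal sketch of the intended pairing (the form and field names are
# illustrative):
#
#     class UploadForm(six.with_metaclass(ExternalUploadMeta, forms.Form)):
#         object_file = ExternalFileField(label='File')
#
# The metaclass adds a hidden 'object_file__hidden' CharField carrying just
# the file name, so the form's clean() phase runs even though the file body
# never reaches Django.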
| apache-2.0 |
TridevGuha/pywikibot-core | scripts/coordinate_import.py | 4 | 4085 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
Coordinate importing script.
Usage:
python pwb.py coordinate_import -lang:en -family:wikipedia \
-cat:Category:Coordinates_not_on_Wikidata
This will work on all pages in the category "coordinates not on Wikidata" and
will import the coordinates on these pages to Wikidata.
The data from the "GeoData" extension (https://www.mediawiki.org/wiki/Extension:GeoData)
is used so that extension has to be setup properly. You can look at the
[[Special:Nearby]] page on your local Wiki to see if it's populated.
You can use any typical pagegenerator to provide with a list of pages:
python pwb.py coordinate_import -lang:it -family:wikipedia \
-namespace:0 -transcludes:Infobox_stazione_ferroviaria
&params;
"""
#
# (C) Multichill, 2014
# (C) Pywikibot team, 2013-2015
#
# Distributed under the terms of MIT License.
#
from __future__ import unicode_literals
__version__ = '$Id$'
#
import pywikibot
from pywikibot import pagegenerators, WikidataBot
from pywikibot.exceptions import CoordinateGlobeUnknownException
class CoordImportRobot(WikidataBot):
"""A bot to import coordinates to Wikidata."""
def __init__(self, generator):
"""
Constructor.
Arguments:
* generator - A generator that yields Page objects.
"""
super(CoordImportRobot, self).__init__()
self.generator = pagegenerators.PreloadingGenerator(generator)
self.cacheSources()
self.prop = 'P625'
def has_coord_qualifier(self, claims):
"""
Check if self.prop is used as property for a qualifier.
@param claims: the Wikibase claims to check in
@type claims: dict
@return: the first property for which self.prop
            is used as qualifier, or None if there is none
        @rtype: unicode or None
"""
for prop in claims:
for claim in claims[prop]:
if self.prop in claim.qualifiers:
return prop
def treat(self, page, item):
"""Treat page/item."""
self.current_page = page
coordinate = page.coordinates(primary_only=True)
if not coordinate:
return
claims = item.get().get('claims')
if self.prop in claims:
pywikibot.output(u'Item %s already contains coordinates (%s)'
% (item.title(), self.prop))
return
prop = self.has_coord_qualifier(claims)
if prop:
pywikibot.output(u'Item %s already contains coordinates'
u' (%s) as qualifier for %s'
% (item.title(), self.prop, prop))
return
newclaim = pywikibot.Claim(self.repo, self.prop)
newclaim.setTarget(coordinate)
pywikibot.output(u'Adding %s, %s to %s' % (coordinate.lat,
coordinate.lon,
item.title()))
try:
item.addClaim(newclaim)
source = self.getSource(page.site)
if source:
newclaim.addSource(source, bot=True)
except CoordinateGlobeUnknownException as e:
pywikibot.output(u'Skipping unsupported globe: %s' % e.args)
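# For reference, the claim built in treat() above boils down to the
# following sketch (coordinate values are illustrative):
#
#     coordinate = pywikibot.Coordinate(lat=52.516, lon=13.383,
#                                       precision=0.0001)
#     claim = pywikibot.Claim(repo, 'P625')
#     claim.setTarget(coordinate)
#     item.addClaim(claim)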
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
# Process global args and prepare generator args parser
local_args = pywikibot.handle_args(args)
generator_factory = pagegenerators.GeneratorFactory()
for arg in local_args:
if generator_factory.handleArg(arg):
continue
generator = generator_factory.getCombinedGenerator()
if generator:
coordbot = CoordImportRobot(generator)
coordbot.run()
return True
else:
pywikibot.bot.suggest_help(missing_generator=True)
return False
if __name__ == "__main__":
main()
| mit |
DPaaS-Raksha/raksha | raksha/openstack/common/lockutils.py | 1 | 10139 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import functools
import os
import shutil
import tempfile
import time
import weakref
from eventlet import semaphore
from oslo.config import cfg
from raksha.openstack.common import fileutils
from raksha.openstack.common.gettextutils import _
from raksha.openstack.common import local
from raksha.openstack.common import log as logging
LOG = logging.getLogger(__name__)
util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'),
cfg.StrOpt('lock_path',
help=('Directory to use for lock files. Default to a '
'temp directory'))
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
def set_defaults(lock_path):
cfg.set_defaults(util_opts, lock_path=lock_path)
class _InterProcessLock(object):
"""Lock implementation which allows multiple locks, working around
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
not require any cleanup. Since the lock is always held on a file
descriptor rather than outside of the process, the lock gets dropped
automatically if the process crashes, even if __exit__ is not executed.
There are no guarantees regarding usage by multiple green threads in a
single process here. This lock works only between processes. Exclusive
access between local threads should be achieved using the semaphores
in the @synchronized decorator.
Note these locks are released when the descriptor is closed, so it's not
safe to close the file descriptor while another green thread holds the
lock. Just opening and closing the lock file can break synchronisation,
so lock files must be accessed only using this abstraction.
"""
def __init__(self, name):
self.lockfile = None
self.fname = name
def __enter__(self):
self.lockfile = open(self.fname, 'w')
while True:
try:
# Using non-blocking locks since green threads are not
# patched to deal with blocking locking calls.
# Also upon reading the MSDN docs for locking(), it seems
# to have a laughable 10 attempts "blocking" mechanism.
self.trylock()
return self
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
# external locks synchronise things like iptables
# updates - give it some time to prevent busy spinning
time.sleep(0.01)
else:
raise
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.unlock()
self.lockfile.close()
except IOError:
LOG.exception(_("Could not release the acquired lock `%s`"),
self.fname)
def trylock(self):
raise NotImplementedError()
def unlock(self):
raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
def trylock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
def unlock(self):
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
def trylock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
def unlock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
if os.name == 'nt':
import msvcrt
InterProcessLock = _WindowsLock
else:
import fcntl
InterProcessLock = _PosixLock
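# A minimal usage sketch (the lock file path is illustrative):
#
#     with InterProcessLock('/var/lock/raksha/my-resource'):
#         ...  # only one process at a time executes this block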
_semaphores = weakref.WeakValueDictionary()
def synchronized(name, lock_file_prefix, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
@synchronized('mylock')
def foo(self, *args):
...
ensures that only one thread will execute the foo method at a time.
Different methods can share the same lock::
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
The lock_file_prefix argument is used to provide lock files on disk with a
meaningful prefix. The prefix should end with a hyphen ('-') if specified.
The external keyword argument denotes whether this lock should work across
    multiple processes. This means that if two different workers both run a
    method decorated with @synchronized('mylock', external=True), only one
of them will execute at a time.
The lock_path keyword argument is used to specify a special location for
external lock files to live. If nothing is set, then CONF.lock_path is
used as a default.
"""
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
# NOTE(soren): If we ever go natively threaded, this will be racy.
# See http://stackoverflow.com/questions/5390569/dyn
# amically-allocating-and-destroying-mutexes
sem = _semaphores.get(name, semaphore.Semaphore())
if name not in _semaphores:
# this check is not racy - we're already holding ref locally
# so GC won't remove the item and there was no IO switch
# (only valid in greenthreads)
_semaphores[name] = sem
with sem:
LOG.debug(_('Got semaphore "%(lock)s" for method '
'"%(method)s"...'), {'lock': name,
'method': f.__name__})
# NOTE(mikal): I know this looks odd
if not hasattr(local.strong_store, 'locks_held'):
local.strong_store.locks_held = []
local.strong_store.locks_held.append(name)
try:
if external and not CONF.disable_process_locking:
LOG.debug(_('Attempting to grab file lock "%(lock)s" '
'for method "%(method)s"...'),
{'lock': name, 'method': f.__name__})
cleanup_dir = False
# We need a copy of lock_path because it is non-local
local_lock_path = lock_path
if not local_lock_path:
local_lock_path = CONF.lock_path
if not local_lock_path:
cleanup_dir = True
local_lock_path = tempfile.mkdtemp()
if not os.path.exists(local_lock_path):
fileutils.ensure_tree(local_lock_path)
# NOTE(mikal): the lock name cannot contain directory
# separators
safe_name = name.replace(os.sep, '_')
lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
lock_file_path = os.path.join(local_lock_path,
lock_file_name)
try:
lock = InterProcessLock(lock_file_path)
with lock:
LOG.debug(_('Got file lock "%(lock)s" at '
'%(path)s for method '
'"%(method)s"...'),
{'lock': name,
'path': lock_file_path,
'method': f.__name__})
retval = f(*args, **kwargs)
finally:
LOG.debug(_('Released file lock "%(lock)s" at '
'%(path)s for method "%(method)s"...'),
{'lock': name,
'path': lock_file_path,
'method': f.__name__})
# NOTE(vish): This removes the tempdir if we needed
# to create one. This is used to
# cleanup the locks left behind by unit
# tests.
if cleanup_dir:
shutil.rmtree(local_lock_path)
else:
retval = f(*args, **kwargs)
finally:
local.strong_store.locks_held.remove(name)
return retval
return inner
return wrap
def synchronized_with_prefix(lock_file_prefix):
"""Partial object generator for the synchronization decorator.
Redefine @synchronized in each project like so::
(in nova/utils.py)
from nova.openstack.common import lockutils
synchronized = lockutils.synchronized_with_prefix('nova-')
(in nova/foo.py)
from nova import utils
@utils.synchronized('mylock')
def bar(self, *args):
...
The lock_file_prefix argument is used to provide lock files on disk with a
meaningful prefix. The prefix should end with a hyphen ('-') if specified.
"""
return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
| apache-2.0 |
RayRuizhiLiao/ITK_4D | Modules/ThirdParty/pygccxml/src/pygccxml/declarations/class_declaration.py | 1 | 26207 | # Copyright 2014-2016 Insight Software Consortium.
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
"""
defines classes that describe C++ classes
This module contains definitions for the following C++ declarations:
- class definition
- class declaration
- small helper class for describing C++ class hierarchy
"""
import warnings
from . import scopedef
from . import declaration_utils
from . import declaration
from . import templates
from . import cpptypes
from .. import utils
class ACCESS_TYPES(object):
"""class that defines "access" constants"""
PUBLIC = "public"
PRIVATE = "private"
PROTECTED = "protected"
ALL = [PUBLIC, PRIVATE, PROTECTED]
class CLASS_TYPES(object):
"""class that defines "class" type constants"""
CLASS = "class"
STRUCT = "struct"
UNION = "union"
ALL = [CLASS, STRUCT, UNION]
def get_partial_name(name):
from . import container_traits # prevent cyclic dependencies
ct = container_traits.find_container_traits(name)
if ct:
return ct.remove_defaults(name)
elif templates.is_instantiation(name):
tmpl_name, args = templates.split(name)
for i, arg_name in enumerate(args):
args[i] = get_partial_name(arg_name.strip())
return templates.join(tmpl_name, args)
else:
return name
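# For example (a sketch; the exact spelling depends on the compiler):
# get_partial_name('std::vector<int,std::allocator<int> >') collapses the
# defaulted allocator argument and returns 'std::vector<int>'.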
class hierarchy_info_t(object):
"""describes class relationship"""
def __init__(self, related_class=None, access=None, is_virtual=False):
"""creates class that contains partial information about class
relationship"""
if related_class:
assert(isinstance(related_class, class_t))
self._related_class = related_class
if access:
assert(access in ACCESS_TYPES.ALL)
self._access = access
self._is_virtual = is_virtual
self._declaration_path = None
self._declaration_path_hash = None
def __eq__(self, other):
if not isinstance(other, hierarchy_info_t):
return False
return (self.declaration_path_hash ==
other.declaration_path_hash) \
and self._declaration_path == other._declaration_path \
and self._access == other._access \
and self._is_virtual == other._is_virtual
def __hash__(self):
return self.declaration_path_hash
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if not isinstance(other, self.__class__):
return self.__class__.__name__ < other.__class__.__name__
return (self.declaration_path, self.access, self.is_virtual) < \
(other.declaration_path, other.access, other.is_virtual)
@property
def related_class(self):
"""reference to base or derived :class:`class <class_t>`"""
return self._related_class
@related_class.setter
def related_class(self, new_related_class):
if new_related_class:
assert(isinstance(new_related_class, class_t))
self._related_class = new_related_class
self._declaration_path = None
self._declaration_path_hash = None
@property
def access(self):
return self._access
@access.setter
def access(self, new_access):
assert(new_access in ACCESS_TYPES.ALL)
self._access = new_access
# TODO: Why is there an access_type / access which are the same ?
@property
def access_type(self):
"""describes :class:`hierarchy type <ACCESS_TYPES>`"""
return self.access
@access_type.setter
def access_type(self, new_access_type):
self.access = new_access_type
    # TODO: check whether GCC-XML supports this and, if so, parse this
    # information
@property
def is_virtual(self):
"""indicates whether the inheritance is virtual or not"""
return self._is_virtual
@is_virtual.setter
def is_virtual(self, new_is_virtual):
self._is_virtual = new_is_virtual
@property
def declaration_path(self):
if self._declaration_path is None:
self._declaration_path = declaration_utils.declaration_path(
self.related_class)
return self._declaration_path
@property
def declaration_path_hash(self):
if self._declaration_path_hash is None:
self._declaration_path_hash = hash(tuple(self.declaration_path))
return self._declaration_path_hash
class class_declaration_t(declaration.declaration_t):
"""describes class declaration"""
def __init__(self, name=''):
"""creates class that describes C++ class declaration
( and not definition )"""
declaration.declaration_t.__init__(self, name)
self._aliases = []
self._container_traits = None # Deprecated
self._container_traits_set = False # Deprecated
self._container_traits_cache = None
def _get__cmp__items(self):
"""implementation details"""
return []
def i_depend_on_them(self, recursive=True):
return []
@property
def aliases(self):
"""List of :class:`aliases <typedef_t>` to this instance"""
return self._aliases
@aliases.setter
def aliases(self, new_aliases):
self._aliases = new_aliases
@property
def container_traits(self):
"""reference to :class:`container_traits_impl_t` or None"""
# Deprecated since 1.8.0. Will be removed in 1.9.0
warnings.warn(
"The container_traits attribute is deprecated. \n" +
"Please use the find_container_traits function from the"
"declarations module instead.",
DeprecationWarning)
if self._container_traits_set is False:
from . import container_traits # prevent cyclic dependencies
self._container_traits_set = True
self._container_traits = container_traits.find_container_traits(
self)
self._container_traits_cache = self._container_traits
return self._container_traits
def _get_partial_name_impl(self):
return get_partial_name(self.name)
class class_t(scopedef.scopedef_t):
"""describes class definition"""
# Can be set from outside
USE_DEMANGLED_AS_NAME = True
def __init__(
self,
name='',
class_type=CLASS_TYPES.CLASS,
is_abstract=False):
"""creates class that describes C++ class definition"""
scopedef.scopedef_t.__init__(self, name)
if class_type:
assert(class_type in CLASS_TYPES.ALL)
self._class_type = class_type
self._bases = []
self._derived = []
self._is_abstract = is_abstract
self._public_members = []
self._private_members = []
self._protected_members = []
self._aliases = []
self._byte_size = 0
self._byte_align = 0
self._container_traits_cache = None
self._container_traits = None # Deprecated
self._container_traits_set = False # Deprecated
self._recursive_bases = None
self._recursive_derived = None
self._use_demangled_as_name = False
@property
def use_demangled_as_name(self):
if "GCC" in utils.xml_generator:
return class_t.USE_DEMANGLED_AS_NAME
elif "CastXML" in utils.xml_generator:
return False
@use_demangled_as_name.setter
def use_demangled_as_name(self, use_demangled_as_name):
self._use_demangled_as_name = use_demangled_as_name
def _get_name_impl(self):
if not self._name: # class with empty name
return self._name
elif self.use_demangled_as_name and self.demangled:
if not self.cache.demangled_name:
fname = declaration_utils.full_name(self.parent)
if fname.startswith('::') and \
not self.demangled.startswith('::'):
fname = fname[2:]
if self.demangled.startswith(fname):
tmp = self.demangled[len(fname):] # demangled::name
if tmp.startswith('::'):
tmp = tmp[2:]
if '<' not in tmp and '<' in self._name:
# we have template class, but for some reason demangled
# name doesn't contain any template
# This happens for std::string class, but this breaks
# other cases, because this behaviour is not consistent
self.cache.demangled_name = self._name
return self.cache.demangled_name
else:
self.cache.demangled_name = tmp
return tmp
else:
self.cache.demangled_name = self._name
return self._name
else:
return self.cache.demangled_name
else:
return self._name
def __str__(self):
name = declaration_utils.full_name(self)
if name[:2] == "::":
name = name[2:]
return "%s [%s]" % (name, self.class_type)
def _get__cmp__scope_items(self):
"""implementation details"""
return [
self.class_type,
[declaration_utils.declaration_path(base.related_class) for
base in self.bases].sort(),
[declaration_utils.declaration_path(derive.related_class) for
derive in self.derived].sort(),
self.is_abstract,
self.public_members.sort(),
self.private_members.sort(),
self.protected_members.sort()]
def __eq__(self, other):
if not scopedef.scopedef_t.__eq__(self, other):
return False
return self.class_type == other.class_type \
and [declaration_utils.declaration_path(base.related_class) for
base in self.bases].sort() \
== [declaration_utils.declaration_path(base.related_class) for
base in other.bases].sort() \
and [declaration_utils.declaration_path(derive.related_class) for
derive in self.derived].sort() \
== [declaration_utils.declaration_path(derive.related_class) for
derive in other.derived].sort() \
and self.is_abstract == other.is_abstract \
and self.public_members.sort() \
== other.public_members.sort() \
and self.private_members.sort() \
== other.private_members.sort() \
and self.protected_members.sort() \
== other.protected_members.sort()
def __hash__(self):
return hash(self.class_type)
@property
def class_type(self):
"""describes class :class:`type <CLASS_TYPES>`"""
return self._class_type
@class_type.setter
def class_type(self, new_class_type):
if new_class_type:
assert(new_class_type in CLASS_TYPES.ALL)
self._class_type = new_class_type
@property
def bases(self):
"""list of :class:`base classes <hierarchy_info_t>`"""
return self._bases
@bases.setter
def bases(self, new_bases):
self._bases = new_bases
@property
def recursive_bases(self):
"""list of all :class:`base classes <hierarchy_info_t>`"""
if self._recursive_bases is None:
to_go = self.bases[:]
all_bases = []
while to_go:
base = to_go.pop()
if base not in all_bases:
all_bases.append(base)
to_go.extend(base.related_class.bases)
self._recursive_bases = all_bases
return self._recursive_bases
@property
def derived(self):
"""list of :class:`derived classes <hierarchy_info_t>`"""
return self._derived
@derived.setter
def derived(self, new_derived):
self._derived = new_derived
@property
def recursive_derived(self):
"""list of all :class:`derive classes <hierarchy_info_t>`"""
if self._recursive_derived is None:
to_go = self.derived[:]
all_derived = []
while to_go:
derive = to_go.pop()
if derive not in all_derived:
all_derived.append(derive)
to_go.extend(derive.related_class.derived)
self._recursive_derived = all_derived
return self._recursive_derived
@property
def is_abstract(self):
"""describes whether class abstract or not"""
return self._is_abstract
@is_abstract.setter
def is_abstract(self, is_abstract):
self._is_abstract = is_abstract
@property
def public_members(self):
"""list of all public :class:`members <declarationt_>`"""
return self._public_members
@public_members.setter
def public_members(self, new_public_members):
self._public_members = new_public_members
@property
def private_members(self):
"""list of all private :class:`members <declarationt_>`"""
return self._private_members
@private_members.setter
def private_members(self, new_private_members):
self._private_members = new_private_members
@property
def protected_members(self):
"""list of all protected :class:`members <declarationt_>`"""
return self._protected_members
@protected_members.setter
def protected_members(self, new_protected_members):
self._protected_members = new_protected_members
@property
def aliases(self):
"""List of :class:`aliases <typedef_t>` to this instance"""
return self._aliases
@aliases.setter
def aliases(self, new_aliases):
self._aliases = new_aliases
@property
def byte_size(self):
"""Size of this class in bytes @type: int"""
return self._byte_size
@byte_size.setter
def byte_size(self, new_byte_size):
self._byte_size = new_byte_size
@property
def byte_align(self):
"""Alignment of this class in bytes @type: int"""
return self._byte_align
@byte_align.setter
def byte_align(self, new_byte_align):
self._byte_align = new_byte_align
def _get_declarations_impl(self):
return self.get_members()
def get_members(self, access=None):
"""
        returns a list of members according to the access type
        If access is None, then the returned list will contain all members.
        You should not modify the list content, otherwise the internal
        optimization data will stop working and may give you wrong results.
:param access: describes desired members
:type access: :class:ACCESS_TYPES
:rtype: [ members ]
"""
if access == ACCESS_TYPES.PUBLIC:
return self.public_members
elif access == ACCESS_TYPES.PROTECTED:
return self.protected_members
elif access == ACCESS_TYPES.PRIVATE:
return self.private_members
else:
all_members = []
all_members.extend(self.public_members)
all_members.extend(self.protected_members)
all_members.extend(self.private_members)
return all_members
def adopt_declaration(self, decl, access):
"""adds new declaration to the class
:param decl: reference to a :class:`declaration_t`
:param access: member access type
:type access: :class:ACCESS_TYPES
"""
if access == ACCESS_TYPES.PUBLIC:
self.public_members.append(decl)
elif access == ACCESS_TYPES.PROTECTED:
self.protected_members.append(decl)
elif access == ACCESS_TYPES.PRIVATE:
self.private_members.append(decl)
else:
raise RuntimeError("Invalid access type: %s." % access)
decl.parent = self
decl.cache.reset()
decl.cache.access_type = access
def remove_declaration(self, decl):
"""
removes decl from members list
:param decl: declaration to be removed
:type decl: :class:`declaration_t`
"""
access_type = self.find_out_member_access_type(decl)
if access_type == ACCESS_TYPES.PUBLIC:
container = self.public_members
elif access_type == ACCESS_TYPES.PROTECTED:
container = self.protected_members
        else:  # decl.cache.access_type == ACCESS_TYPES.PRIVATE
container = self.private_members
del container[container.index(decl)]
decl.cache.reset()
def find_out_member_access_type(self, member):
"""
returns member access type
:param member: member of the class
:type member: :class:`declaration_t`
:rtype: :class:ACCESS_TYPES
"""
assert member.parent is self
if not member.cache.access_type:
if member in self.public_members:
access_type = ACCESS_TYPES.PUBLIC
elif member in self.protected_members:
access_type = ACCESS_TYPES.PROTECTED
elif member in self.private_members:
access_type = ACCESS_TYPES.PRIVATE
else:
raise RuntimeError(
"Unable to find member within internal members list.")
member.cache.access_type = access_type
return access_type
else:
return member.cache.access_type
def __find_out_member_dependencies(self, access_type):
members = self.get_members(access_type)
answer = []
for mem in members:
answer.extend(mem.i_depend_on_them(recursive=True))
member_ids = set([id(m) for m in members])
for dependency in answer:
if id(dependency.declaration) in member_ids:
dependency.access_type = access_type
return answer
def i_depend_on_them(self, recursive=True):
answer = []
for base in self.bases:
answer.append(
dependency_info_t(
self,
base.related_class,
base.access_type,
"base class"))
if recursive:
for access_type in ACCESS_TYPES.ALL:
answer.extend(self.__find_out_member_dependencies(access_type))
return answer
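    # Illustrative sketch (not part of the original module): for a
    # hypothetical class ``widget`` that derives publicly from ``base`` and
    # has a private member variable of type ``helper``, iterating over
    # ``widget.i_depend_on_them()`` would yield dependency_info_t objects
    # printing roughly as:
    #
    #   declaration "widget" depends( public ) on "base"
    #   declaration "widget" depends( private ) on "helper"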
@property
def container_traits(self):
"""reference to :class:`container_traits_impl_t` or None"""
# Deprecated since 1.8.0. Will be removed in 1.9.0
warnings.warn(
"The container_traits attribute is deprecated. \n" +
"Please use the find_container_traits function from the"
"declarations module instead.",
DeprecationWarning)
if self._container_traits_set is False:
from . import container_traits # prevent cyclic dependencies
self._container_traits_set = True
self._container_traits = container_traits.find_container_traits(
self)
            self._container_traits_cache = self._container_traits
return self._container_traits
def find_copy_constructor(self):
# Deprecated since 1.8.0. Will be removed in 1.9.0
warnings.warn(
"The find_copy_constructor method is deprecated. \n" +
"Please use the find_copy_constructor function from the"
"declarations module instead.",
DeprecationWarning)
from . import type_traits_classes # prevent cyclic dependencies
return type_traits_classes.find_copy_constructor(self)
def find_trivial_constructor(self):
# Deprecated since 1.8.0. Will be removed in 1.9.0
warnings.warn(
"The find_trivial_constructor method is deprecated. \n" +
"Please use the find_trivial_constructor function from the"
"declarations module instead.",
DeprecationWarning)
from . import type_traits_classes # prevent cyclic dependencies
return type_traits_classes.find_trivial_constructor(self)
def _get_partial_name_impl(self):
from . import type_traits # prevent cyclic dependencies
if type_traits.is_std_string(self):
return 'string'
elif type_traits.is_std_wstring(self):
return 'wstring'
else:
return get_partial_name(self.name)
def find_noncopyable_vars(self):
"""returns list of all `noncopyable` variables"""
# Deprecated since 1.8.0. Will be removed in 1.9.0
warnings.warn(
"The find_noncopyable_vars method is deprecated. \n" +
"Please use the find_noncopyable_vars function from the"
"declarations module instead.",
DeprecationWarning)
from . import type_traits_classes # prevent cyclic dependencies
        return type_traits_classes.find_noncopyable_vars(self)
@property
def has_vtable(self):
"""True, if class has virtual table, False otherwise"""
# Deprecated since 1.8.0. Will be removed in 1.9.0
warnings.warn(
"The has_vtable argument is deprecated. \n" +
"Please use the has_vtable function from the declarations \n" +
"module instead.",
DeprecationWarning)
# prevent cyclic import
from . import type_traits_classes
return type_traits_classes.has_vtable(self)
@property
def top_class(self):
"""reference to a parent class, which contains this class and defined
within a namespace
if this class is defined under a namespace, self will be returned"""
curr = self
parent = self.parent
while isinstance(parent, class_t):
curr = parent
parent = parent.parent
return curr
class_types = (class_t, class_declaration_t)
class impl_details(object):
@staticmethod
def dig_declarations(depend_on_it):
# FIXME: prevent cyclic imports
from . import type_traits
if isinstance(depend_on_it, declaration.declaration_t):
return [depend_on_it]
base_type = type_traits.base_type(
type_traits.remove_alias(depend_on_it))
if isinstance(base_type, cpptypes.declarated_t):
return [base_type.declaration]
elif isinstance(base_type, cpptypes.calldef_type_t):
result = []
result.extend(impl_details.dig_declarations(base_type.return_type))
for argtype in base_type.arguments_types:
result.extend(impl_details.dig_declarations(argtype))
if isinstance(base_type, cpptypes.member_function_type_t):
result.extend(
impl_details.dig_declarations(
base_type.class_inst))
return result
return []
class dependency_info_t(object):
def __init__(self, declaration, depend_on_it, access_type=None, hint=None):
object.__init__(self)
assert isinstance(
depend_on_it,
(class_t,
cpptypes.type_t))
self._declaration = declaration
self._depend_on_it = depend_on_it
self._access_type = access_type
self._hint = hint
@property
def declaration(self):
return self._declaration
# short name
decl = declaration
@property
def depend_on_it(self):
return self._depend_on_it
@property
def access_type(self):
return self._access_type
@access_type.setter
def access_type(self, access_type):
self._access_type = access_type
def __str__(self):
return 'declaration "%s" depends( %s ) on "%s" ' \
% (self.declaration, self.access_type, self.depend_on_it)
@property
def hint(self):
"""The declaration, that report dependency can put some additional
inforamtion about dependency. It can be used later"""
return self._hint
def find_out_depend_on_it_declarations(self):
"""If declaration depends on other declaration and not on some type
this function will return reference to it. Otherwise None will be
returned
"""
return impl_details.dig_declarations(self.depend_on_it)
@staticmethod
def i_depend_on_them(decl):
"""Returns set of declarations. every item in the returned set,
depends on a declaration from the input"""
to_be_included = set()
for dependency_info in decl.i_depend_on_them():
for ddecl in dependency_info.find_out_depend_on_it_declarations():
if ddecl:
to_be_included.add(ddecl)
if isinstance(decl.parent, class_t):
to_be_included.add(decl.parent)
return to_be_included
@staticmethod
def we_depend_on_them(decls):
"""Returns set of declarations. every item in the returned set,
depends on a declaration from the input"""
to_be_included = set()
for decl in decls:
to_be_included.update(dependency_info_t.i_depend_on_them(decl))
return to_be_included
| apache-2.0 |
kybriainfotech/iSocioCRM | openerp/tools/sql.py | 455 | 1173 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
def drop_view_if_exists(cr, viewname):
cr.execute("DROP view IF EXISTS %s CASCADE" % (viewname,))
cr.commit()
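# Usage sketch (assumption, not from the original file): typically called
# from a model's init() hook before re-creating a SQL view, with ``cr``
# being the usual database cursor:
#
#   drop_view_if_exists(cr, 'report_sale_order')
#   cr.execute("CREATE VIEW report_sale_order AS (SELECT ...)")
#
# Note that ``viewname`` is interpolated directly into the SQL string, so it
# must come from trusted code, never from user input.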
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Saethlin/astrotools | viewfits.py | 1 | 30427 | #!/usr/bin/env python
"""
A lightweight python-based FITS viewer.
The controls should be self-explanatory. If they are not, press ctrl+h for a
stop-gap and submit an issue. Controls ought to be obvious; help me make them
so.
This is a program for looking at images, not for doing analysis.
TODO:
Doesn't quite display correct pixel's value
HDU dropdown menu
Prevent image value display from jumping around
Investigate source of pointy histogram
Sliders and lines don't quite line up with edges of the plot
"""
import os
import argparse
import bisect
import re
import tkinter as tk
from tkinter import filedialog
from tkinter import font as tkFont
import numpy as np
from astropy.io import fits
from PIL import Image, ImageTk
MYNAME = 'viewfits 0.9.1'
EXTENSIONS = ['fit', 'fits', 'FIT', 'FITS']
THUMBSIZE = 200
HEIGHT = 500
WIDTH = 800
HISTOGRAM_HEIGHT = 50
class Viewer(tk.Frame):
"""
    Tkinter-based FITS viewer frame: a pannable, zoomable image display
    with a minimap, a histogram with white/black clipping sliders, and a
    directory browser for opening files.
"""
def __init__(self, parent, open_file=None):
"""
        Initialize everything
"""
tk.Frame.__init__(self, parent)
self.parent = parent
self.parent.title(MYNAME)
        # Initialize a master frame that holds everything
self.frame = tk.Frame(self.parent, bg='')
self.frame.pack(fill=tk.BOTH, expand=1)
self.imgframe = tk.Frame(self.frame)
self.imgframe.pack(fill=tk.BOTH, expand=1, side=tk.LEFT, anchor='nw')
# Label for the main image display
self.main_image = tk.Canvas(self.imgframe, bg='black', cursor='tcross')
self.main_image.pack(fill=tk.BOTH, expand=1, anchor='nw')
self.main_image.image = None
self.main_image.photo = None
# Initialize a canvas to hold the histogram image
self.histogram = tk.Canvas(self.imgframe, bg='black',
height=HISTOGRAM_HEIGHT, highlightthickness=0)
self.histogram.pack(fill=tk.X)
self.histogram.image = None
self.histogram.photo = None
# Sliders for white/black clipping
self.sliders = tk.Canvas(self.imgframe, bg='gray', height=10,
highlightthickness=0)
self.sliders.pack(fill=tk.X)
        # Initialize a frame to the right of the canvas that holds the minimap,
# and the directory navigation (dirlist)
self.sideframe = tk.Frame(self.frame, width=THUMBSIZE)
self.sideframe.pack(fill=tk.Y, side=tk.RIGHT, anchor='ne')
        # Initialize the minimap that shows the entire image, zoomed out
self.mini_label = tk.Label(self.sideframe, width=THUMBSIZE,
height=THUMBSIZE, bg='black')
self.mini_label.pack(side=tk.TOP)
self.mini_label.photo = None
# Add a label to display the cursor location and value:
self.cursor_info = tk.Frame(self.sideframe)
self.cursor_info.pack(fill=tk.X)
self.cursor_position = tk.Label(self.cursor_info, text='Cursor: ?,?')
self.cursor_position.pack(side=tk.LEFT)
self.cursor_value = tk.Label(self.cursor_info, text='Val: ?')
self.cursor_value.pack(side=tk.RIGHT)
        # Initialize the directory navigation setup with a listbox and scrollbar
self.scrollbar = tk.Scrollbar(self.sideframe, orient=tk.VERTICAL)
self.scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
self.dirlist = tk.Listbox(self.sideframe, selectmode=tk.SINGLE,
activestyle='none', borderwidth=0,
highlightthickness=0,
yscrollcommand=self.scrollbar.set)
self.dirlist.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.scrollbar.config(command=self.dirlist.yview)
# Add a button to display the image header
self.headerbutton = tk.Button(self.sideframe, text='Display header', command=self.show_header)
self.headerbutton.pack(side=tk.BOTTOM, fill=tk.X)
self.bind_all('<Control-o>', self.open_dialog)
# Controls for navigating the list of current directory contents
self.bind_all('<Right>', self.open_item)
self.bind_all('<Return>', self.open_item)
self.bind_all('<BackSpace>', self.back)
self.bind_all('<Left>', self.back)
self.bind_all('<Up>', self.up)
self.bind_all('<Down>', self.down)
self.bind_all('<Key>', self.move_to_key)
self.dirlist.bind('<<ListboxSelect>>', self.click_list)
self.dirlist.bind('<Double-Button-1>', self.open_item)
self.bind_all('<Escape>', quit)
self.parent.protocol("WM_DELETE_WINDOW", self.parent.quit)
self.bind_all('<Control-h>', self.show_help)
# Defaults
self.save_dir = os.getcwd()
self.savename = ''
self.filename = ''
self.files = []
self.fileindex = 0
self.selection = 0
self.imagedata = None
self.fitted = False
self.zoom = 1.
self.ypos, self.xpos = 0., 0.
self.last_y, self.last_x = 0., 0.
self.last_dims = 0., 0.
self.last_width = 0
self.black_level = 0
self.white_level = 0
self.help_window = None
self.header_window = None
self.header_text = None
self.h, self.w = 0, 0
self.updating = False
self.mini_label.photo = ImageTk.PhotoImage(Image.fromarray(np.zeros((THUMBSIZE, THUMBSIZE))))
self.mini_label.config(image=self.mini_label.photo)
self.main_image.photo = ImageTk.PhotoImage(Image.fromarray(np.zeros((HEIGHT-HISTOGRAM_HEIGHT, WIDTH-THUMBSIZE))))
self.main_image.itemconfig(self.main_image.image,
image=self.main_image.photo)
self.main_image.config(bg='#f4f4f4')
self.mini_label.config(bg='#f4f4f4')
self.refresh_dirlist(repeat=True)
if open_file is not None:
self.load_image(open_file)
def keybindings(self):
"""
        Assign all keybindings that would raise exceptions if no image is
        loaded
"""
self.bind_all('<Configure>', self.on_resize)
self.bind_all('<Control-r>', self.renew_scaling)
self.bind_all('<Control-f>', self.zoom_to_fit)
self.bind_all('<Control-s>', self.save_image)
self.bind_all('<MouseWheel>', self.mousewheel_windows)
        self.bind_all('<Button-4>', self.mousewheelup_linux)
        self.bind_all('<Button-5>', self.mousewheeldown_linux)
self.bind_all('=', self.mousewheelup_linux)
self.mini_label.bind('<Button-1>', self.click_thumbnail)
self.mini_label.bind('<B1-Motion>', self.click_thumbnail)
self.main_image.bind('<Button-1>', self.click_image)
self.main_image.bind('<B1-Motion>', self.move_image)
self.main_image.bind('<ButtonRelease-1>', self.release_image)
self.main_image.bind('<Motion>', self.update_cursor_info)
self.sliders.bind('<Button-1>', self.click_slider)
self.sliders.bind('<B1-Motion>', self.move_slider)
def update_cursor_info(self, event):
"""
Display the cursor location and image value at that location
"""
y = int(round(self.ypos + event.y/self.zoom - 1))
x = int(round(self.xpos + event.x/self.zoom - 1))
if y < self.imagedata.shape[0] and x < self.imagedata.shape[1]:
self.cursor_position.configure(text='Cursor: '+str(y)+', '+str(x))
self.cursor_value.configure(text='Val: '+str(round(self.imagedata[y, x], 1)))
    # This needs a rework to accommodate full name typing
def move_to_key(self, event):
"""
Select the first entry in dirlist that matches a key press
"""
if self.selection < len(self.files)-1 and (
self.files[self.selection][0].lower() == event.char) and (
self.files[self.selection+1][0].lower() == event.char):
self.down(None)
else:
for f in range(len(self.files)):
if self.files[f][0].lower() == event.char:
self.selection = f
self.dirlist.selection_clear(0, tk.END)
self.dirlist.selection_set(self.selection)
self.dirlist.see(self.selection)
return
if f == len(self.files)-1:
return
def mousewheel_windows(self, event):
"""
Zoom in or out on a windows os, if possible
"""
if self.fitted:
if event.delta < 0:
self.zoom = 2.**np.floor(np.log2(self.zoom))
else:
self.zoom = 2.**np.ceil(np.log2(self.zoom))
self.fitted = False
elif event.delta < 0 and self.zoom > 1/8:
self.zoom /= 2
self.ypos -= self.h/2
self.xpos -= self.w/2
elif event.delta > 0 and self.zoom < 8:
self.zoom *= 2
self.ypos += self.h/4
self.xpos += self.w/4
self.redraw_image()
self.redraw_minimap()
def mousewheelup_linux(self, event):
"""
Zoom in, if possible
"""
if self.zoom < 16:
if self.fitted:
self.fitted = False
self.zoom = 2.**np.floor(np.log2(self.zoom))
self.zoom *= 2
self.ypos += self.h/4
self.xpos += self.w/4
self.redraw_image()
self.redraw_minimap()
def mousewheeldown_linux(self, event):
"""
Zoom out, if possible
"""
if self.zoom > 1/16:
if self.fitted:
self.fitted = False
self.zoom = 2.**np.ceil(np.log2(self.zoom))
self.zoom /= 2
            self.ypos -= self.h/2
            self.xpos -= self.w/2
self.redraw_image()
self.redraw_minimap()
def zoom_to_fit(self, event):
"""
Adjust zoom to fit the entire image in the current window
"""
self.zoom = min(self.main_image.winfo_height()/self.imagedata.shape[0],
self.main_image.winfo_width()/self.imagedata.shape[1])
self.fitted = True
self.redraw_image()
self.redraw_minimap()
def click_thumbnail(self, event):
"""
Center view on a location clicked in the minimap
"""
self.ypos = event.y / self.minidata.shape[0] * \
self.imagedata.shape[0]-self.h/2
self.xpos = event.x / self.minidata.shape[1] * \
self.imagedata.shape[1]-self.w/2
self.redraw_image()
self.redraw_minimap()
# These three functions enable click-and-drag motion of an image
def click_image(self, event):
"""
Keep track of the current cursor position
"""
self.last_y = event.y
self.last_x = event.x
def move_image(self, event):
"""
Move the image view position
"""
last_ypos = self.ypos
last_xpos = self.xpos
self.ypos += (self.last_y-event.y)/self.zoom
self.xpos += (self.last_x-event.x)/self.zoom
self.last_y = event.y
self.last_x = event.x
self.check_view()
moved = (last_ypos != self.ypos) or (last_xpos != self.xpos)
if moved:
self.redraw_image()
self.redraw_minimap()
def release_image(self, event):
"""
Keep track of the image view position for next click-and-drag
"""
self.last_y = self.ypos+self.h/2
self.last_x = self.xpos+self.w/2
# open_item, up, down, back, click_list handle interaction with dirlist
def open_item(self, event):
"""
Handle opening an entry in the dirlist, display image and cd for dirs
"""
fil = self.files[self.selection]
if fil.rsplit('.', 1)[-1] in EXTENSIONS:
self.load_image(fil)
elif os.path.isdir(fil):
os.chdir(fil)
self.refresh_dirlist()
def up(self, event):
"""
Select the item above the current selection in the dirlist
"""
if self.selection > 0:
self.selection -= 1
self.dirlist.selection_clear(0, tk.END)
self.dirlist.selection_set(self.selection)
self.dirlist.see(self.selection)
def down(self, event):
"""
Select the item below the current selection in the dirlist
"""
if self.selection < len(self.files)-1:
self.selection += 1
self.dirlist.selection_clear(0, tk.END)
self.dirlist.selection_set(self.selection)
self.dirlist.see(self.selection)
def back(self, event):
"""
Back up a directory
"""
os.chdir('..')
self.refresh_dirlist()
def click_list(self, event):
"""
Highlight the currently selected item in the directory list
"""
self.selection = self.dirlist.curselection()[0]
self.dirlist.selection_clear(0, tk.END)
self.dirlist.selection_set(self.selection)
self.dirlist.activate(self.selection)
def reload_dirlist(self):
"""
Update the dirlist to the contents of the current directory
"""
try:
new_files = [f for f in os.listdir('.') if (
f.rsplit('.', 1)[-1] in EXTENSIONS) or (
os.path.isdir(f) and os.access(f, os.R_OK)) and
not f.startswith('.')]
new_files.sort(key=str.lower)
new_files.append('..')
removals = [f for f in self.files if f not in new_files]
additions = [f for f in new_files if f not in self.files]
for fil in removals:
remove_index = self.files.index(fil)
self.dirlist.delete(remove_index)
self.files.remove(fil)
for fil in additions:
insert_index = bisect.bisect(self.files, fil)
if insert_index == len(self.files):
insert_index -= 1
self.files.insert(insert_index, fil)
self.dirlist.insert(insert_index, fil)
        except OSError:  # WindowsError is Windows-only; OSError covers both
pass
finally:
self.parent.after(500, self.reload_dirlist)
def refresh_dirlist(self, repeat=False):
"""
Display entries in the current directory in the directory list
"""
self.dirlist.delete(0, tk.END)
self.files = [f for f in os.listdir('.') if (
f.rsplit('.', 1)[-1] in EXTENSIONS) or (
os.path.isdir(f) and os.access(f, os.R_OK)) and
not f.startswith('.')]
self.files.sort(key=str.lower)
self.files.append('..')
for f in self.files:
self.dirlist.insert(tk.END, f)
self.selection = 0
self.dirlist.selection_clear(0, tk.END)
self.dirlist.selection_set(0)
if repeat:
self.parent.after(500, self.reload_dirlist)
def open_dialog(self, event):
self.filename = filedialog.askopenfilename(
filetypes=[('FITS files', '*.fit;*.fits;*.FIT;*.FITS'),
('all files', '*')],
initialdir=os.getcwd())
if self.filename not in ('', ()):
os.chdir(os.path.dirname(self.filename))
self.load_image(self.filename)
else:
return
def load_image(self, filename):
"""
        Read an image and make sure the display and interface are initialized
"""
if not os.path.isabs(filename):
self.filename = os.path.join(os.getcwd(), filename)
else:
self.filename = filename
# Set backgrounds to the same gray as the default frame background
self.main_image.config(bg='#f4f4f4')
self.mini_label.config(bg='#f4f4f4')
# Load image data and set defaults
temp_hdu = fits.open(self.filename)[0]
temp_data = temp_hdu.data
if temp_data is None or temp_data.ndim != 2:
raise IOError('Invalid fits file')
self.header_text = str(temp_hdu.header).strip()
        self.header_text = re.sub("(.{80})", "\\1\n", self.header_text, 0, re.DOTALL).strip()
self.imagedata = temp_data.astype(float)
self.black_level = np.percentile(self.imagedata.ravel()[::100], 10.)
self.white_level = np.percentile(self.imagedata.ravel()[::100], 99.9)
self.zoom = 1.
self.ypos = 0
self.xpos = 0
self.check_view()
self.last_dims = (self.w, self.h)
self.last_width = self.w
self.last_y = self.ypos+self.h//2
self.last_x = self.xpos+self.w//2
# Generate a default save name
self.savename = os.path.basename(self.filename).rsplit('.', 1)[0]+'.png'
# Display the filename of the current image in the title bar
self.parent.title(MYNAME+' ('+self.filename+')')
# Build the histogram image
self.make_histogram_fig()
# Configure the image display frame and canvases
if self.histogram.image is None:
self.histogram.image = self.histogram.create_image(0, 0, image=None, anchor='nw')
self.main_image.image = self.main_image.create_image(0, 0, image=None, anchor='nw')
self.xpos = (self.imagedata.shape[1]-self.w)//2
self.ypos = (self.imagedata.shape[0]-self.h)//2
self.clip_image()
self.redraw_image()
self.redraw_minimap()
self.redraw_histogram()
        # Make sure keybindings are initialized
self.keybindings()
def renew_scaling(self, event):
"""
Set a reasonable white and black clipping level based on percentile
"""
self.black_level = np.percentile(self.imagedata, 10.)
self.white_level = np.percentile(self.imagedata, 99.9)
self.clip_image()
self.redraw_image()
self.redraw_minimap()
self.redraw_histogram()
def check_view(self):
"""
Check bounds on view position and compute the view height and width
"""
# Compute view height and width
self.h = int((self.main_image.winfo_height()-2)/self.zoom)
self.w = int((self.main_image.winfo_width()-2)/self.zoom)
# Prevent overscrolling
if self.ypos < 0:
self.ypos = 0
if self.ypos + self.h > self.imagedata.shape[0]:
self.ypos = self.imagedata.shape[0]-self.h
if self.xpos < 0:
self.xpos = 0
if self.xpos + self.w > self.imagedata.shape[1]:
self.xpos = self.imagedata.shape[1]-self.w
# Check for oversized window
if self.h >= self.imagedata.shape[0]:
self.ypos = 0
self.h = self.imagedata.shape[0]
if self.w >= self.imagedata.shape[1]:
self.xpos = 0
self.w = self.imagedata.shape[1]
def on_resize(self, event):
"""
Recompute the image and minimap display.
"""
self.check_view()
# If triggered by a configure event, make sure to only redraw what
# needs to be redrawn
if (self.last_dims[0] != self.w) or (self.last_dims[1] != self.h):
self.redraw_image()
self.redraw_minimap()
self.redraw_histogram()
elif self.last_width != self.histogram.winfo_width():
self.redraw_histogram()
self.last_pos = self.ypos, self.xpos
self.last_dims = self.w, self.h
self.last_width = self.histogram.winfo_width()
def clip_image(self):
"""
Re-clip the currently displayed image
"""
self.clipped = self.imagedata.clip(self.black_level, self.white_level)
self.clipped -= self.black_level
self.clipped *= 255/(self.white_level-self.black_level)
self.clipped = self.clipped.astype(np.uint8, copy=True)
# Rebuild the data used to draw the minimap
mini_zoom = min(THUMBSIZE/self.clipped.shape[0],
THUMBSIZE/self.clipped.shape[1])
mini_shape = (np.array(self.clipped.shape[::-1]) * mini_zoom
).astype(int)
self.clipped = Image.fromarray(self.clipped)
mini = self.clipped.resize(mini_shape, Image.NEAREST)
self.minidata = np.dstack(3*(mini,))
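    # Worked example (illustrative, not part of the original module): with
    # black_level=100 and white_level=300, a pixel of value 250 is clipped
    # to 250, shifted to 150, then scaled by 255/200 to ~191, i.e. mapped
    # linearly onto the displayable 0..255 range. Values at or below 100
    # become 0; values at or above 300 become 255.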
def redraw_image(self):
"""
Re-render only the currently displayed image for canvas
"""
self.check_view()
# Crop the image to the displayed section
crop_region = (int(self.xpos), int(self.ypos), int(self.xpos+self.w),
int(self.ypos+self.h))
imgslice = self.clipped.crop(crop_region)
imgslice.load()
newsize = tuple([int(self.zoom*x) for x in imgslice.size])
resized = imgslice.resize(newsize, Image.NEAREST)
self.main_image.photo = ImageTk.PhotoImage(resized)
self.main_image.itemconfig(self.main_image.image,
image=self.main_image.photo)
def redraw_minimap(self):
"""
Re-render only the minimap
"""
mod = self.minidata.copy()
# Draw the minimap with a green square at the bounds of the view
top = int(self.ypos/self.imagedata.shape[0]*self.minidata.shape[0])
if top < 0:
top = 0
bot = int((self.ypos-1+self.h) / self.imagedata.shape[0] *
self.minidata.shape[0])
if bot > THUMBSIZE-2:
bot = THUMBSIZE-2
lef = int(self.xpos/self.imagedata.shape[1]*self.minidata.shape[1])
if lef < 0:
lef = 0
rig = int((self.xpos-1+self.w) / self.imagedata.shape[1] *
self.minidata.shape[1])
if rig > THUMBSIZE-2:
rig = THUMBSIZE-2
mod[top, lef:rig+1, 1] = 255
mod[bot, lef:rig+1, 1] = 255
mod[top:bot+1, lef, 1] = 255
mod[top:bot+1, rig, 1] = 255
self.mini_label.photo = ImageTk.PhotoImage(Image.fromarray(mod))
self.mini_label.config(image=self.mini_label.photo)
def click_slider(self, event):
"""
Note the current slider position and which was grabbed, prefer white
"""
if abs(self.white_x - event.x) < 5:
self.start_white_x = event.x
self.grabbed = 'white'
elif abs(self.black_x - event.x) < 5:
self.start_black_x = event.x
self.grabbed = 'black'
else:
self.grabbed = None
def move_slider(self, event):
"""
Change clipping based on cursor x position change, update live
"""
xmin, xmax = self.databounds
# Convert shift to a value in pixel brightness
if self.grabbed == 'white':
shift = self.start_white_x - event.x
self.white_level += shift/self.histogram.winfo_width()*(xmin-xmax)
self.start_white_x = event.x
# Prevent slider overlap
if self.white_level <= self.black_level:
self.white_level = self.black_level+1
self.start_white_x = self.black_x
# Prevent slider running off display
if self.white_level > xmax:
self.white_level = xmax
self.start_white_x = self.histogram.winfo_width()
elif self.grabbed == 'black':
shift = self.start_black_x - event.x
self.black_level += shift/self.histogram.winfo_width()*(xmin-xmax)
self.start_black_x = event.x
if self.black_level >= self.white_level:
self.black_level = self.white_level-1
self.start_black_x = self.white_x
if self.black_level < xmin:
self.black_level = xmin
self.start_black_x = 0
self.clip_image()
self.redraw_histogram()
self.redraw_image()
self.redraw_minimap()
def redraw_histogram(self):
"""
Re-render the histogram and the white/black clipping lines
"""
xmin, xmax = self.databounds
hist_resized = self.hist_full.resize((self.main_image.winfo_width(),
HISTOGRAM_HEIGHT), Image.NEAREST)
self.histogram.photo = ImageTk.PhotoImage(hist_resized)
self.histogram.itemconfig(self.histogram.image,
image=self.histogram.photo)
# Draw sliders
self.sliders.delete('all')
self.black_x = (self.black_level-xmin)/(xmax-xmin) * \
self.histogram.winfo_width()
self.sliders.create_line(self.black_x, -1, self.black_x, 12,
arrow=tk.FIRST, arrowshape=(11, 10, 4))
self.white_x = (self.white_level-xmin)/(xmax-xmin) * \
self.histogram.winfo_width()
self.sliders.create_line(self.white_x, -1, self.white_x, 10,
arrow=tk.FIRST, arrowshape=(11, 10, 4),
fill='white')
# Slider lines
        self.histogram.delete('bline', 'wline')
self.histogram.create_line(self.black_x, 0, self.black_x, 50,
fill='blue', tag='bline')
self.histogram.create_line(self.white_x, 0, self.white_x, 50,
fill='blue', tag='wline')
def make_histogram_fig(self):
"""
Plot a histogram of the image data with axes scaled to enhance features
"""
data = self.imagedata.ravel()
# Clipping data makes the histogram look nice but the sliders useless, so just clip the histogram
lower_bound, upper_bound = np.percentile(data[::100], [0.01, 99.95])
self.databounds = lower_bound, upper_bound
mask = (data > lower_bound) & (data < upper_bound)
data = data[mask]
# Rescale data
data -= data.min()
data /= data.max()
data *= self.parent.winfo_screenwidth()
histogram = np.bincount(data.astype(int))[:-1]
left = np.roll(histogram, -1)
right = np.roll(histogram, 1)
peak_mask = (histogram > left) & (histogram > right) & (left > 0) & (right > 0)
histogram[peak_mask] = ((left + right)/2)[peak_mask]
histogram = histogram / histogram.max() * HISTOGRAM_HEIGHT
# Manual plotting
coords = np.arange(0, HISTOGRAM_HEIGHT)[::-1]
coords = np.repeat(coords[:, np.newaxis], self.parent.winfo_screenwidth(), axis=1)
histogram = coords > histogram[np.newaxis, :]
histogram = (histogram * 255).astype(np.uint8)
histogram = np.repeat(histogram[:, :, np.newaxis], 3, axis=2)
self.hist_full = Image.fromarray(histogram)
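    # Illustrative sketch (not part of the original module) of the peak
    # suppression step above: an isolated single-bin spike is replaced by
    # the mean of its neighbours before scaling, e.g.
    #
    #   >>> h = np.array([2, 2, 9, 2, 2])
    #   >>> l, r = np.roll(h, -1), np.roll(h, 1)
    #   >>> m = (h > l) & (h > r) & (l > 0) & (r > 0)
    #   >>> h[m] = ((l + r) // 2)[m]
    #   >>> h
    #   array([2, 2, 2, 2, 2])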
def save_image(self, event):
"""
Open a file dialog and save the image as currently displayed.
"""
path = filedialog.asksaveasfilename(defaultextension='.png',
initialdir=self.save_dir,
initialfile=self.savename)
# Update defaults if a selection was made
if path != '':
self.savename = os.path.basename(path)
self.save_dir = os.path.dirname(path)
self.main_image.photo.save(path)
def show_header(self, *args):
if (self.header_window is None) and (self.header_text is not None):
self.header_window = tk.Toplevel()
self.header_window.title(os.path.basename(self.filename) + ' header')
self.header_window.resizable(0, 0)
self.header_message = tk.Message(self.header_window, text=self.header_text, font=tkFont.Font(family='Courier', size=12))
self.header_message.pack()
self.parent.bind_all('<Escape>', self.close_header)
self.header_window.protocol("WM_DELETE_WINDOW", self.close_header)
def close_header(self, *args):
self.parent.bind_all('<Escape>', quit)
self.header_window.destroy()
self.header_window = None
def close_help(self, *args):
"""
Remove the help window
"""
self.parent.bind_all('<Escape>', quit)
self.help_window.destroy()
self.help_window = None
# Add a scrollbar and allow resize
def show_help(self, event):
"""
Display a help window with all keybindings
"""
if self.help_window is None:
self.help_window = tk.Toplevel()
self.help_window.title('viewfits help')
self.help_window.resizable(0, 0)
commands = ['open a file with open file dialog',
'save the currently displayed image',
'auto-adjust white and black levels for current image',
'zoom to fit current window size',
'display the help/commands dialog',
'adjust zoom level',
'nagivate current directory',
'open file or change directory to current selection',
'back up to parent directory',
'quit']
keys = ['ctrl + o', 'ctrl + s', 'ctrl + r', 'ctrl + f', 'ctrl + h',
'mousewheel', 'up/down', 'right/enter',
'left/backspace', 'Esc']
self.keys = tk.Message(self.help_window, text='\n\n'.join(keys))
self.keys.pack(side=tk.RIGHT)
self.commands = tk.Message(self.help_window,
text='\n\n'.join(commands), width=400)
self.commands.pack(side=tk.LEFT)
self.parent.bind_all('<Escape>', self.close_help)
self.help_window.protocol("WM_DELETE_WINDOW", self.close_help)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('filename', nargs='?',
help='filename or path to file to display')
args = parser.parse_args()
root = tk.Tk()
root.geometry(str(WIDTH)+'x'+str(HEIGHT))
Viewer(root, args.filename)
root.mainloop()
if __name__ == "__main__":
main()
| mit |
lagopus/ryu-lagopus-ext | ryu/services/protocols/bgp/api/base.py | 4 | 7231 | # Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Public API for BGPSpeaker.
This API can be used by various services like RPC, CLI, IoC, etc.
"""
from __future__ import absolute_import
import logging
import traceback
from ryu.services.protocols.bgp.base import add_bgp_error_metadata
from ryu.services.protocols.bgp.base import API_ERROR_CODE
from ryu.services.protocols.bgp.base import BGPSException
from ryu.services.protocols.bgp.core_manager import CORE_MANAGER
from ryu.services.protocols.bgp.rtconf.base import get_validator
from ryu.services.protocols.bgp.rtconf.base import MissingRequiredConf
from ryu.services.protocols.bgp.rtconf.base import RuntimeConfigError
LOG = logging.getLogger('bgpspeaker.api.base')
# Various constants used in API calls
ROUTE_DISTINGUISHER = 'route_dist'
PREFIX = 'prefix'
NEXT_HOP = 'next_hop'
VPN_LABEL = 'label'
API_SYM = 'name'
ORIGIN_RD = 'origin_rd'
ROUTE_FAMILY = 'route_family'
EVPN_ROUTE_TYPE = 'route_type'
EVPN_ESI = 'esi'
EVPN_ETHERNET_TAG_ID = 'ethernet_tag_id'
REDUNDANCY_MODE = 'redundancy_mode'
MAC_ADDR = 'mac_addr'
IP_ADDR = 'ip_addr'
IP_PREFIX = 'ip_prefix'
GW_IP_ADDR = 'gw_ip_addr'
MPLS_LABELS = 'mpls_labels'
TUNNEL_TYPE = 'tunnel_type'
EVPN_VNI = 'vni'
PMSI_TUNNEL_TYPE = 'pmsi_tunnel_type'
FLOWSPEC_FAMILY = 'flowspec_family'
FLOWSPEC_RULES = 'rules'
FLOWSPEC_ACTIONS = 'actions'
# API call registry
_CALL_REGISTRY = {}
@add_bgp_error_metadata(code=API_ERROR_CODE,
sub_code=1,
def_desc='Unknown API error.')
class ApiException(BGPSException):
pass
@add_bgp_error_metadata(code=API_ERROR_CODE,
sub_code=2,
def_desc='API symbol or method is not known.')
class MethodNotFound(ApiException):
pass
@add_bgp_error_metadata(code=API_ERROR_CODE,
sub_code=3,
def_desc='Error related to BGPS core not starting.')
class CoreNotStarted(ApiException):
pass
def register(**kwargs):
"""Decorator for registering API function.
Does not do any check or validation.
"""
def decorator(func):
_CALL_REGISTRY[kwargs.get(API_SYM, func.__name__)] = func
return func
return decorator
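# Usage sketch (assumption, not from the original file): registering a
# trivial API call under an explicit symbol name. ``my_api.echo`` is a
# hypothetical symbol used only for illustration.
#
#   @register(name='my_api.echo')
#   def echo(**kwargs):
#       return kwargs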
class RegisterWithArgChecks(object):
"""Decorator for registering API functions.
Does some argument checking and validation of required arguments.
"""
def __init__(self, name, req_args=None, opt_args=None):
self._name = name
if not req_args:
req_args = []
self._req_args = req_args
if not opt_args:
opt_args = []
self._opt_args = opt_args
self._all_args = (set(self._req_args) | set(self._opt_args))
def __call__(self, func):
"""Wraps given function and registers it as API.
Returns original function.
"""
def wrapped_fun(**kwargs):
"""Wraps a function to do validation before calling actual func.
Wraps a function to take key-value args. only. Checks if:
1) all required argument of wrapped function are provided
2) no extra/un-known arguments are passed
3) checks if validator for required arguments is available
4) validates required arguments
5) if validator for optional arguments is registered,
validates optional arguments.
Raises exception if no validator can be found for required args.
"""
# Check if we are missing arguments.
if not kwargs and len(self._req_args) > 0:
raise MissingRequiredConf(desc='Missing all required '
'attributes.')
# Check if we have unknown arguments.
given_args = set(kwargs.keys())
unknown_attrs = given_args - set(self._all_args)
if unknown_attrs:
raise RuntimeConfigError(desc=('Unknown attributes %r' %
unknown_attrs))
# Check if required arguments are missing
missing_req_args = set(self._req_args) - given_args
if missing_req_args:
conf_name = ', '.join(missing_req_args)
raise MissingRequiredConf(conf_name=conf_name)
#
# Prepare to call wrapped function.
#
# Collect required arguments in the order asked and validate it.
req_values = []
for req_arg in self._req_args:
req_value = kwargs.get(req_arg)
# Validate required value.
validator = get_validator(req_arg)
if not validator:
raise ValueError('No validator registered for function=%s'
' and arg=%s' % (func, req_arg))
validator(req_value)
req_values.append(req_value)
# Collect optional arguments.
opt_items = {}
for opt_arg, opt_value in kwargs.items():
if opt_arg in self._opt_args:
# Validate optional value.
# Note: If no validator registered for optional value,
# skips validation.
validator = get_validator(opt_arg)
if validator:
validator(opt_value)
opt_items[opt_arg] = opt_value
# Call actual function
return func(*req_values, **opt_items)
# Register wrapped function
_CALL_REGISTRY[self._name] = wrapped_fun
return func
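# Usage sketch (assumption, not from the original file): the hypothetical
# function below would be invokable via call('my_api.add_thing', name=...),
# with ``name`` required and ``description`` optional. Note that a validator
# must already be registered (see get_validator) for every required
# argument, otherwise the wrapped function raises ValueError.
#
#   @RegisterWithArgChecks(name='my_api.add_thing',
#                          req_args=['name'],
#                          opt_args=['description'])
#   def add_thing(name, description=None):
#       return {'name': name, 'description': description}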
def is_call_registered(call_name):
return call_name in _CALL_REGISTRY
def get_call(call_name):
return _CALL_REGISTRY.get(call_name)
def call(symbol, **kwargs):
"""Calls/executes BGPS public API identified by given symbol and passes
given kwargs as param.
"""
LOG.info("API method %s called with args: %s", symbol, str(kwargs))
# TODO(PH, JK) improve the way api function modules are loaded
from . import all # noqa
if not is_call_registered(symbol):
message = 'Did not find any method registered by symbol %s' % symbol
raise MethodNotFound(message)
if not symbol.startswith('core') and not CORE_MANAGER.started:
raise CoreNotStarted(desc='CoreManager is not active.')
call = get_call(symbol)
try:
return call(**kwargs)
except BGPSException as r:
LOG.error(traceback.format_exc())
raise r
except Exception as e:
LOG.error(traceback.format_exc())
raise ApiException(desc=str(e))
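# Usage sketch (assumption, not from the original file): invoking a
# registered symbol with keyword arguments; 'my_api.echo' is hypothetical.
#
#   result = call('my_api.echo', key='value')
#
# Unknown symbols raise MethodNotFound, and any symbol that does not start
# with 'core' raises CoreNotStarted until CORE_MANAGER is active.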
| apache-2.0 |
montoyjh/pymatgen | pymatgen/io/feff/inputs.py | 1 | 32751 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines classes for reading/manipulating/writing the main sections
of the FEFF input file (feff.inp), namely HEADER, ATOMS, POTENTIAL and the
program control tags.
XANES and EXAFS input files are available, for the non-spin case, at this time.
"""
import re
import warnings
from operator import itemgetter
from tabulate import tabulate
import numpy as np
from monty.io import zopen
from monty.json import MSONable
from pymatgen import Structure, Lattice, Element, Molecule
from pymatgen.io.cif import CifParser
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.io_utils import clean_lines
from pymatgen.util.string import str_delimited
__author__ = "Alan Dozier, Kiran Mathew"
__credits__ = "Anubhav Jain, Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0.3"
__maintainer__ = "Alan Dozier"
__email__ = "[email protected]"
__status__ = "Beta"
__date__ = "April 7, 2013"
# **Non-exhaustive** list of valid Feff.inp tags
VALID_FEFF_TAGS = ("CONTROL", "PRINT", "ATOMS", "POTENTIALS", "RECIPROCAL",
"REAL", "MARKER", "LATTICE", "TITLE", "RMULTIPLIER",
"SGROUP", "COORDINATES", "EQUIVALENCE", "CIF", "CGRID",
"CFAVERAGE", "OVERLAP", "EXAFS", "XANES", "ELNES", "EXELFS",
"LDOS", "ELLIPTICITY", "MULTIPOLE", "POLARIZATION",
"RHOZZP", "DANES", "FPRIME", "NRIXS", "XES", "XNCD",
"XMCD", "XNCDCONTROL", "END", "KMESH", "PRINT", "EGRID",
"DIMS", "AFOLP", "EDGE", "COMPTON", "DANES",
"FPRIME" "MDFF", "HOLE", "COREHOLE", "S02", "CHBROAD",
"EXCHANGE", "FOLP", "NOHOLE", "RGRID", "SCF",
"UNFREEZEF", "CHSHIFT", "DEBYE",
"INTERSTITIAL", "CHWIDTH", "EGAP", "EPS0", "EXTPOT",
"ION", "JUMPRM", "EXPOT", "SPIN", "LJMAX", "LDEC", "MPSE",
"PLASMON", "RPHASES", "RSIGMA", "PMBSE", "TDLDA", "FMS",
"DEBYA", "OPCONS", "PREP", "RESTART", "SCREEN", "SETE",
"STRFACTORS", "BANDSTRUCTURE", "RPATH", "NLEG", "PCRITERIA",
"SYMMETRY", "SS", "CRITERIA", "IORDER", "NSTAR", "ABSOLUTE",
"CORRECTIONS", "SIG2", "SIG3", "MBCONV", "SFCONV", "RCONV",
"SELF", "SFSE", "MAGIC", "TARGET", "STRFAC")
class Header(MSONable):
"""
Creates Header for the FEFF input file.
Has the following format::
* This feff.inp file generated by pymatgen, www.materialsproject.org
TITLE comment:
TITLE Source: CoO19128.cif
TITLE Structure Summary: (Co2 O2)
TITLE Reduced formula: CoO
TITLE space group: P1, space number: 1
TITLE abc: 3.297078 3.297078 5.254213
TITLE angles: 90.0 90.0 120.0
TITLE sites: 4
* 1 Co 0.666666 0.333332 0.496324
* 2 Co 0.333333 0.666667 0.996324
* 3 O 0.666666 0.333332 0.878676
* 4 O 0.333333 0.666667 0.378675
Args:
struct: Structure object, See pymatgen.core.structure.Structure.
source: User supplied identifier, i.e. for Materials Project this
would be the material ID number
comment: Comment for first header line
"""
def __init__(self, struct, source='', comment=''):
if struct.is_ordered:
self.struct = struct
self.source = source
sym = SpacegroupAnalyzer(struct)
data = sym.get_symmetry_dataset()
self.space_number = data["number"]
self.space_group = data["international"]
self.comment = comment or "None given"
else:
raise ValueError("Structure with partial occupancies cannot be "
"converted into atomic coordinates!")
@staticmethod
def from_cif_file(cif_file, source='', comment=''):
"""
Static method to create Header object from cif_file
Args:
cif_file: cif_file path and name
source: User supplied identifier, i.e. for Materials Project this
would be the material ID number
comment: User comment that goes in header
Returns:
Header Object
"""
r = CifParser(cif_file)
structure = r.get_structures()[0]
return Header(structure, source, comment)
@property
def structure_symmetry(self):
"""
Returns space number and space group
Returns:
Space number and space group list
"""
return self.space_group, self.space_number
@property
def formula(self):
"""
Formula of structure
"""
return self.struct.composition.formula
@staticmethod
def from_file(filename):
"""
Returns Header object from file
"""
hs = Header.header_string_from_file(filename)
return Header.from_string(hs)
@staticmethod
def header_string_from_file(filename='feff.inp'):
"""
Reads Header string from either a HEADER file or feff.inp file
Will also read a header from a non-pymatgen generated feff.inp file
Args:
filename: File name containing the Header data.
Returns:
            The header string.
"""
with zopen(filename, "r") as fobject:
f = fobject.readlines()
feff_header_str = []
ln = 0
# Checks to see if generated by pymatgen
try:
feffpmg = f[0].find("pymatgen")
except IndexError:
feffpmg = False
# Reads pymatgen generated header or feff.inp file
if feffpmg:
nsites = int(f[8].split()[2])
for line in f:
ln += 1
if ln <= nsites + 9:
feff_header_str.append(line)
else:
# Reads header from header from feff.inp file from unknown
# source
end = 0
for line in f:
if (line[0] == "*" or line[0] == "T") and end == 0:
feff_header_str.append(line.replace("\r", ""))
else:
end = 1
return ''.join(feff_header_str)
@staticmethod
def from_string(header_str):
"""
Reads Header string and returns Header object if header was
generated by pymatgen.
        Note: checks to see if the header was generated by pymatgen; if not,
        it is impossible to generate a structure object, so no header object
        can be generated and the routine ends
Args:
header_str: pymatgen generated feff.inp header
Returns:
Structure object.
"""
lines = tuple(clean_lines(header_str.split("\n"), False))
comment1 = lines[0]
feffpmg = comment1.find("pymatgen")
if feffpmg:
comment2 = ' '.join(lines[1].split()[2:])
source = ' '.join(lines[2].split()[2:])
basis_vec = lines[6].split(":")[-1].split()
# a, b, c
a = float(basis_vec[0])
b = float(basis_vec[1])
c = float(basis_vec[2])
lengths = [a, b, c]
# alpha, beta, gamma
basis_ang = lines[7].split(":")[-1].split()
alpha = float(basis_ang[0])
beta = float(basis_ang[1])
gamma = float(basis_ang[2])
angles = [alpha, beta, gamma]
lattice = Lattice.from_lengths_and_angles(lengths, angles)
natoms = int(lines[8].split(":")[-1].split()[0])
atomic_symbols = []
for i in range(9, 9 + natoms):
atomic_symbols.append(lines[i].split()[2])
# read the atomic coordinates
coords = []
for i in range(natoms):
toks = lines[i + 9].split()
coords.append([float(s) for s in toks[3:]])
struct = Structure(lattice, atomic_symbols, coords, False,
False, False)
h = Header(struct, source, comment2)
return h
else:
return "Header not generated by pymatgen, cannot return header object"
def __str__(self):
"""
String representation of Header.
"""
to_s = lambda x: "%0.6f" % x
output = ["* This FEFF.inp file generated by pymatgen",
''.join(["TITLE comment: ", self.comment]),
''.join(["TITLE Source: ", self.source]),
"TITLE Structure Summary: {}"
.format(self.struct.composition.formula),
"TITLE Reduced formula: {}"
.format(self.struct.composition.reduced_formula),
"TITLE space group: ({}), space number: ({})"
.format(self.space_group, self.space_number),
"TITLE abc:{}".format(" ".join(
[to_s(i).rjust(10) for i in self.struct.lattice.abc])),
"TITLE angles:{}".format(" ".join(
[to_s(i).rjust(10) for i in self.struct.lattice.angles])),
"TITLE sites: {}".format(self.struct.num_sites)]
for i, site in enumerate(self.struct):
output.append(" ".join(["*", str(i + 1), site.species_string,
" ".join([to_s(j).rjust(12)
for j in site.frac_coords])]))
return "\n".join(output)
def write_file(self, filename='HEADER'):
"""
Writes Header into filename on disk.
Args:
filename: Filename and path for file to be written to disk
"""
with open(filename, "w") as f:
f.write(str(self) + "\n")
class Atoms(MSONable):
"""
Atomic cluster centered around the absorbing atom.
"""
def __init__(self, struct, absorbing_atom, radius):
"""
Args:
struct (Structure): input structure
absorbing_atom (str/int): Symbol for absorbing atom or site index
radius (float): radius of the atom cluster in Angstroms.
"""
if struct.is_ordered:
self.struct = struct
self.pot_dict = get_atom_map(struct)
else:
raise ValueError("Structure with partial occupancies cannot be "
"converted into atomic coordinates!")
self.absorbing_atom, self.center_index = \
get_absorbing_atom_symbol_index(absorbing_atom, struct)
self.radius = radius
self._cluster = self._set_cluster()
def _set_cluster(self):
"""
        Compute and set the cluster of atoms as a Molecule object. The site
        coordinates are translated such that the absorbing atom (aka central
atom) is at the origin.
Returns:
Molecule
"""
center = self.struct[self.center_index].coords
sphere = self.struct.get_neighbors(self.struct[self.center_index], self.radius)
symbols = [self.absorbing_atom]
coords = [[0, 0, 0]]
for i, site_dist in enumerate(sphere):
site_symbol = re.sub(r"[^aA-zZ]+", "", site_dist[0].species_string)
symbols.append(site_symbol)
coords.append(site_dist[0].coords - center)
return Molecule(symbols, coords)
@property
def cluster(self):
"""
Returns the atomic cluster as a Molecule object.
"""
return self._cluster
@staticmethod
def atoms_string_from_file(filename):
"""
Reads atomic shells from file such as feff.inp or ATOMS file
The lines are arranged as follows:
x y z ipot Atom Symbol Distance Number
with distance being the shell radius and ipot an integer identifying
the potential used.
Args:
filename: File name containing atomic coord data.
Returns:
Atoms string.
"""
with zopen(filename, "rt") as fobject:
f = fobject.readlines()
coords = 0
atoms_str = []
for line in f:
if coords == 0:
find_atoms = line.find("ATOMS")
if find_atoms >= 0:
coords = 1
if coords == 1 and not ("END" in line):
atoms_str.append(line.replace("\r", ""))
return ''.join(atoms_str)
@staticmethod
def cluster_from_file(filename):
"""
Parse the feff input file and return the atomic cluster as a Molecule
object.
Args:
filename (str): path the feff input file
Returns:
Molecule: the atomic cluster as Molecule object. The absorbing atom
is the one at the origin.
"""
atoms_string = Atoms.atoms_string_from_file(filename)
line_list = [l.split() for l in atoms_string.splitlines()[3:]]
coords = []
symbols = []
for l in line_list:
if l:
coords.append([float(i) for i in l[:3]])
symbols.append(l[4])
return Molecule(symbols, coords)
def get_lines(self):
"""
Returns a list of string representations of the atomic configuration
information(x, y, z, ipot, atom_symbol, distance, id).
Returns:
list: list of strings, sorted by the distance from the absorbing
atom.
"""
lines = [["{:f}".format(self._cluster[0].x),
"{:f}".format(self._cluster[0].y),
"{:f}".format(self._cluster[0].z),
0, self.absorbing_atom, "0.0", 0]]
for i, site in enumerate(self._cluster[1:]):
site_symbol = re.sub(r"[^aA-zZ]+", "", site.species_string)
ipot = self.pot_dict[site_symbol]
lines.append(["{:f}".format(site.x), "{:f}".format(site.y),
"{:f}".format(site.z), ipot, site_symbol,
"{:f}".format(self._cluster.get_distance(0, i + 1)), i + 1])
return sorted(lines, key=itemgetter(5))
def __str__(self):
"""
String representation of Atoms file.
"""
lines_sorted = self.get_lines()
# TODO: remove the formatting and update the unittests
lines_formatted = str(tabulate(lines_sorted,
headers=["* x", "y", "z", "ipot",
"Atom", "Distance", "Number"]))
atom_list = lines_formatted.replace("--", "**")
return ''.join(["ATOMS\n", atom_list, "\nEND\n"])
def write_file(self, filename='ATOMS'):
"""
Write Atoms list to file.
Args:
filename: path for file to be written
"""
with zopen(filename, "wt") as f:
f.write(str(self) + "\n")
class Tags(dict):
"""
FEFF control parameters.
"""
def __init__(self, params=None):
"""
Args:
params: A set of input parameters as a dictionary.
"""
super(Tags, self).__init__()
if params:
self.update(params)
def __setitem__(self, key, val):
"""
Add parameter-val pair. Warns if parameter is not in list of valid
Feff tags. Also cleans the parameter and val by stripping leading and
trailing white spaces.
Arg:
key: dict key value
value: value associated with key in dictionary
"""
if key.strip().upper() not in VALID_FEFF_TAGS:
warnings.warn(key.strip() + " not in VALID_FEFF_TAGS list")
super(Tags, self).__setitem__(key.strip(),
Tags.proc_val(key.strip(), val.strip())
if isinstance(val, str) else val)
def as_dict(self):
"""
Dict representation.
Returns:
Dictionary of parameters from fefftags object
"""
tags_dict = dict(self)
tags_dict['@module'] = self.__class__.__module__
tags_dict['@class'] = self.__class__.__name__
return tags_dict
@staticmethod
def from_dict(d):
"""
Creates Tags object from a dictionary.
Args:
d: Dict of feff parameters and values.
Returns:
Tags object
"""
i = Tags()
for k, v in d.items():
if k not in ("@module", "@class"):
i[k] = v
return i
def get_string(self, sort_keys=False, pretty=False):
"""
Returns a string representation of the Tags. The reason why this
method is different from the __str__ method is to provide options
for pretty printing.
Args:
sort_keys: Set to True to sort the Feff parameters alphabetically.
Defaults to False.
pretty: Set to True for pretty aligned output. Defaults to False.
Returns:
String representation of Tags.
"""
keys = self.keys()
if sort_keys:
keys = sorted(keys)
lines = []
for k in keys:
if isinstance(self[k], dict):
if k in ["ELNES", "EXELFS"]:
lines.append([k, self._stringify_val(self[k]["ENERGY"])])
beam_energy = self._stringify_val(self[k]["BEAM_ENERGY"])
beam_energy_list = beam_energy.split()
if int(beam_energy_list[1]) == 0: # aver=0, specific beam direction
lines.append([beam_energy])
lines.append([self._stringify_val(self[k]["BEAM_DIRECTION"])])
else:
# no cross terms for orientation averaged spectrum
beam_energy_list[2] = str(0)
lines.append([self._stringify_val(beam_energy_list)])
lines.append([self._stringify_val(self[k]["ANGLES"])])
lines.append([self._stringify_val(self[k]["MESH"])])
lines.append([self._stringify_val(self[k]["POSITION"])])
else:
lines.append([k, self._stringify_val(self[k])])
if pretty:
return tabulate(lines)
else:
return str_delimited(lines, None, " ")
@staticmethod
def _stringify_val(val):
"""
Convert the given value to string.
"""
if isinstance(val, list):
return " ".join([str(i) for i in val])
else:
return str(val)
def __str__(self):
return self.get_string()
def write_file(self, filename='PARAMETERS'):
"""
Write Tags to a Feff parameter tag file.
Args:
filename: filename and path to write to.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__() + "\n")
@staticmethod
def from_file(filename="feff.inp"):
"""
Creates a Feff_tag dictionary from a PARAMETER or feff.inp file.
Args:
filename: Filename for either PARAMETER or feff.inp file
Returns:
Feff_tag object
"""
with zopen(filename, "rt") as f:
lines = list(clean_lines(f.readlines()))
params = {}
eels_params = []
ieels = -1
ieels_max = -1
for i, line in enumerate(lines):
m = re.match(r"([A-Z]+\d*\d*)\s*(.*)", line)
if m:
key = m.group(1).strip()
val = m.group(2).strip()
val = Tags.proc_val(key, val)
if key not in ("ATOMS", "POTENTIALS", "END", "TITLE"):
if key in ["ELNES", "EXELFS"]:
ieels = i
ieels_max = ieels + 5
else:
params[key] = val
if ieels >= 0:
                if ieels <= i <= ieels_max:
if i == ieels + 1:
if int(line.split()[1]) == 1:
ieels_max -= 1
eels_params.append(line)
if eels_params:
if len(eels_params) == 6:
eels_keys = ['BEAM_ENERGY', 'BEAM_DIRECTION', 'ANGLES', 'MESH', 'POSITION']
else:
eels_keys = ['BEAM_ENERGY', 'ANGLES', 'MESH', 'POSITION']
eels_dict = {"ENERGY": Tags._stringify_val(eels_params[0].split()[1:])}
for k, v in zip(eels_keys, eels_params[1:]):
eels_dict[k] = str(v)
params[str(eels_params[0].split()[0])] = eels_dict
return Tags(params)
@staticmethod
def proc_val(key, val):
"""
Static helper method to convert Feff parameters to proper types, e.g.
integers, floats, lists, etc.
Args:
key: Feff parameter key
val: Actual value of Feff parameter.
"""
list_type_keys = list(VALID_FEFF_TAGS)
del list_type_keys[list_type_keys.index("ELNES")]
del list_type_keys[list_type_keys.index("EXELFS")]
boolean_type_keys = ()
float_type_keys = ("S02", "EXAFS", "RPATH")
def smart_int_or_float(numstr):
if numstr.find(".") != -1 or numstr.lower().find("e") != -1:
return float(numstr)
else:
return int(numstr)
try:
if key.lower() == 'cif':
m = re.search(r"\w+.cif", val)
return m.group(0)
if key in list_type_keys:
output = list()
toks = re.split(r"\s+", val)
for tok in toks:
m = re.match(r"(\d+)\*([\d\.\-\+]+)", tok)
if m:
output.extend([smart_int_or_float(m.group(2))] *
int(m.group(1)))
else:
output.append(smart_int_or_float(tok))
return output
if key in boolean_type_keys:
m = re.search(r"^\W+([TtFf])", val)
if m:
if m.group(1) == "T" or m.group(1) == "t":
return True
else:
return False
raise ValueError(key + " should be a boolean type!")
if key in float_type_keys:
return float(val)
except ValueError:
return val.capitalize()
return val.capitalize()
def diff(self, other):
"""
Diff function. Compares two PARAMETER files and indicates which
parameters are the same and which are not. Useful for checking whether
two runs were done using the same parameters.
Args:
other: The other PARAMETER dictionary to compare to.
Returns:
Dict of the format {"Same" : parameters_that_are_the_same,
"Different": parameters_that_are_different} Note that the
parameters are return as full dictionaries of values.
"""
similar_param = {}
different_param = {}
for k1, v1 in self.items():
if k1 not in other:
different_param[k1] = {"FEFF_TAGS1": v1,
"FEFF_TAGS2": "Default"}
elif v1 != other[k1]:
different_param[k1] = {"FEFF_TAGS1": v1,
"FEFF_TAGS2": other[k1]}
else:
similar_param[k1] = v1
for k2, v2 in other.items():
if k2 not in similar_param and k2 not in different_param:
if k2 not in self:
different_param[k2] = {"FEFF_TAGS1": "Default",
"FEFF_TAGS2": v2}
return {"Same": similar_param, "Different": different_param}
def __add__(self, other):
"""
Add all the values of another Tags object to this object
Facilitates the use of "standard" Tags
"""
params = dict(self)
for k, v in other.items():
if k in self and v != self[k]:
raise ValueError("Tags have conflicting values!")
else:
params[k] = v
return Tags(params)
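# --- Illustrative usage (a minimal sketch, not part of the library itself) ---
# A Tags object behaves like a dict of FEFF cards; proc_val also expands
# shorthand tokens such as "2*1.5" into [1.5, 1.5] when reading files back in:
#
#     tags = Tags({"S02": 0.95, "RPATH": 10.0, "EXAFS": 20.0})
#     tags.write_file("PARAMETERS")
#     print(tags.diff(Tags({"S02": 1.0}))["Different"])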
class Potential(MSONable):
"""
FEFF atomic potential.
"""
def __init__(self, struct, absorbing_atom):
"""
Args:
struct (Structure): Structure object.
absorbing_atom (str/int): Absorbing atom symbol or site index
"""
if struct.is_ordered:
self.struct = struct
self.pot_dict = get_atom_map(struct)
else:
raise ValueError("Structure with partial occupancies cannot be "
"converted into atomic coordinates!")
self.absorbing_atom, _ = \
get_absorbing_atom_symbol_index(absorbing_atom, struct)
@staticmethod
def pot_string_from_file(filename='feff.inp'):
"""
Reads Potential parameters from a feff.inp or FEFFPOT file.
The lines are arranged as follows:
        ipot Z element lmax1 lmax2 stoichiometry spinph
Args:
filename: file name containing potential data.
Returns:
FEFFPOT string.
"""
with zopen(filename, "rt") as f_object:
f = f_object.readlines()
ln = -1
pot_str = ["POTENTIALS\n"]
pot_tag = -1
pot_data = 0
pot_data_over = 1
sep_line_pattern = [re.compile('ipot.*Z.*tag.*lmax1.*lmax2.*spinph'),
re.compile('^[*]+.*[*]+$')]
for line in f:
if pot_data_over == 1:
ln += 1
if pot_tag == -1:
pot_tag = line.find("POTENTIALS")
ln = 0
if pot_tag >= 0 and ln > 0 and pot_data_over > 0:
try:
if len(sep_line_pattern[0].findall(line)) > 0 or \
len(sep_line_pattern[1].findall(line)) > 0:
pot_str.append(line)
elif int(line.split()[0]) == pot_data:
pot_data += 1
pot_str.append(line.replace("\r", ""))
except (ValueError, IndexError):
if pot_data > 0:
pot_data_over = 0
return ''.join(pot_str).rstrip('\n')
@staticmethod
def pot_dict_from_string(pot_data):
"""
Creates atomic symbol/potential number dictionary
forward and reverse
Arg:
pot_data: potential data in string format
Returns:
forward and reverse atom symbol and potential number dictionaries.
"""
pot_dict = {}
pot_dict_reverse = {}
begin = 0
ln = -1
for line in pot_data.split("\n"):
try:
if begin == 0 and line.split()[0] == "0":
begin += 1
ln = 0
if begin == 1:
ln += 1
if ln > 0:
atom = line.split()[2]
index = int(line.split()[0])
pot_dict[atom] = index
pot_dict_reverse[index] = atom
except (ValueError, IndexError):
pass
return pot_dict, pot_dict_reverse
def __str__(self):
"""
Returns a string representation of potential parameters to be used in
the feff.inp file,
determined from structure object.
The lines are arranged as follows:
ipot Z element lmax1 lmax2 stoichiometry spinph
Returns:
String representation of Atomic Coordinate Shells.
"""
central_element = Element(self.absorbing_atom)
ipotrow = [[0, central_element.Z, central_element.symbol, -1, -1, .0001, 0]]
for el, amt in self.struct.composition.items():
ipot = self.pot_dict[el.symbol]
ipotrow.append([ipot, el.Z, el.symbol, -1, -1, amt, 0])
ipot_sorted = sorted(ipotrow, key=itemgetter(0))
ipotrow = str(tabulate(ipot_sorted,
headers=["*ipot", "Z", "tag", "lmax1",
"lmax2", "xnatph(stoichometry)",
"spinph"]))
ipotlist = ipotrow.replace("--", "**")
ipotlist = ''.join(["POTENTIALS\n", ipotlist])
return ipotlist
def write_file(self, filename='POTENTIALS'):
"""
Write to file.
Args:
filename: filename and path to write potential file to.
"""
with zopen(filename, "wt") as f:
f.write(str(self) + "\n")
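# --- Illustrative usage (a minimal sketch, not part of the library itself) ---
# Given an ordered pymatgen Structure (assumed bound to `struct` here),
# Potential assigns index 0 to the absorbing atom and a unique potential
# index to every other species:
#
#     pot = Potential(struct, absorbing_atom="Fe")
#     pot.write_file("POTENTIALS")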
class Paths(MSONable):
"""
Set FEFF scattering paths('paths.dat' file used by the 'genfmt' module).
"""
def __init__(self, atoms, paths, degeneracies=None):
"""
Args:
atoms (Atoms): Atoms object
paths (list(list)): list of paths. Each path is a list of atom indices in the atomic
cluster(the molecular cluster created by Atoms class).
e.g. [[0, 1, 2], [5, 9, 4, 1]] -> 2 paths: one with 3 legs and the other with 4 legs.
degeneracies (list): list of degeneracies, one for each path. Set to 1 if not specified.
"""
self.atoms = atoms
self.paths = paths
self.degeneracies = degeneracies or [1] * len(paths)
assert len(self.degeneracies) == len(self.paths)
def __str__(self):
lines = ["PATH", "---------------"]
# max possible, to avoid name collision count down from max value.
path_index = 9999
for i, legs in enumerate(self.paths):
lines.append("{} {} {}".format(path_index, len(legs), self.degeneracies[i]))
lines.append("x y z ipot label")
for l in legs:
coords = self.atoms.cluster[l].coords.tolist()
tmp = "{:.6f} {:.6f} {:.6f}".format(*tuple(coords))
element = str(self.atoms.cluster[l].specie.name)
# the potential index for the absorbing atom(the one at the cluster origin) is 0
potential = 0 if np.linalg.norm(coords) <= 1e-6 else self.atoms.pot_dict[element]
tmp = "{} {} {}".format(tmp, potential, element)
lines.append(tmp)
path_index -= 1
return "\n".join(lines)
def write_file(self, filename="paths.dat"):
"""
Write paths.dat.
"""
with zopen(filename, "wt") as f:
f.write(str(self) + "\n")
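# --- Illustrative usage (a minimal sketch, not part of the library itself) ---
# Each inner list is one scattering path given as site indices into the
# atomic cluster built by the Atoms class (assumed bound to `atoms` here);
# index 0 is the absorber at the cluster origin:
#
#     paths = Paths(atoms, [[0, 1, 2], [0, 3]], degeneracies=[2, 1])
#     paths.write_file("paths.dat")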
class FeffParserError(Exception):
"""
    Exception class for FEFF parsing.
    Raised when a FEFF input or output file cannot be parsed as expected.
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "FeffParserError : " + self.msg
def get_atom_map(structure):
"""
Returns a dict that maps each atomic symbol to a unique integer starting
from 1.
Args:
structure (Structure)
Returns:
dict
"""
syms = [site.specie.symbol for site in structure]
unique_pot_atoms = []
[unique_pot_atoms.append(i) for i in syms if not unique_pot_atoms.count(i)]
atom_map = {}
for i, atom in enumerate(unique_pot_atoms):
atom_map[atom] = i + 1
return atom_map
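# For example (hypothetical sites), a structure whose sites read [Fe, O, O, Fe]
# yields {"Fe": 1, "O": 2}: symbols are numbered by first appearance, from 1.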
def get_absorbing_atom_symbol_index(absorbing_atom, structure):
"""
    Return the absorbing atom symbol and site index in the given structure.
Args:
absorbing_atom (str/int): symbol or site index
structure (Structure)
Returns:
str, int: symbol and site index
"""
if isinstance(absorbing_atom, str):
return absorbing_atom, structure.indices_from_symbol(absorbing_atom)[0]
elif isinstance(absorbing_atom, int):
return str(structure[absorbing_atom].specie), absorbing_atom
else:
raise ValueError("absorbing_atom must be either specie symbol or site index")
| mit |
clumsy/intellij-community | python/helpers/pydev/pydevd_traceproperty.py | 53 | 3777 | '''For debug purposes we replace the actual builtin property with the debug property
'''
from pydevd_comm import GetGlobalDebugger
from pydevd_constants import * #@UnusedWildImport
import pydevd_tracing
#=======================================================================================================================
# replace_builtin_property
#=======================================================================================================================
def replace_builtin_property(new_property=None):
if new_property is None:
new_property = DebugProperty
original = property
if not IS_PY3K:
try:
import __builtin__
__builtin__.__dict__['property'] = new_property
except:
if DebugInfoHolder.DEBUG_TRACE_LEVEL:
import traceback;traceback.print_exc() #@Reimport
else:
try:
import builtins #Python 3.0 does not have the __builtin__ module @UnresolvedImport
builtins.__dict__['property'] = new_property
except:
if DebugInfoHolder.DEBUG_TRACE_LEVEL:
import traceback;traceback.print_exc() #@Reimport
return original
#=======================================================================================================================
# DebugProperty
#=======================================================================================================================
class DebugProperty(object):
"""A custom property which allows python property to get
controlled by the debugger and selectively disable/re-enable
the tracing.
"""
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
self.__doc__ = doc
def __get__(self, obj, objtype=None):
if obj is None:
return self
global_debugger = GetGlobalDebugger()
try:
if global_debugger is not None and global_debugger.disable_property_getter_trace:
pydevd_tracing.SetTrace(None)
if self.fget is None:
raise AttributeError("unreadable attribute")
return self.fget(obj)
finally:
if global_debugger is not None:
pydevd_tracing.SetTrace(global_debugger.trace_dispatch)
def __set__(self, obj, value):
global_debugger = GetGlobalDebugger()
try:
if global_debugger is not None and global_debugger.disable_property_setter_trace:
pydevd_tracing.SetTrace(None)
if self.fset is None:
raise AttributeError("can't set attribute")
self.fset(obj, value)
finally:
if global_debugger is not None:
pydevd_tracing.SetTrace(global_debugger.trace_dispatch)
def __delete__(self, obj):
global_debugger = GetGlobalDebugger()
try:
if global_debugger is not None and global_debugger.disable_property_deleter_trace:
pydevd_tracing.SetTrace(None)
if self.fdel is None:
raise AttributeError("can't delete attribute")
self.fdel(obj)
finally:
if global_debugger is not None:
pydevd_tracing.SetTrace(global_debugger.trace_dispatch)
def getter(self, fget):
"""Overriding getter decorator for the property
"""
self.fget = fget
return self
def setter(self, fset):
"""Overriding setter decorator for the property
"""
self.fset = fset
return self
def deleter(self, fdel):
"""Overriding deleter decorator for the property
"""
self.fdel = fdel
return self
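# --- Illustrative usage (a minimal sketch, not part of pydevd itself) ---
# Once replace_builtin_property() has run, an ordinary property definition is
# backed by DebugProperty, so the debugger can suspend tracing around
# fget/fset/fdel:
#
#     class Point(object):
#         def __init__(self):
#             self._x = 0
#         @property              # resolves to DebugProperty after replacement
#         def x(self):
#             return self._x
#         @x.setter
#         def x(self, value):
#             self._x = value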
| apache-2.0 |
wearpants/osf.io | website/search/share_search.py | 10 | 10194 | from __future__ import unicode_literals
from time import gmtime
from calendar import timegm
from datetime import datetime
import pytz
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
from framework import sentry
from elasticsearch import Elasticsearch
from elasticsearch import (
ConnectionError,
)
from website import settings
from website.search.elastic_search import requires_search
from util import generate_color, html_and_illegal_unicode_replace
import logging
logger = logging.getLogger(__name__)
try:
share_es = Elasticsearch(
settings.SHARE_ELASTIC_URI,
request_timeout=settings.ELASTIC_TIMEOUT
)
except ConnectionError:
message = (
'The SEARCH_ENGINE setting is set to "elastic", but there '
'was a problem starting the elasticsearch interface. Is '
'elasticsearch running?'
)
try:
sentry.log_exception()
sentry.log_message(message)
except AssertionError: # App has not yet been initialized
logger.exception(message)
share_es = None
# This is temporary until we update the backend
FRONTEND_VERSION = 1
@requires_search
def search(query, raw=False, index='share'):
# Run the real query and get the results
results = share_es.search(index=index, doc_type=None, body=query)
for hit in results['hits']['hits']:
hit['_source']['highlight'] = hit.get('highlight', {})
if hit['_source'].get('shareProperties'):
hit['_source']['shareProperties']['docID'] = hit['_source']['shareProperties'].get('docID') or hit['_id']
hit['_source']['shareProperties']['source'] = hit['_source']['shareProperties'].get('source') or hit['_type']
return results if raw else {
'results': [hit['_source'] for hit in results['hits']['hits']],
'count': results['hits']['total'],
'aggregations': results.get('aggregations'),
'aggs': results.get('aggs')
}
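# --- Illustrative call (a minimal sketch; the body is ordinary elasticsearch
# DSL and the field names follow the SHARE schema assumed below) ---
#
#     results = search({'query': {'match_all': {}}, 'size': 10})
#     # results['count'], results['results'][0]['title'], ...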
def remove_key(d, k):
d.pop(k, None)
return d
def clean_count_query(query):
# Get rid of fields not allowed in count queries
for field in ['from', 'size', 'aggs', 'aggregations']:
if query.get(field) is not None:
del query[field]
return query
@requires_search
def count(query, index='share'):
query = clean_count_query(query)
if settings.USE_SHARE:
count = share_es.count(index=index, body=query)['count']
else:
count = 0
return {
'results': [],
'count': count
}
@requires_search
def providers():
provider_map = share_es.search(index='share_providers', doc_type=None, body={
'query': {
'match_all': {}
},
'size': 10000
})
return {
'providerMap': {
hit['_source']['short_name']: hit['_source'] for hit in provider_map['hits']['hits']
}
}
@requires_search
def stats(query=None):
query = query or {'query': {'match_all': {}}}
index = settings.SHARE_ELASTIC_INDEX_TEMPLATE.format(FRONTEND_VERSION)
three_months_ago = timegm((datetime.now() + relativedelta(months=-3)).timetuple()) * 1000
query['aggs'] = {
'sources': {
'terms': {
'field': '_type',
'size': 0,
'min_doc_count': 0,
}
},
'doisMissing': {
'filter': {
'missing': {
'field': 'id.doi'
}
},
'aggs': {
'sources': {
'terms': {
'field': '_type',
'size': 0
}
}
}
},
'dois': {
'filter': {
'exists': {
'field': 'id.doi'
}
},
'aggs': {
'sources': {
'terms': {
'field': '_type',
'size': 0
}
}
}
},
'earlier_documents': {
'filter': {
'range': {
'providerUpdatedDateTime': {
'lt': three_months_ago
}
}
},
'aggs': {
'sources': {
'terms': {
'field': '_type',
'size': 0,
'min_doc_count': 0
}
}
}
}
}
date_histogram_query = {
'query': {
'filtered': {
'query': query['query'],
'filter': {
'range': {
'providerUpdatedDateTime': {
'gt': three_months_ago
}
}
}
}
}
}
date_histogram_query['aggs'] = {
'date_chunks': {
'terms': {
'field': '_type',
'size': 0,
'exclude': 'of|and|or'
},
'aggs': {
'articles_over_time': {
'date_histogram': {
'field': 'providerUpdatedDateTime',
'interval': 'week',
'min_doc_count': 0,
'extended_bounds': {
'min': three_months_ago,
'max': timegm(gmtime()) * 1000
}
}
}
}
}
}
results = share_es.search(index=index,
body=query)
date_results = share_es.search(index=index,
body=date_histogram_query)
results['aggregations']['date_chunks'] = date_results['aggregations']['date_chunks']
chart_results = data_for_charts(results)
return chart_results
def data_for_charts(elastic_results):
source_data = elastic_results['aggregations']['sources']['buckets']
for_charts = {}
    ## for the donut graph: a list of [source, count] pairs
source_and_counts = [[item['key'], item['doc_count']] for item in source_data]
for_charts['shareDonutGraph'] = source_and_counts
r = generate_color()
stats = {}
colors = {}
for bucket in elastic_results['aggregations']['sources']['buckets']:
stats[bucket['key']] = {
'doc_count': bucket['doc_count'],
}
colors[bucket['key']] = r.next()
for bucket in elastic_results['aggregations']['earlier_documents']['sources']['buckets']:
stats[bucket['key']]['earlier_documents'] = bucket['doc_count']
default_buckets = []
for bucket in elastic_results['aggregations']['date_chunks']['buckets']:
default_buckets = bucket['articles_over_time']['buckets']
stats[bucket['key']]['articles_over_time'] = bucket['articles_over_time']['buckets']
max_len = 0
for key, value in stats.iteritems():
if not stats[key].get('earlier_documents'):
stats[key]['earlier_documents'] = 0
if not stats[key].get('articles_over_time'):
stats[key]['articles_over_time'] = [
{
'key_as_string': item['key_as_string'],
'key': item['key'],
'doc_count': 0
}
for item in default_buckets
]
if len(stats[key]['articles_over_time']) > max_len:
max_len = len(stats[key]['articles_over_time'])
names = ['x']
numbers = [['x']]
for date in stats[stats.keys()[0]]['articles_over_time']:
numbers[0].append(' ')
for key, value in stats.iteritems():
try:
names.append(key)
x = [item['doc_count'] for item in value['articles_over_time']]
if len(x) < max_len:
x += [0] * (max_len - len(x))
x[0] += stats[key].get('earlier_documents', 0)
numbers.append([key] + [sum(x[0:i + 1]) for i in range(len(x[0:]))])
except IndexError:
pass
date_totals = {
'date_numbers': numbers,
'group_names': names
}
if date_totals.get('date_numbers') == [[u'x']]:
for name in date_totals.get('group_names'):
date_totals.get('date_numbers').append([name, 0])
for_charts['date_totals'] = date_totals
all_data = {}
all_data['raw_aggregations'] = elastic_results['aggregations']
all_data['charts'] = {
'shareDonutGraph': {
'type': 'donut',
'columns': for_charts['shareDonutGraph'],
'colors': colors
},
'shareTimeGraph': {
'x': 'x',
'type': 'area-spline',
'columns': for_charts['date_totals']['date_numbers'],
'groups': [for_charts['date_totals']['group_names']],
'colors': colors
}
}
return all_data
def to_atom(result):
return {
'title': html_and_illegal_unicode_replace(result.get('title')) or 'No title provided.',
'summary': html_and_illegal_unicode_replace(result.get('description')) or 'No summary provided.',
'id': result['uris']['canonicalUri'],
'updated': get_date_updated(result),
'links': [
{'href': result['uris']['canonicalUri'], 'rel': 'alternate'}
],
'author': format_contributors_for_atom(result['contributors']),
'categories': [{'term': html_and_illegal_unicode_replace(tag)} for tag in (result.get('tags', []) + result.get('subjects', []))],
'published': parse(result.get('providerUpdatedDateTime'))
}
def format_contributors_for_atom(contributors_list):
return [
{
'name': html_and_illegal_unicode_replace(entry['name'])
} for entry in contributors_list
]
def get_date_updated(result):
try:
updated = pytz.utc.localize(parse(result.get('providerUpdatedDateTime')))
except ValueError:
updated = parse(result.get('providerUpdatedDateTime'))
return updated
| apache-2.0 |
cc272309126/panda3d | direct/src/distributed/AsyncRequest.py | 11 | 10930 | from otp.ai.AIBaseGlobal import *
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.DirectObject import DirectObject
from ConnectionRepository import *
ASYNC_REQUEST_DEFAULT_TIMEOUT_IN_SECONDS = 8.0
ASYNC_REQUEST_INFINITE_RETRIES = -1
ASYNC_REQUEST_DEFAULT_NUM_RETRIES = 0
if __debug__:
_overrideTimeoutTimeForAllAsyncRequests = config.GetFloat("async-request-timeout", -1.0)
_overrideNumRetriesForAllAsyncRequests = config.GetInt("async-request-num-retries", -1)
_breakOnTimeout = config.GetBool("async-request-break-on-timeout", False)
class AsyncRequest(DirectObject):
"""
    This class is used to make asynchronous reads and creates to a database.
You can create a list of self.neededObjects and then ask for each to be
read or created, or if you only have one object that you need you can
skip the self.neededObjects because calling askForObject or createObject
will set the self.neededObjects value for you.
Once all the objects have been read or created, the self.finish() method
will be called. You may override this function to run your code in a
derived class.
If you wish to queue up several items that you all need before the finish
method is called, you can put items in self.neededObjects and then call
askForObject or createObject afterwards. That way the _checkCompletion
will not call finish until after all the requests have been done.
    If you need to chain several object reads or creates, just add more
entries to the self.neededObjects dictionary in the self.finish function
and return without calling AsyncRequest.finish(). Your finish method
will be called again when the new self.neededObjects is complete. You
may repeat this as necessary.
"""
_asyncRequests = {}
notify = DirectNotifyGlobal.directNotify.newCategory('AsyncRequest')
def __init__(self, air, replyToChannelId = None,
timeoutTime = ASYNC_REQUEST_DEFAULT_TIMEOUT_IN_SECONDS,
numRetries = ASYNC_REQUEST_DEFAULT_NUM_RETRIES):
"""
air is the AI Respository.
replyToChannelId may be an avatarId, an accountId, or a channelId.
timeoutTime is how many seconds to wait before aborting the request.
numRetries is the number of times to retry the request before giving up.
"""
assert AsyncRequest.notify.debugCall()
if __debug__:
if _overrideTimeoutTimeForAllAsyncRequests >= 0.0:
timeoutTime = _overrideTimeoutTimeForAllAsyncRequests
if _overrideNumRetriesForAllAsyncRequests >= 0:
numRetries = _overrideNumRetriesForAllAsyncRequests
AsyncRequest._asyncRequests[id(self)] = self
self.deletingMessage = "AsyncRequest-deleting-%s"%(id(self,))
self.air = air
self.replyToChannelId = replyToChannelId
self.timeoutTask = None
self.neededObjects = {}
self._timeoutTime = timeoutTime
self._initialNumRetries = numRetries
def delete(self):
assert AsyncRequest.notify.debugCall()
del AsyncRequest._asyncRequests[id(self)]
self.ignoreAll()
self._resetTimeoutTask(False)
messenger.send(self.deletingMessage, [])
del self.neededObjects
del self.air
del self.replyToChannelId
def askForObjectField(
self, dclassName, fieldName, doId, key = None, context = None):
"""
Request an already created object, i.e. read from database.
"""
assert AsyncRequest.notify.debugCall()
if key is None:
# default the dictionary key to the fieldName
key = fieldName
assert doId
if context is None:
context = self.air.allocateContext()
self.air.contextToClassName[context] = dclassName
self.acceptOnce(
"doFieldResponse-%s"%(context,),
self._checkCompletion, [key])
self.neededObjects[key] = None
self.air.queryObjectField(dclassName, fieldName, doId, context)
self._resetTimeoutTask()
def askForObjectFields(
self, dclassName, fieldNames, doId, key = None, context = None):
"""
Request an already created object, i.e. read from database.
"""
assert AsyncRequest.notify.debugCall()
if key is None:
# default the dictionary key to the fieldName
key = fieldNames[0]
assert doId
if context is None:
context = self.air.allocateContext()
self.air.contextToClassName[context] = dclassName
self.acceptOnce(
"doFieldResponse-%s"%(context,),
self._checkCompletion, [key])
self.air.queryObjectFields(dclassName, fieldNames, doId, context)
self._resetTimeoutTask()
def askForObjectFieldsByString(self, dbId, dclassName, objString, fieldNames, key=None, context=None):
assert AsyncRequest.notify.debugCall()
assert dbId
if key is None:
# default the dictionary key to the fieldNames
key = fieldNames
if context is None:
context=self.air.allocateContext()
self.air.contextToClassName[context]=dclassName
self.acceptOnce(
"doFieldResponse-%s"%(context,),
self._checkCompletion, [key])
self.air.queryObjectStringFields(dbId,dclassName,objString,fieldNames,context)
self._resetTimeoutTask()
def askForObject(self, doId, context = None):
"""
Request an already created object, i.e. read from database.
"""
assert AsyncRequest.notify.debugCall()
assert doId
if context is None:
context = self.air.allocateContext()
self.acceptOnce(
"doRequestResponse-%s"%(context,),
self._checkCompletion, [None])
self.air.queryObjectAll(doId, context)
self._resetTimeoutTask()
def createObject(self, name, className,
databaseId = None, values = None, context = None):
"""
Create a new database object. You can get the doId from within
your self.finish() function.
This functions is different from createObjectId in that it does
generate the object when the response comes back. The object is
added to the doId2do and so forth and treated as a full regular
object (which it is). This is useful on the AI where we really
do want the object on the AI.
"""
assert AsyncRequest.notify.debugCall()
assert name
assert className
self.neededObjects[name] = None
if context is None:
context = self.air.allocateContext()
self.accept(
self.air.getDatabaseGenerateResponseEvent(context),
self._doCreateObject, [name, className, values])
self.air.requestDatabaseGenerate(
className, context, databaseId = databaseId, values = values)
self._resetTimeoutTask()
def createObjectId(self, name, className, values = None, context = None):
"""
Create a new database object. You can get the doId from within
your self.finish() function.
This functions is different from createObject in that it does not
generate the object when the response comes back. It only tells you
the doId. This is useful on the UD where we don't really want the
object on the UD, we just want the object created and the UD wants
to send messages to it using the ID.
"""
assert AsyncRequest.notify.debugCall()
assert name
assert className
self.neededObjects[name] = None
if context is None:
context = self.air.allocateContext()
self.accept(
self.air.getDatabaseGenerateResponseEvent(context),
self._checkCompletion, [name, None])
self.air.requestDatabaseGenerate(className, context, values = values)
self._resetTimeoutTask()
def finish(self):
"""
This is the function that gets called when all of the needed objects
are in (i.e. all the askForObject and createObject requests have
been satisfied).
        If the other requests time out, finish will not be called.
"""
assert self.notify.debugCall()
self.delete()
def _doCreateObject(self, name, className, values, doId):
isInDoId2do = doId in self.air.doId2do
distObj = self.air.generateGlobalObject(doId, className, values)
if not isInDoId2do and game.name == 'uberDog':
# only remove doId if this is the uberdog?, in pirates this was
# causing traded inventory objects to be generated twice, once
# here and again later when it was noticed the doId was not in
# the doId2do list yet.
self.air.doId2do.pop(doId, None)
self._checkCompletion(name, None, distObj)
def _checkCompletion(self, name, context, distObj):
"""
This checks whether we have all the needed objects and calls
finish() if we do.
"""
if name is not None:
self.neededObjects[name] = distObj
else:
self.neededObjects[distObj.doId] = distObj
for i in self.neededObjects.values():
if i is None:
return
self.finish()
def _resetTimeoutTask(self, createAnew = True):
if self.timeoutTask:
taskMgr.remove(self.timeoutTask)
self.timeoutTask = None
if createAnew:
self.numRetries = self._initialNumRetries
self.timeoutTask = taskMgr.doMethodLater(
self._timeoutTime, self.timeout,
"AsyncRequestTimer-%s"%(id(self,)))
def timeout(self, task):
assert AsyncRequest.notify.debugCall(
"neededObjects: %s"%(self.neededObjects,))
if self.numRetries > 0:
assert AsyncRequest.notify.debug(
'Timed out. Trying %d more time(s) : %s' %
(self.numRetries + 1, repr(self.neededObjects)))
self.numRetries -= 1
return Task.again
else:
if __debug__:
if _breakOnTimeout:
if hasattr(self, "avatarId"):
print "\n\nself.avatarId =", self.avatarId
print "\nself.neededObjects =", self.neededObjects
print "\ntimed out after %s seconds.\n\n"%(task.delayTime,)
import pdb; pdb.set_trace()
self.delete()
return Task.done
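# --- Illustrative subclass (a minimal sketch, not part of the engine) ---
# Queue up the needed objects first, then override finish() to consume them:
#
#     class AvatarLookup(AsyncRequest):
#         def __init__(self, air, avId):
#             AsyncRequest.__init__(self, air)
#             self.askForObject(avId)
#         def finish(self):
#             avatar = self.neededObjects.values()[0]
#             print 'got avatar', avatar
#             AsyncRequest.finish(self)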
def cleanupAsyncRequests():
"""
    Only call this when the application is shutting down.
"""
    # iterate over the request objects (the dict values), not the id() keys;
    # values() returns a list copy, so delete() can safely mutate the dict
    for asyncRequest in AsyncRequest._asyncRequests.values():
asyncRequest.delete()
assert AsyncRequest._asyncRequests == {}
| bsd-3-clause |
davipeterlini/routeflow_tcc | pox/pox/lib/graph/minigraph.py | 26 | 4103 | # Copyright 2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
A minimal reimplementation of enough of NetworkX's MultiGraph so that
the NOM graph stuff should work. This actually doesn't present an
ideal way to store the underlying graph, but it'll work for now.
"""
from collections import defaultdict as ddict
def _fix_nbunch (nbunch, cls = set):
try:
return cls(nbunch)
except:
return cls([nbunch])
class MultiGraph (object):
def __init__ (self):
self._next_key = 0
self._edges = ddict(lambda:ddict(lambda:ddict(lambda:{})))
# node -> node -> key -> {attr}
self._nodes = {}
# node -> {attr}
def nodes (self, data = False):
if not data:
return self._nodes.keys()
return self._nodes.items()
def edges (self, nbunch = None, data = False, keys = False):
def fix (a,b):
if a>b: return (b,a)
return (a,b)
if nbunch is not None:
nbunch = _fix_nbunch(nbunch)
edges = {}
for e1,otherEnd in self._edges.iteritems():
for e2,rest in otherEnd.iteritems():
if nbunch is not None:
if e1 not in nbunch: continue
if len(nbunch) > 1 and e2 not in nbunch: continue
e = fix(e1,e2)
if e in edges: continue
edges[e] = rest
r = []
for nodes,edgelist in edges.iteritems():
for k,d in edgelist.iteritems():
if data and keys:
r.append((nodes[0],nodes[1],k,d)) # Is the order right?
elif data:
r.append((nodes[0],nodes[1],d))
elif keys:
r.append((nodes[0],nodes[1],k))
else:
r.append(nodes)
return r
def neighbors (self, node):
assert node in self._nodes
return list(set(self._edges[node].keys()))
def _gen_key (self):
r = self._next_key
self._next_key += 1
return r
def add_node (self, node, **attr):
if node in self._nodes:
self._nodes[node].update(attr)
else:
self._nodes[node] = attr
def remove_node (self, node):
others = self._edges[node].keys()
del self._edges[node]
for other in others:
if other == node: continue
del self._edges[other][node]
del self._nodes[node]
def add_edge (self, node1, node2, key=None, **attr):
assert node1 is not node2
self.add_node(node1)
self.add_node(node2)
if key is None: key = self._gen_key()
e = self._edges[node1][node2][key]
e.update(attr)
self._edges[node2][node1][key] = e
def add_edges_from (self, edges, **attr):
for e in edges:
if len(e) == 2:
self.add_edge(*e)
elif len(e) == 3:
d = e[2].copy()
d.update(attr)
self.add_edge(e[0],e[1],**d)
elif len(e) == 4:
d = e[3].copy()
d.update(attr)
        self.add_edge(e[0],e[1],key=e[2],**d) # 4-tuples are (u, v, key, data)
else:
assert False
def remove_edge (self, node1, node2, key=None):
if key is None:
key = self._edges[node1][node2].keys()[0] # First one is fine
del self._edges[node1][node2][key]
del self._edges[node2][node1][key]
def add_path (self, nodes, **attr):
for n in nodes:
self.add_node(n, **attr)
for n1,n2 in zip(nodes[:-1],nodes[1:]):
self.add_edge(n1,n2)
def __getitem__ (self, node):
o = {}
for k0,v0 in self._edges[node].iteritems():
if k0 not in o: o[k0] = {}
for k1,v1 in v0.iteritems():
if k1 not in o[k0]: o[k0][k1] = {}
o[k0][k1] = v1
return o # This is self._edges but as a normal dict
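# --- Minimal self-check sketch (not part of POX itself); exercises the
# subset of the MultiGraph API that the NOM graph code relies on.
if __name__ == '__main__':
  g = MultiGraph()
  g.add_path(['a', 'b', 'c'], kind='link')
  g.add_edge('a', 'c', weight=3)
  assert sorted(g.neighbors('a')) == ['b', 'c']
  assert len(g.edges(keys=True)) == 3
  g.remove_node('b')
  assert g.neighbors('a') == ['c']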
| apache-2.0 |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sympy/mpmath/tests/test_calculus.py | 40 | 1825 | from sympy.mpmath import *
def test_approximation():
mp.dps = 15
f = lambda x: cos(2-2*x)/x
p, err = chebyfit(f, [2, 4], 8, error=True)
assert err < 1e-5
for i in range(10):
x = 2 + i/5.
assert abs(polyval(p, x) - f(x)) < err
def test_limits():
mp.dps = 15
assert limit(lambda x: (x-sin(x))/x**3, 0).ae(mpf(1)/6)
assert limit(lambda n: (1+1/n)**n, inf).ae(e)
def test_polyval():
assert polyval([], 3) == 0
assert polyval([0], 3) == 0
assert polyval([5], 3) == 5
# 4x^3 - 2x + 5
p = [4, 0, -2, 5]
assert polyval(p,4) == 253
assert polyval(p,4,derivative=True) == (253, 190)
def test_polyroots():
p = polyroots([1,-4])
assert p[0].ae(4)
p, q = polyroots([1,2,3])
assert p.ae(-1 - sqrt(2)*j)
assert q.ae(-1 + sqrt(2)*j)
#this is not a real test, it only tests a specific case
assert polyroots([1]) == []
try:
polyroots([0])
assert False
except ValueError:
pass
def test_pade():
one = mpf(1)
mp.dps = 20
N = 10
a = [one]
k = 1
for i in range(1, N+1):
k *= i
a.append(one/k)
p, q = pade(a, N//2, N//2)
for x in arange(0, 1, 0.1):
r = polyval(p[::-1], x)/polyval(q[::-1], x)
assert(r.ae(exp(x), 1.0e-10))
mp.dps = 15
def test_fourier():
mp.dps = 15
c, s = fourier(lambda x: x+1, [-1, 2], 2)
#plot([lambda x: x+1, lambda x: fourierval((c, s), [-1, 2], x)], [-1, 2])
assert c[0].ae(1.5)
assert c[1].ae(-3*sqrt(3)/(2*pi))
assert c[2].ae(3*sqrt(3)/(4*pi))
assert s[0] == 0
assert s[1].ae(3/(2*pi))
assert s[2].ae(3/(4*pi))
assert fourierval((c, s), [-1, 2], 1).ae(1.9134966715663442)
def test_differint():
mp.dps = 15
assert differint(lambda t: t, 2, -0.5).ae(8*sqrt(2/pi)/3)
| agpl-3.0 |
JPaulMora/Pyrit | cpyrit/config.py | 1 | 2322 | # -*- coding: UTF-8 -*-
#
# Copyright 2015, John Mora, [email protected]
# Original Work by Lukas Lueg (c) 2008-2011.
#
# This file is part of Pyrit.
#
# Pyrit is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pyrit. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os
import sys
def default_config():
config = {'default_storage': 'file://',
'use_CUDA': 'false',
'use_OpenCL': 'false',
'rpc_server': 'false',
'rpc_announce': 'true',
'rpc_announce_broadcast': 'false',
'rpc_knownclients': '',
'workunit_size': '75000',
'limit_ncpus': 0}
return config
def read_configfile(filename):
config = default_config()
with open(filename, 'rb') as f:
for line in f:
if line.startswith('#') or '=' not in line:
continue
option, value = map(str.strip, line.split('=', 1))
if option in config:
config[option] = value
else:
print >> sys.stderr, "WARNING: Unknown option '%s' " \
"in configfile '%s'" % (option, filename)
return config
def write_configfile(config, filename):
with open(filename, 'wb') as f:
for option, value in sorted(config.items()):
f.write("%s = %s\n" % (option, value))
configpath = os.path.expanduser(os.path.join('~', '.pyrit'))
default_configfile = os.path.join(configpath, 'config')
if os.path.exists(default_configfile):
cfg = read_configfile(default_configfile)
else:
cfg = default_config()
if not os.path.exists(configpath):
os.makedirs(configpath)
write_configfile(cfg, default_configfile)
| gpl-3.0 |
miumok98/weblate | weblate/trans/tests/test_permissions.py | 6 | 2799 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <[email protected]>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.contrib.auth.models import User, Group
from django.test import TestCase
from weblate.trans.models import Project
from weblate.trans.permissions import (
check_owner, check_permission, can_delete_comment
)
class PermissionsTest(TestCase):
def setUp(self):
self.user = User.objects.create_user('user', '[email protected]', 'x')
self.owner = User.objects.create_user('owner', '[email protected]', 'x')
self.owner.groups.add(Group.objects.get(name='Owners'))
self.project = Project.objects.create(slug='test')
self.project.owners.add(self.owner)
def test_owner_owned(self):
self.assertTrue(
check_owner(self.owner, self.project, 'trans.author_translation')
)
def test_owner_no_perm(self):
self.assertFalse(
check_owner(self.owner, self.project, 'trans.delete_translation')
)
def test_owner_user(self):
self.assertFalse(
check_owner(self.user, self.project, 'trans.author_translation')
)
def test_check_owner(self):
self.assertTrue(
check_permission(
self.owner, self.project, 'trans.author_translation'
)
)
def test_check_user(self):
self.assertFalse(
check_permission(
self.user, self.project, 'trans.author_translation'
)
)
def test_delete_comment_owner(self):
self.assertTrue(can_delete_comment(self.owner, self.project))
def test_delete_comment_user(self):
self.assertFalse(can_delete_comment(self.user, self.project))
def test_cache(self):
key = ('can_delete_comment', self.user.id)
self.assertNotIn(key, self.project.permissions_cache)
self.assertFalse(can_delete_comment(self.user, self.project))
self.assertFalse(self.project.permissions_cache[key])
self.project.permissions_cache[key] = True
self.assertTrue(can_delete_comment(self.user, self.project))
| gpl-3.0 |
inactivist/guess-language | guess_language/blocks_test.py | 66 | 1693 | ''' Copyright (c) 2008, Kent S Johnson
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
'''
import unittest
from blocks import unicodeBlock
class blocks_test(unittest.TestCase):
def test_unicodeBlock(self):
for c in range(128):
self.assertBlock('Basic Latin', c)
for c in range(0x80, 0x180) + range(0x250, 0x2B0):
self.assertBlock('Extended Latin', c)
self.assertBlock('Thai', 0xE00)
self.assertBlock('Thai', 0xE7F)
self.assertBlock('Lao', 0xE80)
self.assertBlock('Lao', 0x0EFF)
self.assertBlock('Tibetan', 0xF00)
self.assertBlock('Tibetan', 0xFFF)
self.assertBlock('Cyrillic', 0x421)
def assertBlock(self, name, c):
c = unichr(c)
block = unicodeBlock(c)
        self.assertEquals(name, block, '%s != %s for %r' % (name, block, c))
def setUp(self):
pass
if __name__ == '__main__':
unittest.main() | lgpl-2.1 |
LouisPlisso/pytomo | pytomo/flvlib/primitives.py | 100 | 2046 | import struct
"""
The internal FLV representations of numbers.
"""
__all__ = ['get_ui32', 'make_ui32', 'get_si32_extended', 'make_si32_extended',
'get_ui24', 'make_ui24', 'get_ui16', 'make_ui16',
'get_si16', 'make_si16', 'get_ui8', 'make_ui8',
'get_double', 'make_double', 'EndOfFile']
class EndOfFile(Exception):
pass
# UI32
def get_ui32(f):
try:
ret = struct.unpack(">I", f.read(4))[0]
except struct.error:
raise EndOfFile
return ret
def make_ui32(num):
return struct.pack(">I", num)
# SI32 extended
def get_si32_extended(f):
# The last 8 bits are the high 8 bits of the whole number
# That's how Adobe likes it. Go figure...
low_high = f.read(4)
if len(low_high) < 4:
raise EndOfFile
combined = low_high[3] + low_high[:3]
return struct.unpack(">i", combined)[0]
def make_si32_extended(num):
ret = struct.pack(">i", num)
return ret[1:] + ret[0]
# UI24
def get_ui24(f):
try:
high, low = struct.unpack(">BH", f.read(3))
except struct.error:
raise EndOfFile
ret = (high << 16) + low
return ret
def make_ui24(num):
ret = struct.pack(">I", num)
return ret[1:]
# UI16
def get_ui16(f):
try:
ret = struct.unpack(">H", f.read(2))[0]
except struct.error:
raise EndOfFile
return ret
def make_ui16(num):
return struct.pack(">H", num)
# SI16
def get_si16(f):
try:
ret = struct.unpack(">h", f.read(2))[0]
except struct.error:
raise EndOfFile
return ret
def make_si16(num):
return struct.pack(">h", num)
# UI8
def get_ui8(f):
try:
ret = struct.unpack("B", f.read(1))[0]
except struct.error:
raise EndOfFile
return ret
def make_ui8(num):
return struct.pack("B", num)
# DOUBLE
def get_double(f):
data = f.read(8)
try:
ret = struct.unpack(">d", data)[0]
except struct.error:
raise EndOfFile
return ret
def make_double(num):
return struct.pack(">d", num)
| gpl-2.0 |
v-iam/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/compute/v2017_03_30/models/virtual_machine_scale_set_instance_view.py | 2 | 2014 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineScaleSetInstanceView(Model):
"""The instance view of a virtual machine scale set.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar virtual_machine: The instance view status summary for the virtual
machine scale set.
:vartype virtual_machine:
:class:`VirtualMachineScaleSetInstanceViewStatusesSummary
<azure.mgmt.compute.compute.v2017_03_30.models.VirtualMachineScaleSetInstanceViewStatusesSummary>`
:ivar extensions: The extensions information.
:vartype extensions: list of
:class:`VirtualMachineScaleSetVMExtensionsSummary
<azure.mgmt.compute.compute.v2017_03_30.models.VirtualMachineScaleSetVMExtensionsSummary>`
:param statuses: The resource status information.
:type statuses: list of :class:`InstanceViewStatus
<azure.mgmt.compute.compute.v2017_03_30.models.InstanceViewStatus>`
"""
_validation = {
'virtual_machine': {'readonly': True},
'extensions': {'readonly': True},
}
_attribute_map = {
'virtual_machine': {'key': 'virtualMachine', 'type': 'VirtualMachineScaleSetInstanceViewStatusesSummary'},
'extensions': {'key': 'extensions', 'type': '[VirtualMachineScaleSetVMExtensionsSummary]'},
'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
}
def __init__(self, statuses=None):
self.virtual_machine = None
self.extensions = None
self.statuses = statuses
| mit |
zzzaaa12/PTT_Notify | ptt_notify.py | 1 | 6855 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import time
import feedparser
import requests
import json
from datetime import datetime
from datetime import timedelta
from send_notify import send_notify_mail
# files in this project
from setting import AUTO_UPDATE_SECS
from setting import SEND_EMAIL
from setting import BOARDS
from setting import SHOW_CONTENT
from setting import G_KEYWORDS
from setting import G_AUTHORS
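# Shape of each BOARDS entry, as inferred from the lookups below (the real
# values live in setting.py, which is not shown here):
#
#     {'name': 'Gossiping', 'keywords': [...], 'authors': [...],
#      'search_general_keyword': True, 'search_general_author': True,
#      'search_content': False, 'show_all_artciles': False}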
class PttXmlParser:
def __init__(self):
self.last_updated = datetime.now() + timedelta(hours = -1)
self.board_list = BOARDS
for board in self.board_list:
board['last_updated'] = datetime.now() + timedelta(hours = -1)
def get_matches(self, board_name, title, author, content):
for board in self.board_list:
if board['name'].lower() != board_name.lower():
continue
# specific keywords
for keyword in board['keywords']:
if title.lower().find(keyword.lower()) > -1:
return True
# specific authors
for people in board['authors']:
if people.lower() == author.lower():
return True
# general keywords
if board['search_general_keyword']:
for keyword in G_KEYWORDS:
if title.lower().find(keyword.lower()) > -1:
return True
# general authors
if board['search_general_author']:
for people in G_AUTHORS:
if people.lower() == author.lower():
return True
# search content
if board['search_content']:
for keyword in board['keywords']:
if content.lower().find(keyword.lower()) > -1:
return True
if board['search_general_keyword']:
for keyword in board['keywords']:
if content.lower().find(keyword.lower()) > -1:
return True
return False
def parse_ptt_article(self):
for board in self.board_list:
r_url = 'https://www.ptt.cc/atom/' + board['name'] + '.xml'
print 'get ' + r_url
r = requests.get(r_url)
if r.status_code != 200:
print 'get url fail!!!'
return False
data = feedparser.parse(r.text)
# board information
# 1. title: data['feed']['title']
# 2. url: data['feed']['id']
# article infomation
board['article_list'] = []
got_last_updated_time = False
for item in data['entries']:
author = item['author']
title = item['title']
url = item['id']
content = item['content'][0]['value'].replace('<pre>', '').replace('</pre>', '')
## format of item['published']: 2017-04-22T08:39:34+08:00
publish_time = datetime.strptime(item['published'], '%Y-%m-%dT%H:%M:%S+08:00')
if (publish_time - board['last_updated']).total_seconds() > 0:
# parse articles and compare
if board['show_all_artciles'] or self.get_matches(board['name'], title, author, content):
article_data = {'board':'', 'author':'', 'title':'', 'url':'', 'content':''}
article_data['board'] = board
article_data['author'] = author
article_data['title'] = title
article_data['url'] = url
article_data['content'] = content
article_data['time'] = publish_time.strftime('%H:%M')
board['article_list'].append(article_data)
# read the newest article and save last_updated_time from it
if not got_last_updated_time:
got_last_updated_time = True
last_updated_time = publish_time
board['last_updated'] = last_updated_time
return True
def run(self):
while True:
mail_str = ''
got_board_list = ''
print 'start at: ' + str(datetime.now().strftime('%m/%d %H:%M:%S\n'))
self.parse_ptt_article()
for board in self.board_list:
if len(board['article_list']) == 0:
continue
# create title of notify mail
if SEND_EMAIL:
mail_str = mail_str + board['name'] + u'板:\n'
# add board name in mail title
if got_board_list.find(board['name']) == -1:
if len(got_board_list) == 0:
got_board_list = board['name']
else:
got_board_list = got_board_list + '/' + board['name']
for article in board['article_list']:
mail_str = mail_str + ' ' + article['time'] + ' ' + article['author'] + ' ' + article['title'] + '\n'
mail_str = mail_str + ' ' + article['url']
if SHOW_CONTENT:
mail_str = mail_str + '\n ' + article['content'].replace('\n', '\n ') + '\n'
mail_str = mail_str + '\n\n'
print '\n ' + board['name'] + u'板:'
for article in board['article_list']:
print ' ' + article['time'] + ' ' + article['author'] + ' ' + article['title']
print ' ' + article['url']
# save last updated time
self.last_updated = datetime.now()
f = open('last_updated', 'w')
f.write(self.last_updated.strftime('%m/%d %H:%M:%S\n'))
f.close()
            # send notify mail
if SEND_EMAIL and len(mail_str) > 0:
send_notify_mail('PTT [' + self.last_updated.strftime('%H:%M') + ']: ' + got_board_list, mail_str)
print 'notify mail sent (' + self.last_updated.strftime('%m/%d %H:%M') + ')'
print '\nfinish at ' + self.last_updated.strftime('%m/%d %H:%M:%S')
time.sleep(AUTO_UPDATE_SECS)
def main():
try:
parser = PttXmlParser()
parser.run()
except Exception as e:
print 'Error: An exception occurred at ' + datetime.now().strftime('%m/%d %H:%M:%S') + ': \n' + str(e) + '\n'
if SEND_EMAIL:
mail_str = 'An exception occurred at ' + datetime.now().strftime('%m/%d %H:%M:%S') + ':\n' + str(e) + '\n'
send_notify_mail('Error of PTT Notify', mail_str)
if __name__ == '__main__':
main()
| mit |
onshape-public/onshape-clients | python/onshape_client/oas/models/bt_image_filter853_all_of.py | 1 | 4736 | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
class BTImageFilter853AllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"is_image": (bool,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"bt_type": "btType", # noqa: E501
"is_image": "isImage", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_image_filter853_all_of.BTImageFilter853AllOf - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
bt_type (str): [optional] # noqa: E501
is_image (bool): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
| mit |
amith01994/intellij-community | python/lib/Lib/site-packages/django/contrib/auth/models.py | 71 | 19172 | import datetime
import urllib
from django.contrib import auth
from django.contrib.auth.signals import user_logged_in
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models.manager import EmptyManager
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import smart_str
from django.utils.hashcompat import md5_constructor, sha_constructor
from django.utils.translation import ugettext_lazy as _
UNUSABLE_PASSWORD = '!' # This will never be a valid hash
def get_hexdigest(algorithm, salt, raw_password):
"""
Returns a string of the hexdigest of the given plaintext password and salt
using the given algorithm ('md5', 'sha1' or 'crypt').
"""
raw_password, salt = smart_str(raw_password), smart_str(salt)
if algorithm == 'crypt':
try:
import crypt
except ImportError:
raise ValueError('"crypt" password algorithm not supported in this environment')
return crypt.crypt(raw_password, salt)
if algorithm == 'md5':
return md5_constructor(salt + raw_password).hexdigest()
elif algorithm == 'sha1':
return sha_constructor(salt + raw_password).hexdigest()
raise ValueError("Got unknown password algorithm type in password.")
def check_password(raw_password, enc_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
encryption formats behind the scenes.
"""
algo, salt, hsh = enc_password.split('$')
return hsh == get_hexdigest(algo, salt, raw_password)
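# For example, a stored value of the form "sha1$beef$<40 hex digits>" splits
# into algorithm, salt and digest; the digest of salt + raw_password is then
# recomputed and compared against the stored one.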
def update_last_login(sender, user, **kwargs):
"""
A signal receiver which updates the last_login date for
the user logging in.
"""
user.last_login = datetime.datetime.now()
user.save()
user_logged_in.connect(update_last_login)
class SiteProfileNotAvailable(Exception):
pass
class PermissionManager(models.Manager):
def get_by_natural_key(self, codename, app_label, model):
return self.get(
codename=codename,
content_type=ContentType.objects.get_by_natural_key(app_label, model)
)
class Permission(models.Model):
"""The permissions system provides a way to assign permissions to specific users and groups of users.
The permission system is used by the Django admin site, but may also be useful in your own code. The Django admin site uses permissions as follows:
- The "add" permission limits the user's ability to view the "add" form and add an object.
- The "change" permission limits a user's ability to view the change list, view the "change" form and change an object.
- The "delete" permission limits the ability to delete an object.
Permissions are set globally per type of object, not per specific object instance. It is possible to say "Mary may change news stories," but it's not currently possible to say "Mary may change news stories, but only the ones she created herself" or "Mary may only change news stories that have a certain status or publication date."
Three basic permissions -- add, change and delete -- are automatically created for each Django model.
"""
name = models.CharField(_('name'), max_length=50)
content_type = models.ForeignKey(ContentType)
codename = models.CharField(_('codename'), max_length=100)
objects = PermissionManager()
class Meta:
verbose_name = _('permission')
verbose_name_plural = _('permissions')
unique_together = (('content_type', 'codename'),)
ordering = ('content_type__app_label', 'content_type__model', 'codename')
def __unicode__(self):
return u"%s | %s | %s" % (
unicode(self.content_type.app_label),
unicode(self.content_type),
unicode(self.name))
def natural_key(self):
return (self.codename,) + self.content_type.natural_key()
natural_key.dependencies = ['contenttypes.contenttype']
class Group(models.Model):
"""Groups are a generic way of categorizing users to apply permissions, or some other label, to those users. A user can belong to any number of groups.
A user in a group automatically has all the permissions granted to that group. For example, if the group Site editors has the permission can_edit_home_page, any user in that group will have that permission.
Beyond permissions, groups are a convenient way to categorize users to apply some label, or extended functionality, to them. For example, you could create a group 'Special users', and you could write code that would do special things to those users -- such as giving them access to a members-only portion of your site, or sending them members-only e-mail messages.
"""
name = models.CharField(_('name'), max_length=80, unique=True)
permissions = models.ManyToManyField(Permission, verbose_name=_('permissions'), blank=True)
class Meta:
verbose_name = _('group')
verbose_name_plural = _('groups')
def __unicode__(self):
return self.name
class UserManager(models.Manager):
def create_user(self, username, email, password=None):
"""
Creates and saves a User with the given username, e-mail and password.
"""
now = datetime.datetime.now()
# Normalize the address by lowercasing the domain part of the email
# address.
try:
email_name, domain_part = email.strip().split('@', 1)
except ValueError:
pass
else:
email = '@'.join([email_name, domain_part.lower()])
user = self.model(username=username, email=email, is_staff=False,
is_active=True, is_superuser=False, last_login=now,
date_joined=now)
user.set_password(password)
user.save(using=self._db)
return user
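    # For example (a hypothetical call):
    #
    #     user = User.objects.create_user('jane', '[email protected]', 's3cret')
    #
    # normalizes the address to '[email protected]' and stores a salted hash
    # rather than the raw password.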
def create_superuser(self, username, email, password):
u = self.create_user(username, email, password)
u.is_staff = True
u.is_active = True
u.is_superuser = True
u.save(using=self._db)
return u
def make_random_password(self, length=10, allowed_chars='abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789'):
"Generates a random password with the given length and given allowed_chars"
# Note that default value of allowed_chars does not have "I" or letters
# that look like it -- just to avoid confusion.
from random import choice
return ''.join([choice(allowed_chars) for i in range(length)])
# A few helper functions for common logic between User and AnonymousUser.
def _user_get_all_permissions(user, obj):
permissions = set()
anon = user.is_anonymous()
for backend in auth.get_backends():
if not anon or backend.supports_anonymous_user:
if hasattr(backend, "get_all_permissions"):
if obj is not None:
if backend.supports_object_permissions:
permissions.update(
backend.get_all_permissions(user, obj)
)
else:
permissions.update(backend.get_all_permissions(user))
return permissions
def _user_has_perm(user, perm, obj):
anon = user.is_anonymous()
active = user.is_active
for backend in auth.get_backends():
if (not active and not anon and backend.supports_inactive_user) or \
(not anon or backend.supports_anonymous_user):
if hasattr(backend, "has_perm"):
if obj is not None:
if (backend.supports_object_permissions and
backend.has_perm(user, perm, obj)):
return True
else:
if backend.has_perm(user, perm):
return True
return False
def _user_has_module_perms(user, app_label):
anon = user.is_anonymous()
active = user.is_active
for backend in auth.get_backends():
if (not active and not anon and backend.supports_inactive_user) or \
(not anon or backend.supports_anonymous_user):
if hasattr(backend, "has_module_perms"):
if backend.has_module_perms(user, app_label):
return True
return False
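# Editor's note: a minimal sketch (not part of Django) of the backend shape the
# three helpers above duck-type against; the capability flags and method names
# are exactly the attributes they probe.
class _ExampleReadOnlyBackend(object):
    supports_anonymous_user = False
    supports_inactive_user = False
    supports_object_permissions = False

    def get_all_permissions(self, user_obj):
        return set([u'news.view_story'])

    def has_perm(self, user_obj, perm):
        return perm in self.get_all_permissions(user_obj)

    def has_module_perms(self, user_obj, app_label):
        return any(p.startswith(app_label + '.')
                   for p in self.get_all_permissions(user_obj))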
class User(models.Model):
"""
Users within the Django authentication system are represented by this model.
Username and password are required. Other fields are optional.
"""
username = models.CharField(_('username'), max_length=30, unique=True, help_text=_("Required. 30 characters or fewer. Letters, numbers and @/./+/-/_ characters"))
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
email = models.EmailField(_('e-mail address'), blank=True)
password = models.CharField(_('password'), max_length=128, help_text=_("Use '[algo]$[salt]$[hexdigest]' or use the <a href=\"password/\">change password form</a>."))
is_staff = models.BooleanField(_('staff status'), default=False, help_text=_("Designates whether the user can log into this admin site."))
is_active = models.BooleanField(_('active'), default=True, help_text=_("Designates whether this user should be treated as active. Unselect this instead of deleting accounts."))
is_superuser = models.BooleanField(_('superuser status'), default=False, help_text=_("Designates that this user has all permissions without explicitly assigning them."))
last_login = models.DateTimeField(_('last login'), default=datetime.datetime.now)
date_joined = models.DateTimeField(_('date joined'), default=datetime.datetime.now)
groups = models.ManyToManyField(Group, verbose_name=_('groups'), blank=True,
help_text=_("In addition to the permissions manually assigned, this user will also get all permissions granted to each group he/she is in."))
user_permissions = models.ManyToManyField(Permission, verbose_name=_('user permissions'), blank=True)
objects = UserManager()
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
def __unicode__(self):
return self.username
def get_absolute_url(self):
return "/users/%s/" % urllib.quote(smart_str(self.username))
def is_anonymous(self):
"""
Always returns False. This is a way of comparing User objects to
anonymous users.
"""
return False
def is_authenticated(self):
"""
Always return True. This is a way to tell if the user has been
authenticated in templates.
"""
return True
def get_full_name(self):
"Returns the first_name plus the last_name, with a space in between."
full_name = u'%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def set_password(self, raw_password):
if raw_password is None:
self.set_unusable_password()
else:
import random
algo = 'sha1'
salt = get_hexdigest(algo, str(random.random()), str(random.random()))[:5]
hsh = get_hexdigest(algo, salt, raw_password)
self.password = '%s$%s$%s' % (algo, salt, hsh)
def check_password(self, raw_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
encryption formats behind the scenes.
"""
# Backwards-compatibility check. Older passwords won't include the
# algorithm or salt.
if '$' not in self.password:
is_correct = (self.password == get_hexdigest('md5', '', raw_password))
if is_correct:
# Convert the password to the new, more secure format.
self.set_password(raw_password)
self.save()
return is_correct
return check_password(raw_password, self.password)
def set_unusable_password(self):
# Sets a value that will never be a valid hash
self.password = UNUSABLE_PASSWORD
def has_usable_password(self):
if self.password is None \
or self.password == UNUSABLE_PASSWORD:
return False
else:
return True
def get_group_permissions(self, obj=None):
"""
Returns a list of permission strings that this user has through
his/her groups. This method queries all available auth backends.
If an object is passed in, only permissions matching this object
are returned.
"""
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_group_permissions"):
if obj is not None:
if backend.supports_object_permissions:
permissions.update(
backend.get_group_permissions(self, obj)
)
else:
permissions.update(backend.get_group_permissions(self))
return permissions
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj)
def has_perm(self, perm, obj=None):
"""
Returns True if the user has the specified permission. This method
queries all available auth backends, but returns immediately if any
backend returns True. Thus, a user who has permission from a single
auth backend is assumed to have permission in general. If an object
is provided, permissions for this specific object are checked.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
# Otherwise we need to check the backends.
return _user_has_perm(self, perm, obj)
def has_perms(self, perm_list, obj=None):
"""
Returns True if the user has each of the specified permissions.
If object is passed, it checks if the user has all required perms
for this object.
"""
for perm in perm_list:
if not self.has_perm(perm, obj):
return False
return True
def has_module_perms(self, app_label):
"""
Returns True if the user has any permissions in the given app
label. Uses pretty much the same logic as has_perm, above.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
return _user_has_module_perms(self, app_label)
def get_and_delete_messages(self):
messages = []
for m in self.message_set.all():
messages.append(m.message)
m.delete()
return messages
def email_user(self, subject, message, from_email=None):
"Sends an e-mail to this User."
from django.core.mail import send_mail
send_mail(subject, message, from_email, [self.email])
def get_profile(self):
"""
Returns site-specific profile for this user. Raises
SiteProfileNotAvailable if this site does not allow profiles.
"""
if not hasattr(self, '_profile_cache'):
from django.conf import settings
if not getattr(settings, 'AUTH_PROFILE_MODULE', False):
                raise SiteProfileNotAvailable(
                    'You need to set AUTH_PROFILE_MODULE in your '
                    'project settings')
try:
app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
except ValueError:
                raise SiteProfileNotAvailable(
                    'app_label and model_name should be separated by a dot '
                    'in the AUTH_PROFILE_MODULE setting')
try:
model = models.get_model(app_label, model_name)
if model is None:
                    raise SiteProfileNotAvailable(
                        'Unable to load the profile model, check '
                        'AUTH_PROFILE_MODULE in your project settings')
self._profile_cache = model._default_manager.using(self._state.db).get(user__id__exact=self.id)
self._profile_cache.user = self
except (ImportError, ImproperlyConfigured):
raise SiteProfileNotAvailable
return self._profile_cache
def _get_message_set(self):
import warnings
warnings.warn('The user messaging API is deprecated. Please update'
' your code to use the new messages framework.',
category=DeprecationWarning)
return self._message_set
message_set = property(_get_message_set)
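# Editor's note: a short usage sketch (assumes a configured settings module;
# not part of the original file). set_password() stores 'algo$salt$hexdigest'
# rather than plaintext, and check_password() transparently upgrades legacy
# unsalted MD5 hashes on the first successful check.
def _password_roundtrip_example():
    user = User(username='demo')
    user.set_password('s3cret')
    return user.check_password('s3cret')  # True, without touching the database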
class Message(models.Model):
"""
The message system is a lightweight way to queue messages for given
users. A message is associated with a User instance (so it is only
applicable for registered users). There's no concept of expiration or
timestamps. Messages are created by the Django admin after successful
actions. For example, "The poll Foo was created successfully." is a
message.
"""
user = models.ForeignKey(User, related_name='_message_set')
message = models.TextField(_('message'))
def __unicode__(self):
return self.message
class AnonymousUser(object):
id = None
username = ''
is_staff = False
is_active = False
is_superuser = False
_groups = EmptyManager()
_user_permissions = EmptyManager()
def __init__(self):
pass
def __unicode__(self):
return 'AnonymousUser'
def __str__(self):
return unicode(self).encode('utf-8')
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return 1 # instances always return the same hash value
def save(self):
raise NotImplementedError
def delete(self):
raise NotImplementedError
def set_password(self, raw_password):
raise NotImplementedError
def check_password(self, raw_password):
raise NotImplementedError
def _get_groups(self):
return self._groups
groups = property(_get_groups)
def _get_user_permissions(self):
return self._user_permissions
user_permissions = property(_get_user_permissions)
def get_group_permissions(self, obj=None):
return set()
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj=obj)
def has_perm(self, perm, obj=None):
return _user_has_perm(self, perm, obj=obj)
def has_perms(self, perm_list, obj=None):
for perm in perm_list:
if not self.has_perm(perm, obj):
return False
return True
def has_module_perms(self, module):
return _user_has_module_perms(self, module)
def get_and_delete_messages(self):
return []
def is_anonymous(self):
return True
def is_authenticated(self):
return False
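# Editor's note: illustrative sketch. Because User and AnonymousUser expose the
# same is_authenticated()/is_anonymous() interface, view code can branch on the
# method instead of testing the class.
def _greeting_example(user):
    if user.is_authenticated():
        return 'Hello, %s' % user.username
    return 'Hello, guest'  # AnonymousUser().username is the empty string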
| apache-2.0 |
gvallarelli/inasafe | safe_qgis/impact_calculator_thread.py | 1 | 6203 | """
InaSAFE Disaster risk assessment tool developed by AusAid -
**ISImpactCalculatorThread.**
The module provides a high level interface for running SAFE scenarios.
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '[email protected], [email protected]'
__date__ = '11/01/2011'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import threading
import traceback
import sys
from PyQt4.QtCore import (QObject,
pyqtSignal)
from safe_qgis.safe_interface import calculateSafeImpact
from safe_qgis.exceptions import InsufficientParametersException
class ImpactCalculatorThread(threading.Thread, QObject):
"""A threaded class to compute an impact scenario. Under
python a thread can only be run once, so the instances
based on this class are designed to be short lived.
We inherit from QObject so that we can use Qt translation self.tr
calls and emit signals.
    .. todo:: implement this class using QThread as a base class since it
        supports thread termination, which Python threading doesn't seem to do.
Also see the techbase article below for emitting signals across
threads using Qt.QueuedConnection.
http://techbase.kde.org/Development/Tutorials/
Python_introduction_to_signals_and_slots
    Users of this class can listen for signals indicating
when processing is done. For example::
from is_impact_calculator_thread import ImpactCalculatorThread
n = ImpactCalculatorThread()
n.done.connect(n.showMessage)
n.done.emit()
Prints 'hello' to the console
.. seealso::
http://techbase.kde.org/Development/Tutorials/
Python_introduction_to_signals_and_slots
for an alternative (maybe nicer?) approach.
"""
done = pyqtSignal()
def showMessage(self):
"""For testing only"""
print 'hello'
def __init__(self, theHazardLayer, theExposureLayer,
theFunction):
"""Constructor for the impact calculator thread.
Args:
* Hazard layer: InaSAFE read_layer object containing the Hazard data.
* Exposure layer: InaSAFE read_layer object containing the Exposure
data.
        * Function: an InaSAFE function that defines how the Hazard assessment
will be computed.
Returns:
None
Raises:
InsufficientParametersException if not all parameters are
set.
        Requires three parameters to be set before execution
        can take place: the hazard layer, the exposure layer and the
        impact function.
"""
threading.Thread.__init__(self)
QObject.__init__(self)
self._hazardLayer = theHazardLayer
self._exposureLayer = theExposureLayer
self._function = theFunction
self._impactLayer = None
self._result = None
self._exception = None
self._traceback = None
def impactLayer(self):
"""Return the InaSAFE layer instance which is the output from the
last run."""
return self._impactLayer
def result(self):
"""Return the result of the last run."""
return self._result
def lastException(self):
"""Return any exception that may have been raised while running"""
return self._exception
def lastTraceback(self):
"""Return the strack trace for any exception that may of occurred
while running."""
return self._traceback
def run(self):
""" Main function for hazard impact calculation thread.
Requires three properties to be set before execution
can take place:
* Hazard layer - a path to a raster,
* Exposure layer - a path to a vector points layer.
* Function - a function that defines how the Hazard assessment
will be computed.
After the thread is complete, you can use the filename and
result accessors to determine what the result of the analysis was::
calculator = ImpactCalculator()
rasterPath = os.path.join(TESTDATA, 'xxx.asc')
vectorPath = os.path.join(TESTDATA, 'xxx.shp')
calculator.setHazardLayer(self.rasterPath)
calculator.setExposureLayer(self.vectorPath)
calculator.setFunction('Flood Building Impact Function')
myRunner = calculator.getRunner()
#wait till completion
myRunner.join()
myResult = myRunner.result()
myFilename = myRunner.filename()
Args:
None.
Returns:
None
Raises:
            InsufficientParametersException if not all parameters are
            set.
"""
if (self._hazardLayer is None or self._exposureLayer is None
or self._function is None):
myMessage = self.tr('Ensure that hazard, exposure and function '
'are all set before trying to run the '
'analysis.')
raise InsufficientParametersException(myMessage)
try:
myLayers = [self._hazardLayer, self._exposureLayer]
self._impactLayer = calculateSafeImpact(theLayers=myLayers,
theFunction=self._function)
# Catch and handle all exceptions:
# pylint: disable=W0703
except Exception, e:
myMessage = self.tr('Calculation error encountered:\n')
#store the exception so that controller class can get it later
self._exception = e
self._traceback = traceback.format_tb(sys.exc_info()[2])
print myMessage
self._result = myMessage
else:
self._result = self.tr('Calculation completed successfully.')
# pylint: enable=W0703
# Let any listening slots know we are done
self.done.emit()
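# Editor's note: a minimal usage sketch under assumed inputs (hazard and
# exposure read_layer objects plus a SAFE impact function); it drives this
# class directly instead of the ImpactCalculator wrapper from the docstring.
def _run_scenario_example(hazard_layer, exposure_layer, function):
    runner = ImpactCalculatorThread(hazard_layer, exposure_layer, function)
    runner.start()  # threading.Thread entry point; executes run() above
    runner.join()   # block until the scenario has finished
    if runner.lastException() is not None:
        raise RuntimeError(runner.result())
    return runner.impactLayer()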
| gpl-3.0 |
unaguil/hyperion-ns2 | experiments/measures/generic/BroadcastTime.py | 1 | 1941 | from GenericMeasure import GenericMeasure
from GenericAvgMeasure import GenericAvgMeasure
import Units
class ReceivedMessages(GenericMeasure):
def __init__(self, period, simulationTime, broadcastTable):
GenericMeasure.__init__(self, r"DEBUG .*? - Peer .*? received packet .*?(\(.*?\)) .*?([0-9]+\,[0-9]+).*?", period, simulationTime, Units.MESSAGES)
self.__broadcastTable = broadcastTable
def parseInc(self, line):
m = self.getProg().match(line)
if m is not None:
messageID = m.group(1)
if messageID in self.__broadcastTable:
startTime = self.__broadcastTable[messageID]
time = float(m.group(2).replace(',','.'))
return (startTime, time)
return None
class BroadcastedMessages(GenericMeasure):
def __init__(self, period, simulationTime):
GenericMeasure.__init__(self, r"DEBUG .*? - Peer .*? broadcasting .*?(\(.*?\)).*? ([0-9]+\,[0-9]+).*?", period, simulationTime, Units.MESSAGES)
self.__broadcastTable = {}
def parseInc(self, line):
m = self.getProg().match(line)
if m is not None:
messageID = m.group(1)
time = float(m.group(2).replace(',','.'))
self.__broadcastTable[messageID] = time
def getBroadcastTable(self):
return self.__broadcastTable
class BroadcastTime(GenericAvgMeasure):
"""The average time used to broadcast a message"""
def __init__(self, period, simulationTime):
GenericAvgMeasure.__init__(self, period, simulationTime, Units.SECONDS)
self.__broadcastedMessages = BroadcastedMessages(period, simulationTime)
self.__receivedMessages = ReceivedMessages(period, simulationTime, self.__broadcastedMessages.getBroadcastTable())
def parseLine(self, line):
self.__broadcastedMessages.parseInc(line)
result = self.__receivedMessages.parseInc(line)
if result is not None:
broadcastTime, deliveredTime = result
elapsedTime = deliveredTime - broadcastTime
self.periodicAvgValues.addValue(elapsedTime, broadcastTime)
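# Editor's note: hypothetical log lines shaped to match the regexes above (the
# real ns-2 log format may differ); a broadcast is paired with each reception
# through the parenthesised message id, and the elapsed time is averaged.
def _broadcast_time_example():
    measure = BroadcastTime(1.0, 10.0)
    measure.parseLine('DEBUG x - Peer 0 broadcasting msg (ID:1) 2,00 s')
    measure.parseLine('DEBUG x - Peer 1 received packet (ID:1) at 2,75 s')
    return measure  # records an elapsed time of 0.75 for message (ID:1)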
| apache-2.0 |
sajuptpm/manila | contrib/tempest/tempest/config_share.py | 2 | 6876 | # Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from oslo_config import cfg
from tempest import config # noqa
service_available_group = cfg.OptGroup(name="service_available",
title="Available OpenStack Services")
ServiceAvailableGroup = [
cfg.BoolOpt("manila",
default=True,
help="Whether or not manila is expected to be available"),
]
share_group = cfg.OptGroup(name="share", title="Share Service Options")
ShareGroup = [
cfg.StrOpt("region",
default="",
help="The share region name to use. If empty, the value "
"of identity.region is used instead. If no such region "
"is found in the service catalog, the first found one is "
"used."),
cfg.StrOpt("catalog_type",
default="share",
help="Catalog type of the Share service."),
cfg.StrOpt('endpoint_type',
default='publicURL',
choices=['public', 'admin', 'internal',
'publicURL', 'adminURL', 'internalURL'],
help="The endpoint type to use for the share service."),
cfg.BoolOpt("multitenancy_enabled",
default=True,
help="This option used to determine backend driver type, "
"multitenant driver uses share-networks, but "
"single-tenant doesn't."),
cfg.ListOpt("enable_protocols",
default=["nfs", "cifs"],
help="First value of list is protocol by default, "
"items of list show enabled protocols at all."),
cfg.ListOpt("enable_ip_rules_for_protocols",
default=["nfs", "cifs", ],
help="Selection of protocols, that should "
"be covered with ip rule tests"),
cfg.ListOpt("enable_user_rules_for_protocols",
default=[],
help="Selection of protocols, that should "
"be covered with user rule tests"),
cfg.StrOpt("username_for_user_rules",
default="Administrator",
help="Username, that will be used in user tests."),
cfg.ListOpt("enable_ro_access_level_for_protocols",
default=["nfs", ],
help="List of protocols to run tests with ro access level."),
cfg.StrOpt("storage_protocol",
default="NFS_CIFS",
help="Backend protocol to target when creating volume types."),
cfg.StrOpt("share_network_id",
default="",
help="Some backend drivers requires share network "
"for share creation. Share network id, that will be "
"used for shares. If not set, it won't be used."),
cfg.StrOpt("alt_share_network_id",
default="",
help="Share network id, that will be used for shares"
" in alt tenant. If not set, it won't be used"),
cfg.StrOpt("admin_share_network_id",
default="",
help="Share network id, that will be used for shares"
" in admin tenant. If not set, it won't be used"),
cfg.BoolOpt("multi_backend",
default=False,
help="Runs Manila multi-backend tests."),
cfg.ListOpt("backend_names",
default=[],
help="Names of share backends, that will be used with "
"multibackend tests. Tempest will use first two values."),
cfg.IntOpt("share_creation_retry_number",
default=0,
help="Defines number of retries for share creation. "
"It is useful to avoid failures caused by unstable "
"environment."),
cfg.IntOpt("build_interval",
default=3,
help="Time in seconds between share availability checks."),
cfg.IntOpt("build_timeout",
default=500,
help="Timeout in seconds to wait for a share to become"
"available."),
cfg.BoolOpt("suppress_errors_in_cleanup",
default=False,
help="Whether to suppress errors with clean up operation "
"or not. There are cases when we may want to skip "
"such errors and catch only test errors."),
cfg.BoolOpt("run_manage_unmanage_tests",
default=False,
help="Defines whether to run manage/unmanage tests or not. "
"These test may leave orphaned resources, so be careful "
"enabling this opt."),
cfg.BoolOpt("run_extend_tests",
default=True,
help="Defines whether to run share extend tests or not. "
"Disable this feature if used driver doesn't "
"support it."),
cfg.BoolOpt("run_shrink_tests",
default=True,
help="Defines whether to run share shrink tests or not. "
"Disable this feature if used driver doesn't "
"support it."),
cfg.StrOpt("image_with_share_tools",
default="manila-service-image",
help="Image name for vm booting with nfs/smb clients tool."),
cfg.StrOpt("image_username",
default="manila",
help="Image username."),
cfg.StrOpt("image_password",
help="Image password. Should be used for "
"'image_with_share_tools' without Nova Metadata support."),
cfg.StrOpt("client_vm_flavor_ref",
default="100",
help="Flavor used for client vm in scenario tests."),
]
class TempestConfigProxyManila(object):
"""Wrapper over standard Tempest config that sets Manila opts."""
def __init__(self):
self._config = config.CONF
config.register_opt_group(
cfg.CONF, service_available_group, ServiceAvailableGroup)
config.register_opt_group(cfg.CONF, share_group, ShareGroup)
self._config.share = cfg.CONF.share
def __getattr__(self, attr):
return getattr(self._config, attr)
CONF = TempestConfigProxyManila()
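# Editor's note: illustrative usage (assumes a configured Tempest deployment);
# the proxy exposes the stock Tempest options plus the "share" group that was
# registered above, e.g.:
#     from tempest import config_share
#     config_share.CONF.share.enable_protocols  # ['nfs', 'cifs'] by default
#     config_share.CONF.share.build_timeout     # 500 by default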
| apache-2.0 |
frankiecjunle/yunblog | venv/lib/python2.7/site-packages/pygments/formatters/other.py | 363 | 3811 | # -*- coding: utf-8 -*-
"""
pygments.formatters.other
~~~~~~~~~~~~~~~~~~~~~~~~~
Other formatters: NullFormatter, RawTokenFormatter.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import OptionError, get_choice_opt, b
from pygments.token import Token
from pygments.console import colorize
__all__ = ['NullFormatter', 'RawTokenFormatter']
class NullFormatter(Formatter):
"""
Output the text unchanged without any formatting.
"""
name = 'Text only'
aliases = ['text', 'null']
filenames = ['*.txt']
def format(self, tokensource, outfile):
enc = self.encoding
for ttype, value in tokensource:
if enc:
outfile.write(value.encode(enc))
else:
outfile.write(value)
class RawTokenFormatter(Formatter):
r"""
Format tokens as a raw representation for storing token streams.
The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
be converted to a token stream with the `RawTokenLexer`, described in the
`lexer list <lexers.txt>`_.
Only two options are accepted:
`compress`
If set to ``'gz'`` or ``'bz2'``, compress the output with the given
compression algorithm after encoding (default: ``''``).
`error_color`
If set to a color name, highlight error tokens using that color. If
set but with no value, defaults to ``'red'``.
*New in Pygments 0.11.*
"""
name = 'Raw tokens'
aliases = ['raw', 'tokens']
filenames = ['*.raw']
unicodeoutput = False
def __init__(self, **options):
Formatter.__init__(self, **options)
if self.encoding:
raise OptionError('the raw formatter does not support the '
'encoding option')
self.encoding = 'ascii' # let pygments.format() do the right thing
self.compress = get_choice_opt(options, 'compress',
['', 'none', 'gz', 'bz2'], '')
self.error_color = options.get('error_color', None)
if self.error_color is True:
self.error_color = 'red'
if self.error_color is not None:
try:
colorize(self.error_color, '')
except KeyError:
raise ValueError("Invalid color %r specified" %
self.error_color)
def format(self, tokensource, outfile):
try:
outfile.write(b(''))
except TypeError:
raise TypeError('The raw tokens formatter needs a binary '
'output file')
if self.compress == 'gz':
import gzip
outfile = gzip.GzipFile('', 'wb', 9, outfile)
def write(text):
outfile.write(text.encode())
flush = outfile.flush
elif self.compress == 'bz2':
import bz2
compressor = bz2.BZ2Compressor(9)
def write(text):
outfile.write(compressor.compress(text.encode()))
def flush():
outfile.write(compressor.flush())
outfile.flush()
else:
def write(text):
outfile.write(text.encode())
flush = outfile.flush
if self.error_color:
for ttype, value in tokensource:
line = "%s\t%r\n" % (ttype, value)
if ttype is Token.Error:
write(colorize(self.error_color, line))
else:
write(line)
else:
for ttype, value in tokensource:
write("%s\t%r\n" % (ttype, value))
flush()
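# Editor's note: a short usage sketch (standard Pygments API); the formatter
# emits bytes, one "tokentype<TAB>repr(value)" pair per line, which the
# RawTokenLexer can later turn back into a token stream.
def _dump_tokens_example(code):
    from pygments import highlight
    from pygments.lexers import PythonLexer
    return highlight(code, PythonLexer(), RawTokenFormatter())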
| mit |
peterlauri/django | django/core/management/commands/squashmigrations.py | 58 | 8893 | import io
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, connections, migrations
from django.db.migrations.loader import AmbiguityError, MigrationLoader
from django.db.migrations.migration import SwappableTuple
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.writer import MigrationWriter
from django.utils import six
from django.utils.version import get_docs_version
class Command(BaseCommand):
help = "Squashes an existing set of migrations (from first until specified) into a single new one."
def add_arguments(self, parser):
parser.add_argument(
'app_label',
help='App label of the application to squash migrations for.',
)
parser.add_argument(
'start_migration_name', default=None, nargs='?',
help='Migrations will be squashed starting from and including this migration.',
)
parser.add_argument(
'migration_name',
help='Migrations will be squashed until and including this migration.',
)
parser.add_argument(
'--no-optimize', action='store_true', dest='no_optimize', default=False,
help='Do not try to optimize the squashed operations.',
)
parser.add_argument(
'--noinput', '--no-input', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.',
)
def handle(self, **options):
self.verbosity = options['verbosity']
self.interactive = options['interactive']
app_label = options['app_label']
start_migration_name = options['start_migration_name']
migration_name = options['migration_name']
no_optimize = options['no_optimize']
# Load the current graph state, check the app and migration they asked for exists
loader = MigrationLoader(connections[DEFAULT_DB_ALIAS])
if app_label not in loader.migrated_apps:
raise CommandError(
"App '%s' does not have migrations (so squashmigrations on "
"it makes no sense)" % app_label
)
migration = self.find_migration(loader, app_label, migration_name)
# Work out the list of predecessor migrations
migrations_to_squash = [
loader.get_migration(al, mn)
for al, mn in loader.graph.forwards_plan((migration.app_label, migration.name))
if al == migration.app_label
]
if start_migration_name:
start_migration = self.find_migration(loader, app_label, start_migration_name)
start = loader.get_migration(start_migration.app_label, start_migration.name)
try:
start_index = migrations_to_squash.index(start)
migrations_to_squash = migrations_to_squash[start_index:]
except ValueError:
raise CommandError(
"The migration '%s' cannot be found. Maybe it comes after "
"the migration '%s'?\n"
"Have a look at:\n"
" python manage.py showmigrations %s\n"
"to debug this issue." % (start_migration, migration, app_label)
)
# Tell them what we're doing and optionally ask if we should proceed
if self.verbosity > 0 or self.interactive:
self.stdout.write(self.style.MIGRATE_HEADING("Will squash the following migrations:"))
for migration in migrations_to_squash:
self.stdout.write(" - %s" % migration.name)
if self.interactive:
answer = None
while not answer or answer not in "yn":
answer = six.moves.input("Do you wish to proceed? [yN] ")
if not answer:
answer = "n"
break
else:
answer = answer[0].lower()
if answer != "y":
return
# Load the operations from all those migrations and concat together,
# along with collecting external dependencies and detecting
# double-squashing
operations = []
dependencies = set()
# We need to take all dependencies from the first migration in the list
# as it may be 0002 depending on 0001
first_migration = True
for smigration in migrations_to_squash:
if smigration.replaces:
raise CommandError(
"You cannot squash squashed migrations! Please transition "
"it to a normal migration first: "
"https://docs.djangoproject.com/en/%s/topics/migrations/#squashing-migrations" % get_docs_version()
)
operations.extend(smigration.operations)
for dependency in smigration.dependencies:
if isinstance(dependency, SwappableTuple):
if settings.AUTH_USER_MODEL == dependency.setting:
dependencies.add(("__setting__", "AUTH_USER_MODEL"))
else:
dependencies.add(dependency)
elif dependency[0] != smigration.app_label or first_migration:
dependencies.add(dependency)
first_migration = False
if no_optimize:
if self.verbosity > 0:
self.stdout.write(self.style.MIGRATE_HEADING("(Skipping optimization.)"))
new_operations = operations
else:
if self.verbosity > 0:
self.stdout.write(self.style.MIGRATE_HEADING("Optimizing..."))
optimizer = MigrationOptimizer()
new_operations = optimizer.optimize(operations, migration.app_label)
if self.verbosity > 0:
if len(new_operations) == len(operations):
self.stdout.write(" No optimizations possible.")
else:
self.stdout.write(
" Optimized from %s operations to %s operations." %
(len(operations), len(new_operations))
)
        # Work out the value of replaces: any squashed migrations we're
        # re-squashing need to feed their replaces into ours.
replaces = []
for migration in migrations_to_squash:
if migration.replaces:
replaces.extend(migration.replaces)
else:
replaces.append((migration.app_label, migration.name))
# Make a new migration with those operations
subclass = type("Migration", (migrations.Migration, ), {
"dependencies": dependencies,
"operations": new_operations,
"replaces": replaces,
})
if start_migration_name:
new_migration = subclass("%s_squashed_%s" % (start_migration.name, migration.name), app_label)
else:
new_migration = subclass("0001_squashed_%s" % migration.name, app_label)
new_migration.initial = True
# Write out the new migration file
writer = MigrationWriter(new_migration)
with io.open(writer.path, "w", encoding='utf-8') as fh:
fh.write(writer.as_string())
if self.verbosity > 0:
self.stdout.write(self.style.MIGRATE_HEADING("Created new squashed migration %s" % writer.path))
self.stdout.write(" You should commit this migration but leave the old ones in place;")
self.stdout.write(" the new migration will be used for new installs. Once you are sure")
self.stdout.write(" all instances of the codebase have applied the migrations you squashed,")
self.stdout.write(" you can delete them.")
if writer.needs_manual_porting:
self.stdout.write(self.style.MIGRATE_HEADING("Manual porting required"))
self.stdout.write(" Your migrations contained functions that must be manually copied over,")
self.stdout.write(" as we could not safely copy their implementation.")
self.stdout.write(" See the comment at the top of the squashed migration for details.")
def find_migration(self, loader, app_label, name):
try:
return loader.get_migration_by_prefix(app_label, name)
except AmbiguityError:
raise CommandError(
"More than one migration matches '%s' in app '%s'. Please be "
"more specific." % (name, app_label)
)
except KeyError:
raise CommandError(
"Cannot find a migration matching '%s' from app '%s'." %
(name, app_label)
)
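# Editor's note: example invocations (sketch). Squash the "polls" app up to and
# including 0004, or only the 0002..0004 range:
#     python manage.py squashmigrations polls 0004
#     python manage.py squashmigrations polls 0002 0004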
| bsd-3-clause |
macs03/demo-cms | cms/lib/python2.7/site-packages/django/contrib/gis/geos/prototypes/threadsafe.py | 513 | 2824 | import threading
from django.contrib.gis.geos.libgeos import lgeos, notice_h, error_h, CONTEXT_PTR
class GEOSContextHandle(object):
"""
Python object representing a GEOS context handle.
"""
def __init__(self):
# Initializing the context handler for this thread with
# the notice and error handler.
self.ptr = lgeos.initGEOS_r(notice_h, error_h)
def __del__(self):
        if self.ptr:
            lgeos.finishGEOS_r(self.ptr)
# Defining a thread-local object and creating an instance
# to hold a reference to GEOSContextHandle for this thread.
class GEOSContext(threading.local):
handle = None
thread_context = GEOSContext()
class GEOSFunc(object):
"""
Class that serves as a wrapper for GEOS C Functions, and will
use thread-safe function variants when available.
"""
def __init__(self, func_name):
try:
# GEOS thread-safe function signatures end with '_r', and
# take an additional context handle parameter.
self.cfunc = getattr(lgeos, func_name + '_r')
self.threaded = True
# Create a reference here to thread_context so it's not
# garbage-collected before an attempt to call this object.
self.thread_context = thread_context
except AttributeError:
# Otherwise, use usual function.
self.cfunc = getattr(lgeos, func_name)
self.threaded = False
def __call__(self, *args):
if self.threaded:
# If a context handle does not exist for this thread, initialize one.
if not self.thread_context.handle:
self.thread_context.handle = GEOSContextHandle()
# Call the threaded GEOS routine with pointer of the context handle
# as the first argument.
return self.cfunc(self.thread_context.handle.ptr, *args)
else:
return self.cfunc(*args)
def __str__(self):
return self.cfunc.__name__
# argtypes property
def _get_argtypes(self):
return self.cfunc.argtypes
def _set_argtypes(self, argtypes):
if self.threaded:
new_argtypes = [CONTEXT_PTR]
new_argtypes.extend(argtypes)
self.cfunc.argtypes = new_argtypes
else:
self.cfunc.argtypes = argtypes
argtypes = property(_get_argtypes, _set_argtypes)
# restype property
def _get_restype(self):
return self.cfunc.restype
def _set_restype(self, restype):
self.cfunc.restype = restype
restype = property(_get_restype, _set_restype)
# errcheck property
def _get_errcheck(self):
return self.cfunc.errcheck
def _set_errcheck(self, errcheck):
self.cfunc.errcheck = errcheck
errcheck = property(_get_errcheck, _set_errcheck)
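# Editor's note: an abridged sketch of how callers wrap a libgeos routine.
# GEOSArea is a real GEOS C function, but GEOM_PTR and the exact ctypes wiring
# shown here are assumptions borrowed from the surrounding prototypes package:
#     geos_area = GEOSFunc('GEOSArea')
#     geos_area.argtypes = [GEOM_PTR, ctypes.POINTER(ctypes.c_double)]
#     geos_area.restype = ctypes.c_int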
| mit |
ammaradil/fibonacci | Lib/site-packages/pip/_vendor/requests/packages/chardet/codingstatemachine.py | 2931 | 2318 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart
from .compat import wrap_ord
class CodingStateMachine:
def __init__(self, sm):
self._mModel = sm
self._mCurrentBytePos = 0
self._mCurrentCharLen = 0
self.reset()
def reset(self):
self._mCurrentState = eStart
def next_state(self, c):
# for each byte we get its class
# if it is first byte, we also get byte length
# PY3K: aBuf is a byte stream, so c is an int, not a byte
byteCls = self._mModel['classTable'][wrap_ord(c)]
if self._mCurrentState == eStart:
self._mCurrentBytePos = 0
self._mCurrentCharLen = self._mModel['charLenTable'][byteCls]
# from byte's class and stateTable, we get its next state
curr_state = (self._mCurrentState * self._mModel['classFactor']
+ byteCls)
self._mCurrentState = self._mModel['stateTable'][curr_state]
self._mCurrentBytePos += 1
return self._mCurrentState
def get_current_charlen(self):
return self._mCurrentCharLen
def get_coding_state_machine(self):
return self._mModel['name']
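# Editor's note: a minimal sketch; real state-machine models (for example the
# ones defined in chardet's mbcssm module) supply the 'classTable',
# 'classFactor', 'stateTable', 'charLenTable' and 'name' keys used above.
def _sm_example(model, data):
    machine = CodingStateMachine(model)
    # Feed bytes one at a time; the machine reports its state after each byte.
    return [machine.next_state(byte) for byte in data]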
| mit |
dmnfarrell/peat | Ghost/calc_dCS.py | 1 | 4214 | #!/usr/bin/env python
#
# This file is part of the pKaTool package
# Copyright (C) Jens Erik Nielsen 2008
# All rights reserved
#
def Nonefix(value):
    """Format a shift value; report 'NA' when no value is available."""
    # Testing 'value is not None' (rather than truthiness) keeps a genuine
    # 0.0 shift from being reported as 'NA'.
    if value is not None:
        return '%6.3f' % (value)
    return 'NA'
def main(options,args):
"""Calculate the change in chemical shift due to a full charge on each titratable group"""
import get_dEF
method=options.method
X=get_dEF.map_struct(args[0])
X.build_Hs()
residues=X.PI.residues.keys()
residues.sort()
#
# Get the titratable groups
#
titgroups=X.PI.get_titratable_groups()
titgroups.sort()
import pKa.pKD_tools
if options.group_by_titgroup:
for titgroup in titgroups:
titgroup_type=pKa.pKD_tools.get_titgroup_type_from_titgroup(titgroup)
charge=X.PI.titgroups[titgroup_type]['charge']
print 'TITRATABLE GROUP',titgroup
print 'Residue CS Nitrogen CS Hydrogen'
for residue in residues:
dCS_N=X.get_dCS(residue+':N',titgroup,charge=charge,method=method)
dCS_H=X.get_dCS(residue+':H',titgroup,charge=charge,method=method)
print '%8s, %s, %s' %(residue,Nonefix(dCS_N),Nonefix(dCS_H))
else:
#
# Group by atom
#
for residue in residues:
for atom in [':N',':H']:
changes=[]
for titgroup in titgroups:
titgroup_type=pKa.pKD_tools.get_titgroup_type_from_titgroup(titgroup)
charge=X.PI.titgroups[titgroup_type]['charge']
dCS=X.get_dCS(residue+atom,titgroup,charge=charge,method=method)
changes.append([titgroup,dCS])
#
if options.sort:
def cmpfunc(x,y):
if x[1] is None:
return 1
if y[1] is None:
return -1
return cmp(abs(y[1]),abs(x[1]))
changes.sort(cmpfunc)
print 'Residue: %s, Atom: %s' %(residue,atom[1])
for titgroup,dCS in changes[:options.listnumber]:
if dCS:
if abs(dCS)<options.limit:
continue
print titgroup,dCS
print
return
if __name__=='__main__':
print
print 'Calculate charge-induced Chemical shift changes in protein backbone atoms'
print 'Jens Erik Nielsen, 2008'
print
import sys, os
from optparse import OptionParser
parser = OptionParser(usage='%prog [options] <pdbfile>',version='%prog 1.0')
parser.add_option('-m',"--method", dest='method',
help="Choose method for calculating electric field: APBS [PBE solver], PBE [Chresten's PBE solver], Coulomb [Coulomb's law]. Default: %default",
default='APBS')
parser.add_option('-n',"--number",type='int',dest='listnumber',default=100000,action='store',
help='Print X values for each grouping. Default: %default',metavar='X')
parser.add_option('-l',"--limit",type='float',dest='limit',default=0.0,action='store',
help='Do not list chemical shift differences where abs(d chem shift)< LIMIT. Default: LIMIT=%default',metavar='LIMIT')
parser.add_option('-g','--group',dest='group_by_titgroup',action='store_true',default=True,
help='Group results by titratable group. Default: %default')
parser.add_option('-a','--atom',dest='group_by_titgroup',action='store_false',
help='Group results by atom. Default: False')
parser.add_option('-s','--sort',dest='sort',action='store_true',default=False,
help='Sort chemical shift differences. Default: %default')
(options, args) = parser.parse_args()
if len(args)!=1:
parser.error('You must specify a PDB file')
if options.sort and options.group_by_titgroup:
parser.error('Sorting not (yet) implemented when grouping by titratable group')
#
# Call main
#
main(options,args)
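# Editor's note: example invocation (sketch); the flags follow the option
# parser above, e.g. grouping by atom and printing the 5 largest shifts:
#     python calc_dCS.py --method Coulomb --atom --sort -n 5 protein.pdb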
| mit |
petecummings/django | tests/http_utils/tests.py | 304 | 2247 | from __future__ import unicode_literals
import gzip
import io
from django.http import HttpRequest, HttpResponse, StreamingHttpResponse
from django.http.utils import conditional_content_removal
from django.test import SimpleTestCase
# based on Python 3.3's gzip.compress
def gzip_compress(data):
buf = io.BytesIO()
f = gzip.GzipFile(fileobj=buf, mode='wb', compresslevel=0)
try:
f.write(data)
finally:
f.close()
return buf.getvalue()
class HttpUtilTests(SimpleTestCase):
def test_conditional_content_removal(self):
"""
Tests that content is removed from regular and streaming responses with
a status_code of 100-199, 204, 304 or a method of "HEAD".
"""
req = HttpRequest()
# Do nothing for 200 responses.
res = HttpResponse('abc')
conditional_content_removal(req, res)
self.assertEqual(res.content, b'abc')
res = StreamingHttpResponse(['abc'])
conditional_content_removal(req, res)
self.assertEqual(b''.join(res), b'abc')
# Strip content for some status codes.
for status_code in (100, 150, 199, 204, 304):
res = HttpResponse('abc', status=status_code)
conditional_content_removal(req, res)
self.assertEqual(res.content, b'')
res = StreamingHttpResponse(['abc'], status=status_code)
conditional_content_removal(req, res)
self.assertEqual(b''.join(res), b'')
# Issue #20472
abc = gzip_compress(b'abc')
res = HttpResponse(abc, status=304)
res['Content-Encoding'] = 'gzip'
conditional_content_removal(req, res)
self.assertEqual(res.content, b'')
res = StreamingHttpResponse([abc], status=304)
res['Content-Encoding'] = 'gzip'
conditional_content_removal(req, res)
self.assertEqual(b''.join(res), b'')
# Strip content for HEAD requests.
req.method = 'HEAD'
res = HttpResponse('abc')
conditional_content_removal(req, res)
self.assertEqual(res.content, b'')
res = StreamingHttpResponse(['abc'])
conditional_content_removal(req, res)
self.assertEqual(b''.join(res), b'')
| bsd-3-clause |
kezabelle/django-testguess | autoguessed/tests/test_urls/returns_templateresponse/test_101110011100.py | 1 | 2776 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.test import TestCase
class GuessedTestCase(TestCase):
"""
Generated: 2015-10-18T13:44:15.980585
is_html5: True
is_ajax: False
is_authenticated: True
has_context_data: True
has_template_name: True
has_get_params: False
supports_model_mommy: False
supports_custom_users: True
supports_html5lib: True
is_get: True
is_post: False
is_json: False
"""
def setUp(self):
from django.contrib.auth import get_user_model
from django.utils.crypto import get_random_string
User = get_user_model()
username = '200@GET'
password = get_random_string(5)
user = User(**{User.USERNAME_FIELD: username})
user.is_active = True
user.is_staff = True
user.is_superuser = True
user.set_password(password)
user.save()
self.user = user
self.auth = {'username': username, 'password': password}
self.client.login(**self.auth)
return None
def test_url_reversed(self):
from django.core.urlresolvers import reverse
url = reverse("1",
args=(),
kwargs={})
self.assertEqual(url, "/1/") # noqa
def test_response_status_code(self):
response = self.client.get('/1/', data={})
self.assertEqual(response.status_code, 200)
def test_response_headers(self):
response = self.client.get('/1/', data={})
self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
def test_response_is_html5(self):
from html5lib import parse
response = self.client.get('/1/', data={})
self.assertFalse(response.streaming)
        # html5lib's parse() raises on invalid HTML5, so a bad document shows
        # up as an error (E) rather than a failure (F).
parse(response.content)
def test_templateresponse_context_data_contains_expected_keys(self):
response = self.client.get('/1/', data={})
expected = set(['dt', 'form', 'model', 'sub', 'thing'])
in_context = set(response.context_data.keys())
self.assertEqual(expected, in_context)
def test_templateresponse_context_data_has_expected_types(self):
from django.contrib.auth.models import User
from django.forms.forms import Form
from django.utils.datetime_safe import datetime
response = self.client.get('/1/', data={})
self.assertIsInstance(response.context_data['dt'], datetime)
self.assertIsInstance(response.context_data['form'], Form)
self.assertIsInstance(response.context_data['model'], User)
self.assertIsInstance(response.context_data['sub'], dict)
self.assertIsInstance(response.context_data['thing'], int)
| bsd-2-clause |
EvanK/ansible | lib/ansible/modules/network/vyos/vyos_l3_interface.py | 56 | 8511 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: vyos_l3_interface
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage L3 interfaces on VyOS network devices
description:
- This module provides declarative management of L3 interfaces
on VyOS network devices.
notes:
- Tested against VYOS 1.1.7
options:
name:
description:
- Name of the L3 interface.
ipv4:
description:
- IPv4 of the L3 interface.
ipv6:
description:
- IPv6 of the L3 interface.
aggregate:
description: List of L3 interfaces definitions
state:
description:
- State of the L3 interface configuration.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: vyos
"""
EXAMPLES = """
- name: Set eth0 IPv4 address
vyos_l3_interface:
name: eth0
ipv4: 192.168.0.1/24
- name: Remove eth0 IPv4 address
vyos_l3_interface:
name: eth0
state: absent
- name: Set IP addresses on aggregate
vyos_l3_interface:
aggregate:
- { name: eth1, ipv4: 192.168.2.10/24 }
- { name: eth2, ipv4: 192.168.3.10/24, ipv6: "fd5d:12c9:2201:1::1/64" }
- name: Remove IP addresses on aggregate
vyos_l3_interface:
aggregate:
- { name: eth1, ipv4: 192.168.2.10/24 }
- { name: eth2, ipv4: 192.168.3.10/24, ipv6: "fd5d:12c9:2201:1::1/64" }
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- set interfaces ethernet eth0 address '192.168.0.1/24'
"""
import socket
import re
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import is_masklen, validate_ip_address
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.vyos.vyos import load_config, run_commands
from ansible.module_utils.network.vyos.vyos import vyos_argument_spec
def is_ipv4(value):
if value:
address = value.split('/')
if is_masklen(address[1]) and validate_ip_address(address[0]):
return True
return False
def is_ipv6(value):
if value:
address = value.split('/')
if 0 <= int(address[1]) <= 128:
try:
socket.inet_pton(socket.AF_INET6, address[0])
except socket.error:
return False
return True
return False
def search_obj_in_list(name, lst):
for o in lst:
if o['name'] == name:
return o
return None
def map_obj_to_commands(updates, module):
commands = list()
want, have = updates
for w in want:
name = w['name']
ipv4 = w['ipv4']
ipv6 = w['ipv6']
state = w['state']
obj_in_have = search_obj_in_list(name, have)
if state == 'absent' and obj_in_have:
if not ipv4 and not ipv6 and (obj_in_have['ipv4'] or obj_in_have['ipv6']):
if name == "lo":
commands.append('delete interfaces loopback lo address')
else:
commands.append('delete interfaces ethernet ' + name + ' address')
else:
if ipv4 and ipv4 in obj_in_have['ipv4']:
if name == "lo":
commands.append('delete interfaces loopback lo address ' + ipv4)
else:
commands.append('delete interfaces ethernet ' + name + ' address ' + ipv4)
if ipv6 and ipv6 in obj_in_have['ipv6']:
if name == "lo":
commands.append('delete interfaces loopback lo address ' + ipv6)
else:
commands.append('delete interfaces ethernet ' + name + ' address ' + ipv6)
elif (state == 'present' and obj_in_have):
if ipv4 and ipv4 not in obj_in_have['ipv4']:
if name == "lo":
commands.append('set interfaces loopback lo address ' + ipv4)
else:
commands.append('set interfaces ethernet ' + name + ' address ' + ipv4)
if ipv6 and ipv6 not in obj_in_have['ipv6']:
if name == "lo":
commands.append('set interfaces loopback lo address ' + ipv6)
else:
commands.append('set interfaces ethernet ' + name + ' address ' + ipv6)
return commands
def map_config_to_obj(module):
obj = []
output = run_commands(module, ['show interfaces'])
    # Note: the original pattern '[e|l]' also matched a literal '|';
    # '[el]' splits only on lines starting with 'e' (eth*) or 'l' (lo).
    lines = re.split(r'\n[el]', output[0])[1:]
if len(lines) > 0:
for line in lines:
splitted_line = line.split()
if len(splitted_line) > 0:
ipv4 = []
ipv6 = []
if splitted_line[0].lower().startswith('th'):
name = 'e' + splitted_line[0].lower()
elif splitted_line[0].lower().startswith('o'):
name = 'l' + splitted_line[0].lower()
for i in splitted_line[1:]:
if (('.' in i or ':' in i) and '/' in i):
value = i.split(r'\n')[0]
if is_ipv4(value):
ipv4.append(value)
elif is_ipv6(value):
ipv6.append(value)
obj.append({'name': name,
'ipv4': ipv4,
'ipv6': ipv6})
return obj
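# Editor's note: a hypothetical 'show interfaces' excerpt of the shape the
# parser above expects (real VyOS output carries extra columns); each eth*/lo
# block is reduced to a {'name', 'ipv4', 'ipv6'} dict:
#     eth0             192.168.0.1/24                    u/u
#     lo               127.0.0.1/8 ::1/128               u/u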
def map_params_to_obj(module):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
obj.append(item.copy())
else:
obj.append({
'name': module.params['name'],
'ipv4': module.params['ipv4'],
'ipv6': module.params['ipv6'],
'state': module.params['state']
})
return obj
def main():
""" main entry point for module execution
"""
element_spec = dict(
name=dict(),
ipv4=dict(),
ipv6=dict(),
state=dict(default='present',
choices=['present', 'absent'])
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['name'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
)
argument_spec.update(element_spec)
argument_spec.update(vyos_argument_spec)
required_one_of = [['name', 'aggregate']]
mutually_exclusive = [['name', 'aggregate']]
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands:
commit = not module.check_mode
load_config(module, commands, commit=commit)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
maljac/odoo-addons | account_invoice_tax_wizard/__openerp__.py | 1 | 1453 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'category': 'Accounting & Finance',
'data': [
'wizard/account_invoice_tax_wizard_view.xml',
'account_invoice_view.xml',
],
'demo': [],
'depends': ['account'],
'description': '''
Account Invoice Tax Wizard
==========================
Add a wizard to add manual taxes on invoices
''',
'installable': True,
'name': 'Account Invoice Tax Wizard',
'test': [],
'version': '1.243'}
| agpl-3.0 |
bealdav/OCB | openerp/report/pyPdf/generic.py | 136 | 29129 | # vim: sw=4:expandtab:foldmethod=marker
#
# Copyright (c) 2006, Mathieu Fenniak
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Implementation of generic PDF objects (dictionary, number, string, and so on)
"""
__author__ = "Mathieu Fenniak"
__author_email__ = "[email protected]"
import re
from utils import readNonWhitespace, RC4_encrypt
import filters
import utils
import decimal
import codecs
def readObject(stream, pdf):
tok = stream.read(1)
stream.seek(-1, 1) # reset to start
if tok == 't' or tok == 'f':
# boolean object
return BooleanObject.readFromStream(stream)
elif tok == '(':
# string object
return readStringFromStream(stream)
elif tok == '/':
# name object
return NameObject.readFromStream(stream)
elif tok == '[':
# array object
return ArrayObject.readFromStream(stream, pdf)
elif tok == 'n':
# null object
return NullObject.readFromStream(stream)
elif tok == '<':
# hexadecimal string OR dictionary
peek = stream.read(2)
stream.seek(-2, 1) # reset to start
if peek == '<<':
return DictionaryObject.readFromStream(stream, pdf)
else:
return readHexStringFromStream(stream)
elif tok == '%':
# comment
while tok not in ('\r', '\n'):
tok = stream.read(1)
tok = readNonWhitespace(stream)
stream.seek(-1, 1)
return readObject(stream, pdf)
else:
# number object OR indirect reference
if tok == '+' or tok == '-':
# number
return NumberObject.readFromStream(stream)
peek = stream.read(20)
stream.seek(-len(peek), 1) # reset to start
if re.match(r"(\d+)\s(\d+)\sR[^a-zA-Z]", peek) is not None:
return IndirectObject.readFromStream(stream, pdf)
else:
return NumberObject.readFromStream(stream)
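# Editor's note: illustrative sketch (Python 2 era). readObject() dispatches on
# the first byte of the stream, so simple literals can be parsed directly:
#     from StringIO import StringIO
#     readObject(StringIO("true"), None)       # -> BooleanObject(True)
#     readObject(StringIO("[ 1 2 3 ]"), None)  # -> ArrayObject of NumberObjects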
class PdfObject(object):
def getObject(self):
"""Resolves indirect references."""
return self
class NullObject(PdfObject):
def writeToStream(self, stream, encryption_key):
stream.write("null")
def readFromStream(stream):
nulltxt = stream.read(4)
if nulltxt != "null":
raise utils.PdfReadError, "error reading null object"
return NullObject()
readFromStream = staticmethod(readFromStream)
class BooleanObject(PdfObject):
def __init__(self, value):
self.value = value
def writeToStream(self, stream, encryption_key):
if self.value:
stream.write("true")
else:
stream.write("false")
def readFromStream(stream):
word = stream.read(4)
if word == "true":
return BooleanObject(True)
elif word == "fals":
stream.read(1)
return BooleanObject(False)
assert False
readFromStream = staticmethod(readFromStream)
class ArrayObject(list, PdfObject):
def writeToStream(self, stream, encryption_key):
stream.write("[")
for data in self:
stream.write(" ")
data.writeToStream(stream, encryption_key)
stream.write(" ]")
def readFromStream(stream, pdf):
arr = ArrayObject()
tmp = stream.read(1)
if tmp != "[":
            raise utils.PdfReadError("error reading array")
while True:
# skip leading whitespace
tok = stream.read(1)
while tok.isspace():
tok = stream.read(1)
stream.seek(-1, 1)
# check for array ending
peekahead = stream.read(1)
if peekahead == "]":
break
stream.seek(-1, 1)
# read and append obj
arr.append(readObject(stream, pdf))
return arr
readFromStream = staticmethod(readFromStream)
class IndirectObject(PdfObject):
def __init__(self, idnum, generation, pdf):
self.idnum = idnum
self.generation = generation
self.pdf = pdf
def getObject(self):
return self.pdf.getObject(self).getObject()
def __repr__(self):
return "IndirectObject(%r, %r)" % (self.idnum, self.generation)
def __eq__(self, other):
return (
other is not None and
isinstance(other, IndirectObject) and
self.idnum == other.idnum and
self.generation == other.generation and
self.pdf is other.pdf
)
def __ne__(self, other):
return not self.__eq__(other)
def writeToStream(self, stream, encryption_key):
stream.write("%s %s R" % (self.idnum, self.generation))
def readFromStream(stream, pdf):
idnum = ""
while True:
tok = stream.read(1)
if tok.isspace():
break
idnum += tok
generation = ""
while True:
tok = stream.read(1)
if tok.isspace():
break
generation += tok
r = stream.read(1)
if r != "R":
raise utils.PdfReadError("error reading indirect object reference")
return IndirectObject(int(idnum), int(generation), pdf)
readFromStream = staticmethod(readFromStream)
class FloatObject(decimal.Decimal, PdfObject):
def __new__(cls, value="0", context=None):
return decimal.Decimal.__new__(cls, str(value), context)
def __repr__(self):
if self == self.to_integral():
return str(self.quantize(decimal.Decimal(1)))
else:
# XXX: this adds useless extraneous zeros.
return "%.5f" % self
def writeToStream(self, stream, encryption_key):
stream.write(repr(self))
class NumberObject(int, PdfObject):
def __init__(self, value):
        # the value itself is consumed by int.__new__; __init__ needs no args
        int.__init__(self)
def writeToStream(self, stream, encryption_key):
stream.write(repr(self))
def readFromStream(stream):
name = ""
while True:
tok = stream.read(1)
if tok != '+' and tok != '-' and tok != '.' and not tok.isdigit():
stream.seek(-1, 1)
break
name += tok
if name.find(".") != -1:
return FloatObject(name)
else:
return NumberObject(name)
readFromStream = staticmethod(readFromStream)
##
# Given a string (either a "str" or "unicode"), create a ByteStringObject or a
# TextStringObject to represent the string.
def createStringObject(string):
if isinstance(string, unicode):
return TextStringObject(string)
elif isinstance(string, str):
if string.startswith(codecs.BOM_UTF16_BE):
retval = TextStringObject(string.decode("utf-16"))
retval.autodetect_utf16 = True
return retval
else:
# This is probably a big performance hit here, but we need to
# convert string objects into the text/unicode-aware version if
# possible... and the only way to check if that's possible is
# to try. Some strings are strings, some are just byte arrays.
try:
retval = TextStringObject(decode_pdfdocencoding(string))
retval.autodetect_pdfdocencoding = True
return retval
except UnicodeDecodeError:
return ByteStringObject(string)
else:
raise TypeError("createStringObject should have str or unicode arg")
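# Illustrative sketch (assumed inputs): plain ASCII decodes through
# PDFDocEncoding into a TextStringObject, while bytes with no mapping
# (such as \x01) are kept verbatim as a ByteStringObject.
def _example_createStringObject():
    assert isinstance(createStringObject('Hello'), TextStringObject)
    assert isinstance(createStringObject('\x01\x02'), ByteStringObject)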
def readHexStringFromStream(stream):
stream.read(1)
txt = ""
x = ""
while True:
tok = readNonWhitespace(stream)
if tok == ">":
break
x += tok
if len(x) == 2:
txt += chr(int(x, base=16))
x = ""
if len(x) == 1:
x += "0"
if len(x) == 2:
txt += chr(int(x, base=16))
return createStringObject(txt)
def readStringFromStream(stream):
tok = stream.read(1)
parens = 1
txt = ""
while True:
tok = stream.read(1)
if tok == "(":
parens += 1
elif tok == ")":
parens -= 1
if parens == 0:
break
elif tok == "\\":
tok = stream.read(1)
if tok == "n":
tok = "\n"
elif tok == "r":
tok = "\r"
elif tok == "t":
tok = "\t"
elif tok == "b":
tok = "\b"
elif tok == "f":
tok = "\f"
elif tok == "(":
tok = "("
elif tok == ")":
tok = ")"
elif tok == "\\":
tok = "\\"
elif tok.isdigit():
# "The number ddd may consist of one, two, or three
# octal digits; high-order overflow shall be ignored.
# Three octal digits shall be used, with leading zeros
# as needed, if the next character of the string is also
# a digit." (PDF reference 7.3.4.2, p 16)
for i in range(2):
ntok = stream.read(1)
if ntok.isdigit():
tok += ntok
else:
break
tok = chr(int(tok, base=8))
elif tok in "\n\r":
# This case is hit when a backslash followed by a line
# break occurs. If it's a multi-char EOL, consume the
# second character:
tok = stream.read(1)
if not tok in "\n\r":
stream.seek(-1, 1)
# Then don't add anything to the actual string, since this
# line break was escaped:
tok = ''
else:
raise utils.PdfReadError("Unexpected escaped string")
txt += tok
return createStringObject(txt)
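# Illustrative sketch (assumed inputs): hex strings decode two digits per
# byte, and literal strings honor backslash escapes such as \n.
def _example_string_parsing():
    from StringIO import StringIO
    assert readHexStringFromStream(StringIO("<48656C6C6F>")) == 'Hello'
    assert readStringFromStream(StringIO("(a\\nb)")) == 'a\nb'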
##
# Represents a string object where the text encoding could not be determined.
# This occurs quite often, as the PDF spec doesn't provide an alternate way to
# represent strings -- for example, the encryption data stored in files (like
# /O) is clearly not text, but is still stored in a "String" object.
class ByteStringObject(str, PdfObject):
##
# For compatibility with TextStringObject.original_bytes. This method
# returns self.
original_bytes = property(lambda self: self)
def writeToStream(self, stream, encryption_key):
bytearr = self
if encryption_key:
bytearr = RC4_encrypt(encryption_key, bytearr)
stream.write("<")
stream.write(bytearr.encode("hex"))
stream.write(">")
##
# Represents a string object that has been decoded into a real unicode string.
# If read from a PDF document, this string appeared to match the
# PDFDocEncoding, or contained a UTF-16BE BOM mark to cause UTF-16 decoding to
# occur.
class TextStringObject(unicode, PdfObject):
autodetect_pdfdocencoding = False
autodetect_utf16 = False
##
# It is occasionally possible that a text string object gets created where
# a byte string object was expected due to the autodetection mechanism --
# if that occurs, this "original_bytes" property can be used to
# back-calculate what the original encoded bytes were.
original_bytes = property(lambda self: self.get_original_bytes())
def get_original_bytes(self):
# We're a text string object, but the library is trying to get our raw
# bytes. This can happen if we auto-detected this string as text, but
# we were wrong. It's pretty common. Return the original bytes that
# would have been used to create this object, based upon the autodetect
# method.
if self.autodetect_utf16:
return codecs.BOM_UTF16_BE + self.encode("utf-16be")
elif self.autodetect_pdfdocencoding:
return encode_pdfdocencoding(self)
else:
raise Exception("no information about original bytes")
def writeToStream(self, stream, encryption_key):
# Try to write the string out as a PDFDocEncoding encoded string. It's
# nicer to look at in the PDF file. Sadly, we take a performance hit
# here for trying...
try:
bytearr = encode_pdfdocencoding(self)
except UnicodeEncodeError:
bytearr = codecs.BOM_UTF16_BE + self.encode("utf-16be")
if encryption_key:
bytearr = RC4_encrypt(encryption_key, bytearr)
obj = ByteStringObject(bytearr)
obj.writeToStream(stream, None)
else:
stream.write("(")
for c in bytearr:
if not c.isalnum() and c != ' ':
stream.write("\\%03o" % ord(c))
else:
stream.write(c)
stream.write(")")
class NameObject(str, PdfObject):
delimiterCharacters = "(", ")", "<", ">", "[", "]", "{", "}", "/", "%"
def __init__(self, data):
str.__init__(data)
def writeToStream(self, stream, encryption_key):
stream.write(self)
def readFromStream(stream):
name = stream.read(1)
if name != "/":
            raise utils.PdfReadError("name read error")
while True:
tok = stream.read(1)
if tok.isspace() or tok in NameObject.delimiterCharacters:
stream.seek(-1, 1)
break
name += tok
return NameObject(name)
readFromStream = staticmethod(readFromStream)
class DictionaryObject(dict, PdfObject):
def __init__(self, *args, **kwargs):
if len(args) == 0:
self.update(kwargs)
elif len(args) == 1:
arr = args[0]
# If we're passed a list/tuple, make a dict out of it
if not hasattr(arr, "iteritems"):
newarr = {}
for k, v in arr:
newarr[k] = v
arr = newarr
self.update(arr)
else:
            raise TypeError("dict expected at most 1 argument, got %d" % len(args))
def update(self, arr):
# note, a ValueError halfway through copying values
# will leave half the values in this dict.
for k, v in arr.iteritems():
self.__setitem__(k, v)
def raw_get(self, key):
return dict.__getitem__(self, key)
def __setitem__(self, key, value):
if not isinstance(key, PdfObject):
raise ValueError("key must be PdfObject")
if not isinstance(value, PdfObject):
raise ValueError("value must be PdfObject")
return dict.__setitem__(self, key, value)
def setdefault(self, key, value=None):
if not isinstance(key, PdfObject):
raise ValueError("key must be PdfObject")
if not isinstance(value, PdfObject):
raise ValueError("value must be PdfObject")
return dict.setdefault(self, key, value)
def __getitem__(self, key):
return dict.__getitem__(self, key).getObject()
##
    # Retrieves XMP (Extensible Metadata Platform) data relevant to this
    # object, if available.
# <p>
# Stability: Added in v1.12, will exist for all future v1.x releases.
    # @return Returns a {@link #xmp.XmpInformation XmpInformation} instance
# that can be used to access XMP metadata from the document. Can also
# return None if no metadata was found on the document root.
def getXmpMetadata(self):
metadata = self.get("/Metadata", None)
if metadata is None:
return None
metadata = metadata.getObject()
import xmp
if not isinstance(metadata, xmp.XmpInformation):
metadata = xmp.XmpInformation(metadata)
self[NameObject("/Metadata")] = metadata
return metadata
##
# Read-only property that accesses the {@link
    # #DictionaryObject.getXmpMetadata getXmpMetadata} function.
# <p>
# Stability: Added in v1.12, will exist for all future v1.x releases.
xmpMetadata = property(lambda self: self.getXmpMetadata(), None, None)
def writeToStream(self, stream, encryption_key):
stream.write("<<\n")
for key, value in self.items():
key.writeToStream(stream, encryption_key)
stream.write(" ")
value.writeToStream(stream, encryption_key)
stream.write("\n")
stream.write(">>")
def readFromStream(stream, pdf):
tmp = stream.read(2)
if tmp != "<<":
            raise utils.PdfReadError("dictionary read error")
data = {}
while True:
tok = readNonWhitespace(stream)
if tok == ">":
stream.read(1)
break
stream.seek(-1, 1)
key = readObject(stream, pdf)
tok = readNonWhitespace(stream)
stream.seek(-1, 1)
value = readObject(stream, pdf)
if data.has_key(key):
# multiple definitions of key not permitted
                raise utils.PdfReadError("multiple definitions in dictionary")
data[key] = value
pos = stream.tell()
s = readNonWhitespace(stream)
if s == 's' and stream.read(5) == 'tream':
eol = stream.read(1)
# odd PDF file output has spaces after 'stream' keyword but before EOL.
# patch provided by Danial Sandler
while eol == ' ':
eol = stream.read(1)
assert eol in ("\n", "\r")
if eol == "\r":
# read \n after
stream.read(1)
# this is a stream object, not a dictionary
assert data.has_key("/Length")
length = data["/Length"]
if isinstance(length, IndirectObject):
t = stream.tell()
length = pdf.getObject(length)
stream.seek(t, 0)
data["__streamdata__"] = stream.read(length)
e = readNonWhitespace(stream)
ndstream = stream.read(8)
if (e + ndstream) != "endstream":
# (sigh) - the odd PDF file has a length that is too long, so
# we need to read backwards to find the "endstream" ending.
# ReportLab (unknown version) generates files with this bug,
# and Python users into PDF files tend to be our audience.
# we need to do this to correct the streamdata and chop off
# an extra character.
pos = stream.tell()
stream.seek(-10, 1)
end = stream.read(9)
if end == "endstream":
# we found it by looking back one character further.
data["__streamdata__"] = data["__streamdata__"][:-1]
else:
stream.seek(pos, 0)
                    raise utils.PdfReadError("Unable to find 'endstream' marker after stream.")
else:
stream.seek(pos, 0)
if data.has_key("__streamdata__"):
return StreamObject.initializeFromDictionary(data)
else:
retval = DictionaryObject()
retval.update(data)
return retval
readFromStream = staticmethod(readFromStream)
class StreamObject(DictionaryObject):
def __init__(self):
self._data = None
self.decodedSelf = None
def writeToStream(self, stream, encryption_key):
self[NameObject("/Length")] = NumberObject(len(self._data))
DictionaryObject.writeToStream(self, stream, encryption_key)
del self["/Length"]
stream.write("\nstream\n")
data = self._data
if encryption_key:
data = RC4_encrypt(encryption_key, data)
stream.write(data)
stream.write("\nendstream")
def initializeFromDictionary(data):
if data.has_key("/Filter"):
retval = EncodedStreamObject()
else:
retval = DecodedStreamObject()
retval._data = data["__streamdata__"]
del data["__streamdata__"]
del data["/Length"]
retval.update(data)
return retval
initializeFromDictionary = staticmethod(initializeFromDictionary)
def flateEncode(self):
if self.has_key("/Filter"):
f = self["/Filter"]
if isinstance(f, ArrayObject):
f.insert(0, NameObject("/FlateDecode"))
else:
newf = ArrayObject()
newf.append(NameObject("/FlateDecode"))
newf.append(f)
f = newf
else:
f = NameObject("/FlateDecode")
retval = EncodedStreamObject()
retval[NameObject("/Filter")] = f
retval._data = filters.FlateDecode.encode(self._data)
return retval
class DecodedStreamObject(StreamObject):
def getData(self):
return self._data
def setData(self, data):
self._data = data
class EncodedStreamObject(StreamObject):
def __init__(self):
self.decodedSelf = None
def getData(self):
if self.decodedSelf:
# cached version of decoded object
return self.decodedSelf.getData()
else:
# create decoded object
decoded = DecodedStreamObject()
decoded._data = filters.decodeStreamData(self)
for key, value in self.items():
if not key in ("/Length", "/Filter", "/DecodeParms"):
decoded[key] = value
self.decodedSelf = decoded
return decoded._data
def setData(self, data):
        raise utils.PdfReadError("Creating EncodedStreamObject is not currently supported")
class RectangleObject(ArrayObject):
def __init__(self, arr):
# must have four points
assert len(arr) == 4
        # automatically convert arr[x] into a FloatObject if necessary
ArrayObject.__init__(self, [self.ensureIsNumber(x) for x in arr])
def ensureIsNumber(self, value):
if not isinstance(value, (NumberObject, FloatObject)):
value = FloatObject(value)
return value
def __repr__(self):
return "RectangleObject(%s)" % repr(list(self))
def getLowerLeft_x(self):
return self[0]
def getLowerLeft_y(self):
return self[1]
def getUpperRight_x(self):
return self[2]
def getUpperRight_y(self):
return self[3]
def getUpperLeft_x(self):
return self.getLowerLeft_x()
def getUpperLeft_y(self):
return self.getUpperRight_y()
def getLowerRight_x(self):
return self.getUpperRight_x()
def getLowerRight_y(self):
return self.getLowerLeft_y()
def getLowerLeft(self):
return self.getLowerLeft_x(), self.getLowerLeft_y()
def getLowerRight(self):
return self.getLowerRight_x(), self.getLowerRight_y()
def getUpperLeft(self):
return self.getUpperLeft_x(), self.getUpperLeft_y()
def getUpperRight(self):
return self.getUpperRight_x(), self.getUpperRight_y()
def setLowerLeft(self, value):
self[0], self[1] = [self.ensureIsNumber(x) for x in value]
def setLowerRight(self, value):
self[2], self[1] = [self.ensureIsNumber(x) for x in value]
def setUpperLeft(self, value):
self[0], self[3] = [self.ensureIsNumber(x) for x in value]
def setUpperRight(self, value):
self[2], self[3] = [self.ensureIsNumber(x) for x in value]
def getWidth(self):
return self.getUpperRight_x() - self.getLowerLeft_x()
def getHeight(self):
        return self.getUpperRight_y() - self.getLowerLeft_y()
lowerLeft = property(getLowerLeft, setLowerLeft, None, None)
lowerRight = property(getLowerRight, setLowerRight, None, None)
upperLeft = property(getUpperLeft, setUpperLeft, None, None)
upperRight = property(getUpperRight, setUpperRight, None, None)
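# Illustrative sketch: a US-Letter-sized media box (the values are
# assumptions for demonstration only).
def _example_rectangle():
    box = RectangleObject([0, 0, 612, 792])
    assert box.getWidth() == 612
    assert box.getHeight() == 792
    assert box.upperRight == (612, 792)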
def encode_pdfdocencoding(unicode_string):
retval = ''
for c in unicode_string:
try:
retval += chr(_pdfDocEncoding_rev[c])
except KeyError:
raise UnicodeEncodeError("pdfdocencoding", c, -1, -1,
"does not exist in translation table")
return retval
def decode_pdfdocencoding(byte_array):
retval = u''
for b in byte_array:
c = _pdfDocEncoding[ord(b)]
if c == u'\u0000':
raise UnicodeDecodeError("pdfdocencoding", b, -1, -1,
"does not exist in translation table")
retval += c
return retval
_pdfDocEncoding = (
u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000',
u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000',
u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000',
u'\u02d8', u'\u02c7', u'\u02c6', u'\u02d9', u'\u02dd', u'\u02db', u'\u02da', u'\u02dc',
u'\u0020', u'\u0021', u'\u0022', u'\u0023', u'\u0024', u'\u0025', u'\u0026', u'\u0027',
u'\u0028', u'\u0029', u'\u002a', u'\u002b', u'\u002c', u'\u002d', u'\u002e', u'\u002f',
u'\u0030', u'\u0031', u'\u0032', u'\u0033', u'\u0034', u'\u0035', u'\u0036', u'\u0037',
u'\u0038', u'\u0039', u'\u003a', u'\u003b', u'\u003c', u'\u003d', u'\u003e', u'\u003f',
u'\u0040', u'\u0041', u'\u0042', u'\u0043', u'\u0044', u'\u0045', u'\u0046', u'\u0047',
u'\u0048', u'\u0049', u'\u004a', u'\u004b', u'\u004c', u'\u004d', u'\u004e', u'\u004f',
u'\u0050', u'\u0051', u'\u0052', u'\u0053', u'\u0054', u'\u0055', u'\u0056', u'\u0057',
u'\u0058', u'\u0059', u'\u005a', u'\u005b', u'\u005c', u'\u005d', u'\u005e', u'\u005f',
u'\u0060', u'\u0061', u'\u0062', u'\u0063', u'\u0064', u'\u0065', u'\u0066', u'\u0067',
u'\u0068', u'\u0069', u'\u006a', u'\u006b', u'\u006c', u'\u006d', u'\u006e', u'\u006f',
u'\u0070', u'\u0071', u'\u0072', u'\u0073', u'\u0074', u'\u0075', u'\u0076', u'\u0077',
u'\u0078', u'\u0079', u'\u007a', u'\u007b', u'\u007c', u'\u007d', u'\u007e', u'\u0000',
u'\u2022', u'\u2020', u'\u2021', u'\u2026', u'\u2014', u'\u2013', u'\u0192', u'\u2044',
u'\u2039', u'\u203a', u'\u2212', u'\u2030', u'\u201e', u'\u201c', u'\u201d', u'\u2018',
u'\u2019', u'\u201a', u'\u2122', u'\ufb01', u'\ufb02', u'\u0141', u'\u0152', u'\u0160',
u'\u0178', u'\u017d', u'\u0131', u'\u0142', u'\u0153', u'\u0161', u'\u017e', u'\u0000',
u'\u20ac', u'\u00a1', u'\u00a2', u'\u00a3', u'\u00a4', u'\u00a5', u'\u00a6', u'\u00a7',
u'\u00a8', u'\u00a9', u'\u00aa', u'\u00ab', u'\u00ac', u'\u0000', u'\u00ae', u'\u00af',
u'\u00b0', u'\u00b1', u'\u00b2', u'\u00b3', u'\u00b4', u'\u00b5', u'\u00b6', u'\u00b7',
u'\u00b8', u'\u00b9', u'\u00ba', u'\u00bb', u'\u00bc', u'\u00bd', u'\u00be', u'\u00bf',
u'\u00c0', u'\u00c1', u'\u00c2', u'\u00c3', u'\u00c4', u'\u00c5', u'\u00c6', u'\u00c7',
u'\u00c8', u'\u00c9', u'\u00ca', u'\u00cb', u'\u00cc', u'\u00cd', u'\u00ce', u'\u00cf',
u'\u00d0', u'\u00d1', u'\u00d2', u'\u00d3', u'\u00d4', u'\u00d5', u'\u00d6', u'\u00d7',
u'\u00d8', u'\u00d9', u'\u00da', u'\u00db', u'\u00dc', u'\u00dd', u'\u00de', u'\u00df',
u'\u00e0', u'\u00e1', u'\u00e2', u'\u00e3', u'\u00e4', u'\u00e5', u'\u00e6', u'\u00e7',
u'\u00e8', u'\u00e9', u'\u00ea', u'\u00eb', u'\u00ec', u'\u00ed', u'\u00ee', u'\u00ef',
u'\u00f0', u'\u00f1', u'\u00f2', u'\u00f3', u'\u00f4', u'\u00f5', u'\u00f6', u'\u00f7',
u'\u00f8', u'\u00f9', u'\u00fa', u'\u00fb', u'\u00fc', u'\u00fd', u'\u00fe', u'\u00ff'
)
assert len(_pdfDocEncoding) == 256
_pdfDocEncoding_rev = {}
for i in xrange(256):
char = _pdfDocEncoding[i]
if char == u"\u0000":
continue
assert char not in _pdfDocEncoding_rev
_pdfDocEncoding_rev[char] = i
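# Illustrative sketch: characters present in the table survive an
# encode/decode round trip (the input is an assumption for demonstration).
def _example_pdfdocencoding_roundtrip():
    assert decode_pdfdocencoding(encode_pdfdocencoding(u'Hello')) == u'Hello'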
| agpl-3.0 |
Innovahn/odoo.old | addons/hr_holidays/report/holidays_summary_report.py | 333 | 10372 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import time
import openerp
from openerp.osv import fields, osv
from openerp.report.interface import report_rml
from openerp.report.interface import toxml
from openerp.report import report_sxw
from openerp.tools import ustr
from openerp.tools.translate import _
from openerp.tools import to_xml
def lengthmonth(year, month):
if month == 2 and ((year % 4 == 0) and ((year % 100 != 0) or (year % 400 == 0))):
return 29
return [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31][month]
def strToDate(dt):
if dt:
dt_date=datetime.date(int(dt[0:4]),int(dt[5:7]),int(dt[8:10]))
return dt_date
else:
return
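# Illustrative sketch (assumed dates): February length respects leap years
# and strToDate parses the ISO "YYYY-MM-DD" prefix of a timestamp.
def _example_date_helpers():
    assert lengthmonth(2008, 2) == 29
    assert lengthmonth(2009, 2) == 28
    assert strToDate('2009-01-14') == datetime.date(2009, 1, 14)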
def emp_create_xml(self, cr, uid, dept, holiday_type, row_id, empid, name, som, eom):
display={}
if dept==0:
count=0
registry = openerp.registry(cr.dbname)
holidays_ids = registry['hr.holidays'].search(cr, uid, [('employee_id','in',[empid,False]), ('type', '=', 'remove')])
ids_date = registry['hr.holidays'].read(cr, uid, holidays_ids, ['date_from','date_to','holiday_status_id','state'])
for index in range(1,61):
diff=index-1
current=som+datetime.timedelta(diff)
for item in ids_date:
if current >= strToDate(item['date_from']) and current <= strToDate(item['date_to']):
if item['state'] in holiday_type:
display[index]=item['holiday_status_id'][0]
count=count +1
else:
display[index]=' '
break
else:
display[index]=' '
else:
for index in range(1,61):
display[index]=' '
count=''
data_xml=['<info id="%d" number="%d" val="%s" />' % (row_id,x,display[x]) for x in range(1,len(display)+1) ]
# Computing the xml
xml = '''
%s
<employee row="%d" id="%d" name="%s" sum="%s">
</employee>
    ''' % ('\n'.join(data_xml), row_id, dept, ustr(toxml(name)), count)
return xml
class report_custom(report_rml):
def create_xml(self, cr, uid, ids, data, context):
registry = openerp.registry(cr.dbname)
obj_dept = registry['hr.department']
obj_emp = registry['hr.employee']
depts=[]
emp_id={}
rpt_obj = registry['hr.holidays']
rml_obj=report_sxw.rml_parse(cr, uid, rpt_obj._name,context)
cr.execute("SELECT name FROM res_company")
res=cr.fetchone()[0]
date_xml=[]
date_today=time.strftime('%Y-%m-%d %H:%M:%S')
date_xml +=['<res name="%s" today="%s" />' % (to_xml(res),date_today)]
cr.execute("SELECT id, name, color_name FROM hr_holidays_status ORDER BY id")
legend=cr.fetchall()
today=datetime.datetime.today()
first_date=data['form']['date_from']
som = strToDate(first_date)
eom = som+datetime.timedelta(59)
day_diff=eom-som
name = ''
if len(data['form'].get('emp', ())) == 1:
name = obj_emp.read(cr, uid, data['form']['emp'][0], ['name'])['name']
if data['form']['holiday_type']!='both':
type=data['form']['holiday_type']
if data['form']['holiday_type']=='Confirmed':
                holiday_type=('confirm',)  # one-element tuple, not a bare string
            else:
                holiday_type=('validate',)
else:
type="Confirmed and Approved"
holiday_type=('confirm','validate')
date_xml.append('<from>%s</from>\n'% (str(rml_obj.formatLang(som.strftime("%Y-%m-%d"),date=True))))
date_xml.append('<to>%s</to>\n' %(str(rml_obj.formatLang(eom.strftime("%Y-%m-%d"),date=True))))
date_xml.append('<type>%s</type>'%(type))
date_xml.append('<name>%s</name>'%(name))
# date_xml=[]
for l in range(0,len(legend)):
date_xml += ['<legend row="%d" id="%d" name="%s" color="%s" />' % (l+1,legend[l][0],_(legend[l][1]),legend[l][2])]
date_xml += ['<date month="%s" year="%d" />' % (ustr(som.strftime('%B')), som.year),'<days>']
cell=1
if day_diff.days>=30:
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som.replace(day=x).strftime('%a')),x-som.day+1) for x in range(som.day, lengthmonth(som.year, som.month)+1)]
else:
if day_diff.days>=(lengthmonth(som.year, som.month)-som.day):
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som.replace(day=x).strftime('%a')),x-som.day+1) for x in range(som.day, lengthmonth(som.year, som.month)+1)]
else:
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som.replace(day=x).strftime('%a')),x-som.day+1) for x in range(som.day, eom.day+1)]
cell=x-som.day+1
day_diff1=day_diff.days-cell+1
width_dict={}
month_dict={}
i=1
j=1
year=som.year
month=som.month
month_dict[j]=som.strftime('%B')
width_dict[j]=cell
while day_diff1>0:
if month+i<=12:
                if day_diff1 > lengthmonth(year,i+month): # use the real month length; a fixed 30 breaks dates like 2009-01-01
som1=datetime.date(year,month+i,1)
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som1.replace(day=x).strftime('%a')),cell+x) for x in range(1, lengthmonth(year,i+month)+1)]
i=i+1
j=j+1
month_dict[j]=som1.strftime('%B')
cell=cell+x
width_dict[j]=x
else:
som1=datetime.date(year,month+i,1)
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som1.replace(day=x).strftime('%a')),cell+x) for x in range(1, eom.day+1)]
i=i+1
j=j+1
month_dict[j]=som1.strftime('%B')
cell=cell+x
width_dict[j]=x
day_diff1=day_diff1-x
else:
years=year+1
year=years
month=0
i=1
if day_diff1>=30:
som1=datetime.date(years,i,1)
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som1.replace(day=x).strftime('%a')),cell+x) for x in range(1, lengthmonth(years,i)+1)]
i=i+1
j=j+1
month_dict[j]=som1.strftime('%B')
cell=cell+x
width_dict[j]=x
else:
som1=datetime.date(years,i,1)
i=i+1
j=j+1
month_dict[j]=som1.strftime('%B')
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som1.replace(day=x).strftime('%a')),cell+x) for x in range(1, eom.day+1)]
cell=cell+x
width_dict[j]=x
day_diff1=day_diff1-x
date_xml.append('</days>')
date_xml.append('<cols>3.5cm%s,0.4cm</cols>\n' % (',0.4cm' * (60)))
date_xml = ''.join(date_xml)
st='<cols_months>3.5cm'
for m in range(1,len(width_dict)+1):
st+=',' + str(0.4 *width_dict[m])+'cm'
st+=',0.4cm</cols_months>\n'
months_xml =['<months number="%d" name="%s"/>' % (x, _(month_dict[x])) for x in range(1,len(month_dict)+1) ]
months_xml.append(st)
emp_xml=''
row_id=1
if data['model'] == 'hr.employee':
for items in obj_emp.read(cr, uid, data['form']['emp'], ['id', 'name']):
emp_xml += emp_create_xml(self, cr, uid, 0, holiday_type, row_id, items['id'], items['name'], som, eom)
row_id = row_id +1
elif data['model']=='ir.ui.menu':
for dept in obj_dept.browse(cr, uid, data['form']['depts'], context=context):
emp_ids = obj_emp.search(cr, uid, [('department_id', '=', dept.id)], context=context)
if emp_ids==[]:
continue
dept_done=0
for item in obj_emp.read(cr, uid, emp_ids, ['id', 'name']):
if dept_done==0:
emp_xml += emp_create_xml(self, cr, uid, 1, holiday_type, row_id, dept.id, dept.name, som, eom)
row_id = row_id +1
dept_done=1
emp_xml += emp_create_xml(self, cr, uid, 0, holiday_type, row_id, item['id'], item['name'], som, eom)
row_id = row_id +1
header_xml = '''
<header>
<date>%s</date>
<company>%s</company>
</header>
''' % (str(rml_obj.formatLang(time.strftime("%Y-%m-%d"),date=True))+' ' + str(time.strftime("%H:%M")),to_xml(registry['res.users'].browse(cr,uid,uid).company_id.name))
# Computing the xml
xml='''<?xml version="1.0" encoding="UTF-8" ?>
<report>
%s
%s
%s
%s
</report>
        ''' % (header_xml, '\n'.join(months_xml), date_xml, ustr(emp_xml))
return xml
report_custom('report.holidays.summary', 'hr.holidays', '', 'addons/hr_holidays/report/holidays_summary.xsl')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
matthew-brett/bibstuff | bibstuff/bibname.py | 1 | 10649 | #! /usr/bin/env python
#File: bibname.py
"""
:mod:`bibstuff.bibname` --- Name Parser and Formatter
=====================================================
Parses bibtex-formatted author/editor raw names and provides
formatting functions (e.g., via bibstyles/shared.NamesFormatter).
:author: Dylan W. Schwilk
:contact: http://www.schwilk.org
:author: Alan G. Isaac
:contact: http://www.american.edu/cas/econ/faculty/isaac/isaac1.htm
:copyright: 2009 by Dylan Schwilk and Alan G Isaac
:license: MIT (see `license.txt`_)
:date: 2009-01-14
:since: 2006-08-29
:note: Major change as of 2008-07-02. Now the ebnf grammar and processor
handles parsing of a list of names (a bibtex names field such as editor
or author) and parses the single author name into its fvlj parts. This
eliminates the need for the original hand-coded parse_raw_names_parts
function. Moved to using names_dicts rather than names_parts. The
grammar handles latex accents and ligatures as well as braces strings so
that a name such as {Barnes and Noble, Inc} is parsed as a single name
and not split on the " and ".
:todo: The dispatch processor does not currently strip the leading and trailing
braces from latex/bibtex strings. Not hard to add (see bibfile.py). This
should be done eventually.
:todo: The grammar does not support quoted strings, only braces strings. Could
be added fairly simply
.. _license.txt: ./license.txt
"""
__docformat__ = "restructuredtext en"
__authors__ = ["Dylan W. Schwilk", "Alan G. Isaac"]
__version__ = '2.0'
__needs__ = '2.4'
################ IMPORTS #############################
# import from standard library
import logging
logging.basicConfig(format='\n%(levelname)s:\n%(message)s\n')
bibname_logger = logging.getLogger('bibstuff_logger')
# import dependencies
import simpleparse
from simpleparse.dispatchprocessor import dispatch, DispatchProcessor, getString, lines
from string import maketrans
# BibStuff imports
from bibstuff import bibstyles, bibfile, bibgrammar
######################################################
################## Global Variables ##################
# constant needed for populating dicts in names_dicts with empty lists for
# missing parts
nameparts = ("first","last","von","jr")
# The EBNF description of a bibtex name field (such as a list of author names).
ebnf_bibname = r"""
namelist := sp*, name, (_and_, name)*
<_and_> := sp+, "and", sp+
name := vlf / fvl / fl / vljf / fvlj / l
>l< := last
>vlf< := (von, sp+)*, last, (sp+, last)*, comma, (sp*, first)+
>fl< := first, sp+, (first, sp+, ?(capitalized/capstring))*, last
>fvl< := (first, sp+)+, (von, sp+)+, last, (sp+, last)*
>fvlj< := fvl, comma, jr
>vljf< := (von, sp+)*, last, (sp+, last)*, comma, jr, comma, first, (sp+ , first)*
von := lowercase / lowerstring
first := capitalized / capstring
last := capitalized / capstring
jr := "jr" / "Jr" / "JR" / "Junior" / "junior" /
"Sr" / "sr" / "II" / "III" / "IV" / "2nd" / "3rd" / "4th"
<comma> := sp*, ',', sp*
<capitalized> := capital , anyc*
<lowercase> := ?lowerc, -"and ", anyc* # Mustn't grab the delimiter _and_ for a part
<ltx_accent> := '\\`' / "\\'" / '\\^' / '\\"' / '\\H' / '\\~' / '\\c' / '\\=' / '\\b' / '\\.' /
'\\d' / '\\u' / '\\v' / '\\t'
<ltx_ij_accent> := '\\^{\\i}' / '\\"{\\i}' / '\\^{\\j}' / '\\"{\\j}'
<ltx_ligature_uc> := '\\AE' / '\\OE' / '\\AA' / '\\O'
<ltx_ligature_lc> := '\\ae' / '\\oe' / '\\aa' / '\\o' / '\\ss'
<capital> := ('{',capital,'}') / [A-Z] /
(ltx_accent, [A-Z]) / (ltx_accent, '{' , [A-Z] , '}') /
ltx_ligature_uc
<lowerc> := ('{',lowerc,'}') / [a-z] / (ltx_accent, [a-z]) /
(ltx_accent, '{' , [a-z] , '}') /
ltx_ij_accent / ltx_ligature_lc
<anyc> := [~'-] / capital / lowerc
<string> := '{' , braces_string?, '}'
<capstring> := '{' , cap_braces_string?, '}'
<lowerstring> := '{' , lower_braces_string?, '}'
<cap_braces_string> := ( (capital, -[{}]*) / capstring)+
<lower_braces_string> := ( (capital, -[{}]*) / lowerstring)+
<braces_string> := (-[{}]+ / string)+
<sp> := [ \t\n\r.]
"""
bibnamelist_parser = simpleparse.parser.Parser(ebnf_bibname, 'namelist')
######################################################
# ----------- Public Classes and Functions -----------------#
# ----------------------------------------------------------
# BibName
# -------
# Parser processor for bibtex names
# ----------------------------------------------------------
class BibName( simpleparse.dispatchprocessor.DispatchProcessor ):
"""Processes a bibtex names entry (author, editor, etc) and
    stores the resulting names_dicts (one dict per name).
:note: a BibName object should be bibstyle independent.
"""
def __init__(self, raw_names=None, from_field=None) : #:note: 2006-07-25 add initialization based on raw name
"""initialize a BibName instance
:Parameters:
`raw_names` : str
the raw names (e.g., unparsed author field of a BibEntry instance)
`from_field` : str
the entry field for the raw name
:note: 2006-08-02 add `from_field` argument (set by `BibEntry.make_names`)
"""
self.from_field = from_field
self.raw_names = raw_names
self.names_dicts = []
#populate self.names_dicts from raw_names
if raw_names:
self.parse_raw_names(raw_names)
############### PRODUCTION FUNCTIONS #######################
# Handle each name by adding new dict to list "names_dicts", then
# handle each name part by adding to last dict in names_dict list.
    def name(self, (tag,start,stop,subtags), buffer):
        """Production function to process a single name in a namelist"""
self.names_dicts.append({}) # add new dict to list
for part in subtags:
dispatch(self, part, buffer)
# Create empty lists for missing parts
for p in nameparts:
if not self.names_dicts[-1].has_key(p):
self.names_dicts[-1][p] = []
def last(self, (tag,start,stop,subtags), buffer ):
"""Processes last name part in a single name of a bibtex names field"""
if self.names_dicts[-1].has_key("last"):
self.names_dicts[-1]["last"].append(buffer[start:stop])
else:
self.names_dicts[-1]["last"] = [buffer[start:stop],]
def first(self, (tag,start,stop,subtags), buffer ):
"""Processes first name part in a single name of a bibtex names field"""
if self.names_dicts[-1].has_key("first"):
self.names_dicts[-1]["first"].append(buffer[start:stop])
else:
self.names_dicts[-1]["first"] = [buffer[start:stop],]
def von(self, (tag,start,stop,subtags), buffer ):
"""Processes von name part in a single name of a bibtex names field"""
if self.names_dicts[-1].has_key("von"):
self.names_dicts[-1]["von"].append(buffer[start:stop])
else:
self.names_dicts[-1]["von"] = [buffer[start:stop],]
def jr(self, (tag,start,stop,subtags), buffer ):
"""Processes jr name part in a single name of a bibtex names field"""
        # Just one jr part, so simply add a list with one item
self.names_dicts[-1]["jr"] = [ buffer[start:stop],]
############## HELPER FUNCTIONS ######################
def parse_raw_names(self, raw_name):
"""This function can be used to populate an empty BibName
instance or replace all the name values currently contained in
an instance. It parses the names field with the bibname grammar"""
self.names_dicts = [] # Replace extant list of names
bibnamelist_parser.parse(raw_name, processor = self)
def get_names_dicts(self): #:note: renamed
"""
Return a list of name dicts,
one dict per name,
having the fields: first , von, last, jr
"""
return self.names_dicts
#ai: method to get last names, which is needed by bibstyle.py and by
#some style sortkeys
def get_last_names(self):
"""Return list of strings, where each string is a last name.
:TODO: graceful handling of missing names parts
"""
result = list(' '.join(name_dict['last']) for name_dict in self.names_dicts)
#bibname_logger.debug("BibName.get_last_names result: "+str(result))
return result
def format(self, names_formatter):
"""
format a BibName object into a string useful for citations
:note: called by the BibEntry class in bibfile.py when entry formatting
is requested
"""
return names_formatter.format_names(self)
def getNames(src) :
    """Returns list of name dicts. Each dict has keys "first", "last",
    "von", "jr". `src` is a string in bibtex name format.
"""
try :
p = BibName(src) #:note: 2006-07-25 allow initialization w src
return p.get_names_dicts() #:note: 2006-07-25 renamed
except :
bibname_logger.error('Error in name %s' % src)
raise
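# Illustrative sketch (assumed input): per the grammar above, the list is
# split on " and " and each name is separated into von/last/first/jr parts.
def _example_getNames():
    dicts = getNames("von Beethoven, Ludwig and Smith, John")
    assert [d['last'] for d in dicts] == [['Beethoven'], ['Smith']]
    assert dicts[0]['von'] == ['von']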
# command-line version
if __name__ =="__main__":
import sys
from optparse import OptionParser
usage = "usage: %prog [options] filenames"
parser = OptionParser(usage=usage, version ="%prog " + __version__)
parser.add_option("-t", "--template", action="store", type="string", \
dest="template", default = 'f{.}. |v |l| jr', help="Name format template")
parser.add_option("-i", "--initials", action="store_true", dest="initials", \
default = True, help="Initialize first names")
parser.add_option("-I", "--no-initials", action="store_false", dest="initials", \
default = True, help="do not initialize first names")
parser.add_option("-l", "--last-names", action="store_true", dest="last_names", \
default = False, help="Print last names only.")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
help="Print INFO messages to stdout, default=%default")
# get options
(options, args) = parser.parse_args()
if options.verbose:
bibname_logger.setLevel(logging.INFO)
if options.last_names:
options.template = 'l'
if options.initials :
initials = 'f' # only first names. Does any style ever use initials for anything else?
else :
initials = ''
if len(args) == 0 :
src = sys.stdin.read()
else :
flist = list()
for fname in args:
try:
flist.append(open(fname,'r'))
except IOError :
bibname_logger.warn('Error in filelist: %s.'%fname)
src = '\n'.join(f.read() for f in flist)
map(lambda f: f.close(), flist)
if not src:
bibname_logger.error("No bibtex source database found")
sys.exit(1)
else:
bfile = bibfile.BibFile()
bibgrammar.Parse(src, bfile)
names_formatter = bibstyles.shared.NamesFormatter(template_list=[options.template]*2,initials=initials)
for entry in bfile.entries:
print entry.format_names(names_formatter)
| mit |
MSU-CS-Software-Engineering/habitgame | landing_page.py | 1 | 16402 | import os.path
from datetime import date, timedelta #For Timestamps
from tkinter import *
from tkinter.ttk import *
from tkinter import messagebox #Must be explicitly imported. Used for placeholders.
class Hack_Frame(Frame):
def __init__(self, parent, ID, hack_type, empty=0):
Frame.__init__(self, parent)
self.parent = parent
self.ID = ID
self.hack_type = hack_type
self.top_class = self.parent.parent.parent
self.name_label = Label(self,
text = '',
anchor = CENTER,
background = "#F9D386",
font = "Veranda 16 bold")
self.name_label.pack(fill = X, expand = True)
self.description_label = Label(self,
text = '',
wraplength = 375,
background = "#EFE4B0",
font = "arial 12",
padding = 5, justify = LEFT)
self.description_label.pack(fill = X, expand = True)
self.value_label = Label(self,
text = '',
font = "arial 12",
padding = 5,
background = "#EFE4B0")
self.value_label.pack(fill = X, expand = True)
self.date_time_label = Label(self,
text = '',
font = "arial 12",
padding = 5,
background = "#EFE4B0")
self.date_time_label.pack(fill = X, expand = True)
if empty == 0:
self.complete_button = Button(self)
self.complete_button.configure(text = 'Complete',
style = 'complete.TButton',
image = self.parent.parent.complete_button_image,
compound = 'left',
cursor = 'hand2',
command = self.remove)
complete_button_style = Style()
complete_button_style.configure('complete.TButton',
font = 'arial 12 bold',
relief = 'flat',
padding = '0 3 0 3',
foreground = 'black',
background = '#C6E29A')
self.complete_button.image = self.parent.parent.complete_button_image
self.complete_button.pack(fill = X,
expand = True,
side = BOTTOM)
self.set_style('frame_style.TFrame', '#EBEDF1', 100)
def remove(self):
#Pass the data to the top-level(parent->parent->parent)
self.top_class.complete_hack(self.ID)
self.destroy()
def function_builder(self, args):
messagebox.showinfo("Place Holder", "go to " + args)
def set_complete_button_text(self, text):
self.complete_button.configure(text = text)
def set_style(self, name, background_color, height=None):
_style = Style()
if height != None:
_style.configure(name, background = background_color,
height = height)
else:
_style.configure(name, background = background_color)
self.configure(style = name)
def set_description_label(self, text):
self.description_label.configure(text = "Note: " + text)
def set_name_label(self, text):
self.name_label.configure(text = text)
def set_value_label(self, text):
self.value_label.configure(text = "Value: " + text)
def set_date_time_label(self, text):
self.date_time_label.configure(text = "Date/Time: " + text)
class Landing_Area_Frame(Frame):
def __init__(self, parent, hack_type):
Frame.__init__(self, parent)
self.set_style()
self.frames = []
self.number_of_frames = 0
self.empty_frames_message = ''
self.hack_type = hack_type
self.column = self.get_column()
self.parent = parent
self.current_row = 0
def remove_frames(self):
for frame in self.frames:
frame.destroy()
self.current_row = 1
self.frames = []
def set_style(self, height=None):
_style = Style()
_style.configure('frame_style.TFrame',
background = '#EBEDF1')
if height != None:
_style.configure(height = height)
self.configure(style = 'frame_style.TFrame')
def set_header(self, text, color):
self.header = Label(self, text = text,
anchor = CENTER,
font = "Veranda 18 bold",
foreground = 'black',
background = color)
self.header.grid(row = 0, column = self.column, sticky = 'new',
pady = (3,0), padx = 3)
def get_current_row(self):
self.current_row += 1
return self.current_row
def get_column(self):
if self.hack_type == 'habit':
return 0
elif self.hack_type == 'daily':
return 2
else:
return 4
def set_frames(self, hack_dict):
#Hack Code Area
if len(hack_dict) > 4:
self.number_of_frames = 5
elif len(hack_dict) == 0:
self.number_of_frames = 0
hack_frame = Hack_Frame(self, 0, self.hack_type, 1)
hack_frame.grid(row = 1, column = self.column, sticky = 'news',pady = (3,0), padx = 3)
if self.hack_type == 'daily':
label_string = 'dailies'
elif self.hack_type == 'habit':
label_string = 'habits'
else:
label_string = 'tasks'
hack_frame.set_name_label("No "+label_string+" to Display")
self.frames.append(hack_frame)
else:
self.number_of_frames = len(hack_dict)
if self.number_of_frames != 0:
for key in hack_dict.keys():
hack = hack_dict[key]
if hack.timestamp <= self.parent.parent.current_date:
hack_frame = Hack_Frame(self, hack.ID, self.hack_type)
hack_frame.grid(row = self.get_current_row(),
column = self.column,
sticky = 'new', pady = (3,0),
padx = 3)
hack_frame.set_name_label(hack.title)
hack_frame.set_description_label(hack.description)
hack_frame.set_value_label(str(hack.value))
hack_frame.set_date_time_label(str(hack.timestamp))
self.frames.append(hack_frame)
else:
flag_hack = False
for key in hack_dict.keys():
hack = hack_dict[key]
if hack.h_type == 'daily' and hack.timestamp <= self.parent.parent.current_date:
flag_hack = True
if flag_hack == False:
hack_frame = Hack_Frame(self, 0, self.hack_type, 1)
hack_frame.grid(row = 1, column = self.column, sticky = 'news',pady = (3,0), padx = 3)
if self.hack_type == 'daily':
label_string = 'dailies'
elif self.hack_type == 'habit':
label_string = 'habits'
else:
label_string = 'tasks'
hack_frame.set_name_label("No "+label_string+" to Display")
self.frames.append(hack_frame)
class Landing_Page (Frame):
def __init__(self, parent, character):
Frame.__init__(self, parent)
self.parent = parent
self.character = character
self.complete_button_image = PhotoImage(file=os.path.join("assets", "art", "check.gif"))
self.columnconfigure(0, weight = 1)
self.columnconfigure(2, weight = 1)
self.columnconfigure(4, weight = 1)
#self.columnconfigure(3, weight = 1)
#self.rowconfigure (4, weight =1)
#self.rowconfigure (5, weight = 1)
self.rowconfigure (6, weight = 1)
self.habit_dict = {k:self.character.hacks[k]
for k in self.character.hacks
if self.character.hacks[k].h_type == 'habit'}
self.dailies_dict = {k:self.character.hacks[k]
for k in self.character.hacks
if self.character.hacks[k].h_type == 'daily'}
self.tasks_dict = {k:self.character.hacks[k]
for k in self.character.hacks
if self.character.hacks[k].h_type == 'task'}
self.set_landing_window()
self.go_to_habits_button = Button(self.habit_area,
text = 'GO TO HABITS',
cursor = 'hand2',
style = 'go_to_button.TButton')
self.go_to_dailies_button = Button(self.daily_area,
text = 'GO TO DAILIES',
cursor = 'hand2',
style = 'go_to_button.TButton')
self.go_to_tasks_button = Button(self.task_area,
text='GO TO TASKS',
cursor = 'hand2',
style = 'go_to_button.TButton')
go_to_button_style = Style()
go_to_button_style.configure('go_to_button.TButton',
font = 'arial 14 bold',
relief = 'flat',
padding = 5,
foreground ='#54C9EB',
background = '#283D57')
self.go_to_habits_button.pack(fill = X, expand = False, side = BOTTOM)
self.go_to_dailies_button.pack(fill = X, expand = False, side = BOTTOM)
self.go_to_tasks_button.pack(fill = X, expand = False, side = BOTTOM)
def redraw(self, character):
self.go_to_habits_button.destroy()
self.go_to_dailies_button.destroy()
self.go_to_tasks_button.destroy()
#Update class' character data instance
self.character = character
self.habit_dict = {k:self.character.hacks[k]
for k in self.character.hacks
if self.character.hacks[k].h_type == 'habit'}
self.dailies_dict = {k:self.character.hacks[k]
for k in self.character.hacks
if self.character.hacks[k].h_type == 'daily'}
self.tasks_dict = {k:self.character.hacks[k]
for k in self.character.hacks
if self.character.hacks[k].h_type == 'task'}
#Destroy frames
self.habit_area.remove_frames()
self.daily_area.remove_frames()
self.task_area.remove_frames()
#Update area frames
self.habit_area.set_frames(self.habit_dict)
self.daily_area.set_frames(self.dailies_dict)
self.task_area.set_frames(self.tasks_dict)
self.go_to_habits_button = Button(self.habit_area,
text = 'GO TO HABITS',
cursor = 'hand2',
style = 'go_to_button.TButton')
self.go_to_dailies_button = Button(self.daily_area,
text = 'GO TO DAILIES',
cursor = 'hand2',
style = 'go_to_button.TButton')
self.go_to_tasks_button = Button(self.task_area,
text='GO TO TASKS',
cursor = 'hand2',
style = 'go_to_button.TButton')
go_to_button_style = Style()
go_to_button_style.configure('go_to_button.TButton',
font = 'arial 14 bold',
relief = 'flat',
padding = 5,
foreground ='#54C9EB',
background = '#283D57')
self.go_to_habits_button.pack(fill = X, expand = False, side = BOTTOM)
self.go_to_dailies_button.pack(fill = X, expand = False, side = BOTTOM)
self.go_to_tasks_button.pack(fill = X, expand = False, side = BOTTOM)
self.parent.bind_buttons()
def set_landing_window(self):
#label above progress bar
self.progress_label = Label(self, text="Daily Progress", padding=0)
self.progress_label.grid(row = 4, column =2 ,sticky='ew', pady=4, padx=5)
self.progress_label.configure(anchor = CENTER, font='arial 18 italic')
self.progress_label.rowconfigure(4, weight =1)
self.progress_label.columnconfigure(3, weight = 1)
#progress bar
#progress = Progressbar(self, orient = 'horizontal', mode= 'determinate')
#progress.grid(row = 5, column=0, columnspan = 6, stick = 'ew', padx = 3)
#progress.start()
#progress.rowconfigure(5, weight =1)
#progress.columnconfigure(0, weight = 1)
#three areas for adding dailies, task, habit widgets
self.habit_area = Landing_Area_Frame(self, 'habit')
self.habit_area.set_header('HABITS', '#D95B5B')
self.habit_area.set_frames(self.habit_dict)
self.habit_area.grid(row = 6, column = 0,
columnspan = 2, rowspan = 4,
padx = 5, sticky = 'enws')
self.habit_area.grid_propagate(False)
self.habit_area.rowconfigure(6, weight = 1)
self.habit_area.columnconfigure(0, weight = 1)
self.daily_area = Landing_Area_Frame(self, 'daily')
self.daily_area.set_header('DAILIES', '#9AD95B')
self.daily_area.set_frames(self.dailies_dict)
self.daily_area.grid(row = 6, column = 2, columnspan = 2,
rowspan = 4, padx = 5, sticky = 'enws')
self.daily_area.rowconfigure(6, weight = 1)
self.daily_area.columnconfigure(2, weight = 1)
self.daily_area.grid_propagate(False)
self.task_area = Landing_Area_Frame(self, 'task')
self.task_area.set_header('TASKS', '#5BADD9')
self.task_area.set_frames(self.tasks_dict)
self.task_area.grid(row = 6, column = 4, columnspan = 2,
rowspan = 4, padx = 5, sticky = 'news')
self.task_area.rowconfigure(6, weight = 1)
self.task_area.columnconfigure(4, weight = 1)
self.task_area.grid_propagate(False)
#Bottom go to buttons
def start(self):
        # "maximum" is the valid ttk.Progressbar option; the original
        # "midnight" key would raise a TclError at runtime
        self.progress["value"] = 0
        self.progress["maximum"] = 24
        self.progress["value"] = 12
def button_default(self):
#Placeholder
pass
| mit |
jjmleiro/hue | desktop/core/ext-py/Django-1.6.10/django/utils/translation/trans_real.py | 74 | 25651 | """Translation helper functions."""
from __future__ import unicode_literals
import locale
import os
import re
import sys
import gettext as gettext_module
from threading import local
import warnings
from django.utils.importlib import import_module
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_str, force_text
from django.utils.functional import memoize
from django.utils._os import upath
from django.utils.safestring import mark_safe, SafeData
from django.utils import six
from django.utils.six import StringIO
from django.utils.translation import TranslatorCommentWarning
# Translations are cached in a dictionary for every language+app tuple.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = local()
# The default translation is based on the settings file.
_default = None
# This is a cache for normalized accept-header languages to prevent multiple
# file lookups when checking the same locale on repeated requests.
_accepted = {}
_checked_languages = {}
# magic gettext number to separate context from message
CONTEXT_SEPARATOR = "\x04"
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9
# and RFC 3066, section 2.1
accept_language_re = re.compile(r'''
([A-Za-z]{1,8}(?:-[A-Za-z0-9]{1,8})*|\*) # "en", "en-au", "x-y-z", "es-419", "*"
        (?:\s*;\s*q=(0(?:\.\d{,3})?|1(?:\.0{,3})?))? # Optional "q=1.00", "q=0.8"
(?:\s*,\s*|$) # Multiple accepts per header.
''', re.VERBOSE)
language_code_prefix_re = re.compile(r'^/([\w-]+)(/|$)')
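# Illustrative sketch (assumed header value): each match is a
# (language, quality) pair, and an empty quality string means q=1.
def _example_accept_language():
    assert accept_language_re.findall('da, en-gb;q=0.8') == [('da', ''), ('en-gb', '0.8')]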
def to_locale(language, to_lower=False):
"""
Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is
True, the last component is lower-cased (en_us).
"""
p = language.find('-')
if p >= 0:
if to_lower:
return language[:p].lower()+'_'+language[p+1:].lower()
else:
# Get correct locale for sr-latn
if len(language[p+1:]) > 2:
return language[:p].lower()+'_'+language[p+1].upper()+language[p+2:].lower()
return language[:p].lower()+'_'+language[p+1:].upper()
else:
return language.lower()
def to_language(locale):
"""Turns a locale name (en_US) into a language name (en-us)."""
p = locale.find('_')
if p >= 0:
return locale[:p].lower()+'-'+locale[p+1:].lower()
else:
return locale.lower()
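# Illustrative sketch: converting between language and locale names,
# including the three-letter-suffix special case handled above.
def _example_locale_names():
    assert to_locale('en-us') == 'en_US'
    assert to_locale('sr-latn') == 'sr_Latn'
    assert to_language('en_US') == 'en-us'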
class DjangoTranslation(gettext_module.GNUTranslations):
"""
This class sets up the GNUTranslations context with regard to output
charset.
"""
def __init__(self, *args, **kw):
gettext_module.GNUTranslations.__init__(self, *args, **kw)
self.set_output_charset('utf-8')
self.__language = '??'
def merge(self, other):
self._catalog.update(other._catalog)
def set_language(self, language):
self.__language = language
self.__to_language = to_language(language)
def language(self):
return self.__language
def to_language(self):
return self.__to_language
def __repr__(self):
return "<DjangoTranslation lang:%s>" % self.__language
def translation(language):
"""
Returns a translation object.
This translation object will be constructed out of multiple GNUTranslations
    objects by merging their catalogs. It will construct an object for the
requested language and add a fallback to the default language, if it's
different from the requested language.
"""
global _translations
t = _translations.get(language, None)
if t is not None:
return t
from django.conf import settings
globalpath = os.path.join(os.path.dirname(upath(sys.modules[settings.__module__].__file__)), 'locale')
def _fetch(lang, fallback=None):
global _translations
res = _translations.get(lang, None)
if res is not None:
return res
loc = to_locale(lang)
def _translation(path):
try:
t = gettext_module.translation('django', path, [loc], DjangoTranslation)
t.set_language(lang)
return t
except IOError:
return None
res = _translation(globalpath)
# We want to ensure that, for example, "en-gb" and "en-us" don't share
# the same translation object (thus, merging en-us with a local update
# doesn't affect en-gb), even though they will both use the core "en"
# translation. So we have to subvert Python's internal gettext caching.
base_lang = lambda x: x.split('-', 1)[0]
if base_lang(lang) in [base_lang(trans) for trans in list(_translations)]:
res._info = res._info.copy()
res._catalog = res._catalog.copy()
def _merge(path):
t = _translation(path)
if t is not None:
if res is None:
return t
else:
res.merge(t)
return res
for appname in reversed(settings.INSTALLED_APPS):
app = import_module(appname)
apppath = os.path.join(os.path.dirname(upath(app.__file__)), 'locale')
if os.path.isdir(apppath):
res = _merge(apppath)
for localepath in reversed(settings.LOCALE_PATHS):
if os.path.isdir(localepath):
res = _merge(localepath)
if res is None:
if fallback is not None:
res = fallback
else:
return gettext_module.NullTranslations()
_translations[lang] = res
return res
default_translation = _fetch(settings.LANGUAGE_CODE)
current_translation = _fetch(language, fallback=default_translation)
return current_translation
def activate(language):
"""
Fetches the translation object for a given tuple of application name and
language and installs it as the current translation object for the current
thread.
"""
_active.value = translation(language)
def deactivate():
"""
    Uninstalls the currently active translation object so that further _ calls
will resolve against the default translation object, again.
"""
if hasattr(_active, "value"):
del _active.value
def deactivate_all():
"""
Makes the active translation object a NullTranslations() instance. This is
useful when we want delayed translations to appear as the original string
for some reason.
"""
_active.value = gettext_module.NullTranslations()
def get_language():
"""Returns the currently selected language."""
t = getattr(_active, "value", None)
if t is not None:
try:
return t.to_language()
except AttributeError:
pass
# If we don't have a real translation object, assume it's the default language.
from django.conf import settings
return settings.LANGUAGE_CODE
def get_language_bidi():
"""
Returns selected language's BiDi layout.
* False = left-to-right layout
* True = right-to-left layout
"""
from django.conf import settings
base_lang = get_language().split('-')[0]
return base_lang in settings.LANGUAGES_BIDI
def catalog():
"""
Returns the current active catalog for further processing.
This can be used if you need to modify the catalog or want to access the
whole message catalog instead of just translating one string.
"""
global _default
t = getattr(_active, "value", None)
if t is not None:
return t
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
return _default
def do_translate(message, translation_function):
"""
Translates 'message' using the given 'translation_function' name -- which
will be either gettext or ugettext. It uses the current thread to find the
translation object to use. If no current translation is activated, the
message will be run through the default translation object.
"""
global _default
    # str() lets a bytestring message remain a bytestring on Python 2
eol_message = message.replace(str('\r\n'), str('\n')).replace(str('\r'), str('\n'))
t = getattr(_active, "value", None)
if t is not None:
result = getattr(t, translation_function)(eol_message)
else:
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
result = getattr(_default, translation_function)(eol_message)
if isinstance(message, SafeData):
return mark_safe(result)
return result
def gettext(message):
"""
Returns a string of the translation of the message.
Returns a string on Python 3 and an UTF-8-encoded bytestring on Python 2.
"""
return do_translate(message, 'gettext')
if six.PY3:
ugettext = gettext
else:
def ugettext(message):
return do_translate(message, 'ugettext')
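# Illustrative sketch: with translations deactivated entirely, ugettext
# passes strings through unchanged, so no configured settings are needed.
def _example_null_translation():
    deactivate_all()
    try:
        assert ugettext("Hello") == "Hello"
    finally:
        deactivate()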
def pgettext(context, message):
msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
result = ugettext(msg_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
# force unicode, because lazy version expects unicode
result = force_text(message)
return result
def gettext_noop(message):
"""
Marks strings for translation but doesn't translate them now. This can be
used to store strings in global variables that should stay in the base
language (because they might be used externally) and will be translated
later.
"""
return message
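# Illustrative sketch of the intended usage (assumed example, not from the
# original source): mark a module-level constant for extraction without
# translating it at import time, then translate it at the point of use:
#
#     ERROR_HINT = gettext_noop("Invalid value")  # extracted, not translated
#     ...
#     display_message = gettext(ERROR_HINT)       # translated here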
def do_ntranslate(singular, plural, number, translation_function):
global _default
t = getattr(_active, "value", None)
if t is not None:
return getattr(t, translation_function)(singular, plural, number)
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
return getattr(_default, translation_function)(singular, plural, number)
def ngettext(singular, plural, number):
"""
Returns a string of the translation of either the singular or plural,
based on the number.
    Returns a string on Python 3 and a UTF-8-encoded bytestring on Python 2.
"""
return do_ntranslate(singular, plural, number, 'ngettext')
if six.PY3:
ungettext = ngettext
else:
def ungettext(singular, plural, number):
"""
        Returns a unicode string of the translation of either the singular or
plural, based on the number.
"""
return do_ntranslate(singular, plural, number, 'ungettext')
def npgettext(context, singular, plural, number):
msgs_with_ctxt = ("%s%s%s" % (context, CONTEXT_SEPARATOR, singular),
"%s%s%s" % (context, CONTEXT_SEPARATOR, plural),
number)
result = ungettext(*msgs_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
result = ungettext(singular, plural, number)
return result
def all_locale_paths():
"""
    Returns a list of paths to user-provided language files.
"""
from django.conf import settings
globalpath = os.path.join(
os.path.dirname(upath(sys.modules[settings.__module__].__file__)), 'locale')
return [globalpath] + list(settings.LOCALE_PATHS)
def check_for_language(lang_code):
"""
Checks whether there is a global language file for the given language
code. This is used to decide whether a user-provided language is
available. This is only used for language codes from either the cookies
or session and during format localization.
"""
for path in all_locale_paths():
if gettext_module.find('django', path, [to_locale(lang_code)]) is not None:
return True
return False
check_for_language = memoize(check_for_language, _checked_languages, 1)
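# Descriptive note (assumption about intent): memoize() caches results keyed
# on lang_code in _checked_languages, so repeated calls -- e.g. once per
# request -- avoid re-scanning the locale paths. check_for_language('de')
# returns True only if some path in all_locale_paths() contains a compiled
# django.mo catalog for the 'de' locale.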
def get_supported_language_variant(lang_code, supported=None, strict=False):
"""
Returns the language-code that's listed in supported languages, possibly
    selecting a more generic variant. Raises LookupError if nothing is found.
    If `strict` is False (the default), the function will look for an alternative
    country-specific variant when the one currently checked is not found.
"""
if supported is None:
from django.conf import settings
supported = SortedDict(settings.LANGUAGES)
if lang_code:
# if fr-CA is not supported, try fr-ca; if that fails, fallback to fr.
generic_lang_code = lang_code.split('-')[0]
variants = (lang_code, lang_code.lower(), generic_lang_code,
generic_lang_code.lower())
for code in variants:
if code in supported and check_for_language(code):
return code
if not strict:
# if fr-fr is not supported, try fr-ca.
for supported_code in supported:
if supported_code.startswith((generic_lang_code + '-',
generic_lang_code.lower() + '-')):
return supported_code
raise LookupError(lang_code)
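# Illustrative sketch (hypothetical values, not from the original source):
# with supported == {'fr-ca': 'French (Canada)'} and a catalog available,
#
#     get_supported_language_variant('fr-FR')               -> 'fr-ca'
#     get_supported_language_variant('fr-FR', strict=True)  -> LookupError
#
# because only the non-strict path falls back to another country-specific
# variant once 'fr-FR', 'fr-fr' and the generic 'fr' all miss.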
def get_language_from_path(path, supported=None, strict=False):
"""
Returns the language-code if there is a valid language-code
found in the `path`.
    If `strict` is False (the default), the function will look for an alternative
    country-specific variant when the one currently checked is not found.
"""
if supported is None:
from django.conf import settings
supported = SortedDict(settings.LANGUAGES)
regex_match = language_code_prefix_re.match(path)
if not regex_match:
return None
lang_code = regex_match.group(1)
try:
return get_supported_language_variant(lang_code, supported, strict=strict)
except LookupError:
return None
def get_language_from_request(request, check_path=False):
"""
Analyzes the request to find what language the user wants the system to
show. Only languages listed in settings.LANGUAGES are taken into account.
If the user requests a sublanguage where we have a main language, we send
out the main language.
If check_path is True, the URL path prefix will be checked for a language
code, otherwise this is skipped for backwards compatibility.
"""
global _accepted
from django.conf import settings
supported = SortedDict(settings.LANGUAGES)
if check_path:
lang_code = get_language_from_path(request.path_info, supported)
if lang_code is not None:
return lang_code
if hasattr(request, 'session'):
lang_code = request.session.get('django_language', None)
if lang_code in supported and lang_code is not None and check_for_language(lang_code):
return lang_code
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
try:
return get_supported_language_variant(lang_code, supported)
except LookupError:
pass
accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
for accept_lang, unused in parse_accept_lang_header(accept):
if accept_lang == '*':
break
# 'normalized' is the root name of the locale in POSIX format (which is
# the format used for the directories holding the MO files).
normalized = locale.locale_alias.get(to_locale(accept_lang, True))
if not normalized:
continue
# Remove the default encoding from locale_alias.
normalized = normalized.split('.')[0]
if normalized in _accepted:
# We've seen this locale before and have an MO file for it, so no
# need to check again.
return _accepted[normalized]
try:
accept_lang = get_supported_language_variant(accept_lang, supported)
except LookupError:
continue
else:
_accepted[normalized] = accept_lang
return accept_lang
try:
return get_supported_language_variant(settings.LANGUAGE_CODE, supported)
except LookupError:
return settings.LANGUAGE_CODE
dot_re = re.compile(r'\S')
def blankout(src, char):
"""
Changes every non-whitespace character to the given char.
Used in the templatize function.
"""
return dot_re.sub(char, src)
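# Illustrative sketch: blankout('{{ foo }}', 'B') returns 'BB BBB BB' --
# every non-whitespace character is replaced while whitespace is kept, so
# xgettext sees placeholders of the same length and string offsets survive.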
context_re = re.compile(r"""^\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?'))\s*""")
inline_re = re.compile(r"""^\s*trans\s+((?:"[^"]*?")|(?:'[^']*?'))(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?\s*""")
block_re = re.compile(r"""^\s*blocktrans(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?(?:\s+|$)""")
endblock_re = re.compile(r"""^\s*endblocktrans$""")
plural_re = re.compile(r"""^\s*plural$""")
constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")
one_percent_re = re.compile(r"""(?<!%)%(?!%)""")
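# Illustrative sketch: one_percent_re doubles lone percent signs so they
# survive later %-formatting. For a hypothetical input,
#
#     one_percent_re.sub('%%', '50% off')  -> '50%% off'
#     one_percent_re.sub('%%', '100%%')    -> '100%%'  (already escaped)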
def templatize(src, origin=None):
"""
Turns a Django template into something that is understood by xgettext. It
does so by translating the Django translation tags into standard gettext
function invocations.
"""
from django.conf import settings
from django.template import (Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK,
TOKEN_COMMENT, TRANSLATOR_COMMENT_MARK)
src = force_text(src, settings.FILE_CHARSET)
out = StringIO()
message_context = None
intrans = False
inplural = False
singular = []
plural = []
incomment = False
comment = []
lineno_comment_map = {}
comment_lineno_cache = None
for t in Lexer(src, origin).tokenize():
if incomment:
if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment':
content = ''.join(comment)
translators_comment_start = None
for lineno, line in enumerate(content.splitlines(True)):
if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
translators_comment_start = lineno
for lineno, line in enumerate(content.splitlines(True)):
if translators_comment_start is not None and lineno >= translators_comment_start:
out.write(' # %s' % line)
else:
out.write(' #\n')
incomment = False
comment = []
else:
comment.append(t.contents)
elif intrans:
if t.token_type == TOKEN_BLOCK:
endbmatch = endblock_re.match(t.contents)
pluralmatch = plural_re.match(t.contents)
if endbmatch:
if inplural:
if message_context:
out.write(' npgettext(%r, %r, %r,count) ' % (message_context, ''.join(singular), ''.join(plural)))
else:
out.write(' ngettext(%r, %r, count) ' % (''.join(singular), ''.join(plural)))
for part in singular:
out.write(blankout(part, 'S'))
for part in plural:
out.write(blankout(part, 'P'))
else:
if message_context:
out.write(' pgettext(%r, %r) ' % (message_context, ''.join(singular)))
else:
out.write(' gettext(%r) ' % ''.join(singular))
for part in singular:
out.write(blankout(part, 'S'))
message_context = None
intrans = False
inplural = False
singular = []
plural = []
elif pluralmatch:
inplural = True
else:
filemsg = ''
if origin:
filemsg = 'file %s, ' % origin
raise SyntaxError("Translation blocks must not include other block tags: %s (%sline %d)" % (t.contents, filemsg, t.lineno))
elif t.token_type == TOKEN_VAR:
if inplural:
plural.append('%%(%s)s' % t.contents)
else:
singular.append('%%(%s)s' % t.contents)
elif t.token_type == TOKEN_TEXT:
contents = one_percent_re.sub('%%', t.contents)
if inplural:
plural.append(contents)
else:
singular.append(contents)
else:
# Handle comment tokens (`{# ... #}`) plus other constructs on
# the same line:
if comment_lineno_cache is not None:
cur_lineno = t.lineno + t.contents.count('\n')
if comment_lineno_cache == cur_lineno:
if t.token_type != TOKEN_COMMENT:
for c in lineno_comment_map[comment_lineno_cache]:
filemsg = ''
if origin:
filemsg = 'file %s, ' % origin
warn_msg = ("The translator-targeted comment '%s' "
"(%sline %d) was ignored, because it wasn't the last item "
"on the line.") % (c, filemsg, comment_lineno_cache)
warnings.warn(warn_msg, TranslatorCommentWarning)
lineno_comment_map[comment_lineno_cache] = []
else:
out.write('# %s' % ' | '.join(lineno_comment_map[comment_lineno_cache]))
comment_lineno_cache = None
if t.token_type == TOKEN_BLOCK:
imatch = inline_re.match(t.contents)
bmatch = block_re.match(t.contents)
cmatches = constant_re.findall(t.contents)
if imatch:
g = imatch.group(1)
if g[0] == '"':
g = g.strip('"')
elif g[0] == "'":
g = g.strip("'")
g = one_percent_re.sub('%%', g)
if imatch.group(2):
# A context is provided
context_match = context_re.match(imatch.group(2))
message_context = context_match.group(1)
if message_context[0] == '"':
message_context = message_context.strip('"')
elif message_context[0] == "'":
message_context = message_context.strip("'")
out.write(' pgettext(%r, %r) ' % (message_context, g))
message_context = None
else:
out.write(' gettext(%r) ' % g)
elif bmatch:
for fmatch in constant_re.findall(t.contents):
out.write(' _(%s) ' % fmatch)
if bmatch.group(1):
# A context is provided
context_match = context_re.match(bmatch.group(1))
message_context = context_match.group(1)
if message_context[0] == '"':
message_context = message_context.strip('"')
elif message_context[0] == "'":
message_context = message_context.strip("'")
intrans = True
inplural = False
singular = []
plural = []
elif cmatches:
for cmatch in cmatches:
out.write(' _(%s) ' % cmatch)
elif t.contents == 'comment':
incomment = True
else:
out.write(blankout(t.contents, 'B'))
elif t.token_type == TOKEN_VAR:
parts = t.contents.split('|')
cmatch = constant_re.match(parts[0])
if cmatch:
out.write(' _(%s) ' % cmatch.group(1))
for p in parts[1:]:
if p.find(':_(') >= 0:
out.write(' %s ' % p.split(':',1)[1])
else:
out.write(blankout(p, 'F'))
elif t.token_type == TOKEN_COMMENT:
if t.contents.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
lineno_comment_map.setdefault(t.lineno,
[]).append(t.contents)
comment_lineno_cache = t.lineno
else:
out.write(blankout(t.contents, 'X'))
return force_str(out.getvalue())
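# Illustrative sketch (hypothetical template, not from the original source):
# templatize('{% trans "Hello" %}') writes roughly
#
#      gettext(u'Hello')
#
# to the output, i.e. a Django {% trans %} tag becomes a plain gettext()
# call that xgettext can extract; surrounding literal text is run through
# blankout() so line numbers and offsets are preserved.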
def parse_accept_lang_header(lang_string):
"""
Parses the lang_string, which is the body of an HTTP Accept-Language
header, and returns a list of (lang, q-value), ordered by 'q' values.
    Any format errors in lang_string result in an empty list being returned.
"""
result = []
pieces = accept_language_re.split(lang_string)
if pieces[-1]:
return []
for i in range(0, len(pieces) - 1, 3):
first, lang, priority = pieces[i : i + 3]
if first:
return []
if priority:
priority = float(priority)
if not priority: # if priority is 0.0 at this point make it 1.0
priority = 1.0
result.append((lang, priority))
result.sort(key=lambda k: k[1], reverse=True)
return result
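# Illustrative sketch (hypothetical header, not from the original source):
#
#     parse_accept_lang_header('da, en-gb;q=0.8, en;q=0.7')
#     -> [('da', 1.0), ('en-gb', 0.8), ('en', 0.7)]
#
# A missing q-value defaults to 1.0 and a malformed header yields [].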
| apache-2.0 |
osbzr/gooderp_org | website_mail/controllers/email_designer.py | 243 | 3151 | # -*- coding: utf-8 -*-
from urllib import urlencode
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.tools.mail import html_sanitize
class WebsiteEmailDesigner(http.Controller):
@http.route('/website_mail/email_designer', type='http', auth="user", website=True)
def index(self, model, res_id, template_model=None, **kw):
if not model or not model in request.registry or not res_id:
return request.redirect('/')
model_fields = request.registry[model]._fields
if 'body' not in model_fields and 'body_html' not in model_fields or \
'email' not in model_fields and 'email_from' not in model_fields or \
'name' not in model_fields and 'subject' not in model_fields:
return request.redirect('/')
res_id = int(res_id)
obj_ids = request.registry[model].exists(request.cr, request.uid, [res_id], context=request.context)
if not obj_ids:
return request.redirect('/')
# try to find fields to display / edit -> as t-field is static, we have to limit
# the available fields to a given subset
email_from_field = 'email'
if 'email_from' in model_fields:
email_from_field = 'email_from'
subject_field = 'name'
if 'subject' in model_fields:
subject_field = 'subject'
body_field = 'body'
if 'body_html' in model_fields:
body_field = 'body_html'
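        # Illustrative note (hypothetical model, not from the original
        # source): for a model exposing (name, email_from, body_html), the
        # editable fields resolve to subject_field='name',
        # email_from_field='email_from' and body_field='body_html'.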
cr, uid, context = request.cr, request.uid, request.context
record = request.registry[model].browse(cr, uid, res_id, context=context)
values = {
'record': record,
'templates': None,
'model': model,
'res_id': res_id,
'email_from_field': email_from_field,
'subject_field': subject_field,
'body_field': body_field,
'return_action': kw.get('return_action', ''),
}
if getattr(record, body_field):
values['mode'] = 'email_designer'
else:
if kw.get('enable_editor'):
kw.pop('enable_editor')
fragments = dict(model=model, res_id=res_id, **kw)
if template_model:
fragments['template_model'] = template_model
return request.redirect('/website_mail/email_designer?%s' % urlencode(fragments))
values['mode'] = 'email_template'
tmpl_obj = request.registry['email.template']
if template_model:
tids = tmpl_obj.search(cr, uid, [('model', '=', template_model)], context=context)
else:
tids = tmpl_obj.search(cr, uid, [], context=context)
templates = tmpl_obj.browse(cr, uid, tids, context=context)
values['templates'] = templates
values['html_sanitize'] = html_sanitize
return request.website.render("website_mail.email_designer", values)
@http.route(['/website_mail/snippets'], type='json', auth="user", website=True)
def snippets(self):
return request.website._render('website_mail.email_designer_snippets')
| agpl-3.0 |
dya2/python-for-android | python-modules/twisted/twisted/trial/test/test_runner.py | 49 | 29815 | # Copyright (c) 2005-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
#
# Maintainer: Jonathan Lange
# Author: Robert Collins
import StringIO, os, sys
from zope.interface import implements
from zope.interface.verify import verifyObject
from twisted.trial.itrial import IReporter, ITestCase
from twisted.trial import unittest, runner, reporter, util
from twisted.python import failure, log, reflect, filepath
from twisted.python.filepath import FilePath
from twisted.scripts import trial
from twisted.plugins import twisted_trial
from twisted import plugin
from twisted.internet import defer
pyunit = __import__('unittest')
class CapturingDebugger(object):
def __init__(self):
self._calls = []
def runcall(self, *args, **kwargs):
self._calls.append('runcall')
args[0](*args[1:], **kwargs)
class CapturingReporter(object):
"""
Reporter that keeps a log of all actions performed on it.
"""
implements(IReporter)
stream = None
tbformat = None
args = None
separator = None
testsRun = None
def __init__(self, stream=None, tbformat=None, rterrors=None,
publisher=None):
"""
Create a capturing reporter.
"""
self._calls = []
self.shouldStop = False
self._stream = stream
self._tbformat = tbformat
self._rterrors = rterrors
self._publisher = publisher
def startTest(self, method):
"""
Report the beginning of a run of a single test method
@param method: an object that is adaptable to ITestMethod
"""
self._calls.append('startTest')
def stopTest(self, method):
"""
Report the status of a single test method
@param method: an object that is adaptable to ITestMethod
"""
self._calls.append('stopTest')
def cleanupErrors(self, errs):
"""called when the reactor has been left in a 'dirty' state
@param errs: a list of L{twisted.python.failure.Failure}s
"""
self._calls.append('cleanupError')
def addSuccess(self, test):
self._calls.append('addSuccess')
def done(self):
"""
Do nothing. These tests don't care about done.
"""
class TrialRunnerTestsMixin:
"""
Mixin defining tests for L{runner.TrialRunner}.
"""
def tearDown(self):
self.runner._tearDownLogFile()
def test_empty(self):
"""
Empty test method, used by the other tests.
"""
def _getObservers(self):
return log.theLogPublisher.observers
def test_addObservers(self):
"""
Any log system observers L{TrialRunner.run} adds are removed by the
time it returns.
"""
originalCount = len(self._getObservers())
self.runner.run(self.test)
newCount = len(self._getObservers())
self.assertEqual(newCount, originalCount)
def test_logFileAlwaysActive(self):
"""
Test that a new file is opened on each run.
"""
oldSetUpLogFile = self.runner._setUpLogFile
l = []
def setUpLogFile():
oldSetUpLogFile()
l.append(self.runner._logFileObserver)
self.runner._setUpLogFile = setUpLogFile
self.runner.run(self.test)
self.runner.run(self.test)
self.failUnlessEqual(len(l), 2)
self.failIf(l[0] is l[1], "Should have created a new file observer")
def test_logFileGetsClosed(self):
"""
Test that file created is closed during the run.
"""
oldSetUpLogFile = self.runner._setUpLogFile
l = []
def setUpLogFile():
oldSetUpLogFile()
l.append(self.runner._logFileObject)
self.runner._setUpLogFile = setUpLogFile
self.runner.run(self.test)
self.failUnlessEqual(len(l), 1)
self.failUnless(l[0].closed)
class TestTrialRunner(TrialRunnerTestsMixin, unittest.TestCase):
"""
Tests for L{runner.TrialRunner} with the feature to turn unclean errors
into warnings disabled.
"""
def setUp(self):
self.stream = StringIO.StringIO()
self.runner = runner.TrialRunner(CapturingReporter, stream=self.stream)
self.test = TestTrialRunner('test_empty')
def test_publisher(self):
"""
The reporter constructed by L{runner.TrialRunner} is passed
L{twisted.python.log} as the value for the C{publisher} parameter.
"""
result = self.runner._makeResult()
self.assertIdentical(result._publisher, log)
class TrialRunnerWithUncleanWarningsReporter(TrialRunnerTestsMixin,
unittest.TestCase):
"""
Tests for the TrialRunner's interaction with an unclean-error suppressing
reporter.
"""
def setUp(self):
self.stream = StringIO.StringIO()
self.runner = runner.TrialRunner(CapturingReporter, stream=self.stream,
uncleanWarnings=True)
self.test = TestTrialRunner('test_empty')
class DryRunMixin(object):
suppress = [util.suppress(
category=DeprecationWarning,
message="Test visitors deprecated in Twisted 8.0")]
def setUp(self):
self.log = []
self.stream = StringIO.StringIO()
self.runner = runner.TrialRunner(CapturingReporter,
runner.TrialRunner.DRY_RUN,
stream=self.stream)
self.makeTestFixtures()
def makeTestFixtures(self):
"""
Set C{self.test} and C{self.suite}, where C{self.suite} is an empty
TestSuite.
"""
def test_empty(self):
"""
If there are no tests, the reporter should not receive any events to
report.
"""
result = self.runner.run(runner.TestSuite())
self.assertEqual(result._calls, [])
def test_singleCaseReporting(self):
"""
If we are running a single test, check the reporter starts, passes and
then stops the test during a dry run.
"""
result = self.runner.run(self.test)
self.assertEqual(result._calls, ['startTest', 'addSuccess', 'stopTest'])
def test_testsNotRun(self):
"""
When we are doing a dry run, the tests should not actually be run.
"""
self.runner.run(self.test)
self.assertEqual(self.log, [])
class DryRunTest(DryRunMixin, unittest.TestCase):
"""
Check that 'dry run' mode works well with Trial tests.
"""
def makeTestFixtures(self):
class MockTest(unittest.TestCase):
def test_foo(test):
self.log.append('test_foo')
self.test = MockTest('test_foo')
self.suite = runner.TestSuite()
class PyUnitDryRunTest(DryRunMixin, unittest.TestCase):
"""
Check that 'dry run' mode works well with stdlib unittest tests.
"""
def makeTestFixtures(self):
class PyunitCase(pyunit.TestCase):
def test_foo(self):
pass
self.test = PyunitCase('test_foo')
self.suite = pyunit.TestSuite()
class TestRunner(unittest.TestCase):
def setUp(self):
self.config = trial.Options()
# whitebox hack a reporter in, because plugins are CACHED and will
# only reload if the FILE gets changed.
parts = reflect.qual(CapturingReporter).split('.')
package = '.'.join(parts[:-1])
klass = parts[-1]
plugins = [twisted_trial._Reporter(
"Test Helper Reporter",
package,
description="Utility for unit testing.",
longOpt="capturing",
shortOpt=None,
klass=klass)]
# XXX There should really be a general way to hook the plugin system
# for tests.
def getPlugins(iface, *a, **kw):
self.assertEqual(iface, IReporter)
return plugins + list(self.original(iface, *a, **kw))
self.original = plugin.getPlugins
plugin.getPlugins = getPlugins
self.standardReport = ['startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest']
def tearDown(self):
plugin.getPlugins = self.original
def parseOptions(self, args):
self.config.parseOptions(args)
def getRunner(self):
r = trial._makeRunner(self.config)
r.stream = StringIO.StringIO()
# XXX The runner should always take care of cleaning this up itself.
# It's not clear why this is necessary. The runner always tears down
# its log file.
self.addCleanup(r._tearDownLogFile)
# XXX The runner should always take care of cleaning this up itself as
# well. It's necessary because TrialRunner._setUpTestdir might raise
# an exception preventing Reporter.done from being run, leaving the
# observer added by Reporter.__init__ still present in the system.
# Something better needs to happen inside
        # TrialRunner._runWithoutDecoration to remove the need for this kludge.
r._log = log.LogPublisher()
return r
def test_runner_can_get_reporter(self):
self.parseOptions([])
result = self.config['reporter']
runner = self.getRunner()
self.assertEqual(result, runner._makeResult().__class__)
def test_runner_get_result(self):
self.parseOptions([])
runner = self.getRunner()
result = runner._makeResult()
self.assertEqual(result.__class__, self.config['reporter'])
def test_uncleanWarningsOffByDefault(self):
"""
By default Trial sets the 'uncleanWarnings' option on the runner to
False. This means that dirty reactor errors will be reported as
errors. See L{test_reporter.TestDirtyReactor}.
"""
self.parseOptions([])
runner = self.getRunner()
self.assertNotIsInstance(runner._makeResult(),
reporter.UncleanWarningsReporterWrapper)
def test_getsUncleanWarnings(self):
"""
Specifying '--unclean-warnings' on the trial command line will cause
reporters to be wrapped in a device which converts unclean errors to
warnings. See L{test_reporter.TestDirtyReactor} for implications.
"""
self.parseOptions(['--unclean-warnings'])
runner = self.getRunner()
self.assertIsInstance(runner._makeResult(),
reporter.UncleanWarningsReporterWrapper)
def test_runner_working_directory(self):
self.parseOptions(['--temp-directory', 'some_path'])
runner = self.getRunner()
self.assertEquals(runner.workingDirectory, 'some_path')
def test_concurrentImplicitWorkingDirectory(self):
"""
If no working directory is explicitly specified and the default
working directory is in use by another runner, L{TrialRunner.run}
selects a different default working directory to use.
"""
self.parseOptions([])
# Make sure we end up with the same working directory after this test
# as we had before it.
self.addCleanup(os.chdir, os.getcwd())
# Make a new directory and change into it. This isolates us from state
# that other tests might have dumped into this process's temp
# directory.
runDirectory = FilePath(self.mktemp())
runDirectory.makedirs()
os.chdir(runDirectory.path)
firstRunner = self.getRunner()
secondRunner = self.getRunner()
where = {}
class ConcurrentCase(unittest.TestCase):
def test_first(self):
"""
Start a second test run which will have a default working
directory which is the same as the working directory of the
test run already in progress.
"""
# Change the working directory to the value it had before this
# test suite was started.
where['concurrent'] = subsequentDirectory = os.getcwd()
os.chdir(runDirectory.path)
self.addCleanup(os.chdir, subsequentDirectory)
secondRunner.run(ConcurrentCase('test_second'))
def test_second(self):
"""
Record the working directory for later analysis.
"""
where['record'] = os.getcwd()
result = firstRunner.run(ConcurrentCase('test_first'))
bad = result.errors + result.failures
if bad:
self.fail(bad[0][1])
self.assertEqual(
where, {
'concurrent': runDirectory.child('_trial_temp').path,
'record': runDirectory.child('_trial_temp-1').path})
def test_concurrentExplicitWorkingDirectory(self):
"""
If a working directory which is already in use is explicitly specified,
L{TrialRunner.run} raises L{_WorkingDirectoryBusy}.
"""
self.parseOptions(['--temp-directory', os.path.abspath(self.mktemp())])
initialDirectory = os.getcwd()
self.addCleanup(os.chdir, initialDirectory)
firstRunner = self.getRunner()
secondRunner = self.getRunner()
class ConcurrentCase(unittest.TestCase):
def test_concurrent(self):
"""
Try to start another runner in the same working directory and
assert that it raises L{_WorkingDirectoryBusy}.
"""
self.assertRaises(
util._WorkingDirectoryBusy,
secondRunner.run, ConcurrentCase('test_failure'))
def test_failure(self):
"""
Should not be called, always fails.
"""
self.fail("test_failure should never be called.")
result = firstRunner.run(ConcurrentCase('test_concurrent'))
bad = result.errors + result.failures
if bad:
self.fail(bad[0][1])
def test_runner_normal(self):
self.parseOptions(['--temp-directory', self.mktemp(),
'--reporter', 'capturing',
'twisted.trial.test.sample'])
my_runner = self.getRunner()
loader = runner.TestLoader()
suite = loader.loadByName('twisted.trial.test.sample', True)
result = my_runner.run(suite)
self.assertEqual(self.standardReport, result._calls)
def test_runner_debug(self):
self.parseOptions(['--reporter', 'capturing',
'--debug', 'twisted.trial.test.sample'])
my_runner = self.getRunner()
debugger = CapturingDebugger()
def get_debugger():
return debugger
my_runner._getDebugger = get_debugger
loader = runner.TestLoader()
suite = loader.loadByName('twisted.trial.test.sample', True)
result = my_runner.run(suite)
self.assertEqual(self.standardReport, result._calls)
self.assertEqual(['runcall'], debugger._calls)
class RemoveSafelyTests(unittest.TestCase):
"""
Tests for L{_removeSafely}.
"""
def test_removeSafelyNoTrialMarker(self):
"""
If a path doesn't contain a node named C{"_trial_marker"}, that path is
not removed by L{runner._removeSafely} and a L{runner._NoTrialMarker}
exception is raised instead.
"""
directory = self.mktemp()
os.mkdir(directory)
dirPath = filepath.FilePath(directory)
self.assertRaises(util._NoTrialMarker, util._removeSafely, dirPath)
def test_removeSafelyRemoveFailsMoveSucceeds(self):
"""
If an L{OSError} is raised while removing a path in
L{runner._removeSafely}, an attempt is made to move the path to a new
name.
"""
def dummyRemove():
"""
Raise an C{OSError} to emulate the branch of L{runner._removeSafely}
in which path removal fails.
"""
raise OSError()
# Patch stdout so we can check the print statements in _removeSafely
out = StringIO.StringIO()
self.patch(sys, 'stdout', out)
# Set up a trial directory with a _trial_marker
directory = self.mktemp()
os.mkdir(directory)
dirPath = filepath.FilePath(directory)
dirPath.child('_trial_marker').touch()
# Ensure that path.remove() raises an OSError
dirPath.remove = dummyRemove
util._removeSafely(dirPath)
self.assertIn("could not remove FilePath", out.getvalue())
def test_removeSafelyRemoveFailsMoveFails(self):
"""
If an L{OSError} is raised while removing a path in
L{runner._removeSafely}, an attempt is made to move the path to a new
name. If that attempt fails, the L{OSError} is re-raised.
"""
def dummyRemove():
"""
Raise an C{OSError} to emulate the branch of L{runner._removeSafely}
in which path removal fails.
"""
raise OSError("path removal failed")
def dummyMoveTo(path):
"""
Raise an C{OSError} to emulate the branch of L{runner._removeSafely}
in which path movement fails.
"""
raise OSError("path movement failed")
# Patch stdout so we can check the print statements in _removeSafely
out = StringIO.StringIO()
self.patch(sys, 'stdout', out)
# Set up a trial directory with a _trial_marker
directory = self.mktemp()
os.mkdir(directory)
dirPath = filepath.FilePath(directory)
dirPath.child('_trial_marker').touch()
# Ensure that path.remove() and path.moveTo() both raise OSErrors
dirPath.remove = dummyRemove
dirPath.moveTo = dummyMoveTo
error = self.assertRaises(OSError, util._removeSafely, dirPath)
self.assertEquals(str(error), "path movement failed")
self.assertIn("could not remove FilePath", out.getvalue())
class TestTrialSuite(unittest.TestCase):
def test_imports(self):
# FIXME, HTF do you test the reactor can be cleaned up ?!!!
from twisted.trial.runner import TrialSuite
class TestUntilFailure(unittest.TestCase):
class FailAfter(unittest.TestCase):
"""
A test case that fails when run 3 times in a row.
"""
count = []
def test_foo(self):
self.count.append(None)
if len(self.count) == 3:
self.fail('Count reached 3')
def setUp(self):
TestUntilFailure.FailAfter.count = []
self.test = TestUntilFailure.FailAfter('test_foo')
self.stream = StringIO.StringIO()
self.runner = runner.TrialRunner(reporter.Reporter, stream=self.stream)
def test_runUntilFailure(self):
"""
        Test that the runUntilFailure method of the runner actually fails
        after a few runs.
"""
result = self.runner.runUntilFailure(self.test)
self.failUnlessEqual(result.testsRun, 1)
self.failIf(result.wasSuccessful())
self.assertEquals(self._getFailures(result), 1)
def _getFailures(self, result):
"""
Get the number of failures that were reported to a result.
"""
return len(result.failures)
def test_runUntilFailureDecorate(self):
"""
        C{runUntilFailure} doesn't decorate the tests uselessly: it decorates
        them once when the run starts, not on every iteration.
"""
decorated = []
def decorate(test, interface):
decorated.append((test, interface))
return test
self.patch(unittest, "decorate", decorate)
result = self.runner.runUntilFailure(self.test)
self.failUnlessEqual(result.testsRun, 1)
self.assertEquals(len(decorated), 1)
self.assertEquals(decorated, [(self.test, ITestCase)])
def test_runUntilFailureForceGCDecorate(self):
"""
C{runUntilFailure} applies the force-gc decoration after the standard
L{ITestCase} decoration, but only one time.
"""
decorated = []
def decorate(test, interface):
decorated.append((test, interface))
return test
self.patch(unittest, "decorate", decorate)
self.runner._forceGarbageCollection = True
result = self.runner.runUntilFailure(self.test)
self.failUnlessEqual(result.testsRun, 1)
self.assertEquals(len(decorated), 2)
self.assertEquals(decorated,
[(self.test, ITestCase),
(self.test, unittest._ForceGarbageCollectionDecorator)])
class UncleanUntilFailureTests(TestUntilFailure):
"""
Test that the run-until-failure feature works correctly with the unclean
error suppressor.
"""
def setUp(self):
TestUntilFailure.setUp(self)
self.runner = runner.TrialRunner(reporter.Reporter, stream=self.stream,
uncleanWarnings=True)
def _getFailures(self, result):
"""
Get the number of failures that were reported to a result that
is wrapped in an UncleanFailureWrapper.
"""
return len(result._originalReporter.failures)
class BreakingSuite(runner.TestSuite):
"""
A L{TestSuite} that logs an error when it is run.
"""
def run(self, result):
try:
raise RuntimeError("error that occurs outside of a test")
except RuntimeError:
log.err(failure.Failure())
class TestLoggedErrors(unittest.TestCase):
"""
It is possible for an error generated by a test to be logged I{outside} of
any test. The log observers constructed by L{TestCase} won't catch these
errors. Here we try to generate such errors and ensure they are reported to
a L{TestResult} object.
"""
def tearDown(self):
self.flushLoggedErrors(RuntimeError)
def test_construct(self):
"""
Check that we can construct a L{runner.LoggedSuite} and that it
starts empty.
"""
suite = runner.LoggedSuite()
self.assertEqual(suite.countTestCases(), 0)
def test_capturesError(self):
"""
        Check that a L{LoggedSuite} reports any logged errors to its result.
"""
result = reporter.TestResult()
suite = runner.LoggedSuite([BreakingSuite()])
suite.run(result)
self.assertEqual(len(result.errors), 1)
self.assertEqual(result.errors[0][0].id(), runner.NOT_IN_TEST)
self.failUnless(result.errors[0][1].check(RuntimeError))
class TestTestHolder(unittest.TestCase):
def setUp(self):
self.description = "description"
self.holder = runner.TestHolder(self.description)
def test_holder(self):
"""
Check that L{runner.TestHolder} takes a description as a parameter
and that this description is returned by the C{id} and
C{shortDescription} methods.
"""
self.assertEqual(self.holder.id(), self.description)
self.assertEqual(self.holder.shortDescription(), self.description)
def test_holderImplementsITestCase(self):
"""
L{runner.TestHolder} implements L{ITestCase}.
"""
self.assertIdentical(self.holder, ITestCase(self.holder))
self.assertTrue(
verifyObject(ITestCase, self.holder),
"%r claims to provide %r but does not do so correctly."
% (self.holder, ITestCase))
def test_runsWithStandardResult(self):
"""
A L{runner.TestHolder} can run against the standard Python
C{TestResult}.
"""
result = pyunit.TestResult()
self.holder.run(result)
self.assertTrue(result.wasSuccessful())
self.assertEquals(1, result.testsRun)
class TestErrorHolder(TestTestHolder):
"""
Test L{runner.ErrorHolder} shares behaviour with L{runner.TestHolder}.
"""
def setUp(self):
self.description = "description"
# make a real Failure so we can construct ErrorHolder()
try:
1/0
except ZeroDivisionError:
error = failure.Failure()
self.holder = runner.ErrorHolder(self.description, error)
def test_runsWithStandardResult(self):
"""
A L{runner.ErrorHolder} can run against the standard Python
C{TestResult}.
"""
result = pyunit.TestResult()
self.holder.run(result)
self.assertFalse(result.wasSuccessful())
self.assertEquals(1, result.testsRun)
class TestMalformedMethod(unittest.TestCase):
"""
    Test that trial copes when test methods don't have correct signatures.
"""
class ContainMalformed(unittest.TestCase):
"""
This TestCase holds malformed test methods that trial should handle.
"""
def test_foo(self, blah):
pass
def test_bar():
pass
test_spam = defer.deferredGenerator(test_bar)
def _test(self, method):
"""
        Wrapper for one of the test methods of L{ContainMalformed}.
"""
stream = StringIO.StringIO()
trialRunner = runner.TrialRunner(reporter.Reporter, stream=stream)
test = TestMalformedMethod.ContainMalformed(method)
result = trialRunner.run(test)
self.failUnlessEqual(result.testsRun, 1)
self.failIf(result.wasSuccessful())
self.failUnlessEqual(len(result.errors), 1)
def test_extraArg(self):
"""
Test when the method has extra (useless) arguments.
"""
self._test('test_foo')
def test_noArg(self):
"""
        Test when the method doesn't even have self as an argument.
"""
self._test('test_bar')
def test_decorated(self):
"""
Test a decorated method also fails.
"""
self._test('test_spam')
class DestructiveTestSuiteTestCase(unittest.TestCase):
"""
Test for L{runner.DestructiveTestSuite}.
"""
def test_basic(self):
"""
        The destructive test suite should run the tests normally.
"""
called = []
class MockTest(unittest.TestCase):
def test_foo(test):
called.append(True)
test = MockTest('test_foo')
result = reporter.TestResult()
suite = runner.DestructiveTestSuite([test])
self.assertEquals(called, [])
suite.run(result)
self.assertEquals(called, [True])
self.assertEquals(suite.countTestCases(), 0)
def test_shouldStop(self):
"""
Test the C{shouldStop} management: raising a C{KeyboardInterrupt} must
interrupt the suite.
"""
called = []
class MockTest(unittest.TestCase):
def test_foo1(test):
called.append(1)
def test_foo2(test):
raise KeyboardInterrupt()
def test_foo3(test):
called.append(2)
result = reporter.TestResult()
loader = runner.TestLoader()
loader.suiteFactory = runner.DestructiveTestSuite
suite = loader.loadClass(MockTest)
self.assertEquals(called, [])
suite.run(result)
self.assertEquals(called, [1])
# The last test shouldn't have been run
self.assertEquals(suite.countTestCases(), 1)
def test_cleanup(self):
"""
        Checks that the test suite cleans up its tests during the run, so that
        it ends empty.
"""
class MockTest(unittest.TestCase):
def test_foo(test):
pass
test = MockTest('test_foo')
result = reporter.TestResult()
suite = runner.DestructiveTestSuite([test])
self.assertEquals(suite.countTestCases(), 1)
suite.run(result)
self.assertEquals(suite.countTestCases(), 0)
class TestRunnerDeprecation(unittest.TestCase):
class FakeReporter(reporter.Reporter):
"""
Fake reporter that does *not* implement done() but *does* implement
printErrors, separator, printSummary, stream, write and writeln
without deprecations.
"""
done = None
separator = None
stream = None
def printErrors(self, *args):
pass
def printSummary(self, *args):
pass
def write(self, *args):
pass
def writeln(self, *args):
pass
def test_reporterDeprecations(self):
"""
The runner emits a warning if it is using a result that doesn't
implement 'done'.
"""
trialRunner = runner.TrialRunner(None)
result = self.FakeReporter()
trialRunner._makeResult = lambda: result
def f():
# We have to use a pyunit test, otherwise we'll get deprecation
# warnings about using iterate() in a test.
trialRunner.run(pyunit.TestCase('id'))
self.assertWarns(
DeprecationWarning,
"%s should implement done() but doesn't. Falling back to "
"printErrors() and friends." % reflect.qual(result.__class__),
__file__, f)
| apache-2.0 |
iminrhythm/iirmerl | p2btstmp.py | 1 | 2871 | # From http://gbeced.github.io/pyalgotrade/docs/v0.17/html/bitstamp_example.html
# Accessed 06/25/16 @ 1046Z
from pyalgotrade.bitstamp import barfeed
from pyalgotrade.bitstamp import broker
from pyalgotrade import strategy
from pyalgotrade.technical import ma
from pyalgotrade.technical import cross
class Strategy(strategy.BaseStrategy):
def __init__(self, feed, brk):
strategy.BaseStrategy.__init__(self, feed, brk)
smaPeriod = 20
self.__instrument = "BTC"
self.__prices = feed[self.__instrument].getCloseDataSeries()
self.__sma = ma.SMA(self.__prices, smaPeriod)
self.__bid = None
self.__ask = None
self.__position = None
self.__posSize = 0.05
# Subscribe to order book update events to get bid/ask prices to trade.
feed.getOrderBookUpdateEvent().subscribe(self.__onOrderBookUpdate)
def __onOrderBookUpdate(self, orderBookUpdate):
bid = orderBookUpdate.getBidPrices()[0]
ask = orderBookUpdate.getAskPrices()[0]
if bid != self.__bid or ask != self.__ask:
self.__bid = bid
self.__ask = ask
self.info("Order book updated. Best bid: %s. Best ask: %s" % (self.__bid, self.__ask))
def onEnterOk(self, position):
self.info("Position opened at %s" % (position.getEntryOrder().getExecutionInfo().getPrice()))
def onEnterCanceled(self, position):
self.info("Position entry canceled")
self.__position = None
def onExitOk(self, position):
self.__position = None
self.info("Position closed at %s" % (position.getExitOrder().getExecutionInfo().getPrice()))
def onExitCanceled(self, position):
# If the exit was canceled, re-submit it.
self.__position.exitLimit(self.__bid)
def onBars(self, bars):
bar = bars[self.__instrument]
self.info("Price: %s. Volume: %s." % (bar.getClose(), bar.getVolume()))
# Wait until we get the current bid/ask prices.
if self.__ask is None:
return
# If a position was not opened, check if we should enter a long position.
if self.__position is None:
if cross.cross_above(self.__prices, self.__sma) > 0:
self.info("Entry signal. Buy at %s" % (self.__ask))
self.__position = self.enterLongLimit(self.__instrument, self.__ask, self.__posSize, True)
# Check if we have to close the position.
elif not self.__position.exitActive() and cross.cross_below(self.__prices, self.__sma) > 0:
self.info("Exit signal. Sell at %s" % (self.__bid))
self.__position.exitLimit(self.__bid)
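# Illustrative sketch of the signal logic above (hypothetical data): with
# smaPeriod = 20, a close series moving from below to above its 20-bar SMA
# makes cross.cross_above() return a positive count, so a long limit order
# for 0.05 BTC is placed at the best ask; a later cross below the SMA exits
# at the best bid. Only one position is held at a time.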
def main():
barFeed = barfeed.LiveTradeFeed()
brk = broker.PaperTradingBroker(1000, barFeed)
strat = Strategy(barFeed, brk)
strat.run()
if __name__ == "__main__":
main()
| mit |
GiedriusM/openthread | tests/scripts/thread-cert/Cert_6_1_07_EDSynchronization.py | 5 | 4724 | #!/usr/bin/python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import node
LEADER = 1
ROUTER1 = 2
ED = 3
ROUTER2 = 4
ROUTER3 = 5
class Cert_6_1_7_EDSynchronization(unittest.TestCase):
def setUp(self):
self.nodes = {}
for i in range(1,6):
self.nodes[i] = node.Node(i)
self.nodes[LEADER].set_panid(0xface)
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[ED].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER2].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[ROUTER1].set_panid(0xface)
self.nodes[ROUTER1].set_mode('rsdn')
self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER1].add_whitelist(self.nodes[ED].get_addr64())
self.nodes[ROUTER1].add_whitelist(self.nodes[ROUTER3].get_addr64())
self.nodes[ROUTER1].enable_whitelist()
self.nodes[ROUTER1].set_router_selection_jitter(1)
self.nodes[ED].set_panid(0xface)
self.nodes[ED].set_mode('rsn')
self.nodes[ED].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ED].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[ED].add_whitelist(self.nodes[ROUTER2].get_addr64())
self.nodes[ED].add_whitelist(self.nodes[ROUTER3].get_addr64())
self.nodes[ED].enable_whitelist()
self.nodes[ROUTER2].set_panid(0xface)
self.nodes[ROUTER2].set_mode('rsdn')
self.nodes[ROUTER2].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER2].add_whitelist(self.nodes[ED].get_addr64())
self.nodes[ROUTER2].add_whitelist(self.nodes[ROUTER3].get_addr64())
self.nodes[ROUTER2].enable_whitelist()
self.nodes[ROUTER2].set_router_selection_jitter(1)
self.nodes[ROUTER3].set_panid(0xface)
self.nodes[ROUTER3].set_mode('rsdn')
self.nodes[ROUTER3].add_whitelist(self.nodes[ED].get_addr64())
self.nodes[ROUTER3].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[ROUTER3].add_whitelist(self.nodes[ROUTER2].get_addr64())
self.nodes[ROUTER3].enable_whitelist()
self.nodes[ROUTER3].set_router_selection_jitter(1)
def tearDown(self):
for node in list(self.nodes.values()):
node.stop()
del self.nodes
def test(self):
self.nodes[LEADER].start()
self.nodes[LEADER].set_state('leader')
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ED].start()
time.sleep(5)
self.assertEqual(self.nodes[ED].get_state(), 'child')
self.nodes[ROUTER1].start()
time.sleep(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
self.nodes[ROUTER2].start()
time.sleep(5)
self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')
self.nodes[ROUTER3].start()
time.sleep(5)
self.assertEqual(self.nodes[ROUTER3].get_state(), 'router')
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
PlutoniumHeart/ITK | Modules/ThirdParty/pygccxml/src/pygccxml/declarations/cpptypes.py | 1 | 25524 | # Copyright 2014-2016 Insight Software Consortium.
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
"""
defines classes, that describe C++ types
"""
from . import algorithms_cache
class type_t(object):
"""base class for all types"""
def __init__(self):
object.__init__(self)
self.cache = algorithms_cache.type_algs_cache_t()
self._byte_size = 0
self._byte_align = 0
def __str__(self):
res = self.decl_string
if res[:2] == "::":
res = res[2:]
return res
def __eq__(self, other):
if not isinstance(other, type_t):
return False
return self.decl_string == other.decl_string
def __hash__(self):
return hash(self.decl_string)
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if not isinstance(other, self.__class__):
return self.__class__.__name__ < other.__class__.__name__
return self.decl_string < other.decl_string
def build_decl_string(self, with_defaults=True):
raise NotImplementedError()
@property
def decl_string(self):
if self.cache.decl_string is None:
self.cache.decl_string = self.build_decl_string()
return self.cache.decl_string
@property
def partial_decl_string(self):
if self.cache.partial_decl_string is None:
self.cache.partial_decl_string = self.build_decl_string(False)
return self.cache.partial_decl_string
def _clone_impl(self):
raise NotImplementedError()
def clone(self):
"""returns new instance of the type"""
answer = self._clone_impl()
return answer
@property
def byte_size(self):
"""Size of this type in bytes @type: int"""
return self._byte_size
@byte_size.setter
def byte_size(self, new_byte_size):
self._byte_size = new_byte_size
@property
def byte_align(self):
"""Alignment of this type in bytes @type: int"""
return self._byte_align
@byte_align.setter
def byte_align(self, new_byte_align):
self._byte_align = new_byte_align
# There are cases when GCC-XML reports something like this
# <Unimplemented id="_9482" tree_code="188" \
# tree_code_name="template_type_parm" node="0xcc4d5b0"/>
# In this case I will use this as type
class dummy_type_t(type_t):
"""provides :class:`type_t` interface for a string, that defines C++ type.
This class could be very useful in the code generator.
"""
def __init__(self, decl_string):
type_t.__init__(self)
self._decl_string = decl_string
def build_decl_string(self, with_defaults=True):
return self._decl_string
def _clone_impl(self):
return dummy_type_t(self._decl_string)
class unknown_t(type_t):
"""
    type that represents all C++ types
    that could not be parsed by GCC-XML
"""
def __init__(self):
type_t.__init__(self)
def build_decl_string(self, with_defaults=True):
return '?unknown?'
def _clone_impl(self):
return self
class ellipsis_t(type_t):
"""type, that represents "..." in function definition"""
def __init__(self):
type_t.__init__(self)
def build_decl_string(self, with_defaults=True):
return '...'
def _clone_impl(self):
return self
##########################################################################
# Fundamental types:
class fundamental_t(type_t):
"""base class for all fundamental, build-in types"""
def __init__(self, name):
type_t.__init__(self)
self._name = name
def build_decl_string(self, with_defaults=True):
return self._name
def _clone_impl(self):
return self
class java_fundamental_t(fundamental_t):
"""base class for all JNI defined fundamental types"""
def __init__(self, name):
fundamental_t.__init__(self, name)
class void_t(fundamental_t):
"""represents void type"""
CPPNAME = 'void'
def __init__(self):
fundamental_t.__init__(self, void_t.CPPNAME)
class char_t(fundamental_t):
"""represents char type"""
CPPNAME = 'char'
def __init__(self):
fundamental_t.__init__(self, char_t.CPPNAME)
class signed_char_t(fundamental_t):
"""represents signed char type"""
CPPNAME = 'signed char'
def __init__(self):
fundamental_t.__init__(self, signed_char_t.CPPNAME)
class unsigned_char_t(fundamental_t):
"""represents unsigned char type"""
CPPNAME = 'unsigned char'
def __init__(self):
fundamental_t.__init__(self, unsigned_char_t.CPPNAME)
class wchar_t(fundamental_t):
"""represents wchar_t type"""
CPPNAME = 'wchar_t'
def __init__(self):
fundamental_t.__init__(self, wchar_t.CPPNAME)
class short_int_t(fundamental_t):
"""represents short int type"""
CPPNAME = 'short int'
def __init__(self):
fundamental_t.__init__(self, short_int_t.CPPNAME)
class short_unsigned_int_t(fundamental_t):
"""represents short unsigned int type"""
CPPNAME = 'short unsigned int'
def __init__(self):
fundamental_t.__init__(self, short_unsigned_int_t.CPPNAME)
class bool_t(fundamental_t):
"""represents bool type"""
CPPNAME = 'bool'
def __init__(self):
fundamental_t.__init__(self, bool_t.CPPNAME)
class int_t(fundamental_t):
"""represents int type"""
CPPNAME = 'int'
def __init__(self):
fundamental_t.__init__(self, int_t.CPPNAME)
class unsigned_int_t(fundamental_t):
"""represents unsigned int type"""
CPPNAME = 'unsigned int'
def __init__(self):
fundamental_t.__init__(self, unsigned_int_t.CPPNAME)
class long_int_t(fundamental_t):
"""represents long int type"""
CPPNAME = 'long int'
def __init__(self):
fundamental_t.__init__(self, long_int_t.CPPNAME)
class long_unsigned_int_t(fundamental_t):
"""represents long unsigned int type"""
CPPNAME = 'long unsigned int'
def __init__(self):
fundamental_t.__init__(self, long_unsigned_int_t.CPPNAME)
class long_long_int_t(fundamental_t):
"""represents long long int type"""
CPPNAME = 'long long int'
def __init__(self):
fundamental_t.__init__(self, long_long_int_t.CPPNAME)
class long_long_unsigned_int_t(fundamental_t):
"""represents long long unsigned int type"""
CPPNAME = 'long long unsigned int'
def __init__(self):
fundamental_t.__init__(self, long_long_unsigned_int_t.CPPNAME)
class float_t(fundamental_t):
"""represents float type"""
CPPNAME = 'float'
def __init__(self):
fundamental_t.__init__(self, float_t.CPPNAME)
class double_t(fundamental_t):
"""represents double type"""
CPPNAME = 'double'
def __init__(self):
fundamental_t.__init__(self, double_t.CPPNAME)
class long_double_t(fundamental_t):
"""represents long double type"""
CPPNAME = 'long double'
def __init__(self):
fundamental_t.__init__(self, long_double_t.CPPNAME)
class complex_double_t(fundamental_t):
"""represents complex double type"""
CPPNAME = 'complex double'
def __init__(self):
fundamental_t.__init__(self, complex_double_t.CPPNAME)
class complex_long_double_t(fundamental_t):
"""represents complex long double type"""
CPPNAME = 'complex long double'
def __init__(self):
fundamental_t.__init__(self, complex_long_double_t.CPPNAME)
class complex_float_t(fundamental_t):
"""represents complex float type"""
CPPNAME = 'complex float'
def __init__(self):
fundamental_t.__init__(self, complex_float_t.CPPNAME)
class jbyte_t(java_fundamental_t):
"""represents jbyte type"""
JNAME = 'jbyte'
def __init__(self):
java_fundamental_t.__init__(self, jbyte_t.JNAME)
class jshort_t(java_fundamental_t):
"""represents jshort type"""
JNAME = 'jshort'
def __init__(self):
java_fundamental_t.__init__(self, jshort_t.JNAME)
class jint_t(java_fundamental_t):
"""represents jint type"""
JNAME = 'jint'
def __init__(self):
java_fundamental_t.__init__(self, jint_t.JNAME)
class jlong_t(java_fundamental_t):
"""represents jlong type"""
JNAME = 'jlong'
def __init__(self):
java_fundamental_t.__init__(self, jlong_t.JNAME)
class jfloat_t(java_fundamental_t):
"""represents jfloat type"""
JNAME = 'jfloat'
def __init__(self):
java_fundamental_t.__init__(self, jfloat_t.JNAME)
class jdouble_t(java_fundamental_t):
"""represents jdouble type"""
JNAME = 'jdouble'
def __init__(self):
java_fundamental_t.__init__(self, jdouble_t.JNAME)
class jchar_t(java_fundamental_t):
"""represents jchar type"""
JNAME = 'jchar'
def __init__(self):
java_fundamental_t.__init__(self, jchar_t.JNAME)
class jboolean_t(java_fundamental_t):
"""represents jboolean type"""
JNAME = 'jboolean'
def __init__(self):
java_fundamental_t.__init__(self, jboolean_t.JNAME)
class int128_t(fundamental_t):
"""represents __int128_t type"""
CPPNAME = '__int128_t'
def __init__(self):
fundamental_t.__init__(self, int128_t.CPPNAME)
class uint128_t(fundamental_t):
"""represents __uint128_t type"""
CPPNAME = '__uint128_t'
def __init__(self):
fundamental_t.__init__(self, uint128_t.CPPNAME)
FUNDAMENTAL_TYPES = {
# adding java types
void_t.CPPNAME: void_t(),
char_t.CPPNAME: char_t(),
signed_char_t.CPPNAME: signed_char_t(),
unsigned_char_t.CPPNAME: unsigned_char_t(),
wchar_t.CPPNAME: wchar_t(),
short_int_t.CPPNAME: short_int_t(),
'signed ' + short_int_t.CPPNAME: short_int_t(),
short_unsigned_int_t.CPPNAME: short_unsigned_int_t(),
bool_t.CPPNAME: bool_t(),
int_t.CPPNAME: int_t(),
'signed ' + int_t.CPPNAME: int_t(),
unsigned_int_t.CPPNAME: unsigned_int_t(),
long_int_t.CPPNAME: long_int_t(),
long_unsigned_int_t.CPPNAME: long_unsigned_int_t(),
long_long_int_t.CPPNAME: long_long_int_t(),
long_long_unsigned_int_t.CPPNAME: long_long_unsigned_int_t(),
int128_t.CPPNAME: int128_t(),
uint128_t.CPPNAME: uint128_t(),
float_t.CPPNAME: float_t(),
double_t.CPPNAME: double_t(),
long_double_t.CPPNAME: long_double_t(),
complex_long_double_t.CPPNAME: complex_long_double_t(),
complex_double_t.CPPNAME: complex_double_t(),
complex_float_t.CPPNAME: complex_float_t(),
jbyte_t.JNAME: jbyte_t(),
jshort_t.JNAME: jshort_t(),
jint_t.JNAME: jint_t(),
jlong_t.JNAME: jlong_t(),
jfloat_t.JNAME: jfloat_t(),
jdouble_t.JNAME: jdouble_t(),
jchar_t.JNAME: jchar_t(),
jboolean_t.JNAME: jboolean_t(),
'__java_byte': jbyte_t(),
'__java_short': jshort_t(),
'__java_int': jint_t(),
'__java_long': jlong_t(),
'__java_float': jfloat_t(),
'__java_double': jdouble_t(),
'__java_char': jchar_t(),
'__java_boolean': jboolean_t()
}
"""
defines a mapping between fundamental type name and its synonym to the instance
of class that describes the type
"""
def _f(x, with_defaults):
"""
A small helper function.
"""
return x.build_decl_string(with_defaults)
##########################################################################
# Compound types:
class compound_t(type_t):
"""class that allows to represent compound types like `const int*`"""
def __init__(self, base):
type_t.__init__(self)
self._base = base
@property
def base(self):
"""reference to internal/base class"""
return self._base
@base.setter
def base(self, new_base):
self._base = new_base
class volatile_t(compound_t):
"""represents `volatile whatever` type"""
def __init__(self, base):
compound_t.__init__(self, base)
def build_decl_string(self, with_defaults=True):
return self.base.build_decl_string(with_defaults) + ' volatile'
def _clone_impl(self):
return volatile_t(self.base.clone())
class restrict_t(compound_t):
"""represents `restrict whatever` type"""
# The restrict keyword can be considered an extension to the strict
# aliasing rule. It allows the programmer to declare that pointers which
    # share the same type (or were otherwise validly created) do not alias
    # each other. By using restrict the programmer can declare that any loads
# and stores through the qualified pointer (or through another pointer
# copied either directly or indirectly from the restricted pointer) are
# the only loads and stores to the same address during the lifetime of
# the pointer. In other words, the pointer is not aliased by any pointers
# other than its own copies.
def __init__(self, base):
compound_t.__init__(self, base)
def build_decl_string(self, with_defaults=True):
return '__restrict__ ' + self.base.build_decl_string(with_defaults)
def _clone_impl(self):
return restrict_t(self.base.clone())
class const_t(compound_t):
"""represents `whatever const` type"""
def __init__(self, base):
compound_t.__init__(self, base)
def build_decl_string(self, with_defaults=True):
return self.base.build_decl_string(with_defaults) + ' const'
def _clone_impl(self):
return const_t(self.base.clone())
class pointer_t(compound_t):
"""represents `whatever*` type"""
def __init__(self, base):
compound_t.__init__(self, base)
def build_decl_string(self, with_defaults=True):
return self.base.build_decl_string(with_defaults) + ' *'
def _clone_impl(self):
return pointer_t(self.base.clone())
class reference_t(compound_t):
"""represents `whatever&` type"""
def __init__(self, base):
compound_t.__init__(self, base)
def build_decl_string(self, with_defaults=True):
return self.base.build_decl_string(with_defaults) + ' &'
def _clone_impl(self):
return reference_t(self.base.clone())
class array_t(compound_t):
"""represents C++ array type"""
SIZE_UNKNOWN = -1
def __init__(self, base, size):
compound_t.__init__(self, base)
self._size = size
@property
def size(self):
"""returns array size"""
return self._size
@size.setter
def size(self, size):
"""sometimes there is a need to update the size of the array"""
self.cache.reset()
self._size = size
def build_decl_string(self, with_defaults=True):
# return self.base.build_decl_string(with_defaults) + '[%d]' %
# self.size
return self.__bds_for_multi_dim_arrays(None, with_defaults)
def __bds_for_multi_dim_arrays(self, parent_dims=None, with_defaults=True):
if parent_dims:
parent_dims.append(self.size)
else:
parent_dims = [self.size]
if isinstance(self.base, array_t):
return self.base.__bds_for_multi_dim_arrays(
parent_dims,
with_defaults)
else:
tmp = []
for s in parent_dims:
tmp.append('[%d]' % s)
return self.base.build_decl_string(with_defaults) + ''.join(tmp)
def _clone_impl(self):
return array_t(self.base.clone(), self.size)
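# Example for array_t (sketch): nesting array_t instances renders a C-style
# multi-dimensional declaration, outermost dimension first.
#
#   arr = array_t(array_t(FUNDAMENTAL_TYPES['int'], 3), 2)
#   arr.build_decl_string()   # -> 'int[2][3]'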
class calldef_type_t(object):
"""base class for all types that describes "callable" declaration"""
def __init__(self, return_type=None, arguments_types=None):
object.__init__(self)
self._return_type = return_type
if arguments_types is None:
arguments_types = []
self._arguments_types = arguments_types
@property
def return_type(self):
"""reference to :class:`return type <type_t>`"""
return self._return_type
@return_type.setter
def return_type(self, new_return_type):
self._return_type = new_return_type
@property
def arguments_types(self):
"""list of argument :class:`types <type_t>`"""
return self._arguments_types
@arguments_types.setter
def arguments_types(self, new_arguments_types):
self._arguments_types = new_arguments_types
@property
def has_ellipsis(self):
        # bool() so an empty argument list yields False rather than []
        return bool(self.arguments_types) and isinstance(
            self.arguments_types[-1], ellipsis_t)
class free_function_type_t(type_t, calldef_type_t):
"""describes free function type"""
NAME_TEMPLATE = '%(return_type)s (*)( %(arguments)s )'
TYPEDEF_NAME_TEMPLATE = (
'%(return_type)s ( *%(typedef_name)s )( %(arguments)s )')
def __init__(self, return_type=None, arguments_types=None):
type_t.__init__(self)
calldef_type_t.__init__(self, return_type, arguments_types)
@staticmethod
def create_decl_string(
return_type, arguments_types, with_defaults=True):
"""
Returns free function type
:param return_type: function return type
:type return_type: :class:`type_t`
:param arguments_types: list of argument :class:`type <type_t>`
:rtype: :class:`free_function_type_t`
"""
return free_function_type_t.NAME_TEMPLATE % {
'return_type': return_type.build_decl_string(with_defaults),
'arguments': ','.join(
[_f(x, with_defaults) for x in arguments_types])}
def build_decl_string(self, with_defaults=True):
return self.create_decl_string(
self.return_type,
self.arguments_types,
with_defaults)
def _clone_impl(self):
rt_clone = None
if self.return_type:
rt_clone = self.return_type.clone()
return free_function_type_t(
rt_clone, [
arg.clone() for arg in self.arguments_types])
# TODO: create real typedef
def create_typedef(self, typedef_name, unused=None, with_defaults=True):
"""returns string, that contains valid C++ code, that defines typedef
to function type
:param name: the desired name of typedef
"""
return free_function_type_t.TYPEDEF_NAME_TEMPLATE % {
'typedef_name': typedef_name,
'return_type': self.return_type.build_decl_string(with_defaults),
'arguments': ','.join(
[_f(x, with_defaults) for x in self.arguments_types])}
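# Example for free_function_type_t (sketch): the templates above yield
#
#   ft = free_function_type_t(
#       return_type=FUNDAMENTAL_TYPES['int'],
#       arguments_types=[FUNDAMENTAL_TYPES['int']] * 2)
#   ft.build_decl_string()          # -> 'int (*)( int,int )'
#   ft.create_typedef('binary_op')  # -> 'int ( *binary_op )( int,int )'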
class member_function_type_t(type_t, calldef_type_t):
"""describes member function type"""
NAME_TEMPLATE = (
'%(return_type)s ( %(class)s::* )( %(arguments)s )%(has_const)s')
TYPEDEF_NAME_TEMPLATE = (
'%(return_type)s ( %(class)s::*%(typedef_name)s' +
')( %(arguments)s ) %(has_const)s')
def __init__(
self,
class_inst=None,
return_type=None,
arguments_types=None,
has_const=False):
type_t.__init__(self)
calldef_type_t.__init__(self, return_type, arguments_types)
self._has_const = has_const
self._class_inst = class_inst
@property
def has_const(self):
"""describes, whether function has const modifier"""
return self._has_const
@has_const.setter
def has_const(self, has_const):
self._has_const = has_const
@property
def class_inst(self):
"""reference to parent :class:`class <declaration_t>`"""
return self._class_inst
@class_inst.setter
def class_inst(self, class_inst):
self._class_inst = class_inst
# TODO: create real typedef
def create_typedef(
self,
typedef_name,
class_alias=None,
with_defaults=True):
"""creates typedef to the function type
:param typedef_name: desired type name
:rtype: string
"""
has_const_str = ''
if self.has_const:
has_const_str = 'const'
if None is class_alias:
if with_defaults:
class_alias = self.class_inst.decl_string
else:
class_alias = self.class_inst.partial_decl_string
return member_function_type_t.TYPEDEF_NAME_TEMPLATE % {
'typedef_name': typedef_name,
'return_type': self.return_type.build_decl_string(with_defaults),
'class': class_alias,
'arguments': ','.join(
[_f(x, with_defaults) for x in self.arguments_types]),
'has_const': has_const_str}
def create(self):
return self.build_decl_string(
self.return_type,
self.class_inst.decl_string,
self.arguments_types,
self.has_const)
@staticmethod
def create_decl_string(
return_type,
class_decl_string,
arguments_types,
has_const,
with_defaults=True):
has_const_str = ''
if has_const:
has_const_str = 'const'
return_type_decl_string = ''
if return_type:
return_type_decl_string = return_type.build_decl_string(
with_defaults)
return member_function_type_t.NAME_TEMPLATE % {
'return_type': return_type_decl_string,
'class': class_decl_string,
'arguments': ','.join(
[_f(x, with_defaults) for x in arguments_types]),
'has_const': has_const_str}
def build_decl_string(self, with_defaults=True):
return self.create_decl_string(
self.return_type,
self.class_inst.decl_string,
self.arguments_types,
self.has_const,
with_defaults)
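    # Example (sketch, assuming ``klass`` is a declarated_t whose decl_string
    # is 'Foo'):
    #
    #   mft = member_function_type_t(class_inst=klass,
    #                                return_type=FUNDAMENTAL_TYPES['void'],
    #                                arguments_types=[], has_const=True)
    #   mft.build_decl_string()   # -> 'void ( Foo::* )(  )const'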
def _clone_impl(self):
rt_clone = None
if self.return_type:
rt_clone = self.return_type.clone()
return member_function_type_t(
self.class_inst, rt_clone, [
arg.clone() for arg in self.arguments_types], self.has_const)
class member_variable_type_t(compound_t):
"""describes member variable type"""
NAME_TEMPLATE = '%(type)s ( %(class)s::* )'
def __init__(self, class_inst=None, variable_type=None):
compound_t.__init__(self, class_inst)
self._mv_type = variable_type
@property
def variable_type(self):
"""describes member variable :class:`type <type_t>`"""
return self._mv_type
@variable_type.setter
def variable_type(self, new_type):
self._mv_type = new_type
def build_decl_string(self, with_defaults=True):
return self.NAME_TEMPLATE % {
'type': self.variable_type.build_decl_string(with_defaults),
'class': self.base.build_decl_string(with_defaults)}
def _clone_impl(self):
return member_variable_type_t(
class_inst=self.base,
variable_type=self.variable_type.clone())
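# Example for member_variable_type_t (sketch, assuming ``klass`` renders as
# 'Foo'):
#
#   mvt = member_variable_type_t(class_inst=klass,
#                                variable_type=FUNDAMENTAL_TYPES['int'])
#   mvt.build_decl_string()   # -> 'int ( Foo::* )'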
##########################################################################
# declarated types:
class declarated_t(type_t):
"""class that binds between to hierarchies: :class:`type_t`
and :class:`declaration_t`"""
def __init__(self, declaration):
type_t.__init__(self)
self._declaration = declaration
@property
def declaration(self):
"""reference to :class:`declaration_t`"""
return self._declaration
@declaration.setter
def declaration(self, new_declaration):
self._declaration = new_declaration
def build_decl_string(self, with_defaults=True):
if with_defaults:
return self._declaration.decl_string
else:
return self._declaration.partial_decl_string
def _clone_impl(self):
return declarated_t(self._declaration)
@property
def byte_size(self):
"""Size of this type in bytes @type: int"""
return self._declaration.byte_size
@property
def byte_align(self):
"""alignment of this type in bytes @type: int"""
return self._declaration.byte_align
class type_qualifiers_t(object):
"""contains additional information about type: mutable, static, extern"""
def __init__(self, has_static=False, has_mutable=False):
self._has_static = has_static
self._has_mutable = has_mutable
def __eq__(self, other):
if not isinstance(other, type_qualifiers_t):
return False
return self.has_static == other.has_static \
and self.has_mutable == other.has_mutable
    def __hash__(self):
        # keep the hash consistent with __eq__, which compares field values
        return hash((self.has_static, self.has_mutable))
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if not isinstance(other, type_qualifiers_t):
return object.__lt__(self, other)
return self.has_static < other.has_static \
and self.has_mutable < other.has_mutable
@property
def has_static(self):
return self._has_static
@has_static.setter
def has_static(self, has_static):
self._has_static = has_static
@property
def has_extern(self):
"""synonym to static"""
return self.has_static
@has_extern.setter
def has_extern(self, has_extern):
self.has_static = has_extern
@property
def has_mutable(self):
return self._has_mutable
@has_mutable.setter
def has_mutable(self, has_mutable):
self._has_mutable = has_mutable
| apache-2.0 |
brainstorm-ai/DIGITS | digits/model/images/classification/test_views.py | 1 | 39432 | # Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import itertools
import json
import os
import re
import shutil
import tempfile
import time
import unittest
import urllib
# Find the best implementation available
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from bs4 import BeautifulSoup
import flask
import mock
import PIL.Image
from urlparse import urlparse
from digits.config import config_value
import digits.dataset.images.classification.test_views
import digits.test_views
import digits.webapp
# Must import after importing digits.config
import caffe_pb2
# May be too short on a slow system
TIMEOUT_DATASET = 45
TIMEOUT_MODEL = 60
################################################################################
# Base classes (they don't start with "Test" so nose won't run them)
################################################################################
class BaseViewsTest(digits.test_views.BaseViewsTest):
"""
Provides some functions
"""
CAFFE_NETWORK = \
"""
layer {
name: "hidden"
type: 'InnerProduct'
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "output"
bottom: "label"
top: "accuracy"
include { stage: "val" }
}
layer {
name: "softmax"
type: "Softmax"
bottom: "output"
top: "softmax"
include { stage: "deploy" }
}
"""
TORCH_NETWORK = \
"""
return function(p)
-- adjust to number of classes
local nclasses = p.nclasses or 1
-- model should adjust to any 3D input
local nDim = 1
if p.inputShape then p.inputShape:apply(function(x) nDim=nDim*x end) end
local model = nn.Sequential()
model:add(nn.View(-1):setNumInputDims(3)) -- c*h*w -> chw (flattened)
-- set all weights and biases to zero as this speeds learning up
-- for the type of problem we're trying to solve in this test
local linearLayer = nn.Linear(nDim, nclasses)
linearLayer.weight:fill(0)
linearLayer.bias:fill(0)
model:add(linearLayer) -- chw -> nclasses
model:add(nn.LogSoftMax())
return {
model = model
}
end
"""
@classmethod
def setUpClass(cls):
super(BaseViewsTest, cls).setUpClass()
if cls.FRAMEWORK=='torch' and not config_value('torch_root'):
raise unittest.SkipTest('Torch not found')
@classmethod
def model_exists(cls, job_id):
return cls.job_exists(job_id, 'models')
@classmethod
def model_status(cls, job_id):
return cls.job_status(job_id, 'models')
@classmethod
def abort_model(cls, job_id):
return cls.abort_job(job_id, job_type='models')
@classmethod
def model_wait_completion(cls, job_id, **kwargs):
kwargs['job_type'] = 'models'
if 'timeout' not in kwargs:
kwargs['timeout'] = TIMEOUT_MODEL
return cls.job_wait_completion(job_id, **kwargs)
@classmethod
def delete_model(cls, job_id):
return cls.delete_job(job_id, job_type='models')
@classmethod
def network(cls):
return cls.TORCH_NETWORK if cls.FRAMEWORK=='torch' else cls.CAFFE_NETWORK
class BaseViewsTestWithDataset(BaseViewsTest,
digits.dataset.images.classification.test_views.BaseViewsTestWithDataset):
"""
Provides a dataset
"""
# Inherited classes may want to override these attributes
CROP_SIZE = None
TRAIN_EPOCHS = 1
SHUFFLE = False
LR_POLICY = None
LR_MULTISTEP_VALUES = None
LEARNING_RATE = None
@classmethod
def setUpClass(cls):
super(BaseViewsTestWithDataset, cls).setUpClass()
cls.created_models = []
@classmethod
def tearDownClass(cls):
        # delete any created models
for job_id in cls.created_models:
cls.delete_model(job_id)
super(BaseViewsTestWithDataset, cls).tearDownClass()
@classmethod
def create_model(cls, network=None, **kwargs):
"""
Create a model
Returns the job_id
Raise RuntimeError if job fails to create
Keyword arguments:
**kwargs -- data to be sent with POST request
"""
if network is None:
network = cls.network()
data = {
'model_name': 'test_model',
'dataset': cls.dataset_id,
'method': 'custom',
'custom_network': network,
'batch_size': 10,
'train_epochs': cls.TRAIN_EPOCHS,
'framework' : cls.FRAMEWORK,
'random_seed': 0xCAFEBABE,
'shuffle': 'true' if cls.SHUFFLE else 'false'
}
if cls.CROP_SIZE is not None:
data['crop_size'] = cls.CROP_SIZE
if cls.LR_POLICY is not None:
data['lr_policy'] = cls.LR_POLICY
if cls.LEARNING_RATE is not None:
data['learning_rate'] = cls.LEARNING_RATE
if cls.LR_MULTISTEP_VALUES is not None:
data['lr_multistep_values'] = cls.LR_MULTISTEP_VALUES
data.update(kwargs)
request_json = data.pop('json', False)
url = '/models/images/classification'
if request_json:
url += '.json'
rv = cls.app.post(url, data=data)
if request_json:
if rv.status_code != 200:
print json.loads(rv.data)
raise RuntimeError('Model creation failed with %s' % rv.status_code)
data = json.loads(rv.data)
if 'jobs' in data.keys():
return [j['id'] for j in data['jobs']]
else:
return data['id']
# expect a redirect
if not 300 <= rv.status_code <= 310:
print 'Status code:', rv.status_code
s = BeautifulSoup(rv.data, 'html.parser')
div = s.select('div.alert-danger')
if div:
print div[0]
else:
print rv.data
            raise RuntimeError('Failed to create model - status %s' % rv.status_code)
job_id = cls.job_id_from_response(rv)
assert cls.model_exists(job_id), 'model not found after successful creation'
cls.created_models.append(job_id)
return job_id
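    # Illustrative calls (sketch): extra keyword arguments are folded into the
    # POST payload, e.g.
    #   job_id = cls.create_model(train_epochs=4, snapshot_interval=2)
    #   job_ids = cls.create_model(json=True, learning_rate='[0.01, 0.02]')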
class BaseViewsTestWithModel(BaseViewsTestWithDataset):
"""
Provides a model
"""
@classmethod
def setUpClass(cls):
super(BaseViewsTestWithModel, cls).setUpClass()
cls.model_id = cls.create_model(json=True)
assert cls.model_wait_completion(cls.model_id) == 'Done', 'create failed'
class BaseTestViews(BaseViewsTest):
"""
Tests which don't require a dataset or a model
"""
def test_page_model_new(self):
rv = self.app.get('/models/images/classification/new')
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
assert 'New Image Classification Model' in rv.data, 'unexpected page format'
def test_nonexistent_model(self):
assert not self.model_exists('foo'), "model shouldn't exist"
def test_visualize_network(self):
rv = self.app.post('/models/visualize-network?framework='+self.FRAMEWORK,
data = {'custom_network': self.network()}
)
s = BeautifulSoup(rv.data, 'html.parser')
if rv.status_code != 200:
body = s.select('body')[0]
if 'InvocationException' in str(body):
raise unittest.SkipTest('GraphViz not installed')
raise AssertionError('POST failed with %s\n\n%s' % (rv.status_code, body))
image = s.select('img')
assert image is not None, "didn't return an image"
def test_customize(self):
rv = self.app.post('/models/customize?network=lenet&framework='+self.FRAMEWORK)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
class BaseTestCreation(BaseViewsTestWithDataset):
"""
Model creation tests
"""
def test_create_json(self):
job_id = self.create_model(json=True)
self.abort_model(job_id)
def test_create_delete(self):
job_id = self.create_model()
assert self.delete_model(job_id) == 200, 'delete failed'
assert not self.model_exists(job_id), 'model exists after delete'
def test_create_wait_delete(self):
job_id = self.create_model()
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
assert self.delete_model(job_id) == 200, 'delete failed'
assert not self.model_exists(job_id), 'model exists after delete'
def test_create_abort_delete(self):
job_id = self.create_model()
assert self.abort_model(job_id) == 200, 'abort failed'
assert self.delete_model(job_id) == 200, 'delete failed'
assert not self.model_exists(job_id), 'model exists after delete'
def test_snapshot_interval_2(self):
job_id = self.create_model(snapshot_interval=0.5)
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
rv = self.app.get('/models/%s.json' % job_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
assert len(content['snapshots']) > 1, 'should take >1 snapshot'
def test_snapshot_interval_0_5(self):
job_id = self.create_model(train_epochs=4, snapshot_interval=2)
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
rv = self.app.get('/models/%s.json' % job_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
assert len(content['snapshots']) == 2, 'should take 2 snapshots'
@unittest.skipIf(
not config_value('gpu_list'),
'no GPUs selected')
@unittest.skipIf(
not config_value('caffe_root')['cuda_enabled'],
'CUDA disabled')
@unittest.skipIf(
config_value('caffe_root')['multi_gpu'],
'multi-GPU enabled')
def test_select_gpu(self):
for index in config_value('gpu_list').split(','):
yield self.check_select_gpu, index
def check_select_gpu(self, gpu_index):
job_id = self.create_model(select_gpu=gpu_index)
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
@unittest.skipIf(
not config_value('gpu_list'),
'no GPUs selected')
@unittest.skipIf(
not config_value('caffe_root')['cuda_enabled'],
'CUDA disabled')
@unittest.skipIf(
not config_value('caffe_root')['multi_gpu'],
'multi-GPU disabled')
def test_select_gpus(self):
# test all possible combinations
gpu_list = config_value('gpu_list').split(',')
for i in xrange(len(gpu_list)):
for combination in itertools.combinations(gpu_list, i+1):
yield self.check_select_gpus, combination
def check_select_gpus(self, gpu_list):
job_id = self.create_model(select_gpus_list=','.join(gpu_list), batch_size=len(gpu_list))
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
def classify_one_for_job(self, job_id, test_misclassification = True):
# carry out one inference test per category in dataset
for category in self.imageset_paths.keys():
image_path = self.imageset_paths[category][0]
image_path = os.path.join(self.imageset_folder, image_path)
with open(image_path,'rb') as infile:
# StringIO wrapping is needed to simulate POST file upload.
image_upload = (StringIO(infile.read()), 'image.png')
rv = self.app.post(
'/models/images/classification/classify_one?job_id=%s' % job_id,
data = {
'image_file': image_upload,
}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
# gets an array of arrays [[confidence, label],...]
predictions = [p.get_text().split() for p in s.select('ul.list-group li')]
if test_misclassification:
assert predictions[0][1] == category, 'image misclassified'
def test_classify_one_mean_image(self):
# test the creation
job_id = self.create_model(use_mean = 'image')
assert self.model_wait_completion(job_id) == 'Done', 'job failed'
self.classify_one_for_job(job_id)
def test_classify_one_mean_pixel(self):
# test the creation
job_id = self.create_model(use_mean = 'pixel')
assert self.model_wait_completion(job_id) == 'Done', 'job failed'
self.classify_one_for_job(job_id)
def test_classify_one_mean_none(self):
# test the creation
job_id = self.create_model(use_mean = 'none')
assert self.model_wait_completion(job_id) == 'Done', 'job failed'
self.classify_one_for_job(job_id, False)
def test_retrain(self):
job1_id = self.create_model()
assert self.model_wait_completion(job1_id) == 'Done', 'first job failed'
rv = self.app.get('/models/%s.json' % job1_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
        assert len(content['snapshots']), 'should have at least one snapshot'
options = {
'method': 'previous',
'previous_networks': job1_id,
}
options['%s-snapshot' % job1_id] = content['snapshots'][-1]
job2_id = self.create_model(**options)
assert self.model_wait_completion(job2_id) == 'Done', 'second job failed'
def test_retrain_twice(self):
# retrain from a job which already had a pretrained model
job1_id = self.create_model()
assert self.model_wait_completion(job1_id) == 'Done', 'first job failed'
rv = self.app.get('/models/%s.json' % job1_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
        assert len(content['snapshots']), 'should have at least one snapshot'
options_2 = {
'method': 'previous',
'previous_networks': job1_id,
}
options_2['%s-snapshot' % job1_id] = content['snapshots'][-1]
job2_id = self.create_model(**options_2)
assert self.model_wait_completion(job2_id) == 'Done', 'second job failed'
options_3 = {
'method': 'previous',
'previous_networks': job2_id,
}
options_3['%s-snapshot' % job2_id] = -1
job3_id = self.create_model(**options_3)
assert self.model_wait_completion(job3_id) == 'Done', 'third job failed'
def test_bad_network_definition(self):
if self.FRAMEWORK == 'caffe':
bogus_net = """
layer {
name: "hidden"
type: 'BogusCode'
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
layer {
name: "softmax"
type: "Softmax"
bottom: "output"
top: "softmax"
include { stage: "deploy" }
}
"""
elif self.FRAMEWORK == 'torch':
bogus_net = """
local model = BogusCode(0)
return function(params)
return {
model = model
}
end
"""
job_id = self.create_model(json=True, network=bogus_net)
assert self.model_wait_completion(job_id) == 'Error', 'job should have failed'
job_info = self.job_info_html(job_id=job_id, job_type='models')
assert 'BogusCode' in job_info, "job_info: \n%s" % str(job_info)
def test_clone(self):
options_1 = {
'shuffle': True,
'snapshot_interval': 2.0,
'lr_step_size': 33.0,
'lr_inv_power': 0.5,
'lr_inv_gamma': 0.1,
'lr_poly_power': 3.0,
'lr_exp_gamma': 0.9,
'use_mean': 'image',
'lr_multistep_gamma': 0.5,
'lr_policy': 'exp',
'val_interval': 3.0,
'random_seed': 123,
'learning_rate': 0.0125,
'lr_step_gamma': 0.1,
'lr_sigmoid_step': 50.0,
'lr_sigmoid_gamma': 0.1,
'lr_multistep_values': '50,85',
}
job1_id = self.create_model(**options_1)
assert self.model_wait_completion(job1_id) == 'Done', 'first job failed'
rv = self.app.get('/models/%s.json' % job1_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content1 = json.loads(rv.data)
## Clone job1 as job2
options_2 = {
'clone': job1_id,
}
job2_id = self.create_model(**options_2)
assert self.model_wait_completion(job2_id) == 'Done', 'second job failed'
rv = self.app.get('/models/%s.json' % job2_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content2 = json.loads(rv.data)
## These will be different
content1.pop('id')
content2.pop('id')
content1.pop('directory')
content2.pop('directory')
assert (content1 == content2), 'job content does not match'
job1 = digits.webapp.scheduler.get_job(job1_id)
job2 = digits.webapp.scheduler.get_job(job2_id)
assert (job1.form_data == job2.form_data), 'form content does not match'
class BaseTestCreated(BaseViewsTestWithModel):
"""
Tests on a model that has already been created
"""
def test_save(self):
job = digits.webapp.scheduler.get_job(self.model_id)
assert job.save(), 'Job failed to save'
def test_download(self):
for extension in ['tar', 'zip', 'tar.gz', 'tar.bz2']:
yield self.check_download, extension
def check_download(self, extension):
url = '/models/%s/download.%s' % (self.model_id, extension)
rv = self.app.get(url)
assert rv.status_code == 200, 'download "%s" failed with %s' % (url, rv.status_code)
def test_index_json(self):
rv = self.app.get('/index.json')
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
content = json.loads(rv.data)
found = False
for m in content['models']:
if m['id'] == self.model_id:
found = True
break
assert found, 'model not found in list'
def test_models_page(self):
rv = self.app.get('/models', follow_redirects=True)
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
assert 'Models' in rv.data, 'unexpected page format'
def test_model_json(self):
rv = self.app.get('/models/%s.json' % self.model_id)
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
content = json.loads(rv.data)
assert content['id'] == self.model_id, 'id %s != %s' % (content['id'], self.model_id)
assert content['dataset_id'] == self.dataset_id, 'dataset_id %s != %s' % (content['dataset_id'], self.dataset_id)
assert len(content['snapshots']) > 0, 'no snapshots in list'
def test_edit_name(self):
status = self.edit_job(
self.dataset_id,
name='new name'
)
assert status == 200, 'failed with %s' % status
def test_edit_notes(self):
status = self.edit_job(
self.dataset_id,
notes='new notes'
)
assert status == 200, 'failed with %s' % status
def test_classify_one(self):
# test first image in first category
category = self.imageset_paths.keys()[0]
image_path = self.imageset_paths[category][0]
image_path = os.path.join(self.imageset_folder, image_path)
with open(image_path,'rb') as infile:
# StringIO wrapping is needed to simulate POST file upload.
image_upload = (StringIO(infile.read()), 'image.png')
rv = self.app.post(
'/models/images/classification/classify_one?job_id=%s' % self.model_id,
data = {
'image_file': image_upload,
'show_visualizations': 'y',
}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
# gets an array of arrays [[confidence, label],...]
predictions = [p.get_text().split() for p in s.select('ul.list-group li')]
assert predictions[0][1] == category, 'image misclassified'
def test_classify_one_json(self):
# test last image in last category
category = self.imageset_paths.keys()[-1]
image_path = self.imageset_paths[category][-1]
image_path = os.path.join(self.imageset_folder, image_path)
with open(image_path,'rb') as infile:
# StringIO wrapping is needed to simulate POST file upload.
image_upload = (StringIO(infile.read()), 'image.png')
rv = self.app.post(
'/models/images/classification/classify_one.json?job_id=%s' % self.model_id,
data = {
'image_file': image_upload,
'show_visualizations': 'y',
}
)
assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
data = json.loads(rv.data)
assert data['predictions'][0][0] == category, 'image misclassified'
def test_classify_many(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
image_path = os.path.join(self.imageset_folder, image_path)
textfile_images += '%s %d\n' % (image_path, label_id)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/classify_many?job_id=%s' % self.model_id,
data = {'image_list': file_upload}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
def test_classify_many_from_folder(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
textfile_images += '%s %d\n' % (image_path, label_id)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/classify_many?job_id=%s' % self.model_id,
data = {'image_list': file_upload, 'image_folder': self.imageset_folder}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
def test_classify_many_invalid_ground_truth(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
image_path = os.path.join(self.imageset_folder, image_path)
# test label_id with -1 and >len(labels)
textfile_images += '%s %s\n' % (image_path, 3*label_id-1)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/classify_many?job_id=%s' % self.model_id,
data = {'image_list': file_upload}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
def test_classify_many_json(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
image_path = os.path.join(self.imageset_folder, image_path)
textfile_images += '%s %d\n' % (image_path, label_id)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/classify_many.json?job_id=%s' % self.model_id,
data = {'image_list': file_upload}
)
assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
data = json.loads(rv.data)
assert 'classifications' in data, 'invalid response'
# verify classification of first image in each category
for category in self.imageset_paths.keys():
image_path = self.imageset_paths[category][0]
image_path = os.path.join(self.imageset_folder, image_path)
prediction = data['classifications'][image_path][0][0]
            assert prediction == category, 'image misclassified - predicted %s - expected %s' % (prediction, category)
def test_top_n(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
image_path = os.path.join(self.imageset_folder, image_path)
textfile_images += '%s %d\n' % (image_path, label_id)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/top_n?job_id=%s' % self.model_id,
data = {'image_list': file_upload}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
keys = self.imageset_paths.keys()
for key in keys:
            assert key in rv.data, '"%s" not found in the response' % key
def test_top_n_from_folder(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
textfile_images += '%s %d\n' % (image_path, label_id)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/top_n?job_id=%s' % self.model_id,
data = {'image_list': file_upload, 'image_folder': self.imageset_folder}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
keys = self.imageset_paths.keys()
for key in keys:
            assert key in rv.data, '"%s" not found in the response' % key
class BaseTestDatasetModelInteractions(BaseViewsTestWithDataset):
"""
Test the interactions between datasets and models
"""
# If you try to create a model using a deleted dataset, it should fail
def test_create_model_deleted_dataset(self):
dataset_id = self.create_dataset()
assert self.delete_dataset(dataset_id) == 200, 'delete failed'
assert not self.dataset_exists(dataset_id), 'dataset exists after delete'
try:
model_id = self.create_model(dataset=dataset_id)
except RuntimeError:
return
assert False, 'Should have failed'
# If you try to create a model using a running dataset,
# it should wait to start until the dataset is completed
def test_create_model_running_dataset(self):
dataset_id = self.create_dataset()
model_id = self.create_model(dataset=dataset_id)
# Model should be in WAIT status while dataset is running
# Copying functionality from job_wait_completion ...
start_time = time.time()
timeout = TIMEOUT_DATASET
dataset_status = self.dataset_status(dataset_id)
while dataset_status != 'Done':
model_status = self.model_status(model_id)
if model_status == 'Initialized':
# give it some time ...
pass
elif model_status == 'Waiting':
# That's what we were waiting for
break
else:
raise Exception('Model not waiting - "%s"' % model_status)
assert (time.time() - start_time) < timeout, 'Job took more than %s seconds' % timeout
time.sleep(0.5)
dataset_status = self.dataset_status(dataset_id)
# Model should switch to RUN status after dataset is DONE
assert self.dataset_wait_completion(dataset_id) == 'Done', 'dataset creation failed'
time.sleep(1)
assert self.model_status(model_id) in ['Running', 'Done'], "model didn't start"
self.abort_model(model_id)
# If you try to delete a completed dataset with a dependent model, it should fail
def test_delete_dataset_dependent_model(self):
dataset_id = self.create_dataset()
model_id = self.create_model(dataset=dataset_id)
assert self.dataset_wait_completion(dataset_id) == 'Done', 'dataset creation failed'
assert self.delete_dataset(dataset_id) == 403, 'dataset deletion should not have succeeded'
self.abort_model(model_id)
# If you try to delete a running dataset with a dependent model, it should fail
def test_delete_running_dataset_dependent_model(self):
dataset_id = self.create_dataset()
model_id = self.create_model(dataset=dataset_id)
assert self.delete_dataset(dataset_id) == 403, 'dataset deletion should not have succeeded'
self.abort_dataset(dataset_id)
self.abort_model(model_id)
class BaseTestCreatedWide(BaseTestCreated):
IMAGE_WIDTH = 20
class BaseTestCreatedTall(BaseTestCreated):
IMAGE_HEIGHT = 20
class BaseTestCreatedCropInForm(BaseTestCreated):
CROP_SIZE = 8
class BaseTestCreatedCropInNetwork(BaseTestCreated):
CAFFE_NETWORK = \
"""
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
crop_size: 8
}
}
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
crop_size: 8
}
}
layer {
name: "hidden"
type: 'InnerProduct'
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "output"
bottom: "label"
top: "accuracy"
include { stage: "val" }
}
layer {
name: "softmax"
type: "Softmax"
bottom: "output"
top: "softmax"
include { stage: "deploy" }
}
"""
TORCH_NETWORK = \
"""
return function(p)
local nclasses = p.nclasses or 1
    local croplen = 8
    local channels
if p.inputShape then channels=p.inputShape[1] else channels=1 end
local model = nn.Sequential()
model:add(nn.View(-1):setNumInputDims(3)) -- flatten
local linLayer = nn.Linear(channels*croplen*croplen, nclasses)
linLayer.weight:fill(0)
linLayer.bias:fill(0)
model:add(linLayer) -- chw -> nclasses
model:add(nn.LogSoftMax())
return {
model = model,
croplen = croplen
}
end
"""
################################################################################
# Test classes
################################################################################
class TestCaffeViews(BaseTestViews):
FRAMEWORK = 'caffe'
class TestCaffeCreation(BaseTestCreation):
FRAMEWORK = 'caffe'
class TestCaffeCreatedWideMoreNumOutput(BaseTestCreatedWide):
FRAMEWORK = 'caffe'
CAFFE_NETWORK = \
"""
layer {
name: "hidden"
type: 'InnerProduct'
bottom: "data"
top: "output"
inner_product_param {
num_output: 1000
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "output"
bottom: "label"
top: "accuracy"
include { stage: "val" }
}
layer {
name: "softmax"
type: "Softmax"
bottom: "output"
top: "softmax"
include { stage: "deploy" }
}
"""
class TestCaffeDatasetModelInteractions(BaseTestDatasetModelInteractions):
FRAMEWORK = 'caffe'
class TestCaffeCreatedCropInForm(BaseTestCreatedCropInForm):
FRAMEWORK = 'caffe'
class TestCaffeCreatedCropInNetwork(BaseTestCreatedCropInNetwork):
FRAMEWORK = 'caffe'
class TestCaffeCreatedTallMultiStepLR(BaseTestCreatedTall):
FRAMEWORK = 'caffe'
LR_POLICY = 'multistep'
LR_MULTISTEP_VALUES = '50,75,90'
class TestTorchViews(BaseTestViews):
FRAMEWORK = 'torch'
class TestTorchCreation(BaseTestCreation):
FRAMEWORK = 'torch'
class TestTorchCreatedUnencodedShuffle(BaseTestCreated):
FRAMEWORK = 'torch'
ENCODING = 'none'
SHUFFLE = True
class TestTorchCreatedHdf5(BaseTestCreated):
FRAMEWORK = 'torch'
BACKEND = 'hdf5'
class TestTorchCreatedTallHdf5Shuffle(BaseTestCreatedTall):
FRAMEWORK = 'torch'
BACKEND = 'hdf5'
SHUFFLE = True
class TestTorchDatasetModelInteractions(BaseTestDatasetModelInteractions):
FRAMEWORK = 'torch'
class TestCaffeLeNet(BaseTestCreated):
FRAMEWORK = 'caffe'
IMAGE_WIDTH = 28
IMAGE_HEIGHT = 28
CAFFE_NETWORK=open(
os.path.join(
os.path.dirname(digits.__file__),
'standard-networks', 'caffe', 'lenet.prototxt')
).read()
class TestTorchCreatedCropInForm(BaseTestCreatedCropInForm):
FRAMEWORK = 'torch'
class TestTorchCreatedCropInNetwork(BaseTestCreatedCropInNetwork):
FRAMEWORK = 'torch'
class TestTorchCreatedWideMultiStepLR(BaseTestCreatedWide):
FRAMEWORK = 'torch'
LR_POLICY = 'multistep'
LR_MULTISTEP_VALUES = '50,75,90'
class TestTorchLeNet(BaseTestCreated):
FRAMEWORK = 'torch'
IMAGE_WIDTH = 28
IMAGE_HEIGHT = 28
TRAIN_EPOCHS = 20
# need more aggressive learning rate
# on such a small dataset
LR_POLICY = 'fixed'
LEARNING_RATE = 0.1
# standard lenet model will adjust to color
# or grayscale images
TORCH_NETWORK=open(
os.path.join(
os.path.dirname(digits.__file__),
'standard-networks', 'torch', 'lenet.lua')
).read()
class TestTorchLeNetHdf5Shuffle(TestTorchLeNet):
BACKEND = 'hdf5'
SHUFFLE = True
class TestPythonLayer(BaseViewsTestWithDataset):
FRAMEWORK = 'caffe'
CAFFE_NETWORK = """\
layer {
name: "hidden"
type: 'InnerProduct'
inner_product_param {
num_output: 500
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "output"
bottom: "label"
top: "accuracy"
include { stage: "val" }
}
layer {
name: "py_test"
type: "Python"
bottom: "output"
top: "py_test"
python_param {
module: "digits_python_layers"
layer: "PythonLayer"
}
}
layer {
name: "softmax"
type: "Softmax"
bottom: "output"
top: "softmax"
include { stage: "deploy" }
}
"""
def write_python_layer_script(self, filename):
with open(filename, 'w') as f:
f.write("""\
import caffe
import numpy as np
class PythonLayer(caffe.Layer):
def setup(self, bottom, top):
print 'PythonLayer::setup'
if len(bottom) != 1:
raise Exception("Need one input.")
def reshape(self, bottom, top):
print 'PythonLayer::reshape'
top[0].reshape(1)
def forward(self, bottom, top):
print 'PythonLayer::forward'
top[0].data[...] = np.sum(bottom[0].data) / 2. / bottom[0].num
""")
## This test makes a temporary python layer file whose path is set
## as py_layer_server_file. The job creation process copies that
## file to the job_dir. The CAFFE_NETWORK above, requires that
## python script to be in the correct spot. If there is an error
## in the script or if the script is named incorrectly, or does
## not exist in the job_dir, then the test will fail.
def test_python_layer(self):
tmpdir = tempfile.mkdtemp()
py_file = tmpdir + '/py_test.py'
self.write_python_layer_script(py_file)
job_id = self.create_model(python_layer_server_file=py_file)
# remove the temporary python script.
shutil.rmtree(tmpdir)
assert self.model_wait_completion(job_id) == 'Done', 'first job failed'
rv = self.app.get('/models/%s.json' % job_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
        assert len(content['snapshots']), 'should have at least one snapshot'
class TestSweepCreation(BaseViewsTestWithDataset):
FRAMEWORK = 'caffe'
"""
Model creation tests
"""
def test_sweep(self):
job_ids = self.create_model(json=True, learning_rate='[0.01, 0.02]', batch_size='[8, 10]')
for job_id in job_ids:
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
assert self.delete_model(job_id) == 200, 'delete failed'
assert not self.model_exists(job_id), 'model exists after delete'
| bsd-3-clause |
j00bar/ansible | lib/ansible/modules/network/nxos/nxos_file_copy.py | 26 | 7640 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_file_copy
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Copy a file to a remote NXOS device over SCP.
description:
    - Copy a file to the flash (or bootflash) filesystem of a remote
      network device running NXOS.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- The feature must be enabled with feature scp-server.
- If the file is already present (md5 sums match), no transfer will
take place.
- Check mode will tell you if the file would be copied.
options:
local_file:
description:
- Path to local file. Local directory must exist.
required: true
remote_file:
description:
- Remote file path of the copy. Remote directories must exist.
If omitted, the name of the local file will be used.
required: false
default: null
file_system:
description:
- The remote file system of the device. If omitted,
devices that support a file_system parameter will use
their default values.
required: false
default: null
'''
EXAMPLES = '''
- nxos_file_copy:
local_file: "./test_file.txt"
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
transfer_status:
description: Whether a file was transferred. "No Transfer" or "Sent".
returned: success
type: string
sample: 'Sent'
local_file:
description: The path of the local file.
returned: success
type: string
sample: '/path/to/local/file'
remote_file:
description: The path of the remote file.
returned: success
type: string
sample: '/path/to/remote/file'
'''
import os
import re
import time
import paramiko
from ansible.module_utils.nxos import run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
try:
from scp import SCPClient
HAS_SCP = True
except ImportError:
HAS_SCP = False
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
cmds = [command]
body = run_commands(module, cmds)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = run_commands(module, cmds)
return body
def remote_file_exists(module, dst, file_system='bootflash:'):
command = 'dir {0}/{1}'.format(file_system, dst)
body = execute_show_command(command, module, command_type='cli_show_ascii')
if 'No such file' in body[0]:
return False
return True
def verify_remote_file_exists(module, dst, file_system='bootflash:'):
command = 'dir {0}/{1}'.format(file_system, dst)
body = execute_show_command(command, module, command_type='cli_show_ascii')
if 'No such file' in body[0]:
return 0
return body[0].split()[0].strip()
def local_file_exists(module):
return os.path.isfile(module.params['local_file'])
def get_flash_size(module):
command = 'dir {}'.format(module.params['file_system'])
body = execute_show_command(command, module, command_type='cli_show_ascii')
match = re.search(r'(\d+) bytes free', body[0])
bytes_free = match.group(1)
return int(bytes_free)
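# Illustrative 'dir' output (assumed format): the listing ends with a summary
# line such as '  4014080 bytes free', from which get_flash_size extracts
# 4014080.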
def enough_space(module):
flash_size = get_flash_size(module)
file_size = os.path.getsize(module.params['local_file'])
if file_size > flash_size:
return False
return True
def transfer_file(module, dest):
file_size = os.path.getsize(module.params['local_file'])
if not local_file_exists(module):
module.fail_json(msg='Could not transfer file. Local file doesn\'t exist.')
if not enough_space(module):
module.fail_json(msg='Could not transfer file. Not enough space on device.')
hostname = module.params['host']
username = module.params['username']
password = module.params['password']
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(
hostname=hostname,
username=username,
password=password)
full_remote_path = '{}{}'.format(module.params['file_system'], dest)
scp = SCPClient(ssh.get_transport())
try:
scp.put(module.params['local_file'], full_remote_path)
    except Exception:
        # the transfer may still have completed; fall back to a size check
        time.sleep(10)
temp_size = verify_remote_file_exists(
module, dest, file_system=module.params['file_system'])
if int(temp_size) == int(file_size):
pass
else:
module.fail_json(msg='Could not transfer file. There was an error '
'during transfer. Please make sure remote '
'permissions are set.', temp_size=temp_size,
file_size=file_size)
scp.close()
return True
def main():
argument_spec = dict(
local_file=dict(required=True),
remote_file=dict(required=False),
file_system=dict(required=False, default='bootflash:'),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
if not HAS_SCP:
module.fail_json(
msg='library scp is required but does not appear to be '
'installed. It can be installed using `pip install scp`'
)
warnings = list()
check_args(module, warnings)
local_file = module.params['local_file']
remote_file = module.params['remote_file']
file_system = module.params['file_system']
changed = False
transfer_status = 'No Transfer'
if not os.path.isfile(local_file):
module.fail_json(msg="Local file {} not found".format(local_file))
dest = remote_file or os.path.basename(local_file)
remote_exists = remote_file_exists(module, dest, file_system=file_system)
if not remote_exists:
changed = True
file_exists = False
else:
file_exists = True
if not module.check_mode and not file_exists:
try:
transfer_file(module, dest)
transfer_status = 'Sent'
        except Exception as exc:
            module.fail_json(msg=str(exc))
if remote_file is None:
remote_file = os.path.basename(local_file)
module.exit_json(changed=changed,
transfer_status=transfer_status,
local_file=local_file,
remote_file=remote_file,
warnings=warnings,
file_system=file_system)
if __name__ == '__main__':
main()
| gpl-3.0 |
bright-tools/varints | varints/setup.py | 1 | 2012 | #!/usr/bin/python
# Copyright 2017 John Bailey
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
import unittest
def varints_test_suite():
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('tests', pattern='test_*.py')
return test_suite
setup(name='varints',
version='0.1.7',
description='Variable-length encoding of integers',
url='http://github.com/bright-tools/varints',
author='John Bailey',
author_email='[email protected]',
license='Apache',
long_description=open('README', 'rt').read(),
packages=['varints','varints.leb128u','varints.leb128s','varints.dlugoszu','varints.sqliteu'],
test_suite='setup.varints_test_suite',
keywords = ['varint', 'integer', 'variable', 'length' ],
# See https://PyPI.python.org/PyPI?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries :: Python Modules',
],
zip_safe=False)
| apache-2.0 |
catmaid/CATMAID | scripts/graphdb/test_bulbs.py | 2 | 4024 | # -*- coding: utf-8 -*-
from bulbs.neo4jserver import Graph
from bulbs.model import Node, Relationship
from bulbs.property import Property, String, Integer, Float, DateTime
from bulbs.utils import current_datetime
# TODO: check latest gist's
# Requirements (installable with pip)
# pytz ujson six
# You need: sudo apt-get install python-dev
# Reason for GraphDB over relational databases
#
####
# Nodes
####
class Neuron(Node):
element_type = "neuron"
name = String(nullable=False)
creation_time = DateTime(default=current_datetime, nullable=False)
edition_time = DateTime(default=current_datetime, nullable=False)
#creation_time = Property(Float,default="current_timestamp", nullable=False)
#edition_time = Float(default="current_timestamp", nullable=False)
class Treenode(Node):
element_type = "treenode"
x = Float(nullable=True)
y = Float(nullable=False)
z = Float(nullable=False)
class Connector(Node):
element_type = "connector"
####
# Relationships
####
class Relation(Relationship):
label = "relation"
#creation_time = Float(default="current_timestamp", nullable=False)
#edition_time = Float(default="current_timestamp", nullable=False)
#def current_timestamp(self):
# return time.time()
class HasChild(Relationship):
label = "has_child"
class HasTreenode(Relationship):
label = "has_treenode"
class PresynapticTo(Relationship):
label = "presynaptic_to"
class PostsynapticTo(Relationship):
label = "postsynaptic_to"
####
# Test
####
g = Graph()
g.add_proxy("neuron", Neuron)
g.add_proxy("treenode", Treenode)
g.add_proxy("connector", Connector)
g.add_proxy("has_child", HasChild)
g.add_proxy("has_treenode", HasTreenode)
g.add_proxy("presynaptic_to", PresynapticTo)
g.add_proxy("postsynaptic_to", PostsynapticTo)
# create a few objects
neuron1 = g.neuron.create(name="MyNeuron1")
neuron2 = g.neuron.create(name="MyNeuron2")
neuron3 = g.neuron.create(name="MyNeuron3")
treenode1 = g.treenode.create(x=3.3,y=4.3,z=3.2)
treenode11 = g.treenode.create(x=3.3,y=4.3,z=3.2)
treenode2 = g.treenode.create(x=3.3,y=4.3,z=3.2)
treenode3 = g.treenode.create(x=3.3,y=4.3,z=3.2)
connector1 = g.connector.create()
g.presynaptic_to.create(treenode11, connector1)
g.postsynaptic_to.create(treenode2, connector1)
g.has_treenode.create(neuron1, treenode1)
#g.has_treenode.create(treenode11, neuron1)
#g.has_treenode.create(treenode2, neuron2)
#g.has_treenode.create(treenode3, neuron3)
g.has_child.create(treenode1, treenode11)
print('Show treenodes of neuron 1')
# traverse the has_treenode edges created above; no 'element_of' label exists
print(list(neuron1.outV('has_treenode')))
# update
neur = g.vertices.get(neuron1.eid)
neur.name = 'New name'
neur.save()
"""
eid = neuron1.eid
dic = neuron1.map()
print('dictionary', eid, dic)
dic['aha'] = 10
g.vertices.update(eid,dic)
print('get it anew', eid, g.vertices.get(eid).map())
"""
# get edge attributes
edg = list(neur.outE('has_treenode'))[0]
# TODO: why is the relationship label not accessible? edge_type?
print('edge label', edg._label, edg.map())
#g.vertices.delete(neuron1.eid)
import sys
sys.exit(1)
"""
print(neuron1.eid)
neuronid = 1000
old_tn = None
for i in range(6005):
print('i', i)
if i % 5000 == 0:
print('past 1000',i)
neuronid+=1
neuron1 = g.neuron.create(name="MyNeuron {0}".format(i))
print('new neuron with id', neuron1.eid)
treenode_new = g.treenode.create(x=3.3,y=4.3,z=3.2)
g.has_treenode.create(neuron1, treenode_new)
if not old_tn is None:
g.has_child.create(old_tn, treenode_new)
old_tn = treenode_new
"""
import time
start=time.time()
r=g.client.gremlin("g.v(2072).out('has_treenode')")
a,b=r.get_results()
TN=[]
for e in a:
TN.append( e.get_id() )
print('time', time.time()-start)
#print('result', TN)
start=time.time()
TN2=[]
for i in g.neuron.get(2072).outE('has_treenode'):
TN2.append( i.eid )
print('time2', time.time()-start)
# TODO: how to update with nodes with gremlin?
| gpl-3.0 |
Scille/parsec-cloud | parsec/api/data/invite.py | 1 | 6077 | # Parsec Cloud (https://parsec.cloud) Copyright (c) AGPLv3 2016-2021 Scille SAS
import re
from typing import Optional, Tuple, List, Dict, Any
from random import randint, shuffle
from parsec.crypto import VerifyKey, PublicKey, PrivateKey, SecretKey
from parsec.serde import fields, post_load
from parsec.api.protocol import DeviceID, DeviceIDField, HumanHandle, HumanHandleField
from parsec.api.data.base import BaseAPIData, BaseSchema
from parsec.api.data.entry import EntryID, EntryIDField
from parsec.api.data.certif import UserProfile, UserProfileField
import attr
class SASCode(str):
__slots__ = ()
length = 4
symbols = "ABCDEFGHJKLMNPQRSTUVWXYZ23456789"
regex = re.compile(rf"^[{symbols}]{{{length}}}$")
def __init__(self, raw: str):
if not isinstance(raw, str) or not self.regex.match(raw):
raise ValueError("Invalid SAS code")
def __repr__(self) -> str:
return f"<SASCode {super().__repr__()}>"
@classmethod
def from_int(cls, num: int) -> "SASCode":
if num < 0:
raise ValueError("Provided integer is negative")
result = ""
for _ in range(cls.length):
result += cls.symbols[num % len(cls.symbols)]
num //= len(cls.symbols)
if num != 0:
raise ValueError("Provided integer is too large")
return cls(result)
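# Worked examples (sketch): symbols are emitted least-significant digit first
# over the 32-symbol alphabet, so four symbols cover exactly 20 bits
# (32 ** 4 == 2 ** 20):
#
#   SASCode.from_int(0)            == 'AAAA'
#   SASCode.from_int(1)            == 'BAAA'
#   SASCode.from_int(2 ** 20 - 1)  == '9999'   # largest encodable value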
def generate_sas_codes(
claimer_nonce: bytes, greeter_nonce: bytes, shared_secret_key: SecretKey
) -> Tuple[SASCode, SASCode]:
# Computes combined HMAC
combined_nonce = claimer_nonce + greeter_nonce
    # Digest size of 5 bytes so we can split it between two 20-bit SAS codes
combined_hmac = shared_secret_key.hmac(combined_nonce, digest_size=5)
hmac_as_int = int.from_bytes(combined_hmac, "big")
# Big endian number extracted from bits [0, 20[
claimer_sas = hmac_as_int % 2 ** 20
# Big endian number extracted from bits [20, 40[
greeter_sas = (hmac_as_int >> 20) % 2 ** 20
return SASCode.from_int(claimer_sas), SASCode.from_int(greeter_sas)
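# Example of the split (sketch): with a combined HMAC of 0x0123456789,
#   0x0123456789 % 2 ** 20          == 0x56789   (claimer SAS, bits [0, 20))
#   (0x0123456789 >> 20) % 2 ** 20  == 0x01234   (greeter SAS, bits [20, 40))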
def generate_sas_code_candidates(valid_sas: SASCode, size: int = 3) -> List[SASCode]:
candidates = {valid_sas}
while len(candidates) < size:
candidates.add(SASCode.from_int(randint(0, 2 ** 20 - 1)))
ordered_candidates = list(candidates)
shuffle(ordered_candidates)
return ordered_candidates
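# Note (sketch): the returned list always contains the valid code plus
# randomly drawn decoys, shuffled, so a peer can be asked to pick the
# correct one during the invite handshake.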
@attr.s(slots=True, frozen=True, auto_attribs=True, kw_only=True, eq=False)
class InviteUserData(BaseAPIData):
class SCHEMA_CLS(BaseSchema):
type = fields.CheckedConstant("invite_user_data", required=True)
# Claimer ask for device_label/human_handle, but greeter has final word on this
requested_device_label = fields.String(allow_none=True, missing=None)
requested_human_handle = HumanHandleField(allow_none=True, missing=None)
# Note claiming user also imply creating a first device
public_key = fields.PublicKey(required=True)
verify_key = fields.VerifyKey(required=True)
@post_load
def make_obj(self, data: Dict[str, Any]) -> "InviteUserData": # type: ignore[misc]
data.pop("type")
return InviteUserData(**data)
requested_device_label: Optional[str]
requested_human_handle: Optional[HumanHandle]
public_key: PublicKey
verify_key: VerifyKey
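# Illustrative construction (comment-only aside; `private_key` and
# `signing_key` are hypothetical objects from parsec.crypto key generation,
# which is not shown here):
#
#     invite_data = InviteUserData(
#         requested_device_label="alice-laptop",
#         requested_human_handle=None,
#         public_key=private_key.public_key,
#         verify_key=signing_key.verify_key,
#     )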
@attr.s(slots=True, frozen=True, auto_attribs=True, kw_only=True, eq=False)
class InviteUserConfirmation(BaseAPIData):
class SCHEMA_CLS(BaseSchema):
type = fields.CheckedConstant("invite_user_confirmation", required=True)
device_id = DeviceIDField(required=True)
device_label = fields.String(allow_none=True, missing=None)
human_handle = HumanHandleField(allow_none=True, missing=None)
profile = UserProfileField(required=True)
root_verify_key = fields.VerifyKey(required=True)
@post_load
def make_obj(self, data: Dict[str, Any]) -> "InviteUserConfirmation": # type: ignore[misc]
data.pop("type")
return InviteUserConfirmation(**data)
device_id: DeviceID
device_label: Optional[str]
human_handle: Optional[HumanHandle]
profile: UserProfile
root_verify_key: VerifyKey
@attr.s(slots=True, frozen=True, auto_attribs=True, kw_only=True, eq=False)
class InviteDeviceData(BaseAPIData):
class SCHEMA_CLS(BaseSchema):
type = fields.CheckedConstant("invite_device_data", required=True)
        # The claimer asks for device_label, but the greeter has the final word on this
requested_device_label = fields.String(allow_none=True, missing=None)
verify_key = fields.VerifyKey(required=True)
@post_load
def make_obj(self, data: Dict[str, Any]) -> "InviteDeviceData": # type: ignore[misc]
data.pop("type")
return InviteDeviceData(**data)
requested_device_label: Optional[str]
verify_key: VerifyKey
@attr.s(slots=True, frozen=True, auto_attribs=True, kw_only=True, eq=False)
class InviteDeviceConfirmation(BaseAPIData):
class SCHEMA_CLS(BaseSchema):
type = fields.CheckedConstant("invite_device_confirmation", required=True)
device_id = DeviceIDField(required=True)
device_label = fields.String(allow_none=True, missing=None)
human_handle = HumanHandleField(allow_none=True, missing=None)
profile = UserProfileField(required=True)
private_key = fields.PrivateKey(required=True)
user_manifest_id = EntryIDField(required=True)
user_manifest_key = fields.SecretKey(required=True)
root_verify_key = fields.VerifyKey(required=True)
@post_load
def make_obj( # type: ignore[misc]
self, data: Dict[str, Any]
) -> "InviteDeviceConfirmation":
data.pop("type")
return InviteDeviceConfirmation(**data)
device_id: DeviceID
device_label: Optional[str]
human_handle: Optional[HumanHandle]
profile: UserProfile
private_key: PrivateKey
user_manifest_id: EntryID
user_manifest_key: SecretKey
root_verify_key: VerifyKey
| agpl-3.0 |
bryx-inc/boto | boto/pyami/__init__.py | 396 | 1107 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
| mit |
ahye/FYS2140-Resources | examples/animation/func_animate_sin.py | 1 | 1284 | #!/usr/bin/env python
"""
Created on Mon 2 Dec 2013
Eksempelscript som viser hvordan en sinusboelge kan animeres med
funksjonsanimasjon.
@author Benedicte Emilie Braekken
"""
from numpy import *
from matplotlib.pyplot import *
from matplotlib import animation
def wave( x, t ):
'''
    Describes a sine wave at time t and position x.
    '''
    omega = 1 # Angular frequency
    k = 1 # Wave number
return sin( k * x - omega * t )
T = 10
dt = 0.01
nx = int( 1e3 ) # Number of spatial points (linspace expects an integer count)
nt = int( T / dt ) # Number of time steps
t = 0
all_waves = [] # Empty list for storing the wave states
x = linspace( -pi, pi, nx )
while t < T:
    # Append a new wave state for each time step
all_waves.append( wave( x, t ) )
t += dt
# Draw the initial state
fig = figure() # Make sure to keep a reference to the figure
line, = plot( x, all_waves[0] )
draw()
# Constants for the animation
FPS = 60 # Frames per second
inter = 1. / FPS # Time between frames
def init():
    '''
    Initialize the animation with an empty line (required for blitting).
    '''
line.set_data( [], [] )
return line,
def get_frame( frame ):
    '''
    Set the line data to the wave state for the given frame number.
    '''
line.set_data( x, all_waves[ frame ] )
return line,
anim = animation.FuncAnimation( fig, get_frame, init_func=init,
frames=nt, interval=inter, blit=True )
show()
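# To export the animation instead of showing it interactively (assumes an
# encoder such as ffmpeg is installed), one could use:
#     anim.save( 'wave.mp4', fps=FPS )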
| mit |
aneeshusa/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/_stream_base.py | 652 | 5978 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Base stream class.
"""
# Note: request.connection.write/read are used in this module, even though
# mod_python document says that they should be used only in connection
# handlers. Unfortunately, we have no other options. For example,
# request.write/read are not suitable because they don't allow direct raw bytes
# writing/reading.
import socket
from mod_pywebsocket import util
# Exceptions
class ConnectionTerminatedException(Exception):
"""This exception will be raised when a connection is terminated
unexpectedly.
"""
pass
class InvalidFrameException(ConnectionTerminatedException):
"""This exception will be raised when we received an invalid frame we
cannot parse.
"""
pass
class BadOperationException(Exception):
"""This exception will be raised when send_message() is called on
server-terminated connection or receive_message() is called on
client-terminated connection.
"""
pass
class UnsupportedFrameException(Exception):
"""This exception will be raised when we receive a frame with flag, opcode
we cannot handle. Handlers can just catch and ignore this exception and
call receive_message() again to continue processing the next frame.
"""
pass
class InvalidUTF8Exception(Exception):
"""This exception will be raised when we receive a text frame which
contains invalid UTF-8 strings.
"""
pass
class StreamBase(object):
"""Base stream class."""
def __init__(self, request):
"""Construct an instance.
Args:
request: mod_python request.
"""
self._logger = util.get_class_logger(self)
self._request = request
def _read(self, length):
"""Reads length bytes from connection. In case we catch any exception,
prepends remote address to the exception message and raise again.
Raises:
ConnectionTerminatedException: when read returns empty string.
"""
try:
read_bytes = self._request.connection.read(length)
if not read_bytes:
raise ConnectionTerminatedException(
'Receiving %d byte failed. Peer (%r) closed connection' %
(length, (self._request.connection.remote_addr,)))
return read_bytes
except socket.error, e:
# Catch a socket.error. Because it's not a child class of the
# IOError prior to Python 2.6, we cannot omit this except clause.
# Use %s rather than %r for the exception to use human friendly
# format.
raise ConnectionTerminatedException(
'Receiving %d byte failed. socket.error (%s) occurred' %
(length, e))
except IOError, e:
# Also catch an IOError because mod_python throws it.
raise ConnectionTerminatedException(
'Receiving %d byte failed. IOError (%s) occurred' %
(length, e))
def _write(self, bytes_to_write):
"""Writes given bytes to connection. In case we catch any exception,
prepends remote address to the exception message and raise again.
"""
try:
self._request.connection.write(bytes_to_write)
except Exception, e:
util.prepend_message_to_exception(
'Failed to send message to %r: ' %
(self._request.connection.remote_addr,),
e)
raise
def receive_bytes(self, length):
"""Receives multiple bytes. Retries read when we couldn't receive the
specified amount.
Raises:
ConnectionTerminatedException: when read returns empty string.
"""
read_bytes = []
while length > 0:
new_read_bytes = self._read(length)
read_bytes.append(new_read_bytes)
length -= len(new_read_bytes)
return ''.join(read_bytes)
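    # Usage sketch (comment-only aside): framing code can rely on
    # receive_bytes() returning exactly the requested amount or raising, e.g.
    #
    #     header = stream.receive_bytes(2)               # fixed-size header
    #     payload = stream.receive_bytes(payload_length)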
def _read_until(self, delim_char):
"""Reads bytes until we encounter delim_char. The result will not
contain delim_char.
Raises:
ConnectionTerminatedException: when read returns empty string.
"""
read_bytes = []
while True:
ch = self._read(1)
if ch == delim_char:
break
read_bytes.append(ch)
return ''.join(read_bytes)
# vi:sts=4 sw=4 et
| mpl-2.0 |
guyskk/validater | tests/validators/test_union.py | 2 | 4635 | import pytest
from validr import T, SchemaError, Invalid
from . import case, compiler
@case({
T.union([T.int, T.list(T.int)]): [
(111, 111),
([111, 222], [111, 222]),
([], []),
[None, '', 'xxx'],
],
T.union([T.str, T.list(T.str)]).optional: [
([], []),
(None, ''),
('', ''),
[object],
],
T.union([T.list(T.int)]).optional: [
([111, 222], [111, 222]),
(None, None),
['xxx', '', ['yyy']],
],
T.union([
T.int,
T.list(T.int),
T.dict(key=T.int),
]): [
(111, 111),
([111, 222], [111, 222]),
({'key': 333}, {'key': 333}),
]
})
def test_union_list():
pass
@case({
T.union(
type1=T.dict(key=T.str),
type2=T.dict(key=T.list(T.int)),
).by('type'): [
({'type': 'type1', 'key': 'xxx'}, {'type': 'type1', 'key': 'xxx'}),
({'type': 'type2', 'key': [1, 2, 3]}, {'type': 'type2', 'key': [1, 2, 3]}),
[
{'type': 'xxx', 'key': 'xxx'},
{'key': 'xxx'},
'xxx',
None,
]
],
T.union(
type1=T.dict(key=T.str),
).by('type').optional: [
({'type': 'type1', 'key': 'xxx'}, {'type': 'type1', 'key': 'xxx'}),
(None, None),
[
{'type': 'xxx', 'key': 'xxx'},
]
]
})
def test_union_dict():
pass
def test_compile_union():
with pytest.raises(SchemaError) as exinfo:
compiler.compile(T.union)
assert 'union schemas not provided' in exinfo.value.message
with pytest.raises(SchemaError) as exinfo:
compiler.compile(T.union([T.str]).default('xxx'))
assert 'default' in exinfo.value.message
with pytest.raises(SchemaError) as exinfo:
compiler.compile(T.union([T.str]).by('type'))
assert 'by' in exinfo.value.message
with pytest.raises(SchemaError) as exinfo:
compiler.compile(T.union(t1=T.dict(k1=T.str), t2=T.dict(k2=T.str)))
assert 'by' in exinfo.value.message
with pytest.raises(SchemaError) as exinfo:
compiler.compile(T.union(t1=T.dict(k1=T.str)).by(123))
assert 'by' in exinfo.value.message
def test_compile_union_list():
with pytest.raises(SchemaError) as exinfo:
compiler.compile(T.union([T.union([T.str])]))
assert 'ambiguous' in exinfo.value.message
with pytest.raises(SchemaError) as exinfo:
compiler.compile(T.union([T.str.optional]))
assert 'optional' in exinfo.value.message
with pytest.raises(SchemaError) as exinfo:
compiler.compile(T.union([T.str.default('xxx')]))
assert 'default' in exinfo.value.message
with pytest.raises(SchemaError) as exinfo:
compiler.compile(T.union([T.int, T.str]))
assert 'ambiguous' in exinfo.value.message
with pytest.raises(SchemaError) as exinfo:
compiler.compile(T.union([T.list(T.int), T.list(T.int)]))
assert 'ambiguous' in exinfo.value.message
with pytest.raises(SchemaError) as exinfo:
compiler.compile(T.union([T.dict(k1=T.str), T.dict(k2=T.int)]))
assert 'ambiguous' in exinfo.value.message
def test_compile_union_dict():
with pytest.raises(SchemaError) as exinfo:
compiler.compile(T.union(k1=T.str).by('type'))
assert 'dict' in exinfo.value.message
with pytest.raises(SchemaError) as exinfo:
compiler.compile(T.union(k1=T.dict.optional).by('type'))
assert 'optional' in exinfo.value.message
def test_union_list_error_position():
f = compiler.compile(T.list(T.union([
T.int,
T.list(T.int),
T.dict(k=T.int),
])))
with pytest.raises(Invalid) as exinfo:
f([123, 'xxx'])
assert exinfo.value.position == '[1]'
with pytest.raises(Invalid) as exinfo:
f([123, [456, 'xxx']])
assert exinfo.value.position == '[1][1]'
with pytest.raises(Invalid) as exinfo:
f([123, {'k': 'xxx'}])
assert exinfo.value.position == '[1].k'
def test_union_dict_error_position():
f = compiler.compile(T.union(
t1=T.dict(k1=T.int),
t2=T.dict(k2=T.list(T.int)),
).by('type'))
with pytest.raises(Invalid) as exinfo:
f({'k1': 123})
assert exinfo.value.position == 'type'
with pytest.raises(Invalid) as exinfo:
f({'k1': 'xxx', 'type': 'txxx'})
assert exinfo.value.position == 'type'
with pytest.raises(Invalid) as exinfo:
f({'k1': 'xxx', 'type': 't1'})
assert exinfo.value.position == 'k1'
with pytest.raises(Invalid) as exinfo:
f({'k2': ['xxx'], 'type': 't2'})
assert exinfo.value.position == 'k2[0]'
| mit |
liwangmj/omaha | plugins/update/generate_plugin_idls.py | 67 | 3325 | #!/usr/bin/python2.4
#
# Copyright 2007-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""
Generates IDL file for the OneClick ActiveX control from the passed-in IDL
template. The input template is a complete IDL file in all but one respect;
It has one replaceable entry for the CLSID for GoopdateOneClickControl.
We generate a GUID using UUIDGEN.EXE, and write out an IDL with a new CLSID.
"""
import sys
import os
import getopt
import commands
def _GetStatusOutput(cmd):
"""Return (status, output) of executing cmd in a shell."""
if os.name == "nt":
pipe = os.popen(cmd + " 2>&1", 'r')
text = pipe.read()
sts = pipe.close()
if sts is None: sts = 0
if text[-1:] == '\n': text = text[:-1]
return sts, text
else:
return commands.getstatusoutput(cmd)
def _GenerateIDLText(idl_template):
(status, guid) = _GetStatusOutput("uuidgen.exe")
if status != 0:
raise SystemExit("Failed to get GUID: %s" % guid)
return idl_template % guid
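# Illustrative template fragment (hypothetical; the real template ships with
# the plugin sources): the template must contain exactly one "%s" placeholder
# where the freshly generated CLSID is substituted, e.g.
#
#     [ uuid(%s) ]
#     coclass GoopdateOneClickControl { ... };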
def _GenerateIDLFile(idl_template_filename, idl_output_filename):
f_in = open(idl_template_filename, 'r')
idl_template = f_in.read()
f_in.close()
idl_output = _GenerateIDLText(idl_template)
f_out = open(idl_output_filename, 'w')
f_out.write("""
// ** AUTOGENERATED FILE. DO NOT HAND-EDIT **
""")
f_out.write(idl_output)
f_out.close()
def _Usage():
"""Prints out script usage information."""
print """
generate_oneclick_idl.py: Write out the given IDL file.
Usage:
generate_oneclick_idl.py [--help
| --idl_template_file filename
--idl_output_file filename]
Options:
--help Show this information.
--idl_output_file filename Path/name of output IDL filename.
--idl_template_file filename Path/name of input IDL template.
"""
def _Main():
"""Generates IDL file."""
# use getopt to parse the option and argument list; this may raise, but
# don't catch it
_ARGUMENT_LIST = ["help", "idl_template_file=", "idl_output_file="]
(opts, args) = getopt.getopt(sys.argv[1:], "", _ARGUMENT_LIST)
if not opts or ("--help", "") in opts:
_Usage()
sys.exit()
idl_template_filename = ""
idl_output_filename = ""
for (o, v) in opts:
if o == "--idl_template_file":
idl_template_filename = v
if o == "--idl_output_file":
idl_output_filename = v
# make sure we have work to do
if not idl_template_filename:
raise SystemExit("no idl_template_filename specified")
if not idl_output_filename:
raise SystemExit("no idl_output_filename specified")
_GenerateIDLFile(idl_template_filename, idl_output_filename)
sys.exit()
if __name__ == "__main__":
_Main()
| apache-2.0 |
okumura/gyp | test/defines/gyptest-defines-env-regyp.py | 268 | 1350 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies build of an executable with C++ define specified by a gyp define, and
the use of the environment during regeneration when the gyp file changes.
"""
import os
import TestGyp
# Regenerating build files when a gyp file changes is currently only supported
# by the make generator.
test = TestGyp.TestGyp(formats=['make'])
try:
os.environ['GYP_DEFINES'] = 'value=50'
test.run_gyp('defines.gyp')
finally:
# We clear the environ after calling gyp. When the auto-regeneration happens,
# the same define should be reused anyway. Reset to empty string first in
# case the platform doesn't support unsetenv.
os.environ['GYP_DEFINES'] = ''
del os.environ['GYP_DEFINES']
test.build('defines.gyp')
expect = """\
FOO is defined
VALUE is 1
2*PAREN_VALUE is 12
HASH_VALUE is a#1
"""
test.run_built_executable('defines', stdout=expect)
# Sleep so that the changed gyp file will have a newer timestamp than the
# previously generated build files.
test.sleep()
test.write('defines.gyp', test.read('defines-env.gyp'))
test.build('defines.gyp', test.ALL)
expect = """\
VALUE is 50
"""
test.run_built_executable('defines', stdout=expect)
test.pass_test()
| bsd-3-clause |
bioinformatist/Gao-s-SB | src/gaosb/core/app.py | 1 | 17034 | import os
import re
import subprocess
import sys
from collections import defaultdict
from Bio import Entrez
from Bio import SeqIO
from PyQt5 import QtWidgets, QtCore
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QDesktopServices
from PyQt5.QtWidgets import QMessageBox
from gaosb.gui.mainwindow import Ui_MainWindow
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
class Application(QtWidgets.QMainWindow, Ui_MainWindow):
def __init__(self):
super(Application, self).__init__()
self.setupUi(self)
self._set_connections()
self._working_directory = os.getcwd()
def raise_warnings(self, warning_content, warning_details, warning_title="Warning"):
def show_warnings(warning_content, warning_details, warning_title="Warning"):
"""
Raise a warning dialog when the app meets a handled error.
:param warning_content: the main text of dialog
:param warning_details: the detailed text
:param warning_title: the title of the dialog
:return: None
"""
msg = QMessageBox()
msg.setIcon(QMessageBox.Warning)
msg.setText(warning_content)
msg.setInformativeText("You may check usage or "
"send email to Yu Sun <[email protected]> to ask for help."
"Choose <b>Show Details</b> for additional information...")
msg.setWindowTitle(warning_title)
msg.setDetailedText("The details are as follows:\n{}\n"
.format(warning_details))
msg.setTextFormat(Qt.RichText)
msg.setTextInteractionFlags(Qt.TextSelectableByMouse)
msg.exec_()
self.statusbar.showMessage('Warning: halted now')
        # Invoke the dialog directly; assigning show_warnings(...)'s return
        # value (None) to sys.excepthook would break exception handling.
        show_warnings(warning_content, warning_details, warning_title)
def set_working_directory(self, _working_directory):
self._working_directory = QtWidgets.QFileDialog.getExistingDirectory(self, 'Choosing working directory')
self.statusbar.showMessage('Working directory has been set to {}'.format(self._working_directory))
def choose_file_name(self, box, type='output'):
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Choose {} file...'.format(type))[0]
box.setText(filename)
self.statusbar.showMessage('{} file has been set as {}'.format(type.capitalize(), filename))
def _set_connections(self):
"""
Build signal/slot connections once the app started.
:return: None
"""
self.action_about.triggered.connect(self.init_about)
# To make the link in About page clickable
self.label_2.linkActivated.connect(open_url)
def open_help_url():
url = QtCore.QUrl('https://github.com/bioinformatist/Gao-s-SB#examples')
try:
QDesktopServices.openUrl(url)
except:
self.raise_warnings('Network error: No connection', 'Please check your network connection.')
return
self.action_help.triggered.connect(open_help_url)
self.action_setWorkingDirectory.triggered.connect(self.set_working_directory)
self.action_queryAligner.triggered.connect(self.init_align_queries)
self.action_downloadSequence.triggered.connect(self.init_download_sequences)
self.action_filtering.triggered.connect(self.init_filtering)
def init_about(self):
self.stackedWidget_main.setCurrentWidget(self.page_about)
def monitor_text_edit(self, button, *text_edit_list):
"""
Apply empty check on certain textEdit boxes.
:param button: the button which should be set
:param text_edit_list: the textEdit boxes which should be monitored
:return: None
"""
button.setDisabled(True)
def set_button(button, text_edit_list):
if all(t.toPlainText() for t in text_edit_list):
button.setEnabled(True)
else:
button.setEnabled(False)
checking = lambda: set_button(button, text_edit_list)
[t.textChanged.connect(checking) for t in text_edit_list]
def link_checkbox(self, primary_box, attached_box):
def set_checkbox(primary_box, attached_box):
if primary_box.isChecked():
attached_box.setChecked(True)
else:
attached_box.setChecked(False)
setting = lambda: set_checkbox(primary_box, attached_box)
primary_box.stateChanged.connect(setting)
def init_align_queries(self):
self.stackedWidget_main.setCurrentWidget(self.page_alignQuery)
choose = lambda: self.choose_file_name(self.textBrowser_alignmentDestination)
self.pushButton_changeFileAlignment.clicked.connect(choose)
self.pushButton_doAligning.clicked.connect(self.do_align_query)
self.monitor_text_edit(self.pushButton_doAligning, self.textEdit_query, self.textEdit_subject)
def init_download_sequences(self):
self.stackedWidget_main.setCurrentWidget(self.page_downloadSeq)
choose = lambda: self.choose_file_name(self.textBrowser_downloadSeqDestination)
self.pushButton_changeFileDownloadSeq.clicked.connect(choose)
self.pushButton_doDownloadSeq.clicked.connect(self.do_download_seq)
self.link_checkbox(self.checkBox_oneFile, self.checkBox_removeVersion)
self.monitor_text_edit(self.pushButton_doDownloadSeq, self.textEdit_accessionList)
def init_filtering(self):
self.stackedWidget_main.setCurrentWidget(self.page_filtering)
self.pushButton_doSuperUniq.clicked.connect(self.do_super_uniq)
self.monitor_text_edit(self.pushButton_doSuperUniq, self.textEdit_uniqInput)
def do_align_query(self):
# Transfer subject in Text Edit and make a tmp file
self.statusbar.showMessage('Parsing parameters and creating temp files...')
try:
whole_subject = str(self.textEdit_subject.toPlainText()).splitlines()
ref_id = whole_subject[0]
ref_seq = whole_subject[1]
except:
self.raise_warnings('Format error: Wrong fasta input',
'Only one subject sequence is supported. '
                                'You provided fewer or more than one sequence, '
                                'or the sequences are not in FASTA format.')
return
try:
with open('tmp_ref', 'w') as f:
f.writelines('\n'.join(whole_subject))
# Transfer query in Text Edit and make a tmp file
whole_query = str(self.textEdit_query.toPlainText()).splitlines()
with open('tmp_query', 'w') as f:
f.writelines('\n'.join(whole_query))
except:
self.raise_warnings('I/O error: Can\'t create temp file',
'You should check your privilege and remaining disk space.')
return
# Transfer filename in Text Edit
destination_file = str(self.textBrowser_alignmentDestination.toPlainText())
# Call magicblast and redirect its output (SAM format in default)
self.statusbar.showMessage('Running magicblast...')
process_magicblast = subprocess.run(
[resource_path('dependencies') + os.sep + 'magicblast', '-query', 'tmp_query', '-subject', "tmp_ref"],
**subprocess_args())
self.statusbar.showMessage('Removing temp files...')
tmp_list = ['tmp_ref', 'tmp_query']
[os.remove(x) for x in tmp_list]
self.statusbar.showMessage('Parsing blast results...')
query_id = []
result_list = []
for line in process_magicblast.stdout.decode('utf-8').splitlines():
# Ignore SAM header and empty line
if line.startswith('@') or not line.strip():
continue
else:
line = line.split('\t')
# ID is the 1st field in SAM file
query_id.append(line[0])
# Reserve all result for further parsing
result_list.append(line)
with open(destination_file, 'w') as f_result:
# Pre-compile regex
cigar_regex = re.compile(r'(\d+)(\w)')
# Get the length of the longest id
max_id_len = max([len(x) for x in query_id])
# Use the length to reformat id then use double tabular to separate id and sequence
f_result.writelines(('\t' * 2).join(['{:{max_id_len}}'.format(ref_id, max_id_len=max_id_len), ref_seq]))
f_result.writelines('\n')
for result in result_list:
result_id = '>' + result[0]
cigar_list = cigar_regex.findall(result[5])
seq = result[9]
# Initialize the index of sequence and offset
position = offset = 0
if cigar_list[0][1] == "S":
position = offset = int(cigar_list[0][0])
del cigar_list[0]
seq = seq[:position] + '(' + seq[position:]
for cigar in cigar_list:
if cigar[1] == 'M':
pass
elif cigar[1] == 'I':
seq = seq[:position] + seq[(position + int(cigar[0])):]
elif cigar[1] == 'D':
seq = seq[:position] + '-' * int(cigar[0]) + seq[position:]
elif cigar[1] == 'S':
seq = seq[:position] + ')' + seq[position:]
position += int(cigar[0])
# Since the 4th column of sam file is the start position of alignment, add an offset of one base
# Another one-base offset for left parenthesis, so we got minus 2
seq = (int(result[3]) - 2 - offset) * ' ' + seq
f_result.writelines(('\t' * 2).join(['{:{max_id_len}}'.format(result_id, max_id_len=max_id_len), seq]))
f_result.writelines('\n')
self.statusbar.showMessage('done.')
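    # CIGAR handling reference for the loop above (comment-only aside, using a
    # made-up read): with CIGAR "2S5M1D3M" and raw sequence "ttACGTAACC", the
    # soft-clipped prefix is marked with "(", the 1-base deletion is padded
    # with "-", and the row renders as "tt(ACGT-ACC"-style text, here exactly
    # "tt(ACGT-AACC" before the leading-space offset is applied.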
def do_download_seq(self):
self.statusbar.showMessage('Parsing parameters...')
Entrez.email = "[email protected]"
accession_list = str(self.textEdit_accessionList.toPlainText()).splitlines()
destination_file = str(self.textBrowser_downloadSeqDestination.toPlainText())
try:
[accession_list.remove(s) for s in accession_list if s == '']
accession_pool_spec = defaultdict(list)
accession_pool_whole = []
blank_regex = re.compile('\s+')
for accession in accession_list:
if accession.startswith('#'):
continue
if blank_regex.search(accession):
if self.checkBox_oneFile.isChecked():
raise Exception
accession = blank_regex.split(accession.strip())
accession_pool_spec[accession[0]].append([int(x) for x in accession[-2:]])
else:
accession_pool_whole.append(accession)
accession_pool_whole = list(set(accession_pool_whole))
except:
self.raise_warnings('Format error: Wrong input',
'Please check your output mode or input content as well as separator.')
return
try:
self.statusbar.showMessage('Fetching sequences from NCBI databases...')
handle = Entrez.efetch(db="nuccore", rettype='gb', id=list(accession_pool_spec.keys()))
records_spec = list(SeqIO.parse(handle, "gb"))
handle = Entrez.efetch(db="nuccore", rettype='gb', id=accession_pool_whole)
records_whole = list(SeqIO.parse(handle, "gb"))
except:
self.raise_warnings('Network error: No connection', 'Please check your network connection.')
return
try:
self.statusbar.showMessage('Parsing downloaded sequences...')
if self.checkBox_oneFile.isChecked():
os.chdir(self._working_directory)
for record in records_whole:
with open(record.id.split('.')[0] + '.fa', 'w') as f:
f.write('>{}\n{}\n'.format(record.id.split('.')[0], record.seq))
else:
with open(destination_file, 'w') as f:
for record in records_whole:
if self.checkBox_removeVersion.isChecked():
f.write('>{}\n{}\n'.format(record.id.split('.')[0], record.seq))
else:
f.write('>{}\n{}\n'.format(record.id, record.seq))
for record in records_spec:
for location in accession_pool_spec[record.id]:
if self.checkBox_removeVersion.isChecked():
f.write('>{} | {}-{}\n{}\n'.format(record.id.split('.')[0], location[0], location[1],
record.seq[location[0] - 1:location[1]]))
else:
f.write('>{} | {}-{}\n{}\n'.format(record.id, location[0], location[1],
record.seq[location[0] - 1:location[1]]))
self.statusbar.showMessage("Done.")
except:
self.raise_warnings('I/O error: Can\'t create output file',
'You should check your privilege and remaining disk space.')
return
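    # Accepted input format for the accession list above (comment-only aside):
    # one accession per line, optionally followed by a start and end position,
    # e.g.
    #
    #     NM_000546
    #     NC_000017 7668402 7687550
    #
    # Lines starting with "#" are skipped; ranged entries are fetched as
    # GenBank records and sliced locally.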
def do_super_uniq(self):
self.statusbar.showMessage('Running...')
# Clear current content in textBrowser (maybe exists)
self.textBrowser_uniqResult.setText('')
redundant_list = str(self.textEdit_uniqInput.toPlainText()).splitlines()
custom_regex = re.compile(str(self.textEdit_uniqSep.toPlainText()))
def remove_redundant(input_list):
redundant_pool = []
for item in input_list:
if item.startswith('#') or item == 'NA':
continue
item = custom_regex.split(item)
redundant_pool.extend(item)
redundant_pool = list(set(redundant_pool))
[self.textBrowser_uniqResult.append(r) for r in redundant_pool]
remove_redundant(redundant_list)
self.statusbar.showMessage('Done.')
def open_url(url):
QDesktopServices.openUrl(QtCore.QUrl(url))
# Create a set of arguments which make a ``subprocess.Popen`` (and
# variants) call work with or without Pyinstaller, ``--noconsole`` or
# not, on Windows and Linux. Typical use::
#
# subprocess.call(['program_to_run', 'arg_1'], **subprocess_args())
#
# When calling ``check_output``::
#
# subprocess.check_output(['program_to_run', 'arg_1'],
# **subprocess_args(False))
def subprocess_args(include_stdout=True):
# The following is true only on Windows.
if hasattr(subprocess, 'STARTUPINFO'):
# On Windows, subprocess calls will pop up a command window by default
# when run from Pyinstaller with the ``--noconsole`` option. Avoid this
# distraction.
si = subprocess.STARTUPINFO()
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
# Windows doesn't search the path by default. Pass it an environment so
# it will.
env = os.environ
else:
si = None
env = None
# ``subprocess.check_output`` doesn't allow specifying ``stdout``::
#
# Traceback (most recent call last):
# File "test_subprocess.py", line 58, in <module>
# **subprocess_args(stdout=None))
# File "C:\Python27\lib\subprocess.py", line 567, in check_output
# raise ValueError('stdout argument not allowed, it will be overridden.')
# ValueError: stdout argument not allowed, it will be overridden.
#
# So, add it only if it's needed.
if include_stdout:
ret = {'stdout': subprocess.PIPE}
else:
ret = {}
# On Windows, running this from the binary produced by Pyinstaller
# with the ``--noconsole`` option requires redirecting everything
# (stdin, stdout, stderr) to avoid an OSError exception
# "[Error 6] the handle is invalid."
ret.update({'stdin': subprocess.PIPE,
'stderr': subprocess.PIPE,
'startupinfo': si,
'env': env})
return ret
| mit |
keithlee/shakeAppPyDev | django/contrib/messages/tests/base.py | 152 | 17772 | import warnings
from django import http
from django.test import TestCase
from django.conf import settings
from django.utils.translation import ugettext_lazy
from django.utils.unittest import skipIf
from django.contrib.messages import constants, utils, get_level, set_level
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.storage import default_storage, base
from django.contrib.messages.storage.base import Message
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
def skipUnlessAuthIsInstalled(func):
return skipIf(
'django.contrib.auth' not in settings.INSTALLED_APPS,
"django.contrib.auth isn't installed")(func)
def add_level_messages(storage):
"""
Adds 6 messages from different levels (including a custom one) to a storage
instance.
"""
storage.add(constants.INFO, 'A generic info message')
storage.add(29, 'Some custom level')
storage.add(constants.DEBUG, 'A debugging message', extra_tags='extra-tag')
storage.add(constants.WARNING, 'A warning')
storage.add(constants.ERROR, 'An error')
storage.add(constants.SUCCESS, 'This was a triumph.')
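# For reference when reading the level tests below: the built-in levels are
# DEBUG=10, INFO=20, SUCCESS=25, WARNING=30 and ERROR=40, so e.g. a threshold
# of 30 keeps only the WARNING and ERROR messages added above.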
class BaseTest(TestCase):
storage_class = default_storage
restore_settings = ['MESSAGE_LEVEL', 'MESSAGE_TAGS']
urls = 'django.contrib.messages.tests.urls'
levels = {
'debug': constants.DEBUG,
'info': constants.INFO,
'success': constants.SUCCESS,
'warning': constants.WARNING,
'error': constants.ERROR,
}
def setUp(self):
self._remembered_settings = {}
for setting in self.restore_settings:
if hasattr(settings, setting):
self._remembered_settings[setting] = getattr(settings, setting)
delattr(settings._wrapped, setting)
# Backup these manually because we do not want them deleted.
self._middleware_classes = settings.MIDDLEWARE_CLASSES
self._template_context_processors = \
settings.TEMPLATE_CONTEXT_PROCESSORS
self._installed_apps = settings.INSTALLED_APPS
self._message_storage = settings.MESSAGE_STORAGE
settings.MESSAGE_STORAGE = '%s.%s' % (self.storage_class.__module__,
self.storage_class.__name__)
self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
settings.TEMPLATE_DIRS = ()
self.save_warnings_state()
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='django.contrib.auth.models')
def tearDown(self):
for setting in self.restore_settings:
self.restore_setting(setting)
# Restore these manually (see above).
settings.MIDDLEWARE_CLASSES = self._middleware_classes
settings.TEMPLATE_CONTEXT_PROCESSORS = \
self._template_context_processors
settings.INSTALLED_APPS = self._installed_apps
settings.MESSAGE_STORAGE = self._message_storage
settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS
self.restore_warnings_state()
def restore_setting(self, setting):
if setting in self._remembered_settings:
value = self._remembered_settings.pop(setting)
setattr(settings, setting, value)
elif hasattr(settings, setting):
delattr(settings._wrapped, setting)
def get_request(self):
return http.HttpRequest()
def get_response(self):
return http.HttpResponse()
def get_storage(self, data=None):
"""
Returns the storage backend, setting its loaded data to the ``data``
argument.
This method avoids the storage ``_get`` method from getting called so
that other parts of the storage backend can be tested independent of
the message retrieval logic.
"""
storage = self.storage_class(self.get_request())
storage._loaded_data = data or []
return storage
def test_add(self):
storage = self.get_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 1')
self.assertTrue(storage.added_new)
storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
self.assertEqual(len(storage), 2)
def test_add_lazy_translation(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, ugettext_lazy('lazy message'))
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_no_update(self):
storage = self.get_storage()
response = self.get_response()
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_add_update(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 1')
storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 2)
def test_existing_add_read_update(self):
storage = self.get_existing_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 3')
list(storage) # Simulates a read
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_existing_read_add_update(self):
storage = self.get_existing_storage()
response = self.get_response()
list(storage) # Simulates a read
storage.add(constants.INFO, 'Test message 3')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_full_request_response_cycle(self):
"""
With the message middleware enabled, tests that messages are properly
stored and then retrieved across the full request/redirect/response
cycle.
"""
settings.MESSAGE_LEVEL = constants.DEBUG
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
messages = [Message(self.levels[level], msg) for msg in
data['messages']]
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
def test_with_template_response(self):
settings.MESSAGE_LEVEL = constants.DEBUG
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show_template_response')
for level in self.levels.keys():
add_url = reverse('django.contrib.messages.tests.urls.add_template_response',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
for msg in data['messages']:
self.assertContains(response, msg)
# there shouldn't be any messages on second GET request
response = self.client.get(show_url)
for msg in data['messages']:
self.assertNotContains(response, msg)
def test_multiple_posts(self):
"""
Tests that messages persist properly when multiple POSTs are made
before a GET.
"""
settings.MESSAGE_LEVEL = constants.DEBUG
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
messages = []
for level in ('debug', 'info', 'success', 'warning', 'error'):
messages.extend([Message(self.levels[level], msg) for msg in
data['messages']])
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
self.client.post(add_url, data)
response = self.client.get(show_url)
self.assertTrue('messages' in response.context)
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@skipUnlessAuthIsInstalled
def test_middleware_disabled_auth_user(self):
"""
Tests that the messages API successfully falls back to using
user.message_set to store messages directly when the middleware is
disabled.
"""
settings.MESSAGE_LEVEL = constants.DEBUG
user = User.objects.create_user('test', '[email protected]', 'test')
self.client.login(username='test', password='test')
settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
settings.INSTALLED_APPS.remove(
'django.contrib.messages',
)
settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES)
settings.MIDDLEWARE_CLASSES.remove(
'django.contrib.messages.middleware.MessageMiddleware',
)
settings.TEMPLATE_CONTEXT_PROCESSORS = \
list(settings.TEMPLATE_CONTEXT_PROCESSORS)
settings.TEMPLATE_CONTEXT_PROCESSORS.remove(
'django.contrib.messages.context_processors.messages',
)
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
context_messages = list(response.context['messages'])
for msg in data['messages']:
self.assertTrue(msg in context_messages)
self.assertContains(response, msg)
def test_middleware_disabled_anon_user(self):
"""
Tests that, when the middleware is disabled and a user is not logged
in, an exception is raised when one attempts to store a message.
"""
settings.MESSAGE_LEVEL = constants.DEBUG
settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
settings.INSTALLED_APPS.remove(
'django.contrib.messages',
)
settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES)
settings.MIDDLEWARE_CLASSES.remove(
'django.contrib.messages.middleware.MessageMiddleware',
)
settings.TEMPLATE_CONTEXT_PROCESSORS = \
list(settings.TEMPLATE_CONTEXT_PROCESSORS)
settings.TEMPLATE_CONTEXT_PROCESSORS.remove(
'django.contrib.messages.context_processors.messages',
)
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
self.assertRaises(MessageFailure, self.client.post, add_url,
data, follow=True)
def test_middleware_disabled_anon_user_fail_silently(self):
"""
Tests that, when the middleware is disabled and a user is not logged
in, an exception is not raised if 'fail_silently' = True
"""
settings.MESSAGE_LEVEL = constants.DEBUG
settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
settings.INSTALLED_APPS.remove(
'django.contrib.messages',
)
settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES)
settings.MIDDLEWARE_CLASSES.remove(
'django.contrib.messages.middleware.MessageMiddleware',
)
settings.TEMPLATE_CONTEXT_PROCESSORS = \
list(settings.TEMPLATE_CONTEXT_PROCESSORS)
settings.TEMPLATE_CONTEXT_PROCESSORS.remove(
'django.contrib.messages.context_processors.messages',
)
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
'fail_silently': True,
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
self.assertEqual(list(response.context['messages']), [])
def stored_messages_count(self, storage, response):
"""
Returns the number of messages being stored after a
``storage.update()`` call.
"""
raise NotImplementedError('This method must be set by a subclass.')
def test_get(self):
raise NotImplementedError('This method must be set by a subclass.')
def get_existing_storage(self):
return self.get_storage([Message(constants.INFO, 'Test message 1'),
Message(constants.INFO, 'Test message 2',
extra_tags='tag')])
def test_existing_read(self):
"""
Tests that reading the existing storage doesn't cause the data to be
lost.
"""
storage = self.get_existing_storage()
self.assertFalse(storage.used)
# After iterating the storage engine directly, the used flag is set.
data = list(storage)
self.assertTrue(storage.used)
# The data does not disappear because it has been iterated.
self.assertEqual(data, list(storage))
def test_existing_add(self):
storage = self.get_existing_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 3')
self.assertTrue(storage.added_new)
def test_default_level(self):
# get_level works even with no storage on the request.
request = self.get_request()
self.assertEqual(get_level(request), constants.INFO)
# get_level returns the default level if it hasn't been set.
storage = self.get_storage()
request._messages = storage
self.assertEqual(get_level(request), constants.INFO)
# Only messages of sufficient level get recorded.
add_level_messages(storage)
self.assertEqual(len(storage), 5)
def test_low_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 5))
self.assertEqual(get_level(request), 5)
add_level_messages(storage)
self.assertEqual(len(storage), 6)
def test_high_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 30))
self.assertEqual(get_level(request), 30)
add_level_messages(storage)
self.assertEqual(len(storage), 2)
def test_settings_level(self):
request = self.get_request()
storage = self.storage_class(request)
settings.MESSAGE_LEVEL = 29
self.assertEqual(get_level(request), 29)
add_level_messages(storage)
self.assertEqual(len(storage), 3)
def test_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags,
['info', '', 'extra-tag debug', 'warning', 'error',
'success'])
def test_custom_tags(self):
settings.MESSAGE_TAGS = {
constants.INFO: 'info',
constants.DEBUG: '',
constants.WARNING: '',
constants.ERROR: 'bad',
29: 'custom',
}
# LEVEL_TAGS is a constant defined in the
# django.contrib.messages.storage.base module, so after changing
# settings.MESSAGE_TAGS, we need to update that constant too.
base.LEVEL_TAGS = utils.get_level_tags()
try:
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags,
['info', 'custom', 'extra-tag', '', 'bad', 'success'])
finally:
# Ensure the level tags constant is put back like we found it.
self.restore_setting('MESSAGE_TAGS')
base.LEVEL_TAGS = utils.get_level_tags()
| bsd-3-clause |
maziara/deluge-feed-inoreader | delugeapi.py | 1 | 3737 | import config
from deluge_client import DelugeRPCClient
import werkzeug as wz
import base64
########################################################
########### Deluge Methods ################
########################################################
def connect_to_deluge():
client = DelugeRPCClient(config.DLGD_HOST, config.DLGD_PORT, config.DLGD_USER, config.DLGD_PASS)
client.connect()
if client.connected: print "Connected to deluge daemon"
from types import MethodType
def add_torr_url(self, url):
return self.call('core.add_torrent_url', wz.urls.url_fix(url), {})
client.add_torr_url = MethodType(add_torr_url, client, DelugeRPCClient)
def add_torr_file(self, file):
f = open(file, 'rb')
filedump = base64.encodestring(f.read())
f.close()
return self.call('core.add_torrent_file', file, filedump, {})
client.add_torr_file = MethodType(add_torr_file, client, DelugeRPCClient)
def add_label(self, label, options={}):
label = normalize_label(label)
self.call('label.add', label)
if options:
if options['move_completed_path']:
options.update({'move_completed': True, 'apply_move_completed': True})
self.call('label.set_options', label, options)
client.add_label = MethodType(add_label, client, DelugeRPCClient)
def label_exist(self, label):
label = normalize_label(label)
if label in self.list_labels():
return True
else:
return False
client.label_exist = MethodType(label_exist, client, DelugeRPCClient)
def list_labels(self):
return self.call('label.get_labels')
client.list_labels = MethodType(list_labels, client, DelugeRPCClient)
def add_tor_label(self, tor_id, label):
return self.call('label.set_torrent', tor_id, normalize_label(label))
client.add_tor_label = MethodType(add_tor_label, client, DelugeRPCClient)
def session_state(self):
return self.call('core.get_session_state')
client.session_state = MethodType(session_state, client, DelugeRPCClient)
def torrent_status(self, tid, fields = {}):
return self.call('core.get_torrent_status', tid, fields)
client.torrent_status = MethodType(torrent_status, client, DelugeRPCClient)
def torrents_status(self, filters = {}, fields = []):
return self.call('core.get_torrents_status', filters, fields)
client.torrents_status = MethodType(torrents_status, client, DelugeRPCClient)
def get_finished(self):
torrs = torrents_status(self)
for k,v in torrs.items():
#print(k,v['name'])
if v['is_finished'] == False:
#print("Removing unfinished: " + v['name'] + " " + str(v['is_finished']))
torrs.pop(k)
elif v['tracker_host'] in config.REMOVE_SEEDS_EXCEPTION_TRACKERS:
#print("Removing exception_tracker: " + v['name'])
torrs.pop(k)
elif not is_all_files_done(v):
#print("Removing not_all_done: " + v['name'])
torrs.pop(k)
return torrs
client.get_finished = MethodType(get_finished, client, DelugeRPCClient)
def remove_finished(self):
for k in get_finished(self):
self.call('core.remove_torrent', k, False)
client.remove_finished = MethodType(remove_finished, client, DelugeRPCClient)
def is_all_files_done(tor):
for i in tor['file_progress']:
if i != 1.0:
return False
return True
return client
def normalize_label(label):
return label.replace(' ','_').lower()
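# Usage sketch (comment-only aside; host/port/credentials come from config.py,
# and the daemon is assumed to return the new torrent id from add_torr_url):
#
#     client = connect_to_deluge()
#     if not client.label_exist('tv shows'):
#         client.add_label('tv shows')
#     tid = client.add_torr_url('http://example.com/file.torrent')
#     client.add_tor_label(tid, 'tv shows')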
| mit |
LChristakis/chalice-hunter | lib/python3.4/site-packages/pip/_vendor/requests/packages/chardet/mbcharsetprober.py | 2924 | 3268 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mDistributionAnalyzer = None
self._mCodingSM = None
self._mLastChar = [0, 0]
def reset(self):
CharSetProber.reset(self)
if self._mCodingSM:
self._mCodingSM.reset()
if self._mDistributionAnalyzer:
self._mDistributionAnalyzer.reset()
self._mLastChar = [0, 0]
def get_charset_name(self):
pass
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mDistributionAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
return self._mDistributionAnalyzer.get_confidence()
| mit |
mcgill-robotics/Firmware | src/lib/parameters/px4params/markdownout.py | 1 | 5359 | from xml.sax.saxutils import escape
import codecs
class MarkdownTablesOutput():
def __init__(self, groups):
result = ("# Parameter Reference\n"
"> **Note** **This list is auto-generated from the source code** and contains the most recent parameter documentation.\n"
"\n")
for group in groups:
result += '## %s\n\n' % group.GetName()
#Check if scope (module where parameter is defined) is the same for all parameters in the group.
# If so then display just once about the table.
scope_set = set()
for param in group.GetParams():
scope_set.add(param.GetFieldValue("scope"))
if len(scope_set)==1:
result+='\nThe module where these parameters are defined is: *%s*.\n\n' % list(scope_set)[0]
result += '<table style="width: 100%; table-layout:fixed; font-size:1.5rem; overflow: auto; display:block;">\n'
result += ' <colgroup><col style="width: 23%"><col style="width: 46%"><col style="width: 11%"><col style="width: 11%"><col style="width: 9%"></colgroup>\n'
result += ' <thead>\n'
result += ' <tr><th>Name</th><th>Description</th><th>Min > Max (Incr.)</th><th>Default</th><th>Units</th></tr>\n'
result += ' </thead>\n'
result += '<tbody>\n'
for param in group.GetParams():
code = param.GetName()
name = param.GetFieldValue("short_desc") or ''
long_desc = param.GetFieldValue("long_desc") or ''
min_val = param.GetFieldValue("min") or ''
max_val = param.GetFieldValue("max") or ''
increment = param.GetFieldValue("increment") or ''
def_val = param.GetDefault() or ''
unit = param.GetFieldValue("unit") or ''
type = param.GetType()
reboot_required = param.GetFieldValue("reboot_required") or ''
#board = param.GetFieldValue("board") or '' ## Disabled as no board values are defined in any parameters!
#decimal = param.GetFieldValue("decimal") or '' #Disabled as is intended for GCS not people
#field_codes = param.GetFieldCodes() ## Disabled as not needed for display.
#boolean = param.GetFieldValue("boolean") # or '' # Disabled - does not appear useful.
# Format values for display.
# Display min/max/increment value based on what values are defined.
max_min_combined = ''
if min_val or max_val:
if not min_val:
min_val='?'
if not max_val:
max_val='?'
max_min_combined+='%s > %s ' % (min_val, max_val)
if increment:
max_min_combined+='(%s)' % increment
                if long_desc:
long_desc = '<p><strong>Comment:</strong> %s</p>' % long_desc
if name == code:
name = ""
code='<strong id="%s">%s</strong>' % (code, code)
if reboot_required:
reboot_required='<p><b>Reboot required:</b> %s</p>\n' % reboot_required
scope=''
if not len(scope_set)==1 or len(scope_set)==0:
scope = param.GetFieldValue("scope") or ''
if scope:
scope='<p><b>Module:</b> %s</p>\n' % scope
enum_codes=param.GetEnumCodes() or '' # Gets numerical values for parameter.
enum_output=''
# Format codes and their descriptions for display.
if enum_codes:
enum_output+='<strong>Values:</strong><ul>'
enum_codes=sorted(enum_codes,key=float)
for item in enum_codes:
enum_output+='\n<li><strong>%s:</strong> %s</li> \n' % (item, param.GetEnumValue(item))
enum_output+='</ul>\n'
bitmask_list=param.GetBitmaskList() #Gets bitmask values for parameter
bitmask_output=''
#Format bitmask values
if bitmask_list:
bitmask_output+='<strong>Bitmask:</strong><ul>'
for bit in bitmask_list:
bit_text = param.GetBitmaskBit(bit)
bitmask_output+=' <li><strong>%s:</strong> %s</li> \n' % (bit, bit_text)
bitmask_output+='</ul>\n'
result += '<tr>\n <td style="vertical-align: top;">%s (%s)</td>\n <td style="vertical-align: top;"><p>%s</p>%s %s %s %s %s</td>\n <td style="vertical-align: top;">%s</td>\n <td style="vertical-align: top;">%s </td>\n <td style="vertical-align: top;">%s</td>\n</tr>\n' % (code,type,name, long_desc, enum_output, bitmask_output, reboot_required, scope, max_min_combined,def_val,unit)
#Close the table.
result += '</tbody></table>\n\n'
self.output = result
def Save(self, filename):
with codecs.open(filename, 'w', 'utf-8') as f:
f.write(self.output)
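    # Usage sketch (comment-only aside; `groups` is the parameter-group list
    # produced by the px4params source parser, not shown here):
    #
    #     out = MarkdownTablesOutput(groups)
    #     out.Save('parameters.md')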
| bsd-3-clause |