repo_name | path | copies | size | content | license
---|---|---|---|---|---|
socialwareinc/Diamond | src/collectors/mountstats/test/testmountstats.py | 31 | 3401 | #!/usr/bin/python
# coding=utf-8
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import MagicMock, Mock
from mock import patch
from diamond.collector import Collector
from mountstats import MountStatsCollector
class TestMountStatsCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('MountStatsCollector', {
'exclude_filters': ['^/mnt/path2'],
'interval': 1
})
self.collector = MountStatsCollector(config, None)
def test_import(self):
self.assertTrue(MountStatsCollector)
@patch('__builtin__.open')
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_open_mountstats(self, publish_mock, open_mock):
open_mock.return_value = MagicMock()
self.collector.collect()
open_mock.assert_called_once_with(self.collector.MOUNTSTATS)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
# Test the first and last metric of each type
published_metrics = {
'_mnt_path1.events.inoderevalidates': 27110.0,
'_mnt_path1.events.delay': 0.0,
'_mnt_path1.bytes.normalreadbytes': 1424269.0,
'_mnt_path1.bytes.serverwritebytes': 69460.0,
'_mnt_path1.xprt.tcp.port': 0.0,
'_mnt_path1.xprt.tcp.backlogutil': 11896527.0,
'_mnt_path1.rpc.access.ops': 2988.0,
'_mnt_path1.rpc.write.ops': 16.0
}
unpublished_metrics = {
'_mnt_path2.events.delay': 0.0
}
self.collector.MOUNTSTATS = self.getFixturePath('mountstats_1')
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
self.collector.MOUNTSTATS = self.getFixturePath('mountstats_2')
self.collector.collect()
self.assertPublishedMany(publish_mock, published_metrics)
self.assertUnpublishedMany(publish_mock, unpublished_metrics)
@patch.object(Collector, 'publish')
def test_include_filter(self, publish_mock):
config = get_collector_config('MountStatsCollector', {
'include_filters': ['^/mnt/path2'],
'interval': 1
})
self.collector = MountStatsCollector(config, None)
# Test the first and last metric of each type
published_metrics = {
'_mnt_path2.bytes.directwritebytes': 0.0,
'_mnt_path2.bytes.normalreadbytes': 1424269.0,
'_mnt_path2.bytes.normalwritebytes': 66589.0,
'_mnt_path2.bytes.serverreadbytes': 757.0,
'_mnt_path2.bytes.serverwritebytes': 69460.0,
'_mnt_path2.events.attrinvalidates': 144.0,
'_mnt_path2.events.datainvalidates': 23.0,
}
unpublished_metrics = {
'_mnt_path1.events.inoderevalidates': 27110.0,
}
self.collector.MOUNTSTATS = self.getFixturePath('mountstats_1')
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
self.collector.MOUNTSTATS = self.getFixturePath('mountstats_2')
self.collector.collect()
self.assertPublishedMany(publish_mock, published_metrics)
self.assertUnpublishedMany(publish_mock, unpublished_metrics)
if __name__ == "__main__":
unittest.main()
| mit |
ImageEngine/gaffer | python/GafferUITest/LayoutsTest.py | 8 | 7337 | ##########################################################################
#
# Copyright (c) 2018, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import Gaffer
import GafferUI
import GafferUITest
class LayoutsTest( GafferUITest.TestCase ) :
def testAcquire( self ) :
a = Gaffer.ApplicationRoot( "testApp" )
self.assertIsInstance( GafferUI.Layouts.acquire( a ), GafferUI.Layouts )
self.assertIs( GafferUI.Layouts.acquire( a ), GafferUI.Layouts.acquire( a ) )
def testAddAndRemove( self ) :
a = Gaffer.ApplicationRoot( "testApp" )
l = GafferUI.Layouts.acquire( a )
self.assertEqual( l.names(), [] )
l.add( "JustTheGraphEditor", "GafferUI.GraphEditor( script )" )
self.assertEqual( l.names(), [ "JustTheGraphEditor" ] )
l.add( "JustTheNodeEditor", "GafferUI.NodeEditor( script )" )
self.assertEqual( l.names(), [ "JustTheGraphEditor", "JustTheNodeEditor" ] )
l.remove( "JustTheGraphEditor" )
self.assertEqual( l.names(), [ "JustTheNodeEditor" ] )
l.remove( "JustTheNodeEditor" )
self.assertEqual( l.names(), [] )
def testPersistence( self ) :
a = Gaffer.ApplicationRoot( "testApp" )
l = GafferUI.Layouts.acquire( a )
self.assertEqual( l.names(), [] )
l.add( "JustTheGraphEditor", "GafferUI.GraphEditor( script )" )
self.assertEqual( l.names(), [ "JustTheGraphEditor" ] )
self.assertEqual( l.names( persistent = False ), [ "JustTheGraphEditor" ] )
self.assertEqual( l.names( persistent = True ), [] )
l.add( "JustTheNodeEditor", "GafferUI.NodeEditor( script )", persistent = True )
self.assertEqual( l.names(), [ "JustTheGraphEditor", "JustTheNodeEditor" ] )
self.assertEqual( l.names( persistent = False ), [ "JustTheGraphEditor" ] )
self.assertEqual( l.names( persistent = True ), [ "JustTheNodeEditor" ] )
def testNoPersistentLayoutsInDefaultConfigs( self ) :
app = Gaffer.Application()
# Load the GUI config, making sure we only use the standard
# startup files, and not any others from the current environment
# (the user running these tests may have their own personal configs).
startupPaths = os.environ["GAFFER_STARTUP_PATHS"]
try :
os.environ["GAFFER_STARTUP_PATHS"] = os.path.expandvars( "$GAFFER_ROOT/startup" )
app._executeStartupFiles( "gui" )
finally :
os.environ["GAFFER_STARTUP_PATHS"] = startupPaths
self.assertEqual( os.environ["GAFFER_STARTUP_PATHS"], startupPaths )
layouts = GafferUI.Layouts.acquire( app )
self.assertEqual( layouts.names( persistent = True ), [] )
self.assertGreater( len( layouts.names() ), 0 )
def testRestore( self ) :
s = Gaffer.ScriptNode()
c = GafferUI.CompoundEditor( s )
editors = list((
GafferUI.NodeEditor( s ),
GafferUI.AnimationEditor( s ),
GafferUI.GraphEditor( s ),
GafferUI.PythonEditor( s )
))
editorTypes = [ type(e) for e in editors ]
for e in editors[:2] :
c.addEditor( e )
p = c._createDetachedPanel()
for e in editors[2:] :
p.addEditor( e )
self.assertEqual( len(c._detachedPanels()), 1 )
self.assertEqual( c.editors(), editors )
a = Gaffer.ApplicationRoot( "testApp" )
l = GafferUI.Layouts.acquire( a )
l.add( "ReprTest", repr(c), persistent = False )
cc = l.create( "ReprTest", s )
self.assertEqual( len(cc._detachedPanels()), 1 )
ct = [ type(e) for e in cc.editors() ]
self.assertEqual( ct, editorTypes )
self.assertEqual( repr(cc.editors()), repr(editors) )
def testNodeSetRestore( self ) :
s = Gaffer.ScriptNode()
c = GafferUI.CompoundEditor( s )
editors = list((
GafferUI.NodeEditor( s ),
GafferUI.NodeEditor( s ),
GafferUI.AnimationEditor( s ),
GafferUI.NodeEditor( s )
))
editors[0].setNodeSet( Gaffer.NumericBookmarkSet( s, 1 ) )
editors[1].setNodeSet( Gaffer.NumericBookmarkSet( s, 2 ) )
editors[2].setNodeSet( Gaffer.NumericBookmarkSet( s, 3 ) )
for e in editors :
c.addEditor( e )
a = Gaffer.ApplicationRoot( "testApp" )
l = GafferUI.Layouts.acquire( a )
l.add( "ReprNodeSetTest", repr(c), persistent = False )
cc = l.create( "ReprNodeSetTest", s )
editors = cc.editors()
ns = editors[0].getNodeSet()
self.assertTrue( isinstance( ns, Gaffer.NumericBookmarkSet ) )
self.assertTrue( ns.getBookmark(), 1 )
ns = editors[1].getNodeSet()
self.assertTrue( isinstance( ns, Gaffer.NumericBookmarkSet ) )
self.assertTrue( ns.getBookmark(), 2 )
ns = editors[2].getNodeSet()
self.assertTrue( isinstance( ns, Gaffer.NumericBookmarkSet ) )
self.assertTrue( ns.getBookmark(), 3 )
ns = editors[3].getNodeSet()
self.assertTrue( isinstance( ns, Gaffer.StandardSet ) )
def testSetNodeSetDriverRestore( self ) :
s = Gaffer.ScriptNode()
c = GafferUI.CompoundEditor( s )
GafferUI.NodeSetEditor.registerNodeSetDriverMode( "testMode", lambda e, t : t.getNodeSet() )
editors = list((
GafferUI.NodeEditor( s ),
GafferUI.NodeEditor( s ),
GafferUI.AnimationEditor( s ),
GafferUI.NodeEditor( s )
))
editors[0].setNodeSetDriver( editors[1] )
editors[2].setNodeSetDriver( editors[3], "testMode" )
for e in editors :
c.addEditor( e )
a = Gaffer.ApplicationRoot( "testApp" )
l = GafferUI.Layouts.acquire( a )
l.add( "ReprDriverTest", repr(c), persistent = False )
cc = l.create( "ReprDriverTest", s )
editors = cc.editors()
driver, mode = editors[0].getNodeSetDriver()
self.assertTrue( driver is editors[1] )
self.assertTrue( mode is GafferUI.NodeSetEditor.DriverModeNodeSet )
driver, mode = editors[2].getNodeSetDriver()
self.assertTrue( driver is editors[3] )
self.assertTrue( mode is "testMode" )
driver, mode = editors[3].getNodeSetDriver()
self.assertIsNone( driver )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
xiaojunwu/crosswalk-test-suite | wrt/wrt-rtcoremanu-android-tests/inst.xpk.py | 187 | 5919 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = "/home/app/content"
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
def doCMD(cmd):
# No need to handle a timeout in this short script; let the tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
if "xwalkctl" in cmd:
cmd = "su - app -c '%s;%s'" % (XW_ENV, cmd)
return cmd
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('xwalkctl'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('xwalkctl'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_app_id = None
for line in output:
pkg_infos = line.split()
if len(pkg_infos) == 1:
continue
name = pkg_infos[1]
if pkg_name == name:
test_app_id = pkg_infos[0]
print test_app_id
break
return test_app_id
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
if return_code != 0:
return True
else:
return False
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".xpk"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"xwalkctl -u %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".xpk"):
if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"xwalkctl -i %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
# Do some special copy/delete... steps
'''
(return_code, output) = doRemoteCMD(
"mkdir -p %s/tests" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
action_status = False
'''
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
| bsd-3-clause |
Quantipy/quantipy | quantipy/core/quantify/engine.py | 1 | 98357 | import pandas as pd
import numpy as np
from scipy.stats.stats import _ttest_finish as get_pval
from itertools import combinations, chain, product
from collections import defaultdict, OrderedDict
import quantipy as qp
import pandas as pd
import numpy as np
from operator import add, sub, mul, div
from quantipy.core.view import View
from quantipy.core.cache import Cache
from quantipy.core.tools.view.logic import (
has_any, has_all, has_count,
not_any, not_all, not_count,
is_lt, is_ne, is_gt,
is_le, is_eq, is_ge,
union, intersection, get_logic_index)
from quantipy.core.helpers.functions import emulate_meta
from quantipy.core.tools.dp.prep import recode
import copy
import time
np.seterr(invalid='ignore')
class Quantity(object):
"""
The Quantity object is the main Quantipy aggregation engine.
Consists of a link's data matrix representation and sectional definition
of weight vector (wv), x-codes section (xsect) and y-codes section
(ysect). The instance methods handle creation, retrieval and manipulation
of the data input matrices and section definitions as well as the majority
of statistical calculations.
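Example (illustrative sketch; assumes a computed ``link`` taken from a
``qp.Stack`` and a weight column named ``'weight_a'`` - both are
placeholder names):
>>> q = Quantity(link, weight='weight_a')
>>> q.count(axis='x').result # weighted column bases as a DataFrame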
"""
# -------------------------------------------------
# Instance initialization
# -------------------------------------------------
def __init__(self, link, weight=None, base_all=False, ignore_flags=False):
# Collect information on wv, x- and y-section
self._ignore_flags = ignore_flags
self.ds = self._convert_to_dataset(link)
self.d = self._data
self.base_all = base_all
self._dataidx = link.get_data().index
self.meta = self._meta
if self.meta().values() == [None] * len(self.meta().values()):
self._uses_meta = False
self.meta = None
else:
self._uses_meta = True
self._cache = link.get_cache()
self.f = link.filter
self.x = link.x
if not self.x == '@':
ds_type = self.ds._get_type(self.x)
if ds_type in ['date', 'string']:
msg = "Cannot aggregate {} of type '{}'. Categorize first!"
msg = msg.format(self.x, ds_type)
raise NotImplementedError(msg)
self.y = link.y
self.w = weight if weight is not None else '@1'
self.is_weighted = False
self.type = self._get_type()
if self.type == 'nested':
self.nest_def = Nest(self.y, self.d(), self.meta()).nest()
if not self.x == '@':
self.leveled = self.ds.get_property(self.x, 'level')
else:
self.leveled = False
self._squeezed = False
self.idx_map = None
self.xdef = self.ydef = None
self.miss_x = self.miss_y = None
self.matrix = self._get_matrix()
self.is_empty = self.matrix.sum() == 0
self.switched = False
self.factorized = None
self.result = None
self.logical_conditions = []
self.cbase = self.rbase = None
self.comb_x = self.comb_y = None
self.calc_x = self.calc_y = None
self._has_x_margin = self._has_y_margin = False
def __repr__(self):
if self.result is not None:
return '%s' % (self.result)
else:
return 'Quantity - x: {}, xdef: {} y: {}, ydef: {}, w: {}'.format(
self.x, self.xdef, self.y, self.ydef, self.w)
# -------------------------------------------------
# Matrix creation and retrieval
# -------------------------------------------------
def _convert_to_dataset(self, link):
ds = qp.DataSet('')
ds._data = link.stack[link.data_key].data
ds._meta = link.get_meta()
return ds
def _data(self):
return self.ds._data
def _meta(self):
return self.ds._meta
def _get_type(self):
"""
Test variable type that can be 'simple', 'nested' or 'array'.
"""
if self._uses_meta:
masks = [self.x, self.y]
if any(mask in self.meta()['masks'].keys() for mask in masks):
mask = {
True: self.x,
False: self.y}.get(self.x in self.meta()['masks'].keys())
if self.meta()['masks'][mask]['type'] == 'array':
if self.x == '@':
self.x, self.y = self.y, self.x
return 'array'
elif '>' in self.y:
return 'nested'
else:
return 'simple'
else:
return 'simple'
def _get_wv(self):
"""
Returns the weight vector of the matrix.
"""
return self.d()[[self.w]].values
def weight(self):
"""
Weight by multiplying the indicator entries with the weight vector.
"""
self.matrix *= np.atleast_3d(self.wv)
return None
def unweight(self):
"""
Remove any weighting by dividing the matrix by itself.
"""
self.matrix /= self.matrix
return None
def _get_total(self):
"""
Return a vector of 1s for the matrix.
"""
return self.d()[['@1']].values
def _copy(self):
"""
Copy the Quantity instance, i.e. its data matrix, into a new object.
"""
m_copy = np.empty_like(self.matrix)
m_copy[:] = self.matrix
c = copy.copy(self)
c.matrix = m_copy
return c
def _switch_axes(self):
"""
"""
if self.switched:
self.switched = False
self.matrix = self.matrix.swapaxes(1, 2)
else:
self.switched = True
self.matrix = self.matrix.swapaxes(2, 1)
self.xdef, self.ydef = self.ydef, self.xdef
self._x_indexers, self._y_indexers = self._y_indexers, self._x_indexers
self.comb_x, self.comb_y = self.comb_y, self.comb_x
self.miss_x, self.miss_y = self.miss_y, self.miss_x
return self
def _reset(self):
for prop in self.__dict__.keys():
if prop in ['_uses_meta', 'base_all', '_dataidx', 'meta', '_cache',
'd', 'idx_map', 'ds', 'logical_conditions']:
pass
elif prop in ['_squeezed', 'switched']:
self.__dict__[prop] = False
else:
self.__dict__[prop] = None
self.result = None
return None
def swap(self, var, axis='x', update_axis_def=True, inplace=True):
"""
Change the Quantity's x- or y-axis keeping filter and weight setup.
All edits and aggregation results will be removed during the swap.
Parameters
----------
var : str
New variable's name used in axis swap.
axis : {'x', 'y'}, default ``'x'``
The axis to swap.
update_axis_def : bool, default True
If self is of type ``'array'``, the name and item definitions
(that are e.g. used in the ``to_df()`` method) can be updated to
reflect the swapped axis variable or kept to show the original
ones.
inplace : bool, default True
Whether to modify the Quantity inplace or return a new instance.
Returns
-------
swapped : New Quantity instance with exchanged x- or y-axis.
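Example (illustrative; variable names are assumptions):
>>> q.swap('q2b', axis='x') # same filter/weight, new x-axis
>>> q2 = q.swap('gender', axis='y', inplace=False) # new instance instead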
"""
array_swap = self.ds.is_array(self.x)
if array_swap and not axis == 'x':
err = "Cannot swap y-axis on array type Quantity!"
raise NotImplementedError(err)
test_arrays = self.ds._is_array_item(self.x) or self.ds.is_array(self.x)
if test_arrays:
new_sources = self.ds.sources(var)
if self.ds._is_array_item(self.x):
org_parent = self.ds.parents(self.x)[0].split('@')[-1]
org_sources = self.ds.sources(org_parent)
else:
org_sources = self.ds.sources(self.x)
if not len(org_sources) == len(new_sources) and array_swap:
err = "Cannot swap array-type Quantity with array of different "
err += "source items length ({} vs. {})!"
err = err.format(len(org_sources), len(new_sources))
raise ValueError(err)
if not update_axis_def and array_swap:
org_name = self.x
org_ydef = self.ydef
if self.ds._is_array_item(self.x) and self.ds.is_array(var):
org_no = self.ds.item_no(self.x)
var = self.ds.sources(var)[org_no-1]
elif self.ds.is_array(self.x) and not self.ds.is_array(var):
err = "Cannot swap array-type Quantity with non-array variable '{}'!"
raise TypeError(err.format(var))
if axis == 'x':
x = var
y = self.y
else:
x = self.x
y = var
f, w = self.f, self.w
if inplace:
swapped = self
else:
swapped = self._copy()
swapped._reset()
swapped.x, swapped.y = x, y
swapped.f, swapped.w = f, w
swapped.type = swapped._get_type()
if swapped.type == 'nested':
swapped.nest_def = Nest(swapped.y, swapped.d(), swapped.meta()).nest()
swapped._get_matrix()
if not update_axis_def and array_swap:
swapped.x = org_name
swapped.ydef = org_ydef
if not inplace:
return swapped
def rescale(self, scaling, drop=False):
"""
Modify the object's ``xdef`` property reflecting new value definitions.
Parameters
----------
scaling : dict
Mapping of old_code: new_code, given as of type int or float.
drop : bool, default False
If True, codes not included in the scaling dict will be excluded.
Returns
-------
self
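Example (illustrative; assumes the x-axis codes 1-5 exist):
>>> q.rescale({1: 0, 2: 25, 3: 50, 4: 75, 5: 100})
>>> q.summarize('mean') # means are now computed on the 0-100 factors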
"""
proper_scaling = {old_code: new_code for old_code, new_code
in scaling.items() if old_code in self.xdef}
xdef_ref = [proper_scaling[code] if code in proper_scaling.keys()
else code for code in self.xdef]
if drop:
to_drop = [code for code in self.xdef if code not in
proper_scaling.keys()]
self.exclude(to_drop, axis='x')
self.xdef = xdef_ref
return self
def exclude(self, codes, axis='x'):
"""
Wrapper for _missingfy(...keep_codes=False, ..., keep_base=False, ...)
Excludes specified codes from aggregation.
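Example (illustrative): ``q.exclude([98, 99])`` drops codes 98/99 from
both the cells and the base, while ``q.limit([1, 2, 3])`` keeps only the
listed codes and leaves the base untouched.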
"""
self._missingfy(codes, axis=axis, keep_base=False, inplace=True)
return self
def limit(self, codes, axis='x'):
"""
Wrapper for _missingfy(...keep_codes=True, ..., keep_base=True, ...)
Restrict the data matrix entries to contain the specified codes only.
"""
self._missingfy(codes, axis=axis, keep_codes=True, keep_base=True,
inplace=True)
return self
def filter(self, condition, keep_base=True, inplace=False):
"""
Use a Quantipy conditional expression to filter the data matrix entries.
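Example (illustrative; ``gender`` is an assumed column and the logic helper
comes from the module-level imports): ``q.filter({'gender': has_any([1])})``
blanks the cell entries of non-qualifying rows while keeping their base
contribution (``keep_base=True``).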
"""
if inplace:
filtered = self
else:
filtered = self._copy()
qualified_rows = self._get_logic_qualifiers(condition)
valid_rows = self.idx_map[self.idx_map[:, 0] == 1][:, 1]
filter_idx = np.in1d(valid_rows, qualified_rows)
if keep_base:
filtered.matrix[~filter_idx, 1:, :] = np.NaN
else:
filtered.matrix[~filter_idx, :, :] = np.NaN
if not inplace:
return filtered
def _get_logic_qualifiers(self, condition):
if not isinstance(condition, dict):
column = self.x
logic = condition
else:
column = condition.keys()[0]
logic = condition.values()[0]
idx, logical_expression = get_logic_index(self.d()[column], logic, self.d())
logical_expression = logical_expression.split(':')[0]
if not column == self.x:
logical_expression = logical_expression.replace('x[', column+'[')
self.logical_conditions.append(logical_expression)
return idx
def _missingfy(self, codes, axis='x', keep_codes=False, keep_base=True,
indices=False, inplace=True):
"""
Clean matrix from entries preserving or modifying the weight vector.
Parameters
----------
codes : list
A list of codes to be considered in cleaning.
axis : {'x', 'y'}, default 'x'
The axis to clean codes on. Refers to the Link object's x- and y-
axes.
keep_codes : bool, default False
Controls whether the passed codes are kept or erased from the
Quantity matrix data entries.
keep_base: bool, default True
Controls whether the weight vector is set to np.NaN alongside
the x-section rows or remains unmodified.
indices: bool, default False
If ``True``, the data matrix indices of the corresponding codes
will be returned as well.
inplace : bool, default True
Will overwrite self.matrix with the missingfied matrix by default.
If ``False``, the method will return a new np.array with the
modified entries.
Returns
-------
self or numpy.array (and optionally a list of int when ``indices=True``)
Either a new matrix is returned as numpy.array or the ``matrix``
property is modified inplace.
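Example (illustrative): ``q._missingfy([98, 99], axis='x', keep_base=False)``
erases the code 98/99 entries and removes the affected rows from the base,
mirroring what the public ``exclude()`` wrapper does.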
"""
if inplace:
missingfied = self
else:
missingfied = self._copy()
if axis == 'y' and self.y == '@' and not self.type == 'array':
return self
elif axis == 'y' and self.type == 'array':
ni_err = 'Cannot missingfy array mask element sections!'
raise NotImplementedError(ni_err)
else:
if axis == 'y':
missingfied._switch_axes()
mis_ix = missingfied._get_drop_idx(codes, keep_codes)
mis_ix = [code + 1 for code in mis_ix]
if mis_ix is not None:
for ix in mis_ix:
np.place(missingfied.matrix[:, ix],
missingfied.matrix[:, ix] > 0, np.NaN)
if not keep_base:
if axis == 'x':
missingfied.miss_x = codes
else:
missingfied.miss_y = codes
if self.type == 'array':
mask = np.nansum(missingfied.matrix[:, missingfied._x_indexers],
axis=1, keepdims=True)
mask /= mask
mask = mask > 0
else:
mask = np.nansum(np.sum(missingfied.matrix,
axis=1, keepdims=False),
axis=1, keepdims=True) > 0
mask = np.where(~mask)
missingfied.matrix[mask] = np.NaN
if axis == 'y':
missingfied._switch_axes()
if inplace:
self = missingfied
if indices:
return mis_ix
else:
if indices:
return missingfied, mis_ix
else:
return missingfied
def _autodrop_stats_missings(self):
if self.x == '@':
pass
elif self.ds._has_missings(self.x):
to_drop = self.ds._get_missing_list(self.x, globally=False)
self.exclude(to_drop)
return None
def _clean_from_global_missings(self):
if self.x == '@':
pass
elif self.ds._has_missings(self.x):
excluded = self.ds._get_missing_list(self.x, globally=True)
excluded_codes = excluded
excluded_idxer = self._missingfy(excluded, keep_base=False,
indices=True)
self.xdef = [x_c for x_c in self.xdef if x_c not in excluded_codes]
get_rows = sorted([x_idx for x_idx in self._x_indexers
if x_idx not in excluded_idxer])
self.matrix = self.matrix[:, [0] + get_rows]
self._x_indexers = self._get_x_indexers()
else:
pass
return None
def _drop_pairwise(self):
if self.ds._has_missings(self.y):
to_drop = self.ds._get_missing_list(self.y, globally=False)
self.exclude(to_drop, axis='y')
return self
def _get_drop_idx(self, codes, keep):
"""
Produces a list of indices referring to the given input matrix's axes
sections in order to erase data entries.
Parameters
----------
codes : list
Data codes that should be dropped from or kept in the matrix.
keep : boolean
Controls whether the passed code definition is interpreted as
"codes to keep" or "codes to drop".
Returns
-------
drop_idx : list
List of x section matrix indices.
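For example (illustrative), with ``xdef=[1, 2, 3]`` and ``codes=[2]``:
``keep=False`` returns ``[1]`` (drop code 2), while ``keep=True`` returns
``[0, 2]`` (drop everything except code 2).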
"""
if codes is None:
return None
else:
if keep:
return [self.xdef.index(code) for code in self.xdef
if code not in codes]
else:
return [self.xdef.index(code) for code in codes
if code in self.xdef]
@classmethod
def _is_complex_logic(cls, logic):
return isinstance(logic, (tuple, dict))
def group(self, groups, axis='x', expand=None, complete=False):
"""
Build simple or logical net vectors, optionally keeping originating codes.
Parameters
----------
groups : list, dict of lists or logic expression
The group/net code definition(s) in form of...
* a simple list: ``[1, 2, 3]``
* a dict of list: ``{'grp A': [1, 2, 3], 'grp B': [4, 5, 6]}``
* a logical expression: ``not_any([1, 2])``
axis : {``'x'``, ``'y'``}, default ``'x'``
The axis to group codes on.
expand : {None, ``'before'``, ``'after'``}, default ``None``
If ``'before'``, the codes that are grouped will be kept and placed
before the grouped aggregation; vice versa for ``'after'``. Ignored
on logical expressions found in ``groups``.
complete : bool, default False
If True, codes that define the Link on the given ``axis`` but are
not present in the ``groups`` definition(s) will be placed in their
natural position within the aggregation, respecting the value of
``expand``.
Returns
-------
None
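Example (illustrative; net names and codes are assumptions):
>>> q.group([{'Top2': [4, 5]}, {'Bottom3': [1, 2, 3]}], axis='x')
>>> q.count() # one aggregated row per net instead of the single codes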
"""
# check validity and clean combine instructions
if axis == 'y' and self.type == 'array':
ni_err_array = 'Array mask element sections cannot be combined.'
raise NotImplementedError(ni_err_array)
elif axis == 'y' and self.y == '@':
val_err = 'Total link has no y-axis codes to combine.'
raise ValueError(val_err)
grp_def = self._organize_grp_def(groups, expand, complete, axis)
combines = []
names = []
if self.type == 'array' and any(self._is_complex_logic(l[1])
for l in grp_def):
msg = ('Cannot use complex logic for array summary grouping')
raise NotImplementedError(msg)
# generate the net vectors (+ possible expanded originating codes)
for grp in grp_def:
name, group, exp, logical = grp[0], grp[1], grp[2], grp[3]
one_code = len(group) == 1
if one_code and not logical:
vec = self._slice_vec(group[0], axis=axis)
elif not logical and not one_code:
vec, idx = self._grp_vec(group, axis=axis)
else:
vec = self._logic_vec(group)
if one_code and exp:
exp = None
if axis == 'y':
self._switch_axes()
if exp is not None:
m_idx = [ix for ix in self._x_indexers if ix not in idx]
m_idx = self._sort_indexer_as_codes(m_idx, group)
if exp == 'after':
names.extend(name)
names.extend([c for c in group])
combines.append(
np.concatenate([vec, self.matrix[:, m_idx]], axis=1))
else:
names.extend([c for c in group])
names.extend(name)
combines.append(
np.concatenate([self.matrix[:, m_idx], vec], axis=1))
else:
names.extend(name)
combines.append(vec)
if axis == 'y':
self._switch_axes()
# re-construct the combined data matrix
combines = np.concatenate(combines, axis=1)
if axis == 'y':
self._switch_axes()
combined_matrix = np.concatenate([self.matrix[:, [0]],
combines], axis=1)
if axis == 'y':
combined_matrix = combined_matrix.swapaxes(1, 2)
self._switch_axes()
# update the sectional information
new_sect_def = range(0, combined_matrix.shape[1] - 1)
if axis == 'x':
self.xdef = new_sect_def
self._x_indexers = self._get_x_indexers()
self.comb_x = names
else:
self.ydef = new_sect_def
self._y_indexers = self._get_y_indexers()
self.comb_y = names
self.matrix = combined_matrix
return self
def _slice_vec(self, code, axis='x'):
'''
'''
if axis == 'x':
code_idx = self.xdef.index(code) + 1
else:
code_idx = self.ydef.index(code) + 1
if axis == 'x':
m_slice = self.matrix[:, [code_idx]]
else:
self._switch_axes()
m_slice = self.matrix[:, [code_idx]]
self._switch_axes()
return m_slice
def _grp_vec(self, codes, axis='x'):
netted, idx = self._missingfy(codes=codes, axis=axis,
keep_codes=True, keep_base=True,
indices=True, inplace=False)
if axis == 'y':
netted._switch_axes()
net_vec = np.nansum(netted.matrix[:, netted._x_indexers],
axis=1, keepdims=True)
net_vec /= net_vec
return net_vec, idx
def _logic_vec(self, condition):
"""
Create net vector of qualified rows based on passed condition.
"""
filtered = self.filter(condition=condition, inplace=False)
net_vec = np.nansum(filtered.matrix[:, self._x_indexers], axis=1,
keepdims=True)
net_vec /= net_vec
return net_vec
def _grp_type(self, grp_def):
if isinstance(grp_def, list):
if not isinstance(grp_def[0], (int, float)):
return 'block'
else:
return 'list'
elif isinstance(grp_def, tuple):
return 'logical'
elif isinstance(grp_def, dict):
return 'wildcard'
def _add_unused_codes(self, grp_def_list, axis):
'''
'''
query_codes = self.xdef if axis == 'x' else self.ydef
frame_lookup = {c: [[c], [c], None, False] for c in query_codes}
frame = [[code] for code in query_codes]
for grpdef_idx, grpdef in enumerate(grp_def_list):
for code in grpdef[1]:
if [code] in frame:
if grpdef not in frame:
frame[frame.index([code])] = grpdef
else:
frame[frame.index([code])] = '-'
frame = [code for code in frame if not code == '-']
for code in frame:
if isinstance(code[0], list):
check = code[0][0]
else:
check = code[0]
if check in frame_lookup.keys():
frame[frame.index([code[0]])] = frame_lookup[code[0]]
return frame
def _organize_grp_def(self, grp_def, method_expand, complete, axis):
"""
Sanitize a combine instruction list (of dicts): names, codes, expands.
"""
organized_def = []
codes_used = []
any_extensions = complete
any_logical = False
if method_expand is None and complete:
method_expand = 'before'
if not self._grp_type(grp_def) == 'block':
grp_def = [{'net': grp_def, 'expand': method_expand}]
for grp in grp_def:
if any(isinstance(val, (tuple, dict)) for val in grp.values()):
if complete:
ni_err = ('Logical expr. unsupported when complete=True. '
'Only list-type nets/groups can be completed.')
raise NotImplementedError(ni_err)
if 'expand' in grp.keys():
del grp['expand']
expand = None
logical = True
else:
if 'expand' in grp.keys():
grp = copy.deepcopy(grp)
expand = grp['expand']
if expand is None and complete:
expand = 'before'
del grp['expand']
else:
expand = method_expand
logical = False
organized_def.append([grp.keys(), grp.values()[0], expand, logical])
if expand:
any_extensions = True
if logical:
any_logical = True
codes_used.extend(grp.values()[0])
if not any_logical:
if len(set(codes_used)) != len(codes_used) and any_extensions:
ni_err_extensions = ('Same codes in multiple groups unsupported '
'with expand and/or complete =True.')
raise NotImplementedError(ni_err_extensions)
if complete:
return self._add_unused_codes(organized_def, axis)
else:
return organized_def
def _force_to_nparray(self):
"""
Convert the aggregation result into its numpy array equivalent.
"""
if isinstance(self.result, pd.DataFrame):
self.result = self.result.values
return True
else:
return False
def _attach_margins(self):
"""
Force margins back into the current Quantity.result if none are found.
"""
if not self._res_is_stat():
values = self.result
if not self._has_y_margin and not self.y == '@':
margins = False
values = np.concatenate([self.rbase[1:, :], values], 1)
else:
margins = True
if not self._has_x_margin:
margins = False
values = np.concatenate([self.cbase, values], 0)
else:
margins = True
self.result = values
return margins
else:
return False
def _organize_expr_def(self, expression, axis):
"""
"""
# Prepare expression parts and lookups for indexing the agg. result
val1, op, val2 = expression[0], expression[1], expression[2]
if self._res_is_stat():
idx_c = [self.current_agg]
offset = 0
else:
if axis == 'x':
idx_c = self.xdef if not self.comb_x else self.comb_x
else:
idx_c = self.ydef if not self.comb_y else self.comb_y
offset = 1
# Test expression validity and find np.array indices / prepare scalar
# values of the expression
idx_err = '"{}" not found in {}-axis.'
# [1] input is 1. scalar, 2. vector from the agg. result
if isinstance(val1, list):
if not val2 in idx_c:
raise IndexError(idx_err.format(val2, axis))
val1 = val1[0]
val2 = idx_c.index(val2) + offset
expr_type = 'scalar_1'
# [2] input is 1. vector from the agg. result, 2. scalar
elif isinstance(val2, list):
if not val1 in idx_c:
raise IndexError(idx_err.format(val1, axis))
val1 = idx_c.index(val1) + offset
val2 = val2[0]
expr_type = 'scalar_2'
# [3] input is two vectors from the agg. result
elif not any(isinstance(val, list) for val in [val1, val2]):
if not val1 in idx_c:
raise IndexError(idx_err.format(val1, axis))
if not val2 in idx_c:
raise IndexError(idx_err.format(val2, axis))
val1 = idx_c.index(val1) + offset
val2 = idx_c.index(val2) + offset
expr_type = 'vectors'
return val1, op, val2, expr_type, idx_c
@staticmethod
def constant(num):
return [num]
def calc(self, expression, axis='x', result_only=False):
"""
Compute (simple) aggregation level arithmetics.
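Example (illustrative; ``sub`` is one of the ``operator`` functions imported
at module level and codes 1 and 2 are assumed to exist on the x-axis):
>>> q.count()
>>> q.calc({'Net difference': (1, sub, 2)}, axis='x')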
"""
unsupported = ['cbase', 'ebase', 'rbase', 'summary', 'x_sum', 'y_sum']
if self.result is None:
raise ValueError('No aggregation to base calculation on.')
elif self.current_agg in unsupported:
ni_err = 'Aggregation type "{}" not supported.'
raise NotImplementedError(ni_err.format(self.current_agg))
elif axis not in ['x', 'y']:
raise ValueError('Invalid axis parameter: {}'.format(axis))
is_df = self._force_to_nparray()
has_margin = self._attach_margins()
values = self.result
expr_name = expression.keys()[0]
if axis == 'x':
self.calc_x = expr_name
else:
self.calc_y = expr_name
values = values.T
expr = expression.values()[0]
v1, op, v2, exp_type, index_codes = self._organize_expr_def(expr, axis)
# ====================================================================
# TODO: generalize this calculation part so that it can "parse"
# arbitrary calculation rules given as nested or concatenated
# operators/codes sequences.
if exp_type == 'scalar_1':
val1, val2 = v1, values[[v2], :]
elif exp_type == 'scalar_2':
val1, val2 = values[[v1], :], v2
elif exp_type == 'vectors':
val1, val2 = values[[v1], :], values[[v2], :]
calc_res = op(val1, val2)
if op.__name__ == 'div':
calc_res = np.multiply(calc_res, 100)
# ====================================================================
if axis == 'y':
calc_res = calc_res.T
ap_axis = 0 if axis == 'x' else 1
if result_only:
if not self._res_is_stat():
self.result = np.concatenate([self.result[[0], :], calc_res],
ap_axis)
else:
self.result = calc_res
else:
self.result = np.concatenate([self.result, calc_res], ap_axis)
if axis == 'x':
self.calc_x = index_codes + [self.calc_x]
else:
self.calc_y = index_codes + [self.calc_y]
self.cbase = self.result[[0], :]
if self.type in ['simple', 'nested']:
self.rbase = self.result[:, [0]]
else:
self.rbase = None
if not self._res_is_stat():
self.current_agg = 'calc'
self._organize_margins(has_margin)
else:
self.current_agg = 'calc'
if is_df:
self.to_df()
return self
def count(self, axis=None, raw_sum=False, cum_sum=False, effective=False,
margin=True, as_df=True):
"""
Count entries over all cells or per axis margin.
Parameters
----------
axis : {None, 'x', 'y'}, default None
When axis is None, the frequency of all cells from the uni- or
multivariate distribution is presented. If the axis is specified
to be either 'x' or 'y' the margin per axis becomes the resulting
aggregation.
raw_sum : bool, default False
If True, will perform a simple summation over the cells given the
axis parameter. This ignores net counting of qualifying answers in
favour of summing over all answers given when considering margins.
cum_sum : bool, default False
If True a cumulative sum of the elements along the given axis is
returned.
effective : bool, default False
If True, compute effective counts instead of traditional (weighted)
counts.
margin : bool, default True
Controls whether the margins of the aggregation result are shown.
This also applies to margin aggregations themselves, since they
contain a margin (in form of the total number of cases) as well.
as_df : bool, default True
Controls whether the aggregation is transformed into a Quantipy-
multiindexed (following the Question/Values convention)
pandas.DataFrame or will be left in its numpy.array format.
Returns
-------
self
Passes a pandas.DataFrame or numpy.array of cell or margin counts
to the ``result`` property.
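Example (illustrative):
>>> q.count() # full cross-tabulated cell counts incl. margins
>>> q.count(axis='x', effective=True) # effective column bases
>>> q.count(cum_sum=True) # cumulative frequencies down the x-axis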
"""
if effective and (axis != 'x' or raw_sum or cum_sum):
msg = 'Can currently only calculate effective counts across x-axis!'
raise NotImplementedError(msg)
if axis is None and raw_sum:
msg = 'Cannot calculate raw sum without axis.'
raise ValueError(msg)
if raw_sum and cum_sum:
msg = 'Can only apply raw sum or cumulative sum, not both.'
raise ValueError(msg)
if cum_sum and axis is not None:
msg = "Cumulative frequencies do not support the 'axis' argument."
raise ValueError(msg)
if axis is None:
self.current_agg = 'freq'
elif axis == 'x':
if raw_sum:
self.current_agg = 'x_sum'
elif effective:
self.current_agg = 'ebase'
else:
self.current_agg = 'cbase'
elif axis == 'y':
self.current_agg = 'rbase' if not raw_sum else 'y_sum'
if not self.w == '@1' and not effective:
self.weight()
if not self.is_empty or (self._uses_meta and not self._blank_numeric()):
if not effective:
counts = np.nansum(self.matrix, axis=0)
else:
counts = self._effective_n(axis=axis)
else:
counts = self._empty_result()
self.cbase = counts[[0], :]
if self.type in ['simple', 'nested']:
self.rbase = counts[:, [0]]
else:
self.rbase = None
if axis is None:
self.result = counts
if cum_sum:
np.cumsum(counts[1:, :], axis=0, out=counts[1:, :])
# updating margins!
if self.rbase is not None: self.rbase = counts[:, [0]]
if self.cbase is not None: self.cbase = counts[[0], :]
self.result = counts
elif axis == 'x':
if raw_sum:
self.result = np.nansum(counts[1:, :], axis=0, keepdims=True)
else:
self.result = counts[[0], :]
elif axis == 'y':
if raw_sum:
if self.x == '@' or self.y == '@':
self.result = counts[:, [0]]
else:
self.result = np.nansum(counts[:, 1:], axis=1, keepdims=True)
else:
self.result = counts[:, [0]]
self._organize_margins(margin)
if as_df:
self.to_df()
self.unweight()
return self
def _blank_numeric(self):
"""
"""
blank_x = False
blank_y = False
numeric = ['int', 'float']
if not self._get_type() == 'array':
if self._meta()['columns'][self.x]['type'] in numeric:
if len(self.xdef) == 0:
blank_x = True
if not self.y == '@':
if self._meta()['columns'][self.y]['type'] in numeric:
if len(self.ydef) == 0:
blank_y = True
blank_numeric = True if (blank_x or blank_y) else False
return blank_numeric
def _empty_result(self):
if self._res_is_stat() or self.current_agg == 'summary':
self.factorized = 'x'
xdim = 1 if self._res_is_stat() else 8
if self.ydef is None:
ydim = 1
elif self.ydef is not None and len(self.ydef) == 0:
ydim = 2
else:
ydim = len(self.ydef)
if not self.type == 'array': ydim += 1
else:
if self.xdef is not None:
if len(self.xdef) == 0:
xdim = 2
else:
xdim = len(self.xdef) + 1
if self.ydef is None:
ydim = 1
elif self.ydef is not None and len(self.ydef) == 0:
ydim = 2
else:
ydim = len(self.ydef) + 1
elif self.xdef is None:
xdim = 2
if self.ydef is None:
ydim = 1
elif self.ydef is not None and len(self.ydef) == 0:
ydim = 2
else:
ydim = len(self.ydef) + 1
return np.zeros((xdim, ydim))
def _effective_n(self, axis=None, margin=True):
self.weight()
effective = (np.nansum(self.matrix, axis=0)**2 /
np.nansum(self.matrix**2, axis=0))
self.unweight()
start_on = 0 if margin else 1
if axis is None:
return effective[start_on:, start_on:]
elif axis == 'x':
return effective[[0], start_on:]
else:
return effective[start_on:, [0]]
def summarize(self, stat='summary', axis='x', margin=True, as_df=True):
"""
Calculate distribution statistics across the given axis.
Parameters
----------
stat : {'summary', 'mean', 'median', 'var', 'stddev', 'sem', varcoeff',
'min', 'lower_q', 'upper_q', 'max'}, default 'summary'
The measure to calculate. Defaults to a summary output of the most
important sample statistics.
axis : {'x', 'y'}, default 'x'
The axis which is reduced in the aggregation, e.g. column vs. row
means.
margin : bool, default True
Controls whether statistic(s) of the marginal distribution are
shown.
as_df : bool, default True
Controls whether the aggregation is transformed into a Quantipy-
multiindexed (following the Question/Values convention)
pandas.DataFrame or will be left in its numpy.array format.
Returns
-------
self
Passes a pandas.DataFrame or numpy.array of the descriptive (summary)
statistic(s) to the ``result`` property.
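Example (illustrative):
>>> q.summarize() # the full sample statistics summary
>>> q.summarize('mean', margin=False) # column means without the total column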
"""
self.current_agg = stat
if self.is_empty:
self.result = self._empty_result()
else:
self._autodrop_stats_missings()
if stat == 'summary':
stddev, mean, base = self._dispersion(axis, measure='sd',
_return_mean=True,
_return_base=True)
self.result = np.concatenate([
base, mean, stddev,
self._min(axis),
self._percentile(perc=0.25),
self._percentile(perc=0.50),
self._percentile(perc=0.75),
self._max(axis)
], axis=0)
elif stat == 'mean':
self.result = self._means(axis)
elif stat == 'var':
self.result = self._dispersion(axis, measure='var')
elif stat == 'stddev':
self.result = self._dispersion(axis, measure='sd')
elif stat == 'sem':
self.result = self._dispersion(axis, measure='sem')
elif stat == 'varcoeff':
self.result = self._dispersion(axis, measure='varcoeff')
elif stat == 'min':
self.result = self._min(axis)
elif stat == 'lower_q':
self.result = self._percentile(perc=0.25)
elif stat == 'median':
self.result = self._percentile(perc=0.5)
elif stat == 'upper_q':
self.result = self._percentile(perc=0.75)
elif stat == 'max':
self.result = self._max(axis)
self._organize_margins(margin)
if as_df:
self.to_df()
return self
def _factorize(self, axis='x', inplace=True):
self.factorized = axis
if inplace:
factorized = self
else:
factorized = self._copy()
if axis == 'y':
factorized._switch_axes()
np.copyto(factorized.matrix[:, 1:, :],
np.atleast_3d(factorized.xdef),
where=factorized.matrix[:, 1:, :]>0)
if not inplace:
return factorized
def _means(self, axis, _return_base=False):
fact = self._factorize(axis=axis, inplace=False)
if not self.w == '@1':
fact.weight()
fact_prod = np.nansum(fact.matrix, axis=0)
fact_prod_sum = np.nansum(fact_prod[1:, :], axis=0, keepdims=True)
bases = fact_prod[[0], :]
means = fact_prod_sum/bases
if axis == 'y':
self._switch_axes()
means = means.T
bases = bases.T
if _return_base:
return means, bases
else:
return means
def _dispersion(self, axis='x', measure='sd', _return_mean=False,
_return_base=False):
"""
Extracts measures of dispersion from the incoming distribution of
X vs. Y. Can return the arithm. mean by request as well. Dispersion
measures supported are standard deviation, variance, coefficient of
variation and standard error of the mean.
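For reference (informal sketch of what the implementation computes, with
weighted base N and mean m): var = sum(w * (x - m)**2) / (N - 1),
sd = sqrt(var), sem = sd / sqrt(N), varcoeff = sd / m.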
"""
means, bases = self._means(axis, _return_base=True)
unbiased_n = bases - 1
self.unweight()
factorized = self._factorize(axis, inplace=False)
factorized.matrix[:, 1:] -= means
factorized.matrix[:, 1:] *= factorized.matrix[:, 1:, :]
if not self.w == '@1':
factorized.weight()
diff_sqrt = np.nansum(factorized.matrix[:, 1:], axis=1)
disp = np.nansum(diff_sqrt/unbiased_n, axis=0, keepdims=True)
disp[disp <= 0] = np.NaN
disp[np.isinf(disp)] = np.NaN
if measure == 'sd':
disp = np.sqrt(disp)
elif measure == 'sem':
disp = np.sqrt(disp) / np.sqrt((unbiased_n + 1))
elif measure == 'varcoeff':
disp = np.sqrt(disp) / means
self.unweight()
if _return_mean and _return_base:
return disp, means, bases
elif _return_mean:
return disp, means
elif _return_base:
return disp, bases
else:
return disp
def _max(self, axis='x'):
factorized = self._factorize(axis, inplace=False)
vals = np.nansum(factorized.matrix[:, 1:, :], axis=1)
return np.nanmax(vals, axis=0, keepdims=True)
def _min(self, axis='x'):
factorized = self._factorize(axis, inplace=False)
vals = np.nansum(factorized.matrix[:, 1:, :], axis=1)
if 0 not in factorized.xdef: np.place(vals, vals == 0, np.inf)
return np.nanmin(vals, axis=0, keepdims=True)
def _percentile(self, axis='x', perc=0.5):
"""
Computes percentiles from the incoming distribution of X vs.Y and the
requested percentile value. The implementation mirrors the algorithm
used in SPSS Dimensions and the EXAMINE procedure in SPSS Statistics.
It is based on the percentile definition #6 (adjusted for survey weights)
in:
Hyndman, Rob J. and Fan, Yanan (1996) -
"Sample Quantiles in Statistical Packages",
The American Statistician, 50, No. 4, 361-365.
Parameters
----------
axis : {'x', 'y'}, default 'x'
The axis which is reduced in the aggregation, i.e. column vs. row
medians.
perc : float, default 0.5
Defines the percentile to be computed. Defaults to 0.5,
the sample median.
Returns
-------
percs : np.array
Numpy array storing percentile values.
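Worked example (illustrative, unweighted): for values [1, 2, 3, 4] with
unit weights and ``perc=0.5`` the rank is k = (4 + 1) * 0.5 = 2.5, so the
result interpolates halfway between the 2nd and 3rd sorted values: 2.5.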
"""
percs = []
w = self.matrix * np.atleast_3d(self.wv)
w = np.nansum(np.nansum(w[:, 1:, :], axis=1, keepdims=True), axis=1)
factorized = self._factorize(axis, inplace=False)
vals = np.nansum(np.nansum(factorized.matrix[:, 1:, :], axis=1,
keepdims=True), axis=1)
for shape_i in range(0, vals.shape[1]):
iter_weights = w[:, shape_i]
iter_vals = vals[:, shape_i]
mask = ~np.isnan(iter_weights)
iter_weights = iter_weights[mask]
iter_vals = iter_vals[mask]
sorter = np.argsort(iter_vals)
iter_vals = np.take(iter_vals, sorter)
iter_weights = np.take(iter_weights, sorter)
iter_wsum = np.nansum(iter_weights, axis=0)
iter_wcsum = np.cumsum(iter_weights, axis=0)
k = (iter_wsum + 1.0) * perc
if iter_vals.shape[0] == 0:
percs.append(0.00)
elif iter_vals.shape[0] == 1:
percs.append(iter_vals[0])
elif iter_wcsum[0] > k:
wcsum_k = iter_wcsum[0]
percs.append(iter_vals[0])
elif iter_wcsum[-1] <= k:
percs.append(iter_vals[-1])
else:
wcsum_k = iter_wcsum[iter_wcsum <= k][-1]
p_k_idx = np.searchsorted(np.ndarray.flatten(iter_wcsum), wcsum_k)
p_k = iter_vals[p_k_idx]
p_k1 = iter_vals[p_k_idx+1]
w_k1 = iter_weights[p_k_idx+1]
excess = k - wcsum_k
if excess >= 1.0:
percs.append(p_k1)
else:
if w_k1 >= 1.0:
percs.append((1.0-excess)*p_k + excess*p_k1)
else:
percs.append((1.0-(excess/w_k1))*p_k +
(excess/w_k1)*p_k1)
return np.array(percs)[None, :]
def _organize_margins(self, margin):
if self._res_is_stat():
if self.type == 'array' or self.y == '@' or self.x == '@':
self._has_y_margin = self._has_x_margin = False
else:
if self.factorized == 'x':
if not margin:
self._has_x_margin = False
self._has_y_margin = False
self.result = self.result[:, 1:]
else:
self._has_x_margin = False
self._has_y_margin = True
else:
if not margin:
self._has_x_margin = False
self._has_y_margin = False
self.result = self.result[1:, :]
else:
self._has_x_margin = True
self._has_y_margin = False
if self._res_is_margin():
if self.y == '@' or self.x == '@':
if self.current_agg in ['cbase', 'x_sum', 'ebase']:
self._has_y_margin = self._has_x_margin = False
if self.current_agg in ['rbase', 'y_sum']:
if not margin:
self._has_y_margin = self._has_x_margin = False
self.result = self.result[1:, :]
else:
self._has_x_margin = True
self._has_y_margin = False
else:
if self.current_agg in ['cbase', 'x_sum', 'ebase']:
if not margin:
self._has_y_margin = self._has_x_margin = False
self.result = self.result[:, 1:]
else:
self._has_x_margin = False
self._has_y_margin = True
if self.current_agg in ['rbase', 'y_sum']:
if not margin:
self._has_y_margin = self._has_x_margin = False
self.result = self.result[1:, :]
else:
self._has_x_margin = True
self._has_y_margin = False
elif self.current_agg in ['freq', 'summary', 'calc']:
if self.type == 'array' or self.y == '@' or self.x == '@':
if not margin:
self.result = self.result[1:, :]
self._has_x_margin = False
self._has_y_margin = False
else:
self._has_x_margin = True
self._has_y_margin = False
else:
if not margin:
self.result = self.result[1:, 1:]
self._has_x_margin = False
self._has_y_margin = False
else:
self._has_x_margin = True
self._has_y_margin = True
else:
pass
def _sort_indexer_as_codes(self, indexer, codes):
mapping = sorted(zip(indexer, codes), key=lambda l: l[1])
return [i[0] for i in mapping]
def _get_y_indexers(self):
if self._squeezed or self.type in ['simple', 'nested']:
if self.ydef is not None:
idxs = range(1, len(self.ydef)+1)
return self._sort_indexer_as_codes(idxs, self.ydef)
else:
return [1]
else:
y_indexers = []
xdef_len = len(self.xdef)
zero_based_ys = [idx for idx in xrange(0, xdef_len)]
for y_no in xrange(0, len(self.ydef)):
if y_no == 0:
y_indexers.append(zero_based_ys)
else:
y_indexers.append([idx + y_no * xdef_len
for idx in zero_based_ys])
return y_indexers
def _get_x_indexers(self):
if self._squeezed or self.type in ['simple', 'nested']:
idxs = range(1, len(self.xdef)+1)
return self._sort_indexer_as_codes(idxs, self.xdef)
else:
x_indexers = []
upper_x_idx = len(self.ydef)
start_x_idx = [len(self.xdef) * offset
for offset in range(0, upper_x_idx)]
for x_no in range(0, len(self.xdef)):
x_indexers.append([idx + x_no for idx in start_x_idx])
return x_indexers
def _squeeze_dummies(self):
"""
Reshape and replace initial 2D dummy matrix into its 3D equivalent.
"""
self.wv = self.matrix[:, [-1]]
sects = []
if self.type == 'array':
x_sections = self._get_x_indexers()
y_sections = self._get_y_indexers()
y_total = np.nansum(self.matrix[:, x_sections], axis=1)
y_total /= y_total
y_total = y_total[:, None, :]
for sect in y_sections:
sect = self.matrix[:, sect]
sects.append(sect)
sects = np.dstack(sects)
self._squeezed = True
sects = np.concatenate([y_total, sects], axis=1)
self.matrix = sects
self._x_indexers = self._get_x_indexers()
self._y_indexers = []
elif self.type in ['simple', 'nested']:
x = self.matrix[:, :len(self.xdef)+1]
y = self.matrix[:, len(self.xdef)+1:-1]
for i in range(0, y.shape[1]):
sects.append(x * y[:, [i]])
sects = np.dstack(sects)
self._squeezed = True
self.matrix = sects
self._x_indexers = self._get_x_indexers()
self._y_indexers = self._get_y_indexers()
def _get_matrix(self):
wv = self._cache.get_obj('weight_vectors', self.w)
if wv is None:
wv = self._get_wv()
self._cache.set_obj('weight_vectors', self.w, wv)
total = self._cache.get_obj('weight_vectors', '@1')
if total is None:
total = self._get_total()
self._cache.set_obj('weight_vectors', '@1', total)
if self.type == 'array':
xm, self.xdef, self.ydef = self.ds.make_dummy(self.x, True)
self.matrix = np.concatenate((xm, wv), 1)
else:
if self.y == '@' or self.x == '@':
section = self.x if self.y == '@' else self.y
xm, self.xdef = self._cache.get_obj('matrices', section)
if xm is None:
xm, self.xdef = self.ds.make_dummy(section, True)
self._cache.set_obj('matrices', section, (xm, self.xdef))
self.ydef = None
self.matrix = np.concatenate((total, xm, total, wv), 1)
else:
xm, self.xdef = self._cache.get_obj('matrices', self.x)
if xm is None:
xm, self.xdef = self.ds.make_dummy(self.x, True)
self._cache.set_obj('matrices', self.x, (xm, self.xdef))
ym, self.ydef = self._cache.get_obj('matrices', self.y)
if ym is None:
ym, self.ydef = self.ds.make_dummy(self.y, True)
self._cache.set_obj('matrices', self.y, (ym, self.ydef))
self.matrix = np.concatenate((total, xm, total, ym, wv), 1)
self.matrix = self.matrix[self._dataidx]
self.matrix = self._clean()
self._squeeze_dummies()
if not self._ignore_flags:
self._clean_from_global_missings()
return self.matrix
def _clean(self):
"""
Drop empty sectional rows from the matrix.
"""
mat = self.matrix.copy()
mat_indexer = np.expand_dims(self._dataidx, 1)
if not self.type == 'array':
xmask = (np.nansum(mat[:, 1:len(self.xdef)+1], axis=1) > 0)
if self.ydef is not None:
if self.base_all:
ymask = (np.nansum(mat[:, len(self.xdef)+1:-1], axis=1) > 0)
else:
ymask = (np.nansum(mat[:, len(self.xdef)+2:-1], axis=1) > 0)
self.idx_map = np.concatenate(
[np.expand_dims(xmask & ymask, 1), mat_indexer], axis=1)
return mat[xmask & ymask]
else:
self.idx_map = np.concatenate(
[np.expand_dims(xmask, 1), mat_indexer], axis=1)
return mat[xmask]
else:
mask = (np.nansum(mat[:, :-1], axis=1) > 0)
self.idx_map = np.concatenate(
[np.expand_dims(mask, 1), mat_indexer], axis=1)
return mat[mask]
def _res_from_count(self):
return self._res_is_margin() or self.current_agg == 'freq'
def _res_from_summarize(self):
return self._res_is_stat() or self.current_agg == 'summary'
def _res_is_margin(self):
return self.current_agg in ['tbase', 'cbase', 'rbase', 'ebase', 'x_sum',
'y_sum']
def _res_is_stat(self):
return self.current_agg in ['mean', 'min', 'max', 'varcoeff', 'sem',
'stddev', 'var', 'median', 'upper_q',
'lower_q']
def to_df(self):
if self.current_agg == 'freq':
if not self.comb_x:
self.x_agg_vals = self.xdef
else:
self.x_agg_vals = self.comb_x
if not self.comb_y:
self.y_agg_vals = self.ydef
else:
self.y_agg_vals = self.comb_y
elif self.current_agg == 'calc':
if self.calc_x:
self.x_agg_vals = self.calc_x
self.y_agg_vals = self.ydef if not self.comb_y else self.comb_y
else:
self.x_agg_vals = self.xdef if not self.comb_x else self.comb_x
self.y_agg_vals = self.calc_y
elif self.current_agg == 'summary':
summary_vals = ['mean', 'stddev', 'min', '25%',
'median', '75%', 'max']
self.x_agg_vals = summary_vals
self.y_agg_vals = self.ydef
elif self.current_agg in ['x_sum', 'cbase', 'ebase']:
if self.current_agg == 'cbase':
self.x_agg_vals = 'All'
elif self.current_agg == 'ebase':
self.x_agg_vals = 'All (eff.)'
else:
self.x_agg_vals = 'sum'
self.y_agg_vals = self.ydef
elif self.current_agg in ['y_sum', 'rbase']:
self.x_agg_vals = self.xdef
self.y_agg_vals = 'All' if self.current_agg == 'rbase' else 'sum'
elif self._res_is_stat():
if self.factorized == 'x':
self.x_agg_vals = self.current_agg
self.y_agg_vals = self.ydef if not self.comb_y else self.comb_y
else:
self.x_agg_vals = self.xdef if not self.comb_x else self.comb_x
self.y_agg_vals = self.current_agg
# can this be made smarter WITHOUT 1000000 IF-ELSEs above?:
ignore = ['freq', 'cbase', 'x_sum', 'summary', 'calc', 'ebase']
if ((self.current_agg in ignore or self._res_is_stat()) and
not self.type == 'array'):
if self.y == '@' or self.x == '@':
self.y_agg_vals = '@'
df = pd.DataFrame(self.result)
idx, cols = self._make_multiindex()
df.index = idx
df.columns = cols
self.result = df if not self.x == '@' else df.T
if self.type == 'nested':
self._format_nested_axis()
return self
def _make_multiindex(self):
x_grps = self.x_agg_vals
y_grps = self.y_agg_vals
if not isinstance(x_grps, list):
x_grps = [x_grps]
if not isinstance(y_grps, list):
y_grps = [y_grps]
if not x_grps: x_grps = [None]
if not y_grps: y_grps = [None]
if self._has_x_margin:
x_grps = ['All'] + x_grps
if self._has_y_margin:
y_grps = ['All'] + y_grps
if self.type == 'array':
x_unit = y_unit = self.x
x_names = ['Question', 'Values']
y_names = ['Array', 'Questions']
else:
x_unit = self.x if not self.x == '@' else self.y
y_unit = self.y if not self.y == '@' else self.x
x_names = y_names = ['Question', 'Values']
if not isinstance(x_unit, list): x_unit = [x_unit]
if not isinstance(y_unit, list): y_unit = [y_unit]
x = [x_unit, x_grps]
y = [y_unit, y_grps]
index = pd.MultiIndex.from_product(x, names=x_names)
columns = pd.MultiIndex.from_product(y, names=y_names)
return index, columns
def _format_nested_axis(self):
nest_mi = self._make_nest_multiindex()
if not len(self.result.columns) > len(nest_mi.values):
self.result.columns = nest_mi
else:
total_mi_values = []
for var in self.nest_def['variables']:
total_mi_values += [var, -1]
total_mi = pd.MultiIndex.from_product(total_mi_values,
names=nest_mi.names)
full_nest_mi = nest_mi.union(total_mi)
for lvl, c in zip(range(1, len(full_nest_mi)+1, 2),
self.nest_def['level_codes']):
full_nest_mi.set_levels(['All'] + c, level=lvl, inplace=True)
self.result.columns = full_nest_mi
return None
def _make_nest_multiindex(self):
values = []
names = ['Question', 'Values'] * (self.nest_def['levels'])
for lvl_var, lvl_c in zip(self.nest_def['variables'],
self.nest_def['level_codes']):
values.append([lvl_var])
values.append(lvl_c)
mi = pd.MultiIndex.from_product(values, names=names)
return mi
def _get_other_base(self, other):
"""
"""
swapped = self.swap(other, inplace=False)
return swapped.count().cbase
def _normalize_on_cells(self, other):
"""
"""
is_df = self._force_to_nparray()
other_q = self.swap(other, update_axis_def=False, inplace=False)
other_len = len(other_q.xdef)
q_len = len(self.xdef)
if not other_len == q_len:
err = "Cannot normalize on '{}', shapes do not match! ({} vs. {})"
raise ValueError(err.format(other, q_len, other_len))
has_margin = self._attach_margins()
counts = other_q.count(as_df=False, margin=has_margin).result
self._organize_margins(has_margin)
self.result = (self.result / counts) * 100
if is_df: self.to_df()
return None
def normalize(self, on='y', per_cell=False):
"""
Convert a raw cell count result to its percentage representation.
Parameters
----------
on : {'y', 'x', 'counts_sum', str}, default 'y'
Defines the base to normalize the result on. ``'y'`` will
produce column percentages, ``'x'`` will produce row percentages.
It is also possible to use another question's frequencies to
compute rebased percentages providing its name instead.
per_cell : bool, default False
Compute percentages on a cell-per-cell basis, effectively treating
each categorical row as a base figure on its own. Only possible if the
``on`` argument does not indicate an axis result (``'x'``, ``'y'``,
``'counts_sum'``), but instead another variable's name. The related
``xdef`` codes collection length must be identical for this to work,
otherwise a ``ValueError`` is raised.
Returns
-------
self
Updates a count-based aggregation in the ``result`` property.
"""
rebase = on not in ['x', 'y', 'counts_sum']
other_counts = rebase and per_cell
other_base = rebase and not per_cell
if on == 'counts_sum' and (self.comb_x or self.comb_y):
raise ValueError("Groups cannot be normalized on 'counts_sum'")
if on == 'counts_sum':
is_df = self._force_to_nparray()
has_margin = self._attach_margins()
org_agg = self.current_agg
org_res = self.result
base = self.count(raw_sum=True, axis='x', as_df=False).result
self.result, self.current_agg = org_res, org_agg
if has_margin: self.result[0, :] = base[0, :]
self.result = self.result / base * 100
self._organize_margins(has_margin)
if is_df: self.to_df()
elif other_counts:
self._normalize_on_cells(on)
else:
if self.x == '@': on = 'y' if on == 'x' else 'x'
if on == 'y' or other_base:
if self._has_y_margin or self.y == '@' or self.x == '@':
if not other_base:
base = self.cbase
else:
base = self._get_other_base(on)
else:
if not other_base:
base = self.cbase
else:
base = self._get_other_base(on)
if self._get_type() != 'array':
base = base[:, 1:]
elif on == 'x':
if self._has_x_margin:
base = self.rbase
else:
base = self.rbase[1:, :]
if isinstance(self.result, pd.DataFrame):
if self.x == '@':
self.result = self.result.T
if on == 'y' or other_base:
base = np.repeat(base, self.result.shape[0], axis=0)
else:
base = np.repeat(base, self.result.shape[1], axis=1)
self.result = self.result / base * 100
if self.x == '@':
self.result = self.result.T
return self
@staticmethod
def _sects_identical(axdef1, axdef2):
return axdef1 == axdef2
@staticmethod
def _sects_different_order(axdef1, axdef2):
if not len(axdef1) == len(axdef2):
return False
else:
if all(x in axdef2 for x in axdef1):
return True
else:
return False
@staticmethod
def _sect_is_subset(axdef1, axdef2):
return len(set(axdef1).intersection(set(axdef2))) > 0
class Test(object):
"""
The Quantipy Test object is defined by a Link and the view name notation
string of a counts or means view. All auxiliary figures needed to arrive
at the test results are computed inside the instance of the object.
"""
def __init__(self, link, view_name_notation, test_total=False):
super(Test, self).__init__()
# Infer whether a mean or proportion test is being performed
view = link[view_name_notation]
if view.meta()['agg']['method'] == 'descriptives':
self.metric = 'means'
else:
self.metric = 'proportions'
self.invalid = None
self.no_pairs = None
self.no_diffs = None
self.parameters = None
self.test_total = test_total
self.mimic = None
self.level = None
# Calculate the required baseline measures for the test using the
# Quantity instance
self.Quantity = qp.Quantity(link, view.weights(), base_all=self.test_total)
if self.Quantity.type == 'array':
err = "Cannot compute significance tests on array summary!"
raise NotImplementedError(err)
if view.has_other_source():
orgx = self.Quantity.x
self.Quantity.swap(var=view.has_other_source())
cond = {orgx: not_count(0)}
self.Quantity.filter(cond, keep_base=False, inplace=True)
self.rebased = view._kwargs.get('rebased', False)
self._set_baseline_aggregates(view)
# Set information about the incoming aggregation
# to be able to route correctly through the algorithms
# and re-construct a Quantipy-indexed pd.DataFrame
self.is_weighted = view.meta()['agg']['is_weighted']
self.has_calc = view.has_calc()
self.x = view.meta()['x']['name']
self.xdef = view.dataframe.index.get_level_values(1).tolist()
self.y = view.meta()['y']['name']
self.is_nested = view.meta()['y']['is_nested']
self.y_is_multi = view.meta()['y']['is_multi']
# Figure out the test's pairs structure (regular vs. nested, etc.)
self._get_testpairs_definitons(view)
# Original pd.MultiIndex setup for both index and columns axis:
self.multiindex = (view.dataframe.index, view.dataframe.columns)
def __repr__(self):
return ('%s, total included: %s, test metric: %s, parameters: %s, '
'mimicked: %s, level: %s ')\
% (Test, self.test_total, self.metric, self.parameters,
self.mimic, self.level)
def _get_testpairs_definitons(self, view):
if not self.is_nested:
self.ydef = view.dataframe.columns.get_level_values(-1).tolist()
else:
codes = view.dataframe.columns.get_level_values(-1).tolist()
repeat = codes.count(codes[-1]) + 1
no_items = len(set(codes))
codes = codes[:no_items]
self.ydef = []
self.idmap = {}
self._valid_pairs = []
for i in range(1, repeat):
sect_codes = [int((str(c) * i)) for c in codes]
for old, new in zip(codes, sect_codes):
self.idmap[new] = old
self.ydef.extend(sect_codes)
self._valid_pairs.extend(combinations(sect_codes, 2))
columns_to_pair = ['@'] + self.ydef if self.test_total else self.ydef
self.ypairs = list(combinations(columns_to_pair, 2))
if not self.is_nested:
self.idmap = {}
self._valid_pairs = self.ypairs
return None
def _set_baseline_aggregates(self, view):
"""
Derive or recompute the basic values required by the ``Test`` instance.
"""
grps, exp, compl, calc, exclude, rescale = view.get_edit_params()
if exclude is not None:
self.Quantity.exclude(exclude)
if rescale is not None:
self.Quantity.rescale(rescale)
if self.metric == 'proportions' and self.test_total and view._has_code_expr():
self.Quantity.group(grps, expand=exp, complete=compl)
if self.metric == 'means':
aggs = self.Quantity._dispersion(_return_mean=True,
_return_base=True)
self.sd, self.values, self.cbases = aggs[0], aggs[1], aggs[2]
if not self.test_total:
self.sd = self.sd[:, 1:]
self.values = self.values[:, 1:]
self.cbases = self.cbases[:, 1:]
elif self.metric == 'proportions':
if not self.test_total or self.rebased:
if view.is_cumulative():
agg = self.Quantity.count(
margin=False, as_df=False, cum_sum=False)
self.values = agg.result
self.cbases = agg.cbase[:, 1:]
self.rbases = agg.rbase[1:, :]
self.tbase = agg.cbase[0, 0]
else:
self.values = view.dataframe.values.copy()
self.cbases = view.cbases[:, 1:]
self.rbases = view.rbases[1:, :]
self.tbase = view.cbases[0, 0]
else:
agg = self.Quantity.count(margin=True, as_df=False)
if calc is not None:
calc_only = view._kwargs.get('calc_only', False)
self.Quantity.calc(calc, axis='x', result_only=calc_only)
self.values = agg.result[1:, :]
self.cbases = agg.cbase
self.rbases = agg.rbase[1:, :]
self.tbase = agg.cbase[0, 0]
def set_params(self, test_total=False, level='mid', mimic='Dim', testtype='pooled',
use_ebase=True, ovlp_correc=True, cwi_filter=False,
flag_bases=None):
"""
Sets the test algorithm parameters and defines the type of test.
This method sets the test's global parameters and derives the
necessary measures for the computation of the test statistic.
The default values correspond to the SPSS Dimensions Column Tests
algorithms that control for bias introduced by weighting and
overlapping samples in the column pairs of multi-coded questions.
.. note:: The Dimensions implementation uses variance pooling.
Parameters
----------
test_total : bool, default False
If set to True, the test algorithms will also include an existing
total (@-) version of the original link and test against the
unconditional data distribution.
level : str or float, default 'mid'
The level of significance given either as per 'low' = 0.1,
'mid' = 0.05, 'high' = 0.01 or as specific float, e.g. 0.15.
mimic : {'askia', 'Dim'} default='Dim'
Will instruct the mimicking of a software specific test.
testtype : str, default 'pooled'
Global definition of the tests.
use_ebase : bool, default True
If True, will use the effective sample sizes instead of the
simple weighted ones when testing a weighted aggregation.
ovlp_correc : bool, default True
If True, will consider and correct for respondent overlap when
testing between multi-coded column pairs.
cwi_filter : bool, default False
If True, will check an incoming count aggregation for cells that
fall below a threshold comparison aggregation that assumes counts
to be independent.
flag_bases : list of two int, default None
If provided, the output dataframe will replace results that have
been calculated on (eff.) bases below the first int with ``'**'``
and mark results in columns with bases below the second int with
``'*'``
Returns
-------
self
"""
# Check if the aggregation is non-empty
# and that there are >1 populated columns
if not self.test_total:
if np.nansum(self.values) == 0 or len(self.ydef) == 1:
self.invalid = True
if np.nansum(self.values) == 0:
self.no_diffs = True
if len(self.ydef) == 1:
self.no_pairs = True
else:
if np.nansum(self.values) == 0 or self.y == '@':
self.invalid = True
if np.nansum(self.values) == 0:
self.no_diffs = True
if self.y == '@':
self.no_pairs = True
if self.rebased:
self.invalid = True
self.no_pairs = True
if self.invalid:
self.mimic = mimic
self.comparevalue, self.level = self._convert_level(level)
else:
# Set global test algorithm parameters
self.invalid = False
self.no_diffs = False
self.no_pairs = False
valid_mimics = ['Dim', 'askia']
if mimic not in valid_mimics:
raise ValueError('Failed to mimic: "%s". Select from: %s\n'
% (mimic, valid_mimics))
else:
self.mimic = mimic
if self.mimic == 'askia':
self.parameters = {'testtype': 'unpooled',
'use_ebase': False,
'ovlp_correc': False,
'cwi_filter': True,
'base_flags': None}
self.test_total = False
elif self.mimic == 'Dim':
self.parameters = {'testtype': 'pooled',
'use_ebase': True,
'ovlp_correc': True,
'cwi_filter': False,
'base_flags': flag_bases}
self.level = level
self.comparevalue, self.level = self._convert_level(level)
# Get value differences between column pairings
if self.metric == 'means':
self.valdiffs = np.array(
[m1 - m2 for m1, m2 in combinations(self.values[0], 2)])
if self.metric == 'proportions':
# special to askia testing: counts-when-independent filtering
if cwi_filter:
self.values = self._cwi()
props = (self.values / self.cbases).T
self.valdiffs = np.array([p1 - p2 for p1, p2
in combinations(props, 2)]).T
# Set test specific measures for Dimensions-like testing:
# [1] effective base usage
if use_ebase and self.is_weighted:
if not self.test_total:
self.ebases = self.Quantity._effective_n(axis='x', margin=False)
else:
self.ebases = self.Quantity._effective_n(axis='x', margin=True)
else:
self.ebases = self.cbases
# [2] overlap correction
if self.y_is_multi and self.parameters['ovlp_correc']:
self.overlap = self._overlap()
else:
self.overlap = np.zeros(self.valdiffs.shape)
# [3] base flags
if flag_bases:
self.flags = {'min': flag_bases[0],
'small': flag_bases[1]}
self.flags['flagged_bases'] = self._get_base_flags()
else:
self.flags = None
return self
# -------------------------------------------------
# Main algorithm methods to compute test statistics
# -------------------------------------------------
def run(self):
"""
Performs the testing algorithm and creates an output pd.DataFrame.
The output is indexed according to Quantipy's Questions->Values
convention. Significant results between columns are presented as
lists of integer y-axis codes where the column with the higher value
is holding the codes of the columns with the lower values. NaN indicates
that a cell does not hold any sig. higher values compared to the others.
"""
if not self.invalid:
sigs = self.get_sig()
return self._output(sigs)
else:
return self._empty_output()
def get_sig(self):
"""
TODO: implement returning tstats only.
"""
stat = self.get_statistic()
stat = self._convert_statistic(stat)
if self.metric == 'means':
diffs = pd.DataFrame(self.valdiffs, index=self.ypairs, columns=self.xdef).T
elif self.metric == 'proportions':
stat = pd.DataFrame(stat, index=self.xdef, columns=self.ypairs)
diffs = pd.DataFrame(self.valdiffs, index=self.xdef, columns=self.ypairs)
if self.mimic == 'Dim':
return diffs[(diffs != 0) & (stat < self.comparevalue)]
elif self.mimic == 'askia':
return diffs[(diffs != 0) & (stat > self.comparevalue)]
def get_statistic(self):
"""
Returns the test statistic of the algorithm.
"""
return self.valdiffs / self.get_se()
def get_se(self):
"""
Compute the standard error (se) estimate of the tested metric.
The calculation of the se is defined by the parameters of the setup.
The main difference is the handling of variances. **unpooled**
implicitly assumes variance inhomogenity between the column pairing's
samples. **pooled** treats variances effectively as equal.
"""
if self.metric == 'means':
if self.parameters['testtype'] == 'unpooled':
return self._se_mean_unpooled()
elif self.parameters['testtype'] == 'pooled':
return self._se_mean_pooled()
elif self.metric == 'proportions':
if self.parameters['testtype'] == 'unpooled':
return self._se_prop_unpooled()
if self.parameters['testtype'] == 'pooled':
return self._se_prop_pooled()
# -------------------------------------------------
# Conversion methods for levels and statistics
# -------------------------------------------------
def _convert_statistic(self, teststat):
"""
Convert test statistics to match the decision rule of the test logic.
Either transforms to p-values or returns the absolute value of the
statistic, depending on the decision rule of the test.
This is used to mimic other software packages as some tests'
decision rules check test-statistic against pre-defined thresholds
while others check sig. level against p-value.
"""
if self.mimic == 'Dim':
ebases_pairs = [eb1 + eb2 for eb1, eb2
in combinations(self.ebases[0], 2)]
dof = ebases_pairs - self.overlap - 2
dof[dof <= 1] = np.NaN
return get_pval(dof, teststat)[1]
elif self.mimic == 'askia':
return abs(teststat)
def _convert_level(self, level):
"""
Determines the comparison value for the test's decision rule.
Checks whether the level of test is a string that defines low, medium,
or high significance or an "actual" level of significance and
converts it to a comparison level/significance level tuple.
This is used to mimic other software packages as some test's
decision rules check test-statistic against pre-defined thresholds
while others check sig. level against p-value.
"""
if isinstance(level, (str, unicode)):
if level == 'low':
if self.mimic == 'Dim':
comparevalue = siglevel = 0.10
elif self.mimic == 'askia':
comparevalue = 1.65
siglevel = 0.10
elif level == 'mid':
if self.mimic == 'Dim':
comparevalue = siglevel = 0.05
elif self.mimic == 'askia':
comparevalue = 1.96
siglevel = 0.05
elif level == 'high':
if self.mimic == 'Dim':
comparevalue = siglevel = 0.01
elif self.mimic == 'askia':
comparevalue = 2.576
siglevel = 0.01
else:
if self.mimic == 'Dim':
comparevalue = siglevel = level
elif self.mimic == 'askia':
comparevalue = 1.65
siglevel = 0.10
return comparevalue, siglevel
# -------------------------------------------------
# Standard error estimates calculation methods
# -------------------------------------------------
def _se_prop_unpooled(self):
"""
Estimated standard errors of prop. diff. (unpool. var.) per col. pair.
"""
props = self.values/self.cbases
unp_sd = ((props*(1-props))/self.cbases).T
return np.array([np.sqrt(cat1 + cat2)
for cat1, cat2 in combinations(unp_sd, 2)]).T
def _se_mean_unpooled(self):
"""
Estimated standard errors of mean diff. (unpool. var.) per col. pair.
"""
sd_base_ratio = self.sd / self.cbases
return np.array([np.sqrt(sd_b_r1 + sd_b_r2)
for sd_b_r1, sd_b_r2
in combinations(sd_base_ratio[0], 2)])[None, :]
def _se_prop_pooled(self):
"""
Estimated standard errors of prop. diff. (pooled var.) per col. pair.
Controlling for effective base sizes and overlap responses is
supported and applied as defined by the test's parameters setup.
"""
ebases_correc_pairs = np.array([1 / x + 1 / y
for x, y
in combinations(self.ebases[0], 2)])
if self.y_is_multi and self.parameters['ovlp_correc']:
ovlp_correc_pairs = ((2 * self.overlap) /
[x * y for x, y
in combinations(self.ebases[0], 2)])
else:
ovlp_correc_pairs = self.overlap
counts_sum_pairs = np.array(
[c1 + c2 for c1, c2 in combinations(self.values.T, 2)])
bases_sum_pairs = np.expand_dims(
[b1 + b2 for b1, b2 in combinations(self.cbases[0], 2)], 1)
pooled_props = (counts_sum_pairs/bases_sum_pairs).T
return (np.sqrt(pooled_props * (1 - pooled_props) *
(np.array(ebases_correc_pairs - ovlp_correc_pairs))))
def _se_mean_pooled(self):
"""
Estimated standard errors of mean diff. (pooled var.) per col. pair.
Controlling for effective base sizes and overlap responses is
supported and applied as defined by the test's parameters setup.
"""
ssw_base_ratios = self._sum_sq_w(base_ratio=True)
enum = np.nan_to_num((self.sd ** 2) * (self.cbases-1))
denom = self.cbases-ssw_base_ratios
enum_pairs = np.array([enum1 + enum2
for enum1, enum2
in combinations(enum[0], 2)])
denom_pairs = np.array([denom1 + denom2
for denom1, denom2
in combinations(denom[0], 2)])
ebases_correc_pairs = np.array([1/x + 1/y
for x, y
in combinations(self.ebases[0], 2)])
if self.y_is_multi and self.parameters['ovlp_correc']:
ovlp_correc_pairs = ((2*self.overlap) /
[x * y for x, y
in combinations(self.ebases[0], 2)])
else:
ovlp_correc_pairs = self.overlap[None, :]
return (np.sqrt((enum_pairs/denom_pairs) *
(ebases_correc_pairs - ovlp_correc_pairs)))
# -------------------------------------------------
# Specific algorithm values & test option measures
# -------------------------------------------------
def _sum_sq_w(self, base_ratio=True):
"""
"""
if not self.Quantity.w == '@1':
self.Quantity.weight()
if not self.test_total:
ssw = np.nansum(self.Quantity.matrix ** 2, axis=0)[[0], 1:]
else:
ssw = np.nansum(self.Quantity.matrix ** 2, axis=0)[[0], :]
if base_ratio:
return ssw/self.cbases
else:
return ssw
def _cwi(self, threshold=5, as_df=False):
"""
Derives the count distribution assuming independence between columns.
"""
c_col_n = self.cbases
c_cell_n = self.values
t_col_n = self.tbase
if self.rbases.shape[1] > 1:
t_cell_n = self.rbases[1:, :]
else:
t_cell_n = self.rbases[0]
np.place(np.array(t_col_n), t_col_n == 0, np.NaN)
np.place(t_cell_n, t_cell_n == 0, np.NaN)
np.place(c_col_n, c_col_n == 0, np.NaN)
np.place(c_cell_n, c_cell_n == 0, np.NaN)
cwi = (t_cell_n * c_col_n) / t_col_n
cwi[cwi < threshold] = np.NaN
if as_df:
return pd.DataFrame(c_cell_n + cwi - cwi,
index=self.xdef, columns=self.ydef)
else:
return c_cell_n + cwi - cwi
def _overlap(self):
if self.is_weighted:
self.Quantity.weight()
m = self.Quantity.matrix.copy()
m = np.nansum(m, 1) if self.test_total else np.nansum(m[:, 1:, 1:], 1)
if not self.is_weighted:
m /= m
m[m == 0] = np.NaN
col_pairs = list(combinations(range(0, m.shape[1]), 2))
if self.parameters['use_ebase'] and self.is_weighted:
# Overlap computation when effective base is being used
w_sum_sq = np.array([np.nansum(m[:, [c1]] + m[:, [c2]], axis=0)**2
for c1, c2 in col_pairs])
w_sq_sum = np.array([np.nansum(m[:, [c1]]**2 + m[:, [c2]]**2, axis=0)
for c1, c2 in col_pairs])
return np.nan_to_num((w_sum_sq/w_sq_sum)/2).T
else:
# Overlap with simple weighted/unweighted base size
ovlp = np.array([np.nansum(m[:, [c1]] + m[:, [c2]], axis=0)
for c1, c2 in col_pairs])
return (np.nan_to_num(ovlp)/2).T
def _get_base_flags(self):
bases = self.ebases[0]
small = self.flags['small']
minimum = self.flags['min']
flags = []
for base in bases:
if base >= small:
flags.append('')
elif base < small and base >= minimum:
flags.append('*')
else:
flags.append('**')
return flags
# -------------------------------------------------
# Output creation
# -------------------------------------------------
def _output(self, sigs):
res = {y: {x: [] for x in self.xdef} for y in self.ydef}
test_columns = ['@'] + self.ydef if self.test_total else self.ydef
for col, val in sigs.iteritems():
if self.is_nested and not col in self._valid_pairs:
continue
if self.is_nested:
upper_v = self.idmap[col[1]]
lower_v = self.idmap[col[0]]
else:
upper_v = col[1]
lower_v = col[0]
if self._flags_exist():
b1ix, b2ix = test_columns.index(col[0]), test_columns.index(col[1])
b1_ok = self.flags['flagged_bases'][b1ix] != '**'
b2_ok = self.flags['flagged_bases'][b2ix] != '**'
else:
b1_ok, b2_ok = True, True
for row, v in val.iteritems():
if v > 0:
if b2_ok:
if col[0] == '@':
res[col[1]][row].append('@H')
else:
res[col[0]][row].append(upper_v)
# res[col[0]][row].append(self.idmap[col[1]])
# res[col[0]][row].append(col[1])
if v < 0:
if b1_ok:
if col[0] == '@':
res[col[1]][row].append('@L')
else:
res[col[1]][row].append(lower_v)
# res[col[1]][row].append(self.idmap[col[0]])
# res[col[1]][row].append(col[0])
test = pd.DataFrame(res).applymap(lambda x: str(x))
test = test.reindex(index=self.xdef, columns=self.ydef)
if self._flags_exist():
test = self._apply_base_flags(test)
test.replace('[]*', '*', inplace=True)
test.replace('[]', np.NaN, inplace=True)
# removing test results on post-aggregation rows [calc()]
if self.has_calc:
if len(test.index) > 1:
test.iloc[-1:, :] = np.NaN
else:
test.iloc[:, :] = np.NaN
test.index, test.columns = self.multiindex[0], self.multiindex[1]
return test
def _empty_output(self):
"""
"""
values = self.values
if self.metric == 'proportions':
if self.no_pairs or self.no_diffs:
values[:] = np.NaN
if self.test_total and not self.no_pairs:
values = values[:, 1:]
if values.shape == (1, 1) or values.shape == (1, 0):
values = [np.NaN]
if self.metric == 'means':
if self.no_pairs:
values = [np.NaN]
if self.no_diffs and not self.no_pairs:
values[:] = np.NaN
if self.test_total and not self.no_pairs:
values = values[:, 1:]
return pd.DataFrame(values,
index=self.multiindex[0],
columns=self.multiindex[1])
def _flags_exist(self):
return (self.flags is not None and
not all(b == '' for b in self.flags['flagged_bases']))
def _apply_base_flags(self, sigres, replace=True):
flags = self.flags['flagged_bases']
if self.test_total: flags = flags[1:]
for res_col, flag in zip(sigres.columns, flags):
if flag == '**':
if replace:
sigres[res_col] = flag
else:
sigres[res_col] = sigres[res_col] + flag
elif flag == '*':
sigres[res_col] = sigres[res_col] + flag
return sigres
class Nest(object):
"""
Creates an interlocked (nested) variable from a '>'-separated definition
string, adding both the case data column and its meta to the dataset.
"""
def __init__(self, nest, data, meta):
self.data = data
self.meta = meta
self.name = nest
self.levels = len(self.variables)
self.level_codes = []
self.code_maps = None
self._needs_multi = self._any_multicoded()
@property
def variables(self):
return [variable.strip() for variable in self.name.split('>')]
def nest(self):
self._get_nested_meta()
self._get_code_maps()
interlocked = self._interlock_codes()
if not self.name in self.data.columns:
recode_map = {code: intersection(code_pair) for code, code_pair
in enumerate(interlocked, start=1)}
self.data[self.name] = np.NaN
self.data[self.name] = recode(self.meta, self.data,
target=self.name, mapper=recode_map)
nest_info = {'variables': self.variables,
'level_codes': self.level_codes,
'levels': self.levels}
return nest_info
def _any_multicoded(self):
return any(self.data[self.variables].dtypes == 'object')
def _get_code_maps(self):
code_maps = []
for level, var in enumerate(self.variables):
mapping = [{var: [int(code)]} for code
in self.level_codes[level]]
code_maps.append(mapping)
self.code_maps = code_maps
return None
def _interlock_codes(self):
return list(product(*self.code_maps))
def _get_nested_meta(self):
meta_dict = {}
qtext, valtexts = self._interlock_texts()
meta_dict['type'] = 'delimited set' if self._needs_multi else 'single'
meta_dict['text'] = {'en-GB': '>'.join(qtext[0])}
meta_dict['values'] = [{'text' : {'en-GB': '>'.join(valtext)},
'value': c}
for c, valtext
in enumerate(valtexts, start=1)]
self.meta['columns'][self.name] = meta_dict
return None
def _interlock_texts(self):
all_valtexts = []
all_qtexts = []
for var in self.variables:
var_valtexts = []
values = self.meta['columns'][var]['values']
all_qtexts.append(self.meta['columns'][var]['text'].values())
for value in values:
var_valtexts.append(value['text'].values()[0])
all_valtexts.append(var_valtexts)
self.level_codes.append([code['value'] for code in values])
interlocked_valtexts = list(product(*all_valtexts))
interlocked_qtexts = list(product(*all_qtexts))
return interlocked_qtexts, interlocked_valtexts
class Level(object):
"""
"""
def __init__(self, quantity):
"""
"""
self.quantity = quantity
self.dataset = self.quantity.ds
self._lvlspec = self.dataset.get_property(self.quantity.x, 'level')
self.array = self._lvlspec['source']
self.level_codes = self._lvlspec['level_codes']
self.item_look = self._lvlspec['item_look']
self._auxdf = self.quantity.count(margin=False).result.reset_index()
self._collapse_codes()
self.lvldf = None
def _reindex(self, like='freq'):
ds = self.dataset
like_item = self.item_look
itemres = self.quantity.swap(like_item, axis='x', inplace=False)
if like == 'freq':
itemres.count(margin=False, axis=None, as_df=True)
self.lvldf = self.lvldf.reindex(ds.codes(like_item))
elif like == 'base':
itemres.count(margin=False, axis='x', as_df=True)
x = [self.quantity.x]
vals = itemres.result.index.get_level_values(1).tolist()
idx = pd.MultiIndex.from_product([x, vals],
names=['Question', 'Values'])
self.lvldf.index = idx
return None
def _collapse_codes(self):
df = self._auxdf
for org, lvls in self.level_codes.items():
for lvl in lvls:
df['Values'] = df['Values'].replace(
lvl, int(org), inplace=False)
return None
def count(self):
"""
"""
df = self._auxdf.set_index(['Question', 'Values'])
self.lvldf = df.sum(level=1, axis=0)
self._reindex()
return None
def base(self):
"""
"""
df = self._auxdf.set_index(['Question', 'Values'])
self.lvldf = df.sum(level=0, axis=0)
self._reindex(like='base')
return None
def percent(self):
"""
"""
self.count()
c = self.lvldf
self.base()
b = self.lvldf
pcts = c.values / b.values * 100
self.lvldf = pd.DataFrame(pcts, index=c.index, columns=c.columns)
return None
def as_view(self):
"""
"""
pass | mit |
woodscn/scipy | scipy/stats/tests/test_discrete_basic.py | 38 | 8770 | from __future__ import division, print_function, absolute_import
import numpy.testing as npt
import numpy as np
from scipy._lib.six import xrange
from scipy import stats
from common_tests import (check_normalization, check_moment, check_mean_expect,
check_var_expect, check_skew_expect,
check_kurt_expect, check_entropy,
check_private_entropy, check_edge_support,
check_named_args, check_random_state_property,
check_pickling, check_rvs_broadcast)
from scipy.stats._distr_params import distdiscrete
knf = npt.dec.knownfailureif
vals = ([1, 2, 3, 4], [0.1, 0.2, 0.3, 0.4])
distdiscrete += [[stats.rv_discrete(values=vals), ()]]
def test_discrete_basic():
for distname, arg in distdiscrete:
try:
distfn = getattr(stats, distname)
except TypeError:
distfn = distname
distname = 'sample distribution'
np.random.seed(9765456)
rvs = distfn.rvs(size=2000, *arg)
supp = np.unique(rvs)
m, v = distfn.stats(*arg)
yield check_cdf_ppf, distfn, arg, supp, distname + ' cdf_ppf'
yield check_pmf_cdf, distfn, arg, distname
yield check_oth, distfn, arg, supp, distname + ' oth'
yield check_edge_support, distfn, arg
alpha = 0.01
yield (check_discrete_chisquare, distfn, arg, rvs, alpha,
distname + ' chisquare')
seen = set()
for distname, arg in distdiscrete:
if distname in seen:
continue
seen.add(distname)
try:
distfn = getattr(stats, distname)
except TypeError:
distfn = distname
distname = 'sample distribution'
locscale_defaults = (0,)
meths = [distfn.pmf, distfn.logpmf, distfn.cdf, distfn.logcdf,
distfn.logsf]
# make sure arguments are within support
spec_k = {'randint': 11, 'hypergeom': 4, 'bernoulli': 0, }
k = spec_k.get(distname, 1)
yield check_named_args, distfn, k, arg, locscale_defaults, meths
if distname != 'sample distribution':
yield check_scale_docstring, distfn
yield check_random_state_property, distfn, arg
yield check_pickling, distfn, arg
# Entropy
yield check_entropy, distfn, arg, distname
if distfn.__class__._entropy != stats.rv_discrete._entropy:
yield check_private_entropy, distfn, arg, stats.rv_discrete
def test_moments():
for distname, arg in distdiscrete:
try:
distfn = getattr(stats, distname)
except TypeError:
distfn = distname
distname = 'sample distribution'
m, v, s, k = distfn.stats(*arg, moments='mvsk')
yield check_normalization, distfn, arg, distname
# compare `stats` and `moment` methods
yield check_moment, distfn, arg, m, v, distname
yield check_mean_expect, distfn, arg, m, distname
yield check_var_expect, distfn, arg, m, v, distname
yield check_skew_expect, distfn, arg, m, v, s, distname
cond = distname in ['zipf']
msg = distname + ' fails kurtosis'
yield knf(cond, msg)(check_kurt_expect), distfn, arg, m, v, k, distname
# frozen distr moments
yield check_moment_frozen, distfn, arg, m, 1
yield check_moment_frozen, distfn, arg, v+m*m, 2
def test_rvs_broadcast():
for dist, shape_args in distdiscrete:
# If shape_only is True, it means the _rvs method of the
# distribution uses more than one random number to generate a random
# variate. That means the result of using rvs with broadcasting or
# with a nontrivial size will not necessarily be the same as using the
# numpy.vectorize'd version of rvs(), so we can only compare the shapes
# of the results, not the values.
# Whether or not a distribution is in the following list is an
# implementation detail of the distribution, not a requirement. If
# the implementation the rvs() method of a distribution changes, this
# test might also have to be changed.
shape_only = dist in ['skellam']
try:
distfunc = getattr(stats, dist)
except TypeError:
distfunc = dist
dist = 'rv_discrete(values=(%r, %r))' % (dist.xk, dist.pk)
loc = np.zeros(2)
nargs = distfunc.numargs
allargs = []
bshape = []
# Generate shape parameter arguments...
for k in range(nargs):
shp = (k + 3,) + (1,)*(k + 1)
param_val = shape_args[k]
allargs.append(param_val*np.ones(shp, dtype=np.array(param_val).dtype))
bshape.insert(0, shp[0])
allargs.append(loc)
bshape.append(loc.size)
# bshape holds the expected shape when loc, scale, and the shape
# parameters are all broadcast together.
yield check_rvs_broadcast, distfunc, dist, allargs, bshape, shape_only, [np.int_]
def check_cdf_ppf(distfn, arg, supp, msg):
# cdf is a step function, and ppf(q) = min{k : cdf(k) >= q, k integer}
npt.assert_array_equal(distfn.ppf(distfn.cdf(supp, *arg), *arg),
supp, msg + '-roundtrip')
npt.assert_array_equal(distfn.ppf(distfn.cdf(supp, *arg) - 1e-8, *arg),
supp, msg + '-roundtrip')
if not hasattr(distfn, 'xk'):
supp1 = supp[supp < distfn.b]
npt.assert_array_equal(distfn.ppf(distfn.cdf(supp1, *arg) + 1e-8, *arg),
supp1 + distfn.inc, msg + ' ppf-cdf-next')
# -1e-8 could cause an error if pmf < 1e-8
def check_pmf_cdf(distfn, arg, distname):
if hasattr(distfn, 'xk'):
index = distfn.xk
else:
startind = int(distfn.ppf(0.01, *arg) - 1)
index = list(range(startind, startind + 10))
cdfs = distfn.cdf(index, *arg)
pmfs_cum = distfn.pmf(index, *arg).cumsum()
atol, rtol = 1e-10, 1e-10
if distname == 'skellam': # ncx2 accuracy
atol, rtol = 1e-5, 1e-5
npt.assert_allclose(cdfs - cdfs[0], pmfs_cum - pmfs_cum[0],
atol=atol, rtol=rtol)
def check_moment_frozen(distfn, arg, m, k):
npt.assert_allclose(distfn(*arg).moment(k), m,
atol=1e-10, rtol=1e-10)
def check_oth(distfn, arg, supp, msg):
# checking other methods of distfn
npt.assert_allclose(distfn.sf(supp, *arg), 1. - distfn.cdf(supp, *arg),
atol=1e-10, rtol=1e-10)
q = np.linspace(0.01, 0.99, 20)
npt.assert_allclose(distfn.isf(q, *arg), distfn.ppf(1. - q, *arg),
atol=1e-10, rtol=1e-10)
median_sf = distfn.isf(0.5, *arg)
npt.assert_(distfn.sf(median_sf - 1, *arg) > 0.5)
npt.assert_(distfn.cdf(median_sf + 1, *arg) > 0.5)
def check_discrete_chisquare(distfn, arg, rvs, alpha, msg):
"""Perform chisquare test for random sample of a discrete distribution
Parameters
----------
distfn : distribution object
discrete distribution instance under test
arg : sequence
parameters of distribution
rvs : array_like
random sample drawn from the distribution
alpha : float
significance level, threshold for p-value
msg : string
identifier used in the assertion message
Raises
------
AssertionError
if the chisquare test rejects at the given significance level
"""
wsupp = 0.05
# construct intervals with minimum mass `wsupp`.
# intervals are left-half-open as in a cdf difference
lo = int(max(distfn.a, -1000))
distsupport = xrange(lo, int(min(distfn.b, 1000)) + 1)
last = 0
distsupp = [lo]
distmass = []
for ii in distsupport:
current = distfn.cdf(ii, *arg)
if current - last >= wsupp - 1e-14:
distsupp.append(ii)
distmass.append(current - last)
last = current
if current > (1 - wsupp):
break
if distsupp[-1] < distfn.b:
distsupp.append(distfn.b)
distmass.append(1 - last)
distsupp = np.array(distsupp)
distmass = np.array(distmass)
# convert intervals to right-half-open as required by histogram
histsupp = distsupp + 1e-8
histsupp[0] = distfn.a
# find sample frequencies and perform chisquare test
freq, hsupp = np.histogram(rvs, histsupp)
chis, pval = stats.chisquare(np.array(freq), len(rvs)*distmass)
npt.assert_(pval > alpha,
'chisquare - test for %s at arg = %s with pval = %s' %
(msg, str(arg), str(pval)))
def check_scale_docstring(distfn):
if distfn.__doc__ is not None:
# Docstrings can be stripped if interpreter is run with -OO
npt.assert_('scale' not in distfn.__doc__)
if __name__ == "__main__":
npt.run_module_suite()
| bsd-3-clause |
itai12312/workspaces | hellodjango/venv/lib/python2.7/site-packages/django/core/serializers/json.py | 113 | 3461 | """
Serialize data to/from JSON
"""
# Avoid shadowing the standard library json module
from __future__ import absolute_import
import datetime
import decimal
import json
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import Serializer as PythonSerializer
from django.core.serializers.python import Deserializer as PythonDeserializer
from django.utils import six
from django.utils.timezone import is_aware
class Serializer(PythonSerializer):
"""
Convert a queryset to JSON.
"""
internal_use_only = False
def start_serialization(self):
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
self.options.update({'use_decimal': False})
self._current = None
self.json_kwargs = self.options.copy()
self.json_kwargs.pop('stream', None)
self.json_kwargs.pop('fields', None)
self.stream.write("[")
def end_serialization(self):
if self.options.get("indent"):
self.stream.write("\n")
self.stream.write("]")
if self.options.get("indent"):
self.stream.write("\n")
def end_object(self, obj):
# self._current has the field data
indent = self.options.get("indent")
if not self.first:
self.stream.write(",")
if not indent:
self.stream.write(" ")
if indent:
self.stream.write("\n")
json.dump(self.get_dump_object(obj), self.stream,
cls=DjangoJSONEncoder, **self.json_kwargs)
self._current = None
def getvalue(self):
# Grand-parent super
return super(PythonSerializer, self).getvalue()
def Deserializer(stream_or_string, **options):
"""
Deserialize a stream or string of JSON data.
"""
if not isinstance(stream_or_string, (bytes, six.string_types)):
stream_or_string = stream_or_string.read()
if isinstance(stream_or_string, bytes):
stream_or_string = stream_or_string.decode('utf-8')
try:
objects = json.loads(stream_or_string)
for obj in PythonDeserializer(objects, **options):
yield obj
except GeneratorExit:
raise
except Exception as e:
# Map to deserializer error
raise DeserializationError(e)
class DjangoJSONEncoder(json.JSONEncoder):
"""
JSONEncoder subclass that knows how to encode date/time and decimal types.
"""
def default(self, o):
# See "Date Time String Format" in the ECMA-262 specification.
if isinstance(o, datetime.datetime):
r = o.isoformat()
if o.microsecond:
r = r[:23] + r[26:]
if r.endswith('+00:00'):
r = r[:-6] + 'Z'
return r
elif isinstance(o, datetime.date):
return o.isoformat()
elif isinstance(o, datetime.time):
if is_aware(o):
raise ValueError("JSON can't represent timezone-aware times.")
r = o.isoformat()
if o.microsecond:
r = r[:12]
return r
elif isinstance(o, decimal.Decimal):
return str(o)
else:
return super(DjangoJSONEncoder, self).default(o)
# Older, deprecated class name (for backwards compatibility purposes).
DateTimeAwareJSONEncoder = DjangoJSONEncoder
| gpl-2.0 |
cgwalters/anaconda | tests/gui/outside/__init__.py | 1 | 10874 | #!/usr/bin/python2
#
# Copyright (C) 2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Chris Lumens <[email protected]>
__all__ = ["Creator", "OutsideMixin"]
import blivet
from blivet.size import MiB
from contextlib import contextmanager
from nose.plugins.multiprocess import TimedOutException
import os
import shutil
import subprocess
import tempfile
import errno
# Copied from python's subprocess.py
def eintr_retry_call(func, *args):
"""Retry an interruptible system call if interrupted."""
while True:
try:
return func(*args)
except (OSError, IOError) as e:
if e.errno == errno.EINTR:
continue
raise
class Creator(object):
"""A Creator subclass defines all the parameters for making a VM to run a
test against as well as handles creating and running that VM, inspecting
results, and managing temporary data.
Most Creator subclasses will only need to define the following four
attributes:
drives -- A list of tuples describing disk images to create. Each
tuple is the name of the drive and its size as a blivet.Size.
environ -- A dictionary of environment variables that should be added
to the environment the test suite will run under.
name -- A unique string that names a Creator. This name will
be used in creating the results directory (and perhaps
other places in the future) so make sure it doesn't
conflict with another object.
tests -- A list of tuples describing which test cases make up
this test. Each tuple is the name of the module
containing the test case (minus the leading "inside."
and the name of the test case class. Tests will be
run in the order provided.
"""
drives = []
environ = {}
name = "Creator"
tests = []
def __init__(self):
self._drivePaths = {}
self._mountpoint = None
self._proc = None
self._tempdir = None
self._reqMemory = 1536
def _call(self, args):
subprocess.call(args, stdout=open("/dev/null", "w"), stderr=open("/dev/null", "w"))
def archive(self):
"""Copy all log files and other test results to a subdirectory of the
given resultsdir. If logs are no longer available, this method
does nothing. It is up to the caller to make sure logs are available
beforehand and clean up afterwards.
"""
from testconfig import config
if not os.path.ismount(self.mountpoint):
return
shutil.copytree(self.mountpoint + "/result", config["resultsdir"] + "/" + self.name)
def cleanup(self):
"""Remove all disk images used during this test case and the temporary
directory they were stored in.
"""
shutil.rmtree(self.tempdir, ignore_errors=True)
def die(self):
"""Kill any running qemu process previously started by this test."""
if self._proc:
self._proc.kill()
self._proc = None
def makeDrives(self):
"""Create all hard drive images associated with this test. Images
must be listed in Creator.drives and will be stored in a temporary
directory this method creates. It is up to the caller to remove
everything later by calling Creator.cleanup.
"""
for (drive, size) in self.drives:
(fd, diskimage) = tempfile.mkstemp(dir=self.tempdir)
eintr_retry_call(os.close, fd)
# For now we are using qemu-img to create these files but specifying
# sizes in blivet Size objects. Unfortunately, qemu-img wants sizes
# as xM or xG, not xMB or xGB. That's what the conversion here is for.
self._call(["/usr/bin/qemu-img", "create", "-f", "raw", diskimage, "%sM" % size.convertTo(MiB)])
self._drivePaths[drive] = diskimage
@property
def template(self):
with open("outside/template.py", "r") as f:
return f.read()
def makeSuite(self):
"""The suite is a small disk image attached to every test VM automatically
by the test framework. It includes all the inside/ stuff, a special
suite.py file that will be automatically run by the live CD (and is
what actually runs the test), and a directory structure for reporting
results.
It is mounted under Creator.mountpoint as needed.
This method creates the suite image and adds it to the internal list of
images associated with this test.
Note that because this image is attached to the VM, anaconda will always
see two hard drives and thus will never automatically select disks.
Note also that this means tests must be careful to not select this
disk.
"""
from testconfig import config
self._call(["/usr/bin/qemu-img", "create", "-f", "raw", self.suitepath, "10M"])
self._call(["/sbin/mkfs.ext4", "-F", self.suitepath, "-L", "ANACTEST"])
self._call(["/usr/bin/mount", "-o", "loop", self.suitepath, self.mountpoint])
# Create the directory structure needed for storing results.
os.makedirs(self.mountpoint + "/result/anaconda")
# Copy all the inside stuff into the mountpoint.
shutil.copytree("inside", self.mountpoint + "/inside")
# Create the suite file, which contains all the test cases to run and is how
# the VM will figure out what to run.
with open(self.mountpoint + "/suite.py", "w") as f:
imports = map(lambda (path, cls): " from inside.%s import %s" % (path, cls), self.tests)
addtests = map(lambda (path, cls): " s.addTest(%s())" % cls, self.tests)
f.write(self.template % {"environ": " os.environ.update(%s)" % self.environ,
"imports": "\n".join(imports),
"addtests": "\n".join(addtests),
"anacondaArgs": config.get("anacondaArgs", "").strip('"')})
self._call(["/usr/bin/umount", self.mountpoint])
# This ensures it gets passed to qemu-kvm as a disk arg.
self._drivePaths[self.suitename] = self.suitepath
@contextmanager
def suiteMounted(self):
"""This context manager allows for wrapping code that needs to access the
suite. It mounts the disk image beforehand and unmounts it afterwards.
"""
if self._drivePaths.get(self.suitename, "") == "":
return
self._call(["/usr/bin/mount", "-o", "loop", self.suitepath, self.mountpoint])
try:
yield
except:
raise
finally:
self._call(["/usr/bin/umount", self.mountpoint])
def run(self):
"""Given disk images previously created by Creator.makeDrives and
Creator.makeSuite, start qemu and wait for it to terminate.
"""
from testconfig import config
args = ["/usr/bin/qemu-kvm",
"-vnc", "none",
"-m", str(self._reqMemory),
"-boot", "d",
"-drive", "file=%s,media=cdrom,readonly" % config["liveImage"]]
for drive in self._drivePaths.values():
args += ["-drive", "file=%s,media=disk" % drive]
# Save a reference to the running qemu process so we can later kill
# it if necessary. For now, the only reason we'd want to kill it is
# an expired timer.
self._proc = subprocess.Popen(args)
try:
self._proc.wait()
except TimedOutException:
self.die()
self.cleanup()
raise
finally:
self._proc = None
@property
def mountpoint(self):
"""The directory where the suite is mounted. This is a subdirectory of
Creator.tempdir, and it is assumed the mountpoint directory (though not
the mount itself) exists throughout this test.
"""
if not self._mountpoint:
self._mountpoint = tempfile.mkdtemp(dir=self.tempdir)
return self._mountpoint
@property
def tempdir(self):
"""The temporary directory used to store disk images and other data
this test requires. This directory will be removed by Creator.cleanup.
It is up to the caller to call that method, though.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(prefix="%s-" % self.name, dir="/var/tmp")
return self._tempdir
@property
def suitename(self):
return self.name + "_suite"
@property
def suitepath(self):
return self.tempdir + "/" + self.suitename
class OutsideMixin(object):
"""A BaseOutsideTestCase subclass is the interface between the unittest framework
and a running VM. It interfaces with an associated Creator object to create
devices and fire up a VM, and also handles actually reporting a result that
unittest knows how to process.
Each subclass will likely only want to define a single attribute:
creatorClass -- A Creator subclass that goes with this test.
"""
creatorClass = None
def archive(self):
self.creator.archive()
def runTest(self):
self.creator.run()
with self.creator.suiteMounted():
self.assertTrue(os.path.exists(self.creator.mountpoint + "/result"),
msg="results directory does not exist")
self.archive()
self.assertFalse(os.path.exists(self.creator.mountpoint + "/result/unittest-failures"),
msg="automated UI test %s failed" % self.creator.name)
def setUp(self):
# pylint: disable=not-callable
self.creator = self.creatorClass()
self.creator.makeDrives()
self.creator.makeSuite()
def tearDown(self):
self.creator.cleanup()
| gpl-2.0 |
JVenberg/PokemonGo-Bot-Desktop | pywin/Lib/test/crashers/nasty_eq_vs_dict.py | 168 | 1046 | # from http://mail.python.org/pipermail/python-dev/2001-June/015239.html
# if you keep changing a dictionary while looking up a key, you can
# provoke an infinite recursion in C
# At the time neither Tim nor Michael could be bothered to think of a
# way to fix it.
class Yuck:
def __init__(self):
self.i = 0
def make_dangerous(self):
self.i = 1
def __hash__(self):
# direct to slot 4 in table of size 8; slot 12 when size 16
return 4 + 8
def __eq__(self, other):
if self.i == 0:
# leave dict alone
pass
elif self.i == 1:
# fiddle to 16 slots
self.__fill_dict(6)
self.i = 2
else:
# fiddle to 8 slots
self.__fill_dict(4)
self.i = 1
return 1
def __fill_dict(self, n):
self.i = 0
dict.clear()
for i in range(n):
dict[i] = i
dict[self] = "OK!"
y = Yuck()
dict = {y: "OK!"}
z = Yuck()
y.make_dangerous()
print dict[z]
| mit |
bjko/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/bot/irc_command.py | 115 | 13369 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import itertools
import random
import re
from webkitpy.common.config import irc as config_irc
from webkitpy.common.config import urls
from webkitpy.common.config.committers import CommitterList
from webkitpy.common.net.web import Web
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.bot.queueengine import TerminateQueue
from webkitpy.tool.grammar import join_with_separators
def _post_error_and_check_for_bug_url(tool, nicks_string, exception):
tool.irc().post("%s" % exception)
bug_id = urls.parse_bug_id(exception.output)
if bug_id:
bug_url = tool.bugs.bug_url_for_bug_id(bug_id)
tool.irc().post("%s: Ugg... Might have created %s" % (nicks_string, bug_url))
# FIXME: Merge with Command?
class IRCCommand(object):
usage_string = None
help_string = None
def execute(self, nick, args, tool, sheriff):
raise NotImplementedError("subclasses must implement")
@classmethod
def usage(cls, nick):
return "%s: Usage: %s" % (nick, cls.usage_string)
@classmethod
def help(cls, nick):
return "%s: %s" % (nick, cls.help_string)
class CreateBug(IRCCommand):
usage_string = "create-bug BUG_TITLE"
help_string = "Creates a Bugzilla bug with the given title."
def execute(self, nick, args, tool, sheriff):
if not args:
return self.usage(nick)
bug_title = " ".join(args)
bug_description = "%s\nRequested by %s on %s." % (bug_title, nick, config_irc.channel)
# There happens to be a committers list hung off of Bugzilla, so
# re-using that one makes things easiest for now.
requester = tool.bugs.committers.contributor_by_irc_nickname(nick)
requester_email = requester.bugzilla_email() if requester else None
try:
bug_id = tool.bugs.create_bug(bug_title, bug_description, cc=requester_email, assignee=requester_email)
bug_url = tool.bugs.bug_url_for_bug_id(bug_id)
return "%s: Created bug: %s" % (nick, bug_url)
except Exception, e:
return "%s: Failed to create bug:\n%s" % (nick, e)
class Help(IRCCommand):
usage_string = "help [COMMAND]"
help_string = "Provides help on my individual commands."
def execute(self, nick, args, tool, sheriff):
if args:
for command_name in args:
if command_name in commands:
self._post_command_help(nick, tool, commands[command_name])
else:
tool.irc().post("%s: Available commands: %s" % (nick, ", ".join(sorted(visible_commands.keys()))))
tool.irc().post('%s: Type "%s: help COMMAND" for help on my individual commands.' % (nick, sheriff.name()))
def _post_command_help(self, nick, tool, command):
tool.irc().post(command.usage(nick))
tool.irc().post(command.help(nick))
aliases = " ".join(sorted(filter(lambda alias: commands[alias] == command and alias not in visible_commands, commands)))
if aliases:
tool.irc().post("%s: Aliases: %s" % (nick, aliases))
class Hi(IRCCommand):
usage_string = "hi"
help_string = "Responds with hi."
def execute(self, nick, args, tool, sheriff):
if len(args) and re.match(sheriff.name() + r'_*\s*!\s*', ' '.join(args)):
return "%s: hi %s!" % (nick, nick)
quips = tool.bugs.quips()
quips.append('"Only you can prevent forest fires." -- Smokey the Bear')
return random.choice(quips)
class PingPong(IRCCommand):
usage_string = "ping"
help_string = "Responds with pong."
def execute(self, nick, args, tool, sheriff):
return nick + ": pong"
class YouThere(IRCCommand):
usage_string = "yt?"
help_string = "Responds with yes."
def execute(self, nick, args, tool, sheriff):
return "%s: yes" % nick
class Restart(IRCCommand):
usage_string = "restart"
help_string = "Restarts sherrifbot. Will update its WebKit checkout, and re-join the channel momentarily."
def execute(self, nick, args, tool, sheriff):
tool.irc().post("Restarting...")
raise TerminateQueue()
class RollChromiumDEPS(IRCCommand):
usage_string = "roll-chromium-deps REVISION"
help_string = "Rolls WebKit's Chromium DEPS to the given revision???"
def execute(self, nick, args, tool, sheriff):
if not len(args):
return self.usage(nick)
tool.irc().post("%s: Will roll Chromium DEPS to %s" % (nick, ' '.join(args)))
tool.irc().post("%s: Rolling Chromium DEPS to %s" % (nick, ' '.join(args)))
tool.irc().post("%s: Rolled Chromium DEPS to %s" % (nick, ' '.join(args)))
tool.irc().post("%s: Thank You" % nick)
class Rollout(IRCCommand):
usage_string = "rollout SVN_REVISION [SVN_REVISIONS] REASON"
help_string = "Opens a rollout bug, CCing author + reviewer, and attaching the reverse-diff of the given revisions marked as commit-queue=?."
def _extract_revisions(self, arg):
revision_list = []
possible_revisions = arg.split(",")
for revision in possible_revisions:
revision = revision.strip()
if not revision:
continue
revision = revision.lstrip("r")
# If one part of the arg isn't in the correct format,
# then none of the arg should be considered a revision.
if not revision.isdigit():
return None
revision_list.append(int(revision))
return revision_list
def _parse_args(self, args):
if not args:
return (None, None)
svn_revision_list = []
remaining_args = args[:]
# First process all revisions.
while remaining_args:
new_revisions = self._extract_revisions(remaining_args[0])
if not new_revisions:
break
svn_revision_list += new_revisions
remaining_args = remaining_args[1:]
# Was there a revision number?
if not len(svn_revision_list):
return (None, None)
# Everything left is the reason.
rollout_reason = " ".join(remaining_args)
return svn_revision_list, rollout_reason
def _responsible_nicknames_from_revisions(self, tool, sheriff, svn_revision_list):
commit_infos = map(tool.checkout().commit_info_for_revision, svn_revision_list)
nickname_lists = map(sheriff.responsible_nicknames_from_commit_info, commit_infos)
return sorted(set(itertools.chain(*nickname_lists)))
def _nicks_string(self, tool, sheriff, requester_nick, svn_revision_list):
# FIXME: _parse_args guarentees that our svn_revision_list is all numbers.
# However, it's possible our checkout will not include one of the revisions,
# so we may need to catch exceptions from commit_info_for_revision here.
target_nicks = [requester_nick] + self._responsible_nicknames_from_revisions(tool, sheriff, svn_revision_list)
return ", ".join(target_nicks)
def _update_working_copy(self, tool):
tool.scm().discard_local_changes()
tool.executive.run_and_throw_if_fail(tool.deprecated_port().update_webkit_command(), quiet=True, cwd=tool.scm().checkout_root)
def _check_diff_failure(self, error_log, tool):
if not error_log:
return None
revert_failure_message_start = error_log.find("Failed to apply reverse diff for revision")
if revert_failure_message_start == -1:
return None
lines = error_log[revert_failure_message_start:].split('\n')[1:]
        files = list(itertools.takewhile(lambda line: tool.filesystem.exists(tool.scm().absolute_path(line)), lines))
if files:
return "Failed to apply reverse diff for file(s): %s" % ", ".join(files)
return None
def execute(self, nick, args, tool, sheriff):
svn_revision_list, rollout_reason = self._parse_args(args)
if (not svn_revision_list or not rollout_reason):
return self.usage(nick)
revision_urls_string = join_with_separators([urls.view_revision_url(revision) for revision in svn_revision_list])
tool.irc().post("%s: Preparing rollout for %s ..." % (nick, revision_urls_string))
self._update_working_copy(tool)
# FIXME: IRCCommand should bind to a tool and have a self._tool like Command objects do.
# Likewise we should probably have a self._sheriff.
nicks_string = self._nicks_string(tool, sheriff, nick, svn_revision_list)
try:
complete_reason = "%s (Requested by %s on %s)." % (
rollout_reason, nick, config_irc.channel)
bug_id = sheriff.post_rollout_patch(svn_revision_list, complete_reason)
bug_url = tool.bugs.bug_url_for_bug_id(bug_id)
tool.irc().post("%s: Created rollout: %s" % (nicks_string, bug_url))
except ScriptError, e:
tool.irc().post("%s: Failed to create rollout patch:" % nicks_string)
diff_failure = self._check_diff_failure(e.output, tool)
if diff_failure:
return "%s: %s" % (nicks_string, diff_failure)
_post_error_and_check_for_bug_url(tool, nicks_string, e)
class Whois(IRCCommand):
usage_string = "whois SEARCH_STRING"
help_string = "Searches known contributors and returns any matches with irc, email and full name. Wild card * permitted."
def _full_record_and_nick(self, contributor):
result = ''
if contributor.irc_nicknames:
result += ' (:%s)' % ', :'.join(contributor.irc_nicknames)
if contributor.can_review:
result += ' (r)'
elif contributor.can_commit:
result += ' (c)'
return unicode(contributor) + result
def execute(self, nick, args, tool, sheriff):
if not args:
return self.usage(nick)
search_string = unicode(" ".join(args))
# FIXME: We should get the ContributorList off the tool somewhere.
contributors = CommitterList().contributors_by_search_string(search_string)
if not contributors:
return unicode("%s: Sorry, I don't know any contributors matching '%s'.") % (nick, search_string)
if len(contributors) > 5:
return unicode("%s: More than 5 contributors match '%s', could you be more specific?") % (nick, search_string)
if len(contributors) == 1:
contributor = contributors[0]
if not contributor.irc_nicknames:
return unicode("%s: %s hasn't told me their nick. Boo hoo :-(") % (nick, contributor)
return unicode("%s: %s is %s. Why do you ask?") % (nick, search_string, self._full_record_and_nick(contributor))
contributor_nicks = map(self._full_record_and_nick, contributors)
contributors_string = join_with_separators(contributor_nicks, only_two_separator=" or ", last_separator=', or ')
return unicode("%s: I'm not sure who you mean? %s could be '%s'.") % (nick, contributors_string, search_string)
# FIXME: Lame. We should have an auto-registering CommandCenter.
visible_commands = {
"create-bug": CreateBug,
"help": Help,
"hi": Hi,
"ping": PingPong,
"restart": Restart,
"roll-chromium-deps": RollChromiumDEPS,
"rollout": Rollout,
"whois": Whois,
"yt?": YouThere,
}
# Add revert as an "easter egg" command. Why?
# revert is the same as rollout and it would be confusing to list both when
# they do the same thing. However, this command is a very natural thing for
# people to use and it seems silly to have them hunt around for "rollout" instead.
commands = visible_commands.copy()
commands["revert"] = Rollout
# "hello" Alias for "hi" command for the purposes of testing aliases
commands["hello"] = Hi
| bsd-3-clause |
tux-00/ansible | lib/ansible/modules/network/nxos/nxos_pim_rp_address.py | 30 | 8315 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_pim_rp_address
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages configuration of a PIM static RP address instance.
description:
    - Manages configuration of a Protocol Independent Multicast (PIM) static
rendezvous point (RP) address instance.
author: Gabriele Gerbino (@GGabriele)
notes:
    - C(state=absent) removes the whole rp-address configuration, if it exists.
options:
rp_address:
description:
- Configures a Protocol Independent Multicast (PIM) static
rendezvous point (RP) address. Valid values are
unicast addresses.
required: true
group_list:
description:
- Group range for static RP. Valid values are multicast addresses.
required: false
default: null
prefix_list:
description:
- Prefix list policy for static RP. Valid values are prefix-list
policy names.
required: false
default: null
route_map:
description:
- Route map policy for static RP. Valid values are route-map
policy names.
required: false
default: null
bidir:
description:
- Group range is treated in PIM bidirectional mode.
required: false
choices: ['true','false']
default: null
'''
EXAMPLES = '''
- nxos_pim_rp_address:
rp_address: "10.1.1.20"
state: present
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
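
# A second, illustrative task (values assumed, not taken from the module docs):
# bind the static RP to a group range and enable bidirectional mode.
- nxos_pim_rp_address:
    rp_address: "10.1.1.21"
    group_list: "224.0.0.0/4"
    bidir: true
    state: present
    username: "{{ un }}"
    password: "{{ pwd }}"
    host: "{{ inventory_hostname }}"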
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"rp_address": "10.1.1.21"}
existing:
description: list of existing pim rp-address configuration entries
returned: verbose mode
type: list
sample: []
end_state:
description: pim rp-address configuration entries after module execution
returned: verbose mode
type: list
sample: [{"bidir": false, "group_list": "224.0.0.0/4",
"rp_address": "10.1.1.21"}]
updates:
description: commands sent to the device
returned: always
type: list
sample: ["router bgp 65535", "vrf test", "router-id 1.1.1.1"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
BOOL_PARAMS = ['bidir']
PARAM_TO_COMMAND_KEYMAP = {
'rp_address': 'ip pim rp-address'
}
PARAM_TO_DEFAULT_KEYMAP = {}
WARNINGS = []
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
def get_value(config, module):
value_list = []
splitted_config = config.splitlines()
for line in splitted_config:
tmp = {}
if 'ip pim rp-address' in line:
splitted_line = line.split()
tmp['rp_address'] = splitted_line[3]
if len(splitted_line) > 5:
value = splitted_line[5]
if splitted_line[4] == 'route-map':
tmp['route_map'] = value
elif splitted_line[4] == 'prefix-list':
tmp['prefix_list'] = value
elif splitted_line[4] == 'group-list':
tmp['group_list'] = value
if 'bidir' in line:
tmp['bidir'] = True
else:
tmp['bidir'] = False
value_list.append(tmp)
return value_list
def get_existing(module, args):
existing = {}
config = str(get_config(module))
existing = get_value(config, module)
return existing
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = value
else:
new_dict[new_key] = value
return new_dict
def state_present(module, existing, proposed, candidate):
command = 'ip pim rp-address {0}'.format(module.params['rp_address'])
commands = build_command(proposed, command)
if commands:
candidate.add(commands, parents=[])
def build_command(param_dict, command):
for param in ['group_list', 'prefix_list', 'route_map']:
if param_dict.get(param):
command += ' {0} {1}'.format(
param.replace('_', '-'), param_dict.get(param))
if param_dict.get('bidir'):
command += ' bidir'
return [command]
def state_absent(module, existing, proposed, candidate):
commands = list()
for each in existing:
if each.get('rp_address') == proposed['rp_address']:
command = 'no ip pim rp-address {0}'.format(proposed['rp_address'])
if each.get('group_list'):
commands = build_command(each, command)
else:
commands = [command]
if commands:
candidate.add(commands, parents=[])
def main():
argument_spec = dict(
rp_address=dict(required=True, type='str'),
group_list=dict(required=False, type='str'),
prefix_list=dict(required=False, type='str'),
route_map=dict(required=False, type='str'),
bidir=dict(required=False, type='bool'),
state=dict(choices=['present', 'absent'], default='present',
required=False),
include_defaults=dict(default=False),
config=dict(),
save=dict(type='bool', default=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[['group_list', 'route_map'],
['group_list', 'prefix_list'],
['route_map', 'prefix_list']],
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
state = module.params['state']
args = [
'rp_address',
'group_list',
'prefix_list',
'route_map',
'bidir'
]
existing = invoke('get_existing', module, args)
end_state = existing
proposed_args = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
proposed = {}
for key, value in proposed_args.items():
if str(value).lower() == 'true':
value = True
elif str(value).lower() == 'false':
value = False
for each in existing:
if each.get(key) or (not each.get(key) and value):
proposed[key] = value
result = {}
candidate = CustomNetworkConfig(indent=3)
invoke('state_%s' % state, module, existing, proposed, candidate)
response = load_config(module, candidate)
result.update(response)
if module._verbosity > 0:
end_state = invoke('get_existing', module, args)
result['end_state'] = end_state
result['existing'] = existing
result['proposed'] = proposed_args
if WARNINGS:
result['warnings'] = WARNINGS
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
lpryszcz/bin | bam2bigwig.py | 1 | 4219 | #!/usr/bin/env python
desc="""Convert BAM to BigWig.
Inspired by: https://www.biostars.org/p/64495/#64680
Added support for non-UCSC genomes.
Require:
- bedGraphToBigWig
- samtools
- pybedtools
- pysam
TBD:
- you can avoid genome fasta if faidx is generated from bam header
"""
epilog="""Author: [email protected]
Mizerow, 25/02/2015
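
Example (file names are illustrative):
  bam2bigwig.py -i sample.bam -g genome.fa -o sample.plus.bw -s + -v
writes RPM-scaled plus-strand coverage; pass --scaling to disable RPM scaling.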
"""
import os, pysam, resource, sys
from datetime import datetime
#from pybedtools.contrib.bigwig import bam_to_bigwig
import pybedtools
def get_mapped(bam, verbose=0):
"""Return number of mapped reads in BAM file.
Create BAM index if not present. """
# generate BAM index if absent
if not os.path.isfile(bam+'.bai'):
cmd = "samtools index %s"%bam
if verbose:
sys.stderr.write("[%s] Indexing BAM file: %s\n"%(datetime.ctime(datetime.now()), cmd))
os.system(cmd)
# open BAM file
sam = pysam.AlignmentFile(bam)
return sam.mapped
def bam2bigwig(bam, genome, output, strand=None, scaled=True, verbose=1):
"""Convert BAM to BigWig scaled in reads per million mapped reads."""
# skip if outfile exists
if os.path.isfile(output):
sys.exit("File exists: %s"%output)
# generate faidx if absent
faidx = genome+".fai"
if not os.path.isfile(faidx):
pysam.FastaFile(genome)
# altered source from https://pythonhosted.org/pybedtools/_modules/pybedtools/contrib/bigwig.html#bam_to_bigwig
#bam_to_bigwig(bam='path/to/bam', genome='hg19', output='path/to/bigwig')
if verbose:
sys.stderr.write("[%s] Converting BAM to BED...\n"%(datetime.ctime(datetime.now()), ))
kwargs = dict(bg=True, split=True, g=faidx)
# store strand info
if strand in ("+", "-", "pos", "neg"):
if strand=="pos":
strand="+"
elif strand=="neg":
strand="-"
kwargs['strand'] = strand
#store scaling info
if scaled:
# speed-up using samtools idxstats
#readcount = mapped_read_count(bam)
readcount = get_mapped(bam, verbose)
_scale = 1 / (readcount / 1e6)
kwargs['scale'] = _scale
# get genome coverage
if verbose:
sys.stderr.write("[%s] Generating genome coverage\n"%(datetime.ctime(datetime.now()), ))
x = pybedtools.BedTool(bam).genome_coverage(**kwargs)
cmds = ['bedGraphToBigWig', x.fn, faidx, output]
# convert to bigWig
if verbose:
sys.stderr.write("[%s] Converting BED to bigWig: %s\n"%(datetime.ctime(datetime.now()), " ".join(cmds)))
os.system(" ".join(cmds))
# clean-up
os.unlink(x.fn)
def main():
import argparse
usage = "%(prog)s -v" #usage=usage,
parser = argparse.ArgumentParser(description=desc, epilog=epilog, \
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--version', action='version', version='1.0b')
parser.add_argument("-v", "--verbose", default=False, action="store_true",
help="verbose")
parser.add_argument("-i", "--bam", required=True,
help="BAM file")
parser.add_argument("-g", "--genome", required=True,
help="genome FASTA file")
parser.add_argument("-o", "--output", required=True,
help="output stream [stdout]")
parser.add_argument("-s", "--strand", default="both", choices=("both", "+","-", "pos", "neg"),
help="report coverage from + or - strand [%(default)s]")
parser.add_argument("--scaling", default=True, action="store_false",
help="disable RPM scaling")
o = parser.parse_args()
if o.verbose:
sys.stderr.write("Options: %s\n"%str(o))
bam2bigwig(o.bam, o.genome, o.output, o.strand, o.scaling, o.verbose)
if __name__=='__main__':
t0 = datetime.now()
try:
main()
except KeyboardInterrupt:
sys.stderr.write("\nCtrl-C pressed! \n")
except IOError as e:
sys.stderr.write("I/O error({0}): {1}\n".format(e.errno, e.strerror))
dt = datetime.now()-t0
sys.stderr.write("#Time elapsed: %s\n"%dt)
| gpl-3.0 |
dims/cinder | cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py | 2 | 23352 | # (c) Copyright 2014 Brocade Communications Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Script to push the zone configuration to Brocade SAN switches.
"""
import random
import re
from eventlet import greenthread
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _, _LE
from cinder import ssh_utils
from cinder import utils
import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant
LOG = logging.getLogger(__name__)
class BrcdFCZoneClientCLI(object):
switch_ip = None
switch_port = '22'
switch_user = 'admin'
switch_pwd = 'none'
switch_key = 'none'
patrn = re.compile('[;\s]+')
def __init__(self, ipaddress, username,
password, port, key):
"""Initializing the client."""
self.switch_ip = ipaddress
self.switch_port = port
self.switch_user = username
self.switch_pwd = password
self.switch_key = key
self.sshpool = None
def get_active_zone_set(self):
"""Return the active zone configuration.
        Return active zoneset from fabric. When none of the configurations
        are active, an empty map is returned.
:returns: Map -- active zone set map in the following format
{
'zones':
{'openstack50060b0000c26604201900051ee8e329':
['50060b0000c26604', '201900051ee8e329']
},
'active_zone_config': 'OpenStack_Cfg'
}
"""
zone_set = {}
zone = {}
zone_member = None
zone_name = None
switch_data = None
zone_set_name = None
try:
switch_data = self._get_switch_info(
[zone_constant.GET_ACTIVE_ZONE_CFG])
except exception.BrocadeZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed getting active zone set "
"from fabric %s"), self.switch_ip)
try:
for line in switch_data:
line_split = re.split('\\t', line)
if len(line_split) > 2:
line_split = [x.replace(
'\n', '') for x in line_split]
line_split = [x.replace(
' ',
'') for x in line_split]
if zone_constant.CFG_ZONESET in line_split:
zone_set_name = line_split[1]
continue
if line_split[1]:
zone_name = line_split[1]
zone[zone_name] = list()
if line_split[2]:
zone_member = line_split[2]
zone_member_list = zone.get(zone_name)
zone_member_list.append(zone_member)
zone_set[zone_constant.CFG_ZONES] = zone
zone_set[zone_constant.ACTIVE_ZONE_CONFIG] = zone_set_name
except Exception:
# In case of parsing error here, it should be malformed cli output.
msg = _("Malformed zone configuration: (switch=%(switch)s "
"zone_config=%(zone_config)s)."
) % {'switch': self.switch_ip,
'zone_config': switch_data}
LOG.exception(msg)
raise exception.FCZoneDriverException(reason=msg)
switch_data = None
return zone_set
def add_zones(self, zones, activate, active_zone_set=None):
"""Add zone configuration.
This method will add the zone configuration passed by user.
input params:
zones - zone names mapped to members.
zone members are colon separated but case-insensitive
{ zonename1:[zonememeber1,zonemember2,...],
zonename2:[zonemember1, zonemember2,...]...}
e.g: {'openstack50060b0000c26604201900051ee8e329':
['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']
}
activate - True/False
active_zone_set - active zone set dict retrieved from
get_active_zone_set method
"""
LOG.debug("Add Zones - Zones passed: %s", zones)
cfg_name = None
iterator_count = 0
zone_with_sep = ''
if not active_zone_set:
active_zone_set = self.get_active_zone_set()
LOG.debug("Active zone set: %s", active_zone_set)
zone_list = active_zone_set[zone_constant.CFG_ZONES]
LOG.debug("zone list: %s", zone_list)
for zone in zones.keys():
# If zone exists, its an update. Delete & insert
# TODO(skolathur): This still need to be optimized
# to an update call later. Now we just handled the
# same zone name with same zone members.
if (zone in zone_list):
if set(zones[zone]) == set(zone_list[zone]):
                    continue
try:
self.delete_zones(zone, activate, active_zone_set)
except exception.BrocadeZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Deleting zone failed %s"), zone)
LOG.debug("Deleted Zone before insert : %s", zone)
zone_members_with_sep = ';'.join(str(member) for
member in zones[zone])
LOG.debug("Forming command for add zone")
cmd = 'zonecreate "%(zone)s", "%(zone_members_with_sep)s"' % {
'zone': zone,
'zone_members_with_sep': zone_members_with_sep}
LOG.debug("Adding zone, cmd to run %s", cmd)
self.apply_zone_change(cmd.split())
LOG.debug("Created zones on the switch")
if(iterator_count > 0):
zone_with_sep += ';'
iterator_count += 1
zone_with_sep += zone
if not zone_with_sep:
return
try:
# Get active zone set from device, as some of the zones
# could be deleted.
active_zone_set = self.get_active_zone_set()
cfg_name = active_zone_set[zone_constant.ACTIVE_ZONE_CONFIG]
cmd = None
if not cfg_name:
cfg_name = zone_constant.OPENSTACK_CFG_NAME
cmd = 'cfgcreate "%(zoneset)s", "%(zones)s"' \
% {'zoneset': cfg_name, 'zones': zone_with_sep}
else:
cmd = 'cfgadd "%(zoneset)s", "%(zones)s"' \
% {'zoneset': cfg_name, 'zones': zone_with_sep}
LOG.debug("New zone %s", cmd)
self.apply_zone_change(cmd.split())
if activate:
self.activate_zoneset(cfg_name)
else:
self._cfg_save()
except Exception as e:
self._cfg_trans_abort()
msg = _("Creating and activating zone set failed: "
"(Zone set=%(cfg_name)s error=%(err)s)."
) % {'cfg_name': cfg_name, 'err': six.text_type(e)}
LOG.error(msg)
raise exception.BrocadeZoningCliException(reason=msg)
def activate_zoneset(self, cfgname):
"""Method to Activate the zone config. Param cfgname - ZonesetName."""
cmd_list = [zone_constant.ACTIVATE_ZONESET, cfgname]
return self._ssh_execute(cmd_list, True, 1)
def deactivate_zoneset(self):
"""Method to deActivate the zone config."""
return self._ssh_execute([zone_constant.DEACTIVATE_ZONESET], True, 1)
def delete_zones(self, zone_names, activate, active_zone_set=None):
"""Delete zones from fabric.
Method to delete the active zone config zones
:param zone_names: zoneNames separated by semicolon
:param activate: True/False
:param active_zone_set: the active zone set dict retrieved
from get_active_zone_set method
"""
active_zoneset_name = None
zone_list = []
if not active_zone_set:
active_zone_set = self.get_active_zone_set()
active_zoneset_name = active_zone_set[
zone_constant.ACTIVE_ZONE_CONFIG]
zone_list = active_zone_set[zone_constant.CFG_ZONES]
zones = self.patrn.split(''.join(zone_names))
cmd = None
try:
if len(zones) == len(zone_list):
self.deactivate_zoneset()
cmd = 'cfgdelete "%(active_zoneset_name)s"' \
% {'active_zoneset_name': active_zoneset_name}
# Active zoneset is being deleted, hence reset activate flag
activate = False
else:
cmd = 'cfgremove "%(active_zoneset_name)s", "%(zone_names)s"' \
% {'active_zoneset_name': active_zoneset_name,
'zone_names': zone_names
}
LOG.debug("Delete zones: Config cmd to run: %s", cmd)
self.apply_zone_change(cmd.split())
for zone in zones:
self._zone_delete(zone)
if activate:
self.activate_zoneset(active_zoneset_name)
else:
self._cfg_save()
except Exception as e:
msg = _("Deleting zones failed: (command=%(cmd)s error=%(err)s)."
) % {'cmd': cmd, 'err': six.text_type(e)}
LOG.error(msg)
self._cfg_trans_abort()
raise exception.BrocadeZoningCliException(reason=msg)
def get_nameserver_info(self):
"""Get name server data from fabric.
        This method will return the connected node port wwn list (local
        and remote) for the given switch fabric.
"""
cli_output = None
return_list = []
try:
cmd = '%(nsshow)s;%(nscamshow)s' % {
'nsshow': zone_constant.NS_SHOW,
'nscamshow': zone_constant.NS_CAM_SHOW}
cli_output = self._get_switch_info([cmd])
except exception.BrocadeZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed collecting nsshow "
"info for fabric %s"), self.switch_ip)
if (cli_output):
return_list = self._parse_ns_output(cli_output)
cli_output = None
return return_list
def _cfg_save(self):
self._ssh_execute([zone_constant.CFG_SAVE], True, 1)
def _zone_delete(self, zone_name):
cmd = 'zonedelete "%(zone_name)s"' % {'zone_name': zone_name}
self.apply_zone_change(cmd.split())
def _cfg_trans_abort(self):
is_abortable = self._is_trans_abortable()
if(is_abortable):
self.apply_zone_change([zone_constant.CFG_ZONE_TRANS_ABORT])
def _is_trans_abortable(self):
is_abortable = False
stdout, stderr = None, None
stdout, stderr = self._run_ssh(
[zone_constant.CFG_SHOW_TRANS], True, 1)
output = stdout.splitlines()
is_abortable = False
for line in output:
if(zone_constant.TRANS_ABORTABLE in line):
is_abortable = True
break
if stderr:
msg = _("Error while checking transaction status: %s") % stderr
raise exception.BrocadeZoningCliException(reason=msg)
else:
return is_abortable
def apply_zone_change(self, cmd_list):
"""Execute zoning cli with no status update.
Executes CLI commands such as addZone where status return is
not expected.
"""
stdout, stderr = None, None
LOG.debug("Executing command via ssh: %s", cmd_list)
stdout, stderr = self._run_ssh(cmd_list, True, 1)
# no output expected, so output means there is an error
if stdout:
msg = _("Error while running zoning CLI: (command=%(cmd)s "
"error=%(err)s).") % {'cmd': cmd_list, 'err': stdout}
LOG.error(msg)
self._cfg_trans_abort()
raise exception.BrocadeZoningCliException(reason=msg)
def is_supported_firmware(self):
"""Check firmware version is v6.4 or higher.
        This API checks whether the firmware version meets the plug-in support level.
This only checks major and minor version.
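        For example (CLI output format assumed), a line "Fabric OS: v7.2.1"
        yields firmware 72, which passes the > 63 check.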
"""
cmd = ['version']
firmware = 0
try:
stdout, stderr = self._execute_shell_cmd(cmd)
if (stdout):
for line in stdout:
if 'Fabric OS: v' in line:
LOG.debug("Firmware version string: %s", line)
ver = line.split('Fabric OS: v')[1].split('.')
if (ver):
firmware = int(ver[0] + ver[1])
return firmware > 63
else:
LOG.error(_LE("No CLI output for firmware version check"))
return False
except processutils.ProcessExecutionError as e:
msg = _("Error while getting data via ssh: (command=%(cmd)s "
"error=%(err)s).") % {'cmd': cmd, 'err': six.text_type(e)}
LOG.error(msg)
raise exception.BrocadeZoningCliException(reason=msg)
def _get_switch_info(self, cmd_list):
stdout, stderr, sw_data = None, None, None
try:
stdout, stderr = self._run_ssh(cmd_list, True, 1)
if (stdout):
sw_data = stdout.splitlines()
return sw_data
except processutils.ProcessExecutionError as e:
msg = _("Error while getting data via ssh: (command=%(cmd)s "
"error=%(err)s).") % {'cmd': cmd_list,
'err': six.text_type(e)}
LOG.error(msg)
raise exception.BrocadeZoningCliException(reason=msg)
def _parse_ns_output(self, switch_data):
"""Parses name server data.
Parses nameserver raw data and adds the device port wwns to the list
:returns: List -- list of device port wwn from ns info
"""
return_list = []
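        # Illustrative input line (format assumed from the parsing below):
        #   " N 012345;      3;20:19:00:05:1e:e8:e3:29; na" -> appends "20:19:00:05:1e:e8:e3:29"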
for line in switch_data:
if not(" NL " in line or " N " in line):
continue
linesplit = line.split(';')
if len(linesplit) > 2:
node_port_wwn = linesplit[2]
return_list.append(node_port_wwn)
else:
msg = _("Malformed nameserver string: %s") % line
LOG.error(msg)
raise exception.InvalidParameterValue(err=msg)
return return_list
def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1):
# TODO(skolathur): Need to implement ssh_injection check
# currently, the check will fail for zonecreate command
# as zone members are separated by ';'which is a danger char
command = ' '. join(cmd_list)
if not self.sshpool:
self.sshpool = ssh_utils.SSHPool(self.switch_ip,
self.switch_port,
None,
self.switch_user,
self.switch_pwd,
self.switch_key,
min_size=1,
max_size=5)
last_exception = None
try:
with self.sshpool.item() as ssh:
while attempts > 0:
attempts -= 1
try:
return processutils.ssh_execute(
ssh,
command,
check_exit_code=check_exit_code)
except Exception as e:
LOG.exception(_LE('Error executing SSH command.'))
last_exception = e
greenthread.sleep(random.randint(20, 500) / 100.0)
try:
raise processutils.ProcessExecutionError(
exit_code=last_exception.exit_code,
stdout=last_exception.stdout,
stderr=last_exception.stderr,
cmd=last_exception.cmd)
except AttributeError:
raise processutils.ProcessExecutionError(
exit_code=-1,
stdout="",
stderr="Error running SSH command",
cmd=command)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error running SSH command: %s"), command)
def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1):
"""Execute cli with status update.
Executes CLI commands such as cfgsave where status return is expected.
"""
utils.check_ssh_injection(cmd_list)
command = ' '. join(cmd_list)
if not self.sshpool:
self.sshpool = ssh_utils.SSHPool(self.switch_ip,
self.switch_port,
None,
self.switch_user,
self.switch_pwd,
self.switch_key,
min_size=1,
max_size=5)
stdin, stdout, stderr = None, None, None
LOG.debug("Executing command via ssh: %s", command)
last_exception = None
try:
with self.sshpool.item() as ssh:
while attempts > 0:
attempts -= 1
try:
stdin, stdout, stderr = ssh.exec_command(command)
stdin.write("%s\n" % zone_constant.YES)
channel = stdout.channel
exit_status = channel.recv_exit_status()
LOG.debug("Exit Status from ssh: %s", exit_status)
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug('Result was %s', exit_status)
if check_exit_code and exit_status != 0:
raise processutils.ProcessExecutionError(
exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=command)
else:
return True
else:
return True
except Exception as e:
LOG.exception(_LE('Error executing SSH command.'))
last_exception = e
greenthread.sleep(random.randint(20, 500) / 100.0)
LOG.debug("Handling error case after "
"SSH: %s", last_exception)
try:
raise processutils.ProcessExecutionError(
exit_code=last_exception.exit_code,
stdout=last_exception.stdout,
stderr=last_exception.stderr,
cmd=last_exception.cmd)
except AttributeError:
raise processutils.ProcessExecutionError(
exit_code=-1,
stdout="",
stderr="Error running SSH command",
cmd=command)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error executing command via ssh: %s"), e)
finally:
if stdin:
stdin.flush()
stdin.close()
if stdout:
stdout.close()
if stderr:
stderr.close()
def _execute_shell_cmd(self, cmd):
"""Run command over shell for older firmware versions.
        Invokes shell, issues the command and returns the output.
This is primarily used for issuing read commands when we are not sure
if the firmware supports exec_command.
"""
utils.check_ssh_injection(cmd)
command = ' '. join(cmd)
stdout, stderr = None, None
if not self.sshpool:
self.sshpool = ssh_utils.SSHPool(self.switch_ip,
self.switch_port,
None,
self.switch_user,
self.switch_pwd,
self.switch_key,
min_size=1,
max_size=5)
with self.sshpool.item() as ssh:
LOG.debug('Running cmd (SSH): %s', command)
channel = ssh.invoke_shell()
stdin_stream = channel.makefile('wb')
stdout_stream = channel.makefile('rb')
stderr_stream = channel.makefile('rb')
stdin_stream.write('''%s
exit
''' % command)
stdin_stream.flush()
stdout = stdout_stream.readlines()
stderr = stderr_stream.readlines()
stdin_stream.close()
stdout_stream.close()
stderr_stream.close()
exit_status = channel.recv_exit_status()
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug('Result was %s', exit_status)
if exit_status != 0:
LOG.debug("command %s failed", command)
raise processutils.ProcessExecutionError(
exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=command)
try:
channel.close()
except Exception:
LOG.exception(_LE('Error closing channel.'))
LOG.debug("_execute_cmd: stdout to return: %s", stdout)
LOG.debug("_execute_cmd: stderr to return: %s", stderr)
return (stdout, stderr)
def cleanup(self):
self.sshpool = None
| apache-2.0 |
b3j0f/requester | b3j0f/requester/utils.py | 1 | 1808 | # -*- coding: utf-8 -*-
# --------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016 Jonathan Labéjof <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --------------------------------------------------------------------
"""Utilities."""
__all__ = ['tostr']
def tostr(self):
"""Get the string representation of an object with the class attribute
__slots__ or __dict__."""
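    # Illustrative usage (class and values assumed, not part of the original module):
    #   class Point(object):
    #       __slots__ = ('x', 'y')
    #   p = Point(); p.x, p.y = 1, 2
    #   tostr(p)  # -> "Point(x: 1, y: 2, )"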
cls = type(self)
result = '{0}('.format(cls.__name__)
try:
names = cls.__slots__
except AttributeError:
names = self.__dict__
for name in names:
val = getattr(self, name)
if val is not None:
result += '{0}: {1}, '.format(name, val)
return result + ')'
| mit |
mikebenfield/scikit-learn | sklearn/tests/test_learning_curve.py | 45 | 11897 | # Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
class MockEstimatorFailing(BaseEstimator):
"""Dummy classifier to test error_score in learning curve"""
def fit(self, X_subset, y_subset):
raise ValueError()
def score(self, X=None, y=None):
return None
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_error_score():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockEstimatorFailing()
_, _, test_scores = learning_curve(estimator, X, y, cv=3, error_score=0)
all_zeros = not np.any(test_scores)
assert(all_zeros)
def test_learning_curve_error_score_default_raise():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockEstimatorFailing()
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
asdacap/iiumschedule | server/lib/werkzeug/debug/__init__.py | 310 | 7800 | # -*- coding: utf-8 -*-
"""
werkzeug.debug
~~~~~~~~~~~~~~
WSGI application traceback debugger.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import json
import mimetypes
from os.path import join, dirname, basename, isfile
from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
from werkzeug.debug.tbtools import get_current_traceback, render_console_html
from werkzeug.debug.console import Console
from werkzeug.security import gen_salt
#: import this here because it once was documented as being available
#: from this module. In case there are users left ...
from werkzeug.debug.repr import debug_repr
class _ConsoleFrame(object):
"""Helper class so that we can reuse the frame console code for the
standalone console.
"""
def __init__(self, namespace):
self.console = Console(namespace)
self.id = 0
class DebuggedApplication(object):
"""Enables debugging support for a given application::
from werkzeug.debug import DebuggedApplication
from myapp import app
app = DebuggedApplication(app, evalex=True)
The `evalex` keyword argument allows evaluating expressions in a
traceback's frame context.
.. versionadded:: 0.9
The `lodgeit_url` parameter was deprecated.
:param app: the WSGI application to run debugged.
:param evalex: enable exception evaluation feature (interactive
debugging). This requires a non-forking server.
    :param request_key: The key that points to the request object in this
environment. This parameter is ignored in current
versions.
:param console_path: the URL for a general purpose console.
:param console_init_func: the function that is executed before starting
the general purpose console. The return value
is used as initial namespace.
:param show_hidden_frames: by default hidden traceback frames are skipped.
You can show them by setting this parameter
to `True`.
"""
# this class is public
__module__ = 'werkzeug'
def __init__(self, app, evalex=False, request_key='werkzeug.request',
console_path='/console', console_init_func=None,
show_hidden_frames=False, lodgeit_url=None):
if lodgeit_url is not None:
from warnings import warn
warn(DeprecationWarning('Werkzeug now pastes into gists.'))
if not console_init_func:
console_init_func = dict
self.app = app
self.evalex = evalex
self.frames = {}
self.tracebacks = {}
self.request_key = request_key
self.console_path = console_path
self.console_init_func = console_init_func
self.show_hidden_frames = show_hidden_frames
self.secret = gen_salt(20)
def debug_application(self, environ, start_response):
"""Run the application and conserve the traceback frames."""
app_iter = None
try:
app_iter = self.app(environ, start_response)
for item in app_iter:
yield item
if hasattr(app_iter, 'close'):
app_iter.close()
except Exception:
if hasattr(app_iter, 'close'):
app_iter.close()
traceback = get_current_traceback(skip=1, show_hidden_frames=
self.show_hidden_frames,
ignore_system_exceptions=True)
for frame in traceback.frames:
self.frames[frame.id] = frame
self.tracebacks[traceback.id] = traceback
try:
start_response('500 INTERNAL SERVER ERROR', [
('Content-Type', 'text/html; charset=utf-8'),
# Disable Chrome's XSS protection, the debug
# output can cause false-positives.
('X-XSS-Protection', '0'),
])
except Exception:
# if we end up here there has been output but an error
# occurred. in that situation we can do nothing fancy any
# more, better log something into the error log and fall
# back gracefully.
environ['wsgi.errors'].write(
'Debugging middleware caught exception in streamed '
'response at a point where response headers were already '
'sent.\n')
else:
yield traceback.render_full(evalex=self.evalex,
secret=self.secret) \
.encode('utf-8', 'replace')
traceback.log(environ['wsgi.errors'])
def execute_command(self, request, command, frame):
"""Execute a command in a console."""
return Response(frame.console.eval(command), mimetype='text/html')
def display_console(self, request):
"""Display a standalone shell."""
if 0 not in self.frames:
self.frames[0] = _ConsoleFrame(self.console_init_func())
return Response(render_console_html(secret=self.secret),
mimetype='text/html')
def paste_traceback(self, request, traceback):
"""Paste the traceback and return a JSON response."""
rv = traceback.paste()
return Response(json.dumps(rv), mimetype='application/json')
def get_source(self, request, frame):
"""Render the source viewer."""
return Response(frame.render_source(), mimetype='text/html')
def get_resource(self, request, filename):
"""Return a static resource from the shared folder."""
filename = join(dirname(__file__), 'shared', basename(filename))
if isfile(filename):
mimetype = mimetypes.guess_type(filename)[0] \
or 'application/octet-stream'
f = open(filename, 'rb')
try:
return Response(f.read(), mimetype=mimetype)
finally:
f.close()
return Response('Not Found', status=404)
def __call__(self, environ, start_response):
"""Dispatch the requests."""
# important: don't ever access a function here that reads the incoming
# form data! Otherwise the application won't have access to that data
# any more!
request = Request(environ)
response = self.debug_application
if request.args.get('__debugger__') == 'yes':
cmd = request.args.get('cmd')
arg = request.args.get('f')
secret = request.args.get('s')
traceback = self.tracebacks.get(request.args.get('tb', type=int))
frame = self.frames.get(request.args.get('frm', type=int))
if cmd == 'resource' and arg:
response = self.get_resource(request, arg)
elif cmd == 'paste' and traceback is not None and \
secret == self.secret:
response = self.paste_traceback(request, traceback)
elif cmd == 'source' and frame and self.secret == secret:
response = self.get_source(request, frame)
elif self.evalex and cmd is not None and frame is not None and \
self.secret == secret:
response = self.execute_command(request, cmd, frame)
elif self.evalex and self.console_path is not None and \
request.path == self.console_path:
response = self.display_console(request)
return response(environ, start_response)
| gpl-3.0 |
Gabriel439/pants | tests/python/pants_test/tasks/test_execution_graph.py | 3 | 9880 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from pants.backend.jvm.tasks.jvm_compile.execution_graph import (ExecutionFailure, ExecutionGraph,
Job, JobExistsError,
NoRootJobError, UnknownJobError)
class ImmediatelyExecutingPool(object):
num_workers = 1
def submit_async_work(self, work):
work.func(*work.args_tuples[0])
class PrintLogger(object):
def error(self, msg):
print(msg)
def debug(self, msg):
print(msg)
def passing_fn():
pass
def raising_fn():
raise Exception("I'm an error")
class ExecutionGraphTest(unittest.TestCase):
def setUp(self):
self.jobs_run = []
def execute(self, exec_graph):
exec_graph.execute(ImmediatelyExecutingPool(), PrintLogger())
def job(self, name, fn, dependencies, size=0, on_success=None, on_failure=None):
def recording_fn():
self.jobs_run.append(name)
fn()
return Job(name, recording_fn, dependencies, size, on_success, on_failure)
def test_single_job(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, [])])
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ["A"])
def test_single_dependency(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, ["B"]),
self.job("B", passing_fn, [])])
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ["B", "A"])
def test_simple_binary_tree(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, ["B", "C"]),
self.job("B", passing_fn, []),
self.job("C", passing_fn, [])])
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ["B", "C", "A"])
def test_simple_linear_dependencies(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, ["B"]),
self.job("B", passing_fn, ["C"]),
self.job("C", passing_fn, [])])
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ["C", "B", "A"])
def test_simple_unconnected(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, []),
self.job("B", passing_fn, []),
])
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ["A", "B"])
def test_simple_unconnected_tree(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, ["B"]),
self.job("B", passing_fn, []),
self.job("C", passing_fn, []),
])
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ["B", "C", "A"])
def test_dependee_depends_on_dependency_of_its_dependency(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, ["B", "C"]),
self.job("B", passing_fn, ["C"]),
self.job("C", passing_fn, []),
])
self.execute(exec_graph)
self.assertEqual(["C", "B", "A"], self.jobs_run)
def test_one_failure_raises_exception(self):
exec_graph = ExecutionGraph([self.job("A", raising_fn, [])])
with self.assertRaises(ExecutionFailure) as cm:
self.execute(exec_graph)
self.assertEqual("Failed jobs: A", str(cm.exception))
def test_failure_of_dependency_does_not_run_dependents(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, ["F"]),
self.job("F", raising_fn, [])])
with self.assertRaises(ExecutionFailure) as cm:
self.execute(exec_graph)
self.assertEqual(["F"], self.jobs_run)
self.assertEqual("Failed jobs: F", str(cm.exception))
def test_failure_of_dependency_does_not_run_second_order_dependents(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, ["B"]),
self.job("B", passing_fn, ["F"]),
self.job("F", raising_fn, [])])
with self.assertRaises(ExecutionFailure) as cm:
self.execute(exec_graph)
self.assertEqual(["F"], self.jobs_run)
self.assertEqual("Failed jobs: F", str(cm.exception))
def test_failure_of_one_leg_of_tree_does_not_cancel_other(self):
# TODO do we want this behavior, or do we want to fail fast on the first failed job?
exec_graph = ExecutionGraph([self.job("B", passing_fn, []),
self.job("F", raising_fn, ["B"]),
self.job("A", passing_fn, ["B"])])
with self.assertRaises(ExecutionFailure) as cm:
self.execute(exec_graph)
self.assertTrue(self.jobs_run == ["B", "F", "A"] or self.jobs_run == ["B", "A", "F"])
self.assertEqual("Failed jobs: F", str(cm.exception))
def test_failure_of_disconnected_job_does_not_cancel_non_dependents(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, []),
self.job("F", raising_fn, [])])
with self.assertRaises(ExecutionFailure):
self.execute(exec_graph)
self.assertEqual(["A", "F"], self.jobs_run)
def test_cycle_in_graph_causes_failure(self):
with self.assertRaises(NoRootJobError) as cm:
ExecutionGraph([self.job("A", passing_fn, ["B"]),
self.job("B", passing_fn, ["A"])])
self.assertEqual(
"Unexecutable graph: All scheduled jobs have dependencies. "
"There must be a circular dependency.",
str(cm.exception))
def test_non_existent_dependency_causes_failure(self):
with self.assertRaises(UnknownJobError) as cm:
ExecutionGraph([self.job("A", passing_fn, []),
self.job("B", passing_fn, ["Z"])])
self.assertEqual("Unexecutable graph: Undefined dependencies u'Z'", str(cm.exception))
def test_on_success_callback_raises_error(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, [], on_success=raising_fn)])
with self.assertRaises(ExecutionFailure) as cm:
self.execute(exec_graph)
self.assertEqual("Error in on_success for A: I'm an error", str(cm.exception))
def test_on_failure_callback_raises_error(self):
exec_graph = ExecutionGraph([self.job("A", raising_fn, [], on_failure=raising_fn)])
with self.assertRaises(ExecutionFailure) as cm:
self.execute(exec_graph)
self.assertEqual("Error in on_failure for A: I'm an error", str(cm.exception))
def test_same_key_scheduled_twice_is_error(self):
with self.assertRaises(JobExistsError) as cm:
ExecutionGraph([self.job("Same", passing_fn, []),
self.job("Same", passing_fn, [])])
self.assertEqual("Unexecutable graph: Job already scheduled u'Same'", str(cm.exception))
def test_priorities_for_chain_of_jobs(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, [], 8),
self.job("B", passing_fn, ["A"], 4),
self.job("C", passing_fn, ["B"], 2),
self.job("D", passing_fn, ["C"], 1)])
self.assertEqual(exec_graph._job_priority, {"A": 15, "B": 7, "C": 3, "D": 1})
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ["A", "B", "C", "D"])
def test_priorities_for_fork(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, [], 4),
self.job("B", passing_fn, ["A"], 2),
self.job("C", passing_fn, ["A"], 1)])
self.assertEqual(exec_graph._job_priority, {"A": 6, "B": 2, "C": 1})
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ["A", "B", "C"])
def test_priorities_for_mirrored_fork(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, [], 4),
self.job("B", passing_fn, ["A"], 1),
self.job("C", passing_fn, ["A"], 2)])
self.assertEqual(exec_graph._job_priority, {"A": 6, "B": 1, "C": 2})
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ["A", "C", "B"])
def test_priorities_for_diamond(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, [], 8),
self.job("B", passing_fn, ["A"], 4),
self.job("C", passing_fn, ["A"], 2),
self.job("D", passing_fn, ["B", "C"], 1)])
self.assertEqual(exec_graph._job_priority, {"A": 13, "B": 5, "C": 3, "D": 1})
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ["A", "B", "C", "D"])
def test_priorities_for_mirrored_diamond(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, [], 8),
self.job("B", passing_fn, ["A"], 2),
self.job("C", passing_fn, ["A"], 4),
self.job("D", passing_fn, ["B", "C"], 1)])
self.assertEqual(exec_graph._job_priority, {"A": 13, "B": 3, "C": 5, "D": 1})
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ["A", "C", "B", "D"])
def test_priorities_for_skewed_diamond(self):
exec_graph = ExecutionGraph([self.job("A", passing_fn, [], 1),
self.job("B", passing_fn, ["A"], 2),
self.job("C", passing_fn, ["B"], 4),
self.job("D", passing_fn, ["A"], 8),
self.job("E", passing_fn, ["C", "D"], 16)])
self.assertEqual(exec_graph._job_priority, {"A": 25, "B": 22, "C": 20, "D": 24, "E": 16})
self.execute(exec_graph)
self.assertEqual(self.jobs_run, ["A", "D", "B", "C", "E"])
| apache-2.0 |
cnsuperx/Cocos2d-x-2.2.5 | samples/Lua/TestLua/proj.marmalade/cccopy.py | 16 | 1647 | import os
import shutil
from optparse import OptionParser
def cccopy(sourcePath, destPath):
for root, dirs, files in os.walk(sourcePath):
#figure out where we're going
dest = destPath + root.replace(sourcePath, '')
destAbsPath = os.path.abspath(destPath)
#if we're in a directory that doesn't exist in the destination folder then create a new folder
if not os.path.isdir(dest):
os.mkdir(dest)
print os.path.abspath(dest).replace(destAbsPath, '')[1:] + ' directory created.'
#loop through all files in the directory
for f in files:
#compute current (old) & new file locations
oldLoc = root + "/" + f
newLoc = dest + "/" + f
if not os.path.isfile(newLoc):
try:
shutil.copy2(oldLoc, newLoc)
print os.path.abspath(newLoc).replace(destAbsPath,'')[1:] + ' copied.'
except IOError:
print os.path.abspath(newLoc).replace(destAbsPath,'')[1:] + ' already exists.'
# main
def main():
# parse options
parser = OptionParser(usage="%prog [options]")
parser.add_option("-s", "--sourcePath", action="store", help="Source path", dest="sourcePath")
parser.add_option("-d", "--destPath", action="store", help="Destination path", dest="destPath")
(options, args) = parser.parse_args()
if options.sourcePath and options.destPath:
cccopy(options.sourcePath, options.destPath)
else:
parser.error("")
## entry
if __name__ == "__main__":
main() | mit |
fpy171/scrapy | tests/test_spiderloader/__init__.py | 107 | 3094 | import sys
import os
import shutil
from zope.interface.verify import verifyObject
from twisted.trial import unittest
# ugly hack to avoid cyclic imports of scrapy.spiders when running this test
# alone
from scrapy.interfaces import ISpiderLoader
from scrapy.spiderloader import SpiderLoader
from scrapy.settings import Settings
from scrapy.http import Request
module_dir = os.path.dirname(os.path.abspath(__file__))
class SpiderLoaderTest(unittest.TestCase):
def setUp(self):
orig_spiders_dir = os.path.join(module_dir, 'test_spiders')
self.tmpdir = self.mktemp()
os.mkdir(self.tmpdir)
self.spiders_dir = os.path.join(self.tmpdir, 'test_spiders_xxx')
shutil.copytree(orig_spiders_dir, self.spiders_dir)
sys.path.append(self.tmpdir)
settings = Settings({'SPIDER_MODULES': ['test_spiders_xxx']})
self.spider_loader = SpiderLoader.from_settings(settings)
def tearDown(self):
del self.spider_loader
del sys.modules['test_spiders_xxx']
sys.path.remove(self.tmpdir)
def test_interface(self):
verifyObject(ISpiderLoader, self.spider_loader)
def test_list(self):
self.assertEqual(set(self.spider_loader.list()),
set(['spider1', 'spider2', 'spider3']))
def test_load(self):
spider1 = self.spider_loader.load("spider1")
self.assertEqual(spider1.__name__, 'Spider1')
def test_find_by_request(self):
self.assertEqual(self.spider_loader.find_by_request(Request('http://scrapy1.org/test')),
['spider1'])
self.assertEqual(self.spider_loader.find_by_request(Request('http://scrapy2.org/test')),
['spider2'])
self.assertEqual(set(self.spider_loader.find_by_request(Request('http://scrapy3.org/test'))),
set(['spider1', 'spider2']))
self.assertEqual(self.spider_loader.find_by_request(Request('http://scrapy999.org/test')),
[])
self.assertEqual(self.spider_loader.find_by_request(Request('http://spider3.com')),
[])
self.assertEqual(self.spider_loader.find_by_request(Request('http://spider3.com/onlythis')),
['spider3'])
def test_load_spider_module(self):
module = 'tests.test_spiderloader.test_spiders.spider1'
settings = Settings({'SPIDER_MODULES': [module]})
self.spider_loader = SpiderLoader.from_settings(settings)
assert len(self.spider_loader._spiders) == 1
    def test_load_multiple_spider_modules(self):
prefix = 'tests.test_spiderloader.test_spiders.'
module = ','.join(prefix + s for s in ('spider1', 'spider2'))
settings = Settings({'SPIDER_MODULES': module})
self.spider_loader = SpiderLoader.from_settings(settings)
assert len(self.spider_loader._spiders) == 2
def test_load_base_spider(self):
module = 'tests.test_spiderloader.test_spiders.spider0'
settings = Settings({'SPIDER_MODULES': [module]})
self.spider_loader = SpiderLoader.from_settings(settings)
assert len(self.spider_loader._spiders) == 0
| bsd-3-clause |
astonshane/AdventOfCode | 2015/python/day13/chal25.py | 1 | 1191 | import sys
import itertools
hUnits = {}
def findHappiness(arrangment):
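    """Sum happiness units for a circular seating order: every person adds the
    units they feel next to their left and right neighbours."""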
happiness = 0
for i in range(0, len(arrangment)):
person = arrangment[i]
personLeft, personRight = None, None
if i == 0:
personLeft = arrangment[-1]
else:
personLeft = arrangment[i-1]
if i == len(arrangment) - 1:
personRight = arrangment[0]
else:
personRight = arrangment[i+1]
happiness += hUnits[person][personLeft] + hUnits[person][personRight]
return happiness
# ######################
if len(sys.argv) != 2:
print "need an input file"
exit(1)
f = open(sys.argv[1])
# parse the file
for line in f:
line = line.strip(" \n.").split()
person1 = line[0]
person2 = line[-1]
units = int(line[3])
if line[2] == 'lose':
units *= -1
if person1 not in hUnits:
hUnits[person1] = {}
hUnits[person1][person2] = units
print hUnits
max_happy = None
allPerms = itertools.permutations(hUnits.keys())
for perm in allPerms:
tmp = findHappiness(perm)
if max_happy is None or tmp > max_happy:
max_happy = tmp
print "max_happiness:", max_happy
| mit |
aimas/TuniErp-8.0 | openerp/report/render/rml2pdf/customfonts.py | 261 | 3493 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 P. Christeas, Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2013 OpenERP SA. (http://www.openerp.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from reportlab import rl_config
import logging
import glob
import os
# .apidoc title: TTF Font Table
"""This module allows the mapping of some system-available TTF fonts to
the reportlab engine.
This file could be customized per distro, although most Linux/Unix ones
should have the same filenames, so only the code below would need changes.
Due to an awful configuration that ships with reportlab on many Linux
and Ubuntu distros, we have to override the search path, too.
"""
_logger = logging.getLogger(__name__)
CustomTTFonts = []
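# Entries appended to CustomTTFonts are 4-tuples consumed by SetCustomFonts()
# below: (family, font_name, filename, mode). Hypothetical example (the path
# and mode value are illustrative only):
#   CustomTTFonts.append(('Times', 'Liberation Serif',
#                         '/usr/share/fonts/liberation/LiberationSerif-Regular.ttf', 'all'))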
# Search path for TTF files, in addition of rl_config.TTFSearchPath
TTFSearchPath = [
'/usr/share/fonts/truetype', # SuSE
'/usr/share/fonts/dejavu', '/usr/share/fonts/liberation', # Fedora, RHEL
    '/usr/share/fonts/truetype/*', '/usr/local/share/fonts', # Ubuntu
'/usr/share/fonts/TTF/*', # Mandriva/Mageia
'/usr/share/fonts/TTF', # Arch Linux
'/usr/lib/openoffice/share/fonts/truetype/',
'~/.fonts',
'~/.local/share/fonts',
# mac os X - from
# http://developer.apple.com/technotes/tn/tn2024.html
'~/Library/Fonts',
'/Library/Fonts',
'/Network/Library/Fonts',
'/System/Library/Fonts',
# windows
'c:/winnt/fonts',
'c:/windows/fonts'
]
def list_all_sysfonts():
"""
    Return the list of TTF font file paths found on the system.
"""
filepath = []
# Perform the search for font files ourselves, as reportlab's
# TTFOpenFile is not very good at it.
searchpath = list(set(TTFSearchPath + rl_config.TTFSearchPath))
for dirname in searchpath:
for filename in glob.glob(os.path.join(os.path.expanduser(dirname), '*.[Tt][Tt][FfCc]')):
filepath.append(filename)
return filepath
def SetCustomFonts(rmldoc):
""" Map some font names to the corresponding TTF fonts
The ttf font may not even have the same name, as in
Times -> Liberation Serif.
This function is called once per report, so it should
avoid system-wide processing (cache it, instead).
"""
for family, font, filename, mode in CustomTTFonts:
if os.path.isabs(filename) and os.path.exists(filename):
rmldoc.setTTFontMapping(family, font, filename, mode)
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/tools/perf/page_sets/top_25_pages.py | 27 | 2675 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page
from telemetry.page import shared_page_state
from telemetry import story
from page_sets import top_pages
class Top25PageSet(story.StorySet):
""" Page set consists of top 25 pages with only navigation actions. """
def __init__(self):
super(Top25PageSet, self).__init__(
archive_data_file='data/top_25.json',
cloud_storage_bucket=story.PARTNER_BUCKET)
shared_desktop_state = shared_page_state.SharedDesktopPageState
self.AddStory(top_pages.GoogleWebSearchPage(self, shared_desktop_state))
self.AddStory(top_pages.GmailPage(self, shared_desktop_state))
self.AddStory(top_pages.GoogleCalendarPage(self, shared_desktop_state))
self.AddStory(
top_pages.GoogleImageSearchPage(self, shared_desktop_state))
self.AddStory(top_pages.GoogleDocPage(self, shared_desktop_state))
self.AddStory(top_pages.GooglePlusPage(self, shared_desktop_state))
self.AddStory(top_pages.YoutubePage(self, shared_desktop_state))
self.AddStory(top_pages.BlogspotPage(self, shared_desktop_state))
self.AddStory(top_pages.WordpressPage(self, shared_desktop_state))
self.AddStory(top_pages.FacebookPage(self, shared_desktop_state))
self.AddStory(top_pages.LinkedinPage(self, shared_desktop_state))
self.AddStory(top_pages.WikipediaPage(self, shared_desktop_state))
self.AddStory(top_pages.TwitterPage(self, shared_desktop_state))
self.AddStory(top_pages.PinterestPage(self, shared_desktop_state))
self.AddStory(top_pages.ESPNPage(self, shared_desktop_state))
self.AddStory(top_pages.WeatherPage(self, shared_desktop_state))
self.AddStory(top_pages.YahooGamesPage(self, shared_desktop_state))
other_urls = [
# Why: #1 news worldwide (Alexa global)
'http://news.yahoo.com',
# Why: #2 news worldwide
# crbug.com/528472
#'http://www.cnn.com',
# Why: #1 world commerce website by visits; #3 commerce in the US by
# time spent
'http://www.amazon.com',
# Why: #1 commerce website by time spent by users in US
'http://www.ebay.com',
# Why: #1 Alexa recreation
'http://booking.com',
# Why: #1 Alexa reference
'http://answers.yahoo.com',
# Why: #1 Alexa sports
'http://sports.yahoo.com/',
# Why: top tech blog
'http://techcrunch.com'
]
for url in other_urls:
self.AddStory(
page.Page(url, self, shared_page_state_class=shared_desktop_state))
| mit |
Icenowy/MissionPlanner | Lib/encodings/aliases.py | 84 | 15375 | """ Encoding Aliases Support
This module is used by the encodings package search function to
map encodings names to module names.
Note that the search function normalizes the encoding names before
doing the lookup, so the mapping will have to map normalized
encoding names to module names.
Contents:
The following aliases dictionary contains mappings of all IANA
character set names for which the Python core library provides
codecs. In addition to these, a few Python specific codec
aliases have also been added.
"""
aliases = {
# Please keep this list sorted alphabetically by value !
# ascii codec
'646' : 'ascii',
'ansi_x3.4_1968' : 'ascii',
'ansi_x3_4_1968' : 'ascii', # some email headers use this non-standard name
'ansi_x3.4_1986' : 'ascii',
'cp367' : 'ascii',
'csascii' : 'ascii',
'ibm367' : 'ascii',
'iso646_us' : 'ascii',
'iso_646.irv_1991' : 'ascii',
'iso_ir_6' : 'ascii',
'us' : 'ascii',
'us_ascii' : 'ascii',
# base64_codec codec
'base64' : 'base64_codec',
'base_64' : 'base64_codec',
# big5 codec
'big5_tw' : 'big5',
'csbig5' : 'big5',
# big5hkscs codec
'big5_hkscs' : 'big5hkscs',
'hkscs' : 'big5hkscs',
# bz2_codec codec
'bz2' : 'bz2_codec',
# cp037 codec
'037' : 'cp037',
'csibm037' : 'cp037',
'ebcdic_cp_ca' : 'cp037',
'ebcdic_cp_nl' : 'cp037',
'ebcdic_cp_us' : 'cp037',
'ebcdic_cp_wt' : 'cp037',
'ibm037' : 'cp037',
'ibm039' : 'cp037',
# cp1026 codec
'1026' : 'cp1026',
'csibm1026' : 'cp1026',
'ibm1026' : 'cp1026',
# cp1140 codec
'1140' : 'cp1140',
'ibm1140' : 'cp1140',
# cp1250 codec
'1250' : 'cp1250',
'windows_1250' : 'cp1250',
# cp1251 codec
'1251' : 'cp1251',
'windows_1251' : 'cp1251',
# cp1252 codec
'1252' : 'cp1252',
'windows_1252' : 'cp1252',
# cp1253 codec
'1253' : 'cp1253',
'windows_1253' : 'cp1253',
# cp1254 codec
'1254' : 'cp1254',
'windows_1254' : 'cp1254',
# cp1255 codec
'1255' : 'cp1255',
'windows_1255' : 'cp1255',
# cp1256 codec
'1256' : 'cp1256',
'windows_1256' : 'cp1256',
# cp1257 codec
'1257' : 'cp1257',
'windows_1257' : 'cp1257',
# cp1258 codec
'1258' : 'cp1258',
'windows_1258' : 'cp1258',
# cp424 codec
'424' : 'cp424',
'csibm424' : 'cp424',
'ebcdic_cp_he' : 'cp424',
'ibm424' : 'cp424',
# cp437 codec
'437' : 'cp437',
'cspc8codepage437' : 'cp437',
'ibm437' : 'cp437',
# cp500 codec
'500' : 'cp500',
'csibm500' : 'cp500',
'ebcdic_cp_be' : 'cp500',
'ebcdic_cp_ch' : 'cp500',
'ibm500' : 'cp500',
# cp775 codec
'775' : 'cp775',
'cspc775baltic' : 'cp775',
'ibm775' : 'cp775',
# cp850 codec
'850' : 'cp850',
'cspc850multilingual' : 'cp850',
'ibm850' : 'cp850',
# cp852 codec
'852' : 'cp852',
'cspcp852' : 'cp852',
'ibm852' : 'cp852',
# cp855 codec
'855' : 'cp855',
'csibm855' : 'cp855',
'ibm855' : 'cp855',
# cp857 codec
'857' : 'cp857',
'csibm857' : 'cp857',
'ibm857' : 'cp857',
# cp858 codec
'858' : 'cp858',
'csibm858' : 'cp858',
'ibm858' : 'cp858',
# cp860 codec
'860' : 'cp860',
'csibm860' : 'cp860',
'ibm860' : 'cp860',
# cp861 codec
'861' : 'cp861',
'cp_is' : 'cp861',
'csibm861' : 'cp861',
'ibm861' : 'cp861',
# cp862 codec
'862' : 'cp862',
'cspc862latinhebrew' : 'cp862',
'ibm862' : 'cp862',
# cp863 codec
'863' : 'cp863',
'csibm863' : 'cp863',
'ibm863' : 'cp863',
# cp864 codec
'864' : 'cp864',
'csibm864' : 'cp864',
'ibm864' : 'cp864',
# cp865 codec
'865' : 'cp865',
'csibm865' : 'cp865',
'ibm865' : 'cp865',
# cp866 codec
'866' : 'cp866',
'csibm866' : 'cp866',
'ibm866' : 'cp866',
# cp869 codec
'869' : 'cp869',
'cp_gr' : 'cp869',
'csibm869' : 'cp869',
'ibm869' : 'cp869',
# cp932 codec
'932' : 'cp932',
'ms932' : 'cp932',
'mskanji' : 'cp932',
'ms_kanji' : 'cp932',
# cp949 codec
'949' : 'cp949',
'ms949' : 'cp949',
'uhc' : 'cp949',
# cp950 codec
'950' : 'cp950',
'ms950' : 'cp950',
# euc_jis_2004 codec
'jisx0213' : 'euc_jis_2004',
'eucjis2004' : 'euc_jis_2004',
'euc_jis2004' : 'euc_jis_2004',
# euc_jisx0213 codec
'eucjisx0213' : 'euc_jisx0213',
# euc_jp codec
'eucjp' : 'euc_jp',
'ujis' : 'euc_jp',
'u_jis' : 'euc_jp',
# euc_kr codec
'euckr' : 'euc_kr',
'korean' : 'euc_kr',
'ksc5601' : 'euc_kr',
'ks_c_5601' : 'euc_kr',
'ks_c_5601_1987' : 'euc_kr',
'ksx1001' : 'euc_kr',
'ks_x_1001' : 'euc_kr',
# gb18030 codec
'gb18030_2000' : 'gb18030',
# gb2312 codec
'chinese' : 'gb2312',
'csiso58gb231280' : 'gb2312',
'euc_cn' : 'gb2312',
'euccn' : 'gb2312',
'eucgb2312_cn' : 'gb2312',
'gb2312_1980' : 'gb2312',
'gb2312_80' : 'gb2312',
'iso_ir_58' : 'gb2312',
# gbk codec
'936' : 'gbk',
'cp936' : 'gbk',
'ms936' : 'gbk',
# hex_codec codec
'hex' : 'hex_codec',
# hp_roman8 codec
'roman8' : 'hp_roman8',
'r8' : 'hp_roman8',
'csHPRoman8' : 'hp_roman8',
# hz codec
'hzgb' : 'hz',
'hz_gb' : 'hz',
'hz_gb_2312' : 'hz',
# iso2022_jp codec
'csiso2022jp' : 'iso2022_jp',
'iso2022jp' : 'iso2022_jp',
'iso_2022_jp' : 'iso2022_jp',
# iso2022_jp_1 codec
'iso2022jp_1' : 'iso2022_jp_1',
'iso_2022_jp_1' : 'iso2022_jp_1',
# iso2022_jp_2 codec
'iso2022jp_2' : 'iso2022_jp_2',
'iso_2022_jp_2' : 'iso2022_jp_2',
# iso2022_jp_2004 codec
'iso_2022_jp_2004' : 'iso2022_jp_2004',
'iso2022jp_2004' : 'iso2022_jp_2004',
# iso2022_jp_3 codec
'iso2022jp_3' : 'iso2022_jp_3',
'iso_2022_jp_3' : 'iso2022_jp_3',
# iso2022_jp_ext codec
'iso2022jp_ext' : 'iso2022_jp_ext',
'iso_2022_jp_ext' : 'iso2022_jp_ext',
# iso2022_kr codec
'csiso2022kr' : 'iso2022_kr',
'iso2022kr' : 'iso2022_kr',
'iso_2022_kr' : 'iso2022_kr',
# iso8859_10 codec
'csisolatin6' : 'iso8859_10',
'iso_8859_10' : 'iso8859_10',
'iso_8859_10_1992' : 'iso8859_10',
'iso_ir_157' : 'iso8859_10',
'l6' : 'iso8859_10',
'latin6' : 'iso8859_10',
# iso8859_11 codec
'thai' : 'iso8859_11',
'iso_8859_11' : 'iso8859_11',
'iso_8859_11_2001' : 'iso8859_11',
# iso8859_13 codec
'iso_8859_13' : 'iso8859_13',
'l7' : 'iso8859_13',
'latin7' : 'iso8859_13',
# iso8859_14 codec
'iso_8859_14' : 'iso8859_14',
'iso_8859_14_1998' : 'iso8859_14',
'iso_celtic' : 'iso8859_14',
'iso_ir_199' : 'iso8859_14',
'l8' : 'iso8859_14',
'latin8' : 'iso8859_14',
# iso8859_15 codec
'iso_8859_15' : 'iso8859_15',
'l9' : 'iso8859_15',
'latin9' : 'iso8859_15',
# iso8859_16 codec
'iso_8859_16' : 'iso8859_16',
'iso_8859_16_2001' : 'iso8859_16',
'iso_ir_226' : 'iso8859_16',
'l10' : 'iso8859_16',
'latin10' : 'iso8859_16',
# iso8859_2 codec
'csisolatin2' : 'iso8859_2',
'iso_8859_2' : 'iso8859_2',
'iso_8859_2_1987' : 'iso8859_2',
'iso_ir_101' : 'iso8859_2',
'l2' : 'iso8859_2',
'latin2' : 'iso8859_2',
# iso8859_3 codec
'csisolatin3' : 'iso8859_3',
'iso_8859_3' : 'iso8859_3',
'iso_8859_3_1988' : 'iso8859_3',
'iso_ir_109' : 'iso8859_3',
'l3' : 'iso8859_3',
'latin3' : 'iso8859_3',
# iso8859_4 codec
'csisolatin4' : 'iso8859_4',
'iso_8859_4' : 'iso8859_4',
'iso_8859_4_1988' : 'iso8859_4',
'iso_ir_110' : 'iso8859_4',
'l4' : 'iso8859_4',
'latin4' : 'iso8859_4',
# iso8859_5 codec
'csisolatincyrillic' : 'iso8859_5',
'cyrillic' : 'iso8859_5',
'iso_8859_5' : 'iso8859_5',
'iso_8859_5_1988' : 'iso8859_5',
'iso_ir_144' : 'iso8859_5',
# iso8859_6 codec
'arabic' : 'iso8859_6',
'asmo_708' : 'iso8859_6',
'csisolatinarabic' : 'iso8859_6',
'ecma_114' : 'iso8859_6',
'iso_8859_6' : 'iso8859_6',
'iso_8859_6_1987' : 'iso8859_6',
'iso_ir_127' : 'iso8859_6',
# iso8859_7 codec
'csisolatingreek' : 'iso8859_7',
'ecma_118' : 'iso8859_7',
'elot_928' : 'iso8859_7',
'greek' : 'iso8859_7',
'greek8' : 'iso8859_7',
'iso_8859_7' : 'iso8859_7',
'iso_8859_7_1987' : 'iso8859_7',
'iso_ir_126' : 'iso8859_7',
# iso8859_8 codec
'csisolatinhebrew' : 'iso8859_8',
'hebrew' : 'iso8859_8',
'iso_8859_8' : 'iso8859_8',
'iso_8859_8_1988' : 'iso8859_8',
'iso_ir_138' : 'iso8859_8',
# iso8859_9 codec
'csisolatin5' : 'iso8859_9',
'iso_8859_9' : 'iso8859_9',
'iso_8859_9_1989' : 'iso8859_9',
'iso_ir_148' : 'iso8859_9',
'l5' : 'iso8859_9',
'latin5' : 'iso8859_9',
# johab codec
'cp1361' : 'johab',
'ms1361' : 'johab',
# koi8_r codec
'cskoi8r' : 'koi8_r',
# latin_1 codec
#
# Note that the latin_1 codec is implemented internally in C and a
# lot faster than the charmap codec iso8859_1 which uses the same
# encoding. This is why we discourage the use of the iso8859_1
# codec and alias it to latin_1 instead.
#
'8859' : 'latin_1',
'cp819' : 'latin_1',
'csisolatin1' : 'latin_1',
'ibm819' : 'latin_1',
'iso8859' : 'latin_1',
'iso8859_1' : 'latin_1',
'iso_8859_1' : 'latin_1',
'iso_8859_1_1987' : 'latin_1',
'iso_ir_100' : 'latin_1',
'l1' : 'latin_1',
'latin' : 'latin_1',
'latin1' : 'latin_1',
# mac_cyrillic codec
'maccyrillic' : 'mac_cyrillic',
# mac_greek codec
'macgreek' : 'mac_greek',
# mac_iceland codec
'maciceland' : 'mac_iceland',
# mac_latin2 codec
'maccentraleurope' : 'mac_latin2',
'maclatin2' : 'mac_latin2',
# mac_roman codec
'macroman' : 'mac_roman',
# mac_turkish codec
'macturkish' : 'mac_turkish',
# mbcs codec
'dbcs' : 'mbcs',
# ptcp154 codec
'csptcp154' : 'ptcp154',
'pt154' : 'ptcp154',
'cp154' : 'ptcp154',
'cyrillic_asian' : 'ptcp154',
# quopri_codec codec
'quopri' : 'quopri_codec',
'quoted_printable' : 'quopri_codec',
'quotedprintable' : 'quopri_codec',
# rot_13 codec
'rot13' : 'rot_13',
# shift_jis codec
'csshiftjis' : 'shift_jis',
'shiftjis' : 'shift_jis',
'sjis' : 'shift_jis',
's_jis' : 'shift_jis',
# shift_jis_2004 codec
'shiftjis2004' : 'shift_jis_2004',
'sjis_2004' : 'shift_jis_2004',
's_jis_2004' : 'shift_jis_2004',
# shift_jisx0213 codec
'shiftjisx0213' : 'shift_jisx0213',
'sjisx0213' : 'shift_jisx0213',
's_jisx0213' : 'shift_jisx0213',
# tactis codec
'tis260' : 'tactis',
# tis_620 codec
'tis620' : 'tis_620',
'tis_620_0' : 'tis_620',
'tis_620_2529_0' : 'tis_620',
'tis_620_2529_1' : 'tis_620',
'iso_ir_166' : 'tis_620',
# utf_16 codec
'u16' : 'utf_16',
'utf16' : 'utf_16',
# utf_16_be codec
'unicodebigunmarked' : 'utf_16_be',
'utf_16be' : 'utf_16_be',
# utf_16_le codec
'unicodelittleunmarked' : 'utf_16_le',
'utf_16le' : 'utf_16_le',
# utf_32 codec
'u32' : 'utf_32',
'utf32' : 'utf_32',
# utf_32_be codec
'utf_32be' : 'utf_32_be',
# utf_32_le codec
'utf_32le' : 'utf_32_le',
# utf_7 codec
'u7' : 'utf_7',
'utf7' : 'utf_7',
'unicode_1_1_utf_7' : 'utf_7',
# utf_8 codec
'u8' : 'utf_8',
'utf' : 'utf_8',
'utf8' : 'utf_8',
'utf8_ucs2' : 'utf_8',
'utf8_ucs4' : 'utf_8',
# uu_codec codec
'uu' : 'uu_codec',
# zlib_codec codec
'zip' : 'zlib_codec',
'zlib' : 'zlib_codec',
}
| gpl-3.0 |
asadziach/tensorflow | tensorflow/python/kernel_tests/sparse_ops_test.py | 35 | 34194 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python ops defined in sparse_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
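  """Zero out entries of x below `thresh` (in place) and return the result as
  a (SparseTensor, number_of_nonzero_values) pair."""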
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), len(x_values)
class SparseToIndicatorTest(test_util.TensorFlowTestCase):
def _SparseTensor_5x6(self, dtype):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtype),
constant_op.constant(shape, dtypes.int64))
def _SparseTensor_2x3x4(self, dtype):
# Includes two entries with the form [1, 1, x] : 150.
ind = np.array([[0, 0, 1], [0, 1, 0], [0, 1, 2], [1, 0, 3], [1, 1, 0],
[1, 1, 1], [1, 1, 2], [1, 2, 2]])
val = np.array([1, 10, 12, 103, 150, 149, 150, 122])
shape = np.array([2, 3, 4])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtype),
constant_op.constant(shape, dtypes.int64))
def testInt32(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_5x6(dtypes.int32)
output = sparse_ops.sparse_to_indicator(sp_input, 50).eval()
expected_output = np.zeros((5, 50), dtype=np.bool)
expected_trues = ((0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33))
for expected_true in expected_trues:
expected_output[expected_true] = True
self.assertAllEqual(output, expected_output)
def testInt64(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_5x6(dtypes.int64)
output = sparse_ops.sparse_to_indicator(sp_input, 50).eval()
expected_output = np.zeros((5, 50), dtype=np.bool)
expected_trues = [(0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33)]
for expected_true in expected_trues:
expected_output[expected_true] = True
self.assertAllEqual(output, expected_output)
def testHigherRank(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_2x3x4(dtypes.int64)
output = sparse_ops.sparse_to_indicator(sp_input, 200).eval()
expected_output = np.zeros((2, 3, 200), dtype=np.bool)
expected_trues = [(0, 0, 1), (0, 1, 10), (0, 1, 12), (1, 0, 103),
(1, 1, 149), (1, 1, 150), (1, 2, 122)]
for expected_true in expected_trues:
expected_output[expected_true] = True
self.assertAllEqual(output, expected_output)
class SparseMergeTest(test_util.TensorFlowTestCase):
def _SparseTensorValue_3x50(self, indices_dtype, values_dtype):
# NOTE: This input is intentionally not sorted to validate the
# already_sorted flag below.
ind = np.array([[0, 0], [1, 0], [1, 2], [2, 0], [2, 1], [1, 1]])
# NB: these are not sorted
indices = np.array([0, 13, 10, 33, 32, 14])
values = np.array([-3, 4, 1, 9, 5, 1])
shape = np.array([3, 3])
indices = sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(indices, indices_dtype), np.array(shape, np.int64))
values = sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(values, values_dtype), np.array(shape, np.int64))
return indices, values
def _SparseTensor_3x50(self, indices_dtype, values_dtype):
indices, values = self._SparseTensorValue_3x50(indices_dtype, values_dtype)
return (sparse_tensor.SparseTensor.from_value(indices),
sparse_tensor.SparseTensor.from_value(values))
def _AssertResultsSorted(self, output, vocab_size):
self.assertAllEqual(output.indices,
[[0, 0], [1, 10], [1, 13], [1, 14], [2, 32], [2, 33]])
self.assertAllEqual(output.values, [-3, 1, 4, 1, 5, 9])
self.assertAllEqual(output.dense_shape, [3, vocab_size])
def _AssertResultsNotSorted(self, output, vocab_size):
self.assertAllEqual(output.indices,
[[0, 0], [1, 13], [1, 10], [2, 33], [2, 32], [1, 14]])
self.assertAllEqual(output.values, [-3, 4, 1, 9, 5, 1])
self.assertAllEqual(output.dense_shape, [3, vocab_size])
def testInt32AndFloat32(self):
vocab_size = 50
indices_v, values_v = self._SparseTensorValue_3x50(np.int32, np.float32)
with self.test_session(use_gpu=False) as sess:
for indices in (indices_v,
sparse_tensor.SparseTensor.from_value(indices_v)):
for values in (values_v,
sparse_tensor.SparseTensor.from_value(values_v)):
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = sess.run(sp_output)
self._AssertResultsSorted(output, vocab_size)
def testInt64AndFloat32(self):
vocab_size = 50
with self.test_session(use_gpu=False) as sess:
indices, values = self._SparseTensor_3x50(np.int64, np.float32)
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = sess.run(sp_output)
self._AssertResultsSorted(output, vocab_size)
def testInt64AndFloat64(self):
vocab_size = 50
with self.test_session(use_gpu=False) as sess:
indices, values = self._SparseTensor_3x50(np.int64, np.float64)
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = sess.run(sp_output)
self._AssertResultsSorted(output, vocab_size)
def testInt32AndFloat32NonCanonicalOrder(self):
vocab_size = 50
with self.test_session(use_gpu=False) as sess:
indices, values = self._SparseTensor_3x50(np.int32, np.float32)
sp_output = sparse_ops.sparse_merge(
indices, values, vocab_size, already_sorted=True)
output = sess.run(sp_output)
self._AssertResultsNotSorted(output, vocab_size)
def testInt64AndFloat32NonCanonicalOrder(self):
vocab_size = 50
with self.test_session(use_gpu=False) as sess:
indices, values = self._SparseTensor_3x50(np.int64, np.float32)
sp_output = sparse_ops.sparse_merge(
indices, values, vocab_size, already_sorted=True)
output = sess.run(sp_output)
self._AssertResultsNotSorted(output, vocab_size)
def testInt64AndFloat64NonCanonicalOrder(self):
vocab_size = 50
vocab_size_tensor = constant_op.constant(vocab_size, dtypes.int64)
with self.test_session(use_gpu=False) as sess:
indices, values = self._SparseTensor_3x50(np.int64, np.float64)
sp_output = sparse_ops.sparse_merge(
indices, values, vocab_size_tensor, already_sorted=True)
output = sess.run(sp_output)
self._AssertResultsNotSorted(output, vocab_size)
class SparseMergeHighDimTest(test_util.TensorFlowTestCase):
def _SparseTensor_3x50(self, indices_dtype, values_dtype):
# NOTE: This input is intentionally not sorted to validate the
# already_sorted flag below.
ind = np.array([[0, 0], [1, 0], [1, 2], [2, 0], [2, 1], [1, 1]])
# NB: these are not sorted
indices0 = np.array([0, 13, 10, 33, 32, 14])
indices1 = np.array([12, 4, 0, 0, 1, 30])
values = np.array([-3, 4, 1, 9, 5, 1])
shape = np.array([3, 3])
indices0 = sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(indices0, indices_dtype), np.array(shape, np.int64))
indices1 = sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(indices1, indices_dtype), np.array(shape, np.int64))
values = sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(values, values_dtype), np.array(shape, np.int64))
return ([sparse_tensor.SparseTensor.from_value(indices0),
sparse_tensor.SparseTensor.from_value(indices1)],
sparse_tensor.SparseTensor.from_value(values))
def _AssertResultsSorted(self, output, vocab_size):
self.assertAllEqual(
output.indices,
[[0, 0, 12], [1, 10, 0], [1, 13, 4], [1, 14, 30], [2, 32, 1],
[2, 33, 0]])
self.assertAllEqual(output.values, [-3, 1, 4, 1, 5, 9])
self.assertAllEqual(output.dense_shape, [3] + vocab_size)
def testInt64AndFloat32(self):
vocab_size = [50, 31]
with self.test_session(use_gpu=False) as sess:
indices, values = self._SparseTensor_3x50(np.int64, np.float32)
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = sess.run(sp_output)
self._AssertResultsSorted(output, vocab_size)
def testInt64AndFloat64(self):
vocab_size = [50, 31]
with self.test_session(use_gpu=False) as sess:
indices, values = self._SparseTensor_3x50(np.int64, np.float64)
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = sess.run(sp_output)
self._AssertResultsSorted(output, vocab_size)
def testInt64AndFloat64Shape(self):
vocab_size = [50, 30]
with self.test_session(use_gpu=False) as sess:
indices, values = self._SparseTensor_3x50(np.int64, np.float64)
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = sess.run(sp_output)
self._AssertResultsSorted(output, vocab_size)
class SparseRetainTest(test_util.TensorFlowTestCase):
def _SparseTensorValue_5x6(self):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
return sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(val, np.int32), np.array(shape, np.int64))
def _SparseTensor_5x6(self):
return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_5x6())
def testBasic(self):
with self.test_session(use_gpu=False) as sess:
for sp_input in (self._SparseTensorValue_5x6(), self._SparseTensor_5x6()):
to_retain = np.array([1, 0, 0, 1, 1, 0], dtype=np.bool)
sp_output = sparse_ops.sparse_retain(sp_input, to_retain)
output = sess.run(sp_output)
self.assertAllEqual(output.indices, [[0, 0], [1, 4], [3, 2]])
self.assertAllEqual(output.values, [0, 14, 32])
self.assertAllEqual(output.dense_shape, [5, 6])
def testRetainNone(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_5x6()
to_retain = np.zeros((6,), dtype=np.bool)
sp_output = sparse_ops.sparse_retain(sp_input, to_retain)
output = sess.run(sp_output)
self.assertAllEqual(output.indices, np.array([]).reshape((0, 2)))
self.assertAllEqual(output.values, [])
self.assertAllEqual(output.dense_shape, [5, 6])
def testMismatchedRetainShape(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_5x6()
to_retain = np.array([1, 0, 0, 1, 0], dtype=np.bool)
with self.assertRaises(ValueError):
sparse_ops.sparse_retain(sp_input, to_retain)
class SparseResetShapeTest(test_util.TensorFlowTestCase):
_IND_2_5_6 = np.array(
[[0, 0, 0], [0, 1, 0], [0, 1, 3], [1, 1, 4], [1, 3, 2], [1, 3, 3]],
dtype=np.int64)
_VAL_2_5_6 = np.array([0, 10, 13, 14, 32, 33], dtype=np.int32)
_SHP_2_5_6 = np.array([2, 5, 6], dtype=np.int64)
def _SparseTensor_2x5x6(self):
return sparse_tensor.SparseTensor(
constant_op.constant(self._IND_2_5_6, dtypes.int64),
constant_op.constant(self._VAL_2_5_6, dtypes.int32),
constant_op.constant(self._SHP_2_5_6, dtypes.int64))
def _SparseTensorValue_2x5x6(self):
return sparse_tensor.SparseTensorValue(self._IND_2_5_6, self._VAL_2_5_6,
self._SHP_2_5_6)
def testBasic(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_2x5x6()
new_shape = np.array([3, 6, 7], dtype=np.int64)
sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
output = sess.run(sp_output)
self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
[1, 1, 4], [1, 3, 2], [1, 3, 3]])
self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
self.assertAllEqual(output.dense_shape, [3, 6, 7])
def testInputUnavailableInGraphConstructionOk(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorValue_2x5x6()
new_shape = np.array([3, 6, 7], dtype=np.int64)
sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
output = sess.run(sp_output)
self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
[1, 1, 4], [1, 3, 2], [1, 3, 3]])
self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
self.assertAllEqual(output.dense_shape, [3, 6, 7])
def testFeedInputUnavailableInGraphConstructionOk(self):
with self.test_session(use_gpu=False) as sess:
sp_input = array_ops.sparse_placeholder(dtype=dtypes.int32)
new_shape = np.array([3, 6, 7], dtype=np.int64)
sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
output = sess.run(sp_output,
feed_dict={sp_input: self._SparseTensorValue_2x5x6()})
self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
[1, 1, 4], [1, 3, 2], [1, 3, 3]])
self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
self.assertAllEqual(output.dense_shape, [3, 6, 7])
def testTightBoundingBox(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_2x5x6()
sp_output = sparse_ops.sparse_reset_shape(sp_input)
output = sess.run(sp_output)
self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
[1, 1, 4], [1, 3, 2], [1, 3, 3]])
self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
self.assertAllEqual(output.dense_shape, [2, 4, 5])
def testInvalidRank(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_2x5x6()
new_shape = np.array([3, 7], dtype=np.int64)
with self.assertRaises(ValueError):
sparse_ops.sparse_reset_shape(sp_input, new_shape)
def testInvalidRankNewShapeUnavailableInGraphConstruction(self):
with self.test_session(use_gpu=False) as sess:
new_shape = array_ops.placeholder(dtype=dtypes.int64)
sp_input = self._SparseTensor_2x5x6()
out = sparse_ops.sparse_reset_shape(sp_input, new_shape)
with self.assertRaisesOpError("x == y did not hold element-wise"):
sess.run(out, feed_dict={new_shape: np.array([3, 7], dtype=np.int64)})
def testInvalidDimensionSize(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_2x5x6()
new_shape = np.array([3, 7, 5], dtype=np.int64)
out = sparse_ops.sparse_reset_shape(sp_input, new_shape)
with self.assertRaisesOpError("x <= y did not hold element-wise"):
sess.run(out)
def testInvalidDimensionSizeInputUnavailableInGraphConstruction(self):
sp_input = array_ops.sparse_placeholder(dtype=dtypes.int32)
with self.test_session(use_gpu=False) as sess:
new_shape = np.array([3, 7, 5], dtype=np.int64)
out = sparse_ops.sparse_reset_shape(sp_input, new_shape)
with self.assertRaisesOpError("x <= y did not hold element-wise"):
sess.run(out, feed_dict={sp_input: self._SparseTensorValue_2x5x6()})
class SparseFillEmptyRowsTest(test_util.TensorFlowTestCase):
def _SparseTensorValue_5x6(self):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
return sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(val, np.int32), np.array(shape, np.int64))
def _SparseTensor_5x6(self):
return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_5x6())
def _SparseTensor_String5x6(self):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
val = np.array(["a", "b", "c", "d", "e", "f"])
shape = np.array([5, 6])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.string),
constant_op.constant(shape, dtypes.int64))
def _SparseTensor_2x6(self):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4]])
val = np.array([0, 10, 13, 14])
shape = np.array([2, 6])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.int32),
constant_op.constant(shape, dtypes.int64))
def testFillNumber(self):
with self.test_session(use_gpu=False) as sess:
for sp_input in (self._SparseTensorValue_5x6(), self._SparseTensor_5x6()):
sp_output, empty_row_indicator = (
sparse_ops.sparse_fill_empty_rows(sp_input, -1))
output, empty_row_indicator_out = sess.run(
[sp_output, empty_row_indicator])
self.assertAllEqual(
output.indices,
[[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
self.assertAllEqual(output.values, [0, 10, 13, 14, -1, 32, 33, -1])
self.assertAllEqual(output.dense_shape, [5, 6])
self.assertAllEqual(empty_row_indicator_out,
np.array([0, 0, 1, 0, 1]).astype(np.bool))
def testFillString(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_String5x6()
sp_output, empty_row_indicator = (
sparse_ops.sparse_fill_empty_rows(sp_input, ""))
output, empty_row_indicator_out = sess.run(
[sp_output, empty_row_indicator])
self.assertAllEqual(
output.indices,
[[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
self.assertAllEqual(output.values,
[b"a", b"b", b"c", b"d", b"", b"e", b"f", b""])
self.assertAllEqual(output.dense_shape, [5, 6])
self.assertAllEqual(empty_row_indicator_out,
np.array([0, 0, 1, 0, 1]).astype(np.bool))
def testNoEmptyRows(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_2x6()
sp_output, empty_row_indicator = (
sparse_ops.sparse_fill_empty_rows(sp_input, -1))
output, empty_row_indicator_out = sess.run(
[sp_output, empty_row_indicator])
self.assertAllEqual(output.indices, [[0, 0], [1, 0], [1, 3], [1, 4]])
self.assertAllEqual(output.values, [0, 10, 13, 14])
self.assertAllEqual(output.dense_shape, [2, 6])
self.assertAllEqual(empty_row_indicator_out, np.zeros(2).astype(np.bool))
class SparseReduceSumTest(test_util.TensorFlowTestCase):
# [[1, ?, 1]
# [?, 1, ?]]
  # where ? is implicitly zero.
ind = np.array([[0, 0], [0, 2], [1, 1]]).astype(np.int64)
vals = np.array([1, 1, 1]).astype(np.int32)
dense_shape = np.array([2, 3]).astype(np.int64)
def _compare(self, sp_t, reduction_axes, ndims, keep_dims):
densified = sparse_ops.sparse_tensor_to_dense(sp_t).eval()
np_ans = densified
if reduction_axes is None:
np_ans = np.sum(np_ans, keepdims=keep_dims)
else:
if not isinstance(reduction_axes, list): # Single scalar.
reduction_axes = [reduction_axes]
reduction_axes = np.array(reduction_axes).astype(np.int32)
# Handles negative axes.
reduction_axes = (reduction_axes + ndims) % ndims
# Loop below depends on sorted.
reduction_axes.sort()
for ra in reduction_axes.ravel()[::-1]:
np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session():
tf_dense_ans = sparse_ops.sparse_reduce_sum(sp_t, reduction_axes,
keep_dims)
out_dense = tf_dense_ans.eval()
tf_sparse_ans = sparse_ops.sparse_reduce_sum_sparse(sp_t, reduction_axes,
keep_dims)
# Convert to dense for comparison purposes.
out_sparse = sparse_ops.sparse_tensor_to_dense(tf_sparse_ans).eval()
self.assertAllClose(np_ans, out_dense)
self.assertAllClose(np_ans, out_sparse)
def _compare_all(self, sp_t, reduction_axes, ndims):
self._compare(sp_t, reduction_axes, ndims, False)
self._compare(sp_t, reduction_axes, ndims, True)
def testSimpleAndRandomInputs(self):
sp_t = sparse_tensor.SparseTensor(self.ind, self.vals, self.dense_shape)
with self.test_session(use_gpu=False):
self._compare_all(sp_t, None, ndims=2)
self._compare_all(sp_t, 0, ndims=2)
self._compare_all(sp_t, [1], ndims=2)
self._compare_all(sp_t, [0, 1], ndims=2)
self._compare_all(sp_t, [1, 0], ndims=2)
self._compare_all(sp_t, [-1], ndims=2)
self._compare_all(sp_t, [1, -2], ndims=2)
np.random.seed(1618)
test_dims = [(1618, 1, 11, 7, 1), (1,), (1, 1, 1)]
with self.test_session(use_gpu=False):
for dims in test_dims:
sp_t, unused_nnz = _sparsify(np.random.randn(*dims))
# reduce all using None
self._compare_all(sp_t, None, ndims=len(dims))
# reduce random axes from 1D to N-D
for d in range(1, len(dims) + 1):
axes = np.random.choice(len(dims), size=d, replace=False).tolist()
self._compare_all(sp_t, axes, ndims=len(dims))
def testInvalidAxes(self):
sp_t = sparse_tensor.SparseTensor(self.ind, self.vals, self.dense_shape)
with self.test_session(use_gpu=False):
with self.assertRaisesOpError("Invalid reduction dimension -3"):
sparse_ops.sparse_reduce_sum(sp_t, -3).eval()
with self.assertRaisesOpError("Invalid reduction dimension 2"):
sparse_ops.sparse_reduce_sum(sp_t, 2).eval()
def testGradient(self):
np.random.seed(8161)
test_dims = [(11, 1, 5, 7, 1), (2, 2)]
with self.test_session(use_gpu=False):
for dims in test_dims:
sp_t, nnz = _sparsify(np.random.randn(*dims))
# reduce random axes from 1D to N-D
for d in range(1, len(dims) + 1):
axes = np.random.choice(len(dims), size=d, replace=False).tolist()
reduced = sparse_ops.sparse_reduce_sum(sp_t, axes)
err = gradient_checker.compute_gradient_error(sp_t.values, (nnz,),
reduced,
reduced.eval().shape)
self.assertLess(err, 1e-3)
# Tests for negative axes.
reduced = sparse_ops.sparse_reduce_sum(sp_t, -1)
err = gradient_checker.compute_gradient_error(sp_t.values, (nnz,),
reduced,
reduced.eval().shape)
self.assertLess(err, 1e-3)
class SparseMathOpsTest(test_util.TensorFlowTestCase):
def _check(self, result_tensor, result_np, input_sp_t):
self.assertTrue(isinstance(result_tensor, sparse_tensor.SparseTensor))
self.assertTrue(isinstance(input_sp_t, sparse_tensor.SparseTensor))
self.assertAllEqual(input_sp_t.indices.eval(), result_tensor.indices.eval())
self.assertAllEqual(input_sp_t.dense_shape.eval(),
result_tensor.dense_shape.eval())
res_densified = sparse_ops.sparse_to_dense(result_tensor.indices,
result_tensor.dense_shape,
result_tensor.values).eval()
self.assertAllEqual(result_np, res_densified)
def testCwiseDivAndMul(self):
np.random.seed(1618)
sp_shapes = [(10, 10, 10), (5, 5), (1618,), (3, 3, 7)]
dense_shapes = [(10, 10, 1), (5, 5), (1,), (1, 7)]
with self.test_session(use_gpu=False):
for dtype in [np.float32, np.float64, np.int32, np.int64]:
for sp_shape, dense_shape in zip(sp_shapes, dense_shapes):
sp_vals_np = np.random.rand(*sp_shape).astype(dtype) + 1
dense_vals_np = np.random.rand(*dense_shape).astype(dtype) + 1
sp_t, unused_nnz = _sparsify(sp_vals_np, thresh=1.5)
sp_t_densified = sparse_ops.sparse_tensor_to_dense(sp_t).eval()
dense_t = constant_op.constant(dense_vals_np)
self._check(sp_t / dense_t, sp_t_densified / dense_vals_np, sp_t)
# Check commutative.
self._check(sp_t * dense_t, sp_t_densified * dense_vals_np, sp_t)
self._check(dense_t * sp_t, sp_t_densified * dense_vals_np, sp_t)
if dtype in [np.int32, np.int64]:
res = sp_t / dense_t # should invoke "__truediv__"
self.assertEqual(res.values.eval().dtype, np.float64)
def testCwiseAdd(self):
with self.test_session(use_gpu=False):
# Identity(2) + AllOnes(2,2). Should be equal to 2 * Identity(2).
indices = [[0, 0], [1, 1]]
vals = [1, 1]
shape = (2, 2)
sp_t = sparse_tensor.SparseTensor(indices, vals, shape)
dense_t = array_ops.ones(shape, dtype=dtypes.int32)
self._check(
sparse_ops.sparse_dense_cwise_add(sp_t, dense_t),
np.identity(2) * 2, sp_t)
# Variant of above, but broadcasts the dense side.
dense_t = array_ops.ones([1], dtype=dtypes.int32)
self._check(
sparse_ops.sparse_dense_cwise_add(sp_t, dense_t),
np.identity(2) * 2, sp_t)
def testGradients(self):
np.random.seed(1618)
sp_shapes = [(10, 10, 10), (5, 5), (1618,), (3, 3, 7)]
dense_shapes = [(10, 10, 1), (5, 5), (1,), (1, 7)]
with self.test_session(use_gpu=False):
for dtype in [np.float32, np.float64]:
for sp_shape, dense_shape in zip(sp_shapes, dense_shapes):
sp_vals_np = np.random.rand(*sp_shape).astype(dtype) + 1
dense_vals_np = np.random.rand(*dense_shape).astype(dtype) + 1
sp_t, nnz = _sparsify(sp_vals_np, thresh=1.5)
dense_t = constant_op.constant(dense_vals_np)
cmul = sp_t * dense_t
err = gradient_checker.compute_gradient_error([sp_t.values, dense_t],
[(nnz,), dense_shape],
cmul.values, (nnz,))
self.assertLess(err, 1e-4)
cdiv = sp_t / dense_t
err = gradient_checker.compute_gradient_error(sp_t.values, (nnz,),
cdiv.values, (nnz,))
self.assertLess(err, 1e-4)
err = gradient_checker.compute_gradient_error(
dense_t,
dense_shape,
cdiv.values, (nnz,),
x_init_value=dense_vals_np)
self.assertLess(err, 2e-4)
class SparseSoftmaxTest(test_util.TensorFlowTestCase):
def testEquivalentToDensified(self):
np.random.seed(1618)
n, m = np.random.choice(20, size=2)
for dtype in [np.float32, np.float64]:
sp_vals_np = np.random.rand(n, m).astype(dtype)
batched_sp_t, unused_nnz1 = _sparsify(
sp_vals_np.reshape((1, n, m)), thresh=0.) # No masking.
with self.test_session(use_gpu=False):
densified = constant_op.constant(sp_vals_np)
sp_result = sparse_ops.sparse_softmax(batched_sp_t).eval(
).values.reshape((n, m))
dense_result = nn_ops.softmax(densified)
self.assertAllClose(dense_result.eval(), sp_result)
def testHigherRanks(self):
# For the first shape:
# First batch:
# [? e.]
# [1. ? ]
# Second batch:
# [e ? ]
# [e e ]
#
# The softmax results should be:
# [? 1.] [1 ?]
# [1. ? ] and [.5 .5]
# where ? means implicitly zero.
#
# The second shape: same input data, but with a higher-rank shape.
shapes = [[2, 2, 2], [2, 1, 2, 2]]
for shape in shapes:
values = np.asarray(
[0., np.e, 1., 0., np.e, 0., np.e, np.e]).reshape(shape)
sp_t, unused_nnz = _sparsify(values, thresh=1e-2)
expected_values = [1., 1., 1., .5, .5]
with self.test_session(use_gpu=False):
result = sparse_ops.sparse_softmax(sp_t).eval()
self.assertAllEqual(expected_values, result.values)
self.assertAllEqual(sp_t.indices.eval(), result.indices)
self.assertAllEqual(shape, result.dense_shape)
def testGradient(self):
x_shape = [2, 5, 10]
with self.test_session(use_gpu=False):
for dtype in [np.float32, np.float64]:
x_np = np.random.randn(*x_shape).astype(dtype)
x_tf, nnz = _sparsify(x_np)
y_tf = sparse_ops.sparse_softmax(x_tf)
err = gradient_checker.compute_gradient_error(x_tf.values, (nnz,),
y_tf.values, (nnz,))
self.assertLess(err, 1e-4)
class SparseMinimumMaximumTest(test_util.TensorFlowTestCase):
def _assertSparseTensorValueEqual(self, a, b):
self.assertAllEqual(a.indices, b.indices)
self.assertAllEqual(a.values, b.values)
self.assertAllEqual(a.dense_shape, b.dense_shape)
def testBasic(self):
with self.test_session(use_gpu=False):
# 1-D, values at index 0.
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
sp_one = sparse_tensor.SparseTensor([[0]], [1], [7])
max_tf = sparse_ops.sparse_maximum(sp_zero, sp_one).eval()
min_tf = sparse_ops.sparse_minimum(sp_zero, sp_one).eval()
self._assertSparseTensorValueEqual(sp_one.eval(), max_tf)
self._assertSparseTensorValueEqual(sp_zero.eval(), min_tf)
# Values at different indices.
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
sp_zero_2 = sparse_tensor.SparseTensor([[1]], [0], [7])
expected = sparse_tensor.SparseTensor([[0], [1]], [0, 0], [7])
max_tf = sparse_ops.sparse_maximum(sp_zero, sp_zero_2).eval()
min_tf = sparse_ops.sparse_minimum(sp_zero, sp_zero_2).eval()
self._assertSparseTensorValueEqual(expected.eval(), max_tf)
self._assertSparseTensorValueEqual(expected.eval(), min_tf)
def testRandom(self):
np.random.seed(1618)
shapes = [(13,), (6, 8), (1, 7, 1)]
for shape in shapes:
for dtype in [np.int32, np.int64, np.float16, np.float32, np.float64]:
a_np = np.random.randn(*shape).astype(dtype)
b_np = np.random.randn(*shape).astype(dtype)
sp_a, unused_a_nnz = _sparsify(a_np, thresh=-.5)
sp_b, unused_b_nnz = _sparsify(b_np, thresh=-.5)
with self.test_session(use_gpu=False):
maximum_tf = sparse_ops.sparse_maximum(sp_a, sp_b)
maximum_tf_densified = sparse_ops.sparse_tensor_to_dense(
maximum_tf).eval()
minimum_tf = sparse_ops.sparse_minimum(sp_a, sp_b)
minimum_tf_densified = sparse_ops.sparse_tensor_to_dense(
minimum_tf).eval()
a_densified = sparse_ops.sparse_tensor_to_dense(sp_a).eval()
b_densified = sparse_ops.sparse_tensor_to_dense(sp_b).eval()
self.assertAllEqual(
np.maximum(a_densified, b_densified), maximum_tf_densified)
self.assertAllEqual(
np.minimum(a_densified, b_densified), minimum_tf_densified)
def testMismatchedShapes(self):
with self.test_session(use_gpu=False):
sp_zero = sparse_tensor.SparseTensor([[0, 0]], [0], [1, 1])
sp_one = sparse_tensor.SparseTensor([[0]], [1], [2])
with self.assertRaisesOpError("Operands do not have the same ranks"):
sparse_ops.sparse_maximum(sp_zero, sp_one).eval()
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [1])
sp_one = sparse_tensor.SparseTensor([[0]], [1], [2])
with self.assertRaisesOpError("Operands' shapes do not match"):
sparse_ops.sparse_maximum(sp_zero, sp_one).eval()
class SparseTransposeTest(test.TestCase):
def testTranspose(self):
with self.test_session(use_gpu=False):
np.random.seed(1618)
shapes = [np.random.randint(1, 10, size=rank) for rank in range(1, 6)]
for shape in shapes:
for dtype in [np.int32, np.int64, np.float32, np.float64]:
dn_input = np.random.randn(*shape).astype(dtype)
rank = array_ops.rank(dn_input).eval()
perm = np.random.choice(rank, rank, False)
sp_input, unused_a_nnz = _sparsify(dn_input)
sp_trans = sparse_ops.sparse_transpose(sp_input, perm=perm)
dn_trans = sparse_ops.sparse_tensor_to_dense(sp_trans).eval()
expected_trans = array_ops.transpose(dn_input, perm=perm).eval()
self.assertAllEqual(dn_trans, expected_trans)
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
domob1812/namecore | test/util/bitcoin-util-test.py | 59 | 6594 | #!/usr/bin/env python3
# Copyright 2014 BitPay Inc.
# Copyright 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test framework for bitcoin utils.
Runs automatically during `make check`.
Can also be run manually."""
import argparse
import binascii
import configparser
import difflib
import json
import logging
import os
import pprint
import subprocess
import sys
def main():
config = configparser.ConfigParser()
config.optionxform = str
config.read_file(open(os.path.join(os.path.dirname(__file__), "../config.ini"), encoding="utf8"))
env_conf = dict(config.items('environment'))
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
verbose = args.verbose
if verbose:
level = logging.DEBUG
else:
level = logging.ERROR
formatter = '%(asctime)s - %(levelname)s - %(message)s'
# Add the format/level to the logger
logging.basicConfig(format=formatter, level=level)
bctester(os.path.join(env_conf["SRCDIR"], "test", "util", "data"), "bitcoin-util-test.json", env_conf)
def bctester(testDir, input_basename, buildenv):
""" Loads and parses the input file, runs all tests and reports results"""
input_filename = os.path.join(testDir, input_basename)
raw_data = open(input_filename, encoding="utf8").read()
input_data = json.loads(raw_data)
failed_testcases = []
for testObj in input_data:
try:
bctest(testDir, testObj, buildenv)
logging.info("PASSED: " + testObj["description"])
except:
logging.info("FAILED: " + testObj["description"])
failed_testcases.append(testObj["description"])
if failed_testcases:
error_message = "FAILED_TESTCASES:\n"
error_message += pprint.pformat(failed_testcases, width=400)
logging.error(error_message)
sys.exit(1)
else:
sys.exit(0)
def bctest(testDir, testObj, buildenv):
"""Runs a single test, comparing output and RC to expected output and RC.
Raises an error if input can't be read, executable fails, or output/RC
are not as expected. Error is caught by bctester() and reported.
"""
# Get the exec names and arguments
execprog = os.path.join(buildenv["BUILDDIR"], "src", testObj["exec"] + buildenv["EXEEXT"])
execargs = testObj['args']
execrun = [execprog] + execargs
# Read the input data (if there is any)
stdinCfg = None
inputData = None
if "input" in testObj:
filename = os.path.join(testDir, testObj["input"])
inputData = open(filename, encoding="utf8").read()
stdinCfg = subprocess.PIPE
# Read the expected output data (if there is any)
outputFn = None
outputData = None
outputType = None
if "output_cmp" in testObj:
outputFn = testObj['output_cmp']
outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare)
try:
outputData = open(os.path.join(testDir, outputFn), encoding="utf8").read()
except:
logging.error("Output file " + outputFn + " can not be opened")
raise
if not outputData:
logging.error("Output data missing for " + outputFn)
raise Exception
if not outputType:
logging.error("Output file %s does not have a file extension" % outputFn)
raise Exception
# Run the test
proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
try:
outs = proc.communicate(input=inputData)
except OSError:
logging.error("OSError, Failed to execute " + execprog)
raise
if outputData:
data_mismatch, formatting_mismatch = False, False
# Parse command output and expected output
try:
a_parsed = parse_output(outs[0], outputType)
except Exception as e:
logging.error('Error parsing command output as %s: %s' % (outputType, e))
raise
try:
b_parsed = parse_output(outputData, outputType)
except Exception as e:
logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e))
raise
# Compare data
if a_parsed != b_parsed:
logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
data_mismatch = True
# Compare formatting
if outs[0] != outputData:
error_message = "Output formatting mismatch for " + outputFn + ":\n"
error_message += "".join(difflib.context_diff(outputData.splitlines(True),
outs[0].splitlines(True),
fromfile=outputFn,
tofile="returned"))
logging.error(error_message)
formatting_mismatch = True
assert not data_mismatch and not formatting_mismatch
# Compare the return code to the expected return code
wantRC = 0
if "return_code" in testObj:
wantRC = testObj['return_code']
if proc.returncode != wantRC:
logging.error("Return code mismatch for " + outputFn)
raise Exception
if "error_txt" in testObj:
want_error = testObj["error_txt"]
# Compare error text
# TODO: ideally, we'd compare the strings exactly and also assert
    # that stderr is empty if no errors are expected. However, bitcoin-tx
# emits DISPLAY errors when running as a windows application on
# linux through wine. Just assert that the expected error text appears
# somewhere in stderr.
if want_error not in outs[1]:
logging.error("Error mismatch:\n" + "Expected: " + want_error + "\nReceived: " + outs[1].rstrip())
raise Exception
def parse_output(a, fmt):
"""Parse the output according to specified format.
Raise an error if the output can't be parsed."""
if fmt == 'json': # json: compare parsed data
return json.loads(a)
elif fmt == 'hex': # hex: parse and compare binary data
return binascii.a2b_hex(a.strip())
else:
raise NotImplementedError("Don't know how to compare %s" % fmt)
if __name__ == '__main__':
main()
| mit |
yejingxin/kaggle-ndsb | configurations/bagging_00_convroll4_big_wd_maxout512.py | 6 | 5478 | import numpy as np
import theano
import theano.tensor as T
import lasagne as nn
import data
import load
import nn_plankton
import dihedral
import dihedral_fast
import tmp_dnn
import tta
validation_split_path = "splits/bagging_split_0.pkl"
patch_size = (95, 95)
augmentation_params = {
'zoom_range': (1 / 1.6, 1.6),
'rotation_range': (0, 360),
'shear_range': (-20, 20),
'translation_range': (-10, 10),
'do_flip': True,
'allow_stretch': 1.3,
}
batch_size = 128 // 4
chunk_size = 32768 // 4
num_chunks_train = 840
momentum = 0.9
learning_rate_schedule = {
0: 0.003,
700: 0.0003,
800: 0.00003,
}
validate_every = 20
save_every = 20
def estimate_scale(img):
return np.maximum(img.shape[0], img.shape[1]) / 85.0
# augmentation_transforms_test = []
# for flip in [True, False]:
# for zoom in [1/1.3, 1/1.2, 1/1.1, 1.0, 1.1, 1.2, 1.3]:
# for rot in np.linspace(0.0, 360.0, 5, endpoint=False):
# tf = data.build_augmentation_transform(zoom=(zoom, zoom), rotation=rot, flip=flip)
# augmentation_transforms_test.append(tf)
augmentation_transforms_test = tta.build_quasirandom_transforms(70, **{
'zoom_range': (1 / 1.4, 1.4),
'rotation_range': (0, 360),
'shear_range': (-10, 10),
'translation_range': (-8, 8),
'do_flip': True,
'allow_stretch': 1.2,
})
data_loader = load.ZmuvRescaledDataLoader(estimate_scale=estimate_scale, num_chunks_train=num_chunks_train,
patch_size=patch_size, chunk_size=chunk_size, augmentation_params=augmentation_params,
augmentation_transforms_test=augmentation_transforms_test, validation_split_path=validation_split_path)
# Conv2DLayer = nn.layers.cuda_convnet.Conv2DCCLayer
# MaxPool2DLayer = nn.layers.cuda_convnet.MaxPool2DCCLayer
Conv2DLayer = tmp_dnn.Conv2DDNNLayer
MaxPool2DLayer = tmp_dnn.MaxPool2DDNNLayer
def build_model():
l0 = nn.layers.InputLayer((batch_size, 1, patch_size[0], patch_size[1]))
l0c = dihedral.CyclicSliceLayer(l0)
l1a = Conv2DLayer(l0c, num_filters=32, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l1b = Conv2DLayer(l1a, num_filters=16, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l1 = MaxPool2DLayer(l1b, ds=(3, 3), strides=(2, 2))
l1r = dihedral_fast.CyclicConvRollLayer(l1)
l2a = Conv2DLayer(l1r, num_filters=64, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l2b = Conv2DLayer(l2a, num_filters=32, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l2 = MaxPool2DLayer(l2b, ds=(3, 3), strides=(2, 2))
l2r = dihedral_fast.CyclicConvRollLayer(l2)
l3a = Conv2DLayer(l2r, num_filters=128, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l3b = Conv2DLayer(l3a, num_filters=128, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l3c = Conv2DLayer(l3b, num_filters=64, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l3 = MaxPool2DLayer(l3c, ds=(3, 3), strides=(2, 2))
l3r = dihedral_fast.CyclicConvRollLayer(l3)
l4a = Conv2DLayer(l3r, num_filters=256, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l4b = Conv2DLayer(l4a, num_filters=256, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l4c = Conv2DLayer(l4b, num_filters=128, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l4 = MaxPool2DLayer(l4c, ds=(3, 3), strides=(2, 2))
l4r = dihedral_fast.CyclicConvRollLayer(l4)
l4f = nn.layers.flatten(l4r)
l5 = nn.layers.DenseLayer(nn.layers.dropout(l4f, p=0.5), num_units=1024, W=nn_plankton.Orthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=None)
l5fp = nn.layers.FeaturePoolLayer(l5, ds=2)
l5r = dihedral_fast.CyclicRollLayer(l5fp)
l6 = nn.layers.DenseLayer(nn.layers.dropout(l5r, p=0.5), num_units=1024, W=nn_plankton.Orthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=None)
l6fp = nn.layers.FeaturePoolLayer(l6, ds=2)
l6m = dihedral.CyclicPoolLayer(l6fp, pool_function=nn_plankton.rms)
l7 = nn.layers.DenseLayer(nn.layers.dropout(l6m, p=0.5), num_units=data.num_classes, nonlinearity=T.nnet.softmax, W=nn_plankton.Orthogonal(1.0))
return [l0], l7
def build_objective(l_ins, l_out):
lambda_reg = 0.0005
params = nn.layers.get_all_non_bias_params(l_out)
reg_term = sum(T.sum(p**2) for p in params)
def loss(y, t):
return nn_plankton.log_loss(y, t) + lambda_reg * reg_term
return nn.objectives.Objective(l_out, loss_function=loss)
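# In other words, build_objective is a sketch of plain L2 weight decay: the
# optimised loss is log_loss(y, t) + 0.0005 * sum(||p||^2 for every non-bias
# parameter p of the network).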
| mit |
scriptmediala/mitmproxy | libmproxy/console/pathedit.py | 33 | 2145 | import glob
import os.path
import urwid
class _PathCompleter:
def __init__(self, _testing=False):
"""
_testing: disables reloading of the lookup table to make testing
possible.
"""
self.lookup, self.offset = None, None
self.final = None
self._testing = _testing
def reset(self):
self.lookup = None
self.offset = -1
def complete(self, txt):
"""
Returns the next completion for txt, or None if there is no
completion.
"""
path = os.path.expanduser(txt)
if not self.lookup:
if not self._testing:
# Lookup is a set of (display value, actual value) tuples.
self.lookup = []
if os.path.isdir(path):
files = glob.glob(os.path.join(path, "*"))
prefix = txt
else:
files = glob.glob(path + "*")
prefix = os.path.dirname(txt)
prefix = prefix or "./"
for f in files:
display = os.path.join(prefix, os.path.basename(f))
if os.path.isdir(f):
display += "/"
self.lookup.append((display, f))
if not self.lookup:
self.final = path
return path
self.lookup.sort()
self.offset = -1
self.lookup.append((txt, txt))
self.offset += 1
if self.offset >= len(self.lookup):
self.offset = 0
ret = self.lookup[self.offset]
self.final = ret[1]
return ret[0]
class PathEdit(urwid.Edit, _PathCompleter):
def __init__(self, *args, **kwargs):
urwid.Edit.__init__(self, *args, **kwargs)
_PathCompleter.__init__(self)
def keypress(self, size, key):
if key == "tab":
comp = self.complete(self.get_edit_text())
self.set_edit_text(comp)
self.set_edit_pos(len(comp))
else:
self.reset()
return urwid.Edit.keypress(self, size, key)
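# Minimal usage sketch (an assumption for illustration, not part of mitmproxy
# itself): PathEdit behaves like urwid.Edit but tab-completes filesystem paths.
#
#   import urwid
#   from libmproxy.console.pathedit import PathEdit
#
#   widget = urwid.Filler(PathEdit("save to: "))
#   urwid.MainLoop(widget).run()   # press <tab> to cycle through completions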
| mit |
mjrulesamrat/xbmcbackup | resources/lib/dropbox/client.py | 12 | 40015 | """
The main client API you'll be working with most often. You'll need to
configure a dropbox.session.DropboxSession for this to work, but otherwise
it's fairly self-explanatory.
Before you can begin making requests to the dropbox API, you have to
authenticate your application with Dropbox and get the user to
authorize your application to use dropbox on his behalf. A typical
program, from the initial imports to making a simple request (``account_info``),
looks like this:
.. code-block:: python
# Include the Dropbox SDK libraries
from dropbox import client, rest, session
# Get your app key and secret from the Dropbox developer website
APP_KEY = 'INSERT_APP_KEY_HERE'
APP_SECRET = 'INSERT_SECRET_HERE'
# ACCESS_TYPE should be 'dropbox' or 'app_folder' as configured for your app
ACCESS_TYPE = 'INSERT_ACCESS_TYPE_HERE'
sess = session.DropboxSession(APP_KEY, APP_SECRET, ACCESS_TYPE)
request_token = sess.obtain_request_token()
url = sess.build_authorize_url(request_token)
# Make the user sign in and authorize this token
print "url:", url
print "Please visit this website and press the 'Allow' button, then hit 'Enter' here."
raw_input()
# This will fail if the user didn't visit the above URL and hit 'Allow'
access_token = sess.obtain_access_token(request_token)
client = client.DropboxClient(sess)
print "linked account:", client.account_info()
"""
from __future__ import absolute_import
import re
import os
from StringIO import StringIO
try:
import json
except ImportError:
import simplejson as json
from .rest import ErrorResponse, RESTClient
def format_path(path):
"""Normalize path for use with the Dropbox API.
This function turns multiple adjacent slashes into single
slashes, then ensures that there's a leading slash but
not a trailing slash.
"""
if not path:
return path
path = re.sub(r'/+', '/', path)
if path == '/':
return (u"" if isinstance(path, unicode) else "")
else:
return '/' + path.strip('/')
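# Examples (illustrative, derived from the rules above):
#   format_path("a//b/")  -> "/a/b"
#   format_path("/")      -> ""
#   format_path("")       -> ""      (an empty/None path is returned unchanged)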
class DropboxClient(object):
"""
The main access point of doing REST calls on Dropbox. You should
first create and configure a dropbox.session.DropboxSession object,
and then pass it into DropboxClient's constructor. DropboxClient
then does all the work of properly calling each API method
with the correct OAuth authentication.
You should be aware that any of these methods can raise a
rest.ErrorResponse exception if the server returns a non-200
or invalid HTTP response. Note that a 401 return status at any
point indicates that the user needs to be reauthenticated.
"""
def __init__(self, session, rest_client=RESTClient):
"""Initialize the DropboxClient object.
Args:
``session``: A dropbox.session.DropboxSession object to use for making requests.
``rest_client``: A dropbox.rest.RESTClient-like object to use for making requests. [optional]
"""
self.session = session
self.rest_client = rest_client
def request(self, target, params=None, method='POST', content_server=False):
"""Make an HTTP request to a target API method.
This is an internal method used to properly craft the url, headers, and
params for a Dropbox API request. It is exposed for you in case you
need craft other API calls not in this library or if you want to debug it.
Args:
- ``target``: The target URL with leading slash (e.g. '/files')
- ``params``: A dictionary of parameters to add to the request
- ``method``: An HTTP method (e.g. 'GET' or 'POST')
- ``content_server``: A boolean indicating whether the request is to the
API content server, for example to fetch the contents of a file
rather than its metadata.
Returns:
- A tuple of (url, params, headers) that should be used to make the request.
OAuth authentication information will be added as needed within these fields.
"""
assert method in ['GET','POST', 'PUT'], "Only 'GET', 'POST', and 'PUT' are allowed."
if params is None:
params = {}
host = self.session.API_CONTENT_HOST if content_server else self.session.API_HOST
base = self.session.build_url(host, target)
headers, params = self.session.build_access_headers(method, base, params)
if method in ('GET', 'PUT'):
url = self.session.build_url(host, target, params)
else:
url = self.session.build_url(host, target)
return url, params, headers
def account_info(self):
"""Retrieve information about the user's account.
Returns:
- A dictionary containing account information.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#account-info
"""
url, params, headers = self.request("/account/info", method='GET')
return self.rest_client.GET(url, headers)
def get_chunked_uploader(self, file_obj, length):
"""Creates a ChunkedUploader to upload the given file-like object.
Args:
- ``file_obj``: The file-like object which is the source of the data
being uploaded.
- ``length``: The number of bytes to upload.
The expected use of this function is as follows:
.. code-block:: python
bigFile = open("data.txt", 'rb')
uploader = myclient.get_chunked_uploader(bigFile, size)
print "uploading: ", size
while uploader.offset < size:
try:
upload = uploader.upload_chunked()
except rest.ErrorResponse, e:
# perform error handling and retry logic
The SDK leaves the error handling and retry logic to the developer
to implement, as the exact requirements will depend on the application
involved.
"""
return DropboxClient.ChunkedUploader(self, file_obj, length)
class ChunkedUploader(object):
"""Contains the logic around a chunked upload, which uploads a
large file to Dropbox via the /chunked_upload endpoint
"""
def __init__(self, client, file_obj, length):
self.client = client
self.offset = 0
self.upload_id = None
self.last_block = None
self.file_obj = file_obj
self.target_length = length
def upload_chunked(self, chunk_size = 4 * 1024 * 1024):
"""Uploads data from this ChunkedUploader's file_obj in chunks, until
an error occurs. Throws an exception when an error occurs, and can
be called again to resume the upload.
Args:
- ``chunk_size``: The number of bytes to put in each chunk. [default 4 MB]
"""
while self.offset < self.target_length:
next_chunk_size = min(chunk_size, self.target_length - self.offset)
if self.last_block is None:
self.last_block = self.file_obj.read(next_chunk_size)
try:
(self.offset, self.upload_id) = self.client.upload_chunk(StringIO(self.last_block), next_chunk_size, self.offset, self.upload_id)
self.last_block = None
except ErrorResponse, e:
reply = e.body
if "offset" in reply and reply['offset'] != 0:
if reply['offset'] > self.offset:
self.last_block = None
self.offset = reply['offset']
def finish(self, path, overwrite=False, parent_rev=None):
"""Commits the bytes uploaded by this ChunkedUploader to a file
in the user's Dropbox.
Args:
- ``path``: The full path of the file in the Dropbox.
- ``overwrite``: Whether to overwrite an existing file at the given path. [default False]
If overwrite is False and a file already exists there, Dropbox
will rename the upload to make sure it doesn't overwrite anything.
You need to check the metadata returned for the new name.
This field should only be True if your intent is to potentially
clobber changes to a file that you don't know about.
- ``parent_rev``: The rev field from the 'parent' of this upload. [optional]
If your intent is to update the file at the given path, you should
pass the parent_rev parameter set to the rev value from the most recent
metadata you have of the existing file at that path. If the server
has a more recent version of the file at the specified path, it will
automatically rename your uploaded file, spinning off a conflict.
Using this parameter effectively causes the overwrite parameter to be ignored.
The file will always be overwritten if you send the most-recent parent_rev,
and it will never be overwritten if you send a less-recent one.
"""
path = "/commit_chunked_upload/%s%s" % (self.client.session.root, format_path(path))
params = dict(
overwrite = bool(overwrite),
upload_id = self.upload_id
)
if parent_rev is not None:
params['parent_rev'] = parent_rev
url, params, headers = self.client.request(path, params, content_server=True)
return self.client.rest_client.POST(url, params, headers)
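# Illustrative continuation of the get_chunked_uploader() example above: once
# the upload loop has pushed every byte, commit the upload with something like
#   uploader.finish('/data.txt')
# which (per the /commit_chunked_upload call above) should return the metadata
# dict of the newly created file; the exact fields are not verified here.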
def upload_chunk(self, file_obj, length, offset=0, upload_id=None):
"""Uploads a single chunk of data from the given file like object. The majority of users
should use the ChunkedUploader object, which provides a simpler interface to the
chunked_upload API endpoint.
Args:
- ``file_obj``: The source of the data to upload
- ``length``: The number of bytes to upload in one chunk.
- ``offset``: The byte offset into the overall upload at which this chunk starts. [default 0]
- ``upload_id``: The identifier of an in-progress chunked upload to append to,
or None to start a new one. [optional]
Returns:
- A tuple of (offset, upload_id) taken from the server's reply.
"""
params = dict()
if upload_id:
params['upload_id'] = upload_id
params['offset'] = offset
url, ignored_params, headers = self.request("/chunked_upload", params, method='PUT', content_server=True)
try:
reply = self.rest_client.PUT(url, file_obj, headers)
return reply['offset'], reply['upload_id']
except ErrorResponse, e:
raise e
def put_file(self, full_path, file_obj, overwrite=False, parent_rev=None):
"""Upload a file.
A typical use case would be as follows:
.. code-block:: python
f = open('working-draft.txt')
response = client.put_file('/magnum-opus.txt', f)
print "uploaded:", response
which would return the metadata of the uploaded file, similar to:
.. code-block:: python
{
'bytes': 77,
'icon': 'page_white_text',
'is_dir': False,
'mime_type': 'text/plain',
'modified': 'Wed, 20 Jul 2011 22:04:50 +0000',
'path': '/magnum-opus.txt',
'rev': '362e2029684fe',
'revision': 221922,
'root': 'dropbox',
'size': '77 bytes',
'thumb_exists': False
}
Args:
- ``full_path``: The full path to upload the file to, *including the file name*.
If the destination directory does not yet exist, it will be created.
- ``file_obj``: A file-like object to upload. If you would like, you can pass a string as file_obj.
- ``overwrite``: Whether to overwrite an existing file at the given path. [default False]
If overwrite is False and a file already exists there, Dropbox
will rename the upload to make sure it doesn't overwrite anything.
You need to check the metadata returned for the new name.
This field should only be True if your intent is to potentially
clobber changes to a file that you don't know about.
- ``parent_rev``: The rev field from the 'parent' of this upload. [optional]
If your intent is to update the file at the given path, you should
pass the parent_rev parameter set to the rev value from the most recent
metadata you have of the existing file at that path. If the server
has a more recent version of the file at the specified path, it will
automatically rename your uploaded file, spinning off a conflict.
Using this parameter effectively causes the overwrite parameter to be ignored.
The file will always be overwritten if you send the most-recent parent_rev,
and it will never be overwritten if you send a less-recent one.
Returns:
- A dictionary containing the metadata of the newly uploaded file.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#files-put
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 503: User over quota
Note: In Python versions below version 2.6, httplib doesn't handle file-like objects.
In that case, this code will read the entire file into memory (!).
"""
path = "/files_put/%s%s" % (self.session.root, format_path(full_path))
params = {
'overwrite': bool(overwrite),
}
if parent_rev is not None:
params['parent_rev'] = parent_rev
url, params, headers = self.request(path, params, method='PUT', content_server=True)
return self.rest_client.PUT(url, file_obj, headers)
def get_file(self, from_path, rev=None):
"""Download a file.
Unlike most other calls, get_file returns a raw HTTPResponse with the connection open.
You should call .read() and perform any processing you need, then close the HTTPResponse.
A typical usage looks like this:
.. code-block:: python
out = open('magnum-opus.txt', 'w')
f, metadata = client.get_file_and_metadata('/magnum-opus.txt')
out.write(f.read())
which would download the file ``magnum-opus.txt`` and write the contents into
the file ``magnum-opus.txt`` on the local filesystem.
Args:
- ``from_path``: The path to the file to be downloaded.
- ``rev``: A previous rev value of the file to be downloaded. [optional]
Returns:
- An httplib.HTTPResponse that is the result of the request.
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No file was found at the given path, or the file that was there was deleted.
- 200: Request was okay but response was malformed in some way.
"""
path = "/files/%s%s" % (self.session.root, format_path(from_path))
params = {}
if rev is not None:
params['rev'] = rev
url, params, headers = self.request(path, params, method='GET', content_server=True)
return self.rest_client.request("GET", url, headers=headers, raw_response=True)
def get_file_and_metadata(self, from_path, rev=None):
"""Download a file alongwith its metadata.
Acts as a thin wrapper around get_file() (see get_file() comments for
more details)
Args:
- ``from_path``: The path to the file to be downloaded.
- ``rev``: A previous rev value of the file to be downloaded. [optional]
Returns:
- An httplib.HTTPResponse that is the result of the request.
- A dictionary containing the metadata of the file (see
https://www.dropbox.com/developers/reference/api#metadata for details).
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No file was found at the given path, or the file that was there was deleted.
- 200: Request was okay but response was malformed in some way.
"""
file_res = self.get_file(from_path, rev)
metadata = DropboxClient.__parse_metadata_as_dict(file_res)
return file_res, metadata
@staticmethod
def __parse_metadata_as_dict(dropbox_raw_response):
"""Parses file metadata from a raw dropbox HTTP response, raising a
dropbox.rest.ErrorResponse if parsing fails.
"""
metadata = None
for header, header_val in dropbox_raw_response.getheaders():
if header.lower() == 'x-dropbox-metadata':
try:
metadata = json.loads(header_val)
except ValueError:
raise ErrorResponse(dropbox_raw_response)
if not metadata: raise ErrorResponse(dropbox_raw_response)
return metadata
def delta(self, cursor=None):
"""A way of letting you keep up with changes to files and folders in a
user's Dropbox. You can periodically call delta() to get a list of "delta
entries", which are instructions on how to update your local state to
match the server's state.
Arguments:
- ``cursor``: On the first call, omit this argument (or pass in ``None``). On
subsequent calls, pass in the ``cursor`` string returned by the previous
call.
Returns: A dict with three fields.
- ``entries``: A list of "delta entries" (described below)
- ``reset``: If ``True``, you should reset your local state to be an empty folder
before processing the list of delta entries. This is ``True`` only
in rare situations.
- ``cursor``: A string that is used to keep track of your current state.
On the next call to delta(), pass in this value to return entries
that were recorded since the cursor was returned.
- ``has_more``: If ``True``, then there are more entries available; you can
call delta() again immediately to retrieve those entries. If ``False``,
then wait at least 5 minutes (preferably longer) before checking again.
Delta Entries: Each entry is a 2-item list of one of the following forms:
- [*path*, *metadata*]: Indicates that there is a file/folder at the given
path. You should add the entry to your local path. (The *metadata*
value is the same as what would be returned by the ``metadata()`` call.)
- If the new entry includes parent folders that don't yet exist in your
local state, create those parent folders in your local state. You
will eventually get entries for those parent folders.
- If the new entry is a file, replace whatever your local state has at
*path* with the new entry.
- If the new entry is a folder, check what your local state has at
*path*. If it's a file, replace it with the new entry. If it's a
folder, apply the new *metadata* to the folder, but do not modify
the folder's children.
- [*path*, ``nil``]: Indicates that there is no file/folder at the *path* on
Dropbox. To update your local state to match, delete whatever is at *path*,
including any children (you will sometimes also get "delete" delta entries
for the children, but this is not guaranteed). If your local state doesn't
have anything at *path*, ignore this entry.
Remember: Dropbox treats file names in a case-insensitive but case-preserving
way. To facilitate this, the *path* strings above are lower-cased versions of
the actual path. The *metadata* dicts have the original, case-preserved path.
"""
path = "/delta"
params = {}
if cursor is not None:
params['cursor'] = cursor
url, params, headers = self.request(path, params)
return self.rest_client.POST(url, params, headers)
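# Sketch of a typical delta() consumer (illustrative only; apply_change is a
# hypothetical user-supplied function, and error handling is omitted):
#
#   cursor = None
#   while True:
#       result = client.delta(cursor)
#       for path, metadata in result['entries']:
#           apply_change(path, metadata)   # metadata of None means "deleted"
#       cursor = result['cursor']
#       if not result['has_more']:
#           break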
def create_copy_ref(self, from_path):
"""Creates and returns a copy ref for a specific file. The copy ref can be
used to instantly copy that file to the Dropbox of another account.
Args:
- ``path``: The path to the file for a copy ref to be created on.
Returns:
- A dictionary that looks like the following example:
``{"expires":"Fri, 31 Jan 2042 21:01:05 +0000", "copy_ref":"z1X6ATl6aWtzOGq0c3g5Ng"}``
"""
path = "/copy_ref/%s%s" % (self.session.root, format_path(from_path))
url, params, headers = self.request(path, {}, method='GET')
return self.rest_client.GET(url, headers)
def add_copy_ref(self, copy_ref, to_path):
"""Adds the file referenced by the copy ref to the specified path
Args:
- ``copy_ref``: A copy ref string that was returned from a create_copy_ref call.
The copy_ref can be created from any other Dropbox account, or from the same account.
- ``path``: The path to where the file will be created.
Returns:
- A dictionary containing the metadata of the new copy of the file.
"""
path = "/fileops/copy"
params = {'from_copy_ref': copy_ref,
'to_path': format_path(to_path),
'root': self.session.root}
url, params, headers = self.request(path, params)
return self.rest_client.POST(url, params, headers)
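# Illustrative flow (hypothetical client/path names): copy a file between two
# accounts without re-uploading its contents:
#   ref = client_a.create_copy_ref('/magnum-opus.txt')['copy_ref']
#   client_b.add_copy_ref(ref, '/copied-opus.txt')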
def file_copy(self, from_path, to_path):
"""Copy a file or folder to a new location.
Args:
- ``from_path``: The path to the file or folder to be copied.
- ``to_path``: The destination path of the file or folder to be copied.
This parameter should include the destination filename (e.g.
from_path: '/test.txt', to_path: '/dir/test.txt'). If there's
already a file at the to_path, this copy will be renamed to
be unique.
Returns:
- A dictionary containing the metadata of the new copy of the file or folder.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#fileops-copy
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of:
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No file was found at given from_path.
- 503: User over storage quota.
"""
params = {'root': self.session.root,
'from_path': format_path(from_path),
'to_path': format_path(to_path),
}
url, params, headers = self.request("/fileops/copy", params)
return self.rest_client.POST(url, params, headers)
def file_create_folder(self, path):
"""Create a folder.
Args:
- ``path``: The path of the new folder.
Returns:
- A dictionary containing the metadata of the newly created folder.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#fileops-create-folder
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 403: A folder at that path already exists.
"""
params = {'root': self.session.root, 'path': format_path(path)}
url, params, headers = self.request("/fileops/create_folder", params)
return self.rest_client.POST(url, params, headers)
def file_delete(self, path):
"""Delete a file or folder.
Args:
- ``path``: The path of the file or folder.
Returns:
- A dictionary containing the metadata of the just deleted file.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#fileops-delete
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No file was found at the given path.
"""
params = {'root': self.session.root, 'path': format_path(path)}
url, params, headers = self.request("/fileops/delete", params)
return self.rest_client.POST(url, params, headers)
def file_move(self, from_path, to_path):
"""Move a file or folder to a new location.
Args:
- ``from_path``: The path to the file or folder to be moved.
- ``to_path``: The destination path of the file or folder to be moved.
This parameter should include the destination filename (e.g.
from_path: '/test.txt', to_path: '/dir/test.txt'). If there's
already a file at the to_path, this file or folder will be renamed to
be unique.
Returns:
- A dictionary containing the metadata of the new copy of the file or folder.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#fileops-move
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No file was found at given from_path.
- 503: User over storage quota.
"""
params = {'root': self.session.root, 'from_path': format_path(from_path), 'to_path': format_path(to_path)}
url, params, headers = self.request("/fileops/move", params)
return self.rest_client.POST(url, params, headers)
def metadata(self, path, list=True, file_limit=25000, hash=None, rev=None, include_deleted=False):
"""Retrieve metadata for a file or folder.
A typical use would be:
.. code-block:: python
folder_metadata = client.metadata('/')
print "metadata:", folder_metadata
which would return the metadata of the root directory. This
will look something like:
.. code-block:: python
{
'bytes': 0,
'contents': [
{
'bytes': 0,
'icon': 'folder',
'is_dir': True,
'modified': 'Thu, 25 Aug 2011 00:03:15 +0000',
'path': '/Sample Folder',
'rev': '803beb471',
'revision': 8,
'root': 'dropbox',
'size': '0 bytes',
'thumb_exists': False
},
{
'bytes': 77,
'icon': 'page_white_text',
'is_dir': False,
'mime_type': 'text/plain',
'modified': 'Wed, 20 Jul 2011 22:04:50 +0000',
'path': '/magnum-opus.txt',
'rev': '362e2029684fe',
'revision': 221922,
'root': 'dropbox',
'size': '77 bytes',
'thumb_exists': False
}
],
'hash': 'efdac89c4da886a9cece1927e6c22977',
'icon': 'folder',
'is_dir': True,
'path': '/',
'root': 'app_folder',
'size': '0 bytes',
'thumb_exists': False
}
In this example, the root directory contains two things: ``Sample Folder``,
which is a folder, and ``/magnum-opus.txt``, which is a text file 77 bytes long.
Args:
- ``path``: The path to the file or folder.
- ``list``: Whether to list all contained files (only applies when
path refers to a folder).
- ``file_limit``: The maximum number of file entries to return within
a folder. If the number of files in the directory exceeds this
limit, an exception is raised. The server will return at max
25,000 files within a folder.
- ``hash``: Every directory listing has a hash parameter attached that
can then be passed back into this function later to save on
bandwidth. Rather than returning an unchanged folder's contents,
the server will instead return a 304.
- ``rev``: The revision of the file to retrieve the metadata for. [optional]
This parameter only applies for files. If omitted, you'll receive
the most recent revision metadata.
Returns:
- A dictionary containing the metadata of the file or folder
(and contained files if appropriate).
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#metadata
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 304: Current directory hash matches hash parameters, so contents are unchanged.
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No file was found at given path.
- 406: Too many file entries to return.
"""
path = "/metadata/%s%s" % (self.session.root, format_path(path))
params = {'file_limit': file_limit,
'list': 'true',
'include_deleted': include_deleted,
}
if not list:
params['list'] = 'false'
if hash is not None:
params['hash'] = hash
if rev:
params['rev'] = rev
url, params, headers = self.request(path, params, method='GET')
return self.rest_client.GET(url, headers)
def thumbnail(self, from_path, size='large', format='JPEG'):
"""Download a thumbnail for an image.
Unlike most other calls, thumbnail returns a raw HTTPResponse with the connection open.
You should call .read() and perform any processing you need, then close the HTTPResponse.
Args:
- ``from_path``: The path to the file to be thumbnailed.
- ``size``: A string describing the desired thumbnail size.
At this time, 'small', 'medium', and 'large' are
officially supported sizes (32x32, 64x64, and 128x128
respectively), though others may be available. Check
https://www.dropbox.com/developers/reference/api#thumbnails for
more details.
Returns:
- An httplib.HTTPResponse that is the result of the request.
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No file was found at the given from_path, or files of that type cannot be thumbnailed.
- 415: Image is invalid and cannot be thumbnailed.
"""
assert format in ['JPEG', 'PNG'], "expected a thumbnail format of 'JPEG' or 'PNG', got %s" % format
path = "/thumbnails/%s%s" % (self.session.root, format_path(from_path))
url, params, headers = self.request(path, {'size': size, 'format': format}, method='GET', content_server=True)
return self.rest_client.request("GET", url, headers=headers, raw_response=True)
def thumbnail_and_metadata(self, from_path, size='large', format='JPEG'):
"""Download a thumbnail for an image alongwith its metadata.
Acts as a thin wrapper around thumbnail() (see thumbnail() comments for
more details)
Args:
- ``from_path``: The path to the file to be thumbnailed.
- ``size``: A string describing the desired thumbnail size. See thumbnail()
for details.
Returns:
- An httplib.HTTPResponse that is the result of the request.
- A dictionary containing the metadata of the file whose thumbnail
was downloaded (see https://www.dropbox.com/developers/reference/api#metadata
for details).
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No file was found at the given from_path, or files of that type cannot be thumbnailed.
- 415: Image is invalid and cannot be thumbnailed.
- 200: Request was okay but response was malformed in some way.
"""
thumbnail_res = self.thumbnail(from_path, size, format)
metadata = DropboxClient.__parse_metadata_as_dict(thumbnail_res)
return thumbnail_res, metadata
def search(self, path, query, file_limit=1000, include_deleted=False):
"""Search directory for filenames matching query.
Args:
- ``path``: The directory to search within.
- ``query``: The query to search on (minimum 3 characters).
- ``file_limit``: The maximum number of file entries to return within a folder.
The server will return at max 1,000 files.
- ``include_deleted``: Whether to include deleted files in search results.
Returns:
- A list of the metadata of all matching files (up to
file_limit entries). For a detailed description of what
this call returns, visit:
https://www.dropbox.com/developers/reference/api#search
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
"""
path = "/search/%s%s" % (self.session.root, format_path(path))
params = {
'query': query,
'file_limit': file_limit,
'include_deleted': include_deleted,
}
url, params, headers = self.request(path, params)
return self.rest_client.POST(url, params, headers)
def revisions(self, path, rev_limit=1000):
"""Retrieve revisions of a file.
Args:
- ``path``: The file to fetch revisions for. Note that revisions
are not available for folders.
- ``rev_limit``: The maximum number of file entries to return within
a folder. The server will return at max 1,000 revisions.
Returns:
- A list of the metadata of all matching files (up to rev_limit entries).
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#revisions
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: No revisions were found at the given path.
"""
path = "/revisions/%s%s" % (self.session.root, format_path(path))
params = {
'rev_limit': rev_limit,
}
url, params, headers = self.request(path, params, method='GET')
return self.rest_client.GET(url, headers)
def restore(self, path, rev):
"""Restore a file to a previous revision.
Args:
- ``path``: The file to restore. Note that folders can't be restored.
- ``rev``: A previous rev value of the file to be restored to.
Returns:
- A dictionary containing the metadata of the newly restored file.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#restore
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: Unable to find the file at the given revision.
"""
path = "/restore/%s%s" % (self.session.root, format_path(path))
params = {
'rev': rev,
}
url, params, headers = self.request(path, params)
return self.rest_client.POST(url, params, headers)
def media(self, path):
"""Get a temporary unauthenticated URL for a media file.
All of Dropbox's API methods require OAuth, which may cause problems in
situations where an application expects to be able to hit a URL multiple times
(for example, a media player seeking around a video file). This method
creates a time-limited URL that can be accessed without any authentication,
and returns that to you, along with an expiration time.
Args:
- ``path``: The file to return a URL for. Folders are not supported.
Returns:
- A dictionary that looks like the following example:
``{'url': 'https://dl.dropbox.com/0/view/wvxv1fw6on24qw7/file.mov', 'expires': 'Thu, 16 Sep 2011 01:01:25 +0000'}``
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#media
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: Unable to find the file at the given path.
"""
path = "/media/%s%s" % (self.session.root, format_path(path))
url, params, headers = self.request(path, method='GET')
return self.rest_client.GET(url, headers)
def share(self, path):
"""Create a shareable link to a file or folder.
Shareable links created on Dropbox are time-limited, but don't require any
authentication, so they can be given out freely. The time limit should allow
at least a day of shareability, though users have the ability to disable
a link from their account if they like.
Args:
- ``path``: The file or folder to share.
Returns:
- A dictionary that looks like the following example:
``{'url': 'http://www.dropbox.com/s/m/a2mbDa2', 'expires': 'Thu, 16 Sep 2011 01:01:25 +0000'}``
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/reference/api#shares
Raises:
- A dropbox.rest.ErrorResponse with an HTTP status of
- 400: Bad request (may be due to many things; check e.error for details)
- 404: Unable to find the file at the given path.
"""
path = "/shares/%s%s" % (self.session.root, format_path(path))
url, params, headers = self.request(path, method='GET')
return self.rest_client.GET(url, headers)
| mit |
dotcool/zulip | zproject/urls.py | 108 | 15143 | from django.conf import settings
from django.conf.urls import patterns, url, include
from django.views.generic import TemplateView, RedirectView
import os.path
import zerver.forms
# NB: There are several other pieces of code which route requests by URL:
#
# - runtornado.py has its own URL list for Tornado views. See the
# invocation of web.Application in that file.
#
# - The Nginx config knows which URLs to route to Django or Tornado.
#
# - Likewise for the local dev server in tools/run-dev.py.
urlpatterns = patterns('',
url(r'^$', 'zerver.views.home'),
# We have a desktop-specific landing page in case we change our / to not log in in the future. We don't
# want to require a new desktop app build for everyone in that case
url(r'^desktop_home/$', 'zerver.views.desktop_home'),
url(r'^accounts/login/sso/$', 'zerver.views.remote_user_sso', name='login-sso'),
url(r'^accounts/login/jwt/$', 'zerver.views.remote_user_jwt', name='login-jwt'),
url(r'^accounts/login/google/$', 'zerver.views.start_google_oauth2'),
url(r'^accounts/login/google/done/$', 'zerver.views.finish_google_oauth2'),
url(r'^accounts/login/local/$', 'zerver.views.dev_direct_login'),
# We have two entries for accounts/login to allow reverses on the Django
# view we're wrapping to continue to function.
url(r'^accounts/login/', 'zerver.views.login_page', {'template_name': 'zerver/login.html'}),
url(r'^accounts/login/', 'django.contrib.auth.views.login', {'template_name': 'zerver/login.html'}),
url(r'^accounts/logout/', 'zerver.views.logout_then_login'),
url(r'^accounts/webathena_kerberos_login/', 'zerver.views.webathena_kerberos_login'),
url(r'^accounts/password/reset/$', 'django.contrib.auth.views.password_reset',
{'post_reset_redirect' : '/accounts/password/reset/done/',
'template_name': 'zerver/reset.html',
'email_template_name': 'registration/password_reset_email.txt',
}),
url(r'^accounts/password/reset/done/$', 'django.contrib.auth.views.password_reset_done',
{'template_name': 'zerver/reset_emailed.html'}),
url(r'^accounts/password/reset/(?P<uidb64>[0-9A-Za-z]+)/(?P<token>.+)/$',
'django.contrib.auth.views.password_reset_confirm',
{'post_reset_redirect' : '/accounts/password/done/',
'template_name': 'zerver/reset_confirm.html',
'set_password_form' : zerver.forms.LoggingSetPasswordForm}),
url(r'^accounts/password/done/$', 'django.contrib.auth.views.password_reset_complete',
{'template_name': 'zerver/reset_done.html'}),
# Avatar
url(r'^avatar/(?P<email>[\S]+)?', 'zerver.views.avatar'),
# Registration views, require a confirmation ID.
url(r'^accounts/home/', 'zerver.views.accounts_home'),
url(r'^accounts/send_confirm/(?P<email>[\S]+)?',
TemplateView.as_view(template_name='zerver/accounts_send_confirm.html'), name='send_confirm'),
url(r'^accounts/register/', 'zerver.views.accounts_register'),
url(r'^accounts/do_confirm/(?P<confirmation_key>[\w]+)', 'confirmation.views.confirm'),
url(r'^invite/$', 'zerver.views.initial_invite_page', name='initial-invite-users'),
# Unsubscription endpoint. Used for various types of e-mails (day 1 & 2,
# missed PMs, etc.)
url(r'^accounts/unsubscribe/(?P<type>[\w]+)/(?P<token>[\w]+)',
'zerver.views.email_unsubscribe'),
# Portico-styled page used to provide email confirmation of terms acceptance.
url(r'^accounts/accept_terms/$', 'zerver.views.accounts_accept_terms'),
# Terms of service and privacy policy
url(r'^terms/$', TemplateView.as_view(template_name='zerver/terms.html')),
url(r'^terms-enterprise/$', TemplateView.as_view(template_name='zerver/terms-enterprise.html')),
url(r'^privacy/$', TemplateView.as_view(template_name='zerver/privacy.html')),
# Login/registration
url(r'^register/$', 'zerver.views.accounts_home', name='register'),
url(r'^login/$', 'zerver.views.login_page', {'template_name': 'zerver/login.html'}),
# A registration page that passes through the domain, for totally open realms.
url(r'^register/(?P<domain>\S+)/$', 'zerver.views.accounts_home_with_domain'),
# API and integrations documentation
url(r'^api/$', TemplateView.as_view(template_name='zerver/api.html')),
url(r'^api/endpoints/$', 'zerver.views.api_endpoint_docs'),
url(r'^integrations/$', TemplateView.as_view(template_name='zerver/integrations.html')),
url(r'^apps/$', TemplateView.as_view(template_name='zerver/apps.html')),
url(r'^robots\.txt$', RedirectView.as_view(url='/static/robots.txt')),
# Landing page, features pages, signup form, etc.
url(r'^hello/$', TemplateView.as_view(template_name='zerver/hello.html'),
name='landing-page'),
url(r'^new-user/$', RedirectView.as_view(url='/hello')),
url(r'^features/$', TemplateView.as_view(template_name='zerver/features.html')),
)
# These are used for voyager development. On a real voyager instance,
# these files would be served by nginx.
if settings.DEVELOPMENT and settings.LOCAL_UPLOADS_DIR is not None:
urlpatterns += patterns('',
url(r'^user_avatars/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars")}),
url(r'^user_uploads/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': os.path.join(settings.LOCAL_UPLOADS_DIR, "files")}),
)
urlpatterns += patterns('zerver.views',
# These are json format views used by the web client. They require a logged in browser.
url(r'^json/update_pointer$', 'json_update_pointer'),
url(r'^json/get_old_messages$', 'messages.json_get_old_messages'),
url(r'^json/get_public_streams$', 'json_get_public_streams'),
url(r'^json/rename_stream$', 'json_rename_stream'),
url(r'^json/make_stream_public$', 'json_make_stream_public'),
url(r'^json/make_stream_private$', 'json_make_stream_private'),
url(r'^json/send_message$', 'messages.json_send_message'),
url(r'^json/invite_users$', 'json_invite_users'),
url(r'^json/bulk_invite_users$', 'json_bulk_invite_users'),
url(r'^json/settings/change$', 'json_change_settings'),
url(r'^json/notify_settings/change$', 'json_change_notify_settings'),
url(r'^json/ui_settings/change$', 'json_change_ui_settings'),
url(r'^json/subscriptions/remove$', 'json_remove_subscriptions'),
url(r'^json/subscriptions/add$', 'json_add_subscriptions'),
url(r'^json/subscriptions/exists$', 'json_stream_exists'),
url(r'^json/subscriptions/property$', 'json_subscription_property'),
url(r'^json/get_subscribers$', 'json_get_subscribers'),
url(r'^json/fetch_api_key$', 'json_fetch_api_key'),
url(r'^json/update_active_status$', 'json_update_active_status'),
url(r'^json/get_active_statuses$', 'json_get_active_statuses'),
url(r'^json/tutorial_send_message$', 'json_tutorial_send_message'),
url(r'^json/tutorial_status$', 'json_tutorial_status'),
url(r'^json/change_enter_sends$', 'json_change_enter_sends'),
url(r'^json/get_profile$', 'json_get_profile'),
url(r'^json/report_error$', 'json_report_error'),
url(r'^json/report_send_time$', 'json_report_send_time'),
url(r'^json/report_narrow_time$', 'json_report_narrow_time'),
url(r'^json/report_unnarrow_time$', 'json_report_unnarrow_time'),
url(r'^json/update_message_flags$', 'messages.json_update_flags'),
url(r'^json/register$', 'json_events_register'),
url(r'^json/upload_file$', 'json_upload_file'),
url(r'^json/messages_in_narrow$', 'messages.json_messages_in_narrow'),
url(r'^json/update_message$', 'messages.json_update_message'),
url(r'^json/fetch_raw_message$', 'messages.json_fetch_raw_message'),
url(r'^json/refer_friend$', 'json_refer_friend'),
url(r'^json/set_alert_words$', 'json_set_alert_words'),
url(r'^json/set_muted_topics$', 'json_set_muted_topics'),
url(r'^json/set_avatar$', 'json_set_avatar'),
url(r'^json/time_setting$', 'json_time_setting'),
url(r'^json/left_side_userlist$', 'json_left_side_userlist'),
# This json format view is used by the LEGACY pre-REST API. It
# requires an API key.
url(r'^api/v1/send_message$', 'messages.api_send_message'),
# This json format view, used by the mobile apps, accepts a username/
# password pair and returns an API key.
url(r'^api/v1/fetch_api_key$', 'api_fetch_api_key'),
# These are integration-specific web hook callbacks
url(r'^api/v1/external/beanstalk$' , 'webhooks.api_beanstalk_webhook'),
url(r'^api/v1/external/github$', 'webhooks.api_github_landing'),
url(r'^api/v1/external/jira$', 'webhooks.api_jira_webhook'),
url(r'^api/v1/external/pivotal$', 'webhooks.api_pivotal_webhook'),
url(r'^api/v1/external/newrelic$', 'webhooks.api_newrelic_webhook'),
url(r'^api/v1/external/bitbucket$', 'webhooks.api_bitbucket_webhook'),
url(r'^api/v1/external/desk$', 'webhooks.api_deskdotcom_webhook'),
url(r'^api/v1/external/stash$', 'webhooks.api_stash_webhook'),
url(r'^api/v1/external/freshdesk$', 'webhooks.api_freshdesk_webhook'),
url(r'^api/v1/external/zendesk$', 'webhooks.api_zendesk_webhook'),
url(r'^api/v1/external/pagerduty$', 'webhooks.api_pagerduty_webhook'),
url(r'^user_uploads/(?P<realm_id>(\d*|unk))/(?P<filename>.*)', 'get_uploaded_file'),
)
# JSON format views used by the redesigned API; they accept basic auth username:password.
v1_api_and_json_patterns = patterns('zerver.views',
url(r'^export$', 'rest_dispatch',
{'GET': 'export'}),
url(r'^streams$', 'rest_dispatch',
{'GET': 'get_streams_backend'}),
# GET returns "stream info" (undefined currently?), HEAD returns whether stream exists (200 or 404)
url(r'^streams/(?P<stream_name>.*)/members$', 'rest_dispatch',
{'GET': 'get_subscribers_backend'}),
url(r'^streams/(?P<stream_name>.*)$', 'rest_dispatch',
{'HEAD': 'stream_exists_backend',
'GET': 'stream_exists_backend',
'PATCH': 'update_stream_backend',
'DELETE': 'deactivate_stream_backend'}),
url(r'^users$', 'rest_dispatch',
{'GET': 'get_members_backend',
'POST': 'create_user_backend'}),
url(r'^users/me$', 'rest_dispatch',
{'GET': 'get_profile_backend'}),
url(r'^users/me/enter-sends$', 'rest_dispatch',
{'POST': 'json_change_enter_sends'}),
url(r'^users/me/pointer$', 'rest_dispatch',
{'GET': 'get_pointer_backend',
'PUT': 'update_pointer_backend'}),
# GET lists your streams, POST bulk adds, PATCH bulk modifies/removes
url(r'^users/me/subscriptions$', 'rest_dispatch',
{'GET': 'list_subscriptions_backend',
'POST': 'add_subscriptions_backend',
'PATCH': 'update_subscriptions_backend'}),
url(r'^users/me/alert_words$', 'rest_dispatch',
{'GET': 'list_alert_words',
'PUT': 'set_alert_words',
'PATCH': 'add_alert_words',
'DELETE': 'remove_alert_words'}),
url(r'^default_streams$', 'rest_dispatch',
{'PATCH': 'add_default_stream',
'DELETE': 'remove_default_stream'}),
url(r'^realm$', 'rest_dispatch',
{'PATCH': 'update_realm'}),
url(r'^users/me/api_key/regenerate$', 'rest_dispatch',
{'POST': 'regenerate_api_key'}),
url(r'^users/me/presence$', 'rest_dispatch',
{'POST': 'update_active_status_backend'}),
# Endpoint used by iOS devices to register their
# unique APNS device token
url(r'^users/me/apns_device_token$', 'rest_dispatch',
{'POST' : 'add_apns_device_token',
'DELETE': 'remove_apns_device_token'}),
url(r'^users/me/android_gcm_reg_id$', 'rest_dispatch',
{'POST': 'add_android_reg_id',
'DELETE': 'remove_android_reg_id'}),
url(r'^users/(?P<email>.*)/reactivate$', 'rest_dispatch',
{'POST': 'reactivate_user_backend'}),
url(r'^users/(?P<email>.*)$', 'rest_dispatch',
{'PATCH': 'update_user_backend',
'DELETE': 'deactivate_user_backend'}),
url(r'^bots$', 'rest_dispatch',
{'GET': 'get_bots_backend',
'POST': 'add_bot_backend'}),
url(r'^bots/(?P<email>.*)/api_key/regenerate$', 'rest_dispatch',
{'POST': 'regenerate_bot_api_key'}),
url(r'^bots/(?P<email>.*)$', 'rest_dispatch',
{'PATCH': 'patch_bot_backend',
'DELETE': 'deactivate_bot_backend'}),
url(r'^register$', 'rest_dispatch',
{'POST': 'api_events_register'}),
# Returns a 204, used by desktop app to verify connectivity status
url(r'generate_204$', 'generate_204'),
) + patterns('zerver.views.messages',
# GET returns messages, possibly filtered, POST sends a message
url(r'^messages$', 'rest_dispatch',
{'GET': 'get_old_messages_backend',
'PATCH': 'update_message_backend',
'POST': 'send_message_backend'}),
url(r'^messages/render$', 'rest_dispatch',
{'GET': 'render_message_backend'}),
url(r'^messages/flags$', 'rest_dispatch',
{'POST': 'update_message_flags'}),
) + patterns('zerver.tornadoviews',
url(r'^events$', 'rest_dispatch',
{'GET': 'get_events_backend',
'DELETE': 'cleanup_event_queue'}),
)
if not settings.VOYAGER:
v1_api_and_json_patterns += patterns('',
# Still scoped to api/v1/, but under a different project
url(r'^deployments/', include('zilencer.urls.api')),
)
urlpatterns += patterns('',
url(r'^', include('zilencer.urls.pages')),
)
urlpatterns += patterns('',
url(r'^', include('analytics.urls')),
)
urlpatterns += patterns('',
url(r'^', include('corporate.urls')),
)
urlpatterns += patterns('zerver.tornadoviews',
# Tornado views
url(r'^json/get_events$', 'json_get_events'),
# Used internally for communication between Django and Tornado processes
url(r'^notify_tornado$', 'notify'),
)
# Include the dual-use patterns twice
urlpatterns += patterns('',
url(r'^api/v1/', include(v1_api_and_json_patterns)),
url(r'^json/', include(v1_api_and_json_patterns)),
)
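# Net effect of the two includes above: every entry in v1_api_and_json_patterns
# is reachable under both prefixes, e.g. /api/v1/messages and /json/messages
# dispatch to the same {'GET': ..., 'PATCH': ..., 'POST': ...} view table.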
if settings.DEVELOPMENT:
use_prod_static = getattr(settings, 'PIPELINE', False)
static_root = os.path.join(settings.DEPLOY_ROOT,
'prod-static/serve' if use_prod_static else 'static')
urlpatterns += patterns('',
url(r'^static/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': static_root}))
| apache-2.0 |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/Django-1.9-py2.7.egg/django/core/management/commands/squashmigrations.py | 162 | 8767 | from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, connections, migrations
from django.db.migrations.loader import AmbiguityError, MigrationLoader
from django.db.migrations.migration import SwappableTuple
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.writer import MigrationWriter
from django.utils import six
from django.utils.version import get_docs_version
class Command(BaseCommand):
help = "Squashes an existing set of migrations (from first until specified) into a single new one."
def add_arguments(self, parser):
parser.add_argument('app_label',
help='App label of the application to squash migrations for.')
parser.add_argument('start_migration_name', default=None, nargs='?',
help='Migrations will be squashed starting from and including this migration.')
parser.add_argument('migration_name',
help='Migrations will be squashed until and including this migration.')
parser.add_argument('--no-optimize', action='store_true', dest='no_optimize', default=False,
help='Do not try to optimize the squashed operations.')
parser.add_argument('--noinput', '--no-input',
action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
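# Example invocations (illustrative; "myapp" and the migration names are
# hypothetical):
#   python manage.py squashmigrations myapp 0004
#       squashes myapp's migrations from 0001 up to and including 0004, while
#   python manage.py squashmigrations --no-optimize --noinput myapp 0002 0004
#       squashes only 0002..0004, skips the optimizer, and never prompts.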
def handle(self, **options):
self.verbosity = options.get('verbosity')
self.interactive = options.get('interactive')
app_label = options['app_label']
start_migration_name = options['start_migration_name']
migration_name = options['migration_name']
no_optimize = options['no_optimize']
# Load the current graph state, check the app and migration they asked for exists
loader = MigrationLoader(connections[DEFAULT_DB_ALIAS])
if app_label not in loader.migrated_apps:
raise CommandError(
"App '%s' does not have migrations (so squashmigrations on "
"it makes no sense)" % app_label
)
migration = self.find_migration(loader, app_label, migration_name)
# Work out the list of predecessor migrations
migrations_to_squash = [
loader.get_migration(al, mn)
for al, mn in loader.graph.forwards_plan((migration.app_label, migration.name))
if al == migration.app_label
]
if start_migration_name:
start_migration = self.find_migration(loader, app_label, start_migration_name)
start = loader.get_migration(start_migration.app_label, start_migration.name)
try:
start_index = migrations_to_squash.index(start)
migrations_to_squash = migrations_to_squash[start_index:]
except ValueError:
raise CommandError(
"The migration '%s' cannot be found. Maybe it comes after "
"the migration '%s'?\n"
"Have a look at:\n"
" python manage.py showmigrations %s\n"
"to debug this issue." % (start_migration, migration, app_label)
)
# Tell them what we're doing and optionally ask if we should proceed
if self.verbosity > 0 or self.interactive:
self.stdout.write(self.style.MIGRATE_HEADING("Will squash the following migrations:"))
for migration in migrations_to_squash:
self.stdout.write(" - %s" % migration.name)
if self.interactive:
answer = None
while not answer or answer not in "yn":
answer = six.moves.input("Do you wish to proceed? [yN] ")
if not answer:
answer = "n"
break
else:
answer = answer[0].lower()
if answer != "y":
return
# Load the operations from all those migrations and concat together,
# along with collecting external dependencies and detecting
# double-squashing
operations = []
dependencies = set()
# We need to take all dependencies from the first migration in the list
# as it may be 0002 depending on 0001
first_migration = True
for smigration in migrations_to_squash:
if smigration.replaces:
raise CommandError(
"You cannot squash squashed migrations! Please transition "
"it to a normal migration first: "
"https://docs.djangoproject.com/en/%s/topics/migrations/#squashing-migrations" % get_docs_version()
)
operations.extend(smigration.operations)
for dependency in smigration.dependencies:
if isinstance(dependency, SwappableTuple):
if settings.AUTH_USER_MODEL == dependency.setting:
dependencies.add(("__setting__", "AUTH_USER_MODEL"))
else:
dependencies.add(dependency)
elif dependency[0] != smigration.app_label or first_migration:
dependencies.add(dependency)
first_migration = False
if no_optimize:
if self.verbosity > 0:
self.stdout.write(self.style.MIGRATE_HEADING("(Skipping optimization.)"))
new_operations = operations
else:
if self.verbosity > 0:
self.stdout.write(self.style.MIGRATE_HEADING("Optimizing..."))
optimizer = MigrationOptimizer()
new_operations = optimizer.optimize(operations, migration.app_label)
if self.verbosity > 0:
if len(new_operations) == len(operations):
self.stdout.write(" No optimizations possible.")
else:
self.stdout.write(
" Optimized from %s operations to %s operations." %
(len(operations), len(new_operations))
)
# Work out the value of replaces (any squashed ones we're re-squashing)
# need to feed their replaces into ours
replaces = []
for migration in migrations_to_squash:
if migration.replaces:
replaces.extend(migration.replaces)
else:
replaces.append((migration.app_label, migration.name))
# Make a new migration with those operations
subclass = type("Migration", (migrations.Migration, ), {
"dependencies": dependencies,
"operations": new_operations,
"replaces": replaces,
})
if start_migration_name:
new_migration = subclass("%s_squashed_%s" % (start_migration.name, migration.name), app_label)
else:
new_migration = subclass("0001_squashed_%s" % migration.name, app_label)
new_migration.initial = True
# Write out the new migration file
writer = MigrationWriter(new_migration)
with open(writer.path, "wb") as fh:
fh.write(writer.as_string())
if self.verbosity > 0:
self.stdout.write(self.style.MIGRATE_HEADING("Created new squashed migration %s" % writer.path))
self.stdout.write(" You should commit this migration but leave the old ones in place;")
self.stdout.write(" the new migration will be used for new installs. Once you are sure")
self.stdout.write(" all instances of the codebase have applied the migrations you squashed,")
self.stdout.write(" you can delete them.")
if writer.needs_manual_porting:
self.stdout.write(self.style.MIGRATE_HEADING("Manual porting required"))
self.stdout.write(" Your migrations contained functions that must be manually copied over,")
self.stdout.write(" as we could not safely copy their implementation.")
self.stdout.write(" See the comment at the top of the squashed migration for details.")
def find_migration(self, loader, app_label, name):
try:
return loader.get_migration_by_prefix(app_label, name)
except AmbiguityError:
raise CommandError(
"More than one migration matches '%s' in app '%s'. Please be "
"more specific." % (name, app_label)
)
except KeyError:
raise CommandError(
"Cannot find a migration matching '%s' from app '%s'." %
(name, app_label)
)
| artistic-2.0 |
qzzhang/KBSolrUtil | lib/KBSolrUtil/authclient.py | 105 | 2623 | '''
Created on Aug 1, 2016
A very basic KBase auth client for the Python server.
@author: [email protected]
'''
import time as _time
import requests as _requests
import threading as _threading
import hashlib
class TokenCache(object):
''' A basic cache for tokens. '''
_MAX_TIME_SEC = 5 * 60 # 5 min
_lock = _threading.RLock()
def __init__(self, maxsize=2000):
self._cache = {}
self._maxsize = maxsize
self._halfmax = maxsize / 2 # int division to round down
def get_user(self, token):
token = hashlib.sha256(token).hexdigest()
with self._lock:
usertime = self._cache.get(token)
if not usertime:
return None
user, intime = usertime
if _time.time() - intime > self._MAX_TIME_SEC:
return None
return user
def add_valid_token(self, token, user):
if not token:
raise ValueError('Must supply token')
if not user:
raise ValueError('Must supply user')
token = hashlib.sha256(token).hexdigest()
with self._lock:
self._cache[token] = [user, _time.time()]
if len(self._cache) > self._maxsize:
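                # Cache is full: evict roughly the oldest half of the entries
                # (ordered by insertion time) so the cache cannot grow without bound.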
for i, (t, _) in enumerate(sorted(self._cache.items(),
key=lambda (_, v): v[1])):
if i <= self._halfmax:
del self._cache[t]
else:
break
class KBaseAuth(object):
'''
A very basic KBase auth client for the Python server.
'''
_LOGIN_URL = 'https://kbase.us/services/authorization/Sessions/Login'
def __init__(self, auth_url=None):
'''
Constructor
'''
self._authurl = auth_url
if not self._authurl:
self._authurl = self._LOGIN_URL
self._cache = TokenCache()
def get_user(self, token):
if not token:
raise ValueError('Must supply token')
user = self._cache.get_user(token)
if user:
return user
d = {'token': token, 'fields': 'user_id'}
ret = _requests.post(self._authurl, data=d)
if not ret.ok:
try:
err = ret.json()
except:
ret.raise_for_status()
raise ValueError('Error connecting to auth service: {} {}\n{}'
.format(ret.status_code, ret.reason,
err['error_msg']))
user = ret.json()['user_id']
self._cache.add_valid_token(token, user)
return user
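# Minimal usage sketch (the token value below is a placeholder, not a real
# credential):
#
#     auth = KBaseAuth()
#     username = auth.get_user('<kbase auth token>')
#
# Repeated lookups with the same token are served from the TokenCache for up
# to five minutes before the auth service is contacted again.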
| mit |
albertliangcode/Pi_MonteCarloSim | venv/lib/python2.7/site.py | 784 | 27543 | """Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
In earlier versions of Python (up to 1.5a3), scripts or modules that
needed to use site-specific modules would place ``import site''
somewhere near the top of their code. Because of the automatic
import, this is no longer necessary (but code that does it still
works).
This will append site-specific paths to the module search path. On
Unix, it starts with sys.prefix and sys.exec_prefix (if different) and
appends lib/python<version>/site-packages as well as lib/site-python.
It also supports the Debian convention of
lib/python<version>/dist-packages. On other platforms (mainly Mac and
Windows), it uses just sys.prefix (and sys.exec_prefix, if different,
but this is unlikely). The resulting directories, if they exist, are
appended to sys.path, and also inspected for path configuration files.
FOR DEBIAN, this sys.path is augmented with directories in /usr/local.
Local addons go into /usr/local/lib/python<version>/site-packages
(resp. /usr/local/lib/site-python), Debian addons install into
/usr/{lib,share}/python<version>/dist-packages.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.X/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.X/site-packages/bar
/usr/local/lib/python2.X/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
After these path manipulations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys
import os
try:
import __builtin__ as builtins
except ImportError:
import builtins
try:
set
except NameError:
from sets import Set as set
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
ENABLE_USER_SITE = None
# for distutils.commands.install
USER_SITE = None
USER_BASE = None
_is_64bit = (getattr(sys, 'maxsize', None) or getattr(sys, 'maxint')) > 2**32
_is_pypy = hasattr(sys, 'pypy_version_info')
_is_jython = sys.platform[:4] == 'java'
if _is_jython:
ModuleType = type(os)
def makepath(*paths):
dir = os.path.join(*paths)
if _is_jython and (dir == '__classpath__' or
dir.startswith('__pyclasspath__')):
return dir, dir
dir = os.path.abspath(dir)
return dir, os.path.normcase(dir)
def abs__file__():
"""Set all module' __file__ attribute to an absolute path"""
for m in sys.modules.values():
if ((_is_jython and not isinstance(m, ModuleType)) or
hasattr(m, '__loader__')):
# only modules need the abspath in Jython. and don't mess
# with a PEP 302-supplied __file__
continue
f = getattr(m, '__file__', None)
if f is None:
continue
m.__file__ = os.path.abspath(f)
def removeduppaths():
""" Remove duplicate entries from sys.path along with making them
absolute"""
# This ensures that the initial path provided by the interpreter contains
# only absolute pathnames, even if we're running from the build directory.
L = []
known_paths = set()
for dir in sys.path:
# Filter out duplicate paths (on case-insensitive file systems also
# if they only differ in case); turn relative paths into absolute
# paths.
dir, dircase = makepath(dir)
if not dircase in known_paths:
L.append(dir)
known_paths.add(dircase)
sys.path[:] = L
return known_paths
# XXX This should not be part of site.py, since it is needed even when
# using the -S option for Python. See http://www.python.org/sf/586680
def addbuilddir():
"""Append ./build/lib.<platform> in case we're running in the build dir
(especially for Guido :-)"""
from distutils.util import get_platform
s = "build/lib.%s-%.3s" % (get_platform(), sys.version)
if hasattr(sys, 'gettotalrefcount'):
s += '-pydebug'
s = os.path.join(os.path.dirname(sys.path[-1]), s)
sys.path.append(s)
def _init_pathinfo():
"""Return a set containing all existing directory entries from sys.path"""
d = set()
for dir in sys.path:
try:
if os.path.isdir(dir):
dir, dircase = makepath(dir)
d.add(dircase)
except TypeError:
continue
return d
def addpackage(sitedir, name, known_paths):
"""Add a new path to known_paths by combining sitedir and 'name' or execute
sitedir if it starts with 'import'"""
if known_paths is None:
_init_pathinfo()
reset = 1
else:
reset = 0
fullname = os.path.join(sitedir, name)
try:
f = open(fullname, "rU")
except IOError:
return
try:
for line in f:
if line.startswith("#"):
continue
if line.startswith("import"):
exec(line)
continue
line = line.rstrip()
dir, dircase = makepath(sitedir, line)
if not dircase in known_paths and os.path.exists(dir):
sys.path.append(dir)
known_paths.add(dircase)
finally:
f.close()
if reset:
known_paths = None
return known_paths
def addsitedir(sitedir, known_paths=None):
"""Add 'sitedir' argument to sys.path if missing and handle .pth files in
'sitedir'"""
if known_paths is None:
known_paths = _init_pathinfo()
reset = 1
else:
reset = 0
sitedir, sitedircase = makepath(sitedir)
if not sitedircase in known_paths:
sys.path.append(sitedir) # Add path component
try:
names = os.listdir(sitedir)
except os.error:
return
names.sort()
for name in names:
if name.endswith(os.extsep + "pth"):
addpackage(sitedir, name, known_paths)
if reset:
known_paths = None
return known_paths
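# Illustrative call (the directory name is made up): addsitedir('/opt/app/site-packages')
# appends that directory to sys.path and then processes any *.pth files found
# inside it, as described in the module docstring above.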
def addsitepackages(known_paths, sys_prefix=sys.prefix, exec_prefix=sys.exec_prefix):
"""Add site-packages (and possibly site-python) to sys.path"""
prefixes = [os.path.join(sys_prefix, "local"), sys_prefix]
if exec_prefix != sys_prefix:
prefixes.append(os.path.join(exec_prefix, "local"))
for prefix in prefixes:
if prefix:
if sys.platform in ('os2emx', 'riscos') or _is_jython:
sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
elif _is_pypy:
sitedirs = [os.path.join(prefix, 'site-packages')]
elif sys.platform == 'darwin' and prefix == sys_prefix:
if prefix.startswith("/System/Library/Frameworks/"): # Apple's Python
sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
os.path.join(prefix, "Extras", "lib", "python")]
else: # any other Python distros on OSX work this way
sitedirs = [os.path.join(prefix, "lib",
"python" + sys.version[:3], "site-packages")]
elif os.sep == '/':
sitedirs = [os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python"),
os.path.join(prefix, "python" + sys.version[:3], "lib-dynload")]
lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
if (os.path.exists(lib64_dir) and
os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
if _is_64bit:
sitedirs.insert(0, lib64_dir)
else:
sitedirs.append(lib64_dir)
try:
# sys.getobjects only available in --with-pydebug build
sys.getobjects
sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
except AttributeError:
pass
# Debian-specific dist-packages directories:
sitedirs.append(os.path.join(prefix, "local/lib",
"python" + sys.version[:3],
"dist-packages"))
if sys.version[0] == '2':
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[:3],
"dist-packages"))
else:
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[0],
"dist-packages"))
sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
else:
sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(
os.path.join(home,
'Library',
'Python',
sys.version[:3],
'site-packages'))
for sitedir in sitedirs:
if os.path.isdir(sitedir):
addsitedir(sitedir, known_paths)
return None
def check_enableusersite():
"""Check if user site directory is safe for inclusion
    The function tests for the command line flag (including environment var)
    and checks that the process uid/gid equal the effective uid/gid.
None: Disabled for security reasons
False: Disabled by user (command line option)
True: Safe and enabled
"""
if hasattr(sys, 'flags') and getattr(sys.flags, 'no_user_site', False):
return False
if hasattr(os, "getuid") and hasattr(os, "geteuid"):
# check process uid == effective uid
if os.geteuid() != os.getuid():
return None
if hasattr(os, "getgid") and hasattr(os, "getegid"):
# check process gid == effective gid
if os.getegid() != os.getgid():
return None
return True
def addusersitepackages(known_paths):
"""Add a per user site-package to sys.path
Each user has its own python directory with site-packages in the
home directory.
USER_BASE is the root directory for all Python versions
USER_SITE is the user specific site-packages directory
USER_SITE/.. can be used for data.
"""
global USER_BASE, USER_SITE, ENABLE_USER_SITE
env_base = os.environ.get("PYTHONUSERBASE", None)
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
#if sys.platform in ('os2emx', 'riscos'):
# # Don't know what to put here
# USER_BASE = ''
# USER_SITE = ''
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
if env_base:
USER_BASE = env_base
else:
USER_BASE = joinuser(base, "Python")
USER_SITE = os.path.join(USER_BASE,
"Python" + sys.version[0] + sys.version[2],
"site-packages")
else:
if env_base:
USER_BASE = env_base
else:
USER_BASE = joinuser("~", ".local")
USER_SITE = os.path.join(USER_BASE, "lib",
"python" + sys.version[:3],
"site-packages")
if ENABLE_USER_SITE and os.path.isdir(USER_SITE):
addsitedir(USER_SITE, known_paths)
if ENABLE_USER_SITE:
for dist_libdir in ("lib", "local/lib"):
user_site = os.path.join(USER_BASE, dist_libdir,
"python" + sys.version[:3],
"dist-packages")
if os.path.isdir(user_site):
addsitedir(user_site, known_paths)
return known_paths
def setBEGINLIBPATH():
"""The OS/2 EMX port has optional extension modules that do double duty
as DLLs (and must use the .DLL file extension) for other extensions.
The library search path needs to be amended so these will be found
during module import. Use BEGINLIBPATH so that these are at the start
of the library search path.
"""
dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload")
libpath = os.environ['BEGINLIBPATH'].split(';')
if libpath[-1]:
libpath.append(dllpath)
else:
libpath[-1] = dllpath
os.environ['BEGINLIBPATH'] = ';'.join(libpath)
def setquit():
"""Define new built-ins 'quit' and 'exit'.
    These are objects that print a hint on how to exit and raise SystemExit when called.
"""
if os.sep == ':':
eof = 'Cmd-Q'
elif os.sep == '\\':
eof = 'Ctrl-Z plus Return'
else:
eof = 'Ctrl-D (i.e. EOF)'
class Quitter(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return 'Use %s() or %s to exit' % (self.name, eof)
def __call__(self, code=None):
# Shells like IDLE catch the SystemExit, but listen when their
# stdin wrapper is closed.
try:
sys.stdin.close()
except:
pass
raise SystemExit(code)
builtins.quit = Quitter('quit')
builtins.exit = Quitter('exit')
class _Printer(object):
"""interactive prompt objects for printing the license text, a list of
contributors and the copyright notice."""
MAXLINES = 23
def __init__(self, name, data, files=(), dirs=()):
self.__name = name
self.__data = data
self.__files = files
self.__dirs = dirs
self.__lines = None
def __setup(self):
if self.__lines:
return
data = None
for dir in self.__dirs:
for filename in self.__files:
filename = os.path.join(dir, filename)
try:
fp = open(filename, "rU")
data = fp.read()
fp.close()
break
except IOError:
pass
if data:
break
if not data:
data = self.__data
self.__lines = data.split('\n')
self.__linecnt = len(self.__lines)
def __repr__(self):
self.__setup()
if len(self.__lines) <= self.MAXLINES:
return "\n".join(self.__lines)
else:
return "Type %s() to see the full %s text" % ((self.__name,)*2)
def __call__(self):
self.__setup()
prompt = 'Hit Return for more, or q (and Return) to quit: '
lineno = 0
while 1:
try:
for i in range(lineno, lineno + self.MAXLINES):
print(self.__lines[i])
except IndexError:
break
else:
lineno += self.MAXLINES
key = None
while key is None:
try:
key = raw_input(prompt)
except NameError:
key = input(prompt)
if key not in ('', 'q'):
key = None
if key == 'q':
break
def setcopyright():
"""Set 'copyright' and 'credits' in __builtin__"""
builtins.copyright = _Printer("copyright", sys.copyright)
if _is_jython:
builtins.credits = _Printer(
"credits",
"Jython is maintained by the Jython developers (www.jython.org).")
elif _is_pypy:
builtins.credits = _Printer(
"credits",
"PyPy is maintained by the PyPy developers: http://pypy.org/")
else:
builtins.credits = _Printer("credits", """\
Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information.""")
here = os.path.dirname(os.__file__)
builtins.license = _Printer(
"license", "See http://www.python.org/%.3s/license.html" % sys.version,
["LICENSE.txt", "LICENSE"],
[os.path.join(here, os.pardir), here, os.curdir])
class _Helper(object):
"""Define the built-in 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
builtins.help = _Helper()
def aliasmbcs():
"""On Windows, some default encodings are not provided by Python,
while they are always available as "mbcs" in each locale. Make
them usable by aliasing to "mbcs" in such a case."""
if sys.platform == 'win32':
import locale, codecs
enc = locale.getdefaultlocale()[1]
if enc.startswith('cp'): # "cp***" ?
try:
codecs.lookup(enc)
except LookupError:
import encodings
encodings._cache[enc] = encodings._unknown
encodings.aliases.aliases[enc] = 'mbcs'
def setencoding():
"""Set the string encoding used by the Unicode implementation. The
default is 'ascii', but if you're willing to experiment, you can
change this."""
encoding = "ascii" # Default value set by _PyUnicode_Init()
if 0:
# Enable to support locale aware default string encodings.
import locale
loc = locale.getdefaultlocale()
if loc[1]:
encoding = loc[1]
if 0:
# Enable to switch off string to Unicode coercion and implicit
# Unicode to string conversion.
encoding = "undefined"
if encoding != "ascii":
# On Non-Unicode builds this will raise an AttributeError...
sys.setdefaultencoding(encoding) # Needs Python Unicode build !
def execsitecustomize():
"""Run custom site specific code, if available."""
try:
import sitecustomize
except ImportError:
pass
def virtual_install_main_packages():
f = open(os.path.join(os.path.dirname(__file__), 'orig-prefix.txt'))
sys.real_prefix = f.read().strip()
f.close()
pos = 2
hardcoded_relative_dirs = []
if sys.path[0] == '':
pos += 1
if _is_jython:
paths = [os.path.join(sys.real_prefix, 'Lib')]
elif _is_pypy:
if sys.version_info > (3, 2):
cpyver = '%d' % sys.version_info[0]
elif sys.pypy_version_info >= (1, 5):
cpyver = '%d.%d' % sys.version_info[:2]
else:
cpyver = '%d.%d.%d' % sys.version_info[:3]
paths = [os.path.join(sys.real_prefix, 'lib_pypy'),
os.path.join(sys.real_prefix, 'lib-python', cpyver)]
if sys.pypy_version_info < (1, 9):
paths.insert(1, os.path.join(sys.real_prefix,
'lib-python', 'modified-%s' % cpyver))
hardcoded_relative_dirs = paths[:] # for the special 'darwin' case below
#
# This is hardcoded in the Python executable, but relative to sys.prefix:
for path in paths[:]:
plat_path = os.path.join(path, 'plat-%s' % sys.platform)
if os.path.exists(plat_path):
paths.append(plat_path)
elif sys.platform == 'win32':
paths = [os.path.join(sys.real_prefix, 'Lib'), os.path.join(sys.real_prefix, 'DLLs')]
else:
paths = [os.path.join(sys.real_prefix, 'lib', 'python'+sys.version[:3])]
hardcoded_relative_dirs = paths[:] # for the special 'darwin' case below
lib64_path = os.path.join(sys.real_prefix, 'lib64', 'python'+sys.version[:3])
if os.path.exists(lib64_path):
if _is_64bit:
paths.insert(0, lib64_path)
else:
paths.append(lib64_path)
# This is hardcoded in the Python executable, but relative to
# sys.prefix. Debian change: we need to add the multiarch triplet
# here, which is where the real stuff lives. As per PEP 421, in
# Python 3.3+, this lives in sys.implementation, while in Python 2.7
# it lives in sys.
try:
arch = getattr(sys, 'implementation', sys)._multiarch
except AttributeError:
# This is a non-multiarch aware Python. Fallback to the old way.
arch = sys.platform
plat_path = os.path.join(sys.real_prefix, 'lib',
'python'+sys.version[:3],
'plat-%s' % arch)
if os.path.exists(plat_path):
paths.append(plat_path)
# This is hardcoded in the Python executable, but
# relative to sys.prefix, so we have to fix up:
for path in list(paths):
tk_dir = os.path.join(path, 'lib-tk')
if os.path.exists(tk_dir):
paths.append(tk_dir)
        # These are hardcoded in Apple's Python executable,
# but relative to sys.prefix, so we have to fix them up:
if sys.platform == 'darwin':
hardcoded_paths = [os.path.join(relative_dir, module)
for relative_dir in hardcoded_relative_dirs
for module in ('plat-darwin', 'plat-mac', 'plat-mac/lib-scriptpackages')]
for path in hardcoded_paths:
if os.path.exists(path):
paths.append(path)
sys.path.extend(paths)
def force_global_eggs_after_local_site_packages():
"""
Force easy_installed eggs in the global environment to get placed
in sys.path after all packages inside the virtualenv. This
maintains the "least surprise" result that packages in the
virtualenv always mask global packages, never the other way
around.
"""
egginsert = getattr(sys, '__egginsert', 0)
for i, path in enumerate(sys.path):
if i > egginsert and path.startswith(sys.prefix):
egginsert = i
sys.__egginsert = egginsert + 1
def virtual_addsitepackages(known_paths):
force_global_eggs_after_local_site_packages()
return addsitepackages(known_paths, sys_prefix=sys.real_prefix)
def fixclasspath():
"""Adjust the special classpath sys.path entries for Jython. These
entries should follow the base virtualenv lib directories.
"""
paths = []
classpaths = []
for path in sys.path:
if path == '__classpath__' or path.startswith('__pyclasspath__'):
classpaths.append(path)
else:
paths.append(path)
sys.path = paths
sys.path.extend(classpaths)
def execusercustomize():
"""Run custom user specific code, if available."""
try:
import usercustomize
except ImportError:
pass
def main():
global ENABLE_USER_SITE
virtual_install_main_packages()
abs__file__()
paths_in_sys = removeduppaths()
if (os.name == "posix" and sys.path and
os.path.basename(sys.path[-1]) == "Modules"):
addbuilddir()
if _is_jython:
fixclasspath()
GLOBAL_SITE_PACKAGES = not os.path.exists(os.path.join(os.path.dirname(__file__), 'no-global-site-packages.txt'))
if not GLOBAL_SITE_PACKAGES:
ENABLE_USER_SITE = False
if ENABLE_USER_SITE is None:
ENABLE_USER_SITE = check_enableusersite()
paths_in_sys = addsitepackages(paths_in_sys)
paths_in_sys = addusersitepackages(paths_in_sys)
if GLOBAL_SITE_PACKAGES:
paths_in_sys = virtual_addsitepackages(paths_in_sys)
if sys.platform == 'os2emx':
setBEGINLIBPATH()
setquit()
setcopyright()
sethelper()
aliasmbcs()
setencoding()
execsitecustomize()
if ENABLE_USER_SITE:
execusercustomize()
# Remove sys.setdefaultencoding() so that users cannot change the
# encoding after initialization. The test for presence is needed when
# this module is run as a script, because this code is executed twice.
if hasattr(sys, "setdefaultencoding"):
del sys.setdefaultencoding
main()
def _script():
help = """\
%s [--user-base] [--user-site]
Without arguments print some useful information
With arguments print the value of USER_BASE and/or USER_SITE separated
by '%s'.
Exit codes with --user-base or --user-site:
0 - user site directory is enabled
1 - user site directory is disabled by user
      2 - user site directory is disabled by super user
or for security reasons
>2 - unknown error
"""
args = sys.argv[1:]
if not args:
print("sys.path = [")
for dir in sys.path:
print(" %r," % (dir,))
print("]")
def exists(path):
if os.path.isdir(path):
return "exists"
else:
return "doesn't exist"
print("USER_BASE: %r (%s)" % (USER_BASE, exists(USER_BASE)))
print("USER_SITE: %r (%s)" % (USER_SITE, exists(USER_BASE)))
print("ENABLE_USER_SITE: %r" % ENABLE_USER_SITE)
sys.exit(0)
buffer = []
if '--user-base' in args:
buffer.append(USER_BASE)
if '--user-site' in args:
buffer.append(USER_SITE)
if buffer:
print(os.pathsep.join(buffer))
if ENABLE_USER_SITE:
sys.exit(0)
elif ENABLE_USER_SITE is False:
sys.exit(1)
elif ENABLE_USER_SITE is None:
sys.exit(2)
else:
sys.exit(3)
else:
import textwrap
print(textwrap.dedent(help % (sys.argv[0], os.pathsep)))
sys.exit(10)
if __name__ == '__main__':
_script()
| mit |
Endika/odoo | openerp/addons/test_new_api/tests/test_new_fields.py | 9 | 19593 | #
# test cases for new-style fields
#
from datetime import date, datetime
from collections import defaultdict
from openerp.exceptions import AccessError, except_orm
from openerp.tests import common
from openerp.tools import mute_logger
class TestNewFields(common.TransactionCase):
def test_00_basics(self):
""" test accessing new fields """
# find a discussion
discussion = self.env.ref('test_new_api.discussion_0')
# read field as a record attribute or as a record item
self.assertIsInstance(discussion.name, basestring)
self.assertIsInstance(discussion['name'], basestring)
self.assertEqual(discussion['name'], discussion.name)
# read it with method read()
values = discussion.read(['name'])[0]
self.assertEqual(values['name'], discussion.name)
def test_01_basic_get_assertion(self):
""" test item getter """
# field access works on single record
record = self.env.ref('test_new_api.message_0_0')
self.assertEqual(len(record), 1)
ok = record.body
# field access fails on multiple records
records = self.env['test_new_api.message'].search([])
assert len(records) > 1
with self.assertRaises(except_orm):
faulty = records.body
def test_01_basic_set_assertion(self):
""" test item setter """
# field assignment works on single record
record = self.env.ref('test_new_api.message_0_0')
self.assertEqual(len(record), 1)
record.body = 'OK'
# field assignment fails on multiple records
records = self.env['test_new_api.message'].search([])
assert len(records) > 1
with self.assertRaises(except_orm):
records.body = 'Faulty'
def test_10_computed(self):
""" check definition of computed fields """
# by default function fields are not stored and readonly
field = self.env['test_new_api.message']._fields['size']
self.assertFalse(field.store)
self.assertTrue(field.readonly)
field = self.env['test_new_api.message']._fields['name']
self.assertTrue(field.store)
self.assertTrue(field.readonly)
def test_10_non_stored(self):
""" test non-stored fields """
# find messages
for message in self.env['test_new_api.message'].search([]):
# check definition of field
self.assertEqual(message.size, len(message.body or ''))
# check recomputation after record is modified
size = message.size
message.write({'body': (message.body or '') + "!!!"})
self.assertEqual(message.size, size + 3)
# special case: computed field without dependency must be computed
record = self.env['test_new_api.mixed'].create({})
self.assertTrue(record.now)
def test_11_stored(self):
""" test stored fields """
# find the demo discussion
discussion = self.env.ref('test_new_api.discussion_0')
self.assertTrue(len(discussion.messages) > 0)
# check messages
name0 = discussion.name or ""
for message in discussion.messages:
self.assertEqual(message.name, "[%s] %s" % (name0, message.author.name))
# modify discussion name, and check again messages
discussion.name = name1 = 'Talking about stuff...'
for message in discussion.messages:
self.assertEqual(message.name, "[%s] %s" % (name1, message.author.name))
# switch message from discussion, and check again
name2 = 'Another discussion'
discussion2 = discussion.copy({'name': name2})
message2 = discussion.messages[0]
message2.discussion = discussion2
for message in discussion2.messages:
self.assertEqual(message.name, "[%s] %s" % (name2, message.author.name))
def test_12_recursive(self):
""" test recursively dependent fields """
Category = self.env['test_new_api.category']
abel = Category.create({'name': 'Abel'})
beth = Category.create({'name': 'Bethany'})
cath = Category.create({'name': 'Catherine'})
dean = Category.create({'name': 'Dean'})
ewan = Category.create({'name': 'Ewan'})
finn = Category.create({'name': 'Finnley'})
gabe = Category.create({'name': 'Gabriel'})
cath.parent = finn.parent = gabe
abel.parent = beth.parent = cath
dean.parent = ewan.parent = finn
self.assertEqual(abel.display_name, "Gabriel / Catherine / Abel")
self.assertEqual(beth.display_name, "Gabriel / Catherine / Bethany")
self.assertEqual(cath.display_name, "Gabriel / Catherine")
self.assertEqual(dean.display_name, "Gabriel / Finnley / Dean")
self.assertEqual(ewan.display_name, "Gabriel / Finnley / Ewan")
self.assertEqual(finn.display_name, "Gabriel / Finnley")
self.assertEqual(gabe.display_name, "Gabriel")
ewan.parent = cath
self.assertEqual(ewan.display_name, "Gabriel / Catherine / Ewan")
cath.parent = finn
self.assertEqual(ewan.display_name, "Gabriel / Finnley / Catherine / Ewan")
def test_12_cascade(self):
""" test computed field depending on computed field """
message = self.env.ref('test_new_api.message_0_0')
message.invalidate_cache()
double_size = message.double_size
self.assertEqual(double_size, message.size)
def test_13_inverse(self):
""" test inverse computation of fields """
Category = self.env['test_new_api.category']
abel = Category.create({'name': 'Abel'})
beth = Category.create({'name': 'Bethany'})
cath = Category.create({'name': 'Catherine'})
dean = Category.create({'name': 'Dean'})
ewan = Category.create({'name': 'Ewan'})
finn = Category.create({'name': 'Finnley'})
gabe = Category.create({'name': 'Gabriel'})
self.assertEqual(ewan.display_name, "Ewan")
ewan.display_name = "Abel / Bethany / Catherine / Erwan"
self.assertEqual(beth.parent, abel)
self.assertEqual(cath.parent, beth)
self.assertEqual(ewan.parent, cath)
self.assertEqual(ewan.name, "Erwan")
def test_14_search(self):
""" test search on computed fields """
discussion = self.env.ref('test_new_api.discussion_0')
# determine message sizes
sizes = set(message.size for message in discussion.messages)
# search for messages based on their size
for size in sizes:
messages0 = self.env['test_new_api.message'].search(
[('discussion', '=', discussion.id), ('size', '<=', size)])
messages1 = self.env['test_new_api.message'].browse()
for message in discussion.messages:
if message.size <= size:
messages1 += message
self.assertEqual(messages0, messages1)
def test_15_constraint(self):
""" test new-style Python constraints """
discussion = self.env.ref('test_new_api.discussion_0')
# remove oneself from discussion participants: we can no longer create
# messages in discussion
discussion.participants -= self.env.user
with self.assertRaises(Exception):
self.env['test_new_api.message'].create({'discussion': discussion.id, 'body': 'Whatever'})
# make sure that assertRaises() does not leave fields to recompute
self.assertFalse(self.env.has_todo())
# put back oneself into discussion participants: now we can create
# messages in discussion
discussion.participants += self.env.user
self.env['test_new_api.message'].create({'discussion': discussion.id, 'body': 'Whatever'})
def test_20_float(self):
""" test float fields """
record = self.env['test_new_api.mixed'].create({})
# assign value, and expect rounding
record.write({'number': 2.4999999999999996})
self.assertEqual(record.number, 2.50)
# same with field setter
record.number = 2.4999999999999996
self.assertEqual(record.number, 2.50)
def test_21_date(self):
""" test date fields """
record = self.env['test_new_api.mixed'].create({})
# one may assign False or None
record.date = None
self.assertFalse(record.date)
# one may assign date and datetime objects
record.date = date(2012, 05, 01)
self.assertEqual(record.date, '2012-05-01')
record.date = datetime(2012, 05, 01, 10, 45, 00)
self.assertEqual(record.date, '2012-05-01')
# one may assign dates in the default format, and it must be checked
record.date = '2012-05-01'
self.assertEqual(record.date, '2012-05-01')
with self.assertRaises(ValueError):
record.date = '12-5-1'
def test_22_selection(self):
""" test selection fields """
record = self.env['test_new_api.mixed'].create({})
# one may assign False or None
record.lang = None
self.assertFalse(record.lang)
# one may assign a value, and it must be checked
for language in self.env['res.lang'].search([]):
record.lang = language.code
with self.assertRaises(ValueError):
record.lang = 'zz_ZZ'
def test_23_relation(self):
""" test relation fields """
demo = self.env.ref('base.user_demo')
message = self.env.ref('test_new_api.message_0_0')
# check environment of record and related records
self.assertEqual(message.env, self.env)
self.assertEqual(message.discussion.env, self.env)
demo_env = self.env(user=demo)
self.assertNotEqual(demo_env, self.env)
# check environment of record and related records
self.assertEqual(message.env, self.env)
self.assertEqual(message.discussion.env, self.env)
# "migrate" message into demo_env, and check again
demo_message = message.sudo(demo)
self.assertEqual(demo_message.env, demo_env)
self.assertEqual(demo_message.discussion.env, demo_env)
# assign record's parent to a record in demo_env
message.discussion = message.discussion.copy({'name': 'Copy'})
# both message and its parent field must be in self.env
self.assertEqual(message.env, self.env)
self.assertEqual(message.discussion.env, self.env)
def test_24_reference(self):
""" test reference fields. """
record = self.env['test_new_api.mixed'].create({})
# one may assign False or None
record.reference = None
self.assertFalse(record.reference)
# one may assign a user or a partner...
record.reference = self.env.user
self.assertEqual(record.reference, self.env.user)
record.reference = self.env.user.partner_id
self.assertEqual(record.reference, self.env.user.partner_id)
# ... but no record from a model that starts with 'ir.'
with self.assertRaises(ValueError):
record.reference = self.env['ir.model'].search([], limit=1)
def test_25_related(self):
""" test related fields. """
message = self.env.ref('test_new_api.message_0_0')
discussion = message.discussion
# by default related fields are not stored
field = message._fields['discussion_name']
self.assertFalse(field.store)
self.assertFalse(field.readonly)
# check value of related field
self.assertEqual(message.discussion_name, discussion.name)
# change discussion name, and check result
discussion.name = 'Foo'
self.assertEqual(message.discussion_name, 'Foo')
# change discussion name via related field, and check result
message.discussion_name = 'Bar'
self.assertEqual(discussion.name, 'Bar')
self.assertEqual(message.discussion_name, 'Bar')
# change discussion name via related field on several records
discussion1 = discussion.create({'name': 'X1'})
discussion2 = discussion.create({'name': 'X2'})
discussion1.participants = discussion2.participants = self.env.user
message1 = message.create({'discussion': discussion1.id})
message2 = message.create({'discussion': discussion2.id})
self.assertEqual(message1.discussion_name, 'X1')
self.assertEqual(message2.discussion_name, 'X2')
(message1 + message2).write({'discussion_name': 'X3'})
self.assertEqual(discussion1.name, 'X3')
self.assertEqual(discussion2.name, 'X3')
# search on related field, and check result
search_on_related = self.env['test_new_api.message'].search([('discussion_name', '=', 'Bar')])
search_on_regular = self.env['test_new_api.message'].search([('discussion.name', '=', 'Bar')])
self.assertEqual(search_on_related, search_on_regular)
# check that field attributes are copied
message_field = message.fields_get(['discussion_name'])['discussion_name']
discussion_field = discussion.fields_get(['name'])['name']
self.assertEqual(message_field['help'], discussion_field['help'])
def test_25_related_multi(self):
""" test write() on several related fields based on a common computed field. """
foo = self.env['test_new_api.foo'].create({'name': 'A', 'value1': 1, 'value2': 2})
bar = self.env['test_new_api.bar'].create({'name': 'A'})
self.assertEqual(bar.foo, foo)
self.assertEqual(bar.value1, 1)
self.assertEqual(bar.value2, 2)
foo.invalidate_cache()
bar.write({'value1': 3, 'value2': 4})
self.assertEqual(foo.value1, 3)
self.assertEqual(foo.value2, 4)
def test_26_inherited(self):
""" test inherited fields. """
# a bunch of fields are inherited from res_partner
for user in self.env['res.users'].search([]):
partner = user.partner_id
for field in ('is_company', 'name', 'email', 'country_id'):
self.assertEqual(getattr(user, field), getattr(partner, field))
self.assertEqual(user[field], partner[field])
def test_30_read(self):
""" test computed fields as returned by read(). """
discussion = self.env.ref('test_new_api.discussion_0')
for message in discussion.messages:
display_name = message.display_name
size = message.size
data = message.read(['display_name', 'size'])[0]
self.assertEqual(data['display_name'], display_name)
self.assertEqual(data['size'], size)
def test_31_prefetch(self):
""" test prefetch of records handle AccessError """
Category = self.env['test_new_api.category']
cat_1 = Category.create({'name': 'NOACCESS'}).id
cat_2 = Category.create({'name': 'ACCESS', 'parent': cat_1}).id
self.env.clear()
cat = Category.browse(cat_2)
self.assertEqual(cat.name, 'ACCESS')
# both categories should be in prefetch ids
self.assertSetEqual(self.env.prefetch[Category._name], set([cat_1, cat_2]))
# but due to our (lame) overwrite of `read`, it should not forbid us to read records we have access to
self.assertFalse(len(cat.discussions))
self.assertEqual(cat.parent.id, cat_1)
with self.assertRaises(AccessError):
Category.browse(cat_1).name
def test_40_new(self):
""" test new records. """
discussion = self.env.ref('test_new_api.discussion_0')
# create a new message
message = self.env['test_new_api.message'].new()
self.assertFalse(message.id)
# assign some fields; should have no side effect
message.discussion = discussion
message.body = BODY = "May the Force be with you."
self.assertEqual(message.discussion, discussion)
self.assertEqual(message.body, BODY)
self.assertFalse(message.author)
self.assertNotIn(message, discussion.messages)
# check computed values of fields
self.assertEqual(message.name, "[%s] %s" % (discussion.name, ''))
self.assertEqual(message.size, len(BODY))
@mute_logger('openerp.addons.base.ir.ir_model')
def test_41_new_related(self):
""" test the behavior of related fields starting on new records. """
# make discussions unreadable for demo user
access = self.env.ref('test_new_api.access_discussion')
access.write({'perm_read': False})
# create an environment for demo user
env = self.env(user=self.env.ref('base.user_demo'))
self.assertEqual(env.user.login, "demo")
# create a new message as demo user
discussion = self.env.ref('test_new_api.discussion_0')
message = env['test_new_api.message'].new({'discussion': discussion})
self.assertEqual(message.discussion, discussion)
# read the related field discussion_name
self.assertEqual(message.discussion.env, env)
self.assertEqual(message.discussion_name, discussion.name)
with self.assertRaises(AccessError):
message.discussion.name
@mute_logger('openerp.addons.base.ir.ir_model')
def test_42_new_related(self):
""" test the behavior of related fields traversing new records. """
# make discussions unreadable for demo user
access = self.env.ref('test_new_api.access_discussion')
access.write({'perm_read': False})
# create an environment for demo user
env = self.env(user=self.env.ref('base.user_demo'))
self.assertEqual(env.user.login, "demo")
# create a new discussion and a new message as demo user
discussion = env['test_new_api.discussion'].new({'name': 'Stuff'})
message = env['test_new_api.message'].new({'discussion': discussion})
self.assertEqual(message.discussion, discussion)
# read the related field discussion_name
self.assertNotEqual(message.sudo().env, message.env)
self.assertEqual(message.discussion_name, discussion.name)
def test_50_defaults(self):
""" test default values. """
fields = ['discussion', 'body', 'author', 'size']
defaults = self.env['test_new_api.message'].default_get(fields)
self.assertEqual(defaults, {'author': self.env.uid})
defaults = self.env['test_new_api.mixed'].default_get(['number'])
self.assertEqual(defaults, {'number': 3.14})
def test_50_search_many2one(self):
""" test search through a path of computed fields"""
messages = self.env['test_new_api.message'].search(
[('author_partner.name', '=', 'Demo User')])
self.assertEqual(messages, self.env.ref('test_new_api.message_0_1'))
def test_60_x2many_domain(self):
""" test the cache consistency of a x2many field with a domain """
discussion = self.env.ref('test_new_api.discussion_0')
message = discussion.messages[0]
self.assertNotIn(message, discussion.important_messages)
message.important = True
self.assertIn(message, discussion.important_messages)
class TestMagicFields(common.TransactionCase):
def test_write_date(self):
record = self.env['test_new_api.discussion'].create({'name': 'Booba'})
self.assertEqual(record.create_uid, self.env.user)
self.assertEqual(record.write_uid, self.env.user)
| agpl-3.0 |
SolusOS-discontinued/pisi | pisi/component.py | 3 | 2955 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005 - 2007, TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import pisi.pxml.xmlfile as xmlfile
import pisi.pxml.autoxml as autoxml
class Error(object):
__metaclass__ = autoxml.autoxml
class Obsolete:
__metaclass__ = autoxml.autoxml
s_Package = [autoxml.String, autoxml.mandatory]
def __str__(self):
return self.package
class Distribution(xmlfile.XmlFile):
__metaclass__ = autoxml.autoxml
tag = "PISI"
t_SourceName = [autoxml.Text, autoxml.mandatory] # name of distribution (source)
t_Description = [autoxml.LocalText, autoxml.mandatory]
t_Version = [autoxml.Text, autoxml.optional]
t_Type = [autoxml.Text, autoxml.mandatory]
t_Dependencies = [ [autoxml.Text], autoxml.optional, "Dependencies/Distribution"]
t_BinaryName = [autoxml.Text, autoxml.optional] # name of repository (binary distro)
t_Architecture = [autoxml.Text, autoxml.optional] # architecture identifier
t_Obsoletes = [ [Obsolete], autoxml.optional, "Obsoletes/Package"]
class Maintainer(xmlfile.XmlFile):
"representation for component responsibles"
__metaclass__ = autoxml.autoxml
t_Name = [autoxml.Text, autoxml.mandatory]
t_Email = [autoxml.String, autoxml.mandatory]
def __str__(self):
s = "%s <%s>" % (self.name, self.email)
return s
class Component(xmlfile.XmlFile):
"representation for component declarations"
__metaclass__ = autoxml.autoxml
t_Name = [autoxml.String, autoxml.mandatory] # fully qualified name
# component name in other languages, for instance in Turkish
# LocalName for system.base could be sistem.taban or "Taban Sistem",
# this could be useful for GUIs
t_LocalName = [autoxml.LocalText, autoxml.optional]
# Information about the component
t_Summary = [autoxml.LocalText, autoxml.optional]
t_Description = [autoxml.LocalText, autoxml.optional]
t_Group = [autoxml.String, autoxml.optional]
# Component responsible
t_Maintainer = [Maintainer, autoxml.optional]
# the parts of this component.
# to be filled by the component database, thus it is optional.
t_Packages = [ [autoxml.String], autoxml.optional, "Parts/Package"]
t_Sources = [ [autoxml.String], autoxml.optional, "Parts/Source"]
class Components(xmlfile.XmlFile):
"representation for component declarations"
__metaclass__ = autoxml.autoxml
tag = "PISI"
t_Components = [ [Component], autoxml.optional, "Components/Component" ]
# FIXME: there will be no component.xml, only components.xml
class CompatComponent(Component):
tag = "PISI"
t_VisibleTo = [autoxml.String, autoxml.optional]
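# For illustration only: a hypothetical component declaration matching the
# autoxml definitions above would look roughly like
#
#   <PISI>
#       <Components>
#           <Component>
#               <Name>system.base</Name>
#               <LocalName xml:lang="tr">Taban Sistem</LocalName>
#               <Summary>Base system components</Summary>
#               <Maintainer>
#                   <Name>Jane Doe</Name>
#                   <Email>jane@example.com</Email>
#               </Maintainer>
#           </Component>
#       </Components>
#   </PISI>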
| gpl-2.0 |
imaculate/scikit-learn | sklearn/linear_model/tests/test_ransac.py | 52 | 17482 | from scipy import sparse
import numpy as np
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_almost_equal
from sklearn.linear_model import LinearRegression, RANSACRegressor, Lasso
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
X = np.random.rand(10, 2)
y = np.random.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=0,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=11,
random_state=0)
assert getattr(ransac_estimator, 'n_trials_', None) is None
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 2)
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_n_inliers=2,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_score=0,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
assert_less(ransac_estimator.score(X[:2], y[:2]), 1)
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.predict(X), np.zeros(100))
def test_ransac_resid_thresh_no_inliers():
# When residual_threshold=0.0 there are no inliers and a
# ValueError with a message should be raised
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.0, random_state=0)
assert_raises_regexp(ValueError,
"No inliers.*residual_threshold.*0\.0",
ransac_estimator.fit, X, y)
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(ransac_estimator.predict(X),
ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator2 = RANSACRegressor(base_estimator,
min_samples=2. / X.shape[0],
residual_threshold=5, random_state=0)
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
residual_threshold=5, random_state=0)
ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
residual_threshold=5, random_state=0)
ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
residual_threshold=5, random_state=0)
ransac_estimator6 = RANSACRegressor(base_estimator,
residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(base_estimator,
min_samples=X.shape[0] + 1,
residual_threshold=5, random_state=0)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator2.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator5.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator6.predict(X))
assert_raises(ValueError, ransac_estimator3.fit, X, y)
assert_raises(ValueError, ransac_estimator4.fit, X, y)
assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
# XXX: Remove in 0.20
def test_ransac_residual_metric():
residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
assert_warns(DeprecationWarning, ransac_estimator1.fit, X, yyy)
assert_warns(DeprecationWarning, ransac_estimator2.fit, X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
assert_warns(DeprecationWarning, ransac_estimator2.fit, X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_residual_loss():
loss_multi1 = lambda y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
loss_multi2 = lambda y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
loss_mono = lambda y_true, y_pred : np.abs(y_true - y_pred)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss=loss_multi1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss=loss_multi2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.loss = loss_mono
ransac_estimator2.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss="squared_loss")
ransac_estimator3.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator3.predict(X))
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
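    # For reference, the expected values below follow the standard RANSAC bound
    # N = ceil(log(1 - p) / log(1 - w**min_samples)), where w is the inlier
    # ratio (1 - e) and p the stop probability.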
# e = 0%, min_samples = X
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
    # Edge cases for the stop probability: p = 0 gives 0 trials, p = 1 gives inf
assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=-0.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=1.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_fit_sample_weight():
ransac_estimator = RANSACRegressor(random_state=0)
n_samples = y.shape[0]
weights = np.ones(n_samples)
ransac_estimator.fit(X, y, weights)
# sanity check
assert_equal(ransac_estimator.inlier_mask_.shape[0], n_samples)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
# check that mask is correct
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
X_ = random_state.randint(0, 200, [10, 1])
y_ = np.ndarray.flatten(0.2 * X_ + 2)
sample_weight = random_state.randint(0, 10, 10)
outlier_X = random_state.randint(0, 1000, [1, 1])
outlier_weight = random_state.randint(0, 10, 1)
outlier_y = random_state.randint(-1000, 0, 1)
X_flat = np.append(np.repeat(X_, sample_weight, axis=0),
np.repeat(outlier_X, outlier_weight, axis=0), axis=0)
y_flat = np.ndarray.flatten(np.append(np.repeat(y_, sample_weight, axis=0),
np.repeat(outlier_y, outlier_weight, axis=0),
axis=0))
ransac_estimator.fit(X_flat, y_flat)
ref_coef_ = ransac_estimator.estimator_.coef_
sample_weight = np.append(sample_weight, outlier_weight)
X_ = np.append(X_, outlier_X, axis=0)
y_ = np.append(y_, outlier_y)
ransac_estimator.fit(X_, y_, sample_weight)
assert_almost_equal(ransac_estimator.estimator_.coef_, ref_coef_)
# check that if base_estimator.fit doesn't support
# sample_weight, raises error
base_estimator = Lasso()
ransac_estimator = RANSACRegressor(base_estimator)
assert_raises(ValueError, ransac_estimator.fit, X, y, weights)
| bsd-3-clause |
cprov/snapcraft | snapcraft/formatting_utils.py | 3 | 2778 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from typing import Iterable, List, Sized
def combine_paths(paths: List[str], prepend: str, separator: str) -> str:
"""Combine list of paths into a string.
:param list paths: List of paths to stringify.
:param str prepend: String to prepend to each path in the string.
:param str separator: String to place between each path in the string.
"""
paths = ["{}{}".format(prepend, p) for p in paths]
return separator.join(paths)
def format_path_variable(
envvar: str, paths: List[str], prepend: str, separator: str
) -> str:
"""Return a path-like environment variable definition that appends.
:param str envvar: The environment variable in question.
:param list paths: The paths to append to the environment variable.
:param str prepend: String to prepend to each path in the definition.
:param str separator: String to place between each path in the definition.
"""
if not paths:
raise ValueError("Failed to format '${}': no paths supplied".format(envvar))
return '{envvar}="${envvar}{separator}{paths}"'.format(
envvar=envvar,
separator=separator,
paths=combine_paths(paths, prepend, separator),
)
def humanize_list(
items: Iterable[str], conjunction: str, item_format: str = "{!r}"
) -> str:
"""Format a list into a human-readable string.
:param list items: List to humanize.
:param str conjunction: The conjunction used to join the final element to
the rest of the list (e.g. 'and').
:param str item_format: Format string to use per item.
"""
if not items:
return ""
quoted_items = [item_format.format(item) for item in sorted(items)]
if len(quoted_items) == 1:
return quoted_items[0]
humanized = ", ".join(quoted_items[:-1])
if len(quoted_items) > 2:
humanized += ","
return "{} {} {}".format(humanized, conjunction, quoted_items[-1])
def pluralize(container: Sized, if_one: str, if_multiple: str) -> str:
if len(container) == 1:
return if_one
else:
return if_multiple
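# Illustrative usage of the helpers above (doctest-style sketch, not part of
# the module; the "$SNAP" prefix is just an example value):
#
#     >>> combine_paths(["bin", "sbin"], "$SNAP/", ":")
#     '$SNAP/bin:$SNAP/sbin'
#     >>> format_path_variable("PATH", ["bin"], "$SNAP/", ":")
#     'PATH="$PATH:$SNAP/bin"'
#     >>> humanize_list(["b", "a"], "and")
#     "'a' and 'b'"
#     >>> pluralize(["one", "two"], "part", "parts")
#     'parts'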
| gpl-3.0 |
Atrox/haikunatorpy | haikunator/haikunator.py | 1 | 3671 | from random import Random
class Haikunator:
_adjectives = [
'aged', 'ancient', 'autumn', 'billowing', 'bitter', 'black', 'blue', 'bold',
'broad', 'broken', 'calm', 'cold', 'cool', 'crimson', 'curly', 'damp',
'dark', 'dawn', 'delicate', 'divine', 'dry', 'empty', 'falling', 'fancy',
'flat', 'floral', 'fragrant', 'frosty', 'gentle', 'green', 'hidden', 'holy',
'icy', 'jolly', 'late', 'lingering', 'little', 'lively', 'long', 'lucky',
'misty', 'morning', 'muddy', 'mute', 'nameless', 'noisy', 'odd', 'old',
'orange', 'patient', 'plain', 'polished', 'proud', 'purple', 'quiet', 'rapid',
'raspy', 'red', 'restless', 'rough', 'round', 'royal', 'shiny', 'shrill',
'shy', 'silent', 'small', 'snowy', 'soft', 'solitary', 'sparkling', 'spring',
'square', 'steep', 'still', 'summer', 'super', 'sweet', 'throbbing', 'tight',
'tiny', 'twilight', 'wandering', 'weathered', 'white', 'wild', 'winter', 'wispy',
'withered', 'yellow', 'young'
]
_nouns = [
'art', 'band', 'bar', 'base', 'bird', 'block', 'boat', 'bonus',
'bread', 'breeze', 'brook', 'bush', 'butterfly', 'cake', 'cell', 'cherry',
'cloud', 'credit', 'darkness', 'dawn', 'dew', 'disk', 'dream', 'dust',
'feather', 'field', 'fire', 'firefly', 'flower', 'fog', 'forest', 'frog',
'frost', 'glade', 'glitter', 'grass', 'hall', 'hat', 'haze', 'heart',
'hill', 'king', 'lab', 'lake', 'leaf', 'limit', 'math', 'meadow',
'mode', 'moon', 'morning', 'mountain', 'mouse', 'mud', 'night', 'paper',
'pine', 'poetry', 'pond', 'queen', 'rain', 'recipe', 'resonance', 'rice',
'river', 'salad', 'scene', 'sea', 'shadow', 'shape', 'silence', 'sky',
'smoke', 'snow', 'snowflake', 'sound', 'star', 'sun', 'sun', 'sunset',
'surf', 'term', 'thunder', 'tooth', 'tree', 'truth', 'union', 'unit',
'violet', 'voice', 'water', 'waterfall', 'wave', 'wildflower', 'wind', 'wood'
]
def __init__(self, seed=None, adjectives=None, nouns=None):
"""
Initialize new haikunator
:param seed: Seed for Random
:param adjectives: Custom Adjectives
:param nouns: Custom Nouns
:type adjectives: list
:type nouns: list
"""
if adjectives is not None:
self._adjectives = adjectives
if nouns is not None:
self._nouns = nouns
self.random = Random(seed)
def haikunate(self, delimiter='-', token_length=4, token_hex=False, token_chars='0123456789'):
"""
Generate heroku-like random names to use in your python applications
:param delimiter: Delimiter
:param token_length: TokenLength
:param token_hex: TokenHex
:param token_chars: TokenChars
:type delimiter: str
:type token_length: int
:type token_hex: bool
:type token_chars: str
:return: heroku-like random string
:rtype: str
"""
if token_hex:
token_chars = '0123456789abcdef'
adjective = self._random_element(self._adjectives)
noun = self._random_element(self._nouns)
token = ''.join(self._random_element(token_chars) for _ in range(token_length))
sections = [adjective, noun, token]
return delimiter.join(filter(None, sections))
def _random_element(self, s):
"""
Get random element from string or list
:param s: Element
:type s: str or list
:return: str
:rtype: str
"""
if len(s) <= 0:
return ''
return self.random.choice(s)
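# Illustrative usage (sketch; the generated names are random, so the outputs
# shown are only examples of the shape produced):
#
#     >>> h = Haikunator(seed=123)
#     >>> h.haikunate()                                # e.g. 'polished-breeze-5821'
#     >>> h.haikunate(token_hex=True)                  # e.g. 'misty-surf-01b7'
#     >>> h.haikunate(delimiter='.', token_length=0)   # e.g. 'cool.pond'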
| bsd-3-clause |
LLNL/spack | lib/spack/external/macholib/util.py | 12 | 7079 | import os
import sys
import stat
import struct
import shutil
from macholib import mach_o
MAGIC = [
struct.pack('!L', getattr(mach_o, 'MH_' + _))
for _ in ['MAGIC', 'CIGAM', 'MAGIC_64', 'CIGAM_64']
]
FAT_MAGIC_BYTES = struct.pack('!L', mach_o.FAT_MAGIC)
MAGIC_LEN = 4
STRIPCMD = ['/usr/bin/strip', '-x', '-S', '-']
try:
unicode
except NameError:
unicode = str
def fsencoding(s, encoding=sys.getfilesystemencoding()):
"""
Ensure the given argument is in filesystem encoding (not unicode)
"""
if isinstance(s, unicode):
s = s.encode(encoding)
return s
def move(src, dst):
"""
move that ensures filesystem encoding of paths
"""
shutil.move(fsencoding(src), fsencoding(dst))
def copy2(src, dst):
"""
copy2 that ensures filesystem encoding of paths
"""
shutil.copy2(fsencoding(src), fsencoding(dst))
def flipwritable(fn, mode=None):
"""
Flip the writability of a file and return the old mode. Returns None
if the file is already writable.
"""
if os.access(fn, os.W_OK):
return None
old_mode = os.stat(fn).st_mode
os.chmod(fn, stat.S_IWRITE | old_mode)
return old_mode
class fileview(object):
"""
A proxy for file-like objects that exposes a given view of a file
"""
def __init__(self, fileobj, start, size):
self._fileobj = fileobj
self._start = start
self._end = start + size
def __repr__(self):
return '<fileview [%d, %d] %r>' % (
self._start, self._end, self._fileobj)
def tell(self):
return self._fileobj.tell() - self._start
def _checkwindow(self, seekto, op):
if not (self._start <= seekto <= self._end):
raise IOError("%s to offset %d is outside window [%d, %d]" % (
op, seekto, self._start, self._end))
def seek(self, offset, whence=0):
seekto = offset
if whence == 0:
seekto += self._start
elif whence == 1:
seekto += self._fileobj.tell()
elif whence == 2:
seekto += self._end
else:
raise IOError("Invalid whence argument to seek: %r" % (whence,))
self._checkwindow(seekto, 'seek')
self._fileobj.seek(seekto)
def write(self, bytes):
here = self._fileobj.tell()
self._checkwindow(here, 'write')
self._checkwindow(here + len(bytes), 'write')
self._fileobj.write(bytes)
def read(self, size=sys.maxsize):
if size < 0:
raise ValueError(
"Invalid size %s while reading from %s", size, self._fileobj)
here = self._fileobj.tell()
self._checkwindow(here, 'read')
bytes = min(size, self._end - here)
return self._fileobj.read(bytes)
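# Illustrative sketch of how fileview is typically used: it confines reads and
# seeks to one architecture slice of a fat Mach-O file (the path and offsets
# below are made-up example values):
#
#     >>> f = open('/usr/lib/libSystem.B.dylib', 'rb')
#     >>> view = fileview(f, 4096, 8192)
#     >>> view.seek(0)            # seeks to absolute offset 4096
#     >>> header = view.read(32)  # never reads past start + size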
def mergecopy(src, dest):
"""
copy2, but only if the destination isn't up to date
"""
if os.path.exists(dest) and \
os.stat(dest).st_mtime >= os.stat(src).st_mtime:
return
copy2(src, dest)
def mergetree(src, dst, condition=None, copyfn=mergecopy, srcbase=None):
"""
Recursively merge a directory tree using mergecopy().
"""
src = fsencoding(src)
dst = fsencoding(dst)
if srcbase is None:
srcbase = src
names = map(fsencoding, os.listdir(src))
try:
os.makedirs(dst)
except OSError:
pass
errors = []
for name in names:
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
if condition is not None and not condition(srcname):
continue
try:
if os.path.islink(srcname):
# XXX: This is naive at best, should check srcbase(?)
realsrc = os.readlink(srcname)
os.symlink(realsrc, dstname)
elif os.path.isdir(srcname):
mergetree(
srcname, dstname,
condition=condition, copyfn=copyfn, srcbase=srcbase)
else:
copyfn(srcname, dstname)
except (IOError, os.error) as why:
errors.append((srcname, dstname, why))
if errors:
raise IOError(errors)
def sdk_normalize(filename):
"""
Normalize a path to strip out the SDK portion, normally so that it
can be decided whether it is in a system path or not.
"""
if filename.startswith('/Developer/SDKs/'):
pathcomp = filename.split('/')
del pathcomp[1:4]
filename = '/'.join(pathcomp)
return filename
NOT_SYSTEM_FILES = []
def in_system_path(filename):
"""
Return True if the file is in a system path
"""
fn = sdk_normalize(os.path.realpath(filename))
if fn.startswith('/usr/local/'):
return False
elif fn.startswith('/System/') or fn.startswith('/usr/'):
if fn in NOT_SYSTEM_FILES:
return False
return True
else:
return False
def has_filename_filter(module):
"""
Return False if the module does not have a filename attribute
"""
return getattr(module, 'filename', None) is not None
def get_magic():
"""
Get a list of valid Mach-O header signatures, not including the fat header
"""
return MAGIC
def is_platform_file(path):
"""
Return True if the file is Mach-O
"""
if not os.path.exists(path) or os.path.islink(path):
return False
# If the header is fat, we need to read into the first arch
with open(path, 'rb') as fileobj:
bytes = fileobj.read(MAGIC_LEN)
if bytes == FAT_MAGIC_BYTES:
# Read in the fat header
fileobj.seek(0)
header = mach_o.fat_header.from_fileobj(fileobj, _endian_='>')
if header.nfat_arch < 1:
return False
# Read in the first fat arch header
arch = mach_o.fat_arch.from_fileobj(fileobj, _endian_='>')
fileobj.seek(arch.offset)
# Read magic off the first header
bytes = fileobj.read(MAGIC_LEN)
for magic in MAGIC:
if bytes == magic:
return True
return False
def iter_platform_files(dst):
"""
Walk a directory and yield each full path that is a Mach-O file
"""
for root, dirs, files in os.walk(dst):
for fn in files:
fn = os.path.join(root, fn)
if is_platform_file(fn):
yield fn
def strip_files(files, argv_max=(256 * 1024)):
"""
Strip a list of files
"""
tostrip = [(fn, flipwritable(fn)) for fn in files]
while tostrip:
cmd = list(STRIPCMD)
flips = []
pathlen = sum([len(s) + 1 for s in cmd])
while pathlen < argv_max:
if not tostrip:
break
added, flip = tostrip.pop()
pathlen += len(added) + 1
cmd.append(added)
flips.append((added, flip))
else:
cmd.pop()
tostrip.append(flips.pop())
os.spawnv(os.P_WAIT, cmd[0], cmd)
for args in flips:
flipwritable(*args)
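# Illustrative sketch tying the helpers above together (the bundle path is an
# example only):
#
#     >>> binaries = list(iter_platform_files('dist/MyApp.app'))
#     >>> all(is_platform_file(fn) for fn in binaries)
#     True
#     >>> strip_files(binaries)   # runs /usr/bin/strip over the Mach-O files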
| lgpl-2.1 |
zacharyvoase/zenqueue | zenqueue/utils/sync.py | 1 | 3408 | # -*- coding: utf-8 -*-
from collections import deque
from functools import wraps
import threading
def with_lock(method):
@wraps(method)
def wrapper(self, *args, **kwargs):
self._lock.acquire()
try:
return method(self, *args, **kwargs)
finally:
self._lock.release()
return wrapper
class Event(object):
"""An event which allows values to be sent."""
class WaitCancelled(Exception): pass
class Timeout(Exception): pass
def __init__(self):
        # A re-entrant lock is needed here: cancel_all() calls cancel(), and
        # both are wrapped by with_lock, so a plain Lock would deadlock.
        self._lock = threading.RLock()
self._waiters = {}
self._result = None
@with_lock
def send(self, value=True):
self._result = value
for waiter in self._waiters.keys():
self._waiters[waiter][1] = True
self._waiters[waiter][0].set()
@with_lock
def cancel_all(self):
for waiter in self._waiters.keys():
self.cancel(waiter)
@with_lock
def cancel(self, thread):
if thread in self._waiters:
self._waiters[thread][1] = False
self._waiters[thread][0].set()
def wait(self, timeout=None):
event = threading.Event()
self._waiters[threading.currentThread()] = [event, None]
# A timeout of None implies eternal blocking.
if timeout is not None:
event.wait(timeout)
else:
event.wait()
status = self._waiters.pop(threading.currentThread())[1]
if not event.isSet():
raise self.Timeout
if status:
return self._result
raise self.WaitCancelled
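# Illustrative sketch of the Event API (thread roles and values are examples):
#
#     event = Event()
#     # waiting thread:                # signalling thread:
#     #   value = event.wait(5.0)      #   event.send('result')
#     # wait() returns 'result', raises Timeout after 5 seconds, or raises
#     # WaitCancelled if another thread calls cancel()/cancel_all().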
class Semaphore(object):
"""A semaphore with queueing which records the threads which acquire it."""
class WaitCancelled(Exception): pass
class Timeout(Exception): pass
def __init__(self, initial=0):
self.evt_queue = deque()
self._lock = threading.Lock()
self.__count = initial
def __enter__(self):
self.acquire()
return self
def __exit__(self, *exc_info):
self.release()
return False
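    # Illustrative: the context-manager protocol above allows
    #
    #     sem = Semaphore(initial=2)
    #     with sem:
    #         ...  # at most two threads run this block concurrently
    #
    # which is equivalent to calling acquire() and release() manually.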
def acquire(self, timeout=None):
if self.__count <= 0:
ready_event = Event()
self.evt_queue.appendleft(ready_event)
try:
result = ready_event.wait(timeout=timeout)
except ready_event.Timeout:
if ready_event in self.evt_queue:
self.evt_queue.remove(ready_event)
raise self.Timeout
except ready_event.WaitCancelled:
if ready_event in self.evt_queue:
self.evt_queue.remove(ready_event)
raise self.WaitCancelled
self.__count -= 1
def release(self):
self.__count += 1
if self.evt_queue:
ready_event = self.evt_queue.pop()
ready_event.send(True)
@with_lock
def cancel_all(self):
while self.evt_queue:
ready_event = self.evt_queue.pop()
ready_event.cancel_all()
@property
def count(self):
return self.__count
class Lock(Semaphore):
def __init__(self):
super(Lock, self).__init__(initial=1)
@property
def in_use(self):
        return (self.count == 0)
| mit |
sharhar/USB-Thing | UpdaterFiles/Lib/python-3.5.1.amd64/Lib/pathlib.py | 3 | 46272 | import fnmatch
import functools
import io
import ntpath
import os
import posixpath
import re
import sys
from collections import Sequence
from contextlib import contextmanager
from errno import EINVAL, ENOENT, ENOTDIR
from operator import attrgetter
from stat import S_ISDIR, S_ISLNK, S_ISREG, S_ISSOCK, S_ISBLK, S_ISCHR, S_ISFIFO
from urllib.parse import quote_from_bytes as urlquote_from_bytes
supports_symlinks = True
if os.name == 'nt':
import nt
if sys.getwindowsversion()[:2] >= (6, 0):
from nt import _getfinalpathname
else:
supports_symlinks = False
_getfinalpathname = None
else:
nt = None
__all__ = [
"PurePath", "PurePosixPath", "PureWindowsPath",
"Path", "PosixPath", "WindowsPath",
]
#
# Internals
#
def _is_wildcard_pattern(pat):
# Whether this pattern needs actual matching using fnmatch, or can
# be looked up directly as a file.
return "*" in pat or "?" in pat or "[" in pat
class _Flavour(object):
"""A flavour implements a particular (platform-specific) set of path
semantics."""
def __init__(self):
self.join = self.sep.join
def parse_parts(self, parts):
parsed = []
sep = self.sep
altsep = self.altsep
drv = root = ''
it = reversed(parts)
for part in it:
if not part:
continue
if altsep:
part = part.replace(altsep, sep)
drv, root, rel = self.splitroot(part)
if sep in rel:
for x in reversed(rel.split(sep)):
if x and x != '.':
parsed.append(sys.intern(x))
else:
if rel and rel != '.':
parsed.append(sys.intern(rel))
if drv or root:
if not drv:
# If no drive is present, try to find one in the previous
# parts. This makes the result of parsing e.g.
# ("C:", "/", "a") reasonably intuitive.
for part in it:
if not part:
continue
if altsep:
part = part.replace(altsep, sep)
drv = self.splitroot(part)[0]
if drv:
break
break
if drv or root:
parsed.append(drv + root)
parsed.reverse()
return drv, root, parsed
def join_parsed_parts(self, drv, root, parts, drv2, root2, parts2):
"""
Join the two paths represented by the respective
(drive, root, parts) tuples. Return a new (drive, root, parts) tuple.
"""
if root2:
if not drv2 and drv:
return drv, root2, [drv + root2] + parts2[1:]
elif drv2:
if drv2 == drv or self.casefold(drv2) == self.casefold(drv):
# Same drive => second path is relative to the first
return drv, root, parts + parts2[1:]
else:
# Second path is non-anchored (common case)
return drv, root, parts + parts2
return drv2, root2, parts2
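# Illustrative sketch of what parse_parts() produces (the values assume the
# flavour instances defined further below):
#
#     _posix_flavour.parse_parts(('/usr', 'bin'))
#         -> ('', '/', ['/', 'usr', 'bin'])
#     _windows_flavour.parse_parts(('c:\\Users', 'x'))
#         -> ('c:', '\\', ['c:\\', 'Users', 'x'])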
class _WindowsFlavour(_Flavour):
# Reference for Windows paths can be found at
# http://msdn.microsoft.com/en-us/library/aa365247%28v=vs.85%29.aspx
sep = '\\'
altsep = '/'
has_drv = True
pathmod = ntpath
is_supported = (os.name == 'nt')
drive_letters = (
set(chr(x) for x in range(ord('a'), ord('z') + 1)) |
set(chr(x) for x in range(ord('A'), ord('Z') + 1))
)
ext_namespace_prefix = '\\\\?\\'
reserved_names = (
{'CON', 'PRN', 'AUX', 'NUL'} |
{'COM%d' % i for i in range(1, 10)} |
{'LPT%d' % i for i in range(1, 10)}
)
# Interesting findings about extended paths:
# - '\\?\c:\a', '//?/c:\a' and '//?/c:/a' are all supported
# but '\\?\c:/a' is not
# - extended paths are always absolute; "relative" extended paths will
# fail.
def splitroot(self, part, sep=sep):
first = part[0:1]
second = part[1:2]
if (second == sep and first == sep):
# XXX extended paths should also disable the collapsing of "."
# components (according to MSDN docs).
prefix, part = self._split_extended_path(part)
first = part[0:1]
second = part[1:2]
else:
prefix = ''
third = part[2:3]
if (second == sep and first == sep and third != sep):
# is a UNC path:
# vvvvvvvvvvvvvvvvvvvvv root
# \\machine\mountpoint\directory\etc\...
# directory ^^^^^^^^^^^^^^
index = part.find(sep, 2)
if index != -1:
index2 = part.find(sep, index + 1)
# a UNC path can't have two slashes in a row
# (after the initial two)
if index2 != index + 1:
if index2 == -1:
index2 = len(part)
if prefix:
return prefix + part[1:index2], sep, part[index2+1:]
else:
return part[:index2], sep, part[index2+1:]
drv = root = ''
if second == ':' and first in self.drive_letters:
drv = part[:2]
part = part[2:]
first = third
if first == sep:
root = first
part = part.lstrip(sep)
return prefix + drv, root, part
def casefold(self, s):
return s.lower()
def casefold_parts(self, parts):
return [p.lower() for p in parts]
def resolve(self, path):
s = str(path)
if not s:
return os.getcwd()
if _getfinalpathname is not None:
return self._ext_to_normal(_getfinalpathname(s))
# Means fallback on absolute
return None
def _split_extended_path(self, s, ext_prefix=ext_namespace_prefix):
prefix = ''
if s.startswith(ext_prefix):
prefix = s[:4]
s = s[4:]
if s.startswith('UNC\\'):
prefix += s[:3]
s = '\\' + s[3:]
return prefix, s
def _ext_to_normal(self, s):
# Turn back an extended path into a normal DOS-like path
return self._split_extended_path(s)[1]
def is_reserved(self, parts):
# NOTE: the rules for reserved names seem somewhat complicated
# (e.g. r"..\NUL" is reserved but not r"foo\NUL").
# We err on the side of caution and return True for paths which are
# not considered reserved by Windows.
if not parts:
return False
if parts[0].startswith('\\\\'):
# UNC paths are never reserved
return False
return parts[-1].partition('.')[0].upper() in self.reserved_names
def make_uri(self, path):
# Under Windows, file URIs use the UTF-8 encoding.
drive = path.drive
if len(drive) == 2 and drive[1] == ':':
# It's a path on a local drive => 'file:///c:/a/b'
rest = path.as_posix()[2:].lstrip('/')
return 'file:///%s/%s' % (
drive, urlquote_from_bytes(rest.encode('utf-8')))
else:
# It's a path on a network drive => 'file://host/share/a/b'
return 'file:' + urlquote_from_bytes(path.as_posix().encode('utf-8'))
def gethomedir(self, username):
if 'HOME' in os.environ:
userhome = os.environ['HOME']
elif 'USERPROFILE' in os.environ:
userhome = os.environ['USERPROFILE']
elif 'HOMEPATH' in os.environ:
try:
drv = os.environ['HOMEDRIVE']
except KeyError:
drv = ''
userhome = drv + os.environ['HOMEPATH']
else:
raise RuntimeError("Can't determine home directory")
if username:
# Try to guess user home directory. By default all users
# directories are located in the same place and are named by
# corresponding usernames. If current user home directory points
# to nonstandard place, this guess is likely wrong.
if os.environ['USERNAME'] != username:
drv, root, parts = self.parse_parts((userhome,))
if parts[-1] != os.environ['USERNAME']:
raise RuntimeError("Can't determine home directory "
"for %r" % username)
parts[-1] = username
if drv or root:
userhome = drv + root + self.join(parts[1:])
else:
userhome = self.join(parts)
return userhome
class _PosixFlavour(_Flavour):
sep = '/'
altsep = ''
has_drv = False
pathmod = posixpath
is_supported = (os.name != 'nt')
def splitroot(self, part, sep=sep):
if part and part[0] == sep:
stripped_part = part.lstrip(sep)
# According to POSIX path resolution:
# http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap04.html#tag_04_11
# "A pathname that begins with two successive slashes may be
# interpreted in an implementation-defined manner, although more
# than two leading slashes shall be treated as a single slash".
if len(part) - len(stripped_part) == 2:
return '', sep * 2, stripped_part
else:
return '', sep, stripped_part
else:
return '', '', part
def casefold(self, s):
return s
def casefold_parts(self, parts):
return parts
def resolve(self, path):
sep = self.sep
accessor = path._accessor
seen = {}
def _resolve(path, rest):
if rest.startswith(sep):
path = ''
for name in rest.split(sep):
if not name or name == '.':
# current dir
continue
if name == '..':
# parent dir
path, _, _ = path.rpartition(sep)
continue
newpath = path + sep + name
if newpath in seen:
# Already seen this path
path = seen[newpath]
if path is not None:
# use cached value
continue
# The symlink is not resolved, so we must have a symlink loop.
raise RuntimeError("Symlink loop from %r" % newpath)
# Resolve the symbolic link
try:
target = accessor.readlink(newpath)
except OSError as e:
if e.errno != EINVAL:
raise
# Not a symlink
path = newpath
else:
seen[newpath] = None # not resolved symlink
path = _resolve(path, target)
seen[newpath] = path # resolved symlink
return path
# NOTE: according to POSIX, getcwd() cannot contain path components
# which are symlinks.
base = '' if path.is_absolute() else os.getcwd()
return _resolve(base, str(path)) or sep
def is_reserved(self, parts):
return False
def make_uri(self, path):
# We represent the path using the local filesystem encoding,
# for portability to other applications.
bpath = bytes(path)
return 'file://' + urlquote_from_bytes(bpath)
def gethomedir(self, username):
if not username:
try:
return os.environ['HOME']
except KeyError:
import pwd
return pwd.getpwuid(os.getuid()).pw_dir
else:
import pwd
try:
return pwd.getpwnam(username).pw_dir
except KeyError:
raise RuntimeError("Can't determine home directory "
"for %r" % username)
_windows_flavour = _WindowsFlavour()
_posix_flavour = _PosixFlavour()
class _Accessor:
"""An accessor implements a particular (system-specific or not) way of
accessing paths on the filesystem."""
class _NormalAccessor(_Accessor):
def _wrap_strfunc(strfunc):
@functools.wraps(strfunc)
def wrapped(pathobj, *args):
return strfunc(str(pathobj), *args)
return staticmethod(wrapped)
def _wrap_binary_strfunc(strfunc):
@functools.wraps(strfunc)
def wrapped(pathobjA, pathobjB, *args):
return strfunc(str(pathobjA), str(pathobjB), *args)
return staticmethod(wrapped)
stat = _wrap_strfunc(os.stat)
lstat = _wrap_strfunc(os.lstat)
open = _wrap_strfunc(os.open)
listdir = _wrap_strfunc(os.listdir)
chmod = _wrap_strfunc(os.chmod)
if hasattr(os, "lchmod"):
lchmod = _wrap_strfunc(os.lchmod)
else:
def lchmod(self, pathobj, mode):
raise NotImplementedError("lchmod() not available on this system")
mkdir = _wrap_strfunc(os.mkdir)
unlink = _wrap_strfunc(os.unlink)
rmdir = _wrap_strfunc(os.rmdir)
rename = _wrap_binary_strfunc(os.rename)
replace = _wrap_binary_strfunc(os.replace)
if nt:
if supports_symlinks:
symlink = _wrap_binary_strfunc(os.symlink)
else:
def symlink(a, b, target_is_directory):
raise NotImplementedError("symlink() not available on this system")
else:
# Under POSIX, os.symlink() takes two args
@staticmethod
def symlink(a, b, target_is_directory):
return os.symlink(str(a), str(b))
utime = _wrap_strfunc(os.utime)
# Helper for resolve()
def readlink(self, path):
return os.readlink(path)
_normal_accessor = _NormalAccessor()
#
# Globbing helpers
#
@contextmanager
def _cached(func):
try:
func.__cached__
yield func
except AttributeError:
cache = {}
def wrapper(*args):
try:
return cache[args]
except KeyError:
value = cache[args] = func(*args)
return value
wrapper.__cached__ = True
try:
yield wrapper
finally:
cache.clear()
def _make_selector(pattern_parts):
pat = pattern_parts[0]
child_parts = pattern_parts[1:]
if pat == '**':
cls = _RecursiveWildcardSelector
elif '**' in pat:
raise ValueError("Invalid pattern: '**' can only be an entire path component")
elif _is_wildcard_pattern(pat):
cls = _WildcardSelector
else:
cls = _PreciseSelector
return cls(pat, child_parts)
if hasattr(functools, "lru_cache"):
_make_selector = functools.lru_cache()(_make_selector)
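# Illustrative sketch of how a glob pattern is decomposed into selectors
# (the pattern values are examples):
#
#     _make_selector(('docs', '*.rst'))
#         -> _PreciseSelector('docs') chaining a _WildcardSelector('*.rst')
#     _make_selector(('**', '*.py'))
#         -> _RecursiveWildcardSelector chaining a _WildcardSelector('*.py')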
class _Selector:
"""A selector matches a specific glob pattern part against the children
of a given path."""
def __init__(self, child_parts):
self.child_parts = child_parts
if child_parts:
self.successor = _make_selector(child_parts)
else:
self.successor = _TerminatingSelector()
def select_from(self, parent_path):
"""Iterate over all child paths of `parent_path` matched by this
selector. This can contain parent_path itself."""
path_cls = type(parent_path)
is_dir = path_cls.is_dir
exists = path_cls.exists
listdir = parent_path._accessor.listdir
return self._select_from(parent_path, is_dir, exists, listdir)
class _TerminatingSelector:
def _select_from(self, parent_path, is_dir, exists, listdir):
yield parent_path
class _PreciseSelector(_Selector):
def __init__(self, name, child_parts):
self.name = name
_Selector.__init__(self, child_parts)
def _select_from(self, parent_path, is_dir, exists, listdir):
if not is_dir(parent_path):
return
path = parent_path._make_child_relpath(self.name)
if exists(path):
for p in self.successor._select_from(path, is_dir, exists, listdir):
yield p
class _WildcardSelector(_Selector):
def __init__(self, pat, child_parts):
self.pat = re.compile(fnmatch.translate(pat))
_Selector.__init__(self, child_parts)
def _select_from(self, parent_path, is_dir, exists, listdir):
if not is_dir(parent_path):
return
cf = parent_path._flavour.casefold
for name in listdir(parent_path):
casefolded = cf(name)
if self.pat.match(casefolded):
path = parent_path._make_child_relpath(name)
for p in self.successor._select_from(path, is_dir, exists, listdir):
yield p
class _RecursiveWildcardSelector(_Selector):
def __init__(self, pat, child_parts):
_Selector.__init__(self, child_parts)
def _iterate_directories(self, parent_path, is_dir, listdir):
yield parent_path
for name in listdir(parent_path):
path = parent_path._make_child_relpath(name)
if is_dir(path):
for p in self._iterate_directories(path, is_dir, listdir):
yield p
def _select_from(self, parent_path, is_dir, exists, listdir):
if not is_dir(parent_path):
return
with _cached(listdir) as listdir:
yielded = set()
try:
successor_select = self.successor._select_from
for starting_point in self._iterate_directories(parent_path, is_dir, listdir):
for p in successor_select(starting_point, is_dir, exists, listdir):
if p not in yielded:
yield p
yielded.add(p)
finally:
yielded.clear()
#
# Public API
#
class _PathParents(Sequence):
"""This object provides sequence-like access to the logical ancestors
of a path. Don't try to construct it yourself."""
__slots__ = ('_pathcls', '_drv', '_root', '_parts')
def __init__(self, path):
# We don't store the instance to avoid reference cycles
self._pathcls = type(path)
self._drv = path._drv
self._root = path._root
self._parts = path._parts
def __len__(self):
if self._drv or self._root:
return len(self._parts) - 1
else:
return len(self._parts)
def __getitem__(self, idx):
if idx < 0 or idx >= len(self):
raise IndexError(idx)
return self._pathcls._from_parsed_parts(self._drv, self._root,
self._parts[:-idx - 1])
def __repr__(self):
return "<{}.parents>".format(self._pathcls.__name__)
class PurePath(object):
"""PurePath represents a filesystem path and offers operations which
don't imply any actual filesystem I/O. Depending on your system,
instantiating a PurePath will return either a PurePosixPath or a
PureWindowsPath object. You can also instantiate either of these classes
directly, regardless of your system.
"""
__slots__ = (
'_drv', '_root', '_parts',
'_str', '_hash', '_pparts', '_cached_cparts',
)
def __new__(cls, *args):
"""Construct a PurePath from one or several strings and or existing
PurePath objects. The strings and path objects are combined so as
to yield a canonicalized path, which is incorporated into the
new PurePath object.
"""
if cls is PurePath:
cls = PureWindowsPath if os.name == 'nt' else PurePosixPath
return cls._from_parts(args)
def __reduce__(self):
# Using the parts tuple helps share interned path parts
# when pickling related paths.
return (self.__class__, tuple(self._parts))
@classmethod
def _parse_args(cls, args):
# This is useful when you don't want to create an instance, just
# canonicalize some constructor arguments.
parts = []
for a in args:
if isinstance(a, PurePath):
parts += a._parts
elif isinstance(a, str):
# Force-cast str subclasses to str (issue #21127)
parts.append(str(a))
else:
raise TypeError(
"argument should be a path or str object, not %r"
% type(a))
return cls._flavour.parse_parts(parts)
@classmethod
def _from_parts(cls, args, init=True):
# We need to call _parse_args on the instance, so as to get the
# right flavour.
self = object.__new__(cls)
drv, root, parts = self._parse_args(args)
self._drv = drv
self._root = root
self._parts = parts
if init:
self._init()
return self
@classmethod
def _from_parsed_parts(cls, drv, root, parts, init=True):
self = object.__new__(cls)
self._drv = drv
self._root = root
self._parts = parts
if init:
self._init()
return self
@classmethod
def _format_parsed_parts(cls, drv, root, parts):
if drv or root:
return drv + root + cls._flavour.join(parts[1:])
else:
return cls._flavour.join(parts)
def _init(self):
        # Overridden in concrete Path
pass
def _make_child(self, args):
drv, root, parts = self._parse_args(args)
drv, root, parts = self._flavour.join_parsed_parts(
self._drv, self._root, self._parts, drv, root, parts)
return self._from_parsed_parts(drv, root, parts)
def __str__(self):
"""Return the string representation of the path, suitable for
passing to system calls."""
try:
return self._str
except AttributeError:
self._str = self._format_parsed_parts(self._drv, self._root,
self._parts) or '.'
return self._str
def as_posix(self):
"""Return the string representation of the path with forward (/)
slashes."""
f = self._flavour
return str(self).replace(f.sep, '/')
def __bytes__(self):
"""Return the bytes representation of the path. This is only
recommended to use under Unix."""
return os.fsencode(str(self))
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, self.as_posix())
def as_uri(self):
"""Return the path as a 'file' URI."""
if not self.is_absolute():
raise ValueError("relative path can't be expressed as a file URI")
return self._flavour.make_uri(self)
@property
def _cparts(self):
# Cached casefolded parts, for hashing and comparison
try:
return self._cached_cparts
except AttributeError:
self._cached_cparts = self._flavour.casefold_parts(self._parts)
return self._cached_cparts
def __eq__(self, other):
if not isinstance(other, PurePath):
return NotImplemented
return self._cparts == other._cparts and self._flavour is other._flavour
def __hash__(self):
try:
return self._hash
except AttributeError:
self._hash = hash(tuple(self._cparts))
return self._hash
def __lt__(self, other):
if not isinstance(other, PurePath) or self._flavour is not other._flavour:
return NotImplemented
return self._cparts < other._cparts
def __le__(self, other):
if not isinstance(other, PurePath) or self._flavour is not other._flavour:
return NotImplemented
return self._cparts <= other._cparts
def __gt__(self, other):
if not isinstance(other, PurePath) or self._flavour is not other._flavour:
return NotImplemented
return self._cparts > other._cparts
def __ge__(self, other):
if not isinstance(other, PurePath) or self._flavour is not other._flavour:
return NotImplemented
return self._cparts >= other._cparts
drive = property(attrgetter('_drv'),
doc="""The drive prefix (letter or UNC path), if any.""")
root = property(attrgetter('_root'),
doc="""The root of the path, if any.""")
@property
def anchor(self):
"""The concatenation of the drive and root, or ''."""
anchor = self._drv + self._root
return anchor
@property
def name(self):
"""The final path component, if any."""
parts = self._parts
if len(parts) == (1 if (self._drv or self._root) else 0):
return ''
return parts[-1]
@property
def suffix(self):
"""The final component's last suffix, if any."""
name = self.name
i = name.rfind('.')
if 0 < i < len(name) - 1:
return name[i:]
else:
return ''
@property
def suffixes(self):
"""A list of the final component's suffixes, if any."""
name = self.name
if name.endswith('.'):
return []
name = name.lstrip('.')
return ['.' + suffix for suffix in name.split('.')[1:]]
@property
def stem(self):
"""The final path component, minus its last suffix."""
name = self.name
i = name.rfind('.')
if 0 < i < len(name) - 1:
return name[:i]
else:
return name
def with_name(self, name):
"""Return a new path with the file name changed."""
if not self.name:
raise ValueError("%r has an empty name" % (self,))
drv, root, parts = self._flavour.parse_parts((name,))
if (not name or name[-1] in [self._flavour.sep, self._flavour.altsep]
or drv or root or len(parts) != 1):
raise ValueError("Invalid name %r" % (name))
return self._from_parsed_parts(self._drv, self._root,
self._parts[:-1] + [name])
def with_suffix(self, suffix):
"""Return a new path with the file suffix changed (or added, if none)."""
# XXX if suffix is None, should the current suffix be removed?
f = self._flavour
if f.sep in suffix or f.altsep and f.altsep in suffix:
raise ValueError("Invalid suffix %r" % (suffix))
if suffix and not suffix.startswith('.') or suffix == '.':
raise ValueError("Invalid suffix %r" % (suffix))
name = self.name
if not name:
raise ValueError("%r has an empty name" % (self,))
old_suffix = self.suffix
if not old_suffix:
name = name + suffix
else:
name = name[:-len(old_suffix)] + suffix
return self._from_parsed_parts(self._drv, self._root,
self._parts[:-1] + [name])
def relative_to(self, *other):
"""Return the relative path to another path identified by the passed
arguments. If the operation is not possible (because this is not
a subpath of the other path), raise ValueError.
"""
# For the purpose of this method, drive and root are considered
# separate parts, i.e.:
# Path('c:/').relative_to('c:') gives Path('/')
# Path('c:/').relative_to('/') raise ValueError
if not other:
raise TypeError("need at least one argument")
parts = self._parts
drv = self._drv
root = self._root
if root:
abs_parts = [drv, root] + parts[1:]
else:
abs_parts = parts
to_drv, to_root, to_parts = self._parse_args(other)
if to_root:
to_abs_parts = [to_drv, to_root] + to_parts[1:]
else:
to_abs_parts = to_parts
n = len(to_abs_parts)
cf = self._flavour.casefold_parts
if (root or drv) if n == 0 else cf(abs_parts[:n]) != cf(to_abs_parts):
formatted = self._format_parsed_parts(to_drv, to_root, to_parts)
raise ValueError("{!r} does not start with {!r}"
.format(str(self), str(formatted)))
return self._from_parsed_parts('', root if n == 1 else '',
abs_parts[n:])
@property
def parts(self):
"""An object providing sequence-like access to the
components in the filesystem path."""
# We cache the tuple to avoid building a new one each time .parts
# is accessed. XXX is this necessary?
try:
return self._pparts
except AttributeError:
self._pparts = tuple(self._parts)
return self._pparts
def joinpath(self, *args):
"""Combine this path with one or several arguments, and return a
new path representing either a subpath (if all arguments are relative
paths) or a totally different path (if one of the arguments is
anchored).
"""
return self._make_child(args)
def __truediv__(self, key):
return self._make_child((key,))
def __rtruediv__(self, key):
return self._from_parts([key] + self._parts)
@property
def parent(self):
"""The logical parent of the path."""
drv = self._drv
root = self._root
parts = self._parts
if len(parts) == 1 and (drv or root):
return self
return self._from_parsed_parts(drv, root, parts[:-1])
@property
def parents(self):
"""A sequence of this path's logical parents."""
return _PathParents(self)
def is_absolute(self):
"""True if the path is absolute (has both a root and, if applicable,
a drive)."""
if not self._root:
return False
return not self._flavour.has_drv or bool(self._drv)
def is_reserved(self):
"""Return True if the path contains one of the special names reserved
by the system, if any."""
return self._flavour.is_reserved(self._parts)
def match(self, path_pattern):
"""
Return True if this path matches the given pattern.
"""
cf = self._flavour.casefold
path_pattern = cf(path_pattern)
drv, root, pat_parts = self._flavour.parse_parts((path_pattern,))
if not pat_parts:
raise ValueError("empty pattern")
if drv and drv != cf(self._drv):
return False
if root and root != cf(self._root):
return False
parts = self._cparts
if drv or root:
if len(pat_parts) != len(parts):
return False
pat_parts = pat_parts[1:]
elif len(pat_parts) > len(parts):
return False
for part, pat in zip(reversed(parts), reversed(pat_parts)):
if not fnmatch.fnmatchcase(part, pat):
return False
return True
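# Illustrative sketch of the pure-path API defined above (doctest-style):
#
#     >>> p = PurePosixPath('/etc') / 'nginx' / 'nginx.conf'
#     >>> p
#     PurePosixPath('/etc/nginx/nginx.conf')
#     >>> p.name, p.stem, p.suffix
#     ('nginx.conf', 'nginx', '.conf')
#     >>> p.relative_to('/etc')
#     PurePosixPath('nginx/nginx.conf')
#     >>> p.match('*.conf')
#     True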
class PurePosixPath(PurePath):
_flavour = _posix_flavour
__slots__ = ()
class PureWindowsPath(PurePath):
_flavour = _windows_flavour
__slots__ = ()
# Filesystem-accessing classes
class Path(PurePath):
__slots__ = (
'_accessor',
'_closed',
)
def __new__(cls, *args, **kwargs):
if cls is Path:
cls = WindowsPath if os.name == 'nt' else PosixPath
self = cls._from_parts(args, init=False)
if not self._flavour.is_supported:
raise NotImplementedError("cannot instantiate %r on your system"
% (cls.__name__,))
self._init()
return self
def _init(self,
# Private non-constructor arguments
template=None,
):
self._closed = False
if template is not None:
self._accessor = template._accessor
else:
self._accessor = _normal_accessor
def _make_child_relpath(self, part):
# This is an optimization used for dir walking. `part` must be
# a single part relative to this path.
parts = self._parts + [part]
return self._from_parsed_parts(self._drv, self._root, parts)
def __enter__(self):
if self._closed:
self._raise_closed()
return self
def __exit__(self, t, v, tb):
self._closed = True
def _raise_closed(self):
raise ValueError("I/O operation on closed path")
def _opener(self, name, flags, mode=0o666):
# A stub for the opener argument to built-in open()
return self._accessor.open(self, flags, mode)
def _raw_open(self, flags, mode=0o777):
"""
Open the file pointed by this path and return a file descriptor,
as os.open() does.
"""
if self._closed:
self._raise_closed()
return self._accessor.open(self, flags, mode)
# Public API
@classmethod
def cwd(cls):
"""Return a new path pointing to the current working directory
(as returned by os.getcwd()).
"""
return cls(os.getcwd())
@classmethod
def home(cls):
"""Return a new path pointing to the user's home directory (as
returned by os.path.expanduser('~')).
"""
return cls(cls()._flavour.gethomedir(None))
def samefile(self, other_path):
"""Return whether other_path is the same or not as this file
(as returned by os.path.samefile()).
"""
st = self.stat()
try:
other_st = other_path.stat()
except AttributeError:
other_st = os.stat(other_path)
return os.path.samestat(st, other_st)
def iterdir(self):
"""Iterate over the files in this directory. Does not yield any
result for the special paths '.' and '..'.
"""
if self._closed:
self._raise_closed()
for name in self._accessor.listdir(self):
if name in {'.', '..'}:
# Yielding a path object for these makes little sense
continue
yield self._make_child_relpath(name)
if self._closed:
self._raise_closed()
def glob(self, pattern):
"""Iterate over this subtree and yield all existing files (of any
kind, including directories) matching the given pattern.
"""
pattern = self._flavour.casefold(pattern)
drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
if drv or root:
raise NotImplementedError("Non-relative patterns are unsupported")
selector = _make_selector(tuple(pattern_parts))
for p in selector.select_from(self):
yield p
def rglob(self, pattern):
"""Recursively yield all existing files (of any kind, including
directories) matching the given pattern, anywhere in this subtree.
"""
pattern = self._flavour.casefold(pattern)
drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
if drv or root:
raise NotImplementedError("Non-relative patterns are unsupported")
selector = _make_selector(("**",) + tuple(pattern_parts))
for p in selector.select_from(self):
yield p
def absolute(self):
"""Return an absolute version of this path. This function works
even if the path doesn't point to anything.
No normalization is done, i.e. all '.' and '..' will be kept along.
Use resolve() to get the canonical path to a file.
"""
# XXX untested yet!
if self._closed:
self._raise_closed()
if self.is_absolute():
return self
# FIXME this must defer to the specific flavour (and, under Windows,
# use nt._getfullpathname())
obj = self._from_parts([os.getcwd()] + self._parts, init=False)
obj._init(template=self)
return obj
def resolve(self):
"""
Make the path absolute, resolving all symlinks on the way and also
normalizing it (for example turning slashes into backslashes under
Windows).
"""
if self._closed:
self._raise_closed()
s = self._flavour.resolve(self)
if s is None:
# No symlink resolution => for consistency, raise an error if
# the path doesn't exist or is forbidden
self.stat()
s = str(self.absolute())
# Now we have no symlinks in the path, it's safe to normalize it.
normed = self._flavour.pathmod.normpath(s)
obj = self._from_parts((normed,), init=False)
obj._init(template=self)
return obj
def stat(self):
"""
Return the result of the stat() system call on this path, like
os.stat() does.
"""
return self._accessor.stat(self)
def owner(self):
"""
Return the login name of the file owner.
"""
import pwd
return pwd.getpwuid(self.stat().st_uid).pw_name
def group(self):
"""
Return the group name of the file gid.
"""
import grp
return grp.getgrgid(self.stat().st_gid).gr_name
def open(self, mode='r', buffering=-1, encoding=None,
errors=None, newline=None):
"""
Open the file pointed by this path and return a file object, as
the built-in open() function does.
"""
if self._closed:
self._raise_closed()
return io.open(str(self), mode, buffering, encoding, errors, newline,
opener=self._opener)
def read_bytes(self):
"""
Open the file in bytes mode, read it, and close the file.
"""
with self.open(mode='rb') as f:
return f.read()
def read_text(self, encoding=None, errors=None):
"""
Open the file in text mode, read it, and close the file.
"""
with self.open(mode='r', encoding=encoding, errors=errors) as f:
return f.read()
def write_bytes(self, data):
"""
Open the file in bytes mode, write to it, and close the file.
"""
# type-check for the buffer interface before truncating the file
view = memoryview(data)
with self.open(mode='wb') as f:
return f.write(view)
def write_text(self, data, encoding=None, errors=None):
"""
Open the file in text mode, write to it, and close the file.
"""
if not isinstance(data, str):
raise TypeError('data must be str, not %s' %
data.__class__.__name__)
with self.open(mode='w', encoding=encoding, errors=errors) as f:
return f.write(data)
def touch(self, mode=0o666, exist_ok=True):
"""
Create this file with the given access mode, if it doesn't exist.
"""
if self._closed:
self._raise_closed()
if exist_ok:
# First try to bump modification time
# Implementation note: GNU touch uses the UTIME_NOW option of
# the utimensat() / futimens() functions.
try:
self._accessor.utime(self, None)
except OSError:
# Avoid exception chaining
pass
else:
return
flags = os.O_CREAT | os.O_WRONLY
if not exist_ok:
flags |= os.O_EXCL
fd = self._raw_open(flags, mode)
os.close(fd)
def mkdir(self, mode=0o777, parents=False, exist_ok=False):
if self._closed:
self._raise_closed()
if not parents:
try:
self._accessor.mkdir(self, mode)
except FileExistsError:
if not exist_ok or not self.is_dir():
raise
else:
try:
self._accessor.mkdir(self, mode)
except FileExistsError:
if not exist_ok or not self.is_dir():
raise
except OSError as e:
if e.errno != ENOENT:
raise
self.parent.mkdir(parents=True)
self._accessor.mkdir(self, mode)
def chmod(self, mode):
"""
Change the permissions of the path, like os.chmod().
"""
if self._closed:
self._raise_closed()
self._accessor.chmod(self, mode)
def lchmod(self, mode):
"""
Like chmod(), except if the path points to a symlink, the symlink's
permissions are changed, rather than its target's.
"""
if self._closed:
self._raise_closed()
self._accessor.lchmod(self, mode)
def unlink(self):
"""
Remove this file or link.
If the path is a directory, use rmdir() instead.
"""
if self._closed:
self._raise_closed()
self._accessor.unlink(self)
def rmdir(self):
"""
Remove this directory. The directory must be empty.
"""
if self._closed:
self._raise_closed()
self._accessor.rmdir(self)
def lstat(self):
"""
Like stat(), except if the path points to a symlink, the symlink's
status information is returned, rather than its target's.
"""
if self._closed:
self._raise_closed()
return self._accessor.lstat(self)
def rename(self, target):
"""
Rename this path to the given path.
"""
if self._closed:
self._raise_closed()
self._accessor.rename(self, target)
def replace(self, target):
"""
Rename this path to the given path, clobbering the existing
destination if it exists.
"""
if self._closed:
self._raise_closed()
self._accessor.replace(self, target)
def symlink_to(self, target, target_is_directory=False):
"""
Make this path a symlink pointing to the given path.
Note the order of arguments (self, target) is the reverse of os.symlink's.
"""
if self._closed:
self._raise_closed()
self._accessor.symlink(target, self, target_is_directory)
# Convenience functions for querying the stat results
def exists(self):
"""
Whether this path exists.
"""
try:
self.stat()
except OSError as e:
if e.errno not in (ENOENT, ENOTDIR):
raise
return False
return True
def is_dir(self):
"""
Whether this path is a directory.
"""
try:
return S_ISDIR(self.stat().st_mode)
except OSError as e:
if e.errno not in (ENOENT, ENOTDIR):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
def is_file(self):
"""
Whether this path is a regular file (also True for symlinks pointing
to regular files).
"""
try:
return S_ISREG(self.stat().st_mode)
except OSError as e:
if e.errno not in (ENOENT, ENOTDIR):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
def is_symlink(self):
"""
Whether this path is a symbolic link.
"""
try:
return S_ISLNK(self.lstat().st_mode)
except OSError as e:
if e.errno not in (ENOENT, ENOTDIR):
raise
# Path doesn't exist
return False
def is_block_device(self):
"""
Whether this path is a block device.
"""
try:
return S_ISBLK(self.stat().st_mode)
except OSError as e:
if e.errno not in (ENOENT, ENOTDIR):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
def is_char_device(self):
"""
Whether this path is a character device.
"""
try:
return S_ISCHR(self.stat().st_mode)
except OSError as e:
if e.errno not in (ENOENT, ENOTDIR):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
def is_fifo(self):
"""
Whether this path is a FIFO.
"""
try:
return S_ISFIFO(self.stat().st_mode)
except OSError as e:
if e.errno not in (ENOENT, ENOTDIR):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
def is_socket(self):
"""
Whether this path is a socket.
"""
try:
return S_ISSOCK(self.stat().st_mode)
except OSError as e:
if e.errno not in (ENOENT, ENOTDIR):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
def expanduser(self):
""" Return a new path with expanded ~ and ~user constructs
(as returned by os.path.expanduser)
"""
if (not (self._drv or self._root) and
self._parts and self._parts[0][:1] == '~'):
homedir = self._flavour.gethomedir(self._parts[0][1:])
return self._from_parts([homedir] + self._parts[1:])
return self
class PosixPath(Path, PurePosixPath):
__slots__ = ()
class WindowsPath(Path, PureWindowsPath):
__slots__ = ()
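# Illustrative sketch of the concrete Path API (the paths are examples; the
# class returned by Path() depends on os.name):
#
#     >>> p = Path.home() / '.profile'
#     >>> p.exists() and p.is_file()                             # doctest: +SKIP
#     True
#     >>> sorted(Path('.').glob('*.py'))                         # doctest: +SKIP
#     [PosixPath('pathlib.py')]
#     >>> Path('build/logs').mkdir(parents=True, exist_ok=True)  # doctest: +SKIP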
| apache-2.0 |
marc-sensenich/ansible | lib/ansible/modules/cloud/amazon/lambda.py | 30 | 22661 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lambda
short_description: Manage AWS Lambda functions
description:
- Allows for the management of Lambda functions.
version_added: '2.2'
requirements: [ boto3 ]
options:
name:
description:
- The name you want to assign to the function you are uploading. Cannot be changed.
required: true
state:
description:
- Create or delete Lambda function.
default: present
choices: [ 'present', 'absent' ]
runtime:
description:
- The runtime environment for the Lambda function you are uploading.
- Required when creating a function. Uses parameters as described in boto3 docs.
- Required when C(state=present).
      - For the list of supported runtimes, see U(https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html).
role:
description:
- The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS)
resources. You may use the bare ARN if the role belongs to the same AWS account.
- Required when C(state=present).
handler:
description:
- The function within your code that Lambda calls to begin execution.
zip_file:
description:
- A .zip file containing your deployment package
- If C(state=present) then either zip_file or s3_bucket must be present.
aliases: [ 'src' ]
s3_bucket:
description:
- Amazon S3 bucket name where the .zip file containing your deployment package is stored.
- If C(state=present) then either zip_file or s3_bucket must be present.
- C(s3_bucket) and C(s3_key) are required together.
s3_key:
description:
- The Amazon S3 object (the deployment package) key name you want to upload.
- C(s3_bucket) and C(s3_key) are required together.
s3_object_version:
description:
- The Amazon S3 object (the deployment package) version you want to upload.
description:
description:
- A short, user-defined function description. Lambda does not use this value. Assign a meaningful description as you see fit.
timeout:
description:
- The function maximum execution time in seconds after which Lambda should terminate the function.
default: 3
memory_size:
description:
- The amount of memory, in MB, your Lambda function is given.
default: 128
vpc_subnet_ids:
description:
- List of subnet IDs to run Lambda function in. Use this option if you need to access resources in your VPC. Leave empty if you don't want to run
the function in a VPC.
vpc_security_group_ids:
description:
- List of VPC security group IDs to associate with the Lambda function. Required when vpc_subnet_ids is used.
environment_variables:
description:
- A dictionary of environment variables the Lambda function is given.
aliases: [ 'environment' ]
version_added: "2.3"
dead_letter_arn:
description:
- The parent object that contains the target Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.
version_added: "2.3"
tags:
description:
- tag dict to apply to the function (requires botocore 1.5.40 or above).
version_added: "2.5"
author:
- 'Steyn Huizinga (@steynovich)'
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Create Lambda functions
- name: looped creation
lambda:
name: '{{ item.name }}'
state: present
zip_file: '{{ item.zip_file }}'
runtime: 'python2.7'
role: 'arn:aws:iam::987654321012:role/lambda_basic_execution'
handler: 'hello_python.my_handler'
vpc_subnet_ids:
- subnet-123abcde
- subnet-edcba321
vpc_security_group_ids:
- sg-123abcde
- sg-edcba321
environment_variables: '{{ item.env_vars }}'
tags:
key1: 'value1'
loop:
- name: HelloWorld
zip_file: hello-code.zip
env_vars:
key1: "first"
key2: "second"
- name: ByeBye
zip_file: bye-code.zip
env_vars:
key1: "1"
key2: "2"
# To remove previously added tags pass an empty dict
- name: remove tags
lambda:
name: 'Lambda function'
state: present
zip_file: 'code.zip'
runtime: 'python2.7'
role: 'arn:aws:iam::987654321012:role/lambda_basic_execution'
handler: 'hello_python.my_handler'
tags: {}
# Basic Lambda function deletion
- name: Delete Lambda functions HelloWorld and ByeBye
lambda:
name: '{{ item }}'
state: absent
loop:
- HelloWorld
- ByeBye
'''
RETURN = '''
code:
description: the lambda function location returned by get_function in boto3
returned: success
type: dict
sample:
{
'location': 'a presigned S3 URL',
'repository_type': 'S3',
}
configuration:
description: the lambda function metadata returned by get_function in boto3
returned: success
type: dict
sample:
{
'code_sha256': 'SHA256 hash',
'code_size': 123,
'description': 'My function',
'environment': {
'variables': {
'key': 'value'
}
},
'function_arn': 'arn:aws:lambda:us-east-1:123456789012:function:myFunction:1',
'function_name': 'myFunction',
'handler': 'index.handler',
'last_modified': '2017-08-01T00:00:00.000+0000',
'memory_size': 128,
'role': 'arn:aws:iam::123456789012:role/lambda_basic_execution',
'runtime': 'nodejs6.10',
'timeout': 3,
'version': '1',
'vpc_config': {
'security_group_ids': [],
'subnet_ids': []
}
}
'''
from ansible.module_utils._text import to_native
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, camel_dict_to_snake_dict
from ansible.module_utils.ec2 import compare_aws_tags
import base64
import hashlib
import re
import traceback
try:
from botocore.exceptions import ClientError, BotoCoreError, ValidationError, ParamValidationError
except ImportError:
pass # protected by AnsibleAWSModule
def get_account_id(module, region=None, endpoint=None, **aws_connect_kwargs):
"""return the account id we are currently working on
get_account_id tries too find out the account that we are working
on. It's not guaranteed that this will be easy so we try in
several different ways. Giving either IAM or STS privileges to
the account should be enough to permit this.
"""
account_id = None
try:
sts_client = boto3_conn(module, conn_type='client', resource='sts',
region=region, endpoint=endpoint, **aws_connect_kwargs)
account_id = sts_client.get_caller_identity().get('Account')
except ClientError:
try:
iam_client = boto3_conn(module, conn_type='client', resource='iam',
region=region, endpoint=endpoint, **aws_connect_kwargs)
account_id = iam_client.get_user()['User']['Arn'].split(':')[4]
except ClientError as e:
            if (e.response['Error']['Code'] == 'AccessDenied'):
                except_msg = to_native(e.message)
                match = re.search(r"arn:aws:iam::([0-9]{12,32}):\w+/", except_msg)
                if match:
                    account_id = match.group(1)
if account_id is None:
module.fail_json_aws(e, msg="getting account information")
except Exception as e:
module.fail_json_aws(e, msg="getting account information")
return account_id
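# Illustrative result (a sketch; the account id below is a placeholder):
#   account_id = get_account_id(module, region='us-east-1')   # e.g. '123456789012'
# main() later combines it with a bare role name into a full role ARN such as
# 'arn:aws:iam::123456789012:role/lambda_basic_execution'.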
def get_current_function(connection, function_name, qualifier=None):
try:
if qualifier is not None:
return connection.get_function(FunctionName=function_name, Qualifier=qualifier)
return connection.get_function(FunctionName=function_name)
except ClientError as e:
try:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
return None
except (KeyError, AttributeError):
pass
raise e
def sha256sum(filename):
hasher = hashlib.sha256()
with open(filename, 'rb') as f:
hasher.update(f.read())
code_hash = hasher.digest()
code_b64 = base64.b64encode(code_hash)
hex_digest = code_b64.decode('utf-8')
return hex_digest
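# Illustrative usage (a sketch; the path is a placeholder): sha256sum() returns
# the base64-encoded SHA256 digest of the file, which is the same format Lambda
# reports in CodeSha256, so the two values can be compared directly:
#   local_checksum = sha256sum('/tmp/my-function.zip')
#   remote_checksum = current_function['Configuration']['CodeSha256']
#   needs_upload = local_checksum != remote_checksum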
def set_tag(client, module, tags, function):
if not hasattr(client, "list_tags"):
module.fail_json(msg="Using tags requires botocore 1.5.40 or above")
changed = False
arn = function['Configuration']['FunctionArn']
try:
current_tags = client.list_tags(Resource=arn).get('Tags', {})
except ClientError as e:
module.fail_json(msg="Unable to list tags: {0}".format(to_native(e)),
exception=traceback.format_exc())
tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags=True)
try:
if tags_to_remove:
client.untag_resource(
Resource=arn,
TagKeys=tags_to_remove
)
changed = True
if tags_to_add:
client.tag_resource(
Resource=arn,
Tags=tags_to_add
)
changed = True
except ClientError as e:
module.fail_json(msg="Unable to tag resource {0}: {1}".format(arn,
to_native(e)), exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
except BotoCoreError as e:
module.fail_json(msg="Unable to tag resource {0}: {1}".format(arn,
to_native(e)), exception=traceback.format_exc())
return changed
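# Rough illustration of the tag diff performed above (hypothetical values):
#   current_tags = {'env': 'dev', 'team': 'a'}
#   tags = {'env': 'prod'}
#   compare_aws_tags(current_tags, tags, purge_tags=True)
#   # -> ({'env': 'prod'}, ['team'])  i.e. (tags_to_add, tags_to_remove)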
def main():
argument_spec = dict(
name=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
runtime=dict(),
role=dict(),
handler=dict(),
zip_file=dict(aliases=['src']),
s3_bucket=dict(),
s3_key=dict(),
s3_object_version=dict(),
description=dict(default=''),
timeout=dict(type='int', default=3),
memory_size=dict(type='int', default=128),
vpc_subnet_ids=dict(type='list'),
vpc_security_group_ids=dict(type='list'),
environment_variables=dict(type='dict'),
dead_letter_arn=dict(),
tags=dict(type='dict'),
)
mutually_exclusive = [['zip_file', 's3_key'],
['zip_file', 's3_bucket'],
['zip_file', 's3_object_version']]
required_together = [['s3_key', 's3_bucket'],
['vpc_subnet_ids', 'vpc_security_group_ids']]
required_if = [['state', 'present', ['runtime', 'handler', 'role']]]
module = AnsibleAWSModule(argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=mutually_exclusive,
required_together=required_together,
required_if=required_if)
name = module.params.get('name')
state = module.params.get('state').lower()
runtime = module.params.get('runtime')
role = module.params.get('role')
handler = module.params.get('handler')
s3_bucket = module.params.get('s3_bucket')
s3_key = module.params.get('s3_key')
s3_object_version = module.params.get('s3_object_version')
zip_file = module.params.get('zip_file')
description = module.params.get('description')
timeout = module.params.get('timeout')
memory_size = module.params.get('memory_size')
vpc_subnet_ids = module.params.get('vpc_subnet_ids')
vpc_security_group_ids = module.params.get('vpc_security_group_ids')
environment_variables = module.params.get('environment_variables')
dead_letter_arn = module.params.get('dead_letter_arn')
tags = module.params.get('tags')
check_mode = module.check_mode
changed = False
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg='region must be specified')
try:
client = boto3_conn(module, conn_type='client', resource='lambda',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
except (ClientError, ValidationError) as e:
module.fail_json_aws(e, msg="Trying to connect to AWS")
if state == 'present':
if role.startswith('arn:aws:iam'):
role_arn = role
else:
# get account ID and assemble ARN
account_id = get_account_id(module, region=region, endpoint=ec2_url, **aws_connect_kwargs)
role_arn = 'arn:aws:iam::{0}:role/{1}'.format(account_id, role)
    # Get function configuration if present, None otherwise
current_function = get_current_function(client, name)
# Update existing Lambda function
if state == 'present' and current_function:
# Get current state
current_config = current_function['Configuration']
current_version = None
# Update function configuration
func_kwargs = {'FunctionName': name}
# Update configuration if needed
if role_arn and current_config['Role'] != role_arn:
func_kwargs.update({'Role': role_arn})
if handler and current_config['Handler'] != handler:
func_kwargs.update({'Handler': handler})
if description and current_config['Description'] != description:
func_kwargs.update({'Description': description})
if timeout and current_config['Timeout'] != timeout:
func_kwargs.update({'Timeout': timeout})
if memory_size and current_config['MemorySize'] != memory_size:
func_kwargs.update({'MemorySize': memory_size})
if (environment_variables is not None) and (current_config.get(
'Environment', {}).get('Variables', {}) != environment_variables):
func_kwargs.update({'Environment': {'Variables': environment_variables}})
if dead_letter_arn is not None:
if current_config.get('DeadLetterConfig'):
if current_config['DeadLetterConfig']['TargetArn'] != dead_letter_arn:
func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
else:
if dead_letter_arn != "":
func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
# Check for unsupported mutation
if current_config['Runtime'] != runtime:
module.fail_json(msg='Cannot change runtime. Please recreate the function')
# If VPC configuration is desired
if vpc_subnet_ids or vpc_security_group_ids:
if not vpc_subnet_ids or not vpc_security_group_ids:
module.fail_json(msg='vpc connectivity requires at least one security group and one subnet')
if 'VpcConfig' in current_config:
# Compare VPC config with current config
current_vpc_subnet_ids = current_config['VpcConfig']['SubnetIds']
current_vpc_security_group_ids = current_config['VpcConfig']['SecurityGroupIds']
subnet_net_id_changed = sorted(vpc_subnet_ids) != sorted(current_vpc_subnet_ids)
vpc_security_group_ids_changed = sorted(vpc_security_group_ids) != sorted(current_vpc_security_group_ids)
if 'VpcConfig' not in current_config or subnet_net_id_changed or vpc_security_group_ids_changed:
new_vpc_config = {'SubnetIds': vpc_subnet_ids,
'SecurityGroupIds': vpc_security_group_ids}
func_kwargs.update({'VpcConfig': new_vpc_config})
else:
            # No VPC configuration is desired, ensure VPC config is empty when present in current config
if 'VpcConfig' in current_config and current_config['VpcConfig'].get('VpcId'):
func_kwargs.update({'VpcConfig': {'SubnetIds': [], 'SecurityGroupIds': []}})
# Upload new configuration if configuration has changed
if len(func_kwargs) > 1:
try:
if not check_mode:
response = client.update_function_configuration(**func_kwargs)
current_version = response['Version']
changed = True
except (ParamValidationError, ClientError) as e:
module.fail_json_aws(e, msg="Trying to update lambda configuration")
# Update code configuration
code_kwargs = {'FunctionName': name, 'Publish': True}
# Update S3 location
if s3_bucket and s3_key:
# If function is stored on S3 always update
code_kwargs.update({'S3Bucket': s3_bucket, 'S3Key': s3_key})
# If S3 Object Version is given
if s3_object_version:
code_kwargs.update({'S3ObjectVersion': s3_object_version})
# Compare local checksum, update remote code when different
elif zip_file:
local_checksum = sha256sum(zip_file)
remote_checksum = current_config['CodeSha256']
# Only upload new code when local code is different compared to the remote code
if local_checksum != remote_checksum:
try:
with open(zip_file, 'rb') as f:
encoded_zip = f.read()
code_kwargs.update({'ZipFile': encoded_zip})
except IOError as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
# Tag Function
if tags is not None:
if set_tag(client, module, tags, current_function):
changed = True
# Upload new code if needed (e.g. code checksum has changed)
if len(code_kwargs) > 2:
try:
if not check_mode:
response = client.update_function_code(**code_kwargs)
current_version = response['Version']
changed = True
except (ParamValidationError, ClientError) as e:
module.fail_json_aws(e, msg="Trying to upload new code")
# Describe function code and configuration
response = get_current_function(client, name, qualifier=current_version)
if not response:
module.fail_json(msg='Unable to get function information after updating')
# We're done
module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
    # Function doesn't exist, create new Lambda function
elif state == 'present':
if s3_bucket and s3_key:
# If function is stored on S3
code = {'S3Bucket': s3_bucket,
'S3Key': s3_key}
if s3_object_version:
code.update({'S3ObjectVersion': s3_object_version})
elif zip_file:
# If function is stored in local zipfile
try:
with open(zip_file, 'rb') as f:
zip_content = f.read()
code = {'ZipFile': zip_content}
except IOError as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
else:
module.fail_json(msg='Either S3 object or path to zipfile required')
func_kwargs = {'FunctionName': name,
'Publish': True,
'Runtime': runtime,
'Role': role_arn,
'Code': code,
'Timeout': timeout,
'MemorySize': memory_size,
}
if description is not None:
func_kwargs.update({'Description': description})
if handler is not None:
func_kwargs.update({'Handler': handler})
if environment_variables:
func_kwargs.update({'Environment': {'Variables': environment_variables}})
if dead_letter_arn:
func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
# If VPC configuration is given
if vpc_subnet_ids or vpc_security_group_ids:
if not vpc_subnet_ids or not vpc_security_group_ids:
module.fail_json(msg='vpc connectivity requires at least one security group and one subnet')
func_kwargs.update({'VpcConfig': {'SubnetIds': vpc_subnet_ids,
'SecurityGroupIds': vpc_security_group_ids}})
# Finally try to create function
current_version = None
try:
if not check_mode:
response = client.create_function(**func_kwargs)
current_version = response['Version']
changed = True
except (ParamValidationError, ClientError) as e:
module.fail_json_aws(e, msg="Trying to create function")
# Tag Function
if tags is not None:
if set_tag(client, module, tags, get_current_function(client, name)):
changed = True
response = get_current_function(client, name, qualifier=current_version)
if not response:
module.fail_json(msg='Unable to get function information after creating')
module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
# Delete existing Lambda function
if state == 'absent' and current_function:
try:
if not check_mode:
client.delete_function(FunctionName=name)
changed = True
except (ParamValidationError, ClientError) as e:
module.fail_json_aws(e, msg="Trying to delete Lambda function")
module.exit_json(changed=changed)
# Function already absent, do nothing
elif state == 'absent':
module.exit_json(changed=changed)
if __name__ == '__main__':
main()
| gpl-3.0 |
AsimmHirani/ISpyPi | tensorflow/contrib/tensorflow-master/tensorflow/tools/compatibility/tf_upgrade.py | 17 | 25376 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from pre-1.0 TensorFlow to 1.0 TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import ast
import collections
import os
import shutil
import sys
import tempfile
import traceback
class APIChangeSpec(object):
"""List of maps that describe what changed in the API."""
def __init__(self):
# Maps from a function name to a dictionary that describes how to
# map from an old argument keyword to the new argument keyword.
self.function_keyword_renames = {
"tf.count_nonzero": {
"reduction_indices": "axis"
},
"tf.reduce_all": {
"reduction_indices": "axis"
},
"tf.reduce_any": {
"reduction_indices": "axis"
},
"tf.reduce_max": {
"reduction_indices": "axis"
},
"tf.reduce_mean": {
"reduction_indices": "axis"
},
"tf.reduce_min": {
"reduction_indices": "axis"
},
"tf.reduce_prod": {
"reduction_indices": "axis"
},
"tf.reduce_sum": {
"reduction_indices": "axis"
},
"tf.reduce_logsumexp": {
"reduction_indices": "axis"
},
"tf.expand_dims": {
"dim": "axis"
},
"tf.argmax": {
"dimension": "axis"
},
"tf.argmin": {
"dimension": "axis"
},
"tf.reduce_join": {
"reduction_indices": "axis"
},
"tf.sparse_concat": {
"concat_dim": "axis"
},
"tf.sparse_split": {
"split_dim": "axis"
},
"tf.sparse_reduce_sum": {
"reduction_axes": "axis"
},
"tf.reverse_sequence": {
"seq_dim": "seq_axis",
"batch_dim": "batch_axis"
},
"tf.sparse_reduce_sum_sparse": {
"reduction_axes": "axis"
},
"tf.squeeze": {
"squeeze_dims": "axis"
},
"tf.split": {
"split_dim": "axis",
"num_split": "num_or_size_splits"
},
"tf.concat": {
"concat_dim": "axis"
},
}
# Mapping from function to the new name of the function
self.function_renames = {
"tf.inv": "tf.reciprocal",
"tf.contrib.deprecated.scalar_summary": "tf.summary.scalar",
"tf.contrib.deprecated.histogram_summary": "tf.summary.histogram",
"tf.listdiff": "tf.setdiff1d",
"tf.list_diff": "tf.setdiff1d",
"tf.mul": "tf.multiply",
"tf.neg": "tf.negative",
"tf.sub": "tf.subtract",
"tf.train.SummaryWriter": "tf.summary.FileWriter",
"tf.scalar_summary": "tf.summary.scalar",
"tf.histogram_summary": "tf.summary.histogram",
"tf.audio_summary": "tf.summary.audio",
"tf.image_summary": "tf.summary.image",
"tf.merge_summary": "tf.summary.merge",
"tf.merge_all_summaries": "tf.summary.merge_all",
"tf.image.per_image_whitening": "tf.image.per_image_standardization",
"tf.all_variables": "tf.global_variables",
"tf.VARIABLES": "tf.GLOBAL_VARIABLES",
"tf.initialize_all_variables": "tf.global_variables_initializer",
"tf.initialize_variables": "tf.variables_initializer",
"tf.initialize_local_variables": "tf.local_variables_initializer",
"tf.batch_matrix_diag": "tf.matrix_diag",
"tf.batch_band_part": "tf.band_part",
"tf.batch_set_diag": "tf.set_diag",
"tf.batch_matrix_transpose": "tf.matrix_transpose",
"tf.batch_matrix_determinant": "tf.matrix_determinant",
"tf.batch_matrix_inverse": "tf.matrix_inverse",
"tf.batch_cholesky": "tf.cholesky",
"tf.batch_cholesky_solve": "tf.cholesky_solve",
"tf.batch_matrix_solve": "tf.matrix_solve",
"tf.batch_matrix_triangular_solve": "tf.matrix_triangular_solve",
"tf.batch_matrix_solve_ls": "tf.matrix_solve_ls",
"tf.batch_self_adjoint_eig": "tf.self_adjoint_eig",
"tf.batch_self_adjoint_eigvals": "tf.self_adjoint_eigvals",
"tf.batch_svd": "tf.svd",
"tf.batch_fft": "tf.fft",
"tf.batch_ifft": "tf.ifft",
"tf.batch_ifft2d": "tf.ifft2d",
"tf.batch_fft3d": "tf.fft3d",
"tf.batch_ifft3d": "tf.ifft3d",
"tf.select": "tf.where",
"tf.complex_abs": "tf.abs",
"tf.batch_matmul": "tf.matmul",
"tf.pack": "tf.stack",
"tf.unpack": "tf.unstack",
}
self.change_to_function = {
"tf.ones_initializer",
"tf.zeros_initializer",
}
# Functions that were reordered should be changed to the new keyword args
# for safety, if positional arguments are used. If you have reversed the
# positional arguments yourself, this could do the wrong thing.
self.function_reorders = {
"tf.split": ["axis", "num_or_size_splits", "value", "name"],
"tf.sparse_split": ["axis", "num_or_size_splits", "value", "name"],
"tf.concat": ["concat_dim", "values", "name"],
"tf.svd": ["tensor", "compute_uv", "full_matrices", "name"],
"tf.nn.softmax_cross_entropy_with_logits": [
"logits", "labels", "dim", "name"],
"tf.nn.sparse_softmax_cross_entropy_with_logits": [
"logits", "labels", "name"],
"tf.nn.sigmoid_cross_entropy_with_logits": [
"logits", "labels", "name"]
}
# Specially handled functions.
self.function_handle = {"tf.reverse": self._reverse_handler}
@staticmethod
def _reverse_handler(file_edit_recorder, node):
# TODO(aselle): Could check for a literal list of bools and try to convert
# them to indices.
comment = ("ERROR: tf.reverse has had its argument semantics changed\n"
"significantly the converter cannot detect this reliably, so you"
"need to inspect this usage manually.\n")
file_edit_recorder.add(comment,
node.lineno,
node.col_offset,
"tf.reverse",
"tf.reverse",
error="tf.reverse requires manual check.")
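# A rough illustration of how the tables above are applied by the visitor
# classes below (the input snippets are hypothetical user code, not part of
# this script):
#   tf.mul(a, b)          -> tf.multiply(a, b)            # function_renames
#   tf.ones_initializer   -> tf.ones_initializer()        # change_to_function
#   tf.split(0, 2, x)     -> tf.split(axis=0, num_or_size_splits=2, value=x)
#                                                         # function_reorders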
class FileEditTuple(collections.namedtuple(
"FileEditTuple", ["comment", "line", "start", "old", "new"])):
"""Each edit that is recorded by a FileEditRecorder.
Fields:
comment: A description of the edit and why it was made.
line: The line number in the file where the edit occurs (1-indexed).
    start: The column offset within the line where the edit occurs (0-indexed).
old: text string to remove (this must match what was in file).
new: text string to add in place of `old`.
"""
__slots__ = ()
class FileEditRecorder(object):
"""Record changes that need to be done to the file."""
def __init__(self, filename):
# all edits are lists of chars
self._filename = filename
self._line_to_edit = collections.defaultdict(list)
self._errors = []
def process(self, text):
"""Process a list of strings, each corresponding to the recorded changes.
Args:
text: A list of lines of text (assumed to contain newlines)
Returns:
A tuple of the modified text and a textual description of what is done.
Raises:
ValueError: if substitution source location does not have expected text.
"""
change_report = ""
# Iterate of each line
for line, edits in self._line_to_edit.items():
offset = 0
      # Sort by column so that edits are applied in order; this keeps the
      # index adjustments cumulative for changes that alter the string
      # length.
edits.sort(key=lambda x: x.start)
# Extract each line to a list of characters, because mutable lists
# are editable, unlike immutable strings.
char_array = list(text[line - 1])
# Record a description of the change
change_report += "%r Line %d\n" % (self._filename, line)
change_report += "-" * 80 + "\n\n"
for e in edits:
change_report += "%s\n" % e.comment
change_report += "\n Old: %s" % (text[line - 1])
# Make underscore buffers for underlining where in the line the edit was
change_list = [" "] * len(text[line - 1])
change_list_new = [" "] * len(text[line - 1])
# Iterate for each edit
for e in edits:
# Create effective start, end by accounting for change in length due
# to previous edits
start_eff = e.start + offset
end_eff = start_eff + len(e.old)
# Make sure the edit is changing what it should be changing
old_actual = "".join(char_array[start_eff:end_eff])
if old_actual != e.old:
raise ValueError("Expected text %r but got %r" %
("".join(e.old), "".join(old_actual)))
# Make the edit
char_array[start_eff:end_eff] = list(e.new)
# Create the underline highlighting of the before and after
change_list[e.start:e.start + len(e.old)] = "~" * len(e.old)
change_list_new[start_eff:end_eff] = "~" * len(e.new)
# Keep track of how to generate effective ranges
offset += len(e.new) - len(e.old)
# Finish the report comment
change_report += " %s\n" % "".join(change_list)
text[line - 1] = "".join(char_array)
change_report += " New: %s" % (text[line - 1])
change_report += " %s\n\n" % "".join(change_list_new)
return "".join(text), change_report, self._errors
def add(self, comment, line, start, old, new, error=None):
"""Add a new change that is needed.
Args:
comment: A description of what was changed
line: Line number (1 indexed)
start: Column offset (0 indexed)
old: old text
new: new text
error: this "edit" is something that cannot be fixed automatically
Returns:
None
"""
self._line_to_edit[line].append(
FileEditTuple(comment, line, start, old, new))
if error:
self._errors.append("%s:%d: %s" % (self._filename, line, error))
class TensorFlowCallVisitor(ast.NodeVisitor):
"""AST Visitor that finds TensorFlow Function calls.
Updates function calls from old API version to new API version.
"""
def __init__(self, filename, lines):
self._filename = filename
self._file_edit = FileEditRecorder(filename)
self._lines = lines
self._api_change_spec = APIChangeSpec()
def process(self, lines):
return self._file_edit.process(lines)
def generic_visit(self, node):
ast.NodeVisitor.generic_visit(self, node)
def _rename_functions(self, node, full_name):
function_renames = self._api_change_spec.function_renames
try:
new_name = function_renames[full_name]
self._file_edit.add("Renamed function %r to %r" % (full_name,
new_name),
node.lineno, node.col_offset, full_name, new_name)
except KeyError:
pass
def _get_attribute_full_path(self, node):
"""Traverse an attribute to generate a full name e.g. tf.foo.bar.
Args:
node: A Node of type Attribute.
Returns:
a '.'-delimited full-name or None if the tree was not a simple form.
      i.e. `(foo()+b).bar` returns None, while `a.b.c` would return "a.b.c".
"""
curr = node
items = []
while not isinstance(curr, ast.Name):
if not isinstance(curr, ast.Attribute):
return None
items.append(curr.attr)
curr = curr.value
items.append(curr.id)
return ".".join(reversed(items))
def _find_true_position(self, node):
"""Return correct line number and column offset for a given node.
    This is necessary mainly because ast.ListComp reports the location of the
    first token after the list comprehension's opening bracket.
Args:
node: Node for which we wish to know the lineno and col_offset
"""
import re
    find_open = re.compile(r"^\s*(\[).*$")
find_string_chars = re.compile("['\"]")
if isinstance(node, ast.ListComp):
# Strangely, ast.ListComp returns the col_offset of the first token
# after the '[' token which appears to be a bug. Workaround by
# explicitly finding the real start of the list comprehension.
line = node.lineno
col = node.col_offset
# loop over lines
while 1:
        # Reverse the text and use a regular expression to search for whitespace
text = self._lines[line-1]
reversed_preceding_text = text[:col][::-1]
# First find if a [ can be found with only whitespace between it and
# col.
m = find_open.match(reversed_preceding_text)
if m:
new_col_offset = col - m.start(1) - 1
return line, new_col_offset
else:
if (reversed_preceding_text=="" or
reversed_preceding_text.isspace()):
line = line - 1
prev_line = self._lines[line - 1]
# TODO(aselle):
# this is poor comment detection, but it is good enough for
# cases where the comment does not contain string literal starting/
# ending characters. If ast gave us start and end locations of the
# ast nodes rather than just start, we could use string literal
# node ranges to filter out spurious #'s that appear in string
# literals.
comment_start = prev_line.find("#")
if comment_start == -1:
col = len(prev_line) -1
elif find_string_chars.search(prev_line[comment_start:]) is None:
col = comment_start
else:
return None, None
else:
return None, None
    # Most other nodes return proper locations (`with` notably does not, but
    # it is not possible to use that in an argument), so use the node's own
    # position.
return node.lineno, node.col_offset
def visit_Call(self, node): # pylint: disable=invalid-name
"""Handle visiting a call node in the AST.
Args:
node: Current Node
"""
# Find a simple attribute name path e.g. "tf.foo.bar"
full_name = self._get_attribute_full_path(node.func)
# Make sure the func is marked as being part of a call
node.func.is_function_for_call = True
if full_name and full_name.startswith("tf."):
# Call special handlers
function_handles = self._api_change_spec.function_handle
if full_name in function_handles:
function_handles[full_name](self._file_edit, node)
# Examine any non-keyword argument and make it into a keyword argument
# if reordering required.
function_reorders = self._api_change_spec.function_reorders
function_keyword_renames = (
self._api_change_spec.function_keyword_renames)
if full_name in function_reorders:
reordered = function_reorders[full_name]
for idx, arg in enumerate(node.args):
lineno, col_offset = self._find_true_position(arg)
if lineno is None or col_offset is None:
self._file_edit.add(
"Failed to add keyword %r to reordered function %r"
% (reordered[idx], full_name), arg.lineno, arg.col_offset,
"", "",
error="A necessary keyword argument failed to be inserted.")
else:
keyword_arg = reordered[idx]
if (full_name in function_keyword_renames and
keyword_arg in function_keyword_renames[full_name]):
keyword_arg = function_keyword_renames[full_name][keyword_arg]
self._file_edit.add("Added keyword %r to reordered function %r"
% (reordered[idx], full_name), lineno,
col_offset, "", keyword_arg + "=")
# Examine each keyword argument and convert it to the final renamed form
renamed_keywords = ({} if full_name not in function_keyword_renames else
function_keyword_renames[full_name])
for keyword in node.keywords:
argkey = keyword.arg
argval = keyword.value
if argkey in renamed_keywords:
argval_lineno, argval_col_offset = self._find_true_position(argval)
if (argval_lineno is not None and argval_col_offset is not None):
# TODO(aselle): We should scan backward to find the start of the
# keyword key. Unfortunately ast does not give you the location of
# keyword keys, so we are forced to infer it from the keyword arg
# value.
key_start = argval_col_offset - len(argkey) - 1
key_end = key_start + len(argkey) + 1
if self._lines[argval_lineno - 1][key_start:key_end] == argkey + "=":
self._file_edit.add("Renamed keyword argument from %r to %r" %
(argkey, renamed_keywords[argkey]),
argval_lineno,
argval_col_offset - len(argkey) - 1,
argkey + "=", renamed_keywords[argkey] + "=")
continue
self._file_edit.add(
"Failed to rename keyword argument from %r to %r" %
(argkey, renamed_keywords[argkey]),
argval.lineno,
argval.col_offset - len(argkey) - 1,
"", "",
error="Failed to find keyword lexographically. Fix manually.")
ast.NodeVisitor.generic_visit(self, node)
def visit_Attribute(self, node): # pylint: disable=invalid-name
"""Handle bare Attributes i.e. [tf.foo, tf.bar].
Args:
node: Node that is of type ast.Attribute
"""
full_name = self._get_attribute_full_path(node)
if full_name and full_name.startswith("tf."):
self._rename_functions(node, full_name)
if full_name in self._api_change_spec.change_to_function:
if not hasattr(node, "is_function_for_call"):
new_text = full_name + "()"
self._file_edit.add("Changed %r to %r"%(full_name, new_text),
node.lineno, node.col_offset, full_name, new_text)
ast.NodeVisitor.generic_visit(self, node)
class TensorFlowCodeUpgrader(object):
"""Class that handles upgrading a set of Python files to TensorFlow 1.0."""
def __init__(self):
pass
def process_file(self, in_filename, out_filename):
"""Process the given python file for incompatible changes.
Args:
in_filename: filename to parse
out_filename: output file to write to
Returns:
A tuple representing number of files processed, log of actions, errors
"""
    # Write to a temporary file, just in case we are doing an in-place modify.
with open(in_filename, "r") as in_file, \
tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
ret = self.process_opened_file(
in_filename, in_file, out_filename, temp_file)
shutil.move(temp_file.name, out_filename)
return ret
# Broad exceptions are required here because ast throws whatever it wants.
# pylint: disable=broad-except
def process_opened_file(self, in_filename, in_file, out_filename, out_file):
"""Process the given python file for incompatible changes.
This function is split out to facilitate StringIO testing from
tf_upgrade_test.py.
Args:
in_filename: filename to parse
in_file: opened file (or StringIO)
out_filename: output file to write to
out_file: opened file (or StringIO)
Returns:
A tuple representing number of files processed, log of actions, errors
"""
process_errors = []
text = "-" * 80 + "\n"
text += "Processing file %r\n outputting to %r\n" % (in_filename,
out_filename)
text += "-" * 80 + "\n\n"
parsed_ast = None
lines = in_file.readlines()
try:
parsed_ast = ast.parse("".join(lines))
except Exception:
text += "Failed to parse %r\n\n" % in_filename
text += traceback.format_exc()
if parsed_ast:
visitor = TensorFlowCallVisitor(in_filename, lines)
visitor.visit(parsed_ast)
out_text, new_text, process_errors = visitor.process(lines)
text += new_text
if out_file:
out_file.write(out_text)
text += "\n"
return 1, text, process_errors
# pylint: enable=broad-except
def process_tree(self, root_directory, output_root_directory):
"""Processes upgrades on an entire tree of python files in place.
    Note that only Python files are processed. If you have custom code in
    other languages, you will need to upgrade it manually.
Args:
root_directory: Directory to walk and process.
output_root_directory: Directory to use as base
Returns:
      A tuple of the number of files processed, the report string for all files, and errors
"""
# make sure output directory doesn't exist
if output_root_directory and os.path.exists(output_root_directory):
print("Output directory %r must not already exist." % (
output_root_directory))
sys.exit(1)
# make sure output directory does not overlap with root_directory
norm_root = os.path.split(os.path.normpath(root_directory))
norm_output = os.path.split(os.path.normpath(output_root_directory))
if norm_root == norm_output:
print("Output directory %r same as input directory %r" % (
root_directory, output_root_directory))
sys.exit(1)
# Collect list of files to process (we do this to correctly handle if the
# user puts the output directory in some sub directory of the input dir)
files_to_process = []
for dir_name, _, file_list in os.walk(root_directory):
py_files = [f for f in file_list if f.endswith(".py")]
for filename in py_files:
fullpath = os.path.join(dir_name, filename)
fullpath_output = os.path.join(
output_root_directory, os.path.relpath(fullpath, root_directory))
files_to_process.append((fullpath, fullpath_output))
file_count = 0
tree_errors = []
report = ""
report += ("=" * 80) + "\n"
report += "Input tree: %r\n" % root_directory
report += ("=" * 80) + "\n"
for input_path, output_path in files_to_process:
output_directory = os.path.dirname(output_path)
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
file_count += 1
_, l_report, l_errors = self.process_file(input_path, output_path)
tree_errors += l_errors
report += l_report
return file_count, report, tree_errors
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Convert a TensorFlow Python file to 1.0
Simple usage:
tf_convert.py --infile foo.py --outfile bar.py
tf_convert.py --intree ~/code/old --outtree ~/code/new
""")
parser.add_argument(
"--infile",
dest="input_file",
help="If converting a single file, the name of the file "
"to convert")
parser.add_argument(
"--outfile",
dest="output_file",
help="If converting a single file, the output filename.")
parser.add_argument(
"--intree",
dest="input_tree",
help="If converting a whole tree of files, the directory "
"to read from (relative or absolute).")
parser.add_argument(
"--outtree",
dest="output_tree",
help="If converting a whole tree of files, the output "
"directory (relative or absolute).")
parser.add_argument(
"--reportfile",
dest="report_filename",
help=("The name of the file where the report log is "
"stored."
"(default: %(default)s)"),
default="report.txt")
args = parser.parse_args()
upgrade = TensorFlowCodeUpgrader()
report_text = None
report_filename = args.report_filename
files_processed = 0
if args.input_file:
files_processed, report_text, errors = upgrade.process_file(
args.input_file, args.output_file)
files_processed = 1
elif args.input_tree:
files_processed, report_text, errors = upgrade.process_tree(
args.input_tree, args.output_tree)
else:
parser.print_help()
if report_text:
open(report_filename, "w").write(report_text)
print("TensorFlow 1.0 Upgrade Script")
print("-----------------------------")
print("Converted %d files\n" % files_processed)
print("Detected %d errors that require attention" % len(errors))
print("-" * 80)
print("\n".join(errors))
print("\nMake sure to read the detailed log %r\n" % report_filename)
| apache-2.0 |
qgis/QGIS | python/plugins/db_manager/db_plugins/vlayers/data_model.py | 53 | 5436 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : Virtual layers plugin for DB Manager
Date : December 2015
copyright : (C) 2015 by Hugo Mercier
email : hugo dot mercier at oslandia dot com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from ..data_model import (TableDataModel,
BaseTableModel,
SqlResultModelAsync,
SqlResultModelTask)
from .connector import VLayerRegistry, getQueryGeometryName
from .plugin import LVectorTable
from ..plugin import DbError, BaseError
from qgis.PyQt.QtCore import QTime, QTemporaryFile
from qgis.core import (QgsVectorLayer,
QgsWkbTypes,
QgsVirtualLayerDefinition,
QgsVirtualLayerTask,
QgsTask)
class LTableDataModel(TableDataModel):
def __init__(self, table, parent=None):
TableDataModel.__init__(self, table, parent)
self.layer = None
if isinstance(table, LVectorTable):
self.layer = VLayerRegistry.instance().getLayer(table.name)
else:
self.layer = VLayerRegistry.instance().getLayer(table)
if not self.layer:
return
# populate self.resdata
self.resdata = []
for f in self.layer.getFeatures():
a = f.attributes()
# add the geometry type
if f.hasGeometry():
a.append(QgsWkbTypes.displayString(f.geometry().wkbType()))
else:
a.append('None')
self.resdata.append(a)
self.fetchedFrom = 0
self.fetchedCount = len(self.resdata)
def rowCount(self, index=None):
if self.layer:
return self.layer.featureCount()
return 0
class LSqlResultModelTask(SqlResultModelTask):
def __init__(self, db, sql, parent):
super().__init__(db, sql, parent)
tf = QTemporaryFile()
tf.open()
path = tf.fileName()
tf.close()
df = QgsVirtualLayerDefinition()
df.setFilePath(path)
df.setQuery(sql)
self.subtask = QgsVirtualLayerTask(df)
self.addSubTask(self.subtask, [], QgsTask.ParentDependsOnSubTask)
def run(self):
try:
path = self.subtask.definition().filePath()
sql = self.subtask.definition().query()
self.model = LSqlResultModel(self.db, sql, None, self.subtask.layer(), path)
except Exception as e:
self.error = BaseError(str(e))
return False
return True
def cancel(self):
SqlResultModelTask.cancel(self)
class LSqlResultModelAsync(SqlResultModelAsync):
def __init__(self, db, sql, parent=None):
super().__init__()
self.task = LSqlResultModelTask(db, sql, parent)
self.task.taskCompleted.connect(self.modelDone)
self.task.taskTerminated.connect(self.modelDone)
def modelDone(self):
self.status = self.task.status
self.model = self.task.model
if self.task.subtask.exceptionText():
self.error = BaseError(self.task.subtask.exceptionText())
self.done.emit()
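# Minimal usage sketch (assumed caller code, not part of this module): the
# async model wraps a QgsTask, so a caller would typically do something like
#   model = LSqlResultModelAsync(db, 'SELECT * FROM my_layer')
#   model.done.connect(on_model_done)   # read model.model / model.error there
#   QgsApplication.taskManager().addTask(model.task)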
class LSqlResultModel(BaseTableModel):
def __init__(self, db, sql, parent=None, layer=None, path=None):
t = QTime()
t.start()
if not layer:
tf = QTemporaryFile()
tf.open()
path = tf.fileName()
tf.close()
df = QgsVirtualLayerDefinition()
df.setFilePath(path)
df.setQuery(sql)
layer = QgsVectorLayer(df.toString(), "vv", "virtual")
self._secs = t.elapsed() / 1000.0
data = []
header = []
if not layer.isValid():
raise DbError(layer.dataProvider().error().summary(), sql)
else:
header = [f.name() for f in layer.fields()]
has_geometry = False
if layer.geometryType() != QgsWkbTypes.NullGeometry:
gn = getQueryGeometryName(path)
if gn:
has_geometry = True
header += [gn]
for f in layer.getFeatures():
a = f.attributes()
if has_geometry:
if f.hasGeometry():
a += [f.geometry().asWkt()]
else:
a += [None]
data += [a]
self._secs = 0
self._affectedRows = len(data)
BaseTableModel.__init__(self, header, data, parent)
def secs(self):
return self._secs
def affectedRows(self):
return self._affectedRows
| gpl-2.0 |
nivekkagicom/uncrustify | scripts/check_options.py | 8 | 2333 | #! /usr/bin/env python
#
# Check the option usage.
# Make sure the union member matches the option type.
#
from os.path import dirname, join, abspath
from os import listdir, EX_OK, EX_DATAERR
from fnmatch import filter
# just use the first letter of the member name - should be unique
map_access_type = {
'b': 'AT_BOOL',
'a': 'AT_IARF',
'n': 'AT_NUM',
'u': 'AT_UNUM',
'l': 'AT_LINE',
't': 'AT_POS',
}
map_option_type = {}
# Check that the right union accessor is used wherever cpd.settings is accessed in the file
def check_file(file_path):
problems = 0
line_no = 0
fd = open(file_path, 'r')
for line in fd:
line_no += 1
pos_cpd_s = line.find('cpd.settings[UO_')
pos_cpd_e = line[pos_cpd_s:].find(']')
if pos_cpd_s > 0 and pos_cpd_e > 0:
pos_option_s = pos_cpd_s + 13
pos_option_e = pos_cpd_s + pos_cpd_e
option = line[pos_option_s : pos_option_e]
union_access = line[pos_option_e + 2]
if option in map_option_type and union_access in map_access_type:
if map_option_type[option] != map_access_type[union_access]:
print("%s [%d] %s should use %s not %s" % (file_path, line_no, option,
map_option_type[option], map_access_type[union_access]))
problems += 1
return problems
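# For example (hypothetical option registration and source line): given
#   unc_add_option("indent_columns", UO_indent_columns, AT_UNUM, ...)
# a usage such as
#   cpd.settings[UO_indent_columns].n
# is reported, because the 'n' member maps to AT_NUM while the option type is AT_UNUM.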
def fill_map_option_type(file_path):
# Read in all the options
fd = open(file_path, 'r')
for line in fd:
if line.find('unc_add_option') > 0 and line.find('UO_') > 0:
splits = line.split(',')
if len(splits) >= 3:
map_option_type[splits[1].strip()] = splits[2].strip()
fd.close()
def main():
src_dir = join(dirname(dirname(abspath(__file__))), 'src')
fill_map_option_type(join(src_dir, 'options.cpp'))
# Get a list of all the source files
ld = listdir(src_dir)
src_files = filter(ld, '*.cpp')
src_files.extend(filter(ld, '*.h'))
# Check each source file
problems = 0
for fn in src_files:
problems += check_file(join(src_dir, fn))
if problems == 0:
print("No problems found")
return EX_OK
else:
return EX_DATAERR
if __name__ == '__main__':
exit(main())
| gpl-2.0 |
jbedorf/tensorflow | tensorflow/contrib/gan/python/losses/python/losses_impl_test.py | 8 | 29577 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TFGAN losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.losses.python import losses_impl as tfgan_losses
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.ops.distributions import normal
from tensorflow.python.ops.losses import losses as tf_losses
from tensorflow.python.platform import test
# TODO(joelshor): Use `parameterized` tests when opensourced.
class _LossesTest(object):
def init_constants(self):
self._discriminator_real_outputs_np = [-5.0, 1.4, 12.5, 2.7]
self._discriminator_gen_outputs_np = [10.0, 4.4, -5.5, 3.6]
self._weights = 2.3
self._discriminator_real_outputs = constant_op.constant(
self._discriminator_real_outputs_np, dtype=dtypes.float32)
self._discriminator_gen_outputs = constant_op.constant(
self._discriminator_gen_outputs_np, dtype=dtypes.float32)
def test_generator_all_correct(self):
loss = self._g_loss_fn(self._discriminator_gen_outputs)
self.assertEqual(self._discriminator_gen_outputs.dtype, loss.dtype)
self.assertEqual(self._generator_loss_name, loss.op.name)
with self.cached_session():
self.assertAlmostEqual(self._expected_g_loss, loss.eval(), 5)
def test_discriminator_all_correct(self):
loss = self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs)
self.assertEqual(self._discriminator_gen_outputs.dtype, loss.dtype)
self.assertEqual(self._discriminator_loss_name, loss.op.name)
with self.cached_session():
self.assertAlmostEqual(self._expected_d_loss, loss.eval(), 5)
def test_generator_loss_collection(self):
self.assertEqual(0, len(ops.get_collection('collection')))
self._g_loss_fn(
self._discriminator_gen_outputs, loss_collection='collection')
self.assertEqual(1, len(ops.get_collection('collection')))
def test_discriminator_loss_collection(self):
self.assertEqual(0, len(ops.get_collection('collection')))
self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs,
loss_collection='collection')
self.assertEqual(1, len(ops.get_collection('collection')))
def test_generator_no_reduction(self):
loss = self._g_loss_fn(
self._discriminator_gen_outputs, reduction=tf_losses.Reduction.NONE)
self.assertAllEqual([4], loss.shape)
def test_discriminator_no_reduction(self):
loss = self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs,
reduction=tf_losses.Reduction.NONE)
self.assertAllEqual([4], loss.shape)
def test_generator_patch(self):
loss = self._g_loss_fn(
array_ops.reshape(self._discriminator_gen_outputs, [2, 2]))
self.assertEqual(self._discriminator_gen_outputs.dtype, loss.dtype)
with self.cached_session():
self.assertAlmostEqual(self._expected_g_loss, loss.eval(), 5)
def test_discriminator_patch(self):
loss = self._d_loss_fn(
array_ops.reshape(self._discriminator_real_outputs, [2, 2]),
array_ops.reshape(self._discriminator_gen_outputs, [2, 2]))
self.assertEqual(self._discriminator_gen_outputs.dtype, loss.dtype)
with self.cached_session():
self.assertAlmostEqual(self._expected_d_loss, loss.eval(), 5)
def test_generator_loss_with_placeholder_for_logits(self):
logits = array_ops.placeholder(dtypes.float32, shape=(None, 4))
weights = array_ops.ones_like(logits, dtype=dtypes.float32)
loss = self._g_loss_fn(logits, weights=weights)
self.assertEqual(logits.dtype, loss.dtype)
with self.cached_session() as sess:
loss = sess.run(loss,
feed_dict={
logits: [[10.0, 4.4, -5.5, 3.6]],
})
self.assertAlmostEqual(self._expected_g_loss, loss, 5)
def test_discriminator_loss_with_placeholder_for_logits(self):
logits = array_ops.placeholder(dtypes.float32, shape=(None, 4))
logits2 = array_ops.placeholder(dtypes.float32, shape=(None, 4))
real_weights = array_ops.ones_like(logits, dtype=dtypes.float32)
generated_weights = array_ops.ones_like(logits, dtype=dtypes.float32)
loss = self._d_loss_fn(
logits, logits2, real_weights=real_weights,
generated_weights=generated_weights)
with self.cached_session() as sess:
loss = sess.run(loss,
feed_dict={
logits: [self._discriminator_real_outputs_np],
logits2: [self._discriminator_gen_outputs_np],
})
self.assertAlmostEqual(self._expected_d_loss, loss, 5)
def test_generator_with_python_scalar_weight(self):
loss = self._g_loss_fn(
self._discriminator_gen_outputs, weights=self._weights)
with self.cached_session():
self.assertAlmostEqual(self._expected_g_loss * self._weights,
loss.eval(), 4)
def test_discriminator_with_python_scalar_weight(self):
loss = self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs,
real_weights=self._weights, generated_weights=self._weights)
with self.cached_session():
self.assertAlmostEqual(self._expected_d_loss * self._weights,
loss.eval(), 4)
def test_generator_with_scalar_tensor_weight(self):
loss = self._g_loss_fn(self._discriminator_gen_outputs,
weights=constant_op.constant(self._weights))
with self.cached_session():
self.assertAlmostEqual(self._expected_g_loss * self._weights,
loss.eval(), 4)
def test_discriminator_with_scalar_tensor_weight(self):
weights = constant_op.constant(self._weights)
loss = self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs,
real_weights=weights, generated_weights=weights)
with self.cached_session():
self.assertAlmostEqual(self._expected_d_loss * self._weights,
loss.eval(), 4)
def test_generator_add_summaries(self):
self.assertEqual(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
self._g_loss_fn(self._discriminator_gen_outputs, add_summaries=True)
self.assertLess(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
def test_discriminator_add_summaries(self):
self.assertEqual(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs,
add_summaries=True)
self.assertLess(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
class LeastSquaresLossTest(test.TestCase, _LossesTest):
"""Tests for least_squares_xxx_loss."""
def setUp(self):
super(LeastSquaresLossTest, self).setUp()
self.init_constants()
self._expected_g_loss = 17.69625
self._expected_d_loss = 41.73375
self._generator_loss_name = 'lsq_generator_loss/value'
self._discriminator_loss_name = 'lsq_discriminator_loss/add'
self._g_loss_fn = tfgan_losses.least_squares_generator_loss
self._d_loss_fn = tfgan_losses.least_squares_discriminator_loss
class ModifiedLossTest(test.TestCase, _LossesTest):
"""Tests for modified_xxx_loss."""
def setUp(self):
super(ModifiedLossTest, self).setUp()
self.init_constants()
self._expected_g_loss = 1.38582
self._expected_d_loss = 6.19637
self._generator_loss_name = 'generator_modified_loss/value'
self._discriminator_loss_name = 'discriminator_modified_loss/add_1'
self._g_loss_fn = tfgan_losses.modified_generator_loss
self._d_loss_fn = tfgan_losses.modified_discriminator_loss
class MinimaxLossTest(test.TestCase, _LossesTest):
"""Tests for minimax_xxx_loss."""
def setUp(self):
super(MinimaxLossTest, self).setUp()
self.init_constants()
self._expected_g_loss = -4.82408
self._expected_d_loss = 6.19637
self._generator_loss_name = 'generator_minimax_loss/Neg'
self._discriminator_loss_name = 'discriminator_minimax_loss/add_1'
self._g_loss_fn = tfgan_losses.minimax_generator_loss
self._d_loss_fn = tfgan_losses.minimax_discriminator_loss
class WassersteinLossTest(test.TestCase, _LossesTest):
"""Tests for wasserstein_xxx_loss."""
def setUp(self):
super(WassersteinLossTest, self).setUp()
self.init_constants()
self._expected_g_loss = -3.12500
self._expected_d_loss = 0.22500
self._generator_loss_name = 'generator_wasserstein_loss/value'
self._discriminator_loss_name = 'discriminator_wasserstein_loss/sub'
self._g_loss_fn = tfgan_losses.wasserstein_generator_loss
self._d_loss_fn = tfgan_losses.wasserstein_discriminator_loss
# TODO(joelshor): Use `parameterized` tests when opensourced.
# TODO(joelshor): Refactor this test to use the same code as the other losses.
class ACGANLossTest(test.TestCase):
"""Tests for wasserstein_xxx_loss."""
def setUp(self):
super(ACGANLossTest, self).setUp()
self._g_loss_fn = tfgan_losses.acgan_generator_loss
self._d_loss_fn = tfgan_losses.acgan_discriminator_loss
self._discriminator_gen_classification_logits_np = [[10.0, 4.4, -5.5, 3.6],
[-4.0, 4.4, 5.2, 4.6],
[1.1, 2.4, -3.5, 5.6],
[1.1, 2.4, -3.5, 5.6]]
self._discriminator_real_classification_logits_np = [[-2.0, 0.4, 12.5, 2.7],
[-1.2, 1.9, 12.3, 2.6],
[-2.4, -1.7, 2.5, 2.7],
[1.1, 2.4, -3.5, 5.6]]
self._one_hot_labels_np = [[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]]
self._weights = 2.3
self._discriminator_gen_classification_logits = constant_op.constant(
self._discriminator_gen_classification_logits_np, dtype=dtypes.float32)
self._discriminator_real_classification_logits = constant_op.constant(
self._discriminator_real_classification_logits_np, dtype=dtypes.float32)
self._one_hot_labels = constant_op.constant(
self._one_hot_labels_np, dtype=dtypes.float32)
self._generator_kwargs = {
'discriminator_gen_classification_logits':
self._discriminator_gen_classification_logits,
'one_hot_labels': self._one_hot_labels,
}
self._discriminator_kwargs = {
'discriminator_gen_classification_logits':
self._discriminator_gen_classification_logits,
'discriminator_real_classification_logits':
self._discriminator_real_classification_logits,
'one_hot_labels': self._one_hot_labels,
}
self._generator_loss_name = 'acgan_generator_loss/value'
self._discriminator_loss_name = 'acgan_discriminator_loss/add'
self._expected_g_loss = 3.84974
self._expected_d_loss = 9.43950
def test_generator_all_correct(self):
loss = self._g_loss_fn(**self._generator_kwargs)
self.assertEqual(
self._discriminator_gen_classification_logits.dtype, loss.dtype)
self.assertEqual(self._generator_loss_name, loss.op.name)
with self.cached_session():
self.assertAlmostEqual(self._expected_g_loss, loss.eval(), 5)
def test_discriminator_all_correct(self):
loss = self._d_loss_fn(**self._discriminator_kwargs)
self.assertEqual(
self._discriminator_gen_classification_logits.dtype, loss.dtype)
self.assertEqual(self._discriminator_loss_name, loss.op.name)
with self.cached_session():
self.assertAlmostEqual(self._expected_d_loss, loss.eval(), 5)
def test_generator_loss_collection(self):
self.assertEqual(0, len(ops.get_collection('collection')))
self._g_loss_fn(loss_collection='collection', **self._generator_kwargs)
self.assertEqual(1, len(ops.get_collection('collection')))
def test_discriminator_loss_collection(self):
self.assertEqual(0, len(ops.get_collection('collection')))
self._d_loss_fn(loss_collection='collection', **self._discriminator_kwargs)
self.assertEqual(1, len(ops.get_collection('collection')))
def test_generator_no_reduction(self):
loss = self._g_loss_fn(
reduction=tf_losses.Reduction.NONE, **self._generator_kwargs)
self.assertAllEqual([4], loss.shape)
def test_discriminator_no_reduction(self):
loss = self._d_loss_fn(
reduction=tf_losses.Reduction.NONE, **self._discriminator_kwargs)
self.assertAllEqual([4], loss.shape)
def test_generator_patch(self):
patch_args = {x: array_ops.reshape(y, [2, 2, 4]) for x, y in
self._generator_kwargs.items()}
loss = self._g_loss_fn(**patch_args)
with self.cached_session():
self.assertAlmostEqual(self._expected_g_loss, loss.eval(), 5)
def test_discriminator_patch(self):
patch_args = {x: array_ops.reshape(y, [2, 2, 4]) for x, y in
self._discriminator_kwargs.items()}
loss = self._d_loss_fn(**patch_args)
with self.cached_session():
self.assertAlmostEqual(self._expected_d_loss, loss.eval(), 5)
def test_generator_loss_with_placeholder_for_logits(self):
gen_logits = array_ops.placeholder(dtypes.float32, shape=(None, 4))
one_hot_labels = array_ops.placeholder(dtypes.int32, shape=(None, 4))
loss = self._g_loss_fn(gen_logits, one_hot_labels)
with self.cached_session() as sess:
loss = sess.run(
loss, feed_dict={
gen_logits: self._discriminator_gen_classification_logits_np,
one_hot_labels: self._one_hot_labels_np,
})
self.assertAlmostEqual(self._expected_g_loss, loss, 5)
def test_discriminator_loss_with_placeholder_for_logits_and_weights(self):
gen_logits = array_ops.placeholder(dtypes.float32, shape=(None, 4))
real_logits = array_ops.placeholder(dtypes.float32, shape=(None, 4))
one_hot_labels = array_ops.placeholder(dtypes.int32, shape=(None, 4))
loss = self._d_loss_fn(gen_logits, real_logits, one_hot_labels)
with self.cached_session() as sess:
loss = sess.run(
loss, feed_dict={
gen_logits: self._discriminator_gen_classification_logits_np,
real_logits: self._discriminator_real_classification_logits_np,
one_hot_labels: self._one_hot_labels_np,
})
self.assertAlmostEqual(self._expected_d_loss, loss, 5)
def test_generator_with_python_scalar_weight(self):
loss = self._g_loss_fn(weights=self._weights, **self._generator_kwargs)
with self.cached_session():
self.assertAlmostEqual(self._expected_g_loss * self._weights,
loss.eval(), 4)
def test_discriminator_with_python_scalar_weight(self):
loss = self._d_loss_fn(
real_weights=self._weights, generated_weights=self._weights,
**self._discriminator_kwargs)
with self.cached_session():
self.assertAlmostEqual(self._expected_d_loss * self._weights,
loss.eval(), 4)
def test_generator_with_scalar_tensor_weight(self):
loss = self._g_loss_fn(
weights=constant_op.constant(self._weights), **self._generator_kwargs)
with self.cached_session():
self.assertAlmostEqual(self._expected_g_loss * self._weights,
loss.eval(), 4)
def test_discriminator_with_scalar_tensor_weight(self):
weights = constant_op.constant(self._weights)
loss = self._d_loss_fn(real_weights=weights, generated_weights=weights,
**self._discriminator_kwargs)
with self.cached_session():
self.assertAlmostEqual(self._expected_d_loss * self._weights,
loss.eval(), 4)
def test_generator_add_summaries(self):
self.assertEqual(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
self._g_loss_fn(add_summaries=True, **self._generator_kwargs)
self.assertLess(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
def test_discriminator_add_summaries(self):
self.assertEqual(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
self._d_loss_fn(add_summaries=True, **self._discriminator_kwargs)
self.assertLess(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
class _PenaltyTest(object):
def test_all_correct(self):
loss = self._penalty_fn(**self._kwargs)
self.assertEqual(self._expected_dtype, loss.dtype)
# NOTE: Op names will change, it is inappropriate to include them in tests.
# See go/tf-breaking-change.
# self.assertEqual(self._expected_op_name, loss.op.name)
with self.cached_session():
variables.global_variables_initializer().run()
self.assertAlmostEqual(self._expected_loss, loss.eval(), 6)
def test_loss_collection(self):
self.assertEqual(0, len(ops.get_collection('collection')))
self._penalty_fn(loss_collection='collection', **self._kwargs)
self.assertEqual(1, len(ops.get_collection('collection')))
def test_no_reduction(self):
loss = self._penalty_fn(reduction=tf_losses.Reduction.NONE, **self._kwargs)
self.assertAllEqual([self._batch_size], loss.shape)
def test_python_scalar_weight(self):
loss = self._penalty_fn(weights=2.3, **self._kwargs)
with self.cached_session():
variables.global_variables_initializer().run()
self.assertAlmostEqual(self._expected_loss * 2.3, loss.eval(), 3)
def test_scalar_tensor_weight(self):
loss = self._penalty_fn(weights=constant_op.constant(2.3), **self._kwargs)
with self.cached_session():
variables.global_variables_initializer().run()
self.assertAlmostEqual(self._expected_loss * 2.3, loss.eval(), 3)
class GradientPenaltyTest(test.TestCase, _PenaltyTest):
"""Tests for wasserstein_gradient_penalty."""
def setUp(self):
super(GradientPenaltyTest, self).setUp()
self._penalty_fn = tfgan_losses.wasserstein_gradient_penalty
self._generated_data_np = [[3.1, 2.3, -12.3, 32.1]]
self._real_data_np = [[-12.3, 23.2, 16.3, -43.2]]
self._expected_dtype = dtypes.float32
with variable_scope.variable_scope('fake_scope') as self._scope:
self._discriminator_fn(0.0, 0.0)
self._kwargs = {
'generated_data': constant_op.constant(
self._generated_data_np, dtype=self._expected_dtype),
'real_data': constant_op.constant(
self._real_data_np, dtype=self._expected_dtype),
'generator_inputs': None,
'discriminator_fn': self._discriminator_fn,
'discriminator_scope': self._scope,
}
self._expected_loss = 9.00000
self._expected_op_name = 'wasserstein_gradient_penalty/value'
self._batch_size = 1
def _discriminator_fn(self, inputs, _):
ops.add_to_collection('fake_update_ops', constant_op.constant(1.0))
return variable_scope.get_variable('dummy_d', initializer=2.0) * inputs
def test_loss_with_placeholder(self):
generated_data = array_ops.placeholder(dtypes.float32, shape=(None, None))
real_data = array_ops.placeholder(dtypes.float32, shape=(None, None))
loss = tfgan_losses.wasserstein_gradient_penalty(
generated_data,
real_data,
self._kwargs['generator_inputs'],
self._kwargs['discriminator_fn'],
self._kwargs['discriminator_scope'])
self.assertEqual(generated_data.dtype, loss.dtype)
with self.cached_session() as sess:
variables.global_variables_initializer().run()
loss = sess.run(loss,
feed_dict={
generated_data: self._generated_data_np,
real_data: self._real_data_np,
})
self.assertAlmostEqual(self._expected_loss, loss, 5)
def test_loss_using_one_sided_mode(self):
generated_data = array_ops.placeholder(dtypes.float32, shape=(None, None))
real_data = array_ops.placeholder(dtypes.float32, shape=(None, None))
loss = tfgan_losses.wasserstein_gradient_penalty(
generated_data,
real_data,
self._kwargs['generator_inputs'],
self._kwargs['discriminator_fn'],
self._kwargs['discriminator_scope'],
one_sided=True)
self.assertEqual(generated_data.dtype, loss.dtype)
with self.cached_session() as sess:
variables.global_variables_initializer().run()
loss = sess.run(loss,
feed_dict={
generated_data: self._generated_data_np,
real_data: self._real_data_np,
})
self.assertAlmostEqual(self._expected_loss, loss, 5)
def test_loss_with_gradient_norm_target(self):
"""Test loss value with non default gradient norm target."""
generated_data = array_ops.placeholder(dtypes.float32, shape=(None, None))
real_data = array_ops.placeholder(dtypes.float32, shape=(None, None))
loss = tfgan_losses.wasserstein_gradient_penalty(
generated_data,
real_data,
self._kwargs['generator_inputs'],
self._kwargs['discriminator_fn'],
self._kwargs['discriminator_scope'],
target=2.0)
with self.cached_session() as sess:
variables.global_variables_initializer().run()
loss = sess.run(
loss,
feed_dict={
generated_data: self._generated_data_np,
real_data: self._real_data_np,
})
self.assertAlmostEqual(1.0, loss, 5)
def test_reuses_scope(self):
"""Test that gradient penalty reuses discriminator scope."""
num_vars = len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
tfgan_losses.wasserstein_gradient_penalty(**self._kwargs)
self.assertEqual(
num_vars, len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
def test_works_with_get_collection(self):
"""Tests that gradient penalty works inside other scopes."""
# We ran the discriminator once in the setup, so there should be an op
# already in the collection.
self.assertEqual(1, len(ops.get_collection(
'fake_update_ops', self._kwargs['discriminator_scope'].name)))
# Make sure the op is added to the collection even if it's in a name scope.
with ops.name_scope('loss'):
tfgan_losses.wasserstein_gradient_penalty(**self._kwargs)
self.assertEqual(2, len(ops.get_collection(
'fake_update_ops', self._kwargs['discriminator_scope'].name)))
# Make sure the op is added to the collection even if it's in a variable
# scope.
with variable_scope.variable_scope('loss_vscope'):
tfgan_losses.wasserstein_gradient_penalty(**self._kwargs)
self.assertEqual(3, len(ops.get_collection(
'fake_update_ops', self._kwargs['discriminator_scope'].name)))
class MutualInformationPenaltyTest(test.TestCase, _PenaltyTest):
"""Tests for mutual_information_penalty."""
def setUp(self):
super(MutualInformationPenaltyTest, self).setUp()
self._penalty_fn = tfgan_losses.mutual_information_penalty
self._structured_generator_inputs = [1.0, 2.0]
self._predicted_distributions = [categorical.Categorical(logits=[1.0, 2.0]),
normal.Normal([0.0], [1.0])]
self._expected_dtype = dtypes.float32
self._kwargs = {
'structured_generator_inputs': self._structured_generator_inputs,
'predicted_distributions': self._predicted_distributions,
}
self._expected_loss = 1.61610
self._expected_op_name = 'mutual_information_loss/mul_1'
self._batch_size = 2
class CombineAdversarialLossTest(test.TestCase):
"""Tests for combine_adversarial_loss."""
def setUp(self):
super(CombineAdversarialLossTest, self).setUp()
self._generated_data_np = [[3.1, 2.3, -12.3, 32.1]]
self._real_data_np = [[-12.3, 23.2, 16.3, -43.2]]
self._generated_data = constant_op.constant(
self._generated_data_np, dtype=dtypes.float32)
self._real_data = constant_op.constant(
self._real_data_np, dtype=dtypes.float32)
self._generated_inputs = None
self._expected_loss = 9.00000
def _test_correct_helper(self, use_weight_factor):
variable_list = [variables.Variable(1.0)]
main_loss = variable_list[0] * 2
adversarial_loss = variable_list[0] * 3
gradient_ratio_epsilon = 1e-6
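    # With the variable at 1.0, main_loss evaluates to 2 with gradient norm 2
    # and adversarial_loss to 3 with gradient norm 3; the else branch below
    # assumes the adversarial coefficient is ||grad main|| / (||grad adv|| *
    # gradient_ratio), i.e. 2 / (3 * 0.5).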
if use_weight_factor:
weight_factor = constant_op.constant(2.0)
gradient_ratio = None
adv_coeff = 2.0
expected_loss = 1.0 * 2 + adv_coeff * 1.0 * 3
else:
weight_factor = None
gradient_ratio = constant_op.constant(0.5)
adv_coeff = 2.0 / (3 * 0.5 + gradient_ratio_epsilon)
expected_loss = 1.0 * 2 + adv_coeff * 1.0 * 3
combined_loss = tfgan_losses.combine_adversarial_loss(
main_loss,
adversarial_loss,
weight_factor=weight_factor,
gradient_ratio=gradient_ratio,
gradient_ratio_epsilon=gradient_ratio_epsilon,
variables=variable_list)
with self.test_session(use_gpu=True):
variables.global_variables_initializer().run()
self.assertNear(expected_loss, combined_loss.eval(), 1e-5)
def test_correct_useweightfactor(self):
self._test_correct_helper(True)
def test_correct_nouseweightfactor(self):
self._test_correct_helper(False)
def _test_no_weight_skips_adversarial_loss_helper(self, use_weight_factor):
"""Test the 0 adversarial weight or grad ratio skips adversarial loss."""
main_loss = constant_op.constant(1.0)
adversarial_loss = constant_op.constant(1.0)
weight_factor = 0.0 if use_weight_factor else None
gradient_ratio = None if use_weight_factor else 0.0
combined_loss = tfgan_losses.combine_adversarial_loss(
main_loss,
adversarial_loss,
weight_factor=weight_factor,
gradient_ratio=gradient_ratio,
gradient_summaries=False)
with self.test_session(use_gpu=True):
self.assertEqual(1.0, combined_loss.eval())
def test_no_weight_skips_adversarial_loss_useweightfactor(self):
self._test_no_weight_skips_adversarial_loss_helper(True)
def test_no_weight_skips_adversarial_loss_nouseweightfactor(self):
self._test_no_weight_skips_adversarial_loss_helper(False)
def test_stable_global_norm_avoids_overflow(self):
tensors = [array_ops.ones([4]), array_ops.ones([4, 4]) * 1e19, None]
gnorm_is_inf = math_ops.is_inf(clip_ops.global_norm(tensors))
stable_gnorm_is_inf = math_ops.is_inf(
tfgan_losses._numerically_stable_global_norm(tensors))
with self.test_session(use_gpu=True):
self.assertTrue(gnorm_is_inf.eval())
self.assertFalse(stable_gnorm_is_inf.eval())
def test_stable_global_norm_unchanged(self):
"""Test that preconditioning doesn't change global norm value."""
random_seed.set_random_seed(1234)
tensors = [random_ops.random_uniform([3]*i, -10.0, 10.0) for i in range(6)]
gnorm = clip_ops.global_norm(tensors)
precond_gnorm = tfgan_losses._numerically_stable_global_norm(tensors)
with self.test_session(use_gpu=True) as sess:
for _ in range(10): # spot check closeness on more than one sample.
gnorm_np, precond_gnorm_np = sess.run([gnorm, precond_gnorm])
self.assertNear(gnorm_np, precond_gnorm_np, 1e-4)
class CycleConsistencyLossTest(test.TestCase):
"""Tests for cycle_consistency_loss."""
def setUp(self):
super(CycleConsistencyLossTest, self).setUp()
self._data_x_np = [[1.0, 2, 3], [4, 5, 6]]
self._reconstructed_data_x_np = [[7.0, 8, 9], [10, 11, 12]]
self._data_y_np = [1.0, 9]
self._reconstructed_data_y_np = [-2.0, 3]
self._data_x = constant_op.constant(self._data_x_np, dtype=dtypes.float32)
self._reconstructed_data_x = constant_op.constant(
self._reconstructed_data_x_np, dtype=dtypes.float32)
self._data_y = constant_op.constant(self._data_y_np, dtype=dtypes.float32)
self._reconstructed_data_y = constant_op.constant(
self._reconstructed_data_y_np, dtype=dtypes.float32)
def test_correct_loss(self):
loss = tfgan_losses.cycle_consistency_loss(
self._data_x, self._reconstructed_data_x, self._data_y,
self._reconstructed_data_y)
with self.test_session(use_gpu=True):
variables.global_variables_initializer().run()
self.assertNear(5.25, loss.eval(), 1e-5)
if __name__ == '__main__':
test.main()
| apache-2.0 |
zorojean/zulip | zproject/urls.py | 108 | 15143 | from django.conf import settings
from django.conf.urls import patterns, url, include
from django.views.generic import TemplateView, RedirectView
import os.path
import zerver.forms
# NB: There are several other pieces of code which route requests by URL:
#
# - runtornado.py has its own URL list for Tornado views. See the
# invocation of web.Application in that file.
#
# - The Nginx config knows which URLs to route to Django or Tornado.
#
# - Likewise for the local dev server in tools/run-dev.py.
urlpatterns = patterns('',
url(r'^$', 'zerver.views.home'),
# We have a desktop-specific landing page in case we change our / to not log in in the future. We don't
# want to require a new desktop app build for everyone in that case
url(r'^desktop_home/$', 'zerver.views.desktop_home'),
url(r'^accounts/login/sso/$', 'zerver.views.remote_user_sso', name='login-sso'),
url(r'^accounts/login/jwt/$', 'zerver.views.remote_user_jwt', name='login-jwt'),
url(r'^accounts/login/google/$', 'zerver.views.start_google_oauth2'),
url(r'^accounts/login/google/done/$', 'zerver.views.finish_google_oauth2'),
url(r'^accounts/login/local/$', 'zerver.views.dev_direct_login'),
# We have two entries for accounts/login to allow reverses on the Django
# view we're wrapping to continue to function.
url(r'^accounts/login/', 'zerver.views.login_page', {'template_name': 'zerver/login.html'}),
url(r'^accounts/login/', 'django.contrib.auth.views.login', {'template_name': 'zerver/login.html'}),
url(r'^accounts/logout/', 'zerver.views.logout_then_login'),
url(r'^accounts/webathena_kerberos_login/', 'zerver.views.webathena_kerberos_login'),
url(r'^accounts/password/reset/$', 'django.contrib.auth.views.password_reset',
{'post_reset_redirect' : '/accounts/password/reset/done/',
'template_name': 'zerver/reset.html',
'email_template_name': 'registration/password_reset_email.txt',
}),
url(r'^accounts/password/reset/done/$', 'django.contrib.auth.views.password_reset_done',
{'template_name': 'zerver/reset_emailed.html'}),
url(r'^accounts/password/reset/(?P<uidb64>[0-9A-Za-z]+)/(?P<token>.+)/$',
'django.contrib.auth.views.password_reset_confirm',
{'post_reset_redirect' : '/accounts/password/done/',
'template_name': 'zerver/reset_confirm.html',
'set_password_form' : zerver.forms.LoggingSetPasswordForm}),
url(r'^accounts/password/done/$', 'django.contrib.auth.views.password_reset_complete',
{'template_name': 'zerver/reset_done.html'}),
# Avatar
url(r'^avatar/(?P<email>[\S]+)?', 'zerver.views.avatar'),
# Registration views, require a confirmation ID.
url(r'^accounts/home/', 'zerver.views.accounts_home'),
url(r'^accounts/send_confirm/(?P<email>[\S]+)?',
TemplateView.as_view(template_name='zerver/accounts_send_confirm.html'), name='send_confirm'),
url(r'^accounts/register/', 'zerver.views.accounts_register'),
url(r'^accounts/do_confirm/(?P<confirmation_key>[\w]+)', 'confirmation.views.confirm'),
url(r'^invite/$', 'zerver.views.initial_invite_page', name='initial-invite-users'),
# Unsubscription endpoint. Used for various types of e-mails (day 1 & 2,
# missed PMs, etc.)
url(r'^accounts/unsubscribe/(?P<type>[\w]+)/(?P<token>[\w]+)',
'zerver.views.email_unsubscribe'),
# Portico-styled page used to provide email confirmation of terms acceptance.
url(r'^accounts/accept_terms/$', 'zerver.views.accounts_accept_terms'),
# Terms of service and privacy policy
url(r'^terms/$', TemplateView.as_view(template_name='zerver/terms.html')),
url(r'^terms-enterprise/$', TemplateView.as_view(template_name='zerver/terms-enterprise.html')),
url(r'^privacy/$', TemplateView.as_view(template_name='zerver/privacy.html')),
# Login/registration
url(r'^register/$', 'zerver.views.accounts_home', name='register'),
url(r'^login/$', 'zerver.views.login_page', {'template_name': 'zerver/login.html'}),
# A registration page that passes through the domain, for totally open realms.
url(r'^register/(?P<domain>\S+)/$', 'zerver.views.accounts_home_with_domain'),
# API and integrations documentation
url(r'^api/$', TemplateView.as_view(template_name='zerver/api.html')),
url(r'^api/endpoints/$', 'zerver.views.api_endpoint_docs'),
url(r'^integrations/$', TemplateView.as_view(template_name='zerver/integrations.html')),
url(r'^apps/$', TemplateView.as_view(template_name='zerver/apps.html')),
url(r'^robots\.txt$', RedirectView.as_view(url='/static/robots.txt')),
# Landing page, features pages, signup form, etc.
url(r'^hello/$', TemplateView.as_view(template_name='zerver/hello.html'),
name='landing-page'),
url(r'^new-user/$', RedirectView.as_view(url='/hello')),
url(r'^features/$', TemplateView.as_view(template_name='zerver/features.html')),
)
# These are used for voyager development. On a real voyager instance,
# these files would be served by nginx.
if settings.DEVELOPMENT and settings.LOCAL_UPLOADS_DIR is not None:
urlpatterns += patterns('',
url(r'^user_avatars/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars")}),
url(r'^user_uploads/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': os.path.join(settings.LOCAL_UPLOADS_DIR, "files")}),
)
urlpatterns += patterns('zerver.views',
# These are json format views used by the web client. They require a logged in browser.
url(r'^json/update_pointer$', 'json_update_pointer'),
url(r'^json/get_old_messages$', 'messages.json_get_old_messages'),
url(r'^json/get_public_streams$', 'json_get_public_streams'),
url(r'^json/rename_stream$', 'json_rename_stream'),
url(r'^json/make_stream_public$', 'json_make_stream_public'),
url(r'^json/make_stream_private$', 'json_make_stream_private'),
url(r'^json/send_message$', 'messages.json_send_message'),
url(r'^json/invite_users$', 'json_invite_users'),
url(r'^json/bulk_invite_users$', 'json_bulk_invite_users'),
url(r'^json/settings/change$', 'json_change_settings'),
url(r'^json/notify_settings/change$', 'json_change_notify_settings'),
url(r'^json/ui_settings/change$', 'json_change_ui_settings'),
url(r'^json/subscriptions/remove$', 'json_remove_subscriptions'),
url(r'^json/subscriptions/add$', 'json_add_subscriptions'),
url(r'^json/subscriptions/exists$', 'json_stream_exists'),
url(r'^json/subscriptions/property$', 'json_subscription_property'),
url(r'^json/get_subscribers$', 'json_get_subscribers'),
url(r'^json/fetch_api_key$', 'json_fetch_api_key'),
url(r'^json/update_active_status$', 'json_update_active_status'),
url(r'^json/get_active_statuses$', 'json_get_active_statuses'),
url(r'^json/tutorial_send_message$', 'json_tutorial_send_message'),
url(r'^json/tutorial_status$', 'json_tutorial_status'),
url(r'^json/change_enter_sends$', 'json_change_enter_sends'),
url(r'^json/get_profile$', 'json_get_profile'),
url(r'^json/report_error$', 'json_report_error'),
url(r'^json/report_send_time$', 'json_report_send_time'),
url(r'^json/report_narrow_time$', 'json_report_narrow_time'),
url(r'^json/report_unnarrow_time$', 'json_report_unnarrow_time'),
url(r'^json/update_message_flags$', 'messages.json_update_flags'),
url(r'^json/register$', 'json_events_register'),
url(r'^json/upload_file$', 'json_upload_file'),
url(r'^json/messages_in_narrow$', 'messages.json_messages_in_narrow'),
url(r'^json/update_message$', 'messages.json_update_message'),
url(r'^json/fetch_raw_message$', 'messages.json_fetch_raw_message'),
url(r'^json/refer_friend$', 'json_refer_friend'),
url(r'^json/set_alert_words$', 'json_set_alert_words'),
url(r'^json/set_muted_topics$', 'json_set_muted_topics'),
url(r'^json/set_avatar$', 'json_set_avatar'),
url(r'^json/time_setting$', 'json_time_setting'),
url(r'^json/left_side_userlist$', 'json_left_side_userlist'),
# This json format view is used by the LEGACY pre-REST API. It
# requires an API key.
url(r'^api/v1/send_message$', 'messages.api_send_message'),
# This json format view, used by the mobile apps, accepts a username/password
# pair and returns an API key.
url(r'^api/v1/fetch_api_key$', 'api_fetch_api_key'),
# These are integration-specific web hook callbacks
url(r'^api/v1/external/beanstalk$' , 'webhooks.api_beanstalk_webhook'),
url(r'^api/v1/external/github$', 'webhooks.api_github_landing'),
url(r'^api/v1/external/jira$', 'webhooks.api_jira_webhook'),
url(r'^api/v1/external/pivotal$', 'webhooks.api_pivotal_webhook'),
url(r'^api/v1/external/newrelic$', 'webhooks.api_newrelic_webhook'),
url(r'^api/v1/external/bitbucket$', 'webhooks.api_bitbucket_webhook'),
url(r'^api/v1/external/desk$', 'webhooks.api_deskdotcom_webhook'),
url(r'^api/v1/external/stash$', 'webhooks.api_stash_webhook'),
url(r'^api/v1/external/freshdesk$', 'webhooks.api_freshdesk_webhook'),
url(r'^api/v1/external/zendesk$', 'webhooks.api_zendesk_webhook'),
url(r'^api/v1/external/pagerduty$', 'webhooks.api_pagerduty_webhook'),
url(r'^user_uploads/(?P<realm_id>(\d*|unk))/(?P<filename>.*)', 'get_uploaded_file'),
)
# JSON format views used by the redesigned API, accept basic auth username:password.
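# For illustration only (hypothetical host and credentials), a client would call
# these endpoints roughly like:
#   curl -u user@example.com:API_KEY https://zulip.example.com/api/v1/users/me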
v1_api_and_json_patterns = patterns('zerver.views',
url(r'^export$', 'rest_dispatch',
{'GET': 'export'}),
url(r'^streams$', 'rest_dispatch',
{'GET': 'get_streams_backend'}),
# GET returns "stream info" (undefined currently?), HEAD returns whether stream exists (200 or 404)
url(r'^streams/(?P<stream_name>.*)/members$', 'rest_dispatch',
{'GET': 'get_subscribers_backend'}),
url(r'^streams/(?P<stream_name>.*)$', 'rest_dispatch',
{'HEAD': 'stream_exists_backend',
'GET': 'stream_exists_backend',
'PATCH': 'update_stream_backend',
'DELETE': 'deactivate_stream_backend'}),
url(r'^users$', 'rest_dispatch',
{'GET': 'get_members_backend',
'POST': 'create_user_backend'}),
url(r'^users/me$', 'rest_dispatch',
{'GET': 'get_profile_backend'}),
url(r'^users/me/enter-sends$', 'rest_dispatch',
{'POST': 'json_change_enter_sends'}),
url(r'^users/me/pointer$', 'rest_dispatch',
{'GET': 'get_pointer_backend',
'PUT': 'update_pointer_backend'}),
# GET lists your streams, POST bulk adds, PATCH bulk modifies/removes
url(r'^users/me/subscriptions$', 'rest_dispatch',
{'GET': 'list_subscriptions_backend',
'POST': 'add_subscriptions_backend',
'PATCH': 'update_subscriptions_backend'}),
url(r'^users/me/alert_words$', 'rest_dispatch',
{'GET': 'list_alert_words',
'PUT': 'set_alert_words',
'PATCH': 'add_alert_words',
'DELETE': 'remove_alert_words'}),
url(r'^default_streams$', 'rest_dispatch',
{'PATCH': 'add_default_stream',
'DELETE': 'remove_default_stream'}),
url(r'^realm$', 'rest_dispatch',
{'PATCH': 'update_realm'}),
url(r'^users/me/api_key/regenerate$', 'rest_dispatch',
{'POST': 'regenerate_api_key'}),
url(r'^users/me/presence$', 'rest_dispatch',
{'POST': 'update_active_status_backend'}),
# Endpoint used by iOS devices to register their
# unique APNS device token
url(r'^users/me/apns_device_token$', 'rest_dispatch',
{'POST' : 'add_apns_device_token',
'DELETE': 'remove_apns_device_token'}),
url(r'^users/me/android_gcm_reg_id$', 'rest_dispatch',
{'POST': 'add_android_reg_id',
'DELETE': 'remove_android_reg_id'}),
url(r'^users/(?P<email>.*)/reactivate$', 'rest_dispatch',
{'POST': 'reactivate_user_backend'}),
url(r'^users/(?P<email>.*)$', 'rest_dispatch',
{'PATCH': 'update_user_backend',
'DELETE': 'deactivate_user_backend'}),
url(r'^bots$', 'rest_dispatch',
{'GET': 'get_bots_backend',
'POST': 'add_bot_backend'}),
url(r'^bots/(?P<email>.*)/api_key/regenerate$', 'rest_dispatch',
{'POST': 'regenerate_bot_api_key'}),
url(r'^bots/(?P<email>.*)$', 'rest_dispatch',
{'PATCH': 'patch_bot_backend',
'DELETE': 'deactivate_bot_backend'}),
url(r'^register$', 'rest_dispatch',
{'POST': 'api_events_register'}),
# Returns a 204, used by desktop app to verify connectivity status
url(r'generate_204$', 'generate_204'),
) + patterns('zerver.views.messages',
# GET returns messages, possibly filtered, POST sends a message
url(r'^messages$', 'rest_dispatch',
{'GET': 'get_old_messages_backend',
'PATCH': 'update_message_backend',
'POST': 'send_message_backend'}),
url(r'^messages/render$', 'rest_dispatch',
{'GET': 'render_message_backend'}),
url(r'^messages/flags$', 'rest_dispatch',
{'POST': 'update_message_flags'}),
) + patterns('zerver.tornadoviews',
url(r'^events$', 'rest_dispatch',
{'GET': 'get_events_backend',
'DELETE': 'cleanup_event_queue'}),
)
if not settings.VOYAGER:
v1_api_and_json_patterns += patterns('',
# Still scoped to api/v1/, but under a different project
url(r'^deployments/', include('zilencer.urls.api')),
)
urlpatterns += patterns('',
url(r'^', include('zilencer.urls.pages')),
)
urlpatterns += patterns('',
url(r'^', include('analytics.urls')),
)
urlpatterns += patterns('',
url(r'^', include('corporate.urls')),
)
urlpatterns += patterns('zerver.tornadoviews',
# Tornado views
url(r'^json/get_events$', 'json_get_events'),
# Used internally for communication between Django and Tornado processes
url(r'^notify_tornado$', 'notify'),
)
# Include the dual-use patterns twice
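# e.g. the "messages" pattern above resolves both as /api/v1/messages (basic
# auth) and as /json/messages (logged-in browser session).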
urlpatterns += patterns('',
url(r'^api/v1/', include(v1_api_and_json_patterns)),
url(r'^json/', include(v1_api_and_json_patterns)),
)
if settings.DEVELOPMENT:
use_prod_static = getattr(settings, 'PIPELINE', False)
static_root = os.path.join(settings.DEPLOY_ROOT,
'prod-static/serve' if use_prod_static else 'static')
urlpatterns += patterns('',
url(r'^static/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': static_root}))
| apache-2.0 |
dezelin/scons | scons-local/SCons/Variables/EnumVariable.py | 3 | 3785 | """engine.SCons.Variables.EnumVariable
This file defines the option type for SCons allowing only specified
input-values.
Usage example:
opts = Variables()
opts.Add(EnumVariable('debug', 'debug output and symbols', 'no',
allowed_values=('yes', 'no', 'full'),
map={}, ignorecase=2))
...
if env['debug'] == 'full':
...
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Variables/EnumVariable.py 2014/07/05 09:42:21 garyo"
__all__ = ['EnumVariable',]
import SCons.Errors
def _validator(key, val, env, vals):
    if val not in vals:
raise SCons.Errors.UserError(
'Invalid value for option %s: %s. Valid values are: %s' % (key, val, vals))
def EnumVariable(key, help, default, allowed_values, map={}, ignorecase=0):
"""
    The input parameters describe an option with only certain values
    allowed. They are returned with an appropriate converter and
    validator appended. The result is usable as input to
    Variables.Add().
    'key' and 'default' are the values to be passed on to Variables.Add().
    The allowed values are automatically appended to 'help'.
'allowed_values' is a list of strings, which are allowed as values
for this option.
The 'map'-dictionary may be used for converting the input value
into canonical values (eg. for aliases).
'ignorecase' defines the behaviour of the validator:
If ignorecase == 0, the validator/converter are case-sensitive.
If ignorecase == 1, the validator/converter are case-insensitive.
If ignorecase == 2, the validator/converter is case-insensitive and
the converted value will always be lower-case.
The 'validator' tests whether the value is in the list of allowed
values. The 'converter' converts input values according to the
given 'map'-dictionary (unmapped input values are returned
unchanged).
"""
help = '%s (%s)' % (help, '|'.join(allowed_values))
# define validator
if ignorecase >= 1:
validator = lambda key, val, env: \
_validator(key, val.lower(), env, allowed_values)
else:
validator = lambda key, val, env: \
_validator(key, val, env, allowed_values)
# define converter
if ignorecase == 2:
converter = lambda val: map.get(val.lower(), val).lower()
elif ignorecase == 1:
converter = lambda val: map.get(val.lower(), val)
else:
converter = lambda val: map.get(val, val)
return (key, help, default, validator, converter)
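# A minimal usage sketch (hypothetical 'mode' option, not part of this module):
# with ignorecase=2 and map={'enable': 'yes', 'disable': 'no'}, an input such as
# 'Enable' is lower-cased, mapped to 'yes', and then checked against
# allowed_values, e.g.:
#
#   opts = Variables()
#   opts.Add(EnumVariable('mode', 'build mode', 'no',
#                         allowed_values=('yes', 'no'),
#                         map={'enable': 'yes', 'disable': 'no'},
#                         ignorecase=2))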
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
Tehsmash/nova | nova/tests/unit/virt/xenapi/test_volumeops.py | 65 | 24052 | # Copyright (c) 2012 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import exception
from nova import test
from nova.tests.unit.virt.xenapi import stubs
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import volumeops
class VolumeOpsTestBase(stubs.XenAPITestBaseNoDB):
def setUp(self):
super(VolumeOpsTestBase, self).setUp()
self._setup_mock_volumeops()
def _setup_mock_volumeops(self):
self.session = stubs.FakeSessionForVolumeTests('fake_uri')
self.ops = volumeops.VolumeOps(self.session)
class VolumeDetachTestCase(VolumeOpsTestBase):
def test_detach_volume_call(self):
registered_calls = []
def regcall(label):
def side_effect(*args, **kwargs):
registered_calls.append(label)
return side_effect
ops = volumeops.VolumeOps('session')
self.mox.StubOutWithMock(volumeops.vm_utils, 'lookup')
self.mox.StubOutWithMock(volumeops.volume_utils, 'find_vbd_by_number')
self.mox.StubOutWithMock(volumeops.vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(volumeops.vm_utils, 'unplug_vbd')
self.mox.StubOutWithMock(volumeops.vm_utils, 'destroy_vbd')
self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
self.mox.StubOutWithMock(volumeops.volume_utils, 'find_sr_from_vbd')
self.mox.StubOutWithMock(volumeops.volume_utils, 'purge_sr')
volumeops.vm_utils.lookup('session', 'instance_1').AndReturn(
'vmref')
volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
'devnumber')
volumeops.volume_utils.find_vbd_by_number(
'session', 'vmref', 'devnumber').AndReturn('vbdref')
volumeops.vm_utils.is_vm_shutdown('session', 'vmref').AndReturn(
False)
volumeops.vm_utils.unplug_vbd('session', 'vbdref', 'vmref')
volumeops.vm_utils.destroy_vbd('session', 'vbdref').WithSideEffects(
regcall('destroy_vbd'))
volumeops.volume_utils.find_sr_from_vbd(
'session', 'vbdref').WithSideEffects(
regcall('find_sr_from_vbd')).AndReturn('srref')
volumeops.volume_utils.purge_sr('session', 'srref')
self.mox.ReplayAll()
ops.detach_volume(
dict(driver_volume_type='iscsi', data='conn_data'),
'instance_1', 'mountpoint')
self.assertEqual(
['find_sr_from_vbd', 'destroy_vbd'], registered_calls)
@mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
@mock.patch.object(volume_utils, "find_vbd_by_number")
@mock.patch.object(vm_utils, "vm_ref_or_raise")
def test_detach_volume(self, mock_vm, mock_vbd, mock_detach):
mock_vm.return_value = "vm_ref"
mock_vbd.return_value = "vbd_ref"
self.ops.detach_volume({}, "name", "/dev/xvdd")
mock_vm.assert_called_once_with(self.session, "name")
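        # "/dev/xvdd" is presumably parsed to device number 3 (xvda=0 ... xvdd=3).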
mock_vbd.assert_called_once_with(self.session, "vm_ref", 3)
mock_detach.assert_called_once_with("vm_ref", ["vbd_ref"])
@mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
@mock.patch.object(volume_utils, "find_vbd_by_number")
@mock.patch.object(vm_utils, "vm_ref_or_raise")
def test_detach_volume_skips_error_skip_attach(self, mock_vm, mock_vbd,
mock_detach):
mock_vm.return_value = "vm_ref"
mock_vbd.return_value = None
self.ops.detach_volume({}, "name", "/dev/xvdd")
self.assertFalse(mock_detach.called)
@mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
@mock.patch.object(volume_utils, "find_vbd_by_number")
@mock.patch.object(vm_utils, "vm_ref_or_raise")
def test_detach_volume_raises(self, mock_vm, mock_vbd,
mock_detach):
mock_vm.return_value = "vm_ref"
mock_vbd.side_effect = test.TestingException
self.assertRaises(test.TestingException,
self.ops.detach_volume, {}, "name", "/dev/xvdd")
self.assertFalse(mock_detach.called)
@mock.patch.object(volume_utils, "purge_sr")
@mock.patch.object(vm_utils, "destroy_vbd")
@mock.patch.object(volume_utils, "find_sr_from_vbd")
@mock.patch.object(vm_utils, "unplug_vbd")
@mock.patch.object(vm_utils, "is_vm_shutdown")
def test_detach_vbds_and_srs_not_shutdown(self, mock_shutdown, mock_unplug,
mock_find_sr, mock_destroy, mock_purge):
mock_shutdown.return_value = False
mock_find_sr.return_value = "sr_ref"
self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref"])
mock_shutdown.assert_called_once_with(self.session, "vm_ref")
mock_find_sr.assert_called_once_with(self.session, "vbd_ref")
mock_unplug.assert_called_once_with(self.session, "vbd_ref", "vm_ref")
mock_destroy.assert_called_once_with(self.session, "vbd_ref")
mock_purge.assert_called_once_with(self.session, "sr_ref")
@mock.patch.object(volume_utils, "purge_sr")
@mock.patch.object(vm_utils, "destroy_vbd")
@mock.patch.object(volume_utils, "find_sr_from_vbd")
@mock.patch.object(vm_utils, "unplug_vbd")
@mock.patch.object(vm_utils, "is_vm_shutdown")
def test_detach_vbds_and_srs_is_shutdown(self, mock_shutdown, mock_unplug,
mock_find_sr, mock_destroy, mock_purge):
mock_shutdown.return_value = True
mock_find_sr.return_value = "sr_ref"
self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref_1", "vbd_ref_2"])
expected = [mock.call(self.session, "vbd_ref_1"),
mock.call(self.session, "vbd_ref_2")]
self.assertEqual(expected, mock_destroy.call_args_list)
mock_purge.assert_called_with(self.session, "sr_ref")
self.assertFalse(mock_unplug.called)
@mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
@mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
def test_detach_all_no_volumes(self, mock_get_all, mock_detach):
mock_get_all.return_value = []
self.ops.detach_all("vm_ref")
mock_get_all.assert_called_once_with("vm_ref")
self.assertFalse(mock_detach.called)
@mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
@mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
def test_detach_all_volumes(self, mock_get_all, mock_detach):
mock_get_all.return_value = ["1"]
self.ops.detach_all("vm_ref")
mock_get_all.assert_called_once_with("vm_ref")
mock_detach.assert_called_once_with("vm_ref", ["1"])
def test_get_all_volume_vbd_refs_no_vbds(self):
with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
with mock.patch.object(self.session.VBD,
"get_other_config") as mock_conf:
mock_get.return_value = []
result = self.ops._get_all_volume_vbd_refs("vm_ref")
self.assertEqual([], list(result))
mock_get.assert_called_once_with("vm_ref")
self.assertFalse(mock_conf.called)
def test_get_all_volume_vbd_refs_no_volumes(self):
with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
with mock.patch.object(self.session.VBD,
"get_other_config") as mock_conf:
mock_get.return_value = ["1"]
mock_conf.return_value = {}
result = self.ops._get_all_volume_vbd_refs("vm_ref")
self.assertEqual([], list(result))
mock_get.assert_called_once_with("vm_ref")
mock_conf.assert_called_once_with("1")
def test_get_all_volume_vbd_refs_with_volumes(self):
with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
with mock.patch.object(self.session.VBD,
"get_other_config") as mock_conf:
mock_get.return_value = ["1", "2"]
mock_conf.return_value = {"osvol": True}
result = self.ops._get_all_volume_vbd_refs("vm_ref")
self.assertEqual(["1", "2"], list(result))
mock_get.assert_called_once_with("vm_ref")
class AttachVolumeTestCase(VolumeOpsTestBase):
@mock.patch.object(volumeops.VolumeOps, "_attach_volume")
@mock.patch.object(vm_utils, "vm_ref_or_raise")
def test_attach_volume_default_hotplug(self, mock_get_vm, mock_attach):
mock_get_vm.return_value = "vm_ref"
self.ops.attach_volume({}, "instance_name", "/dev/xvda")
mock_attach.assert_called_once_with({}, "vm_ref", "instance_name", 0,
True)
@mock.patch.object(volumeops.VolumeOps, "_attach_volume")
@mock.patch.object(vm_utils, "vm_ref_or_raise")
def test_attach_volume_hotplug(self, mock_get_vm, mock_attach):
mock_get_vm.return_value = "vm_ref"
self.ops.attach_volume({}, "instance_name", "/dev/xvda", False)
mock_attach.assert_called_once_with({}, "vm_ref", "instance_name", 0,
False)
@mock.patch.object(volumeops.VolumeOps, "_attach_volume")
def test_attach_volume_default_hotplug_connect_volume(self, mock_attach):
self.ops.connect_volume({})
mock_attach.assert_called_once_with({})
@mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
@mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
@mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
@mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
def test_attach_volume_with_defaults(self, mock_attach, mock_hypervisor,
mock_provider, mock_driver):
connection_info = {"data": {}}
with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi:
mock_provider.return_value = ("sr_ref", "sr_uuid")
mock_vdi.return_value = "vdi_uuid"
result = self.ops._attach_volume(connection_info)
self.assertEqual(result, ("sr_uuid", "vdi_uuid"))
mock_driver.assert_called_once_with(connection_info)
mock_provider.assert_called_once_with({}, None)
mock_hypervisor.assert_called_once_with("sr_ref", {})
self.assertFalse(mock_attach.called)
@mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
@mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
@mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
@mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
def test_attach_volume_with_hot_attach(self, mock_attach, mock_hypervisor,
mock_provider, mock_driver):
connection_info = {"data": {}}
with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi:
mock_provider.return_value = ("sr_ref", "sr_uuid")
mock_hypervisor.return_value = "vdi_ref"
mock_vdi.return_value = "vdi_uuid"
result = self.ops._attach_volume(connection_info, "vm_ref",
"name", 2, True)
self.assertEqual(result, ("sr_uuid", "vdi_uuid"))
mock_driver.assert_called_once_with(connection_info)
mock_provider.assert_called_once_with({}, "name")
mock_hypervisor.assert_called_once_with("sr_ref", {})
mock_attach.assert_called_once_with("vdi_ref", "vm_ref", "name", 2,
True)
@mock.patch.object(volume_utils, "forget_sr")
@mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
@mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
@mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
@mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
def test_attach_volume_cleanup(self, mock_attach, mock_hypervisor,
mock_provider, mock_driver, mock_forget):
connection_info = {"data": {}}
mock_provider.return_value = ("sr_ref", "sr_uuid")
mock_hypervisor.side_effect = test.TestingException
self.assertRaises(test.TestingException,
self.ops._attach_volume, connection_info)
mock_driver.assert_called_once_with(connection_info)
mock_provider.assert_called_once_with({}, None)
mock_hypervisor.assert_called_once_with("sr_ref", {})
mock_forget.assert_called_once_with(self.session, "sr_ref")
self.assertFalse(mock_attach.called)
def test_check_is_supported_driver_type_pass_iscsi(self):
conn_info = {"driver_volume_type": "iscsi"}
self.ops._check_is_supported_driver_type(conn_info)
def test_check_is_supported_driver_type_pass_xensm(self):
conn_info = {"driver_volume_type": "xensm"}
self.ops._check_is_supported_driver_type(conn_info)
def test_check_is_supported_driver_type_pass_bad(self):
conn_info = {"driver_volume_type": "bad"}
self.assertRaises(exception.VolumeDriverNotFound,
self.ops._check_is_supported_driver_type, conn_info)
@mock.patch.object(volume_utils, "introduce_sr")
@mock.patch.object(volume_utils, "find_sr_by_uuid")
@mock.patch.object(volume_utils, "parse_sr_info")
def test_connect_to_volume_provider_new_sr(self, mock_parse, mock_find_sr,
mock_introduce_sr):
mock_parse.return_value = ("uuid", "label", "params")
mock_find_sr.return_value = None
mock_introduce_sr.return_value = "sr_ref"
ref, uuid = self.ops._connect_to_volume_provider({}, "name")
self.assertEqual("sr_ref", ref)
self.assertEqual("uuid", uuid)
mock_parse.assert_called_once_with({}, "Disk-for:name")
mock_find_sr.assert_called_once_with(self.session, "uuid")
mock_introduce_sr.assert_called_once_with(self.session, "uuid",
"label", "params")
@mock.patch.object(volume_utils, "introduce_sr")
@mock.patch.object(volume_utils, "find_sr_by_uuid")
@mock.patch.object(volume_utils, "parse_sr_info")
def test_connect_to_volume_provider_old_sr(self, mock_parse, mock_find_sr,
mock_introduce_sr):
mock_parse.return_value = ("uuid", "label", "params")
mock_find_sr.return_value = "sr_ref"
ref, uuid = self.ops._connect_to_volume_provider({}, "name")
self.assertEqual("sr_ref", ref)
self.assertEqual("uuid", uuid)
mock_parse.assert_called_once_with({}, "Disk-for:name")
mock_find_sr.assert_called_once_with(self.session, "uuid")
self.assertFalse(mock_introduce_sr.called)
@mock.patch.object(volume_utils, "introduce_vdi")
def test_connect_hypervisor_to_volume_regular(self, mock_intro):
mock_intro.return_value = "vdi"
result = self.ops._connect_hypervisor_to_volume("sr", {})
self.assertEqual("vdi", result)
mock_intro.assert_called_once_with(self.session, "sr")
@mock.patch.object(volume_utils, "introduce_vdi")
def test_connect_hypervisor_to_volume_vdi(self, mock_intro):
mock_intro.return_value = "vdi"
conn = {"vdi_uuid": "id"}
result = self.ops._connect_hypervisor_to_volume("sr", conn)
self.assertEqual("vdi", result)
mock_intro.assert_called_once_with(self.session, "sr",
vdi_uuid="id")
@mock.patch.object(volume_utils, "introduce_vdi")
def test_connect_hypervisor_to_volume_lun(self, mock_intro):
mock_intro.return_value = "vdi"
conn = {"target_lun": "lun"}
result = self.ops._connect_hypervisor_to_volume("sr", conn)
self.assertEqual("vdi", result)
mock_intro.assert_called_once_with(self.session, "sr",
target_lun="lun")
@mock.patch.object(vm_utils, "is_vm_shutdown")
@mock.patch.object(vm_utils, "create_vbd")
def test_attach_volume_to_vm_plug(self, mock_vbd, mock_shutdown):
mock_vbd.return_value = "vbd"
mock_shutdown.return_value = False
with mock.patch.object(self.session.VBD, "plug") as mock_plug:
self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, True)
mock_plug.assert_called_once_with("vbd", "vm")
mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
bootable=False, osvol=True)
mock_shutdown.assert_called_once_with(self.session, "vm")
@mock.patch.object(vm_utils, "is_vm_shutdown")
@mock.patch.object(vm_utils, "create_vbd")
def test_attach_volume_to_vm_no_plug(self, mock_vbd, mock_shutdown):
mock_vbd.return_value = "vbd"
mock_shutdown.return_value = True
with mock.patch.object(self.session.VBD, "plug") as mock_plug:
self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, True)
self.assertFalse(mock_plug.called)
mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
bootable=False, osvol=True)
mock_shutdown.assert_called_once_with(self.session, "vm")
@mock.patch.object(vm_utils, "is_vm_shutdown")
@mock.patch.object(vm_utils, "create_vbd")
def test_attach_volume_to_vm_no_hotplug(self, mock_vbd, mock_shutdown):
mock_vbd.return_value = "vbd"
with mock.patch.object(self.session.VBD, "plug") as mock_plug:
self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, False)
self.assertFalse(mock_plug.called)
mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
bootable=False, osvol=True)
self.assertFalse(mock_shutdown.called)
class FindBadVolumeTestCase(VolumeOpsTestBase):
@mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
def test_find_bad_volumes_no_vbds(self, mock_get_all):
mock_get_all.return_value = []
result = self.ops.find_bad_volumes("vm_ref")
mock_get_all.assert_called_once_with("vm_ref")
self.assertEqual([], result)
@mock.patch.object(volume_utils, "find_sr_from_vbd")
@mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
def test_find_bad_volumes_no_bad_vbds(self, mock_get_all, mock_find_sr):
mock_get_all.return_value = ["1", "2"]
mock_find_sr.return_value = "sr_ref"
with mock.patch.object(self.session.SR, "scan") as mock_scan:
result = self.ops.find_bad_volumes("vm_ref")
mock_get_all.assert_called_once_with("vm_ref")
expected_find = [mock.call(self.session, "1"),
mock.call(self.session, "2")]
self.assertEqual(expected_find, mock_find_sr.call_args_list)
expected_scan = [mock.call("sr_ref"), mock.call("sr_ref")]
self.assertEqual(expected_scan, mock_scan.call_args_list)
self.assertEqual([], result)
@mock.patch.object(volume_utils, "find_sr_from_vbd")
@mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
def test_find_bad_volumes_bad_vbds(self, mock_get_all, mock_find_sr):
mock_get_all.return_value = ["vbd_ref"]
mock_find_sr.return_value = "sr_ref"
class FakeException(Exception):
details = ['SR_BACKEND_FAILURE_40', "", "", ""]
session = mock.Mock()
session.XenAPI.Failure = FakeException
self.ops._session = session
with mock.patch.object(session.SR, "scan") as mock_scan:
with mock.patch.object(session.VBD,
"get_device") as mock_get:
mock_scan.side_effect = FakeException
mock_get.return_value = "xvdb"
result = self.ops.find_bad_volumes("vm_ref")
mock_get_all.assert_called_once_with("vm_ref")
mock_scan.assert_called_once_with("sr_ref")
mock_get.assert_called_once_with("vbd_ref")
self.assertEqual(["/dev/xvdb"], result)
@mock.patch.object(volume_utils, "find_sr_from_vbd")
@mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
def test_find_bad_volumes_raises(self, mock_get_all, mock_find_sr):
mock_get_all.return_value = ["vbd_ref"]
mock_find_sr.return_value = "sr_ref"
class FakeException(Exception):
details = ['foo', "", "", ""]
session = mock.Mock()
session.XenAPI.Failure = FakeException
self.ops._session = session
with mock.patch.object(session.SR, "scan") as mock_scan:
with mock.patch.object(session.VBD,
"get_device") as mock_get:
mock_scan.side_effect = FakeException
mock_get.return_value = "xvdb"
self.assertRaises(FakeException,
self.ops.find_bad_volumes, "vm_ref")
mock_scan.assert_called_once_with("sr_ref")
class CleanupFromVDIsTestCase(VolumeOpsTestBase):
def _check_find_purge_calls(self, find_sr_from_vdi, purge_sr, vdi_refs,
sr_refs):
find_sr_calls = [mock.call(self.ops._session, vdi_ref) for vdi_ref
in vdi_refs]
find_sr_from_vdi.assert_has_calls(find_sr_calls)
purge_sr_calls = [mock.call(self.ops._session, sr_ref) for sr_ref
in sr_refs]
purge_sr.assert_has_calls(purge_sr_calls)
@mock.patch.object(volume_utils, 'find_sr_from_vdi')
@mock.patch.object(volume_utils, 'purge_sr')
def test_safe_cleanup_from_vdis(self, purge_sr, find_sr_from_vdi):
vdi_refs = ['vdi_ref1', 'vdi_ref2']
sr_refs = ['sr_ref1', 'sr_ref2']
find_sr_from_vdi.side_effect = sr_refs
self.ops.safe_cleanup_from_vdis(vdi_refs)
self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
sr_refs)
@mock.patch.object(volume_utils, 'find_sr_from_vdi',
side_effect=[exception.StorageError(reason=''), 'sr_ref2'])
@mock.patch.object(volume_utils, 'purge_sr')
def test_safe_cleanup_from_vdis_handles_find_sr_exception(self, purge_sr,
find_sr_from_vdi):
vdi_refs = ['vdi_ref1', 'vdi_ref2']
sr_refs = ['sr_ref2']
find_sr_from_vdi.side_effect = [exception.StorageError(reason=''),
sr_refs[0]]
self.ops.safe_cleanup_from_vdis(vdi_refs)
self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
sr_refs)
@mock.patch.object(volume_utils, 'find_sr_from_vdi')
@mock.patch.object(volume_utils, 'purge_sr')
def test_safe_cleanup_from_vdis_handles_purge_sr_exception(self, purge_sr,
find_sr_from_vdi):
vdi_refs = ['vdi_ref1', 'vdi_ref2']
sr_refs = ['sr_ref1', 'sr_ref2']
find_sr_from_vdi.side_effect = sr_refs
        purge_sr.side_effect = [test.TestingException, None]
self.ops.safe_cleanup_from_vdis(vdi_refs)
self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
sr_refs)
| apache-2.0 |
ORTI3D/ORTI3D_code | iliblast/ui_parameters.py | 1 | 3221 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_parameters.ui'
#
# Created: Sun Feb 16 10:03:57 2014
# by: PyQt4 UI code generator 4.8.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
from parameters import BaseParms
class Ui_Parameters(object):
def setupUi(self, Parameters,gui,core):
        self.gui, self.core = gui, core
self.Base = BaseParms(gui,core)
Parameters.setObjectName(_fromUtf8("Parameters"))
Parameters.resize(197, 348)
Parameters.setWindowTitle(QtGui.QApplication.translate("Parameters", "Parameters", None, QtGui.QApplication.UnicodeUTF8))
self.dictBox={}
        skey = sorted(self.Base.groups.keys())
        for i, g in enumerate(skey):
            self.dictBox[g] = Box(Parameters, self, g, i)
self.retranslateUi(Parameters)
QtCore.QMetaObject.connectSlotsByName(Parameters)
def retranslateUi(self, Parameters): pass
class Box:
def __init__(self,Parameters,parent,gr,nb):
self.box = QtGui.QGroupBox(Parameters)
self.parent = parent
y0=20+nb*60
self.box.setGeometry(QtCore.QRect(10, y0, 170, y0+40))
self.box.setTitle(QtGui.QApplication.translate("Parameters", gr, None, QtGui.QApplication.UnicodeUTF8))
self.box.setObjectName(_fromUtf8(gr))
self.hlWidget = QtGui.QWidget(self.box)
self.hlWidget.setGeometry(QtCore.QRect(9, 15, 158, 28))
self.hlWidget.setObjectName(_fromUtf8("horizontalLayoutWidget"))
self.hl = QtGui.QHBoxLayout(self.hlWidget)
self.hl.setMargin(0)
self.hl.setObjectName(_fromUtf8("horizontalLayout"))
for i in range(len(parent.Base.groups[gr])):
n=parent.Base.groups[gr][i]
shortName = gr[2:4]+'_'+n
# tries to find buttons in addins
butA = parent.core.addin.addButton(self,gr,i) # a list of buttons
if butA !=None:
for short,name in butA :
buta = QtGui.QPushButton(self.hlWidget)
buta.setText(QtGui.QApplication.translate("Parameters", short, None, QtGui.QApplication.UnicodeUTF8))
buta.setObjectName(_fromUtf8(name))
self.hl.addWidget(buta)
buta.clicked.connect(self.onButton)
but = QtGui.QPushButton(self.hlWidget)
but.setToolTip(QtGui.QApplication.translate("Parameters", n, None, QtGui.QApplication.UnicodeUTF8))
but.setText(QtGui.QApplication.translate("Parameters", n, None, QtGui.QApplication.UnicodeUTF8))
# icon = QtGui.QIcon()
#icon.addPixmap(QtGui.QPixmap(_fromUtf8("F:/iPHT3D/Lib2_b/utils/Ch_P.gif")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
#but.setIcon(icon)
but.setObjectName(_fromUtf8(n))
but.clicked.connect(self.onButton)
self.hl.addWidget(but)
def onButton(self):
s = self.parent.gui.sender()
name = s.objectName()
self.parent.Base.action(name)
| gpl-3.0 |
t0in4/django | tests/generic_views/test_edit.py | 199 | 19217 | from __future__ import unicode_literals
import warnings
from django import forms
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.test import (
SimpleTestCase, TestCase, ignore_warnings, override_settings,
)
from django.test.client import RequestFactory
from django.utils.deprecation import RemovedInDjango110Warning
from django.views.generic.base import View
from django.views.generic.edit import CreateView, FormMixin, ModelFormMixin
from . import views
from .models import Artist, Author
from .test_forms import AuthorForm
class FormMixinTests(SimpleTestCase):
def test_initial_data(self):
""" Test instance independence of initial data dict (see #16138) """
initial_1 = FormMixin().get_initial()
initial_1['foo'] = 'bar'
initial_2 = FormMixin().get_initial()
self.assertNotEqual(initial_1, initial_2)
def test_get_prefix(self):
""" Test prefix can be set (see #18872) """
test_string = 'test'
rf = RequestFactory()
get_request = rf.get('/')
class TestFormMixin(FormMixin):
request = get_request
default_kwargs = TestFormMixin().get_form_kwargs()
self.assertIsNone(default_kwargs.get('prefix'))
set_mixin = TestFormMixin()
set_mixin.prefix = test_string
set_kwargs = set_mixin.get_form_kwargs()
self.assertEqual(test_string, set_kwargs.get('prefix'))
def test_get_form(self):
class TestFormMixin(FormMixin):
request = RequestFactory().get('/')
self.assertIsInstance(
TestFormMixin().get_form(forms.Form), forms.Form,
'get_form() should use provided form class.'
)
class FormClassTestFormMixin(TestFormMixin):
form_class = forms.Form
self.assertIsInstance(
FormClassTestFormMixin().get_form(), forms.Form,
'get_form() should fallback to get_form_class() if none is provided.'
)
def test_get_form_missing_form_class_default_value(self):
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always')
class MissingDefaultValue(FormMixin):
request = RequestFactory().get('/')
form_class = forms.Form
def get_form(self, form_class):
return form_class(**self.get_form_kwargs())
self.assertEqual(len(w), 1)
self.assertEqual(w[0].category, RemovedInDjango110Warning)
self.assertEqual(
str(w[0].message),
'`generic_views.test_edit.MissingDefaultValue.get_form` method '
'must define a default value for its `form_class` argument.'
)
self.assertIsInstance(
MissingDefaultValue().get_form(), forms.Form,
)
def test_get_context_data(self):
class FormContext(FormMixin):
request = RequestFactory().get('/')
form_class = forms.Form
self.assertIsInstance(FormContext().get_context_data()['form'], forms.Form)
@override_settings(ROOT_URLCONF='generic_views.urls')
class BasicFormTests(TestCase):
def test_post_data(self):
res = self.client.post('/contact/', {'name': "Me", 'message': "Hello"})
self.assertRedirects(res, '/list/authors/')
class ModelFormMixinTests(SimpleTestCase):
def test_get_form(self):
form_class = views.AuthorGetQuerySetFormView().get_form_class()
self.assertEqual(form_class._meta.model, Author)
def test_get_form_checks_for_object(self):
mixin = ModelFormMixin()
mixin.request = RequestFactory().get('/')
self.assertEqual({'initial': {}, 'prefix': None},
mixin.get_form_kwargs())
@override_settings(ROOT_URLCONF='generic_views.urls')
class CreateViewTests(TestCase):
def test_create(self):
res = self.client.get('/edit/authors/create/')
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], forms.ModelForm)
self.assertIsInstance(res.context['view'], View)
self.assertNotIn('object', res.context)
self.assertNotIn('author', res.context)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
res = self.client.post('/edit/authors/create/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_create_invalid(self):
res = self.client.post('/edit/authors/create/',
{'name': 'A' * 101, 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
self.assertEqual(len(res.context['form'].errors), 1)
self.assertEqual(Author.objects.count(), 0)
def test_create_with_object_url(self):
res = self.client.post('/edit/artists/create/',
{'name': 'Rene Magritte'})
self.assertEqual(res.status_code, 302)
artist = Artist.objects.get(name='Rene Magritte')
self.assertRedirects(res, '/detail/artist/%d/' % artist.pk)
self.assertQuerysetEqual(Artist.objects.all(), ['<Artist: Rene Magritte>'])
def test_create_with_redirect(self):
res = self.client.post('/edit/authors/create/redirect/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/edit/authors/create/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
@ignore_warnings(category=RemovedInDjango110Warning)
def test_create_with_interpolated_redirect(self):
res = self.client.post(
'/edit/authors/create/interpolate_redirect/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'}
)
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
self.assertEqual(res.status_code, 302)
pk = Author.objects.first().pk
self.assertRedirects(res, '/edit/author/%d/update/' % pk)
# Also test with escaped chars in URL
res = self.client.post(
'/edit/authors/create/interpolate_redirect_nonascii/',
{'name': 'John Doe', 'slug': 'john-doe'}
)
self.assertEqual(res.status_code, 302)
pk = Author.objects.get(name='John Doe').pk
self.assertRedirects(res, '/%C3%A9dit/author/{}/update/'.format(pk))
def test_create_with_special_properties(self):
res = self.client.get('/edit/authors/create/special/')
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], views.AuthorForm)
self.assertNotIn('object', res.context)
self.assertNotIn('author', res.context)
self.assertTemplateUsed(res, 'generic_views/form.html')
res = self.client.post('/edit/authors/create/special/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
obj = Author.objects.get(slug='randall-munroe')
self.assertRedirects(res, reverse('author_detail', kwargs={'pk': obj.pk}))
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_create_without_redirect(self):
try:
self.client.post('/edit/authors/create/naive/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.fail('Should raise exception -- No redirect URL provided, and no get_absolute_url provided')
except ImproperlyConfigured:
pass
def test_create_restricted(self):
res = self.client.post('/edit/authors/create/restricted/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/accounts/login/?next=/edit/authors/create/restricted/')
def test_create_view_with_restricted_fields(self):
class MyCreateView(CreateView):
model = Author
fields = ['name']
self.assertEqual(list(MyCreateView().get_form_class().base_fields),
['name'])
def test_create_view_all_fields(self):
class MyCreateView(CreateView):
model = Author
fields = '__all__'
self.assertEqual(list(MyCreateView().get_form_class().base_fields),
['name', 'slug'])
def test_create_view_without_explicit_fields(self):
class MyCreateView(CreateView):
model = Author
message = (
"Using ModelFormMixin (base class of MyCreateView) without the "
"'fields' attribute is prohibited."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
MyCreateView().get_form_class()
def test_define_both_fields_and_form_class(self):
class MyCreateView(CreateView):
model = Author
form_class = AuthorForm
fields = ['name']
message = "Specifying both 'fields' and 'form_class' is not permitted."
with self.assertRaisesMessage(ImproperlyConfigured, message):
MyCreateView().get_form_class()
@override_settings(ROOT_URLCONF='generic_views.urls')
class UpdateViewTests(TestCase):
def test_update_post(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/%d/update/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], forms.ModelForm)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk))
self.assertTemplateUsed(res, 'generic_views/author_form.html')
# Modification with both POST and PUT (browser compatible)
res = self.client.post('/edit/author/%d/update/' % a.pk,
{'name': 'Randall Munroe (xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (xkcd)>'])
def test_update_invalid(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.post('/edit/author/%d/update/' % a.pk,
{'name': 'A' * 101, 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
self.assertEqual(len(res.context['form'].errors), 1)
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_update_with_object_url(self):
a = Artist.objects.create(name='Rene Magritte')
res = self.client.post('/edit/artists/%d/update/' % a.pk,
{'name': 'Rene Magritte'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/detail/artist/%d/' % a.pk)
self.assertQuerysetEqual(Artist.objects.all(), ['<Artist: Rene Magritte>'])
def test_update_with_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.post('/edit/author/%d/update/redirect/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/edit/authors/create/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
@ignore_warnings(category=RemovedInDjango110Warning)
def test_update_with_interpolated_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.post(
'/edit/author/%d/update/interpolate_redirect/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'}
)
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
self.assertEqual(res.status_code, 302)
pk = Author.objects.first().pk
self.assertRedirects(res, '/edit/author/%d/update/' % pk)
# Also test with escaped chars in URL
res = self.client.post(
'/edit/author/%d/update/interpolate_redirect_nonascii/' % a.pk,
{'name': 'John Doe', 'slug': 'john-doe'}
)
self.assertEqual(res.status_code, 302)
pk = Author.objects.get(name='John Doe').pk
self.assertRedirects(res, '/%C3%A9dit/author/{}/update/'.format(pk))
def test_update_with_special_properties(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/%d/update/special/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], views.AuthorForm)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['thingy'], Author.objects.get(pk=a.pk))
self.assertNotIn('author', res.context)
self.assertTemplateUsed(res, 'generic_views/form.html')
res = self.client.post('/edit/author/%d/update/special/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/detail/author/%d/' % a.pk)
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
def test_update_without_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
# Should raise exception -- No redirect URL provided, and no
# get_absolute_url provided
with self.assertRaises(ImproperlyConfigured):
self.client.post('/edit/author/%d/update/naive/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
def test_update_get_object(self):
a = Author.objects.create(
pk=1,
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/update/')
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], forms.ModelForm)
self.assertIsInstance(res.context['view'], View)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk))
self.assertTemplateUsed(res, 'generic_views/author_form.html')
# Modification with both POST and PUT (browser compatible)
res = self.client.post('/edit/author/update/',
{'name': 'Randall Munroe (xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (xkcd)>'])
@override_settings(ROOT_URLCONF='generic_views.urls')
class DeleteViewTests(TestCase):
def test_delete_by_post(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.get('/edit/author/%d/delete/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk))
self.assertTemplateUsed(res, 'generic_views/author_confirm_delete.html')
# Deletion with POST
res = self.client.post('/edit/author/%d/delete/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_by_delete(self):
# Deletion with browser compatible DELETE method
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.delete('/edit/author/%d/delete/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_with_redirect(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.post('/edit/author/%d/delete/redirect/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/edit/authors/create/')
self.assertQuerysetEqual(Author.objects.all(), [])
@ignore_warnings(category=RemovedInDjango110Warning)
def test_delete_with_interpolated_redirect(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.post('/edit/author/%d/delete/interpolate_redirect/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/edit/authors/create/?deleted=%d' % a.pk)
self.assertQuerysetEqual(Author.objects.all(), [])
# Also test with escaped chars in URL
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.post('/edit/author/{}/delete/interpolate_redirect_nonascii/'.format(a.pk))
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/%C3%A9dit/authors/create/?deleted={}'.format(a.pk))
def test_delete_with_special_properties(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.get('/edit/author/%d/delete/special/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['thingy'], Author.objects.get(pk=a.pk))
self.assertNotIn('author', res.context)
self.assertTemplateUsed(res, 'generic_views/confirm_delete.html')
res = self.client.post('/edit/author/%d/delete/special/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_without_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
# Should raise exception -- No redirect URL provided, and no
# get_absolute_url provided
with self.assertRaises(ImproperlyConfigured):
self.client.post('/edit/author/%d/delete/naive/' % a.pk)
| bsd-3-clause |
tmtowtdi/MontyLacuna | t/bldgs/intelligence.py | 1 | 2327 |
import os, sys
bindir = os.path.abspath(os.path.dirname(sys.argv[0]))
libdir = bindir + "/../../lib"
sys.path.append(libdir)
import lacuna as lac
from lacuna.exceptions import CaptchaResponseError
glc = lac.clients.Member(
config_file = bindir + "/../../etc/lacuna.cfg",
#config_section = 'my_sitter',
config_section = 'play_test',
)
my_planet = glc.get_body_byname( 'bmots rof 2.1' )
# Note: the name "int" shadows the Python builtin here; it is kept unchanged
# because all of the commented examples below refer to it.
int = my_planet.get_building_coords( -5, -2 )
### Show building status
###
#view = int.view()
#print( "We have {} of a max {} spies, and {} are in training, which takes {} seconds."
# .format(view.current, view.maximum, view.in_training, view.training_costs['time'])
#)
### View spies' details, paginated
###
#spies = int.view_spies( 2 ) # look at spies on page 2
#spy = spies[0]
#print( "Your first spy is based from {}, assigned to {}, and is doing task {}."
# .format( spy.based_from.name, spy.assigned_to.name, spy.assignment)
#)
### View all spies' details, non-paginated
###
#spies = int.view_all_spies()
#spy = spies[0]
#print( "Your first spy is based from {}, assigned to {}, and is doing task {}."
# .format( spy.based_from.name, spy.assigned_to.name, spy.assignment)
#)
### Burn a spy
###
#spies = int.view_all_spies()
#last_spy = spies[-1]
#int.burn_spy( last_spy.id )
#print("Spy", last_spy.name, "has been burned.")
### Train a new spy to replace the burned one
###
#rv = int.train_spy( )
#print( rv['trained'], "new spies are being trained." )
### Subsidize that training
###
#int.subsidize_training( )
#print( "Spy training has been subsidized" )
### Rename the last spy in the list
###
#spies = int.view_all_spies()
#int.name_spy( spies[-1].id, "FlurbleBlargle" )
### Assign a spy to a task
### Assigns to the last spy in the list. He should be available if you
### followed the rest of the tests in here; we just trained him and subsidized
### his training. Change the "-1" subscript as needed if your last spy is not
### available.
#spies = int.view_all_spies()
#spy, result = int.assign_spy( spies[-1].id, 'Counter Espionage' )
#print( "The result of assigning spy {} to mission {} was {} because {}."
# .format(spy.name, spy.assignment, result.result, result.reason )
#)
### So we can see all prompts for captchas on subsequent runs.
glc.logout()
| mit |
elkingtonmcb/pattern | pattern/server/cherrypy/cherrypy/_cpwsgi.py | 41 | 16040 | """WSGI interface (see PEP 333 and 3333).
Note that WSGI environ keys and values are 'native strings'; that is,
whatever the type of "" is. For Python 2, that's a byte string; for Python 3,
it's a unicode string. But PEP 3333 says: "even if Python's str type is
actually Unicode "under the hood", the content of native strings must
still be translatable to bytes via the Latin-1 encoding!"
"""
import sys as _sys
import cherrypy as _cherrypy
from cherrypy._cpcompat import BytesIO, bytestr, ntob, ntou, py3k, unicodestr
from cherrypy import _cperror
from cherrypy.lib import httputil
def downgrade_wsgi_ux_to_1x(environ):
"""Return a new environ dict for WSGI 1.x from the given WSGI u.x environ."""
env1x = {}
url_encoding = environ[ntou('wsgi.url_encoding')]
for k, v in list(environ.items()):
if k in [ntou('PATH_INFO'), ntou('SCRIPT_NAME'), ntou('QUERY_STRING')]:
v = v.encode(url_encoding)
elif isinstance(v, unicodestr):
v = v.encode('ISO-8859-1')
env1x[k.encode('ISO-8859-1')] = v
return env1x
class VirtualHost(object):
"""Select a different WSGI application based on the Host header.
This can be useful when running multiple sites within one CP server.
It allows several domains to point to different applications. For example::
root = Root()
RootApp = cherrypy.Application(root)
Domain2App = cherrypy.Application(root)
SecureApp = cherrypy.Application(Secure())
vhost = cherrypy._cpwsgi.VirtualHost(RootApp,
domains={'www.domain2.example': Domain2App,
'www.domain2.example:443': SecureApp,
})
cherrypy.tree.graft(vhost)
"""
default = None
"""Required. The default WSGI application."""
use_x_forwarded_host = True
"""If True (the default), any "X-Forwarded-Host"
request header will be used instead of the "Host" header. This
is commonly added by HTTP servers (such as Apache) when proxying."""
domains = {}
"""A dict of {host header value: application} pairs.
The incoming "Host" request header is looked up in this dict,
and, if a match is found, the corresponding WSGI application
will be called instead of the default. Note that you often need
separate entries for "example.com" and "www.example.com".
In addition, "Host" headers may contain the port number.
"""
def __init__(self, default, domains=None, use_x_forwarded_host=True):
self.default = default
self.domains = domains or {}
self.use_x_forwarded_host = use_x_forwarded_host
def __call__(self, environ, start_response):
domain = environ.get('HTTP_HOST', '')
if self.use_x_forwarded_host:
domain = environ.get("HTTP_X_FORWARDED_HOST", domain)
nextapp = self.domains.get(domain)
if nextapp is None:
nextapp = self.default
return nextapp(environ, start_response)
class InternalRedirector(object):
"""WSGI middleware that handles raised cherrypy.InternalRedirect."""
def __init__(self, nextapp, recursive=False):
self.nextapp = nextapp
self.recursive = recursive
def __call__(self, environ, start_response):
redirections = []
while True:
environ = environ.copy()
try:
return self.nextapp(environ, start_response)
except _cherrypy.InternalRedirect:
ir = _sys.exc_info()[1]
sn = environ.get('SCRIPT_NAME', '')
path = environ.get('PATH_INFO', '')
qs = environ.get('QUERY_STRING', '')
# Add the *previous* path_info + qs to redirections.
old_uri = sn + path
if qs:
old_uri += "?" + qs
redirections.append(old_uri)
if not self.recursive:
# Check to see if the new URI has been redirected to already
new_uri = sn + ir.path
if ir.query_string:
new_uri += "?" + ir.query_string
if new_uri in redirections:
ir.request.close()
raise RuntimeError("InternalRedirector visited the "
"same URL twice: %r" % new_uri)
# Munge the environment and try again.
environ['REQUEST_METHOD'] = "GET"
environ['PATH_INFO'] = ir.path
environ['QUERY_STRING'] = ir.query_string
environ['wsgi.input'] = BytesIO()
environ['CONTENT_LENGTH'] = "0"
environ['cherrypy.previous_request'] = ir.request
class ExceptionTrapper(object):
"""WSGI middleware that traps exceptions."""
def __init__(self, nextapp, throws=(KeyboardInterrupt, SystemExit)):
self.nextapp = nextapp
self.throws = throws
def __call__(self, environ, start_response):
return _TrappedResponse(self.nextapp, environ, start_response, self.throws)
class _TrappedResponse(object):
response = iter([])
def __init__(self, nextapp, environ, start_response, throws):
self.nextapp = nextapp
self.environ = environ
self.start_response = start_response
self.throws = throws
self.started_response = False
self.response = self.trap(self.nextapp, self.environ, self.start_response)
self.iter_response = iter(self.response)
def __iter__(self):
self.started_response = True
return self
if py3k:
def __next__(self):
return self.trap(next, self.iter_response)
else:
def next(self):
return self.trap(self.iter_response.next)
def close(self):
if hasattr(self.response, 'close'):
self.response.close()
def trap(self, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except self.throws:
raise
except StopIteration:
raise
except:
tb = _cperror.format_exc()
#print('trapped (started %s):' % self.started_response, tb)
_cherrypy.log(tb, severity=40)
if not _cherrypy.request.show_tracebacks:
tb = ""
s, h, b = _cperror.bare_error(tb)
if py3k:
# What fun.
s = s.decode('ISO-8859-1')
h = [(k.decode('ISO-8859-1'), v.decode('ISO-8859-1'))
for k, v in h]
if self.started_response:
# Empty our iterable (so future calls raise StopIteration)
self.iter_response = iter([])
else:
self.iter_response = iter(b)
try:
self.start_response(s, h, _sys.exc_info())
except:
# "The application must not trap any exceptions raised by
# start_response, if it called start_response with exc_info.
# Instead, it should allow such exceptions to propagate
# back to the server or gateway."
# But we still log and call close() to clean up ourselves.
_cherrypy.log(traceback=True, severity=40)
raise
if self.started_response:
return ntob("").join(b)
else:
return b
# WSGI-to-CP Adapter #
class AppResponse(object):
"""WSGI response iterable for CherryPy applications."""
def __init__(self, environ, start_response, cpapp):
self.cpapp = cpapp
try:
if not py3k:
if environ.get(ntou('wsgi.version')) == (ntou('u'), 0):
environ = downgrade_wsgi_ux_to_1x(environ)
self.environ = environ
self.run()
r = _cherrypy.serving.response
outstatus = r.output_status
if not isinstance(outstatus, bytestr):
raise TypeError("response.output_status is not a byte string.")
outheaders = []
for k, v in r.header_list:
if not isinstance(k, bytestr):
raise TypeError("response.header_list key %r is not a byte string." % k)
if not isinstance(v, bytestr):
raise TypeError("response.header_list value %r is not a byte string." % v)
outheaders.append((k, v))
if py3k:
# According to PEP 3333, when using Python 3, the response status
# and headers must be bytes masquerading as unicode; that is, they
# must be of type "str" but are restricted to code points in the
# "latin-1" set.
outstatus = outstatus.decode('ISO-8859-1')
outheaders = [(k.decode('ISO-8859-1'), v.decode('ISO-8859-1'))
for k, v in outheaders]
self.iter_response = iter(r.body)
self.write = start_response(outstatus, outheaders)
except:
self.close()
raise
def __iter__(self):
return self
if py3k:
def __next__(self):
return next(self.iter_response)
else:
def next(self):
return self.iter_response.next()
def close(self):
"""Close and de-reference the current request and response. (Core)"""
self.cpapp.release_serving()
def run(self):
"""Create a Request object using environ."""
env = self.environ.get
local = httputil.Host('', int(env('SERVER_PORT', 80)),
env('SERVER_NAME', ''))
remote = httputil.Host(env('REMOTE_ADDR', ''),
int(env('REMOTE_PORT', -1) or -1),
env('REMOTE_HOST', ''))
scheme = env('wsgi.url_scheme')
sproto = env('ACTUAL_SERVER_PROTOCOL', "HTTP/1.1")
request, resp = self.cpapp.get_serving(local, remote, scheme, sproto)
# LOGON_USER is served by IIS, and is the name of the
# user after having been mapped to a local account.
# Both IIS and Apache set REMOTE_USER, when possible.
request.login = env('LOGON_USER') or env('REMOTE_USER') or None
request.multithread = self.environ['wsgi.multithread']
request.multiprocess = self.environ['wsgi.multiprocess']
request.wsgi_environ = self.environ
request.prev = env('cherrypy.previous_request', None)
meth = self.environ['REQUEST_METHOD']
path = httputil.urljoin(self.environ.get('SCRIPT_NAME', ''),
self.environ.get('PATH_INFO', ''))
qs = self.environ.get('QUERY_STRING', '')
if py3k:
# This isn't perfect; if the given PATH_INFO is in the wrong encoding,
# it may fail to match the appropriate config section URI. But meh.
old_enc = self.environ.get('wsgi.url_encoding', 'ISO-8859-1')
new_enc = self.cpapp.find_config(self.environ.get('PATH_INFO', ''),
"request.uri_encoding", 'utf-8')
if new_enc.lower() != old_enc.lower():
# Even though the path and qs are unicode, the WSGI server is
# required by PEP 3333 to coerce them to ISO-8859-1 masquerading
# as unicode. So we have to encode back to bytes and then decode
# again using the "correct" encoding.
try:
u_path = path.encode(old_enc).decode(new_enc)
u_qs = qs.encode(old_enc).decode(new_enc)
except (UnicodeEncodeError, UnicodeDecodeError):
# Just pass them through without transcoding and hope.
pass
else:
# Only set transcoded values if they both succeed.
path = u_path
qs = u_qs
rproto = self.environ.get('SERVER_PROTOCOL')
headers = self.translate_headers(self.environ)
rfile = self.environ['wsgi.input']
request.run(meth, path, qs, rproto, headers, rfile)
headerNames = {'HTTP_CGI_AUTHORIZATION': 'Authorization',
'CONTENT_LENGTH': 'Content-Length',
'CONTENT_TYPE': 'Content-Type',
'REMOTE_HOST': 'Remote-Host',
'REMOTE_ADDR': 'Remote-Addr',
}
def translate_headers(self, environ):
"""Translate CGI-environ header names to HTTP header names."""
for cgiName in environ:
# We assume all incoming header keys are uppercase already.
if cgiName in self.headerNames:
yield self.headerNames[cgiName], environ[cgiName]
elif cgiName[:5] == "HTTP_":
# Hackish attempt at recovering original header names.
translatedHeader = cgiName[5:].replace("_", "-")
yield translatedHeader, environ[cgiName]
class CPWSGIApp(object):
"""A WSGI application object for a CherryPy Application."""
pipeline = [('ExceptionTrapper', ExceptionTrapper),
('InternalRedirector', InternalRedirector),
]
"""A list of (name, wsgiapp) pairs. Each 'wsgiapp' MUST be a
constructor that takes an initial, positional 'nextapp' argument,
plus optional keyword arguments, and returns a WSGI application
(that takes environ and start_response arguments). The 'name' can
be any you choose, and will correspond to keys in self.config."""
head = None
"""Rather than nest all apps in the pipeline on each call, it's only
done the first time, and the result is memoized into self.head. Set
this to None again if you change self.pipeline after calling self."""
config = {}
"""A dict whose keys match names listed in the pipeline. Each
value is a further dict which will be passed to the corresponding
named WSGI callable (from the pipeline) as keyword arguments."""
response_class = AppResponse
"""The class to instantiate and return as the next app in the WSGI chain."""
def __init__(self, cpapp, pipeline=None):
self.cpapp = cpapp
self.pipeline = self.pipeline[:]
if pipeline:
self.pipeline.extend(pipeline)
self.config = self.config.copy()
def tail(self, environ, start_response):
"""WSGI application callable for the actual CherryPy application.
You probably shouldn't call this; call self.__call__ instead,
so that any WSGI middleware in self.pipeline can run first.
"""
return self.response_class(environ, start_response, self.cpapp)
def __call__(self, environ, start_response):
head = self.head
if head is None:
# Create and nest the WSGI apps in our pipeline (in reverse order).
# Then memoize the result in self.head.
head = self.tail
for name, callable in self.pipeline[::-1]:
conf = self.config.get(name, {})
head = callable(head, **conf)
self.head = head
return head(environ, start_response)
def namespace_handler(self, k, v):
"""Config handler for the 'wsgi' namespace."""
if k == "pipeline":
# Note this allows multiple 'wsgi.pipeline' config entries
# (but each entry will be processed in a 'random' order).
# It should also allow developers to set default middleware
# in code (passed to self.__init__) that deployers can add to
# (but not remove) via config.
self.pipeline.extend(v)
elif k == "response_class":
self.response_class = v
else:
name, arg = k.split(".", 1)
bucket = self.config.setdefault(name, {})
bucket[arg] = v
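# --- Illustrative sketch (not part of the original CherryPy file) -----------
# The pipeline machinery above wraps the CherryPy application in WSGI
# middleware. A minimal, hypothetical middleware following the constructor
# contract documented for `pipeline` (first positional argument is the next
# app, keyword arguments come from self.config) could be wired up like this:
#
#   class TagMiddleware(object):
#       def __init__(self, nextapp, tag='demo'):
#           self.nextapp, self.tag = nextapp, tag
#       def __call__(self, environ, start_response):
#           environ['x.tag'] = self.tag            # annotate each request
#           return self.nextapp(environ, start_response)
#
#   wsgi_app = CPWSGIApp(cpapp, pipeline=[('TagMiddleware', TagMiddleware)])
#   wsgi_app.config['TagMiddleware'] = {'tag': 'prod'}
#
# On the first call, __call__() nests ExceptionTrapper, InternalRedirector and
# TagMiddleware around self.tail and memoizes the composed stack in self.head.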
| bsd-3-clause |
hgl888/chromium-crosswalk-efl | build/android/generate_emma_html.py | 96 | 2874 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Aggregates EMMA coverage files to produce html output."""
import fnmatch
import json
import optparse
import os
import sys
from pylib import cmd_helper
from pylib import constants
def _GetFilesWithExt(root_dir, ext):
"""Gets all files with a given extension.
Args:
root_dir: Directory in which to search for files.
    ext: Extension to look for, without the leading dot (a "*." prefix is
      added by the fnmatch filter below).
Returns:
A list of absolute paths to files that match.
"""
files = []
for root, _, filenames in os.walk(root_dir):
basenames = fnmatch.filter(filenames, '*.' + ext)
files.extend([os.path.join(root, basename)
for basename in basenames])
return files
def main():
option_parser = optparse.OptionParser()
option_parser.add_option('--output', help='HTML output filename.')
option_parser.add_option('--coverage-dir', default=None,
help=('Root of the directory in which to search for '
'coverage data (.ec) files.'))
option_parser.add_option('--metadata-dir', default=None,
help=('Root of the directory in which to search for '
'coverage metadata (.em) files.'))
option_parser.add_option('--cleanup', action='store_true',
help=('If set, removes coverage files generated at '
'runtime.'))
options, _ = option_parser.parse_args()
if not (options.coverage_dir and options.metadata_dir and options.output):
option_parser.error('One or more mandatory options are missing.')
coverage_files = _GetFilesWithExt(options.coverage_dir, 'ec')
metadata_files = _GetFilesWithExt(options.metadata_dir, 'em')
print 'Found coverage files: %s' % str(coverage_files)
print 'Found metadata files: %s' % str(metadata_files)
sources = []
for f in metadata_files:
sources_file = os.path.splitext(f)[0] + '_sources.txt'
with open(sources_file, 'r') as sf:
sources.extend(json.load(sf))
sources = [os.path.join(constants.DIR_SOURCE_ROOT, s) for s in sources]
print 'Sources: %s' % sources
input_args = []
for f in coverage_files + metadata_files:
input_args.append('-in')
input_args.append(f)
output_args = ['-Dreport.html.out.file', options.output]
source_args = ['-sp', ','.join(sources)]
exit_code = cmd_helper.RunCmd(
['java', '-cp',
os.path.join(constants.ANDROID_SDK_ROOT, 'tools', 'lib', 'emma.jar'),
'emma', 'report', '-r', 'html']
+ input_args + output_args + source_args)
if options.cleanup:
for f in coverage_files:
os.remove(f)
return exit_code
if __name__ == '__main__':
sys.exit(main())
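# Illustrative note (not part of the original script): a typical invocation,
# with hypothetical output paths, looks like
#   build/android/generate_emma_html.py --coverage-dir out/coverage \
#       --metadata-dir out/Debug --output emma/index.html --cleanup
# which aggregates every *.ec/*.em file found under those directories into a
# single EMMA HTML report.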
| bsd-3-clause |
coreynicholson/youtube-dl | youtube_dl/extractor/ruv.py | 49 | 3359 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
determine_ext,
unified_timestamp,
)
class RuvIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ruv\.is/(?:sarpurinn/[^/]+|node)/(?P<id>[^/]+(?:/\d+)?)'
_TESTS = [{
# m3u8
'url': 'http://ruv.is/sarpurinn/ruv-aukaras/fh-valur/20170516',
'md5': '66347652f4e13e71936817102acc1724',
'info_dict': {
'id': '1144499',
'display_id': 'fh-valur/20170516',
'ext': 'mp4',
'title': 'FH - Valur',
'description': 'Bein útsending frá 3. leik FH og Vals í úrslitum Olísdeildar karla í handbolta.',
'timestamp': 1494963600,
'upload_date': '20170516',
},
}, {
# mp3
'url': 'http://ruv.is/sarpurinn/ras-2/morgunutvarpid/20170619',
'md5': '395ea250c8a13e5fdb39d4670ef85378',
'info_dict': {
'id': '1153630',
'display_id': 'morgunutvarpid/20170619',
'ext': 'mp3',
'title': 'Morgunútvarpið',
'description': 'md5:a4cf1202c0a1645ca096b06525915418',
'timestamp': 1497855000,
'upload_date': '20170619',
},
}, {
'url': 'http://ruv.is/sarpurinn/ruv/frettir/20170614',
'only_matching': True,
}, {
'url': 'http://www.ruv.is/node/1151854',
'only_matching': True,
}, {
'url': 'http://ruv.is/sarpurinn/klippa/secret-soltice-hefst-a-morgun',
'only_matching': True,
}, {
'url': 'http://ruv.is/sarpurinn/ras-1/morgunvaktin/20170619',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
title = self._og_search_title(webpage)
FIELD_RE = r'video\.%s\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1'
media_url = self._html_search_regex(
FIELD_RE % 'src', webpage, 'video URL', group='url')
video_id = self._search_regex(
r'<link\b[^>]+\bhref=["\']https?://www\.ruv\.is/node/(\d+)',
webpage, 'video id', default=display_id)
ext = determine_ext(media_url)
if ext == 'm3u8':
formats = self._extract_m3u8_formats(
media_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls')
elif ext == 'mp3':
formats = [{
'format_id': 'mp3',
'url': media_url,
'vcodec': 'none',
}]
else:
formats = [{
'url': media_url,
}]
description = self._og_search_description(webpage, default=None)
thumbnail = self._og_search_thumbnail(
webpage, default=None) or self._search_regex(
FIELD_RE % 'poster', webpage, 'thumbnail', fatal=False)
timestamp = unified_timestamp(self._html_search_meta(
'article:published_time', webpage, 'timestamp', fatal=False))
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'formats': formats,
}
| unlicense |
Parisson/TimeSide | timeside/plugins/grapher/waveform_transparent.py | 1 | 2582 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2007-2010 Guillaume Pellerin <[email protected]>
# Copyright (c) 2010 Olivier Guilyardi <[email protected]>
# This file is part of TimeSide.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from timeside.core import implements, interfacedoc
from timeside.core.api import IGrapher
from timeside.plugins.grapher.waveform_simple import Waveform
from . utils import peaks
class WaveformTransparent(Waveform):
"""Transparent waveform.
"""
implements(IGrapher)
@interfacedoc
def __init__(self, width=1024, height=256, bg_color=None,
color_scheme='default'):
super(WaveformTransparent, self).__init__(
width, height, bg_color, color_scheme)
self.line_color = (255, 255, 255)
@staticmethod
@interfacedoc
def id():
return "waveform_transparent"
@staticmethod
@interfacedoc
def name():
return "Waveform transparent"
@staticmethod
@interfacedoc
def version():
return "1.0"
@interfacedoc
def setup(self, channels=None, samplerate=None, blocksize=None,
totalframes=None):
super(WaveformTransparent, self).setup(
channels, samplerate, blocksize, totalframes)
@interfacedoc
def process(self, frames, eod=False):
if len(frames) != 1:
buffer = frames[:, 0]
buffer.shape = (len(buffer), 1)
for samples, end in self.pixels_adapter.process(buffer, eod):
if self.pixel_cursor < self.image_width - 1:
self.draw_peaks_inverted(
self.pixel_cursor, peaks(samples), self.line_color)
self.pixel_cursor += 1
if self.pixel_cursor == self.image_width - 1:
self.draw_peaks_inverted(
self.pixel_cursor, peaks(samples), self.line_color)
self.pixel_cursor += 1
return frames, eod
| agpl-3.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/django/db/backends/mysql/validation.py | 47 | 2611 | from django.core import checks
from django.db.backends.base.validation import BaseDatabaseValidation
from django.utils.version import get_docs_version
class DatabaseValidation(BaseDatabaseValidation):
def check(self, **kwargs):
issues = super(DatabaseValidation, self).check(**kwargs)
issues.extend(self._check_sql_mode(**kwargs))
return issues
def _check_sql_mode(self, **kwargs):
with self.connection.cursor() as cursor:
cursor.execute("SELECT @@sql_mode")
sql_mode = cursor.fetchone()
modes = set(sql_mode[0].split(',') if sql_mode else ())
if not (modes & {'STRICT_TRANS_TABLES', 'STRICT_ALL_TABLES'}):
return [checks.Warning(
"MySQL Strict Mode is not set for database connection '%s'" % self.connection.alias,
hint="MySQL's Strict Mode fixes many data integrity problems in MySQL, "
"such as data truncation upon insertion, by escalating warnings into "
"errors. It is strongly recommended you activate it. See: "
"https://docs.djangoproject.com/en/%s/ref/databases/#mysql-sql-mode"
% (get_docs_version(),),
id='mysql.W002',
)]
return []
def check_field(self, field, **kwargs):
"""
MySQL has the following field length restriction:
No character (varchar) fields can have a length exceeding 255
characters if they have a unique index on them.
"""
errors = super(DatabaseValidation, self).check_field(field, **kwargs)
# Ignore any related fields.
if getattr(field, 'remote_field', None):
return errors
# Ignore fields with unsupported features.
db_supports_all_required_features = all(
getattr(self.connection.features, feature, False)
for feature in field.model._meta.required_db_features
)
if not db_supports_all_required_features:
return errors
field_type = field.db_type(self.connection)
# Ignore non-concrete fields.
if field_type is None:
return errors
if (field_type.startswith('varchar') and field.unique and
(field.max_length is None or int(field.max_length) > 255)):
errors.append(
checks.Error(
'MySQL does not allow unique CharFields to have a max_length > 255.',
obj=field,
id='mysql.E001',
)
)
return errors
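# Illustrative sketch (not part of the original module): the check_field()
# rule above flags unique CharFields longer than 255 characters when the
# backend is MySQL. A hypothetical model that would raise mysql.E001:
#   class Tag(models.Model):
#       name = models.CharField(max_length=300, unique=True)
# Dropping unique=True or reducing max_length to 255 silences the error.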
| mit |
philipdexter/rain | rain/lexer.py | 1 | 3765 | from . import error as Q
from . import module as M
from .token import bool_token
from .token import coord
from .token import dedent_token
from .token import end_token
from .token import float_token
from .token import indent_token
from .token import int_token
from .token import keyword_token
from .token import name_token
from .token import newline_token
from .token import null_token
from .token import operator_token
from .token import string_token
from .token import symbol_token
from .token import table_token
from collections import OrderedDict
import re
OPERATORS = (
'->', '::',
'<=', '>=', '>', '<', '==', '!=',
'*', '/', '+', '-',
'&', '|', '!', '$',
)
KW_OPERATORS = (
)
KEYWORDS = (
'as', 'break', 'catch', 'continue', 'else', 'export', 'for', 'foreign',
'func', 'if', 'import', 'in', 'var', 'library', 'link', 'loop', 'macro',
'pass', 'return', 'save', 'until', 'while', 'with',
)
def factory(data, *, pos=coord()):
  """Build a keyword, keyword-operator, or normalized name token for data."""
if data.lower() in KEYWORDS:
return keyword_token(data.lower(), pos=pos)
elif data.lower() in KW_OPERATORS:
return operator_token(data.lower(), pos=pos)
else:
return name_token(M.normalize_name(data), pos=pos)
raw = OrderedDict()
raw[r'#.*'] = None
raw[r'""|"(.*?[^\\])"'] = string_token
raw[r'(?:0|-?[1-9][0-9]*)\.[0-9]+'] = float_token
raw[r'0|-?[1-9][0-9]*'] = int_token
raw[r'true|false'] = bool_token
raw[r'null'] = null_token
raw[r'table'] = table_token
raw[r'[a-zA-Z_][a-zA-Z0-9_]*'] = factory
raw['|'.join(re.escape(x) for x in OPERATORS)] = operator_token
raw[r'.'] = symbol_token
rules = OrderedDict()
for k, v in raw.items():
rules[re.compile(k)] = v
indent = re.compile('^[ ]*')
ignore_whitespace = []
def stream(source):
  """Tokenize source text: yields value tokens plus indent/dedent/newline
  tokens derived from leading whitespace, ending with a final end token."""
indents = [0]
line = 1
col = 1
def skip(amt):
nonlocal source, col
source = source[amt:]
col += amt
last = None
while source:
if source[0] == '\n':
# skip repeated newlines
while source and source[0] == '\n':
skip(1)
col = 1
line += 1
# get this line's indentation
depth = indent.match(source)
depth_amt = len(depth.group(0))
# skip this line if it was just an indentation
if source and source[depth_amt] == '\n':
skip(1)
col = 1
line += 1
continue
# handle indents
if not ignore_whitespace:
if depth_amt > indents[-1]:
last = indent_token(pos=coord(line, col, len=depth_amt))
yield last
indents.append(depth_amt)
# handle newlines at the same indentation
else:
if not isinstance(last, (type(None), indent_token, newline_token)):
last = newline_token(pos=coord(line, col))
yield last
# handle dedents
while depth_amt < indents[-1]:
last = newline_token(pos=coord(line, col))
yield dedent_token(pos=coord(line, col))
yield last
del indents[-1]
skip(depth_amt)
if not source:
break
# skip internal whitespace
if source[0].isspace():
skip(1)
continue
# tokenize
for rule, kind in rules.items():
match = rule.match(source)
if match:
value = match.group(0)
if kind:
last = kind(value, pos=coord(line, col, len=len(value)))
if last in (symbol_token('['), symbol_token('{'), symbol_token('(')):
ignore_whitespace.append(True)
elif last in (symbol_token(']'), symbol_token('}'), symbol_token(')')):
if ignore_whitespace:
ignore_whitespace.pop()
else:
Q.abort('unmatched brace', pos=coord(line, col))
yield last
skip(len(value))
break
yield end_token(pos=coord(line, col))
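# Illustrative sketch (not part of the original module): stream() is a
# generator, so a driver simply iterates it. Assuming `src` holds Rain source
# text read elsewhere:
#   for tok in stream(src):
#     print(tok)
# Indentation changes surface as indent/dedent tokens and the sequence always
# terminates with the end token yielded above.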
| mit |
amghost/myblog | node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/formatter.py | 198 | 2921 | # -*- coding: utf-8 -*-
"""
pygments.formatter
~~~~~~~~~~~~~~~~~~
Base formatter class.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import codecs
from pygments.util import get_bool_opt
from pygments.styles import get_style_by_name
__all__ = ['Formatter']
def _lookup_style(style):
if isinstance(style, basestring):
return get_style_by_name(style)
return style
class Formatter(object):
"""
Converts a token stream to text.
Options accepted:
``style``
The style to use, can be a string or a Style subclass
(default: "default"). Not used by e.g. the
TerminalFormatter.
``full``
Tells the formatter to output a "full" document, i.e.
a complete self-contained document. This doesn't have
any effect for some formatters (default: false).
``title``
If ``full`` is true, the title that should be used to
caption the document (default: '').
``encoding``
If given, must be an encoding name. This will be used to
convert the Unicode token strings to byte strings in the
output. If it is "" or None, Unicode strings will be written
to the output file, which most file-like objects do not
support (default: None).
``outencoding``
Overrides ``encoding`` if given.
"""
#: Name of the formatter
name = None
#: Shortcuts for the formatter
aliases = []
#: fn match rules
filenames = []
#: If True, this formatter outputs Unicode strings when no encoding
#: option is given.
unicodeoutput = True
def __init__(self, **options):
self.style = _lookup_style(options.get('style', 'default'))
self.full = get_bool_opt(options, 'full', False)
self.title = options.get('title', '')
self.encoding = options.get('encoding', None) or None
if self.encoding == 'guess':
# can happen for pygmentize -O encoding=guess
self.encoding = 'utf-8'
self.encoding = options.get('outencoding', None) or self.encoding
self.options = options
def get_style_defs(self, arg=''):
"""
Return the style definitions for the current style as a string.
``arg`` is an additional argument whose meaning depends on the
formatter used. Note that ``arg`` can also be a list or tuple
for some formatters like the html formatter.
"""
return ''
def format(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
"""
if self.encoding:
# wrap the outfile in a StreamWriter
outfile = codecs.lookup(self.encoding)[3](outfile)
return self.format_unencoded(tokensource, outfile)
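# Illustrative sketch (not part of the original module): a concrete formatter
# subclasses Formatter and implements format_unencoded(), which format() calls
# after optionally wrapping the output stream in an encoding StreamWriter.
# A minimal, hypothetical subclass:
#
#   class PlainTextFormatter(Formatter):
#       name = 'Plain text'
#       aliases = ['plain']
#       def format_unencoded(self, tokensource, outfile):
#           for ttype, value in tokensource:
#               outfile.write(value)   # drop all highlighting information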
| mit |
Oslandia/vizitown_plugin | twisted/words/xish/xmlstream.py | 49 | 8526 | # -*- test-case-name: twisted.words.test.test_xmlstream -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
XML Stream processing.
An XML Stream is defined as a connection over which two XML documents are
exchanged during the lifetime of the connection, one for each direction. The
unit of interaction is a direct child element of the root element (stanza).
The most prominent use of XML Streams is Jabber, but this module is generically
usable. See Twisted Words for Jabber specific protocol support.
Maintainer: Ralph Meijer
"""
from twisted.python import failure
from twisted.internet import protocol
from twisted.words.xish import domish, utility
STREAM_CONNECTED_EVENT = intern("//event/stream/connected")
STREAM_START_EVENT = intern("//event/stream/start")
STREAM_END_EVENT = intern("//event/stream/end")
STREAM_ERROR_EVENT = intern("//event/stream/error")
class XmlStream(protocol.Protocol, utility.EventDispatcher):
""" Generic Streaming XML protocol handler.
This protocol handler will parse incoming data as XML and dispatch events
accordingly. Incoming stanzas can be handled by registering observers using
XPath-like expressions that are matched against each stanza. See
L{utility.EventDispatcher} for details.
"""
def __init__(self):
utility.EventDispatcher.__init__(self)
self.stream = None
self.rawDataOutFn = None
self.rawDataInFn = None
def _initializeStream(self):
""" Sets up XML Parser. """
self.stream = domish.elementStream()
self.stream.DocumentStartEvent = self.onDocumentStart
self.stream.ElementEvent = self.onElement
self.stream.DocumentEndEvent = self.onDocumentEnd
### --------------------------------------------------------------
###
### Protocol events
###
### --------------------------------------------------------------
def connectionMade(self):
""" Called when a connection is made.
Sets up the XML parser and dispatches the L{STREAM_CONNECTED_EVENT}
event indicating the connection has been established.
"""
self._initializeStream()
self.dispatch(self, STREAM_CONNECTED_EVENT)
def dataReceived(self, data):
""" Called whenever data is received.
Passes the data to the XML parser. This can result in calls to the
DOM handlers. If a parse error occurs, the L{STREAM_ERROR_EVENT} event
is called to allow for cleanup actions, followed by dropping the
connection.
"""
try:
if self.rawDataInFn:
self.rawDataInFn(data)
self.stream.parse(data)
except domish.ParserError:
self.dispatch(failure.Failure(), STREAM_ERROR_EVENT)
self.transport.loseConnection()
def connectionLost(self, reason):
""" Called when the connection is shut down.
Dispatches the L{STREAM_END_EVENT}.
"""
self.dispatch(reason, STREAM_END_EVENT)
self.stream = None
### --------------------------------------------------------------
###
### DOM events
###
### --------------------------------------------------------------
def onDocumentStart(self, rootElement):
""" Called whenever the start tag of a root element has been received.
Dispatches the L{STREAM_START_EVENT}.
"""
self.dispatch(self, STREAM_START_EVENT)
def onElement(self, element):
""" Called whenever a direct child element of the root element has
been received.
Dispatches the received element.
"""
self.dispatch(element)
def onDocumentEnd(self):
""" Called whenever the end tag of the root element has been received.
Closes the connection. This causes C{connectionLost} being called.
"""
self.transport.loseConnection()
def setDispatchFn(self, fn):
""" Set another function to handle elements. """
self.stream.ElementEvent = fn
def resetDispatchFn(self):
""" Set the default function (C{onElement}) to handle elements. """
self.stream.ElementEvent = self.onElement
def send(self, obj):
""" Send data over the stream.
Sends the given C{obj} over the connection. C{obj} may be instances of
L{domish.Element}, C{unicode} and C{str}. The first two will be
properly serialized and/or encoded. C{str} objects must be in UTF-8
encoding.
Note: because it is easy to make mistakes in maintaining a properly
encoded C{str} object, it is advised to use C{unicode} objects
everywhere when dealing with XML Streams.
@param obj: Object to be sent over the stream.
@type obj: L{domish.Element}, L{domish} or C{str}
"""
if domish.IElement.providedBy(obj):
obj = obj.toXml()
if isinstance(obj, unicode):
obj = obj.encode('utf-8')
if self.rawDataOutFn:
self.rawDataOutFn(obj)
self.transport.write(obj)
class BootstrapMixin(object):
"""
XmlStream factory mixin to install bootstrap event observers.
This mixin is for factories providing
L{IProtocolFactory<twisted.internet.interfaces.IProtocolFactory>} to make
sure bootstrap event observers are set up on protocols, before incoming
data is processed. Such protocols typically derive from
L{utility.EventDispatcher}, like L{XmlStream}.
You can set up bootstrap event observers using C{addBootstrap}. The
C{event} and C{fn} parameters correspond with the C{event} and
C{observerfn} arguments to L{utility.EventDispatcher.addObserver}.
@since: 8.2.
@ivar bootstraps: The list of registered bootstrap event observers.
    @type bootstraps: C{list}
"""
def __init__(self):
self.bootstraps = []
def installBootstraps(self, dispatcher):
"""
Install registered bootstrap observers.
@param dispatcher: Event dispatcher to add the observers to.
@type dispatcher: L{utility.EventDispatcher}
"""
for event, fn in self.bootstraps:
dispatcher.addObserver(event, fn)
def addBootstrap(self, event, fn):
"""
Add a bootstrap event handler.
@param event: The event to register an observer for.
@type event: C{str} or L{xpath.XPathQuery}
@param fn: The observer callable to be registered.
"""
self.bootstraps.append((event, fn))
def removeBootstrap(self, event, fn):
"""
Remove a bootstrap event handler.
@param event: The event the observer is registered for.
@type event: C{str} or L{xpath.XPathQuery}
@param fn: The registered observer callable.
"""
self.bootstraps.remove((event, fn))
class XmlStreamFactoryMixin(BootstrapMixin):
"""
XmlStream factory mixin that takes care of event handlers.
All positional and keyword arguments passed to create this factory are
passed on as-is to the protocol.
@ivar args: Positional arguments passed to the protocol upon instantiation.
@type args: C{tuple}.
@ivar kwargs: Keyword arguments passed to the protocol upon instantiation.
@type kwargs: C{dict}.
"""
def __init__(self, *args, **kwargs):
BootstrapMixin.__init__(self)
self.args = args
self.kwargs = kwargs
def buildProtocol(self, addr):
"""
Create an instance of XmlStream.
The returned instance will have bootstrap event observers registered
and will proceed to handle input on an incoming connection.
"""
xs = self.protocol(*self.args, **self.kwargs)
xs.factory = self
self.installBootstraps(xs)
return xs
class XmlStreamFactory(XmlStreamFactoryMixin,
protocol.ReconnectingClientFactory):
"""
Factory for XmlStream protocol objects as a reconnection client.
"""
protocol = XmlStream
def buildProtocol(self, addr):
"""
Create a protocol instance.
Overrides L{XmlStreamFactoryMixin.buildProtocol} to work with
a L{ReconnectingClientFactory}. As this is called upon having an
connection established, we are resetting the delay for reconnection
attempts when the connection is lost again.
"""
self.resetDelay()
return XmlStreamFactoryMixin.buildProtocol(self, addr)
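# Illustrative sketch (not part of the original module): the factory mixins
# above are normally used by registering bootstrap observers before
# connecting, for example (onConnected and onMessage are hypothetical
# callbacks):
#
#   factory = XmlStreamFactory()
#   factory.addBootstrap(STREAM_CONNECTED_EVENT, onConnected)
#   factory.addBootstrap('/message', onMessage)
#   reactor.connectTCP('jabber.example.org', 5222, factory)
#
# Every XmlStream built by the factory gets these observers installed before
# any incoming data is parsed.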
| gpl-2.0 |
drpaneas/linuxed.gr | lib/python2.7/site-packages/docutils/writers/odf_odt/pygmentsformatter.py | 244 | 4671 | # $Id: pygmentsformatter.py 5853 2009-01-19 21:02:02Z dkuhlman $
# Author: Dave Kuhlman <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Additional support for Pygments formatter.
"""
import pygments
import pygments.formatter
class OdtPygmentsFormatter(pygments.formatter.Formatter):
def __init__(self, rststyle_function, escape_function):
pygments.formatter.Formatter.__init__(self)
self.rststyle_function = rststyle_function
self.escape_function = escape_function
def rststyle(self, name, parameters=( )):
return self.rststyle_function(name, parameters)
class OdtPygmentsProgFormatter(OdtPygmentsFormatter):
def format(self, tokensource, outfile):
tokenclass = pygments.token.Token
for ttype, value in tokensource:
value = self.escape_function(value)
if ttype == tokenclass.Keyword:
s2 = self.rststyle('codeblock-keyword')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Literal.String:
s2 = self.rststyle('codeblock-string')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype in (
tokenclass.Literal.Number.Integer,
tokenclass.Literal.Number.Integer.Long,
tokenclass.Literal.Number.Float,
tokenclass.Literal.Number.Hex,
tokenclass.Literal.Number.Oct,
tokenclass.Literal.Number,
):
s2 = self.rststyle('codeblock-number')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Operator:
s2 = self.rststyle('codeblock-operator')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Comment:
s2 = self.rststyle('codeblock-comment')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name.Class:
s2 = self.rststyle('codeblock-classname')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name.Function:
s2 = self.rststyle('codeblock-functionname')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name:
s2 = self.rststyle('codeblock-name')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
else:
s1 = value
outfile.write(s1)
class OdtPygmentsLaTeXFormatter(OdtPygmentsFormatter):
def format(self, tokensource, outfile):
tokenclass = pygments.token.Token
for ttype, value in tokensource:
value = self.escape_function(value)
if ttype == tokenclass.Keyword:
s2 = self.rststyle('codeblock-keyword')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype in (tokenclass.Literal.String,
tokenclass.Literal.String.Backtick,
):
s2 = self.rststyle('codeblock-string')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name.Attribute:
s2 = self.rststyle('codeblock-operator')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Comment:
if value[-1] == '\n':
s2 = self.rststyle('codeblock-comment')
s1 = '<text:span text:style-name="%s">%s</text:span>\n' % \
(s2, value[:-1], )
else:
s2 = self.rststyle('codeblock-comment')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name.Builtin:
s2 = self.rststyle('codeblock-name')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
else:
s1 = value
outfile.write(s1)
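# Illustrative note (not part of the original module): these formatters are
# handed to pygments.highlight() together with a lexer; the rststyle/escape
# callables are supplied by the ODF/ODT writer. A hedged sketch, with
# hypothetical callables and PythonLexer imported from pygments.lexers:
#
#   formatter = OdtPygmentsProgFormatter(rststyle_fn, escape_fn)
#   odt_spans = pygments.highlight(source_code, PythonLexer(), formatter)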
| mit |
kyrsjo/AcdOpti | src/acdOptiGui/infoFrames/AnalysisExportedResults.py | 1 | 7845 | # -*- coding: utf8 -*-
#
# Copyright 2011 Kyrre Ness Sjøbæk
# This file is part of AcdOpti.
#
# AcdOpti is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# AcdOpti is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with AcdOpti. If not, see <http://www.gnu.org/licenses/>.
import pygtk
pygtk.require('2.0')
import gtk
from InfoFrameComponent import InfoFrameComponent
from SolverSetup import SolverSetup
from acdOpti.AcdOptiExceptions import AcdOptiException_analysis_runAnalysis
from acdOpti.analysis.AnalysisInterface import AnalysisInterface
class AnalysisExportedResults(InfoFrameComponent):
"""
Shows a tree with the exported results from an analysis,
and has a button for runAnalysis/clearLockdown
"""
analysis = None
__settingsScroll = None
__settingsView = None
__settingsModel = None
__settingsCols = None
__settingsRenders = None
__localSolverButton = None
__scrollWindow = None
__treeView = None
__treeModel = None
__treeCols = None
__treeRenders = None
__lockdownRunButton = None
def __init__(self, frameManager, analysis):
InfoFrameComponent.__init__(self, frameManager)
self.analysis = analysis
assert isinstance(self.analysis, AnalysisInterface)
self.baseWidget = gtk.VBox()
if self.analysis.settings != None:
self.__settingsModel = gtk.ListStore(str,str) #Key, value
self.__settingsView = gtk.TreeView(self.__settingsModel)
self.__settingsCols = []
self.__settingsRenders = []
self.__settingsRenders.append(gtk.CellRendererText())
self.__settingsCols.append(gtk.TreeViewColumn("Key", self.__settingsRenders[-1], text=0))
self.__settingsView.append_column(self.__settingsCols[-1])
self.__settingsRenders.append(gtk.CellRendererText())
self.__settingsRenders[-1].set_property("editable", True)
self.__settingsRenders[-1].connect('edited', self.event_cellRenderer_settingsValue_edited, None)
self.__settingsCols.append(gtk.TreeViewColumn("Value", self.__settingsRenders[-1], text=1))
self.__settingsView.append_column(self.__settingsCols[-1])
self.__settingsScroll = gtk.ScrolledWindow()
self.__settingsScroll.set_policy(gtk.POLICY_AUTOMATIC,gtk.POLICY_AUTOMATIC)
self.__settingsScroll.set_size_request(-1,125)
self.__settingsScroll.add(self.__settingsView)
self.baseWidget.pack_start(self.__settingsScroll, expand=False, padding=5)
if self.analysis.localSolver != None:
self.__localSolverButton = gtk.Button("_Setup local solver...")
self.__localSolverButton.connect("clicked", self.event_button_localSolver, None)
self.baseWidget.pack_start(self.__localSolverButton, expand=False, padding=5)
self.__treeModel = gtk.TreeStore(str, str)
self.__treeView = gtk.TreeView(self.__treeModel)
self.__treeCols = []
self.__treeRenders = []
self.__treeRenders.append(gtk.CellRendererText())
self.__treeCols.append(gtk.TreeViewColumn("Key", self.__treeRenders[-1], text=0))
self.__treeRenders[-1].set_property("editable", True)
self.__treeView.append_column(self.__treeCols[-1])
self.__treeRenders.append(gtk.CellRendererText())
self.__treeCols.append(gtk.TreeViewColumn("Value", self.__treeRenders[-1], text=1))
self.__treeRenders[-1].set_property("editable", True)
self.__treeView.append_column(self.__treeCols[-1])
self.__scrollWindow = gtk.ScrolledWindow()
self.__scrollWindow.set_policy(gtk.POLICY_AUTOMATIC,gtk.POLICY_AUTOMATIC)
self.__scrollWindow.add(self.__treeView)
self.baseWidget.pack_start(self.__scrollWindow, expand=True, padding=5)
self.baseWidget.pack_start(gtk.HSeparator(), expand=False, padding=10)
self.__lockdownRunButton = gtk.Button("_Run analysis") #Updated in __updateGui()
self.__lockdownRunButton.connect("clicked", self.event_button_lockdownRun, None)
self.baseWidget.pack_start(self.__lockdownRunButton, expand=False)
self.__updateGui()
self.baseWidget.show_all()
def __updateGui(self):
print "AnalysisExportedResults::__updateGui()"
#SettingsView
if self.analysis.settings != None:
self.__settingsModel.clear()
for (k,v) in self.analysis.settings:
self.__settingsModel.append([k,v])
if self.analysis.lockdown:
self.__settingsView.set_sensitive(False)
else:
self.__settingsView.set_sensitive(True)
#TreeView
self.__treeModel.clear()
self.__updateTable_recursive(self.analysis.exportResults, None, self.__treeModel)
self.__treeView.expand_all()
#Button
if self.analysis.lockdown:
self.__lockdownRunButton.set_label("_Clear lockdown")
else:
self.__lockdownRunButton.set_label("_Run analysis")
if self.analysis.runConfig.status == "finished":
self.__lockdownRunButton.set_sensitive(True)
else:
self.__lockdownRunButton.set_sensitive(False)
#Main window project explorer
self.frameManager.mainWindow.updateProjectExplorer()
def __updateTable_recursive(self,exportedDict,parentIter,model):
for (k,v) in exportedDict:
if type(v) == str:
model.append(parentIter, [k,v])
else:
#DataDict
iter = model.append(parentIter, [k,""])
self.__updateTable_recursive(v, iter, model)
def event_button_lockdownRun(self, widget, data=None):
print "AnalysisExportedResults::event_button_lockdownRun()"
if self.analysis.lockdown:
self.analysis.clearLockdown()
else:
try:
self.analysis.runAnalysis()
self.makePing()
except AcdOptiException_analysis_runAnalysis as e:
self.makePing()
mDia = gtk.MessageDialog(self.getBaseWindow(),
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_ERROR, gtk.BUTTONS_OK,
"Couldn't run analysis, error message:\n'" + str(e.args) + "'")
mDia.run()
mDia.destroy()
self.__updateGui()
def event_button_localSolver(self, widget, data=None):
print "AnalysisExportedResults::event_button_localSolver()"
self.frameManager.push(SolverSetup(self.frameManager,self.analysis.localSolver))
def event_cellRenderer_settingsValue_edited(self, cell, path, new_text, user_data=None):
print "AnalysisExportedResults::event_cellRenderer_settingsValue_edited(), path='" + str(path) + "', new_text='" + new_text + "'"
idx = int(path)
self.analysis.settings.setValSingle(idx,new_text)
self.__settingsModel[idx][1] = new_text
| gpl-3.0 |
xxsergzzxx/python-for-android | python3-alpha/python3-src/Lib/multiprocessing/managers.py | 46 | 36645 | #
# Module providing the `SyncManager` class for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
#
# Imports
#
import os
import sys
import weakref
import threading
import array
import queue
from traceback import format_exc
from pickle import PicklingError
from multiprocessing import Process, current_process, active_children, Pool, util, connection
from multiprocessing.process import AuthenticationString
from multiprocessing.forking import exit, Popen, assert_spawning, ForkingPickler
from multiprocessing.util import Finalize, info
#
# Register some things for pickling
#
def reduce_array(a):
return array.array, (a.typecode, a.tobytes())
ForkingPickler.register(array.array, reduce_array)
view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
if view_types[0] is not list: # only needed in Py3.0
def rebuild_as_list(obj):
return list, (list(obj),)
for view_type in view_types:
ForkingPickler.register(view_type, rebuild_as_list)
import copyreg
copyreg.pickle(view_type, rebuild_as_list)
#
# Type for identifying shared objects
#
class Token(object):
'''
    Type to uniquely identify a shared object
'''
__slots__ = ('typeid', 'address', 'id')
def __init__(self, typeid, address, id):
(self.typeid, self.address, self.id) = (typeid, address, id)
def __getstate__(self):
return (self.typeid, self.address, self.id)
def __setstate__(self, state):
(self.typeid, self.address, self.id) = state
def __repr__(self):
return 'Token(typeid=%r, address=%r, id=%r)' % \
(self.typeid, self.address, self.id)
#
# Function for communication with a manager's server process
#
def dispatch(c, id, methodname, args=(), kwds={}):
'''
Send a message to manager using connection `c` and return response
'''
c.send((id, methodname, args, kwds))
kind, result = c.recv()
if kind == '#RETURN':
return result
raise convert_to_error(kind, result)
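# Note (not part of the upstream module): for the server-level commands listed
# in Server.public the `id` argument is ignored by handle_request(), which is
# why callers below pass None, e.g. dispatch(conn, None, 'dummy').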
def convert_to_error(kind, result):
if kind == '#ERROR':
return result
elif kind == '#TRACEBACK':
assert type(result) is str
return RemoteError(result)
elif kind == '#UNSERIALIZABLE':
assert type(result) is str
return RemoteError('Unserializable message: %s\n' % result)
else:
return ValueError('Unrecognized message type')
class RemoteError(Exception):
def __str__(self):
return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)
#
# Functions for finding the method names of an object
#
def all_methods(obj):
'''
Return a list of names of methods of `obj`
'''
temp = []
for name in dir(obj):
func = getattr(obj, name)
if hasattr(func, '__call__'):
temp.append(name)
return temp
def public_methods(obj):
'''
Return a list of names of methods of `obj` which do not start with '_'
'''
return [name for name in all_methods(obj) if name[0] != '_']
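# For example (illustrative only): given an object with methods 'acquire',
# 'release' and '_private', public_methods() returns ['acquire', 'release'];
# Server.create() below uses this as the default `exposed` list when a typeid
# is registered without an explicit one.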
#
# Server which is run in a process controlled by a manager
#
class Server(object):
'''
Server class which runs in a process controlled by a manager object
'''
public = ['shutdown', 'create', 'accept_connection', 'get_methods',
'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
def __init__(self, registry, address, authkey, serializer):
assert isinstance(authkey, bytes)
self.registry = registry
self.authkey = AuthenticationString(authkey)
Listener, Client = listener_client[serializer]
# do authentication later
self.listener = Listener(address=address, backlog=5)
self.address = self.listener.address
self.id_to_obj = {'0': (None, ())}
self.id_to_refcount = {}
self.mutex = threading.RLock()
self.stop = 0
def serve_forever(self):
'''
Run the server forever
'''
current_process()._manager_server = self
try:
try:
while 1:
try:
c = self.listener.accept()
except (OSError, IOError):
continue
t = threading.Thread(target=self.handle_request, args=(c,))
t.daemon = True
t.start()
except (KeyboardInterrupt, SystemExit):
pass
finally:
self.stop = 999
self.listener.close()
def handle_request(self, c):
'''
Handle a new connection
'''
funcname = result = request = None
try:
connection.deliver_challenge(c, self.authkey)
connection.answer_challenge(c, self.authkey)
request = c.recv()
ignore, funcname, args, kwds = request
assert funcname in self.public, '%r unrecognized' % funcname
func = getattr(self, funcname)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
try:
result = func(c, *args, **kwds)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
msg = ('#RETURN', result)
try:
c.send(msg)
except Exception as e:
try:
c.send(('#TRACEBACK', format_exc()))
except Exception:
pass
util.info('Failure to send message: %r', msg)
util.info(' ... request was %r', request)
util.info(' ... exception was %r', e)
c.close()
def serve_client(self, conn):
'''
Handle requests from the proxies in a particular process/thread
'''
util.debug('starting server thread to service %r',
threading.current_thread().name)
recv = conn.recv
send = conn.send
id_to_obj = self.id_to_obj
while not self.stop:
try:
methodname = obj = None
request = recv()
ident, methodname, args, kwds = request
obj, exposed, gettypeid = id_to_obj[ident]
if methodname not in exposed:
raise AttributeError(
'method %r of %r object is not in exposed=%r' %
(methodname, type(obj), exposed)
)
function = getattr(obj, methodname)
try:
res = function(*args, **kwds)
except Exception as e:
msg = ('#ERROR', e)
else:
typeid = gettypeid and gettypeid.get(methodname, None)
if typeid:
rident, rexposed = self.create(conn, typeid, res)
token = Token(typeid, self.address, rident)
msg = ('#PROXY', (rexposed, token))
else:
msg = ('#RETURN', res)
except AttributeError:
if methodname is None:
msg = ('#TRACEBACK', format_exc())
else:
try:
fallback_func = self.fallback_mapping[methodname]
result = fallback_func(
self, conn, ident, obj, *args, **kwds
)
msg = ('#RETURN', result)
except Exception:
msg = ('#TRACEBACK', format_exc())
except EOFError:
util.debug('got EOF -- exiting thread serving %r',
threading.current_thread().name)
sys.exit(0)
except Exception:
msg = ('#TRACEBACK', format_exc())
try:
try:
send(msg)
except Exception as e:
send(('#UNSERIALIZABLE', repr(msg)))
except Exception as e:
util.info('exception in thread serving %r',
threading.current_thread().name)
util.info(' ... message was %r', msg)
util.info(' ... exception was %r', e)
conn.close()
sys.exit(1)
def fallback_getvalue(self, conn, ident, obj):
return obj
def fallback_str(self, conn, ident, obj):
return str(obj)
def fallback_repr(self, conn, ident, obj):
return repr(obj)
fallback_mapping = {
'__str__':fallback_str,
'__repr__':fallback_repr,
'#GETVALUE':fallback_getvalue
}
def dummy(self, c):
pass
def debug_info(self, c):
'''
Return some info --- useful to spot problems with refcounting
'''
self.mutex.acquire()
try:
result = []
keys = list(self.id_to_obj.keys())
keys.sort()
for ident in keys:
if ident != '0':
result.append(' %s: refcount=%s\n %s' %
(ident, self.id_to_refcount[ident],
str(self.id_to_obj[ident][0])[:75]))
return '\n'.join(result)
finally:
self.mutex.release()
def number_of_objects(self, c):
'''
Number of shared objects
'''
return len(self.id_to_obj) - 1 # don't count ident='0'
def shutdown(self, c):
'''
Shutdown this process
'''
try:
try:
util.debug('manager received shutdown message')
c.send(('#RETURN', None))
if sys.stdout != sys.__stdout__:
util.debug('resetting stdout, stderr')
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
util._run_finalizers(0)
for p in active_children():
util.debug('terminating a child process of manager')
p.terminate()
for p in active_children():
util.debug('terminating a child process of manager')
p.join()
util._run_finalizers()
util.info('manager exiting with exitcode 0')
except:
import traceback
traceback.print_exc()
finally:
exit(0)
def create(self, c, typeid, *args, **kwds):
'''
Create a new shared object and return its id
'''
self.mutex.acquire()
try:
callable, exposed, method_to_typeid, proxytype = \
self.registry[typeid]
if callable is None:
assert len(args) == 1 and not kwds
obj = args[0]
else:
obj = callable(*args, **kwds)
if exposed is None:
exposed = public_methods(obj)
if method_to_typeid is not None:
assert type(method_to_typeid) is dict
exposed = list(exposed) + list(method_to_typeid)
ident = '%x' % id(obj) # convert to string because xmlrpclib
# only has 32 bit signed integers
util.debug('%r callable returned object with id %r', typeid, ident)
self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
if ident not in self.id_to_refcount:
self.id_to_refcount[ident] = 0
# increment the reference count immediately, to avoid
# this object being garbage collected before a Proxy
# object for it can be created. The caller of create()
# is responsible for doing a decref once the Proxy object
# has been created.
self.incref(c, ident)
return ident, tuple(exposed)
finally:
self.mutex.release()
def get_methods(self, c, token):
'''
Return the methods of the shared object indicated by token
'''
return tuple(self.id_to_obj[token.id][1])
def accept_connection(self, c, name):
'''
Spawn a new thread to serve this connection
'''
threading.current_thread().name = name
c.send(('#RETURN', None))
self.serve_client(c)
def incref(self, c, ident):
self.mutex.acquire()
try:
self.id_to_refcount[ident] += 1
finally:
self.mutex.release()
def decref(self, c, ident):
self.mutex.acquire()
try:
assert self.id_to_refcount[ident] >= 1
self.id_to_refcount[ident] -= 1
if self.id_to_refcount[ident] == 0:
del self.id_to_obj[ident], self.id_to_refcount[ident]
util.debug('disposing of obj with id %r', ident)
finally:
self.mutex.release()
#
# Class to represent state of a manager
#
class State(object):
__slots__ = ['value']
INITIAL = 0
STARTED = 1
SHUTDOWN = 2
#
# Mapping from serializer name to Listener and Client types
#
listener_client = {
'pickle' : (connection.Listener, connection.Client),
'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
}
#
# Definition of BaseManager
#
class BaseManager(object):
'''
Base class for managers
'''
_registry = {}
_Server = Server
def __init__(self, address=None, authkey=None, serializer='pickle'):
if authkey is None:
authkey = current_process().authkey
self._address = address # XXX not final address if eg ('', 0)
self._authkey = AuthenticationString(authkey)
self._state = State()
self._state.value = State.INITIAL
self._serializer = serializer
self._Listener, self._Client = listener_client[serializer]
def __reduce__(self):
return type(self).from_address, \
(self._address, self._authkey, self._serializer)
def get_server(self):
'''
Return server object with serve_forever() method and address attribute
'''
assert self._state.value == State.INITIAL
return Server(self._registry, self._address,
self._authkey, self._serializer)
def connect(self):
'''
Connect manager object to the server process
'''
Listener, Client = listener_client[self._serializer]
conn = Client(self._address, authkey=self._authkey)
dispatch(conn, None, 'dummy')
self._state.value = State.STARTED
def start(self, initializer=None, initargs=()):
'''
Spawn a server process for this manager object
'''
assert self._state.value == State.INITIAL
if initializer is not None and not hasattr(initializer, '__call__'):
raise TypeError('initializer must be a callable')
# pipe over which we will retrieve address of server
reader, writer = connection.Pipe(duplex=False)
# spawn process which runs a server
self._process = Process(
target=type(self)._run_server,
args=(self._registry, self._address, self._authkey,
self._serializer, writer, initializer, initargs),
)
ident = ':'.join(str(i) for i in self._process._identity)
self._process.name = type(self).__name__ + '-' + ident
self._process.start()
# get address of server
writer.close()
self._address = reader.recv()
reader.close()
# register a finalizer
self._state.value = State.STARTED
self.shutdown = util.Finalize(
self, type(self)._finalize_manager,
args=(self._process, self._address, self._authkey,
self._state, self._Client),
exitpriority=0
)
@classmethod
def _run_server(cls, registry, address, authkey, serializer, writer,
initializer=None, initargs=()):
'''
Create a server, report its address and run it
'''
if initializer is not None:
initializer(*initargs)
# create server
server = cls._Server(registry, address, authkey, serializer)
# inform parent process of the server's address
writer.send(server.address)
writer.close()
# run the manager
util.info('manager serving at %r', server.address)
server.serve_forever()
def _create(self, typeid, *args, **kwds):
'''
Create a new shared object; return the token and exposed tuple
'''
assert self._state.value == State.STARTED, 'server not yet started'
conn = self._Client(self._address, authkey=self._authkey)
try:
id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
finally:
conn.close()
return Token(typeid, self._address, id), exposed
def join(self, timeout=None):
'''
Join the manager process (if it has been spawned)
'''
self._process.join(timeout)
def _debug_info(self):
'''
        Return some info about the server's shared objects and connections
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'debug_info')
finally:
conn.close()
def _number_of_objects(self):
'''
Return the number of shared objects
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'number_of_objects')
finally:
conn.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
@staticmethod
def _finalize_manager(process, address, authkey, state, _Client):
'''
Shutdown the manager process; will be registered as a finalizer
'''
if process.is_alive():
util.info('sending shutdown message to manager')
try:
conn = _Client(address, authkey=authkey)
try:
dispatch(conn, None, 'shutdown')
finally:
conn.close()
except Exception:
pass
process.join(timeout=0.2)
if process.is_alive():
util.info('manager still alive')
if hasattr(process, 'terminate'):
util.info('trying to `terminate()` manager process')
process.terminate()
process.join(timeout=0.1)
if process.is_alive():
util.info('manager still alive after terminate')
state.value = State.SHUTDOWN
try:
del BaseProxy._address_to_local[address]
except KeyError:
pass
address = property(lambda self: self._address)
@classmethod
def register(cls, typeid, callable=None, proxytype=None, exposed=None,
method_to_typeid=None, create_method=True):
'''
Register a typeid with the manager type
'''
if '_registry' not in cls.__dict__:
cls._registry = cls._registry.copy()
if proxytype is None:
proxytype = AutoProxy
exposed = exposed or getattr(proxytype, '_exposed_', None)
method_to_typeid = method_to_typeid or \
getattr(proxytype, '_method_to_typeid_', None)
if method_to_typeid:
for key, value in list(method_to_typeid.items()):
assert type(key) is str, '%r is not a string' % key
assert type(value) is str, '%r is not a string' % value
cls._registry[typeid] = (
callable, exposed, method_to_typeid, proxytype
)
if create_method:
def temp(self, *args, **kwds):
util.debug('requesting creation of a shared %r object', typeid)
token, exp = self._create(typeid, *args, **kwds)
proxy = proxytype(
token, self._serializer, manager=self,
authkey=self._authkey, exposed=exp
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
temp.__name__ = typeid
setattr(cls, typeid, temp)
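# Illustrative sketch (assumes a hypothetical user-defined `Counter` class; not
# part of the upstream module) of how register() is typically used:
#
#     class MyManager(BaseManager):
#         pass
#
#     MyManager.register('Counter', Counter)
#
#     manager = MyManager()
#     manager.start()
#     proxy = manager.Counter()   # AutoProxy wrapping a Counter held by the server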
#
# Subclass of set which gets cleared after a fork
#
class ProcessLocalSet(set):
def __init__(self):
util.register_after_fork(self, lambda obj: obj.clear())
def __reduce__(self):
return type(self), ()
#
# Definition of BaseProxy
#
class BaseProxy(object):
'''
A base for proxies of shared objects
'''
_address_to_local = {}
_mutex = util.ForkAwareThreadLock()
def __init__(self, token, serializer, manager=None,
authkey=None, exposed=None, incref=True):
BaseProxy._mutex.acquire()
try:
tls_idset = BaseProxy._address_to_local.get(token.address, None)
if tls_idset is None:
tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
BaseProxy._address_to_local[token.address] = tls_idset
finally:
BaseProxy._mutex.release()
# self._tls is used to record the connection used by this
# thread to communicate with the manager at token.address
self._tls = tls_idset[0]
# self._idset is used to record the identities of all shared
# objects for which the current process owns references and
# which are in the manager at token.address
self._idset = tls_idset[1]
self._token = token
self._id = self._token.id
self._manager = manager
self._serializer = serializer
self._Client = listener_client[serializer][1]
if authkey is not None:
self._authkey = AuthenticationString(authkey)
elif self._manager is not None:
self._authkey = self._manager._authkey
else:
self._authkey = current_process().authkey
if incref:
self._incref()
util.register_after_fork(self, BaseProxy._after_fork)
def _connect(self):
util.debug('making connection to manager')
name = current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'accept_connection', (name,))
self._tls.connection = conn
def _callmethod(self, methodname, args=(), kwds={}):
'''
        Try to call a method of the referent and return a copy of the result
'''
try:
conn = self._tls.connection
except AttributeError:
util.debug('thread %r does not own a connection',
threading.current_thread().name)
self._connect()
conn = self._tls.connection
conn.send((self._id, methodname, args, kwds))
kind, result = conn.recv()
if kind == '#RETURN':
return result
elif kind == '#PROXY':
exposed, token = result
proxytype = self._manager._registry[token.typeid][-1]
proxy = proxytype(
token, self._serializer, manager=self._manager,
authkey=self._authkey, exposed=exposed
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
raise convert_to_error(kind, result)
def _getvalue(self):
'''
Get a copy of the value of the referent
'''
return self._callmethod('#GETVALUE')
def _incref(self):
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'incref', (self._id,))
util.debug('INCREF %r', self._token.id)
self._idset.add(self._id)
state = self._manager and self._manager._state
self._close = util.Finalize(
self, BaseProxy._decref,
args=(self._token, self._authkey, state,
self._tls, self._idset, self._Client),
exitpriority=10
)
@staticmethod
def _decref(token, authkey, state, tls, idset, _Client):
idset.discard(token.id)
# check whether manager is still alive
if state is None or state.value == State.STARTED:
# tell manager this process no longer cares about referent
try:
util.debug('DECREF %r', token.id)
conn = _Client(token.address, authkey=authkey)
dispatch(conn, None, 'decref', (token.id,))
except Exception as e:
util.debug('... decref failed %s', e)
else:
util.debug('DECREF %r -- manager already shutdown', token.id)
# check whether we can close this thread's connection because
# the process owns no more references to objects for this manager
if not idset and hasattr(tls, 'connection'):
util.debug('thread %r has no more proxies so closing conn',
threading.current_thread().name)
tls.connection.close()
del tls.connection
def _after_fork(self):
self._manager = None
try:
self._incref()
except Exception as e:
# the proxy may just be for a manager which has shutdown
util.info('incref failed: %s' % e)
def __reduce__(self):
kwds = {}
if Popen.thread_is_spawning():
kwds['authkey'] = self._authkey
if getattr(self, '_isauto', False):
kwds['exposed'] = self._exposed_
return (RebuildProxy,
(AutoProxy, self._token, self._serializer, kwds))
else:
return (RebuildProxy,
(type(self), self._token, self._serializer, kwds))
def __deepcopy__(self, memo):
return self._getvalue()
def __repr__(self):
return '<%s object, typeid %r at %s>' % \
(type(self).__name__, self._token.typeid, '0x%x' % id(self))
def __str__(self):
'''
Return representation of the referent (or a fall-back if that fails)
'''
try:
return self._callmethod('__repr__')
except Exception:
return repr(self)[:-1] + "; '__str__()' failed>"
#
# Function used for unpickling
#
def RebuildProxy(func, token, serializer, kwds):
'''
Function used for unpickling proxy objects.
If possible the shared object is returned, or otherwise a proxy for it.
'''
server = getattr(current_process(), '_manager_server', None)
if server and server.address == token.address:
return server.id_to_obj[token.id][0]
else:
incref = (
kwds.pop('incref', True) and
not getattr(current_process(), '_inheriting', False)
)
return func(token, serializer, incref=incref, **kwds)
#
# Functions to create proxies and proxy types
#
def MakeProxyType(name, exposed, _cache={}):
'''
    Return a proxy type whose methods are given by `exposed`
'''
exposed = tuple(exposed)
try:
return _cache[(name, exposed)]
except KeyError:
pass
dic = {}
for meth in exposed:
exec('''def %s(self, *args, **kwds):
return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)
ProxyType = type(name, (BaseProxy,), dic)
ProxyType._exposed_ = exposed
_cache[(name, exposed)] = ProxyType
return ProxyType
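# For example (illustrative only): MakeProxyType('CounterProxy', ('get', 'increment'))
# returns a BaseProxy subclass whose get()/increment() simply forward to
# _callmethod('get') / _callmethod('increment'), which is exactly how
# BaseListProxy and DictProxy are built further down.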
def AutoProxy(token, serializer, manager=None, authkey=None,
exposed=None, incref=True):
'''
Return an auto-proxy for `token`
'''
_Client = listener_client[serializer][1]
if exposed is None:
conn = _Client(token.address, authkey=authkey)
try:
exposed = dispatch(conn, None, 'get_methods', (token,))
finally:
conn.close()
if authkey is None and manager is not None:
authkey = manager._authkey
if authkey is None:
authkey = current_process().authkey
ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
incref=incref)
proxy._isauto = True
return proxy
#
# Types/callables which we will register with SyncManager
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = list(self.__dict__.items())
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return 'Namespace(%s)' % str.join(', ', temp)
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def get(self):
return self._value
def set(self, value):
self._value = value
def __repr__(self):
return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
value = property(get, set)
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
#
# Proxy types used by SyncManager
#
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__', 'send', 'throw', 'close')
def __iter__(self):
return self
def __next__(self, *args):
return self._callmethod('__next__', args)
def send(self, *args):
return self._callmethod('send', args)
def throw(self, *args):
return self._callmethod('throw', args)
def close(self, *args):
return self._callmethod('close', args)
class AcquirerProxy(BaseProxy):
_exposed_ = ('acquire', 'release')
def acquire(self, blocking=True):
return self._callmethod('acquire', (blocking,))
def release(self):
return self._callmethod('release')
def __enter__(self):
return self._callmethod('acquire')
def __exit__(self, exc_type, exc_val, exc_tb):
return self._callmethod('release')
class ConditionProxy(AcquirerProxy):
_exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def notify(self):
return self._callmethod('notify')
def notify_all(self):
return self._callmethod('notify_all')
class EventProxy(BaseProxy):
_exposed_ = ('is_set', 'set', 'clear', 'wait')
def is_set(self):
return self._callmethod('is_set')
def set(self):
return self._callmethod('set')
def clear(self):
return self._callmethod('clear')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
class NamespaceProxy(BaseProxy):
_exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
def __getattr__(self, key):
if key[0] == '_':
return object.__getattribute__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__getattribute__', (key,))
def __setattr__(self, key, value):
if key[0] == '_':
return object.__setattr__(self, key, value)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__setattr__', (key, value))
def __delattr__(self, key):
if key[0] == '_':
return object.__delattr__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__delattr__', (key,))
class ValueProxy(BaseProxy):
_exposed_ = ('get', 'set')
def get(self):
return self._callmethod('get')
def set(self, value):
return self._callmethod('set', (value,))
value = property(get, set)
BaseListProxy = MakeProxyType('BaseListProxy', (
'__add__', '__contains__', '__delitem__', '__delslice__',
'__getitem__', '__getslice__', '__len__', '__mul__',
'__reversed__', '__rmul__', '__setitem__', '__setslice__',
'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
'reverse', 'sort', '__imul__'
)) # XXX __getslice__ and __setslice__ unneeded in Py3.0
class ListProxy(BaseListProxy):
def __iadd__(self, value):
self._callmethod('extend', (value,))
return self
def __imul__(self, value):
self._callmethod('__imul__', (value,))
return self
DictProxy = MakeProxyType('DictProxy', (
'__contains__', '__delitem__', '__getitem__', '__len__',
'__setitem__', 'clear', 'copy', 'get', 'has_key', 'items',
'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
))
ArrayProxy = MakeProxyType('ArrayProxy', (
'__len__', '__getitem__', '__setitem__', '__getslice__', '__setslice__'
)) # XXX __getslice__ and __setslice__ unneeded in Py3.0
PoolProxy = MakeProxyType('PoolProxy', (
'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
'map', 'map_async', 'terminate'
))
PoolProxy._method_to_typeid_ = {
'apply_async': 'AsyncResult',
'map_async': 'AsyncResult',
'imap': 'Iterator',
'imap_unordered': 'Iterator'
}
#
# Definition of SyncManager
#
class SyncManager(BaseManager):
'''
Subclass of `BaseManager` which supports a number of shared object types.
The types registered are those intended for the synchronization
of threads, plus `dict`, `list` and `Namespace`.
The `multiprocessing.Manager()` function creates started instances of
this class.
'''
SyncManager.register('Queue', queue.Queue)
SyncManager.register('JoinableQueue', queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Pool', Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)
# types returned by methods of PoolProxy
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
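# Minimal usage sketch (not part of the upstream module); it relies only on the
# registrations above and is guarded so it never runs on import:
if __name__ == '__main__':
    manager = SyncManager()
    manager.start()
    try:
        d = manager.dict()                 # DictProxy
        l = manager.list(range(3))         # ListProxy
        ns = manager.Namespace()           # NamespaceProxy
        ns.greeting = 'hello'
        d['total'] = sum(l)
        print(d['total'], list(l), ns.greeting)
    finally:
        manager.shutdown()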
| apache-2.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/tests/urlpatterns_reverse/namespace_urls.py | 19 | 2458 | from django.conf.urls import include, url
from . import views
class URLObject(object):
def __init__(self, app_name, namespace):
self.app_name = app_name
self.namespace = namespace
def urls(self):
return ([
url(r'^inner/$', views.empty_view, name='urlobject-view'),
url(r'^inner/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='urlobject-view'),
url(r'^inner/\+\\\$\*/$', views.empty_view, name='urlobject-special-view'),
], self.app_name, self.namespace)
urls = property(urls)
testobj1 = URLObject('testapp', 'test-ns1')
testobj2 = URLObject('testapp', 'test-ns2')
default_testobj = URLObject('testapp', 'testapp')
otherobj1 = URLObject('nodefault', 'other-ns1')
otherobj2 = URLObject('nodefault', 'other-ns2')
urlpatterns = [
url(r'^normal/$', views.empty_view, name='normal-view'),
url(r'^normal/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='normal-view'),
url(r'^resolver_match/$', views.pass_resolver_match_view, name='test-resolver-match'),
url(r'^\+\\\$\*/$', views.empty_view, name='special-view'),
url(r'^mixed_args/([0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='mixed-args'),
url(r'^no_kwargs/([0-9]+)/([0-9]+)/$', views.empty_view, name='no-kwargs'),
url(r'^view_class/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.view_class_instance, name='view-class'),
url(r'^unnamed/normal/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view),
url(r'^unnamed/view_class/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.view_class_instance),
url(r'^test1/', include(testobj1.urls)),
url(r'^test2/', include(testobj2.urls)),
url(r'^default/', include(default_testobj.urls)),
url(r'^other1/', include(otherobj1.urls)),
url(r'^other[246]/', include(otherobj2.urls)),
url(r'^ns-included[135]/', include('urlpatterns_reverse.included_namespace_urls', namespace='inc-ns1')),
url(r'^ns-included2/', include('urlpatterns_reverse.included_namespace_urls', namespace='inc-ns2')),
url(r'^included/', include('urlpatterns_reverse.included_namespace_urls')),
url(r'^inc(?P<outer>[0-9]+)/', include('urlpatterns_reverse.included_urls', namespace='inc-ns5')),
url(r'^ns-outer/(?P<outer>[0-9]+)/', include('urlpatterns_reverse.included_namespace_urls', namespace='inc-outer')),
url(r'^\+\\\$\*/', include('urlpatterns_reverse.namespace_urls', namespace='special')),
]
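# Illustrative reversals (not part of the upstream fixture; they assume this
# module is the active ROOT_URLCONF of a configured Django 1.8 project):
#
#     from django.core.urlresolvers import reverse
#     reverse('normal-view')               # -> '/normal/'
#     reverse('test-ns1:urlobject-view')   # -> '/test1/inner/'
#     reverse('test-ns2:urlobject-view')   # -> '/test2/inner/'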
| mit |
HaliteChallenge/Halite-II | apiserver/apiserver/web/leaderboard.py | 1 | 6215 | """
Leaderboard API endpoints - get/sort/filter the leaderboard
"""
import operator
import flask
import sqlalchemy
from .. import config, model, util
from . import util as api_util
from .blueprint import web_api
_COUNT_KEY = 'count'
_LEADERBOARD_ALIAS = 'full_leaderboard'
def _count_leaderboard_query(where_clause):
"""
    Given the desired filter clause, return the number of distinct users on the leaderboard
:param where_clause: Clause to filter by
:return: Number of distinct users
"""
full_leaderboard = model.ranked_bots_users.select().where(where_clause).reduce_columns().alias(_LEADERBOARD_ALIAS)
return sqlalchemy.sql.select([
sqlalchemy.sql.func.count(sqlalchemy.distinct(full_leaderboard.c.user_id))
]).select_from(full_leaderboard)
@web_api.route("/leaderboard")
@util.cross_origin(methods=["GET"])
def leaderboard():
result = []
offset, limit = api_util.get_offset_limit(default_limit=250,
max_limit=10000)
where_clause, order_clause, manual_sort = api_util.get_sort_filter({
"user_id": model.ranked_bots_users.c.user_id,
"username": model.ranked_bots_users.c.username,
"level": model.ranked_bots_users.c.player_level,
"organization_id": model.ranked_bots_users.c.organization_id,
"organization_name": model.ranked_bots_users.c.organization_name,
"country_code": model.ranked_bots_users.c.country_code,
"country_subdivision_code": model.ranked_bots_users.c.country_subdivision_code,
"version_number": model.ranked_bots_users.c.num_submissions,
"num_games": model.ranked_bots_users.c.num_games,
"rank": model.ranked_bots_users.c.rank,
"language": model.ranked_bots_users.c.language,
}, ["tier"])
if not order_clause:
order_clause = [model.ranked_bots_users.c.rank]
with model.engine.connect() as conn:
if _COUNT_KEY in flask.request.args:
return str(api_util.get_value(conn.execute(_count_leaderboard_query(where_clause))))
total_users = api_util.get_value(conn.execute(model.total_ranked_users))
tier_filter = None
tier_thresholds = util.tier_thresholds(total_users)
for (field, op, val) in manual_sort:
if field == "tier":
column = model.ranked_bots_users.c.rank
tier = val
if val in tier_thresholds:
val = tier_thresholds[val]
else:
raise util.APIError(
400,
message="Tier " + str(val) + " is not recognized.")
next_tier_dict = {
config.TIER_0_NAME: None,
config.TIER_1_NAME: tier_thresholds[config.TIER_0_NAME],
config.TIER_2_NAME: tier_thresholds[config.TIER_1_NAME],
config.TIER_3_NAME: tier_thresholds[config.TIER_2_NAME],
config.TIER_4_NAME: tier_thresholds[config.TIER_3_NAME],
}
# Negate the filter, since tier and rank are sorted
# opposite of each other
if op in (operator.gt, operator.lt, operator.ge, operator.le):
if op in (operator.gt, operator.le):
val = next_tier_dict.get(tier, val)
clause = {
operator.gt: operator.le,
operator.lt: operator.gt,
operator.ge: operator.le,
operator.le: operator.gt,
}[op](column, val)
elif op is operator.eq or op is operator.ne:
clause = column <= val
next_tier = next_tier_dict.get(tier)
if next_tier is not None:
clause &= column > next_tier
if op is operator.ne:
clause = ~clause
else:
raise util.APIError(
400,
message="Comparison operator not supported for tier field.")
if tier_filter is None:
tier_filter = clause
else:
tier_filter |= clause
if tier_filter is not None:
where_clause &= tier_filter
query = conn.execute(
model.ranked_bots_users.select()
.where(where_clause).order_by(*order_clause)
.offset(offset).limit(limit).reduce_columns())
for row in query.fetchall():
user = {
"user_id": row["user_id"],
"username": row["username"],
"level": row["player_level"],
"organization_id": row["organization_id"],
"organization": row["organization_name"],
"version_number": int(row["num_submissions"]),
"compile_status": row["compile_status"],
"num_games": int(row["num_games"]),
"score": float(row["score"]),
"language": row["language"],
"country": row["country_code"],
"rank": int(row["rank"]) if row["rank"] is not None else None,
"update_time": row["update_time"],
"mu": row["mu"],
"sigma": row["sigma"],
}
if total_users and row["rank"] is not None:
user["tier"] = util.tier(row["rank"], total_users)
else:
user["tier"] = None
result.append(user)
return flask.jsonify(result)
@web_api.route("/leagues")
@util.cross_origin(methods=["GET"])
def leagues():
result = []
with model.engine.connect() as conn:
query = conn.execute(
model.leagues.select())
for row in query.fetchall():
league = {
"id": row["id"],
"category": row["category"],
"name": row["name"],
"description": row["description"],
"query": row["query"],
}
result.append(league)
return flask.jsonify(result)
| mit |
fkolacek/FIT-VUT | bp-revok/python/lib/python2.7/xml/etree/__init__.py | 1200 | 1604 | # $Id: __init__.py 3375 2008-02-13 08:05:08Z fredrik $
# elementtree package
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
| apache-2.0 |
BeyondTheClouds/nova | nova/tests/functional/regressions/test_bug_1548980.py | 11 | 3560 | # Copyright 2016 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_config import cfg
import nova.scheduler.utils
import nova.servicegroup
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.unit import cast_as_call
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
CONF = cfg.CONF
class TestServerGet(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(TestServerGet, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
# The non-admin API client is fine to stay at 2.1 since it just creates
# and deletes the server.
self.api = api_fixture.api
self.admin_api = api_fixture.admin_api
# The admin API client needs to be at microversion 2.16 to exhibit the
# regression.
self.admin_api.microversion = '2.16'
# the image fake backend needed for image discovery
nova.tests.unit.image.fake.stub_out_image_service(self)
self.start_service('conductor', manager=CONF.conductor.manager)
self.flags(scheduler_driver='chance_scheduler')
self.start_service('scheduler')
self.network = self.start_service('network')
self.compute = self.start_service('compute')
self.consoleauth = self.start_service('consoleauth')
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
self.image_id = self.api.get_images()[0]['id']
self.flavor_id = self.api.get_flavors()[0]['id']
def test_list_deleted_instances(self):
"""Regression test for bug #1548980.
Before fixing this bug, listing deleted instances returned a 404
because lazy-loading services from a deleted instance failed. Now
we should be able to list the deleted instance and the host_state
attribute should be "".
"""
server = dict(name='server1',
imageRef=self.image_id,
flavorRef=self.flavor_id)
server = self.api.post_server({'server': server})
self.api.delete_server(server['id'])
# Wait 30 seconds for it to be gone.
for x in range(30):
try:
self.api.get_server(server['id'])
time.sleep(1)
except client.OpenStackApiNotFoundException:
break
else:
self.fail('Timed out waiting to delete server: %s' % server['id'])
servers = self.admin_api.get_servers(search_opts={'deleted': 1})
self.assertEqual(1, len(servers))
self.assertEqual(server['id'], servers[0]['id'])
# host_status is returned in the 2.16 microversion and since the server
# is deleted it should be the empty string
self.assertEqual(0, len(servers[0]['host_status']))
| apache-2.0 |
jjz/mailin | python/DNS/Class.py | 31 | 1469 | """
$Id: Class.py,v 1.6.2.1 2011/03/16 20:06:39 customdesigned Exp $
This file is part of the pydns project.
Homepage: http://pydns.sourceforge.net
This code is covered by the standard Python License. See LICENSE for details.
CLASS values (section 3.2.4)
"""
IN = 1 # the Internet
CS = 2 # the CSNET class (Obsolete - used only for examples in
# some obsolete RFCs)
CH = 3 # the CHAOS class. When someone shows me python running on
# a Symbolics Lisp machine, I'll look at implementing this.
HS = 4 # Hesiod [Dyer 87]
# QCLASS values (section 3.2.5)
ANY = 255 # any class
# Construct reverse mapping dictionary
_names = dir()
classmap = {}
for _name in _names:
if _name[0] != '_': classmap[eval(_name)] = _name
def classstr(klass):
if classmap.has_key(klass): return classmap[klass]
else: return `klass`
#
# $Log: Class.py,v $
# Revision 1.6.2.1 2011/03/16 20:06:39 customdesigned
# Refer to explicit LICENSE file.
#
# Revision 1.6 2002/04/23 12:52:19 anthonybaxter
# cleanup whitespace.
#
# Revision 1.5 2002/03/19 12:41:33 anthonybaxter
# tabnannied and reindented everything. 4 space indent, no tabs.
# yay.
#
# Revision 1.4 2002/03/19 12:26:13 anthonybaxter
# death to leading tabs.
#
# Revision 1.3 2001/08/09 09:08:55 anthonybaxter
# added identifying header to top of each file
#
# Revision 1.2 2001/07/19 06:57:07 anthony
# cvs keywords added
#
#
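# Small usage sketch (not part of the original module):
if __name__ == '__main__':
    print classstr(IN)  # -> 'IN'
    print classstr(ANY) # -> 'ANY'
    print classstr(99)  # unknown values fall back to repr: '99'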
| mit |
gfyoung/pandas | pandas/tests/series/methods/test_astype.py | 1 | 15104 | from datetime import datetime, timedelta
from importlib import reload
import string
import sys
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas import (
NA,
Categorical,
CategoricalDtype,
Index,
Interval,
NaT,
Series,
Timedelta,
Timestamp,
cut,
date_range,
)
import pandas._testing as tm
class TestAstypeAPI:
def test_arg_for_errors_in_astype(self):
# see GH#14878
ser = Series([1, 2, 3])
msg = (
r"Expected value of kwarg 'errors' to be one of \['raise', "
r"'ignore'\]\. Supplied value is 'False'"
)
with pytest.raises(ValueError, match=msg):
ser.astype(np.float64, errors=False)
ser.astype(np.int8, errors="raise")
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# see GH#7271
ser = Series(range(0, 10, 2), name="abc")
dt1 = dtype_class({"abc": str})
result = ser.astype(dt1)
expected = Series(["0", "2", "4", "6", "8"], name="abc")
tm.assert_series_equal(result, expected)
dt2 = dtype_class({"abc": "float64"})
result = ser.astype(dt2)
expected = Series([0.0, 2.0, 4.0, 6.0, 8.0], dtype="float64", name="abc")
tm.assert_series_equal(result, expected)
dt3 = dtype_class({"abc": str, "def": str})
msg = (
"Only the Series name can be used for the key in Series dtype "
r"mappings\."
)
with pytest.raises(KeyError, match=msg):
ser.astype(dt3)
dt4 = dtype_class({0: str})
with pytest.raises(KeyError, match=msg):
ser.astype(dt4)
# GH#16717
# if dtypes provided is empty, it should error
if dtype_class is Series:
dt5 = dtype_class({}, dtype=object)
else:
dt5 = dtype_class({})
with pytest.raises(KeyError, match=msg):
ser.astype(dt5)
class TestAstype:
@pytest.mark.parametrize("dtype", np.typecodes["All"])
def test_astype_empty_constructor_equality(self, dtype):
# see GH#15524
if dtype not in (
"S",
"V", # poor support (if any) currently
"M",
"m", # Generic timestamps raise a ValueError. Already tested.
):
init_empty = Series([], dtype=dtype)
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
as_type_empty = Series([]).astype(dtype)
tm.assert_series_equal(init_empty, as_type_empty)
@pytest.mark.parametrize("dtype", [str, np.str_])
@pytest.mark.parametrize(
"series",
[
Series([string.digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series([string.digits * 10, tm.rands(63), tm.rands(64), np.nan, 1.0]),
],
)
def test_astype_str_map(self, dtype, series):
# see GH#4405
result = series.astype(dtype)
expected = series.map(str)
tm.assert_series_equal(result, expected)
def test_astype_float_to_period(self):
result = Series([np.nan]).astype("period[D]")
expected = Series([NaT], dtype="period[D]")
tm.assert_series_equal(result, expected)
def test_astype_no_pandas_dtype(self):
# https://github.com/pandas-dev/pandas/pull/24866
ser = Series([1, 2], dtype="int64")
# Don't have PandasDtype in the public API, so we use `.array.dtype`,
# which is a PandasDtype.
result = ser.astype(ser.array.dtype)
tm.assert_series_equal(result, ser)
@pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64])
def test_astype_generic_timestamp_no_frequency(self, dtype, request):
# see GH#15524, GH#15987
data = [1]
s = Series(data)
if np.dtype(dtype).name not in ["timedelta64", "datetime64"]:
mark = pytest.mark.xfail(reason="GH#33890 Is assigned ns unit")
request.node.add_marker(mark)
msg = (
fr"The '{dtype.__name__}' dtype has no unit\. "
fr"Please pass in '{dtype.__name__}\[ns\]' instead."
)
with pytest.raises(ValueError, match=msg):
s.astype(dtype)
def test_astype_dt64_to_str(self):
# GH#10442 : testing astype(str) is correct for Series/DatetimeIndex
dti = date_range("2012-01-01", periods=3)
result = Series(dti).astype(str)
expected = Series(["2012-01-01", "2012-01-02", "2012-01-03"], dtype=object)
tm.assert_series_equal(result, expected)
def test_astype_dt64tz_to_str(self):
# GH#10442 : testing astype(str) is correct for Series/DatetimeIndex
dti_tz = date_range("2012-01-01", periods=3, tz="US/Eastern")
result = Series(dti_tz).astype(str)
expected = Series(
[
"2012-01-01 00:00:00-05:00",
"2012-01-02 00:00:00-05:00",
"2012-01-03 00:00:00-05:00",
],
dtype=object,
)
tm.assert_series_equal(result, expected)
def test_astype_datetime(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0)])
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])
s[1] = np.nan
assert s.dtype == "M8[ns]"
s = s.astype("O")
assert s.dtype == np.object_
def test_astype_datetime64tz(self):
s = Series(date_range("20130101", periods=3, tz="US/Eastern"))
# astype
result = s.astype(object)
expected = Series(s.astype(object), dtype=object)
tm.assert_series_equal(result, expected)
result = Series(s.values).dt.tz_localize("UTC").dt.tz_convert(s.dt.tz)
tm.assert_series_equal(result, s)
# astype - object, preserves on construction
result = Series(s.astype(object))
expected = s.astype(object)
tm.assert_series_equal(result, expected)
# astype - datetime64[ns, tz]
with tm.assert_produces_warning(FutureWarning):
# dt64->dt64tz astype deprecated
result = Series(s.values).astype("datetime64[ns, US/Eastern]")
tm.assert_series_equal(result, s)
with tm.assert_produces_warning(FutureWarning):
# dt64->dt64tz astype deprecated
result = Series(s.values).astype(s.dtype)
tm.assert_series_equal(result, s)
result = s.astype("datetime64[ns, CET]")
expected = Series(date_range("20130101 06:00:00", periods=3, tz="CET"))
tm.assert_series_equal(result, expected)
def test_astype_str_cast_dt64(self):
# see GH#9757
ts = Series([Timestamp("2010-01-04 00:00:00")])
s = ts.astype(str)
expected = Series(["2010-01-04"])
tm.assert_series_equal(s, expected)
ts = Series([Timestamp("2010-01-04 00:00:00", tz="US/Eastern")])
s = ts.astype(str)
expected = Series(["2010-01-04 00:00:00-05:00"])
tm.assert_series_equal(s, expected)
def test_astype_str_cast_td64(self):
# see GH#9757
td = Series([Timedelta(1, unit="d")])
ser = td.astype(str)
expected = Series(["1 days"])
tm.assert_series_equal(ser, expected)
def test_dt64_series_astype_object(self):
dt64ser = Series(date_range("20130101", periods=3))
result = dt64ser.astype(object)
assert isinstance(result.iloc[0], datetime)
assert result.dtype == np.object_
def test_td64_series_astype_object(self):
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="timedelta64[ns]")
result = tdser.astype(object)
assert isinstance(result.iloc[0], timedelta)
assert result.dtype == np.object_
@pytest.mark.parametrize(
"values",
[
Series(["x", "y", "z"], dtype="string"),
Series(["x", "y", "z"], dtype="category"),
Series(3 * [Timestamp("2020-01-01", tz="UTC")]),
Series(3 * [Interval(0, 1)]),
],
)
@pytest.mark.parametrize("errors", ["raise", "ignore"])
def test_astype_ignores_errors_for_extension_dtypes(self, values, errors):
# https://github.com/pandas-dev/pandas/issues/35471
if errors == "ignore":
expected = values
result = values.astype(float, errors="ignore")
tm.assert_series_equal(result, expected)
else:
msg = "(Cannot cast)|(could not convert)"
with pytest.raises((ValueError, TypeError), match=msg):
values.astype(float, errors=errors)
@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64])
def test_astype_from_float_to_str(self, dtype):
# https://github.com/pandas-dev/pandas/issues/36451
s = Series([0.1], dtype=dtype)
result = s.astype(str)
expected = Series(["0.1"])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"value, string_value",
[
(None, "None"),
(np.nan, "nan"),
(NA, "<NA>"),
],
)
def test_astype_to_str_preserves_na(self, value, string_value):
# https://github.com/pandas-dev/pandas/issues/36904
s = Series(["a", "b", value], dtype=object)
result = s.astype(str)
expected = Series(["a", "b", string_value], dtype=object)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["float32", "float64", "int64", "int32"])
def test_astype(self, dtype):
s = Series(np.random.randn(5), name="foo")
as_typed = s.astype(dtype)
assert as_typed.dtype == dtype
assert as_typed.name == s.name
@pytest.mark.parametrize("value", [np.nan, np.inf])
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
def test_astype_cast_nan_inf_int(self, dtype, value):
# gh-14265: check NaN and inf raise error when converting to int
msg = "Cannot convert non-finite values \\(NA or inf\\) to integer"
s = Series([value])
with pytest.raises(ValueError, match=msg):
s.astype(dtype)
@pytest.mark.parametrize("dtype", [int, np.int8, np.int64])
def test_astype_cast_object_int_fail(self, dtype):
arr = Series(["car", "house", "tree", "1"])
msg = r"invalid literal for int\(\) with base 10: 'car'"
with pytest.raises(ValueError, match=msg):
arr.astype(dtype)
def test_astype_cast_object_int(self):
arr = Series(["1", "2", "3", "4"], dtype=object)
result = arr.astype(int)
tm.assert_series_equal(result, Series(np.arange(1, 5)))
def test_astype_unicode(self):
# see GH#7758: A bit of magic is required to set
# default encoding to utf-8
digits = string.digits
test_series = [
Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series(["データーサイエンス、お前はもう死んでいる"]),
]
former_encoding = None
if sys.getdefaultencoding() == "utf-8":
test_series.append(Series(["野菜食べないとやばい".encode()]))
for s in test_series:
res = s.astype("unicode")
expec = s.map(str)
tm.assert_series_equal(res, expec)
# Restore the former encoding
if former_encoding is not None and former_encoding != "utf-8":
reload(sys)
sys.setdefaultencoding(former_encoding)
class TestAstypeCategorical:
def test_astype_categorical_invalid_conversions(self):
# invalid conversion (these are NOT a dtype)
cat = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
ser = Series(np.random.randint(0, 10000, 100)).sort_values()
ser = cut(ser, range(0, 10500, 500), right=False, labels=cat)
msg = (
"dtype '<class 'pandas.core.arrays.categorical.Categorical'>' "
"not understood"
)
with pytest.raises(TypeError, match=msg):
ser.astype(Categorical)
with pytest.raises(TypeError, match=msg):
ser.astype("object").astype(Categorical)
def test_astype_categoricaldtype(self):
s = Series(["a", "b", "a"])
result = s.astype(CategoricalDtype(["a", "b"], ordered=True))
expected = Series(Categorical(["a", "b", "a"], ordered=True))
tm.assert_series_equal(result, expected)
result = s.astype(CategoricalDtype(["a", "b"], ordered=False))
expected = Series(Categorical(["a", "b", "a"], ordered=False))
tm.assert_series_equal(result, expected)
result = s.astype(CategoricalDtype(["a", "b", "c"], ordered=False))
expected = Series(
Categorical(["a", "b", "a"], categories=["a", "b", "c"], ordered=False)
)
tm.assert_series_equal(result, expected)
tm.assert_index_equal(result.cat.categories, Index(["a", "b", "c"]))
@pytest.mark.parametrize("name", [None, "foo"])
@pytest.mark.parametrize("dtype_ordered", [True, False])
@pytest.mark.parametrize("series_ordered", [True, False])
def test_astype_categorical_to_categorical(
self, name, dtype_ordered, series_ordered
):
# GH#10696, GH#18593
s_data = list("abcaacbab")
s_dtype = CategoricalDtype(list("bac"), ordered=series_ordered)
s = Series(s_data, dtype=s_dtype, name=name)
# unspecified categories
dtype = CategoricalDtype(ordered=dtype_ordered)
result = s.astype(dtype)
exp_dtype = CategoricalDtype(s_dtype.categories, dtype_ordered)
expected = Series(s_data, name=name, dtype=exp_dtype)
tm.assert_series_equal(result, expected)
# different categories
dtype = CategoricalDtype(list("adc"), dtype_ordered)
result = s.astype(dtype)
expected = Series(s_data, name=name, dtype=dtype)
tm.assert_series_equal(result, expected)
if dtype_ordered is False:
# not specifying ordered, so only test once
expected = s
result = s.astype("category")
tm.assert_series_equal(result, expected)
def test_astype_bool_missing_to_categorical(self):
# GH-19182
s = Series([True, False, np.nan])
assert s.dtypes == np.object_
result = s.astype(CategoricalDtype(categories=[True, False]))
expected = Series(Categorical([True, False, np.nan], categories=[True, False]))
tm.assert_series_equal(result, expected)
def test_astype_categories_raises(self):
# deprecated GH#17636, removed in GH#27141
s = Series(["a", "b", "a"])
with pytest.raises(TypeError, match="got an unexpected"):
s.astype("category", categories=["a", "b"], ordered=True)
| bsd-3-clause |
swigger/gdb-ios | gdb/testsuite/gdb.perf/gmonster-null-lookup.py | 13 | 1871 | # Copyright (C) 2015-2016 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Test handling of lookup of a symbol that doesn't exist.
# Efficient handling of this case is important, and not just for typos.
# Sometimes the debug info for the needed object isn't present.
from perftest import perftest
from perftest import measure
from perftest import utils
class NullLookup(perftest.TestCaseWithBasicMeasurements):
def __init__(self, name, run_names, binfile):
# We want to measure time in this test.
super(NullLookup, self).__init__(name)
self.run_names = run_names
self.binfile = binfile
def warm_up(self):
pass
def execute_test(self):
for run in self.run_names:
this_run_binfile = "%s-%s" % (self.binfile,
utils.convert_spaces(run))
utils.select_file(this_run_binfile)
utils.runto_main()
utils.safe_execute("mt expand-symtabs")
iteration = 5
while iteration > 0:
utils.safe_execute("mt flush-symbol-cache")
func = lambda: utils.safe_execute("p symbol_not_found")
self.measure.measure(func, run)
iteration -= 1
| gpl-2.0 |
s4n7h0/Gooey | gooey/_tmp/example_argparse_souce_in_main.py | 9 | 4235 | #!/usr/local/bin/python2.7
# encoding: utf-8
'''
bin.example_argparse_souce -- shortdesc
bin.example_argparse_souce is a description
It defines classes_and_methods
@author: user_name
@copyright: 2013 organization_name. All rights reserved.
@license: license
@contact: user_email
@deffield updated: Updated
'''
import sys
import os
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
from gooey import Gooey
__all__ = []
__version__ = 0.1
__date__ = '2013-12-13'
__updated__ = '2013-12-13'
DEBUG = 0
TESTRUN = 0
PROFILE = 0
class CLIError(Exception):
'''Generic exception to raise and log different fatal errors.'''
def __init__(self, msg):
super(CLIError).__init__(type(self))
self.msg = "E: %s" % msg
@property
def __str__(self):
return self.msg
def __unicode__(self):
return self.msg
def main(argv=None): # IGNORE:C0111
'''Command line options.'''
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
program_name = os.path.basename(sys.argv[0])
program_version = "v%s" % __version__
program_build_date = str(__updated__)
program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
program_license = '''%s
Created by user_name on %s.
Copyright 2013 organization_name. All rights reserved.
Licensed under the Apache License 2.0
http://www.apache.org/licenses/LICENSE-2.0
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
USAGE
''' % (program_shortdesc, str(__date__))
# Setup argument parser
parser = ArgumentParser(description='Example Argparse Program', formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("filename", help="filename")
parser.add_argument("-r", "--recursive", dest="recurse", action="store_true",
help="recurse into subfolders [default: %(default)s]")
parser.add_argument("-v", "--verbose", dest="verbose", action="count",
help="set verbosity level [default: %(default)s]")
parser.add_argument("-i", "--include", action="append",
help="only include paths matching this regex pattern. Note: exclude is given preference over include. [default: %(default)s]",
metavar="RE")
parser.add_argument("-m", "--mycoolargument", help="mycoolargument")
parser.add_argument("-e", "--exclude", dest="exclude",
help="exclude paths matching this regex pattern. [default: %(default)s]", metavar="RE")
parser.add_argument('-V', '--version', action='version')
parser.add_argument('-T', '--tester', choices=['yes', 'no'])
parser.add_argument(dest="paths", help="paths to folder(s) with source file(s) [default: %(default)s]",
metavar="path", nargs='+')
# for i in parser._actions:
# print i
# Process arguments
args = parser.parse_args()
paths = args.paths
verbose = args.verbose
recurse = args.recurse
inpat = args.include
expat = args.exclude
if verbose > 0:
print("Verbose mode on")
if recurse:
print("Recursive mode on")
else:
print("Recursive mode off")
if inpat and expat and inpat == expat:
raise CLIError("include and exclude pattern are equal! Nothing will be processed.")
for inpath in paths:
### do something with inpath ###
print(inpath)
return 0
if __name__ == "__main__":
if DEBUG:
sys.argv.append("-h")
# sys.argv.append("-v")
sys.argv.append("-r")
main()
sys.exit()
if TESTRUN:
import doctest
doctest.testmod()
if PROFILE:
import cProfile
import pstats
profile_filename = 'bin.example_argparse_souce_profile.txt'
cProfile.run('main()', profile_filename)
statsfile = open("profile_stats.txt", "wb")
p = pstats.Stats(profile_filename, stream=statsfile)
stats = p.strip_dirs().sort_stats('cumulative')
stats.print_stats()
statsfile.close()
sys.exit(0)
sys.exit(main()) | mit |
isandlaTech/cohorte-devtools | qualifier/deploy/cohorte-home/repo/herald/transports/peer_contact.py | 2 | 5226 | #!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 1.0.1
:status: Alpha
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Bundle version
import herald.version
__version__ = herald.version.__version__
# ------------------------------------------------------------------------------
import logging
# ------------------------------------------------------------------------------
# Prefix to all discovery messages
SUBJECT_DISCOVERY_PREFIX = "herald/directory/discovery"
# First message: Initial contact message, containing our dump
SUBJECT_DISCOVERY_STEP_1 = SUBJECT_DISCOVERY_PREFIX + "/step1"
# Second message: let the remote peer send its dump
SUBJECT_DISCOVERY_STEP_2 = SUBJECT_DISCOVERY_PREFIX + "/step2"
# Third message: the remote peer acknowledge, notify our listeners
SUBJECT_DISCOVERY_STEP_3 = SUBJECT_DISCOVERY_PREFIX + "/step3"
# ------------------------------------------------------------------------------
class PeerContact(object):
"""
Standard peer discovery algorithm
"""
def __init__(self, directory, dump_hook, logname=None):
"""
Sets up members
        :param directory: The Herald Core Directory
:param dump_hook: A method that takes a parsed dump dictionary as
parameter and returns a patched one
:param logname: Name of the class logger
"""
self._directory = directory
self._hook = dump_hook
self._logger = logging.getLogger(logname or __name__)
self.__delayed_notifs = {}
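    # Illustrative example of a dump hook (a sketch, not part of the original
    # module): the hook receives the raw message and the parsed dump and may
    # return a patched copy before the peer is registered in the directory.
    # The 'node' key below is hypothetical, purely for illustration:
    #
    #   def my_dump_hook(message, dump):
    #       patched = dict(dump)
    #       patched['node'] = 'gateway'
    #       return patched
    #
    #   contact = PeerContact(directory, my_dump_hook, logname=__name__)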
def __load_dump(self, message):
"""
Calls the hook method to modify the loaded peer description before
giving it to the directory
:param message: The received Herald message
:return: The updated peer description
"""
dump = message.content
if self._hook is not None:
# Call the hook
try:
updated_dump = self._hook(message, dump)
if updated_dump is not None:
# Use the new description
dump = updated_dump
except (TypeError, ValueError) as ex:
self._logger("Invalid description hook: %s", ex)
return dump
def clear(self):
"""
Clears the pending notification objects
"""
self.__delayed_notifs.clear()
def herald_message(self, herald_svc, message):
"""
Handles a message received by Herald
:param herald_svc: Herald service
:param message: Received message
"""
subject = message.subject
if subject == SUBJECT_DISCOVERY_STEP_1:
# Step 1: Register the remote peer and reply with our dump
try:
# Delayed registration
notification = self._directory.register_delayed(
self.__load_dump(message))
peer = notification.peer
if peer is not None:
# Registration succeeded
self.__delayed_notifs[peer.uid] = notification
# Reply with our dump
herald_svc.reply(
message, self._directory.get_local_peer().dump(),
SUBJECT_DISCOVERY_STEP_2)
except ValueError:
self._logger.error("Error registering a discovered peer")
elif subject == SUBJECT_DISCOVERY_STEP_2:
# Step 2: Register the dump, notify local listeners, then let
# the remote peer notify its listeners
try:
# Register the peer
notification = self._directory.register_delayed(
self.__load_dump(message))
if notification.peer is not None:
# Let the remote peer notify its listeners
herald_svc.reply(message, None, SUBJECT_DISCOVERY_STEP_3)
# Now we can notify listeners
notification.notify()
except ValueError:
self._logger.error("Error registering a peer using the "
"description it sent")
elif subject == SUBJECT_DISCOVERY_STEP_3:
# Step 3: notify local listeners about the remote peer
try:
self.__delayed_notifs.pop(message.sender).notify()
except KeyError:
# Unknown peer
pass
else:
# Unknown subject
self._logger.warning("Unknown discovery step: %s", subject)
| apache-2.0 |
n0m4dz/odoo | addons/account_analytic_plans/wizard/account_crossovered_analytic.py | 341 | 2972 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_crossovered_analytic(osv.osv_memory):
_name = "account.crossovered.analytic"
_description = "Print Crossovered Analytic"
_columns = {
'date1': fields.date('Start Date', required=True),
'date2': fields.date('End Date', required=True),
'journal_ids': fields.many2many('account.analytic.journal', 'crossovered_journal_rel', 'crossover_id', 'journal_id', 'Analytic Journal'),
'ref': fields.many2one('account.analytic.account', 'Analytic Account Reference', required=True),
        'empty_line': fields.boolean("Don't show empty lines"),
}
_defaults = {
'date1': lambda *a: time.strftime('%Y-01-01'),
'date2': lambda *a: time.strftime('%Y-%m-%d'),
}
def print_report(self, cr, uid, ids, context=None):
cr.execute('SELECT account_id FROM account_analytic_line')
res = cr.fetchall()
acc_ids = [x[0] for x in res]
data = self.read(cr, uid, ids, context=context)[0]
data['ref'] = data['ref'][0]
obj_acc = self.pool.get('account.analytic.account').browse(cr, uid, data['ref'], context=context)
name = obj_acc.name
account_ids = self.pool.get('account.analytic.account').search(cr, uid, [('parent_id', 'child_of', [data['ref']])], context=context)
flag = True
for acc in account_ids:
if acc in acc_ids:
flag = False
break
if flag:
raise osv.except_osv(_('User Error!'),_('There are no analytic lines related to account %s.' % name))
datas = {
'ids': [],
'model': 'account.analytic.account',
'form': data
}
return self.pool['report'].get_action(cr, uid, [], 'account_analytic_plans.report_crossoveredanalyticplans', data=datas, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
madan96/sympy | sympy/series/tests/test_fourier.py | 21 | 3982 | from sympy import (symbols, pi, Piecewise, sin, cos, sinc, Rational,
oo, fourier_series, Add)
from sympy.series.fourier import FourierSeries
from sympy.utilities.pytest import raises
x, y, z = symbols('x y z')
fo = fourier_series(x, (x, -pi, pi))
fe = fourier_series(x**2, (-pi, pi))
fp = fourier_series(Piecewise((0, x < 0), (pi, True)), (x, -pi, pi))
def test_FourierSeries():
assert fourier_series(1, (-pi, pi)) == 1
assert (Piecewise((0, x < 0), (pi, True)).
fourier_series((x, -pi, pi)).truncate()) == fp.truncate()
assert isinstance(fo, FourierSeries)
assert fo.function == x
assert fo.x == x
assert fo.period == (-pi, pi)
assert fo.term(3) == 2*sin(3*x) / 3
assert fe.term(3) == -4*cos(3*x) / 9
assert fp.term(3) == 2*sin(3*x) / 3
assert fo.as_leading_term(x) == 2*sin(x)
assert fe.as_leading_term(x) == pi**2 / 3
assert fp.as_leading_term(x) == pi / 2
assert fo.truncate() == 2*sin(x) - sin(2*x) + (2*sin(3*x) / 3)
assert fe.truncate() == -4*cos(x) + cos(2*x) + pi**2 / 3
assert fp.truncate() == 2*sin(x) + (2*sin(3*x) / 3) + pi / 2
fot = fo.truncate(n=None)
s = [0, 2*sin(x), -sin(2*x)]
for i, t in enumerate(fot):
if i == 3:
break
assert s[i] == t
def _check_iter(f, i):
for ind, t in enumerate(f):
assert t == f[ind]
if ind == i:
break
_check_iter(fo, 3)
_check_iter(fe, 3)
_check_iter(fp, 3)
assert fo.subs(x, x**2) == fo
raises(ValueError, lambda: fourier_series(x, (0, 1, 2)))
raises(ValueError, lambda: fourier_series(x, (x, 0, oo)))
raises(ValueError, lambda: fourier_series(x*y, (0, oo)))
def test_FourierSeries_2():
p = Piecewise((0, x < 0), (x, True))
f = fourier_series(p, (x, -2, 2))
assert f.term(3) == (2*sin(3*pi*x / 2) / (3*pi) -
4*cos(3*pi*x / 2) / (9*pi**2))
assert f.truncate() == (2*sin(pi*x / 2) / pi - sin(pi*x) / pi -
4*cos(pi*x / 2) / pi**2 + Rational(1, 2))
def test_fourier_series_square_wave():
"""Test if fourier_series approximates discontinuous function correctly."""
square_wave = Piecewise((1, x < pi), (-1, True))
s = fourier_series(square_wave, (x, 0, 2*pi))
assert s.truncate(3) == 4 / pi * sin(x) + 4 / (3 * pi) * sin(3 * x) + \
4 / (5 * pi) * sin(5 * x)
assert s.sigma_approximation(4) == 4 / pi * sin(x) * sinc(pi / 4) + \
4 / (3 * pi) * sin(3 * x) * sinc(3 * pi / 4)
def test_FourierSeries__operations():
fes = fe.scale(-1).shift(pi**2)
assert fes.truncate() == 4*cos(x) - cos(2*x) + 2*pi**2 / 3
assert fp.shift(-pi/2).truncate() == (2*sin(x) + (2*sin(3*x) / 3) +
(2*sin(5*x) / 5))
fos = fo.scale(3)
assert fos.truncate() == 6*sin(x) - 3*sin(2*x) + 2*sin(3*x)
fx = fe.scalex(2).shiftx(1)
assert fx.truncate() == -4*cos(2*x + 2) + cos(4*x + 4) + pi**2 / 3
fl = fe.scalex(3).shift(-pi).scalex(2).shiftx(1).scale(4)
assert fl.truncate() == (-16*cos(6*x + 6) + 4*cos(12*x + 12) -
4*pi + 4*pi**2 / 3)
raises(ValueError, lambda: fo.shift(x))
raises(ValueError, lambda: fo.shiftx(sin(x)))
raises(ValueError, lambda: fo.scale(x*y))
raises(ValueError, lambda: fo.scalex(x**2))
def test_FourierSeries__neg():
assert (-fo).truncate() == -2*sin(x) + sin(2*x) - (2*sin(3*x) / 3)
assert (-fe).truncate() == +4*cos(x) - cos(2*x) - pi**2 / 3
def test_FourierSeries__add__sub():
assert fo + fo == fo.scale(2)
assert fo - fo == 0
assert -fe - fe == fe.scale(-2)
assert (fo + fe).truncate() == 2*sin(x) - sin(2*x) - 4*cos(x) + cos(2*x) \
+ pi**2 / 3
assert (fo - fe).truncate() == 2*sin(x) - sin(2*x) + 4*cos(x) - cos(2*x) \
- pi**2 / 3
assert isinstance(fo + 1, Add)
raises(ValueError, lambda: fo + fourier_series(x, (x, 0, 2)))
| bsd-3-clause |
AnhellO/DAS_Sistemas | Ago-Dic-2019/DanielM/PracticaUno/3.10_EveryFunction.py | 1 | 1589 | # 3-10. Every Function: Think of something you could store in a list.
# For example, you could make a list of mountains, rivers, countries, cities, languages, or anything else you’d like.
# Write a program that creates a list containing these items and then uses each function introduced
# in this chapter at least once.
countries = ['Mexico', 'United States', 'Canada', 'Germany', 'Russia']
# --------------------------------------------------------------------------
print(countries[2])
# --------------------------------------------------------------------------
message = "I live in " + countries[0] + "."
print(message)
# --------------------------------------------------------------------------
print(countries)
del countries[1]
countries.insert(1, 'Japan')
print(countries)
# --------------------------------------------------------------------------
countries.append('Chile')
print(countries)
# --------------------------------------------------------------------------
print(countries.pop())
print(countries)
# --------------------------------------------------------------------------
print(sorted(countries))
# --------------------------------------------------------------------------
print(sorted(countries, reverse=True))
# --------------------------------------------------------------------------
countries.reverse()
print(countries)
# --------------------------------------------------------------------------
countries.sort()
print(countries)
# --------------------------------------------------------------------------
countries.sort(reverse=True)
print(countries) | mit |
talhajaved/nyuadmarket | flask/lib/python2.7/site-packages/whoosh/idsets.py | 52 | 19132 | """
An implementation of an object that acts like a collection of on/off bits.
"""
import operator
from array import array
from bisect import bisect_left, bisect_right, insort
from whoosh.compat import integer_types, izip, izip_longest, next, xrange
from whoosh.util.numeric import bytes_for_bits
# Number of '1' bits in each byte (0-255)
_1SPERBYTE = array('B', [0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2,
2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4,
3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 1, 2, 2, 3, 2, 3,
3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5,
5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4,
3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5,
5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5,
3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 3, 4,
4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7,
6, 7, 7, 8])
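# e.g. _1SPERBYTE[0b10110000] == 3; used below as a simple popcount lookup
# table to count set bits one byte at a time.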
class DocIdSet(object):
"""Base class for a set of positive integers, implementing a subset of the
built-in ``set`` type's interface with extra docid-related methods.
This is a superclass for alternative set implementations to the built-in
``set`` which are more memory-efficient and specialized toward storing
sorted lists of positive integers, though they will inevitably be slower
than ``set`` for most operations since they're pure Python.
"""
def __eq__(self, other):
for a, b in izip(self, other):
if a != b:
return False
return True
    def __ne__(self, other):
return not self.__eq__(other)
def __len__(self):
raise NotImplementedError
def __iter__(self):
raise NotImplementedError
def __contains__(self, i):
raise NotImplementedError
def __or__(self, other):
return self.union(other)
def __and__(self, other):
return self.intersection(other)
def __sub__(self, other):
return self.difference(other)
def copy(self):
raise NotImplementedError
def add(self, n):
raise NotImplementedError
def discard(self, n):
raise NotImplementedError
def update(self, other):
add = self.add
for i in other:
add(i)
def intersection_update(self, other):
for n in self:
if n not in other:
self.discard(n)
def difference_update(self, other):
for n in other:
self.discard(n)
def invert_update(self, size):
"""Updates the set in-place to contain numbers in the range
``[0 - size)`` except numbers that are in this set.
"""
for i in xrange(size):
if i in self:
self.discard(i)
else:
self.add(i)
def intersection(self, other):
c = self.copy()
c.intersection_update(other)
return c
def union(self, other):
c = self.copy()
c.update(other)
return c
def difference(self, other):
c = self.copy()
c.difference_update(other)
return c
def invert(self, size):
c = self.copy()
c.invert_update(size)
return c
def isdisjoint(self, other):
a = self
b = other
if len(other) < len(self):
a, b = other, self
for num in a:
if num in b:
return False
return True
def before(self, i):
"""Returns the previous integer in the set before ``i``, or None.
"""
raise NotImplementedError
def after(self, i):
"""Returns the next integer in the set after ``i``, or None.
"""
raise NotImplementedError
def first(self):
"""Returns the first (lowest) integer in the set.
"""
raise NotImplementedError
def last(self):
"""Returns the last (highest) integer in the set.
"""
raise NotImplementedError
class BaseBitSet(DocIdSet):
# Methods to override
def byte_count(self):
raise NotImplementedError
def _get_byte(self, i):
raise NotImplementedError
def _iter_bytes(self):
raise NotImplementedError
# Base implementations
def __len__(self):
return sum(_1SPERBYTE[b] for b in self._iter_bytes())
def __iter__(self):
base = 0
for byte in self._iter_bytes():
for i in xrange(8):
if byte & (1 << i):
yield base + i
base += 8
def __nonzero__(self):
return any(n for n in self._iter_bytes())
__bool__ = __nonzero__
def __contains__(self, i):
bucket = i // 8
if bucket >= self.byte_count():
return False
return bool(self._get_byte(bucket) & (1 << (i & 7)))
def first(self):
return self.after(-1)
def last(self):
return self.before(self.byte_count() * 8 + 1)
def before(self, i):
_get_byte = self._get_byte
size = self.byte_count() * 8
if i <= 0:
return None
elif i >= size:
i = size - 1
else:
i -= 1
bucket = i // 8
while i >= 0:
byte = _get_byte(bucket)
if not byte:
bucket -= 1
i = bucket * 8 + 7
continue
if byte & (1 << (i & 7)):
return i
if i % 8 == 0:
bucket -= 1
i -= 1
return None
def after(self, i):
_get_byte = self._get_byte
size = self.byte_count() * 8
if i >= size:
return None
elif i < 0:
i = 0
else:
i += 1
bucket = i // 8
while i < size:
byte = _get_byte(bucket)
if not byte:
bucket += 1
i = bucket * 8
continue
if byte & (1 << (i & 7)):
return i
i += 1
if i % 8 == 0:
bucket += 1
return None
class OnDiskBitSet(BaseBitSet):
"""A DocIdSet backed by an array of bits on disk.
>>> st = RamStorage()
>>> f = st.create_file("test.bin")
>>> bs = BitSet([1, 10, 15, 7, 2])
>>> bytecount = bs.to_disk(f)
>>> f.close()
>>> # ...
>>> f = st.open_file("test.bin")
>>> odbs = OnDiskBitSet(f, bytecount)
>>> list(odbs)
[1, 2, 7, 10, 15]
"""
def __init__(self, dbfile, basepos, bytecount):
"""
:param dbfile: a :class:`~whoosh.filedb.structfile.StructFile` object
to read from.
:param basepos: the base position of the bytes in the given file.
:param bytecount: the number of bytes to use for the bit array.
"""
self._dbfile = dbfile
self._basepos = basepos
self._bytecount = bytecount
def __repr__(self):
return "%s(%s, %d, %d)" % (self.__class__.__name__, self.dbfile,
self._basepos, self.bytecount)
def byte_count(self):
return self._bytecount
def _get_byte(self, n):
return self._dbfile.get_byte(self._basepos + n)
def _iter_bytes(self):
dbfile = self._dbfile
dbfile.seek(self._basepos)
for _ in xrange(self._bytecount):
yield dbfile.read_byte()
class BitSet(BaseBitSet):
"""A DocIdSet backed by an array of bits. This can also be useful as a bit
array (e.g. for a Bloom filter). It is much more memory efficient than a
large built-in set of integers, but wastes memory for sparse sets.
"""
def __init__(self, source=None, size=0):
"""
        :param source: an iterable of positive integers to add to this set.
        :param size: the maximum number of bits to allow for; if not given and
            ``source`` is a list, tuple, set or frozenset, it is guessed from
            the largest value in ``source``.
"""
# If the source is a list, tuple, or set, we can guess the size
if not size and isinstance(source, (list, tuple, set, frozenset)):
size = max(source)
bytecount = bytes_for_bits(size)
self.bits = array("B", (0 for _ in xrange(bytecount)))
if source:
add = self.add
for num in source:
add(num)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, list(self))
def byte_count(self):
return len(self.bits)
def _get_byte(self, n):
return self.bits[n]
def _iter_bytes(self):
return iter(self.bits)
def _trim(self):
bits = self.bits
last = len(self.bits) - 1
while last >= 0 and not bits[last]:
last -= 1
del self.bits[last + 1:]
def _resize(self, tosize):
curlength = len(self.bits)
newlength = bytes_for_bits(tosize)
if newlength > curlength:
self.bits.extend((0,) * (newlength - curlength))
elif newlength < curlength:
del self.bits[newlength + 1:]
def _zero_extra_bits(self, size):
bits = self.bits
spill = size - ((len(bits) - 1) * 8)
if spill:
mask = 2 ** spill - 1
bits[-1] = bits[-1] & mask
def _logic(self, obj, op, other):
objbits = obj.bits
for i, (byte1, byte2) in enumerate(izip_longest(objbits, other.bits,
fillvalue=0)):
value = op(byte1, byte2) & 0xFF
if i >= len(objbits):
objbits.append(value)
else:
objbits[i] = value
obj._trim()
return obj
def to_disk(self, dbfile):
dbfile.write_array(self.bits)
return len(self.bits)
@classmethod
def from_bytes(cls, bs):
b = cls()
b.bits = array("B", bs)
return b
@classmethod
def from_disk(cls, dbfile, bytecount):
return cls.from_bytes(dbfile.read_array("B", bytecount))
def copy(self):
b = self.__class__()
b.bits = array("B", iter(self.bits))
return b
def clear(self):
for i in xrange(len(self.bits)):
self.bits[i] = 0
def add(self, i):
bucket = i >> 3
if bucket >= len(self.bits):
self._resize(i + 1)
self.bits[bucket] |= 1 << (i & 7)
def discard(self, i):
bucket = i >> 3
self.bits[bucket] &= ~(1 << (i & 7))
def _resize_to_other(self, other):
if isinstance(other, (list, tuple, set, frozenset)):
maxbit = max(other)
if maxbit // 8 > len(self.bits):
self._resize(maxbit)
def update(self, iterable):
self._resize_to_other(iterable)
DocIdSet.update(self, iterable)
def intersection_update(self, other):
if isinstance(other, BitSet):
return self._logic(self, operator.__and__, other)
discard = self.discard
for n in self:
if n not in other:
discard(n)
def difference_update(self, other):
if isinstance(other, BitSet):
return self._logic(self, lambda x, y: x & ~y, other)
discard = self.discard
for n in other:
discard(n)
def invert_update(self, size):
bits = self.bits
for i in xrange(len(bits)):
bits[i] = ~bits[i] & 0xFF
self._zero_extra_bits(size)
def union(self, other):
if isinstance(other, BitSet):
return self._logic(self.copy(), operator.__or__, other)
b = self.copy()
b.update(other)
return b
def intersection(self, other):
if isinstance(other, BitSet):
return self._logic(self.copy(), operator.__and__, other)
return BitSet(source=(n for n in self if n in other))
def difference(self, other):
if isinstance(other, BitSet):
return self._logic(self.copy(), lambda x, y: x & ~y, other)
return BitSet(source=(n for n in self if n not in other))
class SortedIntSet(DocIdSet):
"""A DocIdSet backed by a sorted array of integers.
"""
def __init__(self, source=None, typecode="I"):
if source:
self.data = array(typecode, sorted(source))
else:
self.data = array(typecode)
self.typecode = typecode
def copy(self):
sis = SortedIntSet()
sis.data = array(self.typecode, self.data)
return sis
def size(self):
return len(self.data) * self.data.itemsize
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.data)
def __len__(self):
return len(self.data)
def __iter__(self):
return iter(self.data)
def __nonzero__(self):
return bool(self.data)
__bool__ = __nonzero__
def __contains__(self, i):
data = self.data
if not data or i < data[0] or i > data[-1]:
return False
pos = bisect_left(data, i)
if pos == len(data):
return False
return data[pos] == i
def add(self, i):
data = self.data
if not data or i > data[-1]:
data.append(i)
else:
mn = data[0]
mx = data[-1]
if i == mn or i == mx:
return
elif i > mx:
data.append(i)
elif i < mn:
data.insert(0, i)
else:
pos = bisect_left(data, i)
if data[pos] != i:
data.insert(pos, i)
def discard(self, i):
data = self.data
pos = bisect_left(data, i)
        if pos < len(data) and data[pos] == i:
data.pop(pos)
def clear(self):
self.data = array(self.typecode)
def intersection_update(self, other):
self.data = array(self.typecode, (num for num in self if num in other))
def difference_update(self, other):
self.data = array(self.typecode,
(num for num in self if num not in other))
def intersection(self, other):
return SortedIntSet((num for num in self if num in other))
def difference(self, other):
return SortedIntSet((num for num in self if num not in other))
def first(self):
return self.data[0]
def last(self):
return self.data[-1]
def before(self, i):
data = self.data
pos = bisect_left(data, i)
if pos < 1:
return None
else:
return data[pos - 1]
def after(self, i):
data = self.data
if not data or i >= data[-1]:
return None
elif i < data[0]:
return data[0]
pos = bisect_right(data, i)
return data[pos]
class ReverseIdSet(DocIdSet):
"""
Wraps a DocIdSet object and reverses its semantics, so docs in the wrapped
set are not in this set, and vice-versa.
"""
def __init__(self, idset, limit):
"""
:param idset: the DocIdSet object to wrap.
:param limit: the highest possible ID plus one.
"""
self.idset = idset
self.limit = limit
def __len__(self):
return self.limit - len(self.idset)
def __contains__(self, i):
return i not in self.idset
def __iter__(self):
ids = iter(self.idset)
try:
nx = next(ids)
except StopIteration:
nx = -1
for i in xrange(self.limit):
if i == nx:
try:
nx = next(ids)
except StopIteration:
nx = -1
else:
yield i
def add(self, n):
self.idset.discard(n)
def discard(self, n):
self.idset.add(n)
def first(self):
for i in self:
return i
def last(self):
idset = self.idset
maxid = self.limit - 1
if idset.last() < maxid - 1:
return maxid
for i in xrange(maxid, -1, -1):
if i not in idset:
return i
ROARING_CUTOFF = 1 << 12
class RoaringIdSet(DocIdSet):
"""
Separates IDs into ranges of 2^16 bits, and stores each range in the most
efficient type of doc set, either a BitSet (if the range has >= 2^12 IDs)
or a sorted ID set of 16-bit shorts.
"""
cutoff = 2**12
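    # Worked example (illustrative only): for n = 70000,
    #   bucket = 70000 >> 16 = 1          (the range 65536..131071)
    #   n - (bucket << 16) = 70000 - 65536 = 4464
    # so 4464 is stored in the second sub-set; once a sub-set grows past
    # 2**12 entries it is swapped from a SortedIntSet to a BitSet (and back
    # when it shrinks).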
def __init__(self, source=None):
self.idsets = []
if source:
self.update(source)
def __len__(self):
if not self.idsets:
return 0
return sum(len(idset) for idset in self.idsets)
def __contains__(self, n):
bucket = n >> 16
if bucket >= len(self.idsets):
return False
return (n - (bucket << 16)) in self.idsets[bucket]
def __iter__(self):
        for i, idset in enumerate(self.idsets):
floor = i << 16
for n in idset:
yield floor + n
def _find(self, n):
bucket = n >> 16
        floor = bucket << 16
if bucket >= len(self.idsets):
self.idsets.extend([SortedIntSet() for _
in xrange(len(self.idsets), bucket + 1)])
idset = self.idsets[bucket]
return bucket, floor, idset
def add(self, n):
bucket, floor, idset = self._find(n)
oldlen = len(idset)
idset.add(n - floor)
if oldlen <= ROARING_CUTOFF < len(idset):
self.idsets[bucket] = BitSet(idset)
def discard(self, n):
bucket, floor, idset = self._find(n)
oldlen = len(idset)
idset.discard(n - floor)
if oldlen > ROARING_CUTOFF >= len(idset):
self.idsets[bucket] = SortedIntSet(idset)
class MultiIdSet(DocIdSet):
"""Wraps multiple SERIAL sub-DocIdSet objects and presents them as an
aggregated, read-only set.
"""
def __init__(self, idsets, offsets):
"""
:param idsets: a list of DocIdSet objects.
:param offsets: a list of offsets corresponding to the DocIdSet objects
in ``idsets``.
"""
assert len(idsets) == len(offsets)
self.idsets = idsets
self.offsets = offsets
def _document_set(self, n):
offsets = self.offsets
return max(bisect_left(offsets, n), len(self.offsets) - 1)
def _set_and_docnum(self, n):
setnum = self._document_set(n)
offset = self.offsets[setnum]
return self.idsets[setnum], n - offset
def __len__(self):
return sum(len(idset) for idset in self.idsets)
def __iter__(self):
for idset, offset in izip(self.idsets, self.offsets):
for docnum in idset:
yield docnum + offset
def __contains__(self, item):
idset, n = self._set_and_docnum(item)
return n in idset
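# Illustrative sketch (not part of the original module): combining two
# sub-sets whose document numbers start at offsets 0 and 100.
#
#   >>> m = MultiIdSet([SortedIntSet([1, 4]), SortedIntSet([0, 2])], [0, 100])
#   >>> list(m)
#   [1, 4, 100, 102]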
| mit |
jpotter/angel | lib/angel/util/terminal.py | 2 | 1563 | import os
def terminal_stdout_supports_color():
return os.isatty(1)
def terminal_stderr_supports_color():
return os.isatty(2)
def terminal_get_size():
''' Return a tuple of (width, height, is_true_size); where is_true_size is false when the size is only a guess. '''
# Based on http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
env = os.environ
is_true_size = True
def ioctl_GWINSZ(fd):
try:
import fcntl, termios, struct, os
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except:
return
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
cr = (env.get('LINES', 25), env.get('COLUMNS', 80))
if 'LINES' not in env or 'COLUMNS' not in env:
is_true_size = False
return int(cr[1]), int(cr[0]), is_true_size
def terminal_width():
"""Return the width of the terminal, or best guess when we can't detect it."""
return terminal_get_size()[0]
def terminal_height():
"""Return the height of the terminal, or the best guess when we can't detect it."""
return terminal_get_size()[1]
def terminal_width_is_true_size():
"""Return true if our width/height functions are returning best-guesses, instead of detecting it correctly."""
return terminal_get_size()[2] | apache-2.0 |
jakemadison/The_Insolvency_Solver | app/__init__.py | 1 | 1913 | from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
import os
from flask.ext.login import LoginManager
from flask.ext.openid import OpenID
from config import basedir
import logging
def setup_logger(logger_instance):
    if logger_instance.handlers:  # prevents the loading of duplicate handlers/log output
return
formatter = logging.Formatter('(%(asctime)s: %(name)s: %(levelname)s): %(message)s')
ch = logging.StreamHandler()
ch.setFormatter(formatter)
logger_instance.addHandler(ch)
ch = logging.FileHandler(basedir+'/insolvency_logger.log')
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
    logger_instance.addHandler(ch)
logger = logging.getLogger(__name__)
setup_logger(logger)
logger.setLevel(logging.DEBUG)
logger.info('completed logger config. beginning to load application.')
app = Flask(__name__)
app.config.from_object('config')
try:
from local_settings import OAUTH_CREDS
app.config['OAUTH_CREDENTIALS'] = OAUTH_CREDS
except ImportError, e:
pass
# Attempting to set up engine here:
db = SQLAlchemy(app)
engine = db.engine
db_session = db.session
lm = LoginManager()
lm.init_app(app)
oid = OpenID(app, os.path.join(basedir, 'tmp'))
from app import views, models
# register social blueprint:
# from social.apps.flask_app.routes import social_auth
# app.register_blueprint(social_auth)
# failing here;
try:
# from social.apps.flask_app.default.models import init_social
# from social.apps.flask_app.template_filters import backends
# app.context_processor(backends)
# init_social(app, db)
pass
# from app.models import User
# from social.apps.flask_app.default import models as social_models
# User.Base.metadata.create_all(engine) # create all tables using metadata
# social_models.PSABase.metadata.create_all(engine)
except KeyError, e:
logger.error('key error again: {0}'.format(e))
| agpl-3.0 |
glawler/watcher-visualization | src/clients/gpsFeeder/gpsFeeder.py | 2 | 2909 | #!/usr/bin/env python
# @file gpsFeeder.py
# @author Geoff Lawler <[email protected]>
# @date 2009-07-15
import gps, os, time, socket, subprocess, sys
def connect():
while 1:
try:
session = gps.gps()
except socket.error:
print "Unable to connect to gpsd, trying again in just a sec."
time.sleep(1)
else:
return session
def main(serverName, dataAddress):
session=connect()
while 1:
while 1:
try:
session.query('admosy')
except socket.error:
session=connect()
else:
break
# a = altitude, d = date/time, m=mode,
# o=postion/fix, s=status, y=satellites
print
print ' GPS reading'
print '----------------------------------------'
print 'latitude ' , session.fix.latitude
print 'longitude ' , session.fix.longitude
print 'time utc ' , session.utc, session.fix.time
print 'altitude ' , session.fix.altitude
print 'eph ' , session.fix.eph
print 'epv ' , session.fix.epv
print 'ept ' , session.fix.ept
print 'speed ' , session.fix.speed
print 'climb ' , session.fix.climb
print
print ' Satellites (total of', len(session.satellites) , ' in view)'
for i in session.satellites:
print '\t', i
if session.fix.latitude and session.fix.longitude:
try:
# GTL -- need to check the return val here.
sendGPSArgs=['sendGPSMessage','-x', str(session.fix.longitude),'-y', str(session.fix.latitude),'-z', str(session.fix.altitude),'-s', str(serverName)];
if dataAddress != None:
sendGPSArgs.append('-n')
sendGPSArgs.append(dataAddress)
retCode=subprocess.call(sendGPSArgs);
except OSError:
                print 'Caught exception when trying to run sendGPSMessage, is it in your $PATH?'
                print 'If not, type \'export PATH=$PATH:/path/to/dir/with/sendGPSMessage/in/it\' in this shell'
sys.exit(1)
time.sleep(1)
if __name__ == '__main__':
import sys
from optparse import OptionParser
parser=OptionParser('Usage: %prog -s watcherdServerName [-a localNodeAddress]')
parser.add_option('-s', '--serverName', dest='serverName', help='machine name/ip address where watcherd is running')
parser.add_option('-a', '--address', dest='dataAddress', help='local host data interface address, where the gps data "comes from"')
(options, args)=parser.parse_args()
if options.serverName==None:
print 'You must give a servername on the command line: use the \'-s server\' option to do so.'
else:
main(options.serverName, options.dataAddress)
| agpl-3.0 |
acamposruiz/quecoins | pybb/migrations/0019_remove_attachment_fields.py | 2 | 12520 | # encoding: utf-8
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Attachment.hash'
db.delete_column('pybb_attachment', 'hash')
# Deleting field 'Attachment.content_type'
db.delete_column('pybb_attachment', 'content_type')
# Deleting field 'Attachment.path'
db.delete_column('pybb_attachment', 'path')
# Deleting field 'Attachment.name'
db.delete_column('pybb_attachment', 'name')
def backwards(self, orm):
# Adding field 'Attachment.hash'
db.add_column('pybb_attachment', 'hash', self.gf('django.db.models.fields.CharField')(blank=True, default='', max_length=40, db_index=True), keep_default=False)
# User chose to not deal with backwards NULL issues for 'Attachment.content_type'
raise RuntimeError("Cannot reverse this migration. 'Attachment.content_type' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'Attachment.path'
raise RuntimeError("Cannot reverse this migration. 'Attachment.path' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'Attachment.name'
raise RuntimeError("Cannot reverse this migration. 'Attachment.name' and its values cannot be restored.")
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'pybb.attachment': {
'Meta': {'object_name': 'Attachment'},
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': "orm['pybb.Post']"}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'pybb.category': {
'Meta': {'ordering': "['position']", 'object_name': 'Category'},
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'pybb.forum': {
'Meta': {'ordering': "['position']", 'object_name': 'Forum'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'forums'", 'to': "orm['pybb.Category']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'headline': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name), 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'post_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'readed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'readed_forums'", 'symmetrical': 'False', 'through': "orm['pybb.ForumReadTracker']", 'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)}),
'topic_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'pybb.forumreadtracker': {
'Meta': {'object_name': 'ForumReadTracker'},
'forum': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pybb.Forum']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time_stamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)})
},
'pybb.post': {
'Meta': {'ordering': "['created']", 'object_name': 'Post'},
'body': ('django.db.models.fields.TextField', [], {}),
'body_html': ('django.db.models.fields.TextField', [], {}),
'body_text': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'on_moderation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['pybb.Topic']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)}),
'user_ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15', 'blank': 'True'})
},
'pybb.profile': {
'Meta': {'object_name': 'Profile'},
'autosubscribe': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'avatar': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'English'", 'max_length': '10', 'blank': 'True'}),
'post_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'show_signatures': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'signature': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'blank': 'True'}),
'signature_html': ('django.db.models.fields.TextField', [], {'max_length': '1054', 'blank': 'True'}),
'time_zone': ('django.db.models.fields.FloatField', [], {'default': '3.0'}),
'user': ('annoying.fields.AutoOneToOneField', [], {'related_name': "'pybb_profile'", 'unique': 'True', 'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)})
},
'pybb.topic': {
'Meta': {'ordering': "['-created']", 'object_name': 'Topic'},
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'forum': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'topics'", 'to': "orm['pybb.Forum']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'on_moderation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'readed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'readed_topics'", 'symmetrical': 'False', 'through': "orm['pybb.TopicReadTracker']", 'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)}),
'sticky': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subscribers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'subscriptions'", 'blank': 'True', 'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'pybb.topicreadtracker': {
'Meta': {'object_name': 'TopicReadTracker'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time_stamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pybb.Topic']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)})
}
}
complete_apps = ['pybb']
| bsd-2-clause |
elkingtonmcb/nupic | src/regions/PictureSensorExplorers/horizontal.py | 17 | 1768 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file defines HorizontalPictureExplorer, an explorer for
PictureSensor.
"""
# Third-party imports
import numpy
# Local imports
from nupic.regions.PictureSensorExplorers.random import RandomPictureExplorer
class HorizontalPictureExplorer(RandomPictureExplorer):
"""
Specialization of 'random' explorer that allows horizontal
sweeps only.
"""
def initSequence(self, state, params):
# Invoke base class
super(HorizontalPictureExplorer, self).initSequence(state, params)
# Force vertical velocity to be zero
state['velocityY'] = 0
# Make sure we don't allow stationary (no velocity)
if state['velocityX'] == 0:
state['velocityX'] = self._rng.choice(numpy.array([-1, 1], dtype=int) \
* max(1, params['minVelocity']))
| agpl-3.0 |
clemensv/qpid-proton | proton-j/src/main/resources/curl.py | 13 | 1678 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
#
from org.apache.qpid.proton.messenger.impl import Address
def pn_url():
return Address()
def pn_url_parse(urlstr):
return Address(urlstr)
def pn_url_free(url): pass
def pn_url_clear(url):
url.clear();
def pn_url_str(url): return url.toString()
def pn_url_get_scheme(url): return url.getScheme()
def pn_url_get_username(url): return url.getUser()
def pn_url_get_password(url): return url.getPass()
def pn_url_get_host(url): return url.getHost() or None
def pn_url_get_port(url): return url.getPort()
def pn_url_get_path(url): return url.getName()
def pn_url_set_scheme(url, value): url.setScheme(value)
def pn_url_set_username(url, value): url.setUser(value)
def pn_url_set_password(url, value): url.setPass(value)
def pn_url_set_host(url, value): url.setHost(value)
def pn_url_set_port(url, value): url.setPort(value)
def pn_url_set_path(url, value): url.setName(value)
| apache-2.0 |
luxus/home-assistant | tests/components/test_recorder.py | 4 | 2116 | """The tests for the Recorder component."""
# pylint: disable=too-many-public-methods,protected-access
import unittest
import os
from homeassistant.const import MATCH_ALL
from homeassistant.components import recorder
from tests.common import get_test_home_assistant
class TestRecorder(unittest.TestCase):
"""Test the chromecast module."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
recorder.setup(self.hass, {})
self.hass.start()
recorder._INSTANCE.block_till_done()
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
recorder._INSTANCE.block_till_done()
os.remove(self.hass.config.path(recorder.DB_FILE))
def test_saving_state(self):
"""Test saving and restoring a state."""
entity_id = 'test.recorder'
state = 'restoring_from_db'
attributes = {'test_attr': 5, 'test_attr_10': 'nice'}
self.hass.states.set(entity_id, state, attributes)
self.hass.pool.block_till_done()
recorder._INSTANCE.block_till_done()
states = recorder.query_states('SELECT * FROM states')
self.assertEqual(1, len(states))
self.assertEqual(self.hass.states.get(entity_id), states[0])
def test_saving_event(self):
"""Test saving and restoring an event."""
event_type = 'EVENT_TEST'
event_data = {'test_attr': 5, 'test_attr_10': 'nice'}
events = []
def event_listener(event):
"""Record events from eventbus."""
if event.event_type == event_type:
events.append(event)
self.hass.bus.listen(MATCH_ALL, event_listener)
self.hass.bus.fire(event_type, event_data)
self.hass.pool.block_till_done()
recorder._INSTANCE.block_till_done()
db_events = recorder.query_events(
'SELECT * FROM events WHERE event_type = ?', (event_type, ))
self.assertEqual(events, db_events)
| mit |
nirvn/QGIS | python/plugins/processing/algs/qgis/DeleteColumn.py | 1 | 3179 | # -*- coding: utf-8 -*-
"""
***************************************************************************
DeleteColumn.py
---------------------
Date : May 2010
Copyright : (C) 2010 by Michael Minn
Email : pyqgis at michaelminn dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Minn'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, Michael Minn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import (QgsProcessingParameterField,
QgsProcessing)
from processing.algs.qgis.QgisAlgorithm import QgisFeatureBasedAlgorithm
class DeleteColumn(QgisFeatureBasedAlgorithm):
COLUMNS = 'COLUMN'
def tags(self):
return self.tr('drop,delete,remove,fields,columns,attributes').split(',')
def group(self):
return self.tr('Vector table')
def __init__(self):
super().__init__()
self.fields_to_delete = []
self.field_indices = []
def initParameters(self, config=None):
self.addParameter(QgsProcessingParameterField(self.COLUMNS,
self.tr('Fields to drop'),
None, 'INPUT', QgsProcessingParameterField.Any, True))
def inputLayerTypes(self):
return [QgsProcessing.TypeVector]
def name(self):
return 'deletecolumn'
def displayName(self):
return self.tr('Drop field(s)')
def outputName(self):
return self.tr('Fields dropped')
def prepareAlgorithm(self, parameters, context, feedback):
self.fields_to_delete = self.parameterAsFields(parameters, self.COLUMNS, context)
return True
def outputFields(self, input_fields):
# loop through twice - first we need to build up a list of original attribute indices
for f in self.fields_to_delete:
index = input_fields.lookupField(f)
self.field_indices.append(index)
# important - make sure we remove from the end so we aren't changing used indices as we go
self.field_indices.sort(reverse=True)
# this second time we make a cleaned version of the fields
for index in self.field_indices:
input_fields.remove(index)
return input_fields
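        # For example (illustrative only): with fields [A, B, C, D] and B and
        # D selected for dropping, the collected indices are [1, 3]; removing
        # index 3 first and then 1 leaves the earlier index valid, whereas
        # removing 1 first would shift D down to index 2 and the wrong field
        # would be cut.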
def processFeature(self, feature, feedback):
attributes = feature.attributes()
for index in self.field_indices:
del attributes[index]
feature.setAttributes(attributes)
return feature
| gpl-2.0 |
nicoTrombon/DjangoPolls | env/Lib/site-packages/django/views/csrf.py | 437 | 5057 | from django.conf import settings
from django.http import HttpResponseForbidden
from django.template import Context, Engine
from django.utils.translation import ugettext as _
from django.utils.version import get_docs_version
# We include the template inline since we need to be able to reliably display
# this error message, especially for the sake of developers, and there isn't any
# other way of making it available independent of what is in the settings file.
# Only the text appearing with DEBUG=False is translated. Normal translation
# tags cannot be used with this inline template as makemessages would not be
# able to discover the strings.
CSRF_FAILURE_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>403 Forbidden</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
#info { background:#f6f6f6; }
#info ul { margin: 0.5em 4em; }
#info p, #summary p { padding-top:10px; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>{{ title }} <span>(403)</span></h1>
<p>{{ main }}</p>
{% if no_referer %}
<p>{{ no_referer1 }}</p>
<p>{{ no_referer2 }}</p>
{% endif %}
{% if no_cookie %}
<p>{{ no_cookie1 }}</p>
<p>{{ no_cookie2 }}</p>
{% endif %}
</div>
{% if DEBUG %}
<div id="info">
<h2>Help</h2>
{% if reason %}
<p>Reason given for failure:</p>
<pre>
{{ reason }}
</pre>
{% endif %}
<p>In general, this can occur when there is a genuine Cross Site Request Forgery, or when
<a
href="https://docs.djangoproject.com/en/{{ docs_version }}/ref/csrf/">Django's
CSRF mechanism</a> has not been used correctly. For POST forms, you need to
ensure:</p>
<ul>
<li>Your browser is accepting cookies.</li>
<li>The view function passes a <code>request</code> to the template's <a
href="https://docs.djangoproject.com/en/dev/topics/templates/#django.template.backends.base.Template.render"><code>render</code></a>
method.</li>
<li>In the template, there is a <code>{% templatetag openblock %} csrf_token
{% templatetag closeblock %}</code> template tag inside each POST form that
targets an internal URL.</li>
<li>If you are not using <code>CsrfViewMiddleware</code>, then you must use
<code>csrf_protect</code> on any views that use the <code>csrf_token</code>
template tag, as well as those that accept the POST data.</li>
</ul>
<p>You're seeing the help section of this page because you have <code>DEBUG =
True</code> in your Django settings file. Change that to <code>False</code>,
and only the initial error message will be displayed. </p>
<p>You can customize this page using the CSRF_FAILURE_VIEW setting.</p>
</div>
{% else %}
<div id="explanation">
<p><small>{{ more }}</small></p>
</div>
{% endif %}
</body>
</html>
"""
def csrf_failure(request, reason=""):
"""
Default view used when request fails CSRF protection
"""
from django.middleware.csrf import REASON_NO_REFERER, REASON_NO_CSRF_COOKIE
t = Engine().from_string(CSRF_FAILURE_TEMPLATE)
c = Context({
'title': _("Forbidden"),
'main': _("CSRF verification failed. Request aborted."),
'reason': reason,
'no_referer': reason == REASON_NO_REFERER,
'no_referer1': _(
"You are seeing this message because this HTTPS site requires a "
"'Referer header' to be sent by your Web browser, but none was "
"sent. This header is required for security reasons, to ensure "
"that your browser is not being hijacked by third parties."),
'no_referer2': _(
"If you have configured your browser to disable 'Referer' headers, "
"please re-enable them, at least for this site, or for HTTPS "
"connections, or for 'same-origin' requests."),
'no_cookie': reason == REASON_NO_CSRF_COOKIE,
'no_cookie1': _(
"You are seeing this message because this site requires a CSRF "
"cookie when submitting forms. This cookie is required for "
"security reasons, to ensure that your browser is not being "
"hijacked by third parties."),
'no_cookie2': _(
"If you have configured your browser to disable cookies, please "
"re-enable them, at least for this site, or for 'same-origin' "
"requests."),
'DEBUG': settings.DEBUG,
'docs_version': get_docs_version(),
'more': _("More information is available with DEBUG=True."),
})
return HttpResponseForbidden(t.render(c), content_type='text/html')
| bsd-3-clause |
kawashiro/dewyatochka2 | src/dewyatochka/core/network/xmpp/client/_base.py | 1 | 4043 | # -*- coding: UTF-8
""" Common client implementation
Private module, for internal use only
Classes
=======
Client -- Abstract XMPP client
Command -- Abstract command to extend client functionality
"""
from abc import ABCMeta, abstractmethod, abstractproperty
from dewyatochka.core.network.entity import Message
from dewyatochka.core.network.xmpp.entity import JID
from dewyatochka.core.network.xmpp.exception import C2SConnectionError, ClientDisconnectedError
__all__ = ['Client', 'Command']
class Command(metaclass=ABCMeta):
""" Abstract command to extend client functionality
    Commands are intended to be attached to an XMPP client
    and invoked through attribute access on the client
"""
def __init__(self, client):
""" Create command attached to a client instance
:param Client client:
"""
self._client = client
@abstractmethod
def __call__(self, *args, **kwargs): # pragma: no cover
""" Command is invokable
:param tuple args:
:param dict kwargs:
:returns: Depends on command implementation
"""
pass
@abstractproperty
def name(self) -> str: # pragma: no cover
""" Command unique name
        The command is attached to the client as an attribute
        with the same name as the command.
:return str:
"""
pass
class Client(metaclass=ABCMeta):
""" Abstract XMPP client """
def __init__(self, host: str, login: str, password: str, port=5222, location=''):
""" Initialize XMPP client instance
:param str host: XMPP server host
:param str login: User login
:param str password: User password
:param int port: XMPP server port, default 5222
:param str location: XMPP resource, default ''
:return Client:
"""
self._jid = JID(login, host, location)
self._server = host, port
self._password = password
self._commands = {}
def add_command(self, command: Command):
""" Attach a command to client
:param Command command: Command instance
:return None:
"""
self._commands[command.name] = command
def get_command(self, command: str) -> Command:
""" Get command by name
:param str command: Command name as .Command.name provides
:return Command:
"""
try:
return self._commands[command]
except KeyError:
raise RuntimeError('Command {} is not implemented in xmpp-client {}'.format(command, self.__class__))
@abstractmethod
def connect(self): # pragma: no cover
""" Establish connection to the server
:return None:
"""
pass
@abstractmethod
def disconnect(self, wait=True, notify=True): # pragma: no cover
""" Close connection
:param bool wait: Wait until all received messages are processed
:param bool notify: Notify reader about disconnection
:return None:
"""
pass
@abstractmethod
def read(self) -> Message: # pragma: no cover
""" Read next message from stream
:return Message:
"""
pass
@property
def jid(self) -> JID:
""" Authorized as JID
:return JID:
"""
return self._jid
def __getattr__(self, item) -> Command:
""" Get command as an attribute
:param str item:
:return Command:
"""
return self.get_command(item)
def __enter__(self):
""" Entering in with statement
:return Client:
"""
return self
def __exit__(self, *args) -> bool:
""" Close connection on exit if needed
:param tuple args:
:return bool:
"""
exc_instance = args[1]
if isinstance(exc_instance, ClientDisconnectedError):
return True
if not isinstance(args[1], C2SConnectionError):
self.disconnect(wait=args[1] is None)
return False
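# Illustrative sketch, not part of the original module: a concrete Command and
# how it is reached through Client.__getattr__.  The name "ping" and the class
# below are made up for the example.
class _PingCommand(Command):
    """ Example command that simply returns a constant string """

    @property
    def name(self) -> str:
        return 'ping'

    def __call__(self, *args, **kwargs):
        return 'pong'

# Typical wiring, with `client` being some concrete Client subclass:
#     client.add_command(_PingCommand(client))
#     client.ping()   # __getattr__ -> get_command('ping') -> 'pong'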
| gpl-3.0 |
poulpito/Flexget | flexget/plugins/sites/redirect.py | 9 | 1549 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
from flexget import plugin
from flexget.event import event
log = logging.getLogger('redirect_url')
class UrlRewriteRedirect(object):
"""Rewrites urls which actually redirect somewhere else."""
def __init__(self):
self.processed = set()
def on_task_start(self, task, config):
self.processed = set()
def on_task_urlrewrite(self, task, config):
if not config:
return
for entry in task.accepted:
if not any(entry['url'].startswith(adapter) for adapter in task.requests.adapters):
continue
elif entry['url'] in self.processed:
continue
auth = None
if 'download_auth' in entry:
auth = entry['download_auth']
log.debug('Custom auth enabled for %s url_redirect: %s' % (entry['title'], entry['download_auth']))
try:
r = task.requests.head(entry['url'], auth=auth, allow_redirects=True)
except Exception:
pass
else:
if r.status_code < 400 and r.url != entry['url']:
entry['url'] = r.url
# Make sure we don't try to rewrite this url again
self.processed.add(entry['url'])
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteRedirect, 'redirect_url', api_ver=2)
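# Illustrative sketch, not part of the plugin: enabling it in a FlexGet task is
# just a boolean flag, e.g.
#
#     tasks:
#       my-task:
#         redirect_url: yes
#
# With it enabled, every accepted entry's URL is replaced by the final URL that
# a HEAD request (following redirects) resolves to, so later download plugins
# see the real location.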
| mit |
gareging/SDN_Framework | ryu/ofproto/ofproto_protocol.py | 36 | 2205 | # Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2014 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import ofproto_v1_0_parser
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_2_parser
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ofproto_v1_3_parser
from ryu.ofproto import ofproto_v1_4
from ryu.ofproto import ofproto_v1_4_parser
from ryu.ofproto import ofproto_v1_5
from ryu.ofproto import ofproto_v1_5_parser
_versions = {
ofproto_v1_0.OFP_VERSION: (ofproto_v1_0, ofproto_v1_0_parser),
ofproto_v1_2.OFP_VERSION: (ofproto_v1_2, ofproto_v1_2_parser),
ofproto_v1_3.OFP_VERSION: (ofproto_v1_3, ofproto_v1_3_parser),
ofproto_v1_4.OFP_VERSION: (ofproto_v1_4, ofproto_v1_4_parser),
ofproto_v1_5.OFP_VERSION: (ofproto_v1_5, ofproto_v1_5_parser),
}
# OF versions supported by every apps in this process (intersection)
_supported_versions = set(_versions.keys())
def set_app_supported_versions(vers):
global _supported_versions
_supported_versions &= set(vers)
assert _supported_versions, 'No OpenFlow version is available'
class ProtocolDesc(object):
"""
OpenFlow protocol version flavor descriptor
"""
def __init__(self, version=None):
if version is None:
version = max(_supported_versions)
self.set_version(version)
def set_version(self, version):
assert version in _supported_versions
(self.ofproto, self.ofproto_parser) = _versions[version]
@property
def supported_ofp_version(self):
return _supported_versions
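# Illustrative sketch, not part of the original module: how an application
# narrows the negotiated versions and how ProtocolDesc resolves the matching
# constant/parser modules.
#
#     set_app_supported_versions([ofproto_v1_3.OFP_VERSION,
#                                 ofproto_v1_4.OFP_VERSION])
#     desc = ProtocolDesc()                       # picks the highest remaining
#     desc.ofproto.OFP_VERSION                    # -> 0x05 (OpenFlow 1.4)
#     desc.set_version(ofproto_v1_3.OFP_VERSION)  # switch to 1.3 explicitly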
| apache-2.0 |
py-geek/City-Air | venv/lib/python2.7/site-packages/pip/pep425tags.py | 91 | 4309 | """Generate and work with PEP 425 Compatibility Tags."""
from __future__ import absolute_import
import re
import sys
import warnings
try:
import sysconfig
except ImportError: # pragma nocover
# Python < 2.7
import distutils.sysconfig as sysconfig
import distutils.util
_osx_arch_pat = re.compile(r'(.+)_(\d+)_(\d+)_(.+)')
def get_abbr_impl():
"""Return abbreviated implementation name."""
if hasattr(sys, 'pypy_version_info'):
pyimpl = 'pp'
elif sys.platform.startswith('java'):
pyimpl = 'jy'
elif sys.platform == 'cli':
pyimpl = 'ip'
else:
pyimpl = 'cp'
return pyimpl
def get_impl_ver():
"""Return implementation version."""
return ''.join(map(str, sys.version_info[:2]))
def get_platform():
"""Return our platform name 'win32', 'linux_x86_64'"""
# XXX remove distutils dependency
return distutils.util.get_platform().replace('.', '_').replace('-', '_')
def get_supported(versions=None, noarch=False):
"""Return a list of supported tags for each version specified in
`versions`.
:param versions: a list of string versions, of the form ["33", "32"],
or None. The first version will be assumed to support our ABI.
"""
supported = []
# Versions must be given with respect to the preference
if versions is None:
versions = []
major = sys.version_info[0]
# Support all previous minor Python versions.
for minor in range(sys.version_info[1], -1, -1):
versions.append(''.join(map(str, (major, minor))))
impl = get_abbr_impl()
abis = []
try:
soabi = sysconfig.get_config_var('SOABI')
except IOError as e: # Issue #1074
warnings.warn("{0}".format(e), RuntimeWarning)
soabi = None
if soabi and soabi.startswith('cpython-'):
abis[0:0] = ['cp' + soabi.split('-', 1)[-1]]
abi3s = set()
import imp
for suffix in imp.get_suffixes():
if suffix[0].startswith('.abi'):
abi3s.add(suffix[0].split('.', 2)[1])
abis.extend(sorted(list(abi3s)))
abis.append('none')
if not noarch:
arch = get_platform()
if sys.platform == 'darwin':
# support macosx-10.6-intel on macosx-10.9-x86_64
match = _osx_arch_pat.match(arch)
if match:
name, major, minor, actual_arch = match.groups()
actual_arches = [actual_arch]
if actual_arch in ('i386', 'ppc'):
actual_arches.append('fat')
if actual_arch in ('i386', 'x86_64'):
actual_arches.append('intel')
if actual_arch in ('i386', 'ppc', 'x86_64'):
actual_arches.append('fat3')
if actual_arch in ('ppc64', 'x86_64'):
actual_arches.append('fat64')
if actual_arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
actual_arches.append('universal')
tpl = '{0}_{1}_%i_%s'.format(name, major)
arches = []
for m in range(int(minor) + 1):
for a in actual_arches:
arches.append(tpl % (m, a))
else:
# arch pattern didn't match (?!)
arches = [arch]
else:
arches = [arch]
# Current version, current API (built specifically for our Python):
for abi in abis:
for arch in arches:
supported.append(('%s%s' % (impl, versions[0]), abi, arch))
# No abi / arch, but requires our implementation:
for i, version in enumerate(versions):
supported.append(('%s%s' % (impl, version), 'none', 'any'))
if i == 0:
# Tagged specifically as being cross-version compatible
# (with just the major version specified)
supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
# No abi / arch, generic Python
for i, version in enumerate(versions):
supported.append(('py%s' % (version,), 'none', 'any'))
if i == 0:
supported.append(('py%s' % (version[0]), 'none', 'any'))
return supported
supported_tags = get_supported()
supported_tags_noarch = get_supported(noarch=True)
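# Illustrative sketch, not part of pip: what the module-level tag lists look
# like and how a wheel filename is matched against them.  The concrete values
# below are what a CPython 3.4 Linux interpreter might produce; they differ
# per interpreter and platform.
#
#     supported_tags[0]                          # e.g. ('cp34', 'cp34m', 'linux_x86_64')
#     ('py3', 'none', 'any') in supported_tags   # pure-python wheels match too
#
# A wheel such as foo-1.0-cp34-cp34m-linux_x86_64.whl is installable when its
# (implementation, abi, platform) triple appears anywhere in supported_tags.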
| mit |
Ksys-labs/l4linux | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
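# Illustrative usage note, not part of the original script: this file is not
# run directly; perf loads it after recording the sched tracepoints, roughly:
#
#     perf record -e sched:sched_switch -e sched:sched_migrate_task \
#                 -e sched:sched_wakeup -e sched:sched_wakeup_new \
#                 -e sched:sched_process_fork -a sleep 10
#     perf script -s tools/perf/scripts/python/sched-migration.py
#
# perf then calls trace_begin(), one handler per recorded event, and finally
# trace_end(), which opens the wxPython RootFrame built from the TimeSlices.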
| gpl-2.0 |
ilastik/ilastik-0.5 | ilastik/gui/volumeeditor.py | 1 | 115328 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 C Sommer, C Straehle, U Koethe, FA Hamprecht. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE ABOVE COPYRIGHT HOLDERS ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE ABOVE COPYRIGHT HOLDERS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those of the
# authors and should not be interpreted as representing official policies, either expressed
# or implied, of their employers.
"""
Dataset Editor Dialog based on PyQt4
"""
import time
try:
from OpenGL.GL import *
from OpenGL.GLU import *
except Exception, e:
print e
pass
from PyQt4 import QtCore, QtGui, QtOpenGL
from PyQt4.QtCore import pyqtSignal
import sip
import numpy, qimage2ndarray
import os.path
from collections import deque
import threading
from ilastik.core.volume import DataAccessor, Volume
from shortcutmanager import *
from ilastik.gui.quadsplitter import QuadView
import ilastik.gui.exportDialog as exportDialog
from ilastik.gui.iconMgr import ilastikIcons
from ilastik.gui.view3d import OverviewScene
# Local import
#from spyderlib.config import get_icon, get_font
##mixin to enable label access
#class VolumeLabelAccessor():
#def __init__():
#self._labels = None
##extend ndarray with _label attribute
#numpy.ndarray.__base__ += (VolumeLabelAccessor, )
def rgb(r, g, b):
# use qRgb to pack the colors, and then turn the resulting long
# into a negative integer with the same bitpattern.
return (QtGui.qRgb(r, g, b) & 0xffffff) - 0x1000000
#*******************************************************************************
# P a t c h A c c e s s o r *
#*******************************************************************************
class PatchAccessor():
def __init__(self, size_x,size_y, blockSize = 128):
self._blockSize = blockSize
self.size_x = size_x
self.size_y = size_y
self._cX = int(numpy.ceil(1.0 * size_x / self._blockSize))
        #last blocks can be very small -> merge them with the second to last one
self._cXend = size_x % self._blockSize
if self._cXend < self._blockSize / 3 and self._cXend != 0 and self._cX > 1:
self._cX -= 1
else:
self._cXend = 0
self._cY = int(numpy.ceil(1.0 * size_y / self._blockSize))
        #last blocks can be very small -> merge them with the second to last one
self._cYend = size_y % self._blockSize
if self._cYend < self._blockSize / 3 and self._cYend != 0 and self._cY > 1:
self._cY -= 1
else:
self._cYend = 0
self.patchCount = self._cX * self._cY
def getPatchBounds(self, blockNum, overlap = 0):
z = int(numpy.floor(blockNum / (self._cX*self._cY)))
rest = blockNum % (self._cX*self._cY)
y = int(numpy.floor(rest / self._cX))
x = rest % self._cX
startx = max(0, x*self._blockSize - overlap)
endx = min(self.size_x, (x+1)*self._blockSize + overlap)
if x+1 >= self._cX:
endx = self.size_x
starty = max(0, y*self._blockSize - overlap)
endy = min(self.size_y, (y+1)*self._blockSize + overlap)
if y+1 >= self._cY:
endy = self.size_y
return [startx,endx,starty,endy]
def getPatchesForRect(self,startx,starty,endx,endy):
sx = int(numpy.floor(1.0 * startx / self._blockSize))
ex = int(numpy.ceil(1.0 * endx / self._blockSize))
sy = int(numpy.floor(1.0 * starty / self._blockSize))
ey = int(numpy.ceil(1.0 * endy / self._blockSize))
if ey > self._cY:
ey = self._cY
if ex > self._cX :
ex = self._cX
nums = []
for y in range(sy,ey):
nums += range(y*self._cX+sx,y*self._cX+ex)
return nums
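# Illustrative sketch, not part of the original ilastik code: how the slice
# views typically use PatchAccessor to repaint only the tiles touched by a
# brush stroke.  The 512x512 plane size below is made up.
def _patchAccessorExample():
    pa = PatchAccessor(512, 512, blockSize = 128)
    # bounds of the first tile as [startx, endx, starty, endy]
    bounds = pa.getPatchBounds(0)
    # tile numbers intersecting a dirty rectangle, e.g. after a brush stroke
    dirty = pa.getPatchesForRect(10, 10, 300, 200)
    return pa.patchCount, bounds, dirty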
#abstract base class for undo redo stuff
#*******************************************************************************
# S t a t e *
#*******************************************************************************
class State():
def __init__(self):
pass
def restore(self):
pass
#*******************************************************************************
# L a b e l S t a t e *
#*******************************************************************************
class LabelState(State):
def __init__(self, title, axis, num, offsets, shape, timeAxis, volumeEditor, erasing, labels, labelNumber):
self.title = title
self.time = timeAxis
self.num = num
self.offsets = offsets
self.axis = axis
self.erasing = erasing
self.labelNumber = labelNumber
self.labels = labels
self.clock = time.clock()
self.dataBefore = volumeEditor.labelWidget.overlayItem.getSubSlice(self.offsets, self.labels.shape, self.num, self.axis, self.time, 0).copy()
def restore(self, volumeEditor):
temp = volumeEditor.labelWidget.overlayItem.getSubSlice(self.offsets, self.labels.shape, self.num, self.axis, self.time, 0).copy()
restore = numpy.where(self.labels > 0, self.dataBefore, 0)
stuff = numpy.where(self.labels > 0, self.dataBefore + 1, 0)
erase = numpy.where(stuff == 1, 1, 0)
self.dataBefore = temp
#volumeEditor.labels._data.setSubSlice(self.offsets, temp, self.num, self.axis, self.time, 0)
volumeEditor.setLabels(self.offsets, self.axis, self.num, restore, False)
volumeEditor.setLabels(self.offsets, self.axis, self.num, erase, True)
if volumeEditor.sliceSelectors[self.axis].value() != self.num:
volumeEditor.sliceSelectors[self.axis].setValue(self.num)
else:
#volumeEditor.repaint()
#repainting is already done automatically by the setLabels function
pass
self.erasing = not(self.erasing)
#*******************************************************************************
# H i s t o r y M a n a g e r *
#*******************************************************************************
class HistoryManager(QtCore.QObject):
def __init__(self, parent, maxSize = 3000):
QtCore.QObject.__init__(self)
self.volumeEditor = parent
self.maxSize = maxSize
self._history = []
self.current = -1
def append(self, state):
if self.current + 1 < len(self._history):
self._history = self._history[0:self.current+1]
self._history.append(state)
if len(self._history) > self.maxSize:
self._history = self._history[len(self._history)-self.maxSize:len(self._history)]
self.current = len(self._history) - 1
def undo(self):
if self.current >= 0:
self._history[self.current].restore(self.volumeEditor)
self.current -= 1
def redo(self):
if self.current < len(self._history) - 1:
self._history[self.current + 1].restore(self.volumeEditor)
self.current += 1
def serialize(self, grp, name='_history'):
histGrp = grp.create_group(name)
for i, hist in enumerate(self._history):
histItemGrp = histGrp.create_group('%04d'%i)
histItemGrp.create_dataset('labels',data=hist.labels)
histItemGrp.create_dataset('axis',data=hist.axis)
histItemGrp.create_dataset('slice',data=hist.num)
histItemGrp.create_dataset('labelNumber',data=hist.labelNumber)
histItemGrp.create_dataset('offsets',data=hist.offsets)
histItemGrp.create_dataset('time',data=hist.time)
histItemGrp.create_dataset('erasing',data=hist.erasing)
histItemGrp.create_dataset('clock',data=hist.clock)
def removeLabel(self, number):
tobedeleted = []
for index, item in enumerate(self._history):
if item.labelNumber != number:
item.dataBefore = numpy.where(item.dataBefore == number, 0, item.dataBefore)
item.dataBefore = numpy.where(item.dataBefore > number, item.dataBefore - 1, item.dataBefore)
item.labels = numpy.where(item.labels == number, 0, item.labels)
item.labels = numpy.where(item.labels > number, item.labels - 1, item.labels)
else:
#if item.erasing == False:
#item.restore(self.volumeEditor)
tobedeleted.append(index - len(tobedeleted))
if index <= self.current:
self.current -= 1
for val in tobedeleted:
it = self._history[val]
self._history.__delitem__(val)
del it
def clear(self):
self._history = []
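# Illustrative sketch, not part of the original ilastik code: the editor keeps
# one HistoryManager per volume, appends a LabelState after every brush stroke
# and walks the list via the Ctrl+Z / Ctrl+Shift+Z shortcuts defined further
# down in VolumeEditor.
#
#     history = HistoryManager(volumeEditor)
#     history.append(labelState)   # after the stroke has been applied
#     history.undo()               # LabelState.restore() re-applies the old labels
#     history.redo()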
#*******************************************************************************
# V o l u m e U p d a t e *
#*******************************************************************************
class VolumeUpdate():
def __init__(self, data, offsets, sizes, erasing):
self.offsets = offsets
self._data = data
self.sizes = sizes
self.erasing = erasing
def applyTo(self, dataAcc):
offsets = self.offsets
sizes = self.sizes
#TODO: move part of function into DataAccessor class !! e.g. setSubVolume or somethign
tempData = dataAcc[offsets[0]:offsets[0]+sizes[0],offsets[1]:offsets[1]+sizes[1],offsets[2]:offsets[2]+sizes[2],offsets[3]:offsets[3]+sizes[3],offsets[4]:offsets[4]+sizes[4]].copy()
if self.erasing == True:
tempData = numpy.where(self._data > 0, 0, tempData)
else:
tempData = numpy.where(self._data > 0, self._data, tempData)
dataAcc[offsets[0]:offsets[0]+sizes[0],offsets[1]:offsets[1]+sizes[1],offsets[2]:offsets[2]+sizes[2],offsets[3]:offsets[3]+sizes[3],offsets[4]:offsets[4]+sizes[4]] = tempData
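# Illustrative sketch, not part of the original ilastik code: setLabels() in
# VolumeEditor below wraps every brush stroke in a VolumeUpdate so the same 5D
# patch can be written into the label overlay and queued for the classifier.
#
#     vu = VolumeUpdate(labels.reshape(sizes5), offsets5, sizes5, erase)
#     vu.applyTo(labelWidget.overlayItem)   # write (or erase) into the 5D volume
#     pendingLabels.append(vu)              # picked up later via getPendingLabels()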
#*******************************************************************************
# D u m m y L a b e l W i d g e t *
#*******************************************************************************
class DummyLabelWidget(QtGui.QWidget):
def __init__(self):
QtGui.QWidget.__init__(self)
self.setFixedSize(QtCore.QSize(0,0))
self.volumeLabels = None
def currentItem(self):
return None
#*******************************************************************************
# D u m m y O v e r l a y L i s t W i d g e t *
#*******************************************************************************
class DummyOverlayListWidget(QtGui.QWidget):
def __init__(self, parent):
QtGui.QWidget.__init__(self)
self.volumeEditor = parent
self.overlays = []
#*******************************************************************************
# V o l u m e E d i t o r *
#*******************************************************************************
class VolumeEditor(QtGui.QWidget):
changedSlice = pyqtSignal(int,int)
@property
def useOpenGL(self):
return self.sharedOpenglWidget is not None
"""Array Editor Dialog"""
def __init__(self, image, parent, name="", font=None,
readonly=False, size=(400, 300), sharedOpenglWidget = None):
QtGui.QWidget.__init__(self, parent)
self.ilastik = parent
self.name = name
self.grid = None #in 3D mode hold the quad view widget, otherwise remains none
title = name
#Controls the trade-off of speed and flickering when scrolling through this slice view
self.fastRepaint = True
self.interactionLog = None
self.labelsAlpha = 1.0
        #Bordermargin settings - they control the blue markers that signal the region from which the
        #labels are not used for training
self.useBorderMargin = False
self.borderMargin = 0
#this setting controls the rescaling of the displayed _data to the full 0-255 range
self.normalizeData = False
        #this setting controls the timer interval during interactive mode
#set to 0 to wait for complete brushstrokes !
self.drawUpdateInterval = 300
self.sharedOpenGLWidget = sharedOpenglWidget
if self.sharedOpenGLWidget is not None:
print "Enabling OpenGL rendering"
else:
print "Disabling OpenGL rendering"
self.embedded = True
QtGui.QPixmapCache.setCacheLimit(100000)
if issubclass(image.__class__, DataAccessor):
self.image = image
elif issubclass(image.__class__, Volume):
self.image = image._data
else:
self.image = DataAccessor(image)
self.save_thread = ImageSaveThread(self)
self.selectedTime = 0
self.selectedChannel = 0
self.pendingLabels = []
#self.setAccessibleName(self.name)
self._history = HistoryManager(self)
self.drawManager = DrawManager(self)
self.imageScenes = []
self.imageScenes.append(ImageScene(self, (self.image.shape[2], self.image.shape[3], self.image.shape[1]), 0 ,self.drawManager))
if self.image.shape[1] != 1:
self.overview = OverviewScene(self, self.image.shape[1:4])
self.overview.changedSlice.connect(self.changeSlice)
self.changedSlice.connect(self.overview.ChangeSlice)
self.imageScenes.append(ImageScene(self, (self.image.shape[1], self.image.shape[3], self.image.shape[2]), 1 ,self.drawManager))
self.imageScenes.append(ImageScene(self, (self.image.shape[1], self.image.shape[2], self.image.shape[3]), 2 ,self.drawManager))
self.grid = QuadView(self)
self.grid.addWidget(0, self.imageScenes[2])
self.grid.addWidget(1, self.imageScenes[0])
self.grid.addWidget(2, self.imageScenes[1])
self.grid.addWidget(3, self.overview)
else:
self.overview = OverviewSceneDummy(self, self.image.shape[1:4])
for scene in self.imageScenes:
self.changedSlice.connect(scene.updateSliceIntersection)
self.viewingLayout = QtGui.QVBoxLayout()
self.viewingLayout.setContentsMargins(10,2,0,2)
self.viewingLayout.setSpacing(0)
labelLayout = QtGui.QHBoxLayout()
labelLayout.setMargin(0)
labelLayout.setSpacing(5)
labelLayout.setContentsMargins(0,0,0,0)
self.posLabel = QtGui.QLabel()
self.pixelValuesLabel = QtGui.QLabel()
labelLayout.addWidget(self.posLabel)
labelLayout.addWidget(self.pixelValuesLabel)
labelLayout.addStretch()
#self.viewingLayout.addLayout(self.grid)
if self.image.shape[1] != 1:
self.viewingLayout.addWidget(self.grid)
self.grid.setContentsMargins(0,0,10,0)
else:
self.viewingLayout.addWidget(self.imageScenes[0])
self.viewingLayout.addLayout(labelLayout)
#right side toolbox
self.toolBox = QtGui.QWidget()
self.toolBoxLayout = QtGui.QVBoxLayout()
self.toolBoxLayout.setMargin(5)
self.toolBox.setLayout(self.toolBoxLayout)
#self.toolBox.setMaximumWidth(190)
#self.toolBox.setMinimumWidth(190)
self.labelWidget = None
self.setLabelWidget(DummyLabelWidget())
self.toolBoxLayout.addStretch()
#Slice Selector Combo Box in right side toolbox
self.sliceSelectors = []
sliceSpin = QtGui.QSpinBox()
sliceSpin.setEnabled(True)
self.connect(sliceSpin, QtCore.SIGNAL("valueChanged(int)"), self.changeSliceX)
if self.image.shape[1] > 1 and self.image.shape[2] > 1 and self.image.shape[3] > 1: #only show when needed
tempLay = QtGui.QHBoxLayout()
tempLay.addWidget(QtGui.QLabel("<pre>X:</pre>"))
tempLay.addWidget(sliceSpin, 1)
self.toolBoxLayout.addLayout(tempLay)
sliceSpin.setRange(0,self.image.shape[1] - 1)
self.sliceSelectors.append(sliceSpin)
sliceSpin = QtGui.QSpinBox()
sliceSpin.setEnabled(True)
self.connect(sliceSpin, QtCore.SIGNAL("valueChanged(int)"), self.changeSliceY)
if self.image.shape[1] > 1 and self.image.shape[3] > 1: #only show when needed
tempLay = QtGui.QHBoxLayout()
tempLay.addWidget(QtGui.QLabel("<pre>Y:</pre>"))
tempLay.addWidget(sliceSpin, 1)
self.toolBoxLayout.addLayout(tempLay)
sliceSpin.setRange(0,self.image.shape[2] - 1)
self.sliceSelectors.append(sliceSpin)
sliceSpin = QtGui.QSpinBox()
sliceSpin.setEnabled(True)
self.connect(sliceSpin, QtCore.SIGNAL("valueChanged(int)"), self.changeSliceZ)
if self.image.shape[1] > 1 and self.image.shape[2] > 1 : #only show when needed
tempLay = QtGui.QHBoxLayout()
tempLay.addWidget(QtGui.QLabel("<pre>Z:</pre>"))
tempLay.addWidget(sliceSpin, 1)
self.toolBoxLayout.addLayout(tempLay)
sliceSpin.setRange(0,self.image.shape[3] - 1)
self.sliceSelectors.append(sliceSpin)
# Check box for slice intersection marks
sliceIntersectionBox = QtGui.QCheckBox("Slice Intersection")
sliceIntersectionBox.setEnabled(True)
self.toolBoxLayout.addWidget(sliceIntersectionBox)
for scene in self.imageScenes:
self.connect(sliceIntersectionBox, QtCore.SIGNAL("stateChanged(int)"), scene.setSliceIntersection)
sliceIntersectionBox.setCheckState(QtCore.Qt.Checked)
self.selSlices = []
self.selSlices.append(0)
self.selSlices.append(0)
self.selSlices.append(0)
#Channel Selector Combo Box in right side toolbox
self.channelLayout = QtGui.QHBoxLayout()
self.channelSpinLabel = QtGui.QLabel("Channel:")
self.channelSpin = QtGui.QSpinBox()
self.channelSpin.setEnabled(True)
self.connect(self.channelSpin, QtCore.SIGNAL("valueChanged(int)"), self.setChannel)
self.channelEditBtn = QtGui.QPushButton('Edit channels')
self.connect(self.channelEditBtn, QtCore.SIGNAL("clicked()"), self.on_editChannels)
self.toolBoxLayout.addWidget(self.channelSpinLabel)
self.channelLayout.addWidget(self.channelSpin)
self.channelLayout.addWidget(self.channelEditBtn)
self.toolBoxLayout.addLayout(self.channelLayout)
if self.image.shape[-1] == 1 or self.image.rgb is True: #only show when needed
self.channelSpin.setVisible(False)
self.channelSpinLabel.setVisible(False)
self.channelEditBtn.setVisible(False)
self.channelSpin.setRange(0,self.image.shape[-1] - 1)
#Overlay selector
self.overlayWidget = DummyOverlayListWidget(self)
self.toolBoxLayout.addWidget( self.overlayWidget)
self.toolBoxLayout.setAlignment( QtCore.Qt.AlignTop )
# Make the dialog act as a window and stay on top
if self.embedded == False:
pass
#self.setWindowFlags(self.flags() | QtCore.Qt.Window | QtCore.Qt.WindowStaysOnTopHint)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
#self.setWindowIcon(get_icon('edit.png'))
self.setWindowTitle(self.tr("Volume") + \
"%s" % (" - "+str(title) if str(title) else ""))
#start viewing in the center of the volume
self.changeSliceX(numpy.floor((self.image.shape[1] - 1) / 2))
self.changeSliceY(numpy.floor((self.image.shape[2] - 1) / 2))
self.changeSliceZ(numpy.floor((self.image.shape[3] - 1) / 2))
##undo/redo and other shortcuts
self.shortcutUndo = QtGui.QShortcut(QtGui.QKeySequence("Ctrl+Z"), self, self.historyUndo, self.historyUndo)
shortcutManager.register(self.shortcutUndo, "Labeling", "History undo")
self.shortcutRedo = QtGui.QShortcut(QtGui.QKeySequence("Ctrl+Shift+Z"), self, self.historyRedo, self.historyRedo)
shortcutManager.register(self.shortcutRedo, "Labeling", "History redo")
self.shortcutRedo2 = QtGui.QShortcut(QtGui.QKeySequence("Ctrl+Y"), self, self.historyRedo, self.historyRedo)
shortcutManager.register(self.shortcutRedo2, "Labeling", "History redo")
self.togglePredictionSC = QtGui.QShortcut(QtGui.QKeySequence("Space"), self, self.togglePrediction, self.togglePrediction)
shortcutManager.register(self.togglePredictionSC, "Overlays", "Invert overlay visibility")
self.shortcutNextLabel = QtGui.QShortcut(QtGui.QKeySequence("l"), self, self.nextLabel, self.nextLabel)
shortcutManager.register(self.shortcutNextLabel, "Labeling", "Go to next label (cyclic, forward)")
self.shortcutPrevLabel = QtGui.QShortcut(QtGui.QKeySequence("k"), self, self.prevLabel, self.prevLabel )
shortcutManager.register(self.shortcutPrevLabel, "Labeling", "Go to previous label (cyclic, backwards)")
self.shortcutToggleFullscreenX = QtGui.QShortcut(QtGui.QKeySequence("x"), self, self.toggleFullscreenX, self.toggleFullscreenX )
shortcutManager.register(self.shortcutToggleFullscreenX, "Navigation", "Enlarge slice view x to full size")
self.shortcutToggleFullscreenY = QtGui.QShortcut(QtGui.QKeySequence("y"), self, self.toggleFullscreenY, self.toggleFullscreenY )
shortcutManager.register(self.shortcutToggleFullscreenY, "Navigation", "Enlarge slice view y to full size")
self.shortcutToggleFullscreenZ = QtGui.QShortcut(QtGui.QKeySequence("z"), self, self.toggleFullscreenZ, self.toggleFullscreenZ )
shortcutManager.register(self.shortcutToggleFullscreenZ, "Navigation", "Enlarge slice view z to full size")
self.shortcutNextChannel = QtGui.QShortcut(QtGui.QKeySequence("q"), self, self.nextChannel, self.nextChannel )
shortcutManager.register(self.shortcutNextChannel, "Navigation", "Switch to next channel")
self.shortcutPreviousChannel = QtGui.QShortcut(QtGui.QKeySequence("a"), self, self.previousChannel, self.previousChannel )
shortcutManager.register(self.shortcutPreviousChannel, "Navigation", "Switch to previous channel")
for index, scene in enumerate(self.imageScenes):
scene.shortcutZoomIn = QtGui.QShortcut(QtGui.QKeySequence("+"), scene, scene.zoomIn, scene.zoomIn )
scene.shortcutZoomIn.setContext(QtCore.Qt.WidgetShortcut)
scene.shortcutZoomOut = QtGui.QShortcut(QtGui.QKeySequence("-"), scene, scene.zoomOut, scene.zoomOut )
scene.shortcutZoomOut.setContext(QtCore.Qt.WidgetShortcut)
scene.shortcutSliceUp = QtGui.QShortcut(QtGui.QKeySequence("p"), scene, scene.sliceUp, scene.sliceUp )
scene.shortcutSliceUp.setContext(QtCore.Qt.WidgetShortcut)
scene.shortcutSliceDown = QtGui.QShortcut(QtGui.QKeySequence("o"), scene, scene.sliceDown, scene.sliceDown )
scene.shortcutSliceDown.setContext(QtCore.Qt.WidgetShortcut)
scene.shortcutSliceUp2 = QtGui.QShortcut(QtGui.QKeySequence("Ctrl+Up"), scene, scene.sliceUp, scene.sliceUp )
scene.shortcutSliceUp2.setContext(QtCore.Qt.WidgetShortcut)
scene.shortcutSliceDown2 = QtGui.QShortcut(QtGui.QKeySequence("Ctrl+Down"), scene, scene.sliceDown, scene.sliceDown )
scene.shortcutSliceDown2.setContext(QtCore.Qt.WidgetShortcut)
scene.shortcutSliceUp10 = QtGui.QShortcut(QtGui.QKeySequence("Ctrl+Shift+Up"), scene, scene.sliceUp10, scene.sliceUp10 )
scene.shortcutSliceUp10.setContext(QtCore.Qt.WidgetShortcut)
scene.shortcutSliceDown10 = QtGui.QShortcut(QtGui.QKeySequence("Ctrl+Shift+Down"), scene, scene.sliceDown10, scene.sliceDown10 )
scene.shortcutSliceDown10.setContext(QtCore.Qt.WidgetShortcut)
scene.shortcutBrushSizeUp = QtGui.QShortcut(QtGui.QKeySequence("n"), scene, scene.brushSmaller)
scene.shortcutBrushSizeUp.setContext(QtCore.Qt.WidgetShortcut)
scene.shortcutBrushSizeDown = QtGui.QShortcut(QtGui.QKeySequence("m"), scene, scene.brushBigger)
scene.shortcutBrushSizeDown.setContext(QtCore.Qt.WidgetShortcut)
        #add shortcuts of the imagescenes to the help text system
shortcutManager.register(self.imageScenes[0].shortcutZoomIn, "Navigation", "Zoom in")
shortcutManager.register(self.imageScenes[0].shortcutZoomOut, "Navigation", "Zoom out")
shortcutManager.register(self.imageScenes[0].shortcutSliceUp, "Navigation", "Slice up")
shortcutManager.register(self.imageScenes[0].shortcutSliceDown, "Navigation", "Slice down")
shortcutManager.register(self.imageScenes[0].shortcutSliceUp2, "Navigation", "Slice up")
shortcutManager.register(self.imageScenes[0].shortcutSliceDown2, "Navigation", "Slice down")
shortcutManager.register(self.imageScenes[0].shortcutSliceUp10, "Navigation", "10 slices up")
shortcutManager.register(self.imageScenes[0].shortcutSliceDown10, "Navigation", "10 slices down")
shortcutManager.register(self.imageScenes[0].shortcutBrushSizeUp, "Labeling", "Increase brush size")
shortcutManager.register(self.imageScenes[0].shortcutBrushSizeDown, "Labeling", "Decrease brush size")
self.shortcutUndo.setContext(QtCore.Qt.ApplicationShortcut )
self.shortcutRedo.setContext(QtCore.Qt.ApplicationShortcut )
self.shortcutRedo2.setContext(QtCore.Qt.ApplicationShortcut )
self.shortcutUndo.setEnabled(True)
self.shortcutRedo.setEnabled(True)
self.shortcutRedo2.setEnabled(True)
self.togglePredictionSC.setEnabled(True)
self.connect(self, QtCore.SIGNAL("destroyed()"), self.widgetDestroyed)
self.focusAxis = 0
self.splitter = QtGui.QSplitter()
self.splitter.setContentsMargins(0,0,0,0)
tempWidget = QtGui.QWidget()
tempWidget.setLayout(self.viewingLayout)
self.splitter.addWidget(tempWidget)
self.splitter.addWidget(self.toolBox)
splitterLayout = QtGui.QVBoxLayout()
splitterLayout.setMargin(0)
splitterLayout.setSpacing(0)
splitterLayout.addWidget(self.splitter)
self.setLayout(splitterLayout)
        # Tried to resolve ugly splitter handle problem for Windows
# Still it does not look good
# http://stackoverflow.com/questions/2545577/qsplitter-becoming-undistinguishable-between-qwidget-and-qtabwidget
# sHandle = self.splitter.handle(1)
# v = QtGui.QVBoxLayout(sHandle)
# v.setSpacing(5)
# v.setMargin(5)
#
# l = QtGui.QFrame(sHandle)
# l.setFrameShape(QtGui.QFrame.VLine)
# l.setFrameShadow(QtGui.QFrame.Sunken)
#
# v.addWidget(l)
#
# sHandle.setLayout(v)
self.updateGeometry()
self.update()
if self.grid:
self.grid.update()
def toggleFullscreenX(self):
self.maximizeSliceView(0)
def toggleFullscreenY(self):
self.maximizeSliceView(1)
def toggleFullscreenZ(self):
self.maximizeSliceView(2)
def nextChannel(self):
self.channelSpin.setValue(self.selectedChannel + 1)
def previousChannel(self):
self.channelSpin.setValue(self.selectedChannel - 1)
def toggleFullscreen3D(self):
v = [self.imageScenes[i].isVisible() for i in range(3)]
if any(v):
for i in range(3):
self.imageScenes[i].setVisible(False)
else:
for i in range(3):
self.imageScenes[i].setVisible(True)
def maximizeSliceView(self, axis):
if axis == 2:
self.grid.toggleMaximized(0)
if axis == 1:
self.grid.toggleMaximized(2)
if axis == 0:
self.grid.toggleMaximized(1)
def nextLabel(self):
self.labelWidget.nextLabel()
def prevLabel(self):
self.labelWidget.nextLabel()
def onLabelSelected(self):
print "onLabelSelected() Warning: am i used anymore?"
# if self.labelWidget.currentItem() is not None:
# self.drawManager.setBrushColor(self.labelWidget.currentItem().color)
# for i in range(3):
# self.imageScenes[i].crossHairCursor.setColor(self.labelWidget.currentItem().color)
def onOverlaySelected(self, index):
if self.labelWidget.currentItem() is not None:
pass
def focusNextPrevChild(self, forward = True):
if forward is True:
self.focusAxis += 1
if self.focusAxis > 2:
self.focusAxis = 0
else:
self.focusAxis -= 1
if self.focusAxis < 0:
self.focusAxis = 2
if len(self.imageScenes) > 2:
self.imageScenes[self.focusAxis].setFocus()
return True
def widgetDestroyed(self):
pass
def cleanUp(self):
QtGui.QApplication.processEvents()
print "VolumeEditor: cleaning up "
for index, s in enumerate( self.imageScenes ):
s.cleanUp()
s.close()
s.deleteLater()
self.imageScenes = []
self.save_thread.stopped = True
self.save_thread.imagePending.set()
self.save_thread.wait()
QtGui.QApplication.processEvents()
print "finished saving thread"
def on_editChannels(self):
from ilastik.gui.channelEditDialog import EditChannelsDialog
dlg = EditChannelsDialog(self.ilastik.project.dataMgr.selectedChannels, self.ilastik.project.dataMgr[0]._dataVol._data.shape[-1], self)
result = dlg.exec_()
if result is not None:
self.ilastik.project.dataMgr.selectedChannels = result
def togglePrediction(self):
for index, item in enumerate(self.overlayWidget.overlays):
item.visible = not(item.visible)
s = None
if item.visible:
s = QtCore.Qt.Checked
else:
s = QtCore.Qt.Unchecked
self.overlayWidget.overlayListWidget.item(index).setCheckState(s)
self.repaint()
def setLabelsAlpha(self, num):
print "####################### function not used anymore"
def getPendingLabels(self):
temp = self.pendingLabels
self.pendingLabels = []
return temp
def historyUndo(self):
self._history.undo()
def historyRedo(self):
self._history.redo()
def addOverlay(self, visible, data, name, color, alpha, colorTab = None):
ov = VolumeOverlay(data,name, color, alpha, colorTab, visible)
self.overlayWidget.addOverlay(ov)
def addOverlayObject(self, ov):
self.overlayWidget.addOverlay(ov)
def repaint(self):
for i in range(3):
tempImage = None
tempLabels = None
tempoverlays = []
for index, item in enumerate(reversed(self.overlayWidget.overlays)):
if item.visible:
tempoverlays.append(item.getOverlaySlice(self.selSlices[i],i, self.selectedTime, item.channel))
if len(self.overlayWidget.overlays) > 0:
tempImage = self.overlayWidget.getOverlayRef("Raw Data")._data.getSlice(self.selSlices[i], i, self.selectedTime, self.overlayWidget.getOverlayRef("Raw Data").channel)
else:
tempImage = None
# if self.labelWidget.volumeLabels is not None:
# if self.labelWidget.volumeLabels.data is not None:
# tempLabels = self.labelWidget.volumeLabels.data.getSlice(self.selSlices[i],i, self.selectedTime, 0)
if len(self.imageScenes) > i:
self.imageScenes[i].displayNewSlice(tempImage, tempoverlays, fastPreview = False)
def on_saveAsImage(self):
sliceOffsetCheck = False
if self.image.shape[1]>1:
#stack z-view is stored in imageScenes[2], for no apparent reason
sliceOffsetCheck = True
timeOffsetCheck = self.image.shape[0]>1
formatList = QtGui.QImageWriter.supportedImageFormats()
formatList = [x for x in formatList if x in ['png', 'tif']]
expdlg = exportDialog.ExportDialog(formatList, timeOffsetCheck, sliceOffsetCheck, None, parent=self.ilastik)
expdlg.exec_()
try:
tempname = str(expdlg.path.text()) + "/" + str(expdlg.prefix.text())
filename = str(QtCore.QDir.convertSeparators(tempname))
self.save_thread.start()
stuff = (filename, expdlg.timeOffset, expdlg.sliceOffset, expdlg.format)
self.save_thread.queue.append(stuff)
self.save_thread.imagePending.set()
except:
pass
def setLabelWidget(self, widget):
"""
Public interface function for setting the labelWidget toolBox
"""
if self.labelWidget is not None:
self.toolBoxLayout.removeWidget(self.labelWidget)
self.labelWidget.close()
del self.labelWidget
self.labelWidget = widget
self.connect(self.labelWidget, QtCore.SIGNAL("itemSelectionChanged()"), self.onLabelSelected)
self.toolBoxLayout.insertWidget( 0, self.labelWidget)
if isinstance(widget, DummyLabelWidget):
oldMargins = list(self.toolBoxLayout.getContentsMargins())
oldMargins[1] = 0
self.toolBoxLayout.setContentsMargins(oldMargins[0],oldMargins[1],oldMargins[2],oldMargins[3])
def setOverlayWidget(self, widget):
"""
Public interface function for setting the overlayWidget toolBox
"""
if self.overlayWidget is not None:
self.toolBoxLayout.removeWidget(self.overlayWidget)
self.overlayWidget.close()
del self.overlayWidget
self.overlayWidget = widget
self.connect(self.overlayWidget , QtCore.SIGNAL("selectedOverlay(int)"), self.onOverlaySelected)
self.toolBoxLayout.insertWidget( 1, self.overlayWidget)
self.ilastik.project.dataMgr[self.ilastik._activeImageNumber].overlayMgr.ilastik = self.ilastik
def get_copy(self):
"""Return modified text"""
return unicode(self.edit.toPlainText())
def setRgbMode(self, mode):
"""
change display mode of 3-channel images to either rgb, or 3-channels
mode can bei either True or False
"""
if self.image.shape[-1] == 3:
self.image.rgb = mode
self.channelSpin.setVisible(not mode)
self.channelSpinLabel.setVisible(not mode)
def setUseBorderMargin(self, use):
self.useBorderMargin = use
self.setBorderMargin(self.borderMargin)
def setFastRepaint(self, fastRepaint):
self.fastRepaint = fastRepaint
def setBorderMargin(self, margin):
#print "******** setBorderMargin", margin
if margin != self.borderMargin:
for imgScene in self.imageScenes:
imgScene.__borderMarginIndicator__(margin)
self.borderMargin = margin
for imgScene in self.imageScenes:
if imgScene.border is not None:
imgScene.border.setVisible(self.useBorderMargin)
self.repaint()
def changeSliceX(self, num):
self.changeSlice(num, 0)
def changeSliceY(self, num):
self.changeSlice(num, 1)
def changeSliceZ(self, num):
self.changeSlice(num, 2)
def setChannel(self, channel):
if len(self.overlayWidget.overlays) > 0:
ov = self.overlayWidget.getOverlayRef("Raw Data")
if ov.shape[-1] == self.image.shape[-1]:
self.overlayWidget.getOverlayRef("Raw Data").channel = channel
self.selectedChannel = channel
for i in range(3):
self.changeSlice(self.selSlices[i], i)
def setTime(self, time):
self.selectedTime = time
for i in range(3):
self.changeSlice(self.selSlices[i], i)
def updateTimeSliceForSaving(self, time, num, axis):
self.imageScenes[axis].thread.freeQueue.clear()
if self.sliceSelectors[axis].value() != num:
#this will emit the signal and change the slice
self.sliceSelectors[axis].setValue(num)
elif self.selectedTime!=time:
#if only the time is changed, we don't want to update all 3 slices
self.selectedTime = time
self.changeSlice(num, axis)
else:
#no need to update, just save the current image
self.imageScenes[axis].thread.freeQueue.set()
def changeSlice(self, num, axis):
if self.interactionLog is not None:
self.interactionLog.append("%f: changeSlice(axis,number) %d,%d" % (time.clock(),axis,num))
self.selSlices[axis] = num
tempImage = None
tempLabels = None
tempoverlays = []
#This bloody call is recursive, be careful!
self.sliceSelectors[axis].setValue(num)
for index, item in enumerate(reversed(self.overlayWidget.overlays)):
if item.visible:
tempoverlays.append(item.getOverlaySlice(num,axis, self.selectedTime, item.channel))
if len(self.overlayWidget.overlays) > 0:
tempImage = self.overlayWidget.getOverlayRef("Raw Data")._data.getSlice(num, axis, self.selectedTime, self.selectedChannel)
else:
tempImage = None
self.selSlices[axis] = num
if len(self.imageScenes) > axis:
self.imageScenes[axis].sliceNumber = num
self.imageScenes[axis].displayNewSlice(tempImage, tempoverlays)
#print "VolumeEditor.changedSlice(%s, %d)" % (num, axis)
self.changedSlice.emit(num, axis)
def closeEvent(self, event):
event.accept()
def wheelEvent(self, event):
keys = QtGui.QApplication.keyboardModifiers()
k_ctrl = (keys == QtCore.Qt.ControlModifier)
if k_ctrl is True:
if event.delta() > 0:
scaleFactor = 1.1
else:
scaleFactor = 0.9
self.imageScenes[0].doScale(scaleFactor)
self.imageScenes[1].doScale(scaleFactor)
self.imageScenes[2].doScale(scaleFactor)
def setLabels(self, offsets, axis, num, labels, erase):
"""
offsets: labels is a 2D matrix in the image plane perpendicular to axis, which is offset from the origin
        of the slice by the 2D offsets vector
        axis: the axis (x=0, y=1 or z=2) which is perpendicular to the image plane
        num: position of the image plane perpendicular to axis on which the 'labels' were drawn
        labels 2D matrix of new labels
        erase boolean whether we are erasing or not. This changes how we interpret the update defined through
'labels'
"""
if axis == 0:
offsets5 = (self.selectedTime,num,offsets[0],offsets[1],0)
sizes5 = (1,1,labels.shape[0], labels.shape[1],1)
elif axis == 1:
offsets5 = (self.selectedTime,offsets[0],num,offsets[1],0)
sizes5 = (1,labels.shape[0],1, labels.shape[1],1)
else:
offsets5 = (self.selectedTime,offsets[0],offsets[1],num,0)
sizes5 = (1,labels.shape[0], labels.shape[1],1,1)
vu = VolumeUpdate(labels.reshape(sizes5),offsets5, sizes5, erase)
vu.applyTo(self.labelWidget.overlayItem)
self.pendingLabels.append(vu)
patches = self.imageScenes[axis].patchAccessor.getPatchesForRect(offsets[0], offsets[1],offsets[0]+labels.shape[0], offsets[1]+labels.shape[1])
tempImage = None
tempLabels = None
tempoverlays = []
for index, item in enumerate(reversed(self.overlayWidget.overlays)):
if item.visible:
tempoverlays.append(item.getOverlaySlice(self.selSlices[axis],axis, self.selectedTime, 0))
if len(self.overlayWidget.overlays) > 0:
tempImage = self.overlayWidget.getOverlayRef("Raw Data")._data.getSlice(num, axis, self.selectedTime, self.selectedChannel)
else:
tempImage = None
self.imageScenes[axis].updatePatches(patches, tempImage, tempoverlays)
self.emit(QtCore.SIGNAL('newLabelsPending()'))
def pushLabelsToLabelWidget(self):
newLabels = self.getPendingLabels()
self.labelWidget.labelMgr.newLabels(newLabels)
def getVisibleState(self):
#TODO: ugly, make nicer
vs = [self.selectedTime, self.selSlices[0], self.selSlices[1], self.selSlices[2], self.selectedChannel]
return vs
def show(self):
QtGui.QWidget.show(self)
#*******************************************************************************
# D r a w M a n a g e r *
#*******************************************************************************
class DrawManager(QtCore.QObject):
def __init__(self, parent):
QtCore.QObject.__init__(self)
self.volumeEditor = parent
self.shape = None
self.brushSize = 3
#self.initBoundingBox()
self.penVis = QtGui.QPen(QtCore.Qt.white, 3, QtCore.Qt.SolidLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin)
self.penDraw = QtGui.QPen(QtCore.Qt.white, 3, QtCore.Qt.SolidLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin)
self.penDraw.setColor(QtCore.Qt.white)
self.pos = None
self.erasing = False
self.lines = []
self.scene = QtGui.QGraphicsScene()
def copy(self):
"""
make a shallow copy of DrawManager - needed for python 2.5 compatibility
"""
cp = DrawManager(self.volumeEditor)
cp.volumeEditor = self.volumeEditor
cp.shape = self.shape
cp.brushSize = self.brushSize
cp.penVis = self.penVis
cp.penDraw = self.penDraw
cp.pos = self.pos
cp.erasing = self.erasing
cp.lines = self.lines
cp.scene = self.scene
return cp
def initBoundingBox(self):
self.leftMost = self.shape[0]
self.rightMost = 0
self.topMost = self.shape[1]
self.bottomMost = 0
def growBoundingBox(self):
self.leftMost = max(0,self.leftMost - self.brushSize -1)
self.topMost = max(0,self.topMost - self.brushSize -1 )
self.rightMost = min(self.shape[0],self.rightMost + self.brushSize + 1)
self.bottomMost = min(self.shape[1],self.bottomMost + self.brushSize + 1)
def toggleErase(self):
self.erasing = not(self.erasing)
self.updateCrossHair()
def setErasing(self):
self.erasing = True
self.updateCrossHair()
def disableErasing(self):
self.erasing = False
self.updateCrossHair()
def updateCrossHair(self):
if self.erasing == True:
color = QtGui.QColor("black")
else:
color = self.volumeEditor.labelWidget.currentItem().color
for i in self.volumeEditor.imageScenes:
i.crossHairCursor.setColor(color)
def setBrushSize(self, size):
for i in self.volumeEditor.imageScenes:
i.crossHairCursor.setBrushSize(size)
self.brushSize = size
self.penVis.setWidth(size)
self.penDraw.setWidth(size)
def getBrushSize(self):
return self.brushSize
def setBrushColor(self, color):
self.penVis.setColor(color)
def getCurrentPenPixmap(self):
pixmap = QtGui.QPixmap(self.brushSize, self.brushSize)
if self.erasing == True or not self.volumeEditor.labelWidget.currentItem():
self.penVis.setColor(QtCore.Qt.black)
else:
self.penVis.setColor(self.volumeEditor.labelWidget.currentItem().color)
painter = QtGui.QPainter(pixmap)
painter.setPen(self.penVis)
#draw a single dot of the current pen in the centre of the pixmap
painter.drawPoint(QtCore.QPoint(self.brushSize/2, self.brushSize/2))
painter.end()
return pixmap
def beginDraw(self, pos, shape):
self.shape = shape
self.initBoundingBox()
self.scene.clear()
if self.erasing == True or not self.volumeEditor.labelWidget.currentItem():
self.penVis.setColor(QtCore.Qt.black)
else:
self.penVis.setColor(self.volumeEditor.labelWidget.currentItem().color)
self.pos = QtCore.QPointF(pos.x()+0.0001, pos.y()+0.0001)
line = self.moveTo(pos)
return line
def endDraw(self, pos):
self.moveTo(pos)
self.growBoundingBox()
tempi = QtGui.QImage(self.rightMost - self.leftMost, self.bottomMost - self.topMost, QtGui.QImage.Format_ARGB32_Premultiplied) #TODO: format
tempi.fill(0)
painter = QtGui.QPainter(tempi)
self.scene.render(painter, QtCore.QRectF(0,0, self.rightMost - self.leftMost, self.bottomMost - self.topMost),
QtCore.QRectF(self.leftMost, self.topMost, self.rightMost - self.leftMost, self.bottomMost - self.topMost))
oldLeft = self.leftMost
oldTop = self.topMost
return (oldLeft, oldTop, tempi) #TODO: hackish, probably return a class ??
def dumpDraw(self, pos):
res = self.endDraw(pos)
self.beginDraw(pos, self.shape)
return res
def moveTo(self, pos):
lineVis = QtGui.QGraphicsLineItem(self.pos.x(), self.pos.y(),pos.x(), pos.y())
lineVis.setPen(self.penVis)
line = QtGui.QGraphicsLineItem(self.pos.x(), self.pos.y(),pos.x(), pos.y())
line.setPen(self.penDraw)
self.scene.addItem(line)
self.pos = pos
x = pos.x()
y = pos.y()
#update bounding Box :
if x > self.rightMost:
self.rightMost = x
if x < self.leftMost:
self.leftMost = x
if y > self.bottomMost:
self.bottomMost = y
if y < self.topMost:
self.topMost = y
return lineVis
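# A typical drawing interaction (driven by ImageScene further below) roughly looks like:
#   line = drawManager.beginDraw(scenePos, imShape)   # start a stroke, get a preview line item
#   line = drawManager.moveTo(nextPos)                # extend the stroke while the mouse moves
#   x0, y0, image = drawManager.endDraw(lastPos)      # rasterize the stroke's bounding box
# The returned image is converted to label data in ImageScene.endDraw()/updateLabels().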
#*******************************************************************************
# I m a g e S a v e T h r e a d *
#*******************************************************************************
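# ImageSaveThread writes slice screenshots in the background: callers append
# (filename, timeOffset, sliceOffset, format) tuples to self.queue, set self.imagePending,
# and wait on self.imageSaved once all requested slices have been rendered and saved.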
class ImageSaveThread(QtCore.QThread):
def __init__(self, parent):
QtCore.QThread.__init__(self, None)
self.ve = parent
self.queue = deque()
self.imageSaved = threading.Event()
self.imageSaved.clear()
self.imagePending = threading.Event()
self.imagePending.clear()
self.stopped = False
self.previousSlice = None
def run(self):
while not self.stopped:
self.imagePending.wait()
while len(self.queue)>0:
stuff = self.queue.pop()
if stuff is not None:
filename, timeOffset, sliceOffset, format = stuff
if self.ve.image.shape[1]>1:
axis = 2
self.previousSlice = self.ve.sliceSelectors[axis].value()
for t in range(self.ve.image.shape[0]):
for z in range(self.ve.image.shape[3]):
self.filename = filename
if (self.ve.image.shape[0]>1):
self.filename = self.filename + ("_time%03i" %(t+timeOffset))
self.filename = self.filename + ("_z%05i" %(z+sliceOffset))
self.filename = self.filename + "." + format
#only change the z slice display
self.ve.imageScenes[axis].thread.queue.clear()
self.ve.imageScenes[axis].thread.freeQueue.wait()
self.ve.updateTimeSliceForSaving(t, z, axis)
self.ve.imageScenes[axis].thread.freeQueue.wait()
self.ve.imageScenes[axis].saveSlice(self.filename)
else:
axis = 0
for t in range(self.ve.image.shape[0]):
self.filename = filename
if (self.ve.image.shape[0]>1):
self.filename = self.filename + ("_time%03i" %(t+timeOffset))
self.filename = self.filename + "." + format
self.ve.imageScenes[axis].thread.queue.clear()
self.ve.imageScenes[axis].thread.freeQueue.wait()
self.ve.updateTimeSliceForSaving(t, self.ve.selSlices[0], axis)
self.ve.imageScenes[axis].thread.freeQueue.wait()
self.ve.imageScenes[axis].saveSlice(self.filename)
self.imageSaved.set()
self.imagePending.clear()
if self.previousSlice is not None:
self.ve.sliceSelectors[axis].setValue(self.previousSlice)
self.previousSlice = None
#*******************************************************************************
# I m a g e S c e n e R e n d e r T h r e a d *
#*******************************************************************************
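# ImageSceneRenderThread repaints the image patches of one slice view off the GUI thread:
# work items of the form (patchNumbers, image, overlays, min, max) arrive via self.queue
# (signalled through self.dataPending), finished patch numbers are collected in self.outQueue,
# and self.newerDataPending / self.freeQueue let the editor cancel or wait for pending work.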
class ImageSceneRenderThread(QtCore.QThread):
def __init__(self, parent):
QtCore.QThread.__init__(self, None)
self.imageScene = parent
self.patchAccessor = parent.patchAccessor
self.volumeEditor = parent.volumeEditor
#self.queue = deque(maxlen=1) #python 2.6
self.queue = deque() #python 2.5
self.outQueue = deque()
self.dataPending = threading.Event()
self.dataPending.clear()
self.newerDataPending = threading.Event()
self.newerDataPending.clear()
self.freeQueue = threading.Event()
self.freeQueue.clear()
self.stopped = False
#if self.imageScene.openglWidget is not None:
# self.contextPixmap = QtGui.QPixmap(2,2)
# self.context = QtOpenGL.QGLContext(self.imageScene.openglWidget.context().format(), self.contextPixmap)
# self.context.create(self.imageScene.openglWidget.context())
#else:
# self.context = None
def run(self):
#self.context.makeCurrent()
while not self.stopped:
self.emit(QtCore.SIGNAL('finishedQueue()'))
self.dataPending.wait()
self.newerDataPending.clear()
self.freeQueue.clear()
while len(self.queue) > 0:
stuff = self.queue.pop()
if stuff is not None:
nums, origimage, overlays , min, max = stuff
for patchNr in nums:
if self.newerDataPending.isSet():
self.newerDataPending.clear()
break
bounds = self.patchAccessor.getPatchBounds(patchNr)
if self.imageScene.openglWidget is None:
p = QtGui.QPainter(self.imageScene.scene.image)
p.translate(bounds[0],bounds[2])
else:
p = QtGui.QPainter(self.imageScene.imagePatches[patchNr])
p.eraseRect(0,0,bounds[1]-bounds[0],bounds[3]-bounds[2])
#add overlays
for index, origitem in enumerate(overlays):
p.setOpacity(origitem.alpha)
itemcolorTable = origitem.colorTable
itemdata = origitem._data[bounds[0]:bounds[1],bounds[2]:bounds[3]]
origitemColor = None
if isinstance(origitem.color, long) or isinstance(origitem.color, int):
origitemColor = QtGui.QColor.fromRgba(long(origitem.color))
else:
origitemColor = origitem.color
# if itemdata is uint16
# convert it for display purposes
if itemcolorTable is None and itemdata.dtype == numpy.uint16:
print '*** Normalizing your data for display purposes'
print '*** I assume you have 12bit data'
itemdata = (itemdata*255.0/4095.0).astype(numpy.uint8)
if itemcolorTable != None:
if itemdata.dtype != 'uint8':
"""
if the item is larger we take the values module 256
since QImage supports only 8Bit Indexed images
"""
olditemdata = itemdata
itemdata = numpy.ndarray(olditemdata.shape, 'float32')
#print "moduo", olditemdata.shape, olditemdata.dtype
if olditemdata.dtype == 'uint32':
itemdata[:] = numpy.right_shift(numpy.left_shift(olditemdata,24),24)[:]
elif olditemdata.dtype == 'uint64':
itemdata[:] = numpy.right_shift(numpy.left_shift(olditemdata,56),56)[:]
elif olditemdata.dtype == 'int32':
itemdata[:] = numpy.right_shift(numpy.left_shift(olditemdata,24),24)[:]
elif olditemdata.dtype == 'int64':
itemdata[:] = numpy.right_shift(numpy.left_shift(olditemdata,56),56)[:]
elif olditemdata.dtype == 'uint16':
itemdata[:] = numpy.right_shift(numpy.left_shift(olditemdata,8),8)[:]
else:
#raise TypeError(str(olditemdata.dtype) + ' <- unsupported image _data type (in the rendering thread, you know) ')
# TODO: Workaround: tried to fix the problem
# with the segmentation display, somehow it arrives
# here in float32
print TypeError(str(olditemdata.dtype) + ': unsupported dtype of overlay in ImageSceneRenderThread.run()')
continue
if len(itemdata.shape) > 2 and itemdata.shape[2] > 1:
image0 = qimage2ndarray.array2qimage(itemdata.swapaxes(0,1), normalize=False)
else:
image0 = qimage2ndarray.gray2qimage(itemdata.swapaxes(0,1), normalize=False)
image0.setColorTable(itemcolorTable[:])
else:
if origitem.min is not None and origitem.max is not None:
normalize = (origitem.min, origitem.max)
else:
normalize = False
if origitem.autoAlphaChannel is False:
if len(itemdata.shape) == 3 and itemdata.shape[2] == 3:
image1 = qimage2ndarray.array2qimage(itemdata.swapaxes(0,1), normalize)
image0 = image1
else:
tempdat = numpy.zeros(itemdata.shape[0:2] + (3,), 'float32')
tempdat[:,:,0] = origitemColor.redF()*itemdata[:]
tempdat[:,:,1] = origitemColor.greenF()*itemdata[:]
tempdat[:,:,2] = origitemColor.blueF()*itemdata[:]
image1 = qimage2ndarray.array2qimage(tempdat.swapaxes(0,1), normalize)
image0 = image1
else:
image1 = qimage2ndarray.array2qimage(itemdata.swapaxes(0,1), normalize)
image0 = QtGui.QImage(itemdata.shape[0],itemdata.shape[1],QtGui.QImage.Format_ARGB32)#qimage2ndarray.array2qimage(itemdata.swapaxes(0,1), normalize=False)
image0.fill(origitemColor.rgba())
image0.setAlphaChannel(image1)
p.drawImage(0,0, image0)
p.end()
self.outQueue.append(patchNr)
# if self.imageScene.scene.tex > -1:
# self.context.makeCurrent()
# glBindTexture(GL_TEXTURE_2D,self.imageScene.scene.tex)
# b = self.imageScene.patchAccessor.getPatchBounds(patchNr,0)
# glTexSubImage2D(GL_TEXTURE_2D, 0, b[0], b[2], b[1]-b[0], b[3]-b[2], GL_RGB, GL_UNSIGNED_BYTE, ctypes.c_void_p(self.imageScene.imagePatches[patchNr].bits().__int__()))
#
# self.outQueue.clear()
self.dataPending.clear()
#*******************************************************************************
# C r o s s H a i r C u r s o r *
#*******************************************************************************
class CrossHairCursor(QtGui.QGraphicsItem) :
modeYPosition = 0
modeXPosition = 1
modeXYPosition = 2
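# display modes: modeXPosition draws a vertical line at x, modeYPosition a horizontal line
# at y, and modeXYPosition a full cross hair plus a circle showing the current brush size
# (see paint() below).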
def boundingRect(self):
return QtCore.QRectF(0,0, self.width, self.height)
def __init__(self, width, height):
QtGui.QGraphicsItem.__init__(self)
self.width = width
self.height = height
self.penDotted = QtGui.QPen(QtCore.Qt.red, 2, QtCore.Qt.DotLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin)
self.penDotted.setCosmetic(True)
self.penSolid = QtGui.QPen(QtCore.Qt.red, 2)
self.penSolid.setCosmetic(True)
self.x = 0
self.y = 0
self.brushSize = 0
self.mode = self.modeXYPosition
def setColor(self, color):
self.penDotted = QtGui.QPen(color, 2, QtCore.Qt.DotLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin)
self.penDotted.setCosmetic(True)
self.penSolid = QtGui.QPen(color, 2)
self.penSolid.setCosmetic(True)
self.update()
def showXPosition(self, x, y):
"""only mark the x position by displaying a line f(y) = x"""
self.setVisible(True)
self.mode = self.modeXPosition
self.setPos(x, y - int(y))
def showYPosition(self, y, x):
"""only mark the y position by displaying a line f(x) = y"""
self.setVisible(True)
self.mode = self.modeYPosition
self.setPos(x - int(x), y)
def showXYPosition(self, x,y):
"""mark the (x,y) position by displaying a cross hair cursor
including a circle indicating the current brush size"""
self.setVisible(True)
self.mode = self.modeXYPosition
self.setPos(x,y)
def paint(self, painter, option, widget=None):
painter.setPen(self.penDotted)
if self.mode == self.modeXPosition:
painter.drawLine(QtCore.QPointF(self.x+0.5, 0), QtCore.QPointF(self.x+0.5, self.height))
elif self.mode == self.modeYPosition:
painter.drawLine(QtCore.QPointF(0, self.y), QtCore.QPointF(self.width, self.y))
else:
painter.drawLine(QtCore.QPointF(0.0,self.y), QtCore.QPointF(self.x -0.5*self.brushSize, self.y))
painter.drawLine(QtCore.QPointF(self.x+0.5*self.brushSize, self.y), QtCore.QPointF(self.width, self.y))
painter.drawLine(QtCore.QPointF(self.x, 0), QtCore.QPointF(self.x, self.y-0.5*self.brushSize))
painter.drawLine(QtCore.QPointF(self.x, self.y+0.5*self.brushSize), QtCore.QPointF(self.x, self.height))
painter.setPen(self.penSolid)
painter.drawEllipse(QtCore.QPointF(self.x, self.y), 0.5*self.brushSize, 0.5*self.brushSize)
def setPos(self, x, y):
self.x = x
self.y = y
self.update()
def setBrushSize(self, size):
self.brushSize = size
self.update()
#*******************************************************************************
# S l i c e I n t e r s e c t i o n M a r k e r *
#*******************************************************************************
class SliceIntersectionMarker(QtGui.QGraphicsItem) :
def boundingRect(self):
return QtCore.QRectF(0,0, self.width, self.height)
def __init__(self, width, height):
QtGui.QGraphicsItem.__init__(self)
self.width = width
self.height = height
self.penX = QtGui.QPen(QtCore.Qt.red, 2)
self.penX.setCosmetic(True)
self.penY = QtGui.QPen(QtCore.Qt.green, 2)
self.penY.setCosmetic(True)
self.x = 0
self.y = 0
self.isVisible = False
def setPosition(self, x, y):
self.x = x
self.y = y
self.update()
def setPositionX(self, x):
self.setPosition(x, self.y)
def setPositionY(self, y):
self.setPosition(self.x, y)
def setColor(self, colorX, colorY):
self.penX = QtGui.QPen(colorX, 2)
self.penX.setCosmetic(True)
self.penY = QtGui.QPen(colorY, 2)
self.penY.setCosmetic(True)
self.update()
def setVisibility(self, state):
if state == True:
self.isVisible = True
else:
self.isVisible = False
self.update()
def paint(self, painter, option, widget=None):
if self.isVisible:
painter.setPen(self.penY)
painter.drawLine(QtCore.QPointF(0.0,self.y), QtCore.QPointF(self.width, self.y))
painter.setPen(self.penX)
painter.drawLine(QtCore.QPointF(self.x, 0), QtCore.QPointF(self.x, self.height))
def setPos(self, x, y):
self.x = x
self.y = y
self.update()
#*******************************************************************************
# I m a g e G r a p h i c s I t e m *
#*******************************************************************************
class ImageGraphicsItem(QtGui.QGraphicsItem):
def __init__(self, image):
QtGui.QGraphicsItem.__init__(self)
self.image = image
def paint(self,painter, options, widget):
painter.setClipRect( options.exposedRect )
painter.drawImage(0,0,self.image)
def boundingRect(self):
return QtCore.QRectF(self.image.rect())
#*******************************************************************************
# C u s t o m G r a p h i c s S c e n e *
#*******************************************************************************
class CustomGraphicsScene( QtGui.QGraphicsScene):#, QtOpenGL.QGLWidget):
def __init__(self,parent,widget,image):
QtGui.QGraphicsScene.__init__(self)
#QtOpenGL.QGLWidget.__init__(self)
self._widget = widget
self.imageScene = parent
self.image = image
self.images = []
self.bgColor = QtGui.QColor(QtCore.Qt.black)
self.tex = -1
def drawBackground(self, painter, rect):
#painter.fillRect(rect,self.bgBrush)
if self._widget != None:
self._widget.context().makeCurrent()
glClearColor(self.bgColor.redF(),self.bgColor.greenF(),self.bgColor.blueF(),1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
if self.tex > -1:
#self._widget.drawTexture(QtCore.QRectF(self.image.rect()),self.tex)
d = painter.device()
dc = sip.cast(d,QtOpenGL.QGLFramebufferObject)
rect = QtCore.QRectF(self.image.rect())
tl = rect.topLeft()
br = rect.bottomRight()
#flip coordinates since the texture is flipped
#this is due to qimage having a different representation than OpenGL
rect.setCoords(tl.x(),br.y(),br.x(),tl.y())
#switch coordinates if the qt version is older
painter.beginNativePainting()
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
dc.drawTexture(rect,self.tex)
painter.endNativePainting()
else:
painter.setClipRect(rect)
painter.drawImage(0,0,self.image)
#*******************************************************************************
# I m a g e S c e n e *
#*******************************************************************************
#TODO: ImageScene should not care/know about what axis it is!
class ImageScene(QtGui.QGraphicsView):
#axisColor = [QtGui.QColor("red"), QtGui.QColor("green"), QtGui.QColor("blue")]
axisColor = [QtGui.QColor(255,0,0,255), QtGui.QColor(0,255,0,255), QtGui.QColor(0,0,255,255)]
def __borderMarginIndicator__(self, margin):
print "__borderMarginIndicator__()", margin
"""
update the border margin indicator (left, right, top, bottom)
to reflect the new given margin
"""
self.margin = margin
if self.border:
self.scene.removeItem(self.border)
borderPath = QtGui.QPainterPath()
borderPath.setFillRule(QtCore.Qt.WindingFill)
borderPath.addRect(0,0, margin, self.imShape[1])
borderPath.addRect(0,0, self.imShape[0], margin)
borderPath.addRect(self.imShape[0]-margin,0, margin, self.imShape[1])
borderPath.addRect(0,self.imShape[1]-margin, self.imShape[0], margin)
self.border = QtGui.QGraphicsPathItem(borderPath)
brush = QtGui.QBrush(QtGui.QColor(0,0,255))
brush.setStyle( QtCore.Qt.Dense7Pattern )
self.border.setBrush(brush)
self.border.setPen(QtGui.QPen(QtCore.Qt.NoPen))
self.border.setZValue(200)
self.scene.addItem(self.border)
def __init__(self, parent, imShape, axis, drawManager):
"""
imShape: 3D shape of the block that this slice view displays.
first two entries denote the x,y extent of one slice,
the last entry is the extent in slice direction
"""
QtGui.QGraphicsView.__init__(self)
self.imShape = imShape[0:2]
self.drawManager = drawManager
self.tempImageItems = []
self.volumeEditor = parent
self.axis = axis
self.sliceNumber = 0
self.sliceExtent = imShape[2]
self.drawing = False
self.view = self
self.image = QtGui.QImage(imShape[0], imShape[1], QtGui.QImage.Format_RGB888) #Format_ARGB32
self.border = None
self.allBorder = None
self.factor = 1.0
#for panning
self.lastPanPoint = QtCore.QPoint()
self.dragMode = False
self.deltaPan = QtCore.QPointF(0,0)
self.x = 0.0
self.y = 0.0
self.min = 0
self.max = 255
self.openglWidget = None
##enable OpenGL acceleration
if self.volumeEditor.sharedOpenGLWidget is not None:
self.openglWidget = QtOpenGL.QGLWidget(shareWidget = self.volumeEditor.sharedOpenGLWidget)
self.setViewport(self.openglWidget)
self.setViewportUpdateMode(QtGui.QGraphicsView.FullViewportUpdate)
self.scene = CustomGraphicsScene(self, self.openglWidget, self.image)
# oli todo
if self.volumeEditor.image.shape[1] > 1:
grviewHudLayout = QtGui.QVBoxLayout(self)
tempLayout = QtGui.QHBoxLayout()
self.fullScreenButton = QtGui.QPushButton()
self.fullScreenButton.setIcon(QtGui.QIcon(QtGui.QPixmap(ilastikIcons.AddSelx22)))
self.fullScreenButton.setStyleSheet("background-color: white; border: 2px solid " + self.axisColor[self.axis].name() +"; border-radius: 4px;")
self.connect(self.fullScreenButton, QtCore.SIGNAL('clicked()'), self.imageSceneFullScreen)
tempLayout.addWidget(self.fullScreenButton)
tempLayout.addStretch()
grviewHudLayout.addLayout(tempLayout)
grviewHudLayout.addStretch()
if self.openglWidget is not None:
self.openglWidget.context().makeCurrent()
self.scene.tex = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D,self.scene.tex)
glTexImage2D(GL_TEXTURE_2D, 0,GL_RGB, self.scene.image.width(), self.scene.image.height(), 0, GL_RGB, GL_UNSIGNED_BYTE, ctypes.c_void_p(self.scene.image.bits().__int__()))
self.view.setScene(self.scene)
self.scene.setSceneRect(0,0, imShape[0],imShape[1])
self.view.setSceneRect(0,0, imShape[0],imShape[1])
self.scene.bgColor = QtGui.QColor(QtCore.Qt.white)
if os.path.isfile('gui/backGroundBrush.png'):
self.scene.bgBrush = QtGui.QBrush(QtGui.QImage('gui/backGroundBrush.png'))
else:
self.scene.bgBrush = QtGui.QBrush(QtGui.QColor(QtCore.Qt.black))
#self.setBackgroundBrush(brushImage)
self.view.setRenderHint(QtGui.QPainter.Antialiasing, False)
#self.view.setRenderHint(QtGui.QPainter.SmoothPixmapTransform, False)
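# rendering is patch based: the slice plane is split into (roughly) 64x64 pixel tiles via
# PatchAccessor, and each tile gets its own QImage so the render thread can update them
# independently.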
self.patchAccessor = PatchAccessor(imShape[0],imShape[1],64)
#print "PatchCount :", self.patchAccessor.patchCount
self.imagePatches = range(self.patchAccessor.patchCount)
for i,p in enumerate(self.imagePatches):
b = self.patchAccessor.getPatchBounds(i, 0)
self.imagePatches[i] = QtGui.QImage(b[1]-b[0], b[3] -b[2], QtGui.QImage.Format_RGB888)
self.pixmap = QtGui.QPixmap.fromImage(self.image)
self.imageItem = QtGui.QGraphicsPixmapItem(self.pixmap)
#self.setStyleSheet("QWidget:!focus { border: 2px solid " + self.axisColor[self.axis].name() +"; border-radius: 4px; }\
# QWidget:focus { border: 2px solid white; border-radius: 4px; }")
if self.axis == 0:
self.view.rotate(90.0)
self.view.scale(1.0,-1.0)
#on right mouse press, the customContextMenuRequested() signal is
#_automatically_ emitted, no need to call onContext explicitly
#self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
#self.connect(self, QtCore.SIGNAL("customContextMenuRequested(QPoint)"), self.onContext)
self.setMouseTracking(True)
#indicators for the biggest filter mask's size
#marks the area where labels should not be placed
# -> the margin top, left, right, bottom
self.margin = 0
# -> the complete 2D slice is marked
brush = QtGui.QBrush(QtGui.QColor(0,0,255))
brush.setStyle( QtCore.Qt.DiagCrossPattern )
allBorderPath = QtGui.QPainterPath()
allBorderPath.setFillRule(QtCore.Qt.WindingFill)
allBorderPath.addRect(0, 0, imShape[0], imShape[1])
self.allBorder = QtGui.QGraphicsPathItem(allBorderPath)
self.allBorder.setBrush(brush)
self.allBorder.setPen(QtGui.QPen(QtCore.Qt.NoPen))
self.scene.addItem(self.allBorder)
self.allBorder.setVisible(False)
self.allBorder.setZValue(99)
self.ticker = QtCore.QTimer(self)
self.connect(self.ticker, QtCore.SIGNAL("timeout()"), self.tickerEvent)
#label updates while drawing, needed for interactive segmentation
self.drawTimer = QtCore.QTimer(self)
self.connect(self.drawTimer, QtCore.SIGNAL("timeout()"), self.updateLabels)
# invisible cursor to enable custom cursor
self.hiddenCursor = QtGui.QCursor(QtCore.Qt.BlankCursor)
# For screen recording BlankCursor doesn't work
#self.hiddenCursor = QtGui.QCursor(QtCore.Qt.ArrowCursor)
self.thread = ImageSceneRenderThread(self)
self.connect(self.thread, QtCore.SIGNAL('finishedPatch(int)'),self.redrawPatch)
self.connect(self.thread, QtCore.SIGNAL('finishedQueue()'), self.renderingThreadFinished)
self.thread.start()
#self.connect(self, QtCore.SIGNAL("destroyed()"),self.cleanUp)
self.crossHairCursor = CrossHairCursor(self.image.width(), self.image.height())
self.crossHairCursor.setZValue(100)
self.scene.addItem(self.crossHairCursor)
self.crossHairCursor.setBrushSize(self.drawManager.brushSize)
self.sliceIntersectionMarker = SliceIntersectionMarker(self.image.width(), self.image.height())
self.sliceIntersectionMarker.setPos(23, 42);
if self.axis == 0:
self.sliceIntersectionMarker.setColor(self.axisColor[1], self.axisColor[2])
elif self.axis == 1:
self.sliceIntersectionMarker.setColor(self.axisColor[0], self.axisColor[2])
elif self.axis == 2:
self.sliceIntersectionMarker.setColor(self.axisColor[0], self.axisColor[1])
self.scene.addItem(self.sliceIntersectionMarker)
self.tempErase = False
def imageSceneFullScreen(self):
if self.volumeEditor.imageScenes[0] == self.fullScreenButton.parent():
self.volumeEditor.toggleFullscreenX()
if self.volumeEditor.imageScenes[1] == self.fullScreenButton.parent():
self.volumeEditor.toggleFullscreenY()
if self.volumeEditor.imageScenes[2] == self.fullScreenButton.parent():
self.volumeEditor.toggleFullscreenZ()
def setImageSceneFullScreenLabel(self):
self.allVisible = True
a = range(3)
for i in a:
if not self.volumeEditor.imageScenes[i].isVisible():
self.allVisible = False
break
if self.allVisible:
self.fullScreenButton.setIcon(QtGui.QIcon(QtGui.QPixmap(ilastikIcons.AddSelx22)))
else:
self.fullScreenButton.setIcon(QtGui.QIcon(QtGui.QPixmap(ilastikIcons.RemSelx22)))
def setSliceIntersection(self, state):
if state == QtCore.Qt.Checked:
self.sliceIntersectionMarker.setVisibility(True)
else:
self.sliceIntersectionMarker.setVisibility(False)
def updateSliceIntersection(self, num, axis):
if self.axis == 0:
if axis == 1:
self.sliceIntersectionMarker.setPositionX(num)
elif axis == 2:
self.sliceIntersectionMarker.setPositionY(num)
else:
return
elif self.axis == 1:
if axis == 0:
self.sliceIntersectionMarker.setPositionX(num)
elif axis == 2:
self.sliceIntersectionMarker.setPositionY(num)
else:
return
elif self.axis == 2:
if axis == 0:
self.sliceIntersectionMarker.setPositionX(num)
elif axis == 1:
self.sliceIntersectionMarker.setPositionY(num)
else:
return
def changeSlice(self, delta):
if self.drawing == True:
self.endDraw(self.mousePos)
self.drawing = True
self.drawManager.beginDraw(self.mousePos, self.imShape)
self.volumeEditor.sliceSelectors[self.axis].stepBy(delta)
if self.volumeEditor.interactionLog is not None:
lm = "%f: changeSlice(axis, num) %d, %d" % (time.clock(), self.axis, self.volumeEditor.sliceSelectors[self.axis].value())
self.volumeEditor.interactionLog.append(lm)
def sliceUp(self):
self.changeSlice(1)
def sliceUp10(self):
self.changeSlice(10)
def sliceDown(self):
self.changeSlice(-1)
def sliceDown10(self):
self.changeSlice(-10)
def brushSmaller(self):
b = self.drawManager.brushSize
if b > 1:
self.drawManager.setBrushSize(b-1)
self.crossHairCursor.setBrushSize(b-1)
def brushBigger(self):
b = self.drawManager.brushSize
if b < 61:
self.drawManager.setBrushSize(b+1)
self.crossHairCursor.setBrushSize(b+1)
def cleanUp(self):
#print "stopping ImageSCeneRenderThread", str(self.axis)
self.thread.stopped = True
self.thread.dataPending.set()
self.thread.wait()
self.ticker.stop()
self.drawTimer.stop()
del self.drawTimer
del self.ticker
print "finished thread"
def updatePatches(self, patchNumbers ,image, overlays = ()):
stuff = [patchNumbers,image, overlays, self.min, self.max]
#print patchNumbers
if patchNumbers is not None:
self.thread.queue.append(stuff)
self.thread.dataPending.set()
def displayNewSlice(self, image, overlays = (), fastPreview = True):
self.thread.queue.clear()
self.thread.newerDataPending.set()
fastPreview = fastPreview and self.volumeEditor.fastRepaint
#if we are in opengl 2d render mode, quickly update the texture without any overlays
#to get a fast update on slice change
if image is not None:
#TODO: This doing something twice (see below)
if fastPreview is True and self.volumeEditor.sharedOpenGLWidget is not None and len(image.shape) == 2:
self.volumeEditor.sharedOpenGLWidget.context().makeCurrent()
t = self.scene.tex
ti = qimage2ndarray.gray2qimage(image.swapaxes(0,1), normalize = self.volumeEditor.normalizeData)
if not t > -1:
self.scene.tex = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D,self.scene.tex)
glTexImage2D(GL_TEXTURE_2D, 0,GL_RGB, ti.width(), ti.height(), 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, ctypes.c_void_p(ti.bits().__int__()))
else:
glBindTexture(GL_TEXTURE_2D,self.scene.tex)
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, ti.width(), ti.height(), GL_LUMINANCE, GL_UNSIGNED_BYTE, ctypes.c_void_p(ti.bits().__int__()))
self.viewport().repaint()
if self.volumeEditor.normalizeData:
self.min = numpy.min(image)
self.max = numpy.max(image)
else:
self.min = 0
self.max = 255
###########
#TODO: This doing something twice (see above)
self.updatePatches(range(self.patchAccessor.patchCount), image, overlays)
def saveSlice(self, filename):
print "Saving in ", filename, "slice #", self.sliceNumber, "axis", self.axis
result_image = QtGui.QImage(self.scene.image.size(), self.scene.image.format())
p = QtGui.QPainter(result_image)
for patchNr in range(self.patchAccessor.patchCount):
bounds = self.patchAccessor.getPatchBounds(patchNr)
if self.openglWidget is None:
p.drawImage(0, 0, self.scene.image)
else:
p.drawImage(bounds[0], bounds[2], self.imagePatches[patchNr])
p.end()
#horrible way to transpose an image. but it works.
transform = QtGui.QTransform()
transform.rotate(90)
result_image = result_image.mirrored()
result_image = result_image.transformed(transform)
result_image.save(QtCore.QString(filename))
def display(self, image, overlays = ()):
self.thread.queue.clear()
self.updatePatches(range(self.patchAccessor.patchCount),image, overlays)
def renderingThreadFinished(self):
#only proceed if there is no new _data already in the rendering thread queue
if not self.thread.dataPending.isSet():
#if, in slicing direction, we are within the margin of the image border
#we set the border overlay indicator to visible
self.allBorder.setVisible((self.sliceNumber < self.margin or self.sliceExtent - self.sliceNumber < self.margin) and self.sliceExtent > 1 and self.volumeEditor.useBorderMargin)
# print "renderingThreadFinished()", self.volumeEditor.useBorderMargin, self.volumeEditor.borderMargin
#if we are in opengl 2d render mode, update the texture
if self.openglWidget is not None:
self.volumeEditor.sharedOpenGLWidget.context().makeCurrent()
for patchNr in self.thread.outQueue:
t = self.scene.tex
#self.scene.tex = -1
if t > -1:
#self.openglWidget.deleteTexture(t)
pass
else:
#self.scene.tex = self.openglWidget.bindTexture(self.scene.image, GL_TEXTURE_2D, GL_RGBA)
self.scene.tex = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D,self.scene.tex)
glTexImage2D(GL_TEXTURE_2D, 0,GL_RGB, self.scene.image.width(), self.scene.image.height(), 0, GL_RGB, GL_UNSIGNED_BYTE, ctypes.c_void_p(self.scene.image.bits().__int__()))
glBindTexture(GL_TEXTURE_2D,self.scene.tex)
b = self.patchAccessor.getPatchBounds(patchNr,0)
glTexSubImage2D(GL_TEXTURE_2D, 0, b[0], b[2], b[1]-b[0], b[3]-b[2], GL_RGB, GL_UNSIGNED_BYTE, ctypes.c_void_p(self.imagePatches[patchNr].bits().__int__()))
else:
# TODO: What is going on down here??
"""
t = self.scene.tex
#self.scene.tex = -1
if t > -1:
#self.openglWidget.deleteTexture(t)
pass
else:
#self.scene.tex = self.openglWidget.bindTexture(self.scene.image, GL_TEXTURE_2D, GL_RGBA)
self.scene.tex = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D,self.scene.tex)
glTexImage2D(GL_TEXTURE_2D, 0,GL_RGB, self.scene.image.width(), self.scene.image.height(), 0, GL_RGB, GL_UNSIGNED_BYTE, ctypes.c_void_p(self.scene.image.bits().__int__()))
#glBindTexture(GL_TEXTURE_2D,self.scene.tex)
#glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, self.scene.image.width(), self.scene.image.height(), GL_RGB, GL_UNSIGNED_BYTE, ctypes.c_void_p(self.scene.image.bits().__int__()))
"""
self.thread.outQueue.clear()
#if all updates have been rendered remove tempitems
if self.thread.queue.__len__() == 0:
for index, item in enumerate(self.tempImageItems):
self.scene.removeItem(item)
self.tempImageItems = []
#update the scene, and the 3d overview
#print "updating slice view ", self.axis
self.viewport().repaint() #update(QtCore.QRectF(self.image.rect()))
self.volumeEditor.overview.display(self.axis)
self.thread.freeQueue.set()
def redrawPatch(self, patchNr):
if self.thread.stopped is False:
pass
# patch = self.thread.imagePatches[patchNr]
# if self.textures[patchNr] < 0 :
# t = self.openglWidget.bindTexture(patch)
# self.textures[patchNr] = t
# else:
# t_old = self.textures[patchNr]
#
# t_new = self.openglWidget.bindTexture(patch)
# self.textures[patchNr] = t_new
#
# self.openglWidget.deleteTexture(t_old)
# bounds = self.patchAccessor.getPatchBounds(patchNr)
# p = QtGui.QPainter(self.scene.image)
# p.drawImage(bounds[0],bounds[2],self.thread.imagePatches[patchNr])
# p.end()
#self.scene.update(bounds[0],bounds[2],bounds[1]-bounds[0],bounds[3]-bounds[2])
def updateLabels(self):
result = self.drawManager.dumpDraw(self.mousePos)
image = result[2]
ndarr = qimage2ndarray.rgb_view(image)
labels = ndarr[:,:,0]
labels = labels.swapaxes(0,1)
number = self.volumeEditor.labelWidget.currentItem().number
labels = numpy.where(labels > 0, number, 0)
ls = LabelState('drawing', self.axis, self.volumeEditor.selSlices[self.axis], result[0:2], labels.shape, self.volumeEditor.selectedTime, self.volumeEditor, self.drawManager.erasing, labels, number)
self.volumeEditor._history.append(ls)
self.volumeEditor.setLabels(result[0:2], self.axis, self.volumeEditor.sliceSelectors[self.axis].value(), labels, self.drawManager.erasing)
def beginDraw(self, pos):
if self.volumeEditor.interactionLog is not None:
lm = "%f: endDraw()" % (time.clock())
self.volumeEditor.interactionLog.append(lm)
self.mousePos = pos
self.drawing = True
line = self.drawManager.beginDraw(pos, self.imShape)
line.setZValue(99)
self.tempImageItems.append(line)
self.scene.addItem(line)
if self.volumeEditor.drawUpdateInterval > 0:
self.drawTimer.start(self.volumeEditor.drawUpdateInterval) #update labels every some ms
self.volumeEditor.labelWidget.ensureLabelOverlayVisible()
def endDraw(self, pos):
if self.volumeEditor.interactionLog is not None:
lm = "%f: endDraw()" % (time.clock())
self.volumeEditor.interactionLog.append(lm)
self.drawTimer.stop()
result = self.drawManager.endDraw(pos)
image = result[2]
ndarr = qimage2ndarray.rgb_view(image)
labels = ndarr[:,:,0]
labels = labels.swapaxes(0,1)
number = self.volumeEditor.labelWidget.currentItem().number
labels = numpy.where(labels > 0, number, 0)
ls = LabelState('drawing', self.axis, self.volumeEditor.selSlices[self.axis], result[0:2], labels.shape, self.volumeEditor.selectedTime, self.volumeEditor, self.drawManager.erasing, labels, number)
self.volumeEditor._history.append(ls)
self.volumeEditor.setLabels(result[0:2], self.axis, self.volumeEditor.sliceSelectors[self.axis].value(), labels, self.drawManager.erasing)
self.volumeEditor.pushLabelsToLabelWidget()
self.drawing = False
def wheelEvent(self, event):
keys = QtGui.QApplication.keyboardModifiers()
k_alt = (keys == QtCore.Qt.AltModifier)
k_ctrl = (keys == QtCore.Qt.ControlModifier)
self.mousePos = self.mapToScene(event.pos())
grviewCenter = self.mapToScene(self.viewport().rect().center())
if event.delta() > 0:
if k_alt is True:
self.changeSlice(10)
elif k_ctrl is True:
scaleFactor = 1.1
self.doScale(scaleFactor)
else:
self.changeSlice(1)
else:
if k_alt is True:
self.changeSlice(-10)
elif k_ctrl is True:
scaleFactor = 0.9
self.doScale(scaleFactor)
else:
self.changeSlice(-1)
if k_ctrl is True:
mousePosAfterScale = self.mapToScene(event.pos())
offset = self.mousePos - mousePosAfterScale
newGrviewCenter = grviewCenter + offset
self.centerOn(newGrviewCenter)
self.mouseMoveEvent(event)
def zoomOut(self):
self.doScale(0.9)
def zoomIn(self):
self.doScale(1.1)
def doScale(self, factor):
self.factor = self.factor * factor
if self.volumeEditor.interactionLog is not None:
lm = "%f: zoomFactor(factor) %f" % (time.clock(), self.factor)
self.volumeEditor.interactionLog.append(lm)
self.view.scale(factor, factor)
def tabletEvent(self, event):
self.setFocus(True)
if not self.volumeEditor.labelWidget.currentItem():
return
self.mousePos = mousePos = self.mapToScene(event.pos())
x = mousePos.x()
y = mousePos.y()
if event.pointerType() == QtGui.QTabletEvent.Eraser or QtGui.QApplication.keyboardModifiers() == QtCore.Qt.ShiftModifier:
self.drawManager.setErasing()
elif event.pointerType() == QtGui.QTabletEvent.Pen and QtGui.QApplication.keyboardModifiers() != QtCore.Qt.ShiftModifier:
self.drawManager.disableErasing()
if self.drawing == True:
if event.pressure() == 0:
self.endDraw(mousePos)
self.volumeEditor.changeSlice(self.volumeEditor.selSlices[self.axis], self.axis)
else:
if self.drawManager.erasing:
#make the brush size bigger while erasing
self.drawManager.setBrushSize(int(event.pressure()*10))
else:
self.drawManager.setBrushSize(int(event.pressure()*7))
if self.drawing == False:
if event.pressure() > 0:
self.beginDraw(mousePos)
self.mouseMoveEvent(event)
#TODO oli
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.MidButton:
self.lastPanPoint = event.pos()
self.crossHairCursor.setVisible(False)
self.dragMode = True
if self.ticker.isActive():
self.deltaPan = QtCore.QPointF(0, 0)
if not self.volumeEditor.labelWidget.currentItem():
return
if event.buttons() == QtCore.Qt.LeftButton:
#don't draw while the view is still flicking (kinetic panning in progress)
if self.ticker.isActive():
return
if QtGui.QApplication.keyboardModifiers() == QtCore.Qt.ShiftModifier:
self.drawManager.setErasing()
self.tempErase = True
mousePos = self.mapToScene(event.pos())
self.beginDraw(mousePos)
if event.buttons() == QtCore.Qt.RightButton:
#make sure that we have the cursor at the correct position
#before we call the context menu
self.mouseMoveEvent(event)
self.onContext(event.pos())
#TODO oli
def mouseReleaseEvent(self, event):
if event.button() == QtCore.Qt.MidButton:
releasePoint = event.pos()
self.lastPanPoint = releasePoint
self.dragMode = False
self.ticker.start(20)
if self.drawing == True:
mousePos = self.mapToScene(event.pos())
self.endDraw(mousePos)
if self.tempErase == True:
self.drawManager.disableErasing()
self.tempErase = False
#TODO oli
def panning(self):
hBar = self.horizontalScrollBar()
vBar = self.verticalScrollBar()
vBar.setValue(vBar.value() - self.deltaPan.y())
if self.isRightToLeft():
hBar.setValue(hBar.value() + self.deltaPan.x())
else:
hBar.setValue(hBar.value() - self.deltaPan.x())
#TODO oli
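# kinetic panning helpers: after a middle-button drag is released, tickerEvent() keeps
# applying the last pan delta and deaccelerate() shrinks it towards zero each tick
# (qBound clamps the speed, setdeaccelerateAxAy picks the per-axis decay steps).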
def deaccelerate(self, speed, a=1, maxVal=64):
x = self.qBound(-maxVal, speed.x(), maxVal)
y = self.qBound(-maxVal, speed.y(), maxVal)
ax ,ay = self.setdeaccelerateAxAy(speed.x(), speed.y(), a)
if x > 0:
x = max(0.0, x - a*ax)
elif x < 0:
x = min(0.0, x + a*ax)
if y > 0:
y = max(0.0, y - a*ay)
elif y < 0:
y = min(0.0, y + a*ay)
return QtCore.QPointF(x, y)
#TODO oli
def qBound(self, minVal, current, maxVal):
return max(min(current, maxVal), minVal)
def setdeaccelerateAxAy(self, x, y, a):
x = abs(x)
y = abs(y)
if x > y:
if y > 0:
ax = int(x / y)
if ax != 0:
return ax, 1
else:
return x/a, 1
if y > x:
if x > 0:
ay = int(y/x)
if ay != 0:
return 1, ay
else:
return 1, y/a
return 1, 1
#TODO oli
def tickerEvent(self):
if self.deltaPan.x() == 0.0 and self.deltaPan.y() == 0.0 or self.dragMode == True:
self.ticker.stop()
cursor = QtGui.QCursor()
mousePos = self.mapToScene(self.mapFromGlobal(cursor.pos()))
x = mousePos.x()
y = mousePos.y()
self.crossHairCursor.showXYPosition(x, y)
else:
self.deltaPan = self.deaccelerate(self.deltaPan)
self.panning()
#TODO oli
def updateInfoLabels(self, posX, posY, posZ, colorValues):
self.volumeEditor.posLabel.setText("<b>x:</b> %03i <b>y:</b> %03i <b>z:</b> %03i" % (posX, posY, posZ))
if isinstance(colorValues, numpy.ndarray):
self.volumeEditor.pixelValuesLabel.setText("<b>R:</b> %03i <b>G:</b> %03i <b>B:</b> %03i" % (colorValues[0], colorValues[1], colorValues[2]))
else:
self.volumeEditor.pixelValuesLabel.setText("<b>Gray:</b> %03i" %int(colorValues))
def coordinateUnderCursor(self):
"""returns the coordinate that is defined by hovering with the mouse
over one of the slice views. It is _not_ the coordinate as defined
by the three slice views"""
posX = posY = posZ = -1
if self.axis == 0:
posY = self.x
posZ = self.y
posX = self.volumeEditor.selSlices[0]
elif self.axis == 1:
posY = self.volumeEditor.selSlices[1]
posZ = self.y
posX = self.x
else:
posY = self.y
posZ = self.volumeEditor.selSlices[2]
posX = self.x
return (posX, posY, posZ)
#TODO oli
def mouseMoveEvent(self,event):
if self.dragMode == True:
self.deltaPan = QtCore.QPointF(event.pos() - self.lastPanPoint)
self.panning()
self.lastPanPoint = event.pos()
return
if self.ticker.isActive():
return
self.mousePos = mousePos = self.mapToScene(event.pos())
x = self.x = mousePos.x()
y = self.y = mousePos.y()
#posX = 0
#posY = 0
#posZ = 0
if x > 0 and x < self.image.width() and y > 0 and y < self.image.height() and len(self.volumeEditor.overlayWidget.overlays) > 0:
#should we hide the cursor only when entering once ? performance?
#self.setCursor(self.hiddenCursor)
self.crossHairCursor.showXYPosition(x,y)
#self.crossHairCursor.setPos(x,y)
(posX, posY, posZ) = self.coordinateUnderCursor()
if self.axis == 0:
colorValues = self.volumeEditor.overlayWidget.getOverlayRef("Raw Data").getOverlaySlice(posX, 0, time=0, channel=0)._data[x,y]
self.updateInfoLabels(posX, posY, posZ, colorValues)
if len(self.volumeEditor.imageScenes) > 2:
yView = self.volumeEditor.imageScenes[1].crossHairCursor
zView = self.volumeEditor.imageScenes[2].crossHairCursor
yView.setVisible(False)
zView.showYPosition(x, y)
elif self.axis == 1:
colorValues = self.volumeEditor.overlayWidget.getOverlayRef("Raw Data").getOverlaySlice(posY, 1, time=0, channel=0)._data[x,y]
self.updateInfoLabels(posX, posY, posZ, colorValues)
xView = self.volumeEditor.imageScenes[0].crossHairCursor
zView = self.volumeEditor.imageScenes[2].crossHairCursor
zView.showXPosition(x, y)
xView.setVisible(False)
else:
colorValues = self.volumeEditor.overlayWidget.getOverlayRef("Raw Data").getOverlaySlice(posZ, 2, time=0, channel=0)._data[x,y]
self.updateInfoLabels(posX, posY, posZ, colorValues)
xView = self.volumeEditor.imageScenes[0].crossHairCursor
yView = self.volumeEditor.imageScenes[1].crossHairCursor
xView.showXPosition(y, x)
yView.showXPosition(x, y)
else:
self.unsetCursor()
if self.drawing == True:
line = self.drawManager.moveTo(mousePos)
line.setZValue(99)
self.tempImageItems.append(line)
self.scene.addItem(line)
def mouseDoubleClickEvent(self, event):
mousePos = self.mapToScene(event.pos())
x = mousePos.x()
y = mousePos.y()
if self.axis == 0:
self.volumeEditor.changeSlice(x, 1)
self.volumeEditor.changeSlice(y, 2)
elif self.axis == 1:
self.volumeEditor.changeSlice(x, 0)
self.volumeEditor.changeSlice(y, 2)
elif self.axis ==2:
self.volumeEditor.changeSlice(x, 0)
self.volumeEditor.changeSlice(y, 1)
def onContext(self, pos):
if type(self.volumeEditor.labelWidget) == DummyLabelWidget: return
self.volumeEditor.labelWidget.onImageSceneContext(self, pos)
def onContextSetLabel(self, i):
self.volumeEditor.labelWidget.listWidget.selectionModel().setCurrentIndex(i, QtGui.QItemSelectionModel.ClearAndSelect)
self.drawManager.updateCrossHair()
#*******************************************************************************
# O v e r v i e w S c e n e D u m m y *
#*******************************************************************************
class OverviewSceneDummy(QtGui.QWidget):
def __init__(self, parent, shape):
QtGui.QWidget.__init__(self)
pass
def display(self, axis):
pass
def redisplay(self):
pass
#*******************************************************************************
# O v e r v i e w S c e n e O l d *
#*******************************************************************************
class OverviewSceneOld(QtOpenGL.QGLWidget):
def __init__(self, parent, shape):
QtOpenGL.QGLWidget.__init__(self, shareWidget = parent.sharedOpenGLWidget)
self.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
self.sceneShape = shape
self.volumeEditor = parent
self.images = parent.imageScenes
self.sceneItems = []
self.initialized = False
self.tex = []
self.tex.append(-1)
self.tex.append(-1)
self.tex.append(-1)
if self.volumeEditor.sharedOpenGLWidget is None:
self.setVisible(False)
def display(self, axis):
if self.volumeEditor.sharedOpenGLWidget is not None:
if self.initialized is True:
#self.initializeGL()
self.makeCurrent()
self.paintGL(axis)
self.swapBuffers()
def redisplay(self):
if self.volumeEditor.sharedOpenGLWidget is not None:
if self.initialized is True:
for i in range(3):
self.makeCurrent()
self.paintGL(i)
self.swapBuffers()
def paintGL(self, axis = None):
if self.volumeEditor.sharedOpenGLWidget is not None:
'''
Drawing routine
'''
pix0 = self.images[0].pixmap
pix1 = self.images[1].pixmap
pix2 = self.images[2].pixmap
maxi = max(pix0.width(),pix1.width())
maxi = max(maxi, pix2.width())
maxi = max(maxi, pix0.height())
maxi = max(maxi, pix1.height())
maxi = max(maxi, pix2.height())
ratio0w = 1.0 * pix0.width() / maxi
ratio1w = 1.0 * pix1.width() / maxi
ratio2w = 1.0 * pix2.width() / maxi
ratio0h = 1.0 * pix0.height() / maxi
ratio1h = 1.0 * pix1.height() / maxi
ratio2h = 1.0 * pix2.height() / maxi
glMatrixMode(GL_MODELVIEW)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
glRotatef(30,1.0,0.0,0.0)
glTranslatef(0,-3,-5) # Move Into The Screen
glRotatef(-30,0.0,1.0,0.0) # Rotate The Cube On X, Y & Z
#glRotatef(180,1.0,0.0,1.0) # Rotate The Cube On X, Y & Z
glPolygonMode( GL_FRONT_AND_BACK, GL_LINE ) #wireframe mode
glBegin(GL_QUADS) # Start Drawing The Cube
glColor3f(1.0,0.0,1.0) # Set The Color To Violet
glVertex3f( ratio2w, ratio1h,-ratio2h) # Top Right Of The Quad (Top)
glVertex3f(-ratio2w, ratio1h,-ratio2h) # Top Left Of The Quad (Top)
glVertex3f(-ratio2w, ratio1h, ratio2h) # Bottom Left Of The Quad (Top)
glVertex3f( ratio2w, ratio1h, ratio2h) # Bottom Right Of The Quad (Top)
glVertex3f( ratio2w,-ratio1h, ratio2h) # Top Right Of The Quad (Bottom)
glVertex3f(-ratio2w,-ratio1h, ratio2h) # Top Left Of The Quad (Bottom)
glVertex3f(-ratio2w,-ratio1h,-ratio2h) # Bottom Left Of The Quad (Bottom)
glVertex3f( ratio2w,-ratio1h,-ratio2h) # Bottom Right Of The Quad (Bottom)
glVertex3f( ratio2w, ratio1h, ratio2h) # Top Right Of The Quad (Front)
glVertex3f(-ratio2w, ratio1h, ratio2h) # Top Left Of The Quad (Front)
glVertex3f(-ratio2w,-ratio1h, ratio2h) # Bottom Left Of The Quad (Front)
glVertex3f( ratio2w,-ratio1h, ratio2h) # Bottom Right Of The Quad (Front)
glVertex3f( ratio2w,-ratio1h,-ratio2h) # Bottom Left Of The Quad (Back)
glVertex3f(-ratio2w,-ratio1h,-ratio2h) # Bottom Right Of The Quad (Back)
glVertex3f(-ratio2w, ratio1h,-ratio2h) # Top Right Of The Quad (Back)
glVertex3f( ratio2w, ratio1h,-ratio2h) # Top Left Of The Quad (Back)
glVertex3f(-ratio2w, ratio1h, ratio2h) # Top Right Of The Quad (Left)
glVertex3f(-ratio2w, ratio1h,-ratio2h) # Top Left Of The Quad (Left)
glVertex3f(-ratio2w,-ratio1h,-ratio2h) # Bottom Left Of The Quad (Left)
glVertex3f(-ratio2w,-ratio1h, ratio2h) # Bottom Right Of The Quad (Left)
glVertex3f( ratio2w, ratio1h,-ratio2h) # Top Right Of The Quad (Right)
glVertex3f( ratio2w, ratio1h, ratio2h) # Top Left Of The Quad (Right)
glVertex3f( ratio2w,-ratio1h, ratio2h) # Bottom Left Of The Quad (Right)
glVertex3f( ratio2w,-ratio1h,-ratio2h) # Bottom Right Of The Quad (Right)
glEnd() # Done Drawing The Quad
curCenter = -(( 1.0 * self.volumeEditor.selSlices[2] / self.sceneShape[2] ) - 0.5 )*2.0*ratio1h
if axis == 2:
self.tex[2] = self.images[2].scene.tex
if self.tex[2] != -1:
glBindTexture(GL_TEXTURE_2D,self.tex[2])
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glPolygonMode( GL_FRONT_AND_BACK, GL_FILL ) #solid drawing mode
glBegin(GL_QUADS) #horizontal quad (e.g. first axis)
glColor3f(1.0,1.0,1.0) # Set The Color To White
glTexCoord2d(0.0, 0.0)
glVertex3f( -ratio2w,curCenter, -ratio2h) # Top Right Of The Quad
glTexCoord2d(1.0, 0.0)
glVertex3f(+ ratio2w,curCenter, -ratio2h) # Top Left Of The Quad
glTexCoord2d(1.0, 1.0)
glVertex3f(+ ratio2w,curCenter, + ratio2h) # Bottom Left Of The Quad
glTexCoord2d(0.0, 1.0)
glVertex3f( -ratio2w,curCenter, + ratio2h) # Bottom Right Of The Quad
glEnd()
glPolygonMode( GL_FRONT_AND_BACK, GL_LINE ) #wireframe mode
glBindTexture(GL_TEXTURE_2D,0) #unbind texture
glBegin(GL_QUADS)
glColor3f(0.0,0.0,1.0) # Set The Color To Blue, Z Axis
glVertex3f( ratio2w,curCenter, ratio2h) # Top Right Of The Quad (Bottom)
glVertex3f(- ratio2w,curCenter, ratio2h) # Top Left Of The Quad (Bottom)
glVertex3f(- ratio2w,curCenter,- ratio2h) # Bottom Left Of The Quad (Bottom)
glVertex3f( ratio2w,curCenter,- ratio2h) # Bottom Right Of The Quad (Bottom)
glEnd()
curCenter = (( (1.0 * self.volumeEditor.selSlices[0]) / self.sceneShape[0] ) - 0.5 )*2.0*ratio2w
if axis == 0:
self.tex[0] = self.images[0].scene.tex
if self.tex[0] != -1:
glBindTexture(GL_TEXTURE_2D,self.tex[0])
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glPolygonMode( GL_FRONT_AND_BACK, GL_FILL ) #solid drawing mode
glBegin(GL_QUADS)
glColor3f(0.8,0.8,0.8) # Set The Color To White
glTexCoord2d(1.0, 0.0)
glVertex3f(curCenter, ratio0h, ratio0w) # Top Right Of The Quad (Left)
glTexCoord2d(0.0, 0.0)
glVertex3f(curCenter, ratio0h, - ratio0w) # Top Left Of The Quad (Left)
glTexCoord2d(0.0, 1.0)
glVertex3f(curCenter,- ratio0h,- ratio0w) # Bottom Left Of The Quad (Left)
glTexCoord2d(1.0, 1.0)
glVertex3f(curCenter,- ratio0h, ratio0w) # Bottom Right Of The Quad (Left)
glEnd()
glPolygonMode( GL_FRONT_AND_BACK, GL_LINE ) #wireframe mode
glBindTexture(GL_TEXTURE_2D,0) #unbind texture
glBegin(GL_QUADS)
glColor3f(1.0,0.0,0.0) # Set The Color To Red,
glVertex3f(curCenter, ratio0h, ratio0w) # Top Right Of The Quad (Left)
glVertex3f(curCenter, ratio0h, - ratio0w) # Top Left Of The Quad (Left)
glVertex3f(curCenter,- ratio0h,- ratio0w) # Bottom Left Of The Quad (Left)
glVertex3f(curCenter,- ratio0h, ratio0w) # Bottom Right Of The Quad (Left)
glEnd()
curCenter = (( 1.0 * self.volumeEditor.selSlices[1] / self.sceneShape[1] ) - 0.5 )*2.0*ratio2h
if axis == 1:
self.tex[1] = self.images[1].scene.tex
if self.tex[1] != -1:
glBindTexture(GL_TEXTURE_2D,self.tex[1])
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glPolygonMode( GL_FRONT_AND_BACK, GL_FILL ) #solid drawing mode
glBegin(GL_QUADS)
glColor3f(0.6,0.6,0.6) # Set The Color To White
glTexCoord2d(1.0, 0.0)
glVertex3f( ratio1w, ratio1h, curCenter) # Top Right Of The Quad (Front)
glTexCoord2d(0.0, 0.0)
glVertex3f(- ratio1w, ratio1h, curCenter) # Top Left Of The Quad (Front)
glTexCoord2d(0.0, 1.0)
glVertex3f(- ratio1w,- ratio1h, curCenter) # Bottom Left Of The Quad (Front)
glTexCoord2d(1.0, 1.0)
glVertex3f( ratio1w,- ratio1h, curCenter) # Bottom Right Of The Quad (Front)
glEnd()
glPolygonMode( GL_FRONT_AND_BACK, GL_LINE ) #wireframe mode
glBindTexture(GL_TEXTURE_2D,0) #unbind texture
glBegin(GL_QUADS)
glColor3f(0.0,1.0,0.0) # Set The Color To Green
glVertex3f( ratio1w, ratio1h, curCenter) # Top Right Of The Quad (Front)
glVertex3f(- ratio1w, ratio1h, curCenter) # Top Left Of The Quad (Front)
glVertex3f(- ratio1w,- ratio1h, curCenter) # Bottom Left Of The Quad (Front)
glVertex3f( ratio1w,- ratio1h, curCenter) # Bottom Right Of The Quad (Front)
glEnd()
glFlush()
def resizeGL(self, w, h):
'''
Resize the GL window
'''
glViewport(0, 0, w, h)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(40.0, 1.0, 1.0, 30.0)
def initializeGL(self):
'''
Initialize GL
'''
# set viewing projection
glClearColor(0.0, 0.0, 0.0, 1.0)
glClearDepth(1.0)
glDepthFunc(GL_LESS) # The Type Of Depth Test To Do
glEnable(GL_DEPTH_TEST) # Enables Depth Testing
glShadeModel(GL_SMOOTH) # Enables Smooth Color Shading
glEnable(GL_TEXTURE_2D)
glLineWidth( 2.0 );
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(40.0, 1.0, 1.0, 30.0)
self.initialized = True
#class OverviewScene2(QtGui.QGraphicsView):
# def __init__(self, images):
# QtGui.QGraphicsView.__init__(self)
# self.scene = QtGui.QGraphicsScene(self)
## self.scene.setSceneRect(0,0, imShape[0],imShape[1])
# self.setScene(self.scene)
# self.setRenderHint(QtGui.QPainter.Antialiasing)
# self.images = images
# self.sceneItems = []
#
# def display(self):
# for index, item in enumerate(self.sceneItems):
# self.scene.removeItem(item)
# del item
# self.sceneItems = []
# self.sceneItems.append(QtGui.QGraphicsPixmapItem(self.images[0].pixmap))
# self.sceneItems.append(QtGui.QGraphicsPixmapItem(self.images[1].pixmap))
# self.sceneItems.append(QtGui.QGraphicsPixmapItem(self.images[2].pixmap))
# for index, item in enumerate(self.sceneItems):
# self.scene.addItem(item)
def test():
"""Text editor demo"""
app = QtGui.QApplication([""])
im = (numpy.random.rand(1024,1024)*255).astype(numpy.uint8)
im[0:10,0:10] = 255
dialog = VolumeEditor(im)
dialog.show()
app.exec_()
del app
app = QtGui.QApplication([""])
im = (numpy.random.rand(128,128,128)*255).astype(numpy.uint8)
im[0:10,0:10,0:10] = 255
dialog = VolumeEditor(im)
dialog.show()
app.exec_()
#*******************************************************************************
# i f _ _ n a m e _ _ = = " _ _ m a i n _ _ " *
#*******************************************************************************
if __name__ == "__main__":
test()
| bsd-2-clause |
emawind84/readthedocs.org | readthedocs/rtd_tests/tests/test_api_version_compare.py | 34 | 1173 | from django.test import TestCase
from readthedocs.builds.constants import LATEST
from readthedocs.projects.models import Project
from readthedocs.restapi.views.footer_views import get_version_compare_data
class VersionCompareTests(TestCase):
fixtures = ['eric.json', 'test_data.json']
def test_not_highest(self):
project = Project.objects.get(slug='read-the-docs')
version = project.versions.get(slug='0.2.1')
data = get_version_compare_data(project, version)
self.assertEqual(data['is_highest'], False)
def test_latest_version_highest(self):
project = Project.objects.get(slug='read-the-docs')
data = get_version_compare_data(project)
self.assertEqual(data['is_highest'], True)
version = project.versions.get(slug=LATEST)
data = get_version_compare_data(project, version)
self.assertEqual(data['is_highest'], True)
def test_real_highest(self):
project = Project.objects.get(slug='read-the-docs')
version = project.versions.get(slug='0.2.2')
data = get_version_compare_data(project, version)
self.assertEqual(data['is_highest'], True)
| mit |
wmfs/chimp | src/chimp.py | 1 | 4690 | # http://diveintopython3.org/xml.html
import chimpinstall
import chimpbuild
import chimpclean
import chimpcreate
import extract
import settings as chimpSettings
from taskqueue.Queue import Queue
from taskqueue.StreamProcessor import StreamProcessor
from load.Loader import Loader
from taskqueue.Queuer import Queuer
import calc.solr as solr
import chimptools
settings = chimpSettings.Settings()
command = settings.args.command
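# sub-commands handled below: install, build, import, queue, clean, create, extract,
# tool, compute and solr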
if command == "install":
chimpinstall.installChimp(settings, settings.args.zones)
elif command == "build":
install = settings.args.reinstall or settings.args.install
drop = settings.args.reinstall or settings.args.drop
if settings.args.specification is not None:
chimpbuild.buildSpecificationScripts(settings, install, drop)
if settings.args.solrserver is not None:
chimpbuild.buildSolrServerScripts(settings, settings.args.solrserver, install,drop)
elif command == "import":
supportConnection = settings.db.makeConnection("support")
supportCursor = supportConnection.makeCursor("supportCursor", False, False)
queue = Queue(supportConnection, supportCursor, settings)
queuer = Queuer(supportConnection, supportCursor, settings, queue)
queuer.queueImport(settings.args.groupid)
if settings.args.postimportcompute in("specification", "full"):
if settings.args.postimportcompute=="specification":
restriction = settings.specification.name
else:
restriction = None
queuer.queueCalculation(None, restriction, settings.args.streamname, settings.args.groupid)
# supportConnection = settings.db.makeConnection("support")
# supportCursor = supportConnection.makeCursor("supportCursor", False, False)
# queue = Queue(supportConnection, supportCursor, settings)
# queuer = Queuer(supportConnection, supportCursor, settings, queue)
# queuer.queueCalculation(settings.args.groupid)
# if not settings.args.deferprocessing:
# loader = Loader(supportConnection, supportCursor, settings, queue)
# StreamProcessor(supportConnection, supportCursor, settings, queue, loader).processStream(False)
# queue.close()
if not settings.args.deferprocessing:
loader = Loader(supportConnection, supportCursor, settings, queue)
StreamProcessor(supportConnection, supportCursor, settings, queue, loader).processStream(False)
queue.close()
elif command == "queue":
supportConnection = settings.db.makeConnection("support")
supportCursor = supportConnection.makeCursor("supportCursor", False, False)
queue = Queue(supportConnection, supportCursor, settings)
if settings.args.action=="clear":
queue.clear()
elif settings.args.action=="restart":
loader = Loader(supportConnection, supportCursor, settings, queue)
StreamProcessor(supportConnection, supportCursor, settings, queue, loader).processStream(True)
elif settings.args.action=="stop":
queue.stop(settings.args.streamname)
queue.close()
elif command == "clean":
chimpclean.clean(settings, force=settings.args.force);
elif command == "create":
if settings.args.entitytype == "specification":
chimpcreate.createSpecification(settings, settings.args.name)
elif settings.args.entitytype == "solrserver":
chimpcreate.createSolrServer(settings, settings.args.name)
elif settings.args.entitytype == "repository":
chimpcreate.createRepository(settings)
elif command=="extract":
extractProcessor = extract.Extract(settings)
extractProcessor.debug(settings.appLogger)
elif command=="tool":
toolProcessor = chimptools.runTool(settings)
if command=="compute":
supportConnection = settings.db.makeConnection("support")
supportCursor = supportConnection.makeCursor("supportCursor", False, False)
queue = Queue(supportConnection, supportCursor, settings)
queuer = Queuer(supportConnection, supportCursor, settings, queue)
queuer.queueCalculation(settings.args.restriction, settings.args.specificationrestriction, settings.args.streamname, settings.args.groupid)
if not settings.args.deferprocessing:
loader = Loader(supportConnection, supportCursor, settings, queue)
StreamProcessor(supportConnection, supportCursor, settings, queue, loader).processStream(False)
queue.close()
if command == "solr":
solrServer = solr.SolrServer(settings.paths["config"], settings.args.server)
solrServer.debug(settings.appLogger)
solrServer.export(settings.appLogger)
print("Done.")
| gpl-3.0 |
abloomston/sympy | sympy/utilities/tests/test_lambdify.py | 4 | 16354 | from sympy.utilities.pytest import XFAIL, raises
from sympy import (
symbols, lambdify, sqrt, sin, cos, tan, pi, acos, acosh, Rational,
Float, Matrix, Lambda, exp, Integral, oo, I, Abs, Function, true, false)
from sympy.printing.lambdarepr import LambdaPrinter
import mpmath
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.pytest import skip
from sympy.utilities.decorator import conserve_mpmath_dps
from sympy.external import import_module
import math
import sympy
MutableDenseMatrix = Matrix
numpy = import_module('numpy')
numexpr = import_module('numexpr')
w, x, y, z = symbols('w,x,y,z')
#================== Test different arguments =======================
def test_no_args():
f = lambdify([], 1)
raises(TypeError, lambda: f(-1))
assert f() == 1
def test_single_arg():
f = lambdify(x, 2*x)
assert f(1) == 2
def test_list_args():
f = lambdify([x, y], x + y)
assert f(1, 2) == 3
def test_str_args():
f = lambdify('x,y,z', 'z,y,x')
assert f(3, 2, 1) == (1, 2, 3)
assert f(1.0, 2.0, 3.0) == (3.0, 2.0, 1.0)
# make sure correct number of args required
raises(TypeError, lambda: f(0))
def test_own_namespace():
myfunc = lambda x: 1
f = lambdify(x, sin(x), {"sin": myfunc})
assert f(0.1) == 1
assert f(100) == 1
def test_own_module():
f = lambdify(x, sin(x), math)
assert f(0) == 0.0
def test_bad_args():
# no vargs given
raises(TypeError, lambda: lambdify(1))
# same with vector exprs
raises(TypeError, lambda: lambdify([1, 2]))
def test_atoms():
# Non-Symbol atoms should not be pulled out from the expression namespace
f = lambdify(x, pi + x, {"pi": 3.14})
assert f(0) == 3.14
f = lambdify(x, I + x, {"I": 1j})
assert f(1) == 1 + 1j
#================== Test different modules =========================
# high precision output of sin(0.2*pi) is used to detect if precision is lost unintentionally
@conserve_mpmath_dps
def test_sympy_lambda():
mpmath.mp.dps = 50
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin(x), "sympy")
assert f(x) == sin(x)
prec = 1e-15
assert -prec < f(Rational(1, 5)).evalf() - Float(str(sin02)) < prec
# arctan is in numpy module and should not be available
raises(NameError, lambda: lambdify(x, arctan(x), "sympy"))
@conserve_mpmath_dps
def test_math_lambda():
mpmath.mp.dps = 50
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin(x), "math")
prec = 1e-15
assert -prec < f(0.2) - sin02 < prec
raises(TypeError, lambda: f(x))
# if this succeeds, it can't be a python math function
@conserve_mpmath_dps
def test_mpmath_lambda():
mpmath.mp.dps = 50
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin(x), "mpmath")
prec = 1e-49 # mpmath precision is around 50 decimal places
assert -prec < f(mpmath.mpf("0.2")) - sin02 < prec
raises(TypeError, lambda: f(x))
# if this succeeds, it can't be a mpmath function
@conserve_mpmath_dps
@XFAIL
def test_number_precision():
mpmath.mp.dps = 50
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin02, "mpmath")
prec = 1e-49 # mpmath precision is around 50 decimal places
assert -prec < f(0) - sin02 < prec
#================== Test Translations ==============================
# We can only check if all translated functions are valid. It has to be checked
# by hand if they are complete.
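# (Added illustrative note) Each *_TRANSLATIONS table maps a SymPy name to the
# corresponding backend name (for the math module this would be an entry along
# the lines of 'Abs' -> 'fabs'); the exact contents live in
# sympy.utilities.lambdify and are what the loops below iterate over.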
def test_math_transl():
from sympy.utilities.lambdify import MATH_TRANSLATIONS
for sym, mat in MATH_TRANSLATIONS.items():
assert sym in sympy.__dict__
assert mat in math.__dict__
def test_mpmath_transl():
from sympy.utilities.lambdify import MPMATH_TRANSLATIONS
for sym, mat in MPMATH_TRANSLATIONS.items():
assert sym in sympy.__dict__ or sym == 'Matrix'
assert mat in mpmath.__dict__
def test_numpy_transl():
if not numpy:
skip("numpy not installed.")
from sympy.utilities.lambdify import NUMPY_TRANSLATIONS
for sym, nump in NUMPY_TRANSLATIONS.items():
assert sym in sympy.__dict__
assert nump in numpy.__dict__
def test_numpy_translation_abs():
if not numpy:
skip("numpy not installed.")
f = lambdify(x, Abs(x), "numpy")
assert f(-1) == 1
assert f(1) == 1
def test_numexpr_printer():
if not numexpr:
skip("numexpr not installed.")
# if translation/printing is done incorrectly then evaluating
# a lambdified numexpr expression will throw an exception
from sympy.printing.lambdarepr import NumExprPrinter
from sympy import S
blacklist = ('where', 'complex', 'contains')
arg_tuple = (x, y, z) # some functions take more than one argument
for sym in NumExprPrinter._numexpr_functions.keys():
if sym in blacklist:
continue
ssym = S(sym)
if hasattr(ssym, '_nargs'):
nargs = ssym._nargs[0]
else:
nargs = 1
args = arg_tuple[:nargs]
f = lambdify(args, ssym(*args), modules='numexpr')
assert f(*(1, )*nargs) is not None
def test_issue_9334():
if not numexpr:
skip("numexpr not installed.")
if not numpy:
skip("numpy not installed.")
expr = sympy.S('b*a - sqrt(a**2)')
a, b = sorted(expr.free_symbols, key=lambda s: s.name)
func_numexpr = lambdify((a,b), expr, modules=[numexpr], dummify=False)
foo, bar = numpy.random.random((2, 4))
func_numexpr(foo, bar)
#================== Test some functions ============================
def test_exponentiation():
f = lambdify(x, x**2)
assert f(-1) == 1
assert f(0) == 0
assert f(1) == 1
assert f(-2) == 4
assert f(2) == 4
assert f(2.5) == 6.25
def test_sqrt():
f = lambdify(x, sqrt(x))
assert f(0) == 0.0
assert f(1) == 1.0
assert f(4) == 2.0
assert abs(f(2) - 1.414) < 0.001
assert f(6.25) == 2.5
def test_trig():
f = lambdify([x], [cos(x), sin(x)])
d = f(pi)
prec = 1e-11
assert -prec < d[0] + 1 < prec
assert -prec < d[1] < prec
d = f(3.14159)
prec = 1e-5
assert -prec < d[0] + 1 < prec
assert -prec < d[1] < prec
#================== Test vectors ===================================
def test_vector_simple():
f = lambdify((x, y, z), (z, y, x))
assert f(3, 2, 1) == (1, 2, 3)
assert f(1.0, 2.0, 3.0) == (3.0, 2.0, 1.0)
# make sure correct number of args required
raises(TypeError, lambda: f(0))
def test_vector_discontinuous():
f = lambdify(x, (-1/x, 1/x))
raises(ZeroDivisionError, lambda: f(0))
assert f(1) == (-1.0, 1.0)
assert f(2) == (-0.5, 0.5)
assert f(-2) == (0.5, -0.5)
def test_trig_symbolic():
f = lambdify([x], [cos(x), sin(x)])
d = f(pi)
assert abs(d[0] + 1) < 0.0001
assert abs(d[1] - 0) < 0.0001
def test_trig_float():
f = lambdify([x], [cos(x), sin(x)])
d = f(3.14159)
assert abs(d[0] + 1) < 0.0001
assert abs(d[1] - 0) < 0.0001
def test_docs():
f = lambdify(x, x**2)
assert f(2) == 4
f = lambdify([x, y, z], [z, y, x])
assert f(1, 2, 3) == [3, 2, 1]
f = lambdify(x, sqrt(x))
assert f(4) == 2.0
f = lambdify((x, y), sin(x*y)**2)
assert f(0, 5) == 0
def test_math():
f = lambdify((x, y), sin(x), modules="math")
assert f(0, 5) == 0
def test_sin():
f = lambdify(x, sin(x)**2)
assert isinstance(f(2), float)
f = lambdify(x, sin(x)**2, modules="math")
assert isinstance(f(2), float)
def test_matrix():
A = Matrix([[x, x*y], [sin(z) + 4, x**z]])
sol = Matrix([[1, 2], [sin(3) + 4, 1]])
f = lambdify((x, y, z), A, modules="sympy")
assert f(1, 2, 3) == sol
f = lambdify((x, y, z), (A, [A]), modules="sympy")
assert f(1, 2, 3) == (sol, [sol])
J = Matrix((x, x + y)).jacobian((x, y))
v = Matrix((x, y))
sol = Matrix([[1, 0], [1, 1]])
assert lambdify(v, J, modules='sympy')(1, 2) == sol
assert lambdify(v.T, J, modules='sympy')(1, 2) == sol
def test_numpy_matrix():
if not numpy:
skip("numpy not installed.")
A = Matrix([[x, x*y], [sin(z) + 4, x**z]])
sol_arr = numpy.array([[1, 2], [numpy.sin(3) + 4, 1]])
#Lambdify array first, to ensure return to matrix as default
f = lambdify((x, y, z), A, [{'ImmutableMatrix': numpy.array}, 'numpy'])
numpy.testing.assert_allclose(f(1, 2, 3), sol_arr)
#Check that the types are arrays and matrices
assert isinstance(f(1, 2, 3), numpy.ndarray)
def test_numpy_numexpr():
if not numpy:
skip("numpy not installed.")
if not numexpr:
skip("numexpr not installed.")
a, b, c = numpy.random.randn(3, 128, 128)
# ensure that numpy and numexpr return same value for complicated expression
expr = sin(x) + cos(y) + tan(z)**2 + Abs(z-y)*acos(sin(y*z)) + \
Abs(y-z)*acosh(2+exp(y-x))- sqrt(x**2+I*y**2)
npfunc = lambdify((x, y, z), expr, modules='numpy')
nefunc = lambdify((x, y, z), expr, modules='numexpr')
assert numpy.allclose(npfunc(a, b, c), nefunc(a, b, c))
def test_numexpr_userfunctions():
if not numpy:
skip("numpy not installed.")
if not numexpr:
skip("numexpr not installed.")
a, b = numpy.random.randn(2, 10)
uf = type('uf', (Function, ),
{'eval' : classmethod(lambda x, y : y**2+1)})
func = lambdify(x, 1-uf(x), modules='numexpr')
assert numpy.allclose(func(a), -(a**2))
uf = implemented_function(Function('uf'), lambda x, y : 2*x*y+1)
func = lambdify((x, y), uf(x, y), modules='numexpr')
assert numpy.allclose(func(a, b), 2*a*b+1)
def test_integral():
f = Lambda(x, exp(-x**2))
l = lambdify(x, Integral(f(x), (x, -oo, oo)), modules="sympy")
assert l(x) == Integral(exp(-x**2), (x, -oo, oo))
#================== Test symbolic ==================================
def test_sym_single_arg():
f = lambdify(x, x * y)
assert f(z) == z * y
def test_sym_list_args():
f = lambdify([x, y], x + y + z)
assert f(1, 2) == 3 + z
def test_sym_integral():
f = Lambda(x, exp(-x**2))
l = lambdify(x, Integral(f(x), (x, -oo, oo)), modules="sympy")
assert l(y).doit() == sqrt(pi)
def test_namespace_order():
# lambdify had a bug, such that module dictionaries or cached module
# dictionaries would pull earlier namespaces into themselves.
# Because the module dictionaries form the namespace of the
# generated lambda, this meant that the behavior of a previously
# generated lambda function could change as a result of later calls
# to lambdify.
n1 = {'f': lambda x: 'first f'}
n2 = {'f': lambda x: 'second f',
'g': lambda x: 'function g'}
f = sympy.Function('f')
g = sympy.Function('g')
if1 = lambdify(x, f(x), modules=(n1, "sympy"))
assert if1(1) == 'first f'
if2 = lambdify(x, g(x), modules=(n2, "sympy"))
# previously gave 'second f'
assert if1(1) == 'first f'
def test_imps():
# Here we check if the default returned functions are anonymous - in
# the sense that we can have more than one function with the same name
f = implemented_function('f', lambda x: 2*x)
g = implemented_function('f', lambda x: math.sqrt(x))
l1 = lambdify(x, f(x))
l2 = lambdify(x, g(x))
assert str(f(x)) == str(g(x))
assert l1(3) == 6
assert l2(3) == math.sqrt(3)
# check that we can pass in a Function as input
func = sympy.Function('myfunc')
assert not hasattr(func, '_imp_')
my_f = implemented_function(func, lambda x: 2*x)
assert hasattr(func, '_imp_')
# Error for functions with same name and different implementation
f2 = implemented_function("f", lambda x: x + 101)
raises(ValueError, lambda: lambdify(x, f(f2(x))))
def test_imps_wrong_args():
raises(ValueError, lambda: implemented_function(sin, lambda x: x))
def test_lambdify_imps():
# Test lambdify with implemented functions
# first test basic (sympy) lambdify
f = sympy.cos
assert lambdify(x, f(x))(0) == 1
assert lambdify(x, 1 + f(x))(0) == 2
assert lambdify((x, y), y + f(x))(0, 1) == 2
# make an implemented function and test
f = implemented_function("f", lambda x: x + 100)
assert lambdify(x, f(x))(0) == 100
assert lambdify(x, 1 + f(x))(0) == 101
assert lambdify((x, y), y + f(x))(0, 1) == 101
# Can also handle tuples, lists, dicts as expressions
lam = lambdify(x, (f(x), x))
assert lam(3) == (103, 3)
lam = lambdify(x, [f(x), x])
assert lam(3) == [103, 3]
lam = lambdify(x, [f(x), (f(x), x)])
assert lam(3) == [103, (103, 3)]
lam = lambdify(x, {f(x): x})
assert lam(3) == {103: 3}
lam = lambdify(x, {f(x): x})
assert lam(3) == {103: 3}
lam = lambdify(x, {x: f(x)})
assert lam(3) == {3: 103}
# Check that imp preferred to other namespaces by default
d = {'f': lambda x: x + 99}
lam = lambdify(x, f(x), d)
assert lam(3) == 103
# Unless flag passed
lam = lambdify(x, f(x), d, use_imps=False)
assert lam(3) == 102
def test_dummification():
t = symbols('t')
F = Function('F')
G = Function('G')
#"\alpha" is not a valid python variable name
#lambdify should sub in a dummy for it, and return
#without a syntax error
alpha = symbols(r'\alpha')
some_expr = 2 * F(t)**2 / G(t)
lam = lambdify((F(t), G(t)), some_expr)
assert lam(3, 9) == 2
lam = lambdify(sin(t), 2 * sin(t)**2)
assert lam(F(t)) == 2 * F(t)**2
#Test that \alpha was properly dummified
lam = lambdify((alpha, t), 2*alpha + t)
assert lam(2, 1) == 5
raises(SyntaxError, lambda: lambdify(F(t) * G(t), F(t) * G(t) + 5))
raises(SyntaxError, lambda: lambdify(2 * F(t), 2 * F(t) + 5))
raises(SyntaxError, lambda: lambdify(2 * F(t), 4 * F(t) + 5))
def test_python_keywords():
# Test for issue 7452. The automatic dummification should ensure use of
# Python reserved keywords as symbol names will create valid lambda
# functions. This is an additional regression test.
python_if = symbols('if')
expr = python_if / 2
f = lambdify(python_if, expr)
assert f(4.0) == 2.0
def test_lambdify_docstring():
func = lambdify((w, x, y, z), w + x + y + z)
assert func.__doc__ == (
"Created with lambdify. Signature:\n\n"
"func(w, x, y, z)\n\n"
"Expression:\n\n"
"w + x + y + z")
syms = symbols('a1:26')
func = lambdify(syms, sum(syms))
assert func.__doc__ == (
"Created with lambdify. Signature:\n\n"
"func(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,\n"
" a16, a17, a18, a19, a20, a21, a22, a23, a24, a25)\n\n"
"Expression:\n\n"
"a1 + a10 + a11 + a12 + a13 + a14 + a15 + a16 + a17 + a18 + a19 + a2 + a20 +...")
#================== Test special printers ==========================
def test_special_printers():
class IntervalPrinter(LambdaPrinter):
"""Use ``lambda`` printer but print numbers as ``mpi`` intervals. """
def _print_Integer(self, expr):
return "mpi('%s')" % super(IntervalPrinter, self)._print_Integer(expr)
def _print_Rational(self, expr):
return "mpi('%s')" % super(IntervalPrinter, self)._print_Rational(expr)
def intervalrepr(expr):
return IntervalPrinter().doprint(expr)
expr = sympy.sqrt(sympy.sqrt(2) + sympy.sqrt(3)) + sympy.S(1)/2
func0 = lambdify((), expr, modules="mpmath", printer=intervalrepr)
func1 = lambdify((), expr, modules="mpmath", printer=IntervalPrinter)
func2 = lambdify((), expr, modules="mpmath", printer=IntervalPrinter())
mpi = type(mpmath.mpi(1, 2))
assert isinstance(func0(), mpi)
assert isinstance(func1(), mpi)
assert isinstance(func2(), mpi)
def test_true_false():
# We want exact is comparison here, not just ==
assert lambdify([], true)() is True
assert lambdify([], false)() is False
def test_issue_2790():
assert lambdify((x, (y, z)), x + y)(1, (2, 4)) == 3
assert lambdify((x, (y, (w, z))), w + x + y + z)(1, (2, (3, 4))) == 10
assert lambdify(x, x + 1, dummify=False)(1) == 2
| bsd-3-clause |
CospanDesign/nysa | nysa/host/driver/gpio.py | 1 | 6941 | #Distributed under the MIT license.
#Copyright (c) 2011 Dave McCoy ([email protected])
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
#of the Software, and to permit persons to whom the Software is furnished to do
#so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
"""
GPIO
Facilitates communication with the GPIO core
For more details see:
http://wiki.cospandesign.com/index.php?title=Wb_gpio
TODO: Implement Debounce
"""
__author__ = "[email protected] (Dave McCoy)"
import sys
import os
import time
from array import array as Array
sys.path.append(os.path.join(os.path.dirname(__file__),
os.pardir))
import driver
#Sub Module ID
COSPAN_DESIGN_GPIO_MODULE = 0x01
#Register Constants
GPIO_PORT = 0x00000000
GPIO_OUTPUT_ENABLE = 0x00000001
INTERRUPTS = 0x00000002
INTERRUPT_ENABLE = 0x00000003
INTERRUPT_EDGE = 0x00000004
INTERRUPT_BOTH_EDGE = 0x00000005
INTERRUPT_TIMEOUT = 0x00000006
READ_CLOCK_RATE = 0x00000007
class GPIO(driver.Driver):
""" GPIO
Communication with a GPIO Core
"""
@staticmethod
def get_abi_class():
return 0
@staticmethod
def get_abi_major():
return driver.get_device_id_from_name("gpio")
@staticmethod
def get_abi_minor():
return COSPAN_DESIGN_GPIO_MODULE
@staticmethod
def get_vendor_id():
return 0x800000000000C594
@staticmethod
def get_device_id():
return 0x00000002
def __init__(self, nysa, urn, debug = False):
super(GPIO, self).__init__(nysa, urn, debug)
def set_port_direction(self, direction):
"""set_port_direction
Sets the direction of the port
Args:
direction: 32-bit value that will set the direction of all the ports
Return:
Nothing
Raises:
NysaCommError
"""
if self.debug:
print "Writing GPIO Direction"
self.write_register(GPIO_OUTPUT_ENABLE, direction)
def get_port_direction(self):
"""get_port_direction
Gets the direction of the port
Args:
Nothing
Return (Integer):
32-bit value that will set the direction of all the ports
Raises:
NysaCommError
"""
if self.debug:
print "Reading GPIO Direction"
return self.read_register(GPIO_OUTPUT_ENABLE)
def set_port_raw(self, value):
"""set_port_raw
set multiple GPIO output values
Args:
value: 32-bit value that will replace the current ports
Returns:
Nothing
Raises:
NysaCommError
"""
self.write_register(GPIO_PORT, value)
def get_port_raw(self):
"""get_port_raw
Get multiple GPIO input values
Args:
Nothing
Return:
32-bit value representing the port values
Raises:
NysaCommError
"""
return self.read_register(GPIO_PORT)
def set_bit_value(self, bit, value):
"""set_bit_value
Sets an individual bit with the specified value (1, or 0)
Args:
bit: the bit of the port to set
value: 1 or 0
Return:
Nothing
Raises:
NysaCommError
"""
if self.debug:
print "Setting individual bit value"
self.enable_register_bit(GPIO_PORT, bit, value)
def get_bit_value(self, bit):
"""get_bit_value
Gets an individual bit value
Args:
bit
Returns:
1, 0
Raises:
NysaCommError
"""
if self.debug:
print "Getting individual bit value"
return self.is_register_bit_set(GPIO_PORT, bit)
def set_interrupt_enable(self, interrupt_enable):
"""set_interrupt_enable
Enables/Disables interrupts
Args:
interrupt_enable: 32-bit enable (1), disable(0) mask
Return:
Nothing
Raises:
            NysaCommError
"""
self.write_register(INTERRUPT_ENABLE, interrupt_enable)
def get_interrupt_enable(self):
"""get_interrupt_enable
Returns the interrupt mask
Args:
Nothing
Returns:
32-bit interrupt mask value
Raises:
Nothing
"""
return self.read_register(INTERRUPT_ENABLE)
def set_interrupt_edge(self, interrupt_edge):
"""set_interrupt_edge
Interrupt triggers on high (1) or low (0)
Args:
            interrupt_edge: 32-bit enable (1), disable (0) mask
Return:
Nothing
Raises:
NysaCommError
"""
self.write_register(INTERRUPT_EDGE, interrupt_edge)
def get_interrupt_edge(self):
"""get_interrupt_edge
Returns the interrupt level
Args:
Nothing
Returns:
            32-bit value containing the interrupt level
Raises:
NysaCommError
"""
return self.read_register(INTERRUPT_EDGE)
def set_interrupt_both_edge(self, interrupt_both_edge):
self.write_register(INTERRUPT_BOTH_EDGE, interrupt_both_edge)
def get_interrupt_both_edge(self):
return self.read_register(INTERRUPT_BOTH_EDGE)
def set_interrupt_timeout(self, interrupt_timeout):
        self.write_register(INTERRUPT_TIMEOUT, interrupt_timeout)
def get_interrupt_timeout(self):
return self.read_register(INTERRUPT_TIMEOUT)
def get_clock_rate(self):
return self.read_register(READ_CLOCK_RATE)
def get_interrupts(self):
"""get_interrupts
Returns a 32-bit value representing the interrupts on the specified pins
Args:
Nothing
Returns:
32-bit value containing the interrupts
Raises:
NysaCommError
"""
return self.read_register(INTERRUPTS)
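# --- Illustrative usage sketch (added; not part of the original driver). ---
# "nysa" and "urn" are placeholders for an already-discovered host object and
# device URN; the method names are the ones defined on GPIO above.
#
#   gpio = GPIO(nysa, urn)
#   gpio.set_port_direction(0x0000FFFF)    # lower 16 bits as outputs
#   gpio.set_bit_value(0, 1)               # drive bit 0 high
#   state = gpio.get_port_raw()            # read all 32 pins at once
#   gpio.set_interrupt_enable(1 << 1)      # enable the interrupt for pin 1
#   pending = gpio.get_interrupts()        # see which pins fired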
| mit |
ariakerstein/twitterFlaskClone | project/lib/python2.7/site-packages/werkzeug/contrib/cache.py | 252 | 27983 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.cache
~~~~~~~~~~~~~~~~~~~~~~
The main problem with dynamic Web sites is, well, they're dynamic. Each
time a user requests a page, the webserver executes a lot of code, queries
the database, renders templates until the visitor gets the page he sees.
This is a lot more expensive than just loading a file from the file system
and sending it to the visitor.
    For most Web applications, this overhead isn't a big deal, but once it
    is, you will be glad to have a cache system in place.
How Caching Works
=================
Caching is pretty simple. Basically you have a cache object lurking around
somewhere that is connected to a remote cache or the file system or
something else. When the request comes in you check if the current page
is already in the cache and if so, you're returning it from the cache.
Otherwise you generate the page and put it into the cache. (Or a fragment
of the page, you don't have to cache the full thing)
Here is a simple example of how to cache a sidebar for a template::
def get_sidebar(user):
identifier = 'sidebar_for/user%d' % user.id
value = cache.get(identifier)
if value is not None:
return value
value = generate_sidebar_for(user=user)
cache.set(identifier, value, timeout=60 * 5)
return value
Creating a Cache Object
=======================
To create a cache object you just import the cache system of your choice
from the cache module and instantiate it. Then you can start working
with that object:
>>> from werkzeug.contrib.cache import SimpleCache
>>> c = SimpleCache()
>>> c.set("foo", "value")
>>> c.get("foo")
'value'
>>> c.get("missing") is None
True
Please keep in mind that you have to create the cache and put it somewhere
you have access to it (either as a module global you can import or you just
put it into your WSGI application).
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import re
import errno
import tempfile
from hashlib import md5
from time import time
try:
import cPickle as pickle
except ImportError: # pragma: no cover
import pickle
from werkzeug._compat import iteritems, string_types, text_type, \
integer_types, to_native
from werkzeug.posixemulation import rename
def _items(mappingorseq):
"""Wrapper for efficient iteration over mappings represented by dicts
or sequences::
>>> for k, v in _items((i, i*i) for i in xrange(5)):
... assert k*k == v
>>> for k, v in _items(dict((i, i*i) for i in xrange(5))):
... assert k*k == v
"""
if hasattr(mappingorseq, 'items'):
return iteritems(mappingorseq)
return mappingorseq
class BaseCache(object):
"""Baseclass for the cache systems. All the cache systems implement this
API or a superset of it.
:param default_timeout: the default timeout (in seconds) that is used if no
timeout is specified on :meth:`set`. A timeout of 0
indicates that the cache never expires.
"""
def __init__(self, default_timeout=300):
self.default_timeout = default_timeout
def get(self, key):
"""Look up key in the cache and return the value for it.
:param key: the key to be looked up.
:returns: The value if it exists and is readable, else ``None``.
"""
return None
def delete(self, key):
"""Delete `key` from the cache.
:param key: the key to delete.
:returns: Whether the key existed and has been deleted.
:rtype: boolean
"""
return True
def get_many(self, *keys):
"""Returns a list of values for the given keys.
        For each key an item in the list is created::
foo, bar = cache.get_many("foo", "bar")
Has the same error handling as :meth:`get`.
:param keys: The function accepts multiple keys as positional
arguments.
"""
return map(self.get, keys)
def get_dict(self, *keys):
"""Like :meth:`get_many` but return a dict::
d = cache.get_dict("foo", "bar")
foo = d["foo"]
bar = d["bar"]
:param keys: The function accepts multiple keys as positional
arguments.
"""
return dict(zip(keys, self.get_many(*keys)))
def set(self, key, value, timeout=None):
"""Add a new key/value to the cache (overwrites value, if key already
exists in the cache).
:param key: the key to set
:param value: the value for the key
:param timeout: the cache timeout for the key (if not specified,
                        it uses the default timeout). A timeout of 0 indicates
that the cache never expires.
:returns: ``True`` if key has been updated, ``False`` for backend
errors. Pickling errors, however, will raise a subclass of
``pickle.PickleError``.
:rtype: boolean
"""
return True
def add(self, key, value, timeout=None):
"""Works like :meth:`set` but does not overwrite the values of already
existing keys.
:param key: the key to set
:param value: the value for the key
:param timeout: the cache timeout for the key or the default
timeout if not specified. A timeout of 0 indicates
that the cache never expires.
:returns: Same as :meth:`set`, but also ``False`` for already
existing keys.
:rtype: boolean
"""
return True
def set_many(self, mapping, timeout=None):
"""Sets multiple keys and values from a mapping.
:param mapping: a mapping with the keys/values to set.
:param timeout: the cache timeout for the key (if not specified,
it uses the default timeout). A timeout of 0
                        indicates that the cache never expires.
:returns: Whether all given keys have been set.
:rtype: boolean
"""
rv = True
for key, value in _items(mapping):
if not self.set(key, value, timeout):
rv = False
return rv
def delete_many(self, *keys):
"""Deletes multiple keys at once.
:param keys: The function accepts multiple keys as positional
arguments.
:returns: Whether all given keys have been deleted.
:rtype: boolean
"""
return all(self.delete(key) for key in keys)
def has(self, key):
"""Checks if a key exists in the cache without returning it. This is a
cheap operation that bypasses loading the actual data on the backend.
This method is optional and may not be implemented on all caches.
:param key: the key to check
"""
raise NotImplementedError(
'%s doesn\'t have an efficient implementation of `has`. That '
'means it is impossible to check whether a key exists without '
'fully loading the key\'s data. Consider using `self.get` '
'explicitly if you don\'t care about performance.'
)
def clear(self):
"""Clears the cache. Keep in mind that not all caches support
completely clearing the cache.
:returns: Whether the cache has been cleared.
:rtype: boolean
"""
return True
def inc(self, key, delta=1):
"""Increments the value of a key by `delta`. If the key does
not yet exist it is initialized with `delta`.
For supporting caches this is an atomic operation.
:param key: the key to increment.
:param delta: the delta to add.
:returns: The new value or ``None`` for backend errors.
"""
value = (self.get(key) or 0) + delta
return value if self.set(key, value) else None
def dec(self, key, delta=1):
"""Decrements the value of a key by `delta`. If the key does
not yet exist it is initialized with `-delta`.
For supporting caches this is an atomic operation.
:param key: the key to increment.
:param delta: the delta to subtract.
:returns: The new value or `None` for backend errors.
"""
value = (self.get(key) or 0) - delta
return value if self.set(key, value) else None
class NullCache(BaseCache):
"""A cache that doesn't cache. This can be useful for unit testing.
:param default_timeout: a dummy parameter that is ignored but exists
for API compatibility with other caches.
"""
class SimpleCache(BaseCache):
"""Simple memory cache for single process environments. This class exists
mainly for the development server and is not 100% thread safe. It tries
to use as many atomic operations as possible and no locks for simplicity
but it could happen under heavy load that keys are added multiple times.
:param threshold: the maximum number of items the cache stores before
it starts deleting some.
:param default_timeout: the default timeout that is used if no timeout is
specified on :meth:`~BaseCache.set`. A timeout of
0 indicates that the cache never expires.
"""
def __init__(self, threshold=500, default_timeout=300):
BaseCache.__init__(self, default_timeout)
self._cache = {}
self.clear = self._cache.clear
self._threshold = threshold
def _prune(self):
if len(self._cache) > self._threshold:
now = time()
toremove = []
for idx, (key, (expires, _)) in enumerate(self._cache.items()):
if (expires != 0 and expires <= now) or idx % 3 == 0:
toremove.append(key)
for key in toremove:
self._cache.pop(key, None)
def _get_expiration(self, timeout):
if timeout is None:
timeout = self.default_timeout
if timeout > 0:
timeout = time() + timeout
return timeout
def get(self, key):
try:
expires, value = self._cache[key]
if expires == 0 or expires > time():
return pickle.loads(value)
except (KeyError, pickle.PickleError):
return None
def set(self, key, value, timeout=None):
expires = self._get_expiration(timeout)
self._prune()
self._cache[key] = (expires, pickle.dumps(value,
pickle.HIGHEST_PROTOCOL))
return True
def add(self, key, value, timeout=None):
expires = self._get_expiration(timeout)
self._prune()
item = (expires, pickle.dumps(value,
pickle.HIGHEST_PROTOCOL))
if key in self._cache:
return False
self._cache.setdefault(key, item)
return True
def delete(self, key):
return self._cache.pop(key, None) is not None
def has(self, key):
try:
expires, value = self._cache[key]
return expires == 0 or expires > time()
except KeyError:
return False
_test_memcached_key = re.compile(r'[^\x00-\x21\xff]{1,250}$').match
class MemcachedCache(BaseCache):
"""A cache that uses memcached as backend.
The first argument can either be an object that resembles the API of a
:class:`memcache.Client` or a tuple/list of server addresses. In the
event that a tuple/list is passed, Werkzeug tries to import the best
available memcache library.
This cache looks into the following packages/modules to find bindings for
memcached:
- ``pylibmc``
- ``google.appengine.api.memcached``
- ``memcached``
Implementation notes: This cache backend works around some limitations in
memcached to simplify the interface. For example unicode keys are encoded
to utf-8 on the fly. Methods such as :meth:`~BaseCache.get_dict` return
the keys in the same format as passed. Furthermore all get methods
silently ignore key errors to not cause problems when untrusted user data
is passed to the get methods which is often the case in web applications.
:param servers: a list or tuple of server addresses or alternatively
a :class:`memcache.Client` or a compatible client.
:param default_timeout: the default timeout that is used if no timeout is
specified on :meth:`~BaseCache.set`. A timeout of
                            0 indicates that the cache never expires.
:param key_prefix: a prefix that is added before all keys. This makes it
possible to use the same memcached server for different
applications. Keep in mind that
:meth:`~BaseCache.clear` will also clear keys with a
different prefix.
"""
def __init__(self, servers=None, default_timeout=300, key_prefix=None):
BaseCache.__init__(self, default_timeout)
if servers is None or isinstance(servers, (list, tuple)):
if servers is None:
servers = ['127.0.0.1:11211']
self._client = self.import_preferred_memcache_lib(servers)
if self._client is None:
raise RuntimeError('no memcache module found')
else:
# NOTE: servers is actually an already initialized memcache
# client.
self._client = servers
self.key_prefix = to_native(key_prefix)
def _normalize_key(self, key):
key = to_native(key, 'utf-8')
if self.key_prefix:
key = self.key_prefix + key
return key
def _normalize_timeout(self, timeout):
if timeout is None:
timeout = self.default_timeout
if timeout > 0:
timeout = int(time()) + timeout
return timeout
def get(self, key):
key = self._normalize_key(key)
# memcached doesn't support keys longer than that. Because often
# checks for so long keys can occur because it's tested from user
# submitted data etc we fail silently for getting.
if _test_memcached_key(key):
return self._client.get(key)
def get_dict(self, *keys):
key_mapping = {}
have_encoded_keys = False
for key in keys:
encoded_key = self._normalize_key(key)
if not isinstance(key, str):
have_encoded_keys = True
if _test_memcached_key(key):
key_mapping[encoded_key] = key
d = rv = self._client.get_multi(key_mapping.keys())
if have_encoded_keys or self.key_prefix:
rv = {}
for key, value in iteritems(d):
rv[key_mapping[key]] = value
if len(rv) < len(keys):
for key in keys:
if key not in rv:
rv[key] = None
return rv
def add(self, key, value, timeout=None):
key = self._normalize_key(key)
timeout = self._normalize_timeout(timeout)
return self._client.add(key, value, timeout)
def set(self, key, value, timeout=None):
key = self._normalize_key(key)
timeout = self._normalize_timeout(timeout)
return self._client.set(key, value, timeout)
def get_many(self, *keys):
d = self.get_dict(*keys)
return [d[key] for key in keys]
def set_many(self, mapping, timeout=None):
new_mapping = {}
for key, value in _items(mapping):
key = self._normalize_key(key)
new_mapping[key] = value
timeout = self._normalize_timeout(timeout)
failed_keys = self._client.set_multi(new_mapping, timeout)
return not failed_keys
def delete(self, key):
key = self._normalize_key(key)
if _test_memcached_key(key):
return self._client.delete(key)
def delete_many(self, *keys):
new_keys = []
for key in keys:
key = self._normalize_key(key)
if _test_memcached_key(key):
new_keys.append(key)
return self._client.delete_multi(new_keys)
def has(self, key):
key = self._normalize_key(key)
if _test_memcached_key(key):
return self._client.append(key, '')
return False
def clear(self):
return self._client.flush_all()
def inc(self, key, delta=1):
key = self._normalize_key(key)
return self._client.incr(key, delta)
def dec(self, key, delta=1):
key = self._normalize_key(key)
return self._client.decr(key, delta)
def import_preferred_memcache_lib(self, servers):
"""Returns an initialized memcache client. Used by the constructor."""
try:
import pylibmc
except ImportError:
pass
else:
return pylibmc.Client(servers)
try:
from google.appengine.api import memcache
except ImportError:
pass
else:
return memcache.Client()
try:
import memcache
except ImportError:
pass
else:
return memcache.Client(servers)
# backwards compatibility
GAEMemcachedCache = MemcachedCache
class RedisCache(BaseCache):
"""Uses the Redis key-value store as a cache backend.
    The first argument can be either a string denoting the address of the Redis
server or an object resembling an instance of a redis.Redis class.
Note: Python Redis API already takes care of encoding unicode strings on
the fly.
.. versionadded:: 0.7
.. versionadded:: 0.8
`key_prefix` was added.
.. versionchanged:: 0.8
This cache backend now properly serializes objects.
.. versionchanged:: 0.8.3
This cache backend now supports password authentication.
.. versionchanged:: 0.10
``**kwargs`` is now passed to the redis object.
:param host: address of the Redis server or an object which API is
compatible with the official Python Redis client (redis-py).
:param port: port number on which Redis server listens for connections.
:param password: password authentication for the Redis server.
:param db: db (zero-based numeric index) on Redis Server to connect.
:param default_timeout: the default timeout that is used if no timeout is
specified on :meth:`~BaseCache.set`. A timeout of
0 indicates that the cache never expires.
:param key_prefix: A prefix that should be added to all keys.
Any additional keyword arguments will be passed to ``redis.Redis``.
"""
def __init__(self, host='localhost', port=6379, password=None,
db=0, default_timeout=300, key_prefix=None, **kwargs):
BaseCache.__init__(self, default_timeout)
if isinstance(host, string_types):
try:
import redis
except ImportError:
raise RuntimeError('no redis module found')
if kwargs.get('decode_responses', None):
raise ValueError('decode_responses is not supported by '
'RedisCache.')
self._client = redis.Redis(host=host, port=port, password=password,
db=db, **kwargs)
else:
self._client = host
self.key_prefix = key_prefix or ''
def _get_expiration(self, timeout):
if timeout is None:
timeout = self.default_timeout
if timeout == 0:
timeout = -1
return timeout
def dump_object(self, value):
"""Dumps an object into a string for redis. By default it serializes
        integers as a regular string and pickle-dumps everything else.
"""
t = type(value)
if t in integer_types:
return str(value).encode('ascii')
return b'!' + pickle.dumps(value)
def load_object(self, value):
"""The reversal of :meth:`dump_object`. This might be called with
None.
"""
if value is None:
return None
if value.startswith(b'!'):
try:
return pickle.loads(value[1:])
except pickle.PickleError:
return None
try:
return int(value)
except ValueError:
# before 0.8 we did not have serialization. Still support that.
return value
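    # (Added note) Round-trip of the format described above, for illustration:
    #   dump_object(3)       -> b'3'                -> load_object -> 3
    #   dump_object([1, 2])  -> b'!' + pickle bytes -> load_object -> [1, 2]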
def get(self, key):
return self.load_object(self._client.get(self.key_prefix + key))
def get_many(self, *keys):
if self.key_prefix:
keys = [self.key_prefix + key for key in keys]
return [self.load_object(x) for x in self._client.mget(keys)]
def set(self, key, value, timeout=None):
timeout = self._get_expiration(timeout)
dump = self.dump_object(value)
if timeout == -1:
result = self._client.set(name=self.key_prefix + key,
value=dump)
else:
result = self._client.setex(name=self.key_prefix + key,
value=dump, time=timeout)
return result
def add(self, key, value, timeout=None):
timeout = self._get_expiration(timeout)
dump = self.dump_object(value)
return (
self._client.setnx(name=self.key_prefix + key, value=dump) and
self._client.expire(name=self.key_prefix + key, time=timeout)
)
def set_many(self, mapping, timeout=None):
timeout = self._get_expiration(timeout)
# Use transaction=False to batch without calling redis MULTI
# which is not supported by twemproxy
pipe = self._client.pipeline(transaction=False)
for key, value in _items(mapping):
dump = self.dump_object(value)
if timeout == -1:
pipe.set(name=self.key_prefix + key, value=dump)
else:
pipe.setex(name=self.key_prefix + key, value=dump,
time=timeout)
return pipe.execute()
def delete(self, key):
return self._client.delete(self.key_prefix + key)
def delete_many(self, *keys):
if not keys:
return
if self.key_prefix:
keys = [self.key_prefix + key for key in keys]
return self._client.delete(*keys)
def has(self, key):
return self._client.exists(self.key_prefix + key)
def clear(self):
status = False
if self.key_prefix:
keys = self._client.keys(self.key_prefix + '*')
if keys:
status = self._client.delete(*keys)
else:
status = self._client.flushdb()
return status
def inc(self, key, delta=1):
return self._client.incr(name=self.key_prefix + key, amount=delta)
def dec(self, key, delta=1):
return self._client.decr(name=self.key_prefix + key, amount=delta)
class FileSystemCache(BaseCache):
"""A cache that stores the items on the file system. This cache depends
on being the only user of the `cache_dir`. Make absolutely sure that
nobody but this cache stores files there or otherwise the cache will
randomly delete files therein.
:param cache_dir: the directory where cache files are stored.
:param threshold: the maximum number of items the cache stores before
it starts deleting some.
:param default_timeout: the default timeout that is used if no timeout is
specified on :meth:`~BaseCache.set`. A timeout of
0 indicates that the cache never expires.
:param mode: the file mode wanted for the cache files, default 0600
"""
#: used for temporary files by the FileSystemCache
_fs_transaction_suffix = '.__wz_cache'
def __init__(self, cache_dir, threshold=500, default_timeout=300,
mode=0o600):
BaseCache.__init__(self, default_timeout)
self._path = cache_dir
self._threshold = threshold
self._mode = mode
try:
os.makedirs(self._path)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
def _list_dir(self):
"""return a list of (fully qualified) cache filenames
"""
return [os.path.join(self._path, fn) for fn in os.listdir(self._path)
if not fn.endswith(self._fs_transaction_suffix)]
def _prune(self):
entries = self._list_dir()
if len(entries) > self._threshold:
now = time()
try:
for idx, fname in enumerate(entries):
remove = False
with open(fname, 'rb') as f:
expires = pickle.load(f)
remove = (expires != 0 and expires <= now) or idx % 3 == 0
if remove:
os.remove(fname)
except (IOError, OSError):
pass
def clear(self):
for fname in self._list_dir():
try:
os.remove(fname)
except (IOError, OSError):
return False
return True
def _get_filename(self, key):
if isinstance(key, text_type):
key = key.encode('utf-8') # XXX unicode review
hash = md5(key).hexdigest()
return os.path.join(self._path, hash)
def get(self, key):
filename = self._get_filename(key)
try:
with open(filename, 'rb') as f:
pickle_time = pickle.load(f)
if pickle_time == 0 or pickle_time >= time():
return pickle.load(f)
else:
os.remove(filename)
return None
except (IOError, OSError, pickle.PickleError):
return None
def add(self, key, value, timeout=None):
filename = self._get_filename(key)
if not os.path.exists(filename):
return self.set(key, value, timeout)
return False
def set(self, key, value, timeout=None):
if timeout is None:
timeout = int(time() + self.default_timeout)
elif timeout != 0:
timeout = int(time() + timeout)
filename = self._get_filename(key)
self._prune()
try:
fd, tmp = tempfile.mkstemp(suffix=self._fs_transaction_suffix,
dir=self._path)
with os.fdopen(fd, 'wb') as f:
pickle.dump(timeout, f, 1)
pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
rename(tmp, filename)
os.chmod(filename, self._mode)
except (IOError, OSError):
return False
else:
return True
def delete(self, key):
try:
os.remove(self._get_filename(key))
except (IOError, OSError):
return False
else:
return True
def has(self, key):
filename = self._get_filename(key)
try:
with open(filename, 'rb') as f:
pickle_time = pickle.load(f)
if pickle_time == 0 or pickle_time >= time():
return True
else:
os.remove(filename)
return False
except (IOError, OSError, pickle.PickleError):
return False
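# --- Illustrative usage sketch (added; not part of Werkzeug). Exercises the
# BaseCache API documented above using only the in-memory SimpleCache backend.
if __name__ == '__main__':
    _demo = SimpleCache(threshold=100, default_timeout=60)
    _demo.set('user:1:sidebar', '<ul>...</ul>', timeout=5 * 60)
    assert _demo.get('user:1:sidebar') == '<ul>...</ul>'
    assert _demo.get('missing') is None
    _demo.inc('hits')            # missing key is initialized to delta (1)
    _demo.inc('hits', delta=2)   # now 3
    assert _demo.get('hits') == 3
    _demo.delete('user:1:sidebar')
    assert _demo.get('user:1:sidebar') is None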
| mit |
ThinkingBridge/platform_external_chromium_org | third_party/closure_linter/closure_linter/checkerbase.py | 135 | 10755 | #!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes for writing checkers that operate on tokens."""
__author__ = ('[email protected] (Robert Walker)',
'[email protected] (Andy Perelson)',
'[email protected] (Jacob Richman)')
import StringIO
import traceback
import gflags as flags
from closure_linter import ecmametadatapass
from closure_linter import errorrules
from closure_linter import errors
from closure_linter import javascripttokenizer
from closure_linter.common import error
from closure_linter.common import htmlutil
FLAGS = flags.FLAGS
flags.DEFINE_boolean('debug_tokens', False,
'Whether to print all tokens for debugging.')
flags.DEFINE_boolean('error_trace', False,
'Whether to show error exceptions.')
class LintRulesBase(object):
"""Base class for all classes defining the lint rules for a language."""
def __init__(self):
self.__checker = None
def Initialize(self, checker, limited_doc_checks, is_html):
"""Initializes to prepare to check a file.
Args:
checker: Class to report errors to.
limited_doc_checks: Whether doc checking is relaxed for this file.
is_html: Whether the file is an HTML file with extracted contents.
"""
self.__checker = checker
self._limited_doc_checks = limited_doc_checks
self._is_html = is_html
def _HandleError(self, code, message, token, position=None,
fix_data=None):
"""Call the HandleError function for the checker we are associated with."""
if errorrules.ShouldReportError(code):
self.__checker.HandleError(code, message, token, position, fix_data)
def _SetLimitedDocChecks(self, limited_doc_checks):
"""Sets whether doc checking is relaxed for this file.
Args:
limited_doc_checks: Whether doc checking is relaxed for this file.
"""
self._limited_doc_checks = limited_doc_checks
def CheckToken(self, token, parser_state):
"""Checks a token, given the current parser_state, for warnings and errors.
Args:
token: The current token under consideration.
parser_state: Object that indicates the parser state in the page.
Raises:
TypeError: If not overridden.
"""
raise TypeError('Abstract method CheckToken not implemented')
def Finalize(self, parser_state, tokenizer_mode):
"""Perform all checks that need to occur after all lines are processed.
Args:
parser_state: State of the parser after parsing all tokens
tokenizer_mode: Mode of the tokenizer after parsing the entire page
Raises:
TypeError: If not overridden.
"""
raise TypeError('Abstract method Finalize not implemented')
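# --- Illustrative sketch (added; not part of closure_linter): the smallest
# possible concrete LintRulesBase subclass, showing the two required overrides.
# It is defined purely for illustration and is not used anywhere in this module.
class _NoOpLintRules(LintRulesBase):
  """Rules object that accepts every token and reports nothing."""
  def CheckToken(self, token, parser_state):
    # A real implementation would inspect the token and, on a violation,
    # call self._HandleError(code, message, token).
    pass
  def Finalize(self, parser_state, tokenizer_mode):
    # No whole-file checks in this no-op example.
    pass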
class CheckerBase(object):
"""This class handles checking a LintRules object against a file."""
def __init__(self, error_handler, lint_rules, state_tracker,
limited_doc_files=None, metadata_pass=None):
"""Initialize a checker object.
Args:
error_handler: Object that handles errors.
lint_rules: LintRules object defining lint errors given a token
and state_tracker object.
state_tracker: Object that tracks the current state in the token stream.
limited_doc_files: List of filenames that are not required to have
documentation comments.
metadata_pass: Object that builds metadata about the token stream.
"""
self._error_handler = error_handler
self._lint_rules = lint_rules
self._state_tracker = state_tracker
self._metadata_pass = metadata_pass
self._limited_doc_files = limited_doc_files
# TODO(user): Factor out. A checker does not need to know about the
# tokenizer, only the token stream.
self._tokenizer = javascripttokenizer.JavaScriptTokenizer()
self._has_errors = False
def HandleError(self, code, message, token, position=None,
fix_data=None):
"""Prints out the given error message including a line number.
Args:
code: The error code.
message: The error to print.
token: The token where the error occurred, or None if it was a file-wide
issue.
position: The position of the error, defaults to None.
fix_data: Metadata used for fixing the error.
"""
self._has_errors = True
self._error_handler.HandleError(
error.Error(code, message, token, position, fix_data))
def HasErrors(self):
"""Returns true if the style checker has found any errors.
Returns:
True if the style checker has found any errors.
"""
return self._has_errors
def Check(self, filename, source=None):
"""Checks the file, printing warnings and errors as they are found.
Args:
filename: The name of the file to check.
source: Optional. The contents of the file. Can be either a string or
file-like object. If omitted, contents will be read from disk from
the given filename.
"""
if source is None:
try:
f = open(filename)
except IOError:
self._error_handler.HandleFile(filename, None)
self.HandleError(errors.FILE_NOT_FOUND, 'File not found', None)
self._error_handler.FinishFile()
return
else:
if type(source) in [str, unicode]:
f = StringIO.StringIO(source)
else:
f = source
try:
if filename.endswith('.html') or filename.endswith('.htm'):
self.CheckLines(filename, htmlutil.GetScriptLines(f), True)
else:
self.CheckLines(filename, f, False)
finally:
f.close()
def CheckLines(self, filename, lines_iter, is_html):
"""Checks a file, given as an iterable of lines, for warnings and errors.
Args:
filename: The name of the file to check.
lines_iter: An iterator that yields one line of the file at a time.
is_html: Whether the file being checked is an HTML file with extracted
contents.
Returns:
A boolean indicating whether the full file could be checked or if checking
failed prematurely.
"""
limited_doc_checks = False
if self._limited_doc_files:
for limited_doc_filename in self._limited_doc_files:
if filename.endswith(limited_doc_filename):
limited_doc_checks = True
break
lint_rules = self._lint_rules
lint_rules.Initialize(self, limited_doc_checks, is_html)
token = self._tokenizer.TokenizeFile(lines_iter)
parse_error = None
if self._metadata_pass:
try:
self._metadata_pass.Reset()
self._metadata_pass.Process(token)
except ecmametadatapass.ParseError, caught_parse_error:
if FLAGS.error_trace:
traceback.print_exc()
parse_error = caught_parse_error
except Exception:
print 'Internal error in %s' % filename
traceback.print_exc()
return False
self._error_handler.HandleFile(filename, token)
return self._CheckTokens(token, parse_error=parse_error,
debug_tokens=FLAGS.debug_tokens)
def _CheckTokens(self, token, parse_error, debug_tokens):
"""Checks a token stream for lint warnings/errors.
Args:
token: The first token in the token stream to check.
parse_error: A ParseError if any errors occurred.
debug_tokens: Whether every token should be printed as it is encountered
during the pass.
Returns:
A boolean indicating whether the full token stream could be checked or if
checking failed prematurely.
"""
result = self._ExecutePass(token, self._LintPass, parse_error, debug_tokens)
if not result:
return False
self._lint_rules.Finalize(self._state_tracker, self._tokenizer.mode)
self._error_handler.FinishFile()
return True
def _LintPass(self, token):
"""Checks an individual token for lint warnings/errors.
Used to encapsulate the logic needed to check an individual token so that it
can be passed to _ExecutePass.
Args:
token: The token to check.
"""
self._lint_rules.CheckToken(token, self._state_tracker)
def _ExecutePass(self, token, pass_function, parse_error=None,
debug_tokens=False):
"""Calls the given function for every token in the given token stream.
As each token is passed to the given function, state is kept up to date and,
depending on the error_trace flag, errors are either caught and reported, or
allowed to bubble up so developers can see the full stack trace. If a parse
error is specified, the pass will proceed as normal until the token causing
the parse error is reached.
Args:
token: The first token in the token stream.
pass_function: The function to call for each token in the token stream.
parse_error: A ParseError if any errors occurred.
debug_tokens: Whether every token should be printed as it is encountered
during the pass.
Returns:
A boolean indicating whether the full token stream could be checked or if
checking failed prematurely.
Raises:
Exception: If any error occurred while calling the given function.
"""
self._state_tracker.Reset()
while token:
if debug_tokens:
print token
if parse_error and parse_error.token == token:
message = ('Error parsing file at token "%s". Unable to '
'check the rest of file.' % token.string)
self.HandleError(errors.FILE_DOES_NOT_PARSE, message, token)
self._error_handler.FinishFile()
return
try:
self._state_tracker.HandleToken(
token, self._state_tracker.GetLastNonSpaceToken())
pass_function(token)
self._state_tracker.HandleAfterToken(token)
except:
if FLAGS.error_trace:
raise
else:
self.HandleError(errors.FILE_DOES_NOT_PARSE,
('Error parsing file at token "%s". Unable to '
'check the rest of file.' % token.string),
token)
self._error_handler.FinishFile()
return False
token = token.next
return True
| bsd-3-clause |
dimagol/trex-core | scripts/external_libs/pyzmq-14.5.0/python2/intel/ucs4/64bit/zmq/devices/monitoredqueuedevice.py | 44 | 1994 | """MonitoredQueue classes and functions."""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from zmq import ZMQError, PUB
from zmq.devices.proxydevice import ProxyBase, Proxy, ThreadProxy, ProcessProxy
from zmq.devices.monitoredqueue import monitored_queue
class MonitoredQueueBase(ProxyBase):
"""Base class for overriding methods."""
_in_prefix = b''
_out_prefix = b''
def __init__(self, in_type, out_type, mon_type=PUB, in_prefix=b'in', out_prefix=b'out'):
ProxyBase.__init__(self, in_type=in_type, out_type=out_type, mon_type=mon_type)
self._in_prefix = in_prefix
self._out_prefix = out_prefix
def run_device(self):
ins,outs,mons = self._setup_sockets()
monitored_queue(ins, outs, mons, self._in_prefix, self._out_prefix)
class MonitoredQueue(MonitoredQueueBase, Proxy):
"""Class for running monitored_queue in the background.
See zmq.devices.Device for most of the spec. MonitoredQueue differs from Proxy,
only in that it adds a ``prefix`` to messages sent on the monitor socket,
with a different prefix for each direction.
MQ also supports ROUTER on both sides, which zmq.proxy does not.
If a message arrives on `in_sock`, it will be prefixed with `in_prefix` on the monitor socket.
If it arrives on out_sock, it will be prefixed with `out_prefix`.
A PUB socket is the most logical choice for the mon_socket, but it is not required.
"""
pass
class ThreadMonitoredQueue(MonitoredQueueBase, ThreadProxy):
"""Run zmq.monitored_queue in a background thread.
See MonitoredQueue and Proxy for details.
"""
pass
class ProcessMonitoredQueue(MonitoredQueueBase, ProcessProxy):
"""Run zmq.monitored_queue in a background thread.
See MonitoredQueue and Proxy for details.
"""
__all__ = [
'MonitoredQueue',
'ThreadMonitoredQueue',
'ProcessMonitoredQueue'
]
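# --- Illustrative usage sketch (added; not part of pyzmq). The socket types,
# addresses and prefixes below are example choices, not requirements; bind_in/
# bind_out/bind_mon/start are the background-device API inherited from
# zmq.devices.
#
#   import zmq
#   from zmq.devices import ThreadMonitoredQueue
#   dev = ThreadMonitoredQueue(zmq.ROUTER, zmq.DEALER, zmq.PUB, b'in', b'out')
#   dev.bind_in('tcp://127.0.0.1:5555')
#   dev.bind_out('tcp://127.0.0.1:5556')
#   dev.bind_mon('tcp://127.0.0.1:5557')
#   dev.start()    # monitored_queue then runs in a background thread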
| apache-2.0 |
tudo-astroparticlephysics/starry_night | starry_night/plotting.py | 1 | 6455 | import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from matplotlib import gridspec, ticker
from matplotlib.lines import Line2D
from mpl_toolkits.axes_grid.inset_locator import inset_axes
import numpy as np
def exponential(x, m, b):
return np.exp(m * x + b)
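# (Added note) Fit model used by plot_choose_sigma below: for each LoG kernel
# size the star responses are fitted as response ~ exp(m * vmag + b), i.e.
# log-linear in visual magnitude, via scipy.optimize.curve_fit.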
def plot_kernel_curve(stars, outputfile):
res = list()
gr = stars.query('kernel>=1 & vmag<4').reset_index().groupby('HIP')
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
cm = plt.get_cmap()
color = cm(np.linspace(0, 1, 10 * (gr.vmag.max().max() - gr.vmag.min().min()) + 2))
for _, s in gr:
# normalize
n = s.response_orig.max()
res.append(s.query('response_orig == {}'.format(n)).kernel.values)
plt.plot(
s.kernel.values,
s.response_orig.values / n,
c=color[int(round(s.vmag.max()*10))]
)
ax.set_xlabel('$\sigma$ of LoG filter')
ax.set_ylabel('Kernel response normalized')
lEntry = Line2D([], [], color='black', label='Response of all stars')
ax.grid()
ax.legend(handles=[lEntry])
fig.tight_layout(pad=0)
fig.savefig(outputfile, dpi=300)
def plot_choose_sigma(stars, kernelSize, outputfile):
gr = stars.query('kernel>=1 & vmag<4').reset_index().groupby('kernel')
res = list()
for _, s in gr:
popt, pcov = curve_fit(exponential, s.vmag.values, s.response_orig.values)
res.append(
(s.kernel.max(), *popt, np.sqrt(pcov[0, 0]), np.sqrt(pcov[1, 1]))
)
res = np.array(res)
fig = plt.figure()
gs = gridspec.GridSpec(2, 1, height_ratios=[4, 1])
ax1 = fig.add_subplot(gs[0])
ax2 = plt.subplot(gs[1], sharex=ax1)
ax1.grid()
ax2.grid()
ax1.scatter(res[:, 0], res[:, 2], label='b')
ax1.set_xlabel('$\sigma$ of LoG filter')
ax1.set_ylabel('b')
ax2.scatter(res[:, 0], res[:, 4], label='b')
ax2.set_ylabel('Standard deviation')
fig.tight_layout(pad=0)
fig.savefig(outputfile, dpi=300)
def plot_camera_image(img, timestamp, stars, celObjects, outputfile):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
vmin = np.nanpercentile(img, 5)
vmax = np.nanpercentile(img, 90)
for source in celObjects['points_of_interest'].itertuples():
ax.plot(
source.x,
source.y,
marker='^',
color='C1',
)
ax.annotate(
source.name,
xy=(source.x, source.y),
color='C1',
xytext=(5, 0), textcoords='offset points',
va='center',
)
    ax.imshow(img, vmin=vmin, vmax=vmax, cmap='gray')
ax.set_title(
str(timestamp),
verticalalignment='bottom', horizontalalignment='right',
)
plot = ax.scatter(
stars.x.values,
stars.y.values,
c=stars.visible.values,
cmap='RdYlGn',
s=5,
vmin=0,
vmax=1,
)
fig.colorbar(plot, label='Visibility')
fig.tight_layout(pad=0)
fig.savefig(outputfile)
def plot_kernel_response(lower_limit, upper_limit, vmag_limit, img, data, stars, outputfile):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_yscale('log')
# draw visibility limits
x = np.linspace(-5 + stars.vmag.min(), stars.vmag.max() + 5, 20)
y1 = 10**(x * lower_limit[0] + lower_limit[1])
y2 = 10**(x * upper_limit[0] + upper_limit[1])
ax.plot(x, y1, c='red', label='lower limit')
ax.plot(x, y2, c='green', label='upper limit')
stars.plot.scatter(
x='vmag',
y='response',
c=stars.visible.values,
ax=ax,
cmap=plt.cm.RdYlGn,
vmin=0, vmax=1,
label='Kernel Response',
)
ax.set_xlim((-1, float(data['vmaglimit']) + 0.5))
ax.set_ylim(
10**(lower_limit[0] * vmag_limit + lower_limit[1] - 1),
10**(-upper_limit[0] + upper_limit[1])
)
ax.set_ylabel('Kernel Response')
ax.set_xlabel('Star Magnitude')
ax_in = inset_axes(ax, width='40%', height='40%', loc='lower left')
vmin = np.nanpercentile(img, 0.5)
vmax = np.nanpercentile(img, 99.5)
ax_in.imshow(img, cmap='gray', vmin=vmin, vmax=vmax)
stars.plot.scatter(
x='x',
y='y',
c='visible',
cmap='RdYlGn',
vmin=0,
vmax=1,
s=3,
colorbar=False,
ax=ax_in,
)
ax_in.get_xaxis().set_visible(False)
ax_in.get_yaxis().set_visible(False)
leg = ax.legend(loc='lower right')
leg.legendHandles[2].set_color('yellow')
fig.tight_layout(pad=0)
fig.savefig(outputfile, dpi=300)
plt.close('all')
def plot_ratescan(response, sobelList, logList, gradList, minThresholdPos, outputfile):
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
ax2 = ax1.twinx()
ax1.set_xscale('log')
ax1.grid()
ax2.grid()
kernels = [sobelList, logList, gradList]
labels = ['Sobel Kernel', 'LoG Kernel', 'Square Gradient']
for i, (kernel, label) in enumerate(zip(kernels, labels)):
color = 'C{}'.format(i)
ax1.plot(
response,
kernel[:, 0],
color=color,
marker='x',
label='{} - Percent'.format(label),
)
ax2.plot(
response,
kernel[:, 2],
color=color,
marker='s',
label='{} - Clustercount'.format(label)
)
ax1.axvline(response[minThresholdPos[i]], color=color)
ax2.axhline(kernel[minThresholdPos[0], 2], color=color)
ax1.axvline(14**2 / 255**2, color='black', label='old threshold')
ax1.set_ylabel('')
ax1.legend(loc='center left')
ax2.legend(loc='upper right')
ax2.set_ylim((0, 2**14))
fig.tight_layout(pad=0)
fig.savefig(outputfile)
def plot_cloudmap_and_image(img, cloud_map, timestamp, outputfile):
fig = plt.figure()
fig.suptitle(timestamp.isoformat())
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)
vmin = np.nanpercentile(img, 5.5)
vmax = np.nanpercentile(img, 99.9)
ax1.imshow(img, vmin=vmin, vmax=vmax, cmap='gray', interpolation='none')
ax2.imshow(cloud_map, cmap='gray_r', vmin=0, vmax=1)
fig.tight_layout(pad=0)
fig.savefig(outputfile, dpi=300)
def plot_cloudmap(cloud_map, outputfile):
ax = plt.subplot(1, 1, 1)
ax.imshow(cloud_map, cmap='gray_r', vmin=0, vmax=1)
ax.grid()
plt.savefig(outputfile, dpi=300)
| mit |
o5k/openerp-oemedical-v0.1 | openerp/addons/base/module/wizard/base_import_language.py | 105 | 2645 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from tempfile import TemporaryFile
from openerp import tools
from openerp.osv import osv, fields
class base_language_import(osv.osv_memory):
""" Language Import """
_name = "base.language.import"
_description = "Language Import"
_columns = {
'name': fields.char('Language Name', size=64 , required=True),
'code': fields.char('ISO Code', size=5, help="ISO Language and Country code, e.g. en_US", required=True),
'data': fields.binary('File', required=True),
'overwrite': fields.boolean('Overwrite Existing Terms',
help="If you enable this option, existing translations (including custom ones) "
"will be overwritten and replaced by those in this file"),
}
def import_lang(self, cr, uid, ids, context=None):
if context is None:
context = {}
this = self.browse(cr, uid, ids[0])
if this.overwrite:
context.update(overwrite=True)
fileobj = TemporaryFile('w+')
try:
fileobj.write(base64.decodestring(this.data))
# now we determine the file format
fileobj.seek(0)
first_line = fileobj.readline().strip().replace('"', '').replace(' ', '')
fileformat = first_line.endswith("type,name,res_id,src,value") and 'csv' or 'po'
fileobj.seek(0)
tools.trans_load_data(cr, fileobj, fileformat, this.code, lang_name=this.name, context=context)
finally:
fileobj.close()
return True
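# Illustrative helper, not part of the original wizard: the first-line heuristic
# from import_lang above, extracted for clarity. The sample lines below are
# made-up examples of a CSV export header and a PO file header.
def _guess_translation_format(first_line):
    stripped = first_line.strip().replace('"', '').replace(' ', '')
    return stripped.endswith("type,name,res_id,src,value") and 'csv' or 'po'
# _guess_translation_format('"module","type","name","res_id","src","value"')  # -> 'csv'
# _guess_translation_format('# Translation of OpenERP Server.')               # -> 'po'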
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
loxal/zcash | qa/rpc-tests/reindex.py | 144 | 1063 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test -reindex with CheckBlockIndex
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import os.path
class ReindexTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 1)
def setup_network(self):
self.nodes = []
self.is_network_split = False
self.nodes.append(start_node(0, self.options.tmpdir))
def run_test(self):
self.nodes[0].generate(3)
stop_node(self.nodes[0], 0)
wait_bitcoinds()
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug", "-reindex", "-checkblockindex=1"])
assert_equal(self.nodes[0].getblockcount(), 3)
print "Success"
if __name__ == '__main__':
ReindexTest().main()
| mit |
SummerLW/Perf-Insight-Report | third_party/gsutil/third_party/boto/tests/integration/ec2containerservice/test_ec2containerservice.py | 99 | 1749 | # Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.ec2containerservice.exceptions import ClientException
from tests.compat import unittest
class TestEC2ContainerService(unittest.TestCase):
def setUp(self):
self.ecs = boto.connect_ec2containerservice()
def test_list_clusters(self):
response = self.ecs.list_clusters()
self.assertIn('clusterArns',
response['ListClustersResponse']['ListClustersResult'])
def test_handle_not_found_exception(self):
with self.assertRaises(ClientException):
# Try to stop a task with an invalid arn.
self.ecs.stop_task(task='foo')
| bsd-3-clause |