repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---
DSMan195276/protura-binutils | gdb/testsuite/gdb.python/py-pp-maint.py | 32 | 2491 |
# Copyright (C) 2010-2015 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This file is part of the GDB testsuite. It tests Python pretty
# printers.
import re
import gdb.types
import gdb.printing
def lookup_function_lookup_test(val):
    class PrintFunctionLookup(object):
        def __init__(self, val):
            self.val = val

        def to_string(self):
            return ("x=<" + str(self.val["x"]) +
                    "> y=<" + str(self.val["y"]) + ">")

    typename = gdb.types.get_basic_type(val.type).tag
    # Note: typename could be None.
    if typename == "function_lookup_test":
        return PrintFunctionLookup(val)
    return None


class pp_s(object):
    def __init__(self, val):
        self.val = val

    def to_string(self):
        a = self.val["a"]
        b = self.val["b"]
        if a.address != b:
            raise Exception("&a(%s) != b(%s)" % (str(a.address), str(b)))
        return "a=<" + str(self.val["a"]) + "> b=<" + str(self.val["b"]) + ">"


class pp_ss(object):
    def __init__(self, val):
        self.val = val

    def to_string(self):
        return "a=<" + str(self.val["a"]) + "> b=<" + str(self.val["b"]) + ">"


def build_pretty_printer():
    pp = gdb.printing.RegexpCollectionPrettyPrinter("pp-test")
    pp.add_printer('struct s', '^struct s$', pp_s)
    pp.add_printer('s', '^s$', pp_s)
    # Use a lambda this time to exercise doing things this way.
    pp.add_printer('struct ss', '^struct ss$', lambda val: pp_ss(val))
    pp.add_printer('ss', '^ss$', lambda val: pp_ss(val))
    pp.add_printer('enum flag_enum', '^flag_enum$',
                   gdb.printing.FlagEnumerationPrinter('enum flag_enum'))
    return pp


gdb.printing.register_pretty_printer(gdb, lookup_function_lookup_test)
my_pretty_printer = build_pretty_printer()
gdb.printing.register_pretty_printer(gdb, my_pretty_printer)
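
# Illustrative GDB transcript (hedged: the listing format varies by GDB
# version, and the inferior test program is not part of this file):
#
#   (gdb) info pretty-printer
#   global pretty-printers:
#     lookup_function_lookup_test
#     pp-test
#       enum flag_enum
#       s
#       ss
#       struct s
#       struct ss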
| gpl-2.0 |
2014cdbg14/2014cdbg14 | wsgi/programs/cdbg5/remsub61.py | 5 | 11976 |
import cherrypy
# This is the definition of the REMSUB61 class
'''
# Import this submodule in the application:
import programs.cdag5.remsub61 as cdag5_remsub61
# Add remsub61.py under the cdag5 module, mapping the submodule remsub61 to its MAIN() class:
root.cdag5.remsub61 = cdag5_remsub61.MAIN()
# Once this is set up, you can use
/cdag5/remsub61/assembly
# to call the assembly method of the MAIN class in remsub61.py
'''
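# A minimal standalone sketch of the mounting described above (hedged: the
# Root class and quickstart() wiring here are illustrative, not the
# project's real application tree):
#
#   import cherrypy
#   import programs.cdbg5.remsub61 as remsub61
#
#   class Root(object):
#       pass
#
#   root = Root()
#   root.remsub61 = remsub61.MAIN()
#   cherrypy.quickstart(root)  # /remsub61/assembly then reaches MAIN.assembly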
class MAIN(object):
    # Each team uses index to guide the program flow that follows
    @cherrypy.expose
    def index(self, *args, **kwargs):
        outstring = '''
This is the MAIN class in the remsub6-1.py file, under the cdag5 module of the 2014CDA collaborative project.<br /><br />
<!-- A relative link is used here, rather than an absolute URL (this segment is an HTML comment) -->
<a href="assembly">Run the assembly method of the MAIN class</a><br /><br />
Make sure the parts listed below are in the V:/home/lego/remsub6-1 directory, and open a blank Creo assembly file.<br />
<a href="https://copy.com/oEKNnJlWGTSV">lego_parts.7z</a><br />
'''
        return outstring

    @cherrypy.expose
    def assembly(self, *args, **kwargs):
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/weblink/examples/jscript/pfcUtils.js"></script>
</head>
<body>
<script language="JavaScript">
/* remsub6.py performs the whole assembly through function calls */
/* Define a part-assembly function */
// featID is the feature ID of the first assembled part in the assembly
// inc is the assembly-order offset of part1; the first part added to the assembly file gets featID+0
// part2 is the name of the part being added
////////////////////////////////////////////////
// axis_plane_assembly: the assembly function
////////////////////////////////////////////////
function axis_plane_assembly(session, assembly, transf, featID, inc, part2, axis1, plane1, axis2, plane2){
    var descr = pfcCreate("pfcModelDescriptor").CreateFromFileName("v:/home/lego/remsub61/" + part2);
    var componentModel = session.GetModelFromDescr(descr);
    // RetrieveModel loads the part from disk when it is not already in session
    componentModel = session.RetrieveModel(descr);
    if (componentModel != void null)
    {
        var asmcomp = assembly.AssembleComponent(componentModel, transf);
    }
    var ids = pfcCreate("intseq");
    ids.Append(featID + inc);
    var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
    subassembly = subPath.Leaf;
    var asmDatums = new Array(axis1, plane1);
    var compDatums = new Array(axis2, plane2);
    var relation = new Array(pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
    var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS, pfcCreate("pfcModelItemType").ITEM_SURFACE);
    var constrs = pfcCreate("pfcComponentConstraints");
    for (var i = 0; i < 2; i++)
    {
        var asmItem = subassembly.GetItemByName(relationItem[i], asmDatums[i]);
        if (asmItem == void null)
        {
            interactFlag = true;
            continue;
        }
        var compItem = componentModel.GetItemByName(relationItem[i], compDatums[i]);
        if (compItem == void null)
        {
            interactFlag = true;
            continue;
        }
        var MpfcSelect = pfcCreate("MpfcSelect");
        var asmSel = MpfcSelect.CreateModelItemSelection(asmItem, subPath);
        var compSel = MpfcSelect.CreateModelItemSelection(compItem, void null);
        var constr = pfcCreate("pfcComponentConstraint").Create(relation[i]);
        constr.AssemblyReference = asmSel;
        constr.ComponentReference = compSel;
        constr.Attributes = pfcCreate("pfcConstraintAttributes").Create(true, false);
        constrs.Append(constr);
    }
    asmcomp.SetConstraints(constrs, void null);
}
// End of the axis_plane_assembly() function
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// three_plane_assembly uses ALIGN constraints; featID == 0 means an empty assembly file
///////////////////////////////////////////////////////////////////////////////////////////////////////////
function three_plane_assembly(session, assembly, transf, featID, inc, part2, plane1, plane2, plane3, plane4, plane5, plane6){
    var descr = pfcCreate("pfcModelDescriptor").CreateFromFileName("v:/home/lego/remsub61/" + part2);
    var componentModel = session.GetModelFromDescr(descr);
    // RetrieveModel loads the part from disk when it is not already in session
    componentModel = session.RetrieveModel(descr);
    if (componentModel != void null)
    {
        var asmcomp = assembly.AssembleComponent(componentModel, transf);
    }
    var ids = pfcCreate("intseq");
    // featID == 0 means an empty assembly file
    if (featID != 0){
        ids.Append(featID + inc);
        var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
        subassembly = subPath.Leaf;
    }else{
        var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
        subassembly = assembly;
        // Obtain first_featID, the ID of the first assembled part:
        // list the component features under the assembly; since there is only one part, take index 0
        var components = assembly.ListFeaturesByType(true, pfcCreate("pfcFeatureType").FEATTYPE_COMPONENT);
        // This featID is the number of the first part in the assembly
        var first_featID = components.Item(0).Id;
    }
    var constrs = pfcCreate("pfcComponentConstraints");
    var asmDatums = new Array(plane1, plane2, plane3);
    var compDatums = new Array(plane4, plane5, plane6);
    var MpfcSelect = pfcCreate("MpfcSelect");
    for (var i = 0; i < 3; i++)
    {
        var asmItem = subassembly.GetItemByName(pfcCreate("pfcModelItemType").ITEM_SURFACE, asmDatums[i]);
        if (asmItem == void null)
        {
            interactFlag = true;
            continue;
        }
        var compItem = componentModel.GetItemByName(pfcCreate("pfcModelItemType").ITEM_SURFACE, compDatums[i]);
        if (compItem == void null)
        {
            interactFlag = true;
            continue;
        }
        var asmSel = MpfcSelect.CreateModelItemSelection(asmItem, subPath);
        var compSel = MpfcSelect.CreateModelItemSelection(compItem, void null);
        var constr = pfcCreate("pfcComponentConstraint").Create(pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN);
        constr.AssemblyReference = asmSel;
        constr.ComponentReference = compSel;
        constr.Attributes = pfcCreate("pfcConstraintAttributes").Create(false, false);
        constrs.Append(constr);
    }
    asmcomp.SetConstraints(constrs, void null);
    // If featID == 0, return first_featID
    if (featID == 0)
        return first_featID;
}
// End of the three_plane_assembly() function
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// three_plane_assembly2 uses MATE constraints; featID == 0 means an empty assembly file
///////////////////////////////////////////////////////////////////////////////////////////////////////////
function three_plane_assembly2(session, assembly, transf, featID, inc, part2, plane1, plane2, plane3, plane4, plane5, plane6){
    var descr = pfcCreate("pfcModelDescriptor").CreateFromFileName("v:/home/lego/remsub61/" + part2);
    var componentModel = session.GetModelFromDescr(descr);
    // RetrieveModel loads the part from disk when it is not already in session
    componentModel = session.RetrieveModel(descr);
    if (componentModel != void null)
    {
        var asmcomp = assembly.AssembleComponent(componentModel, transf);
    }
    var ids = pfcCreate("intseq");
    // featID == 0 means an empty assembly file
    if (featID != 0){
        ids.Append(featID + inc);
        var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
        subassembly = subPath.Leaf;
    }else{
        var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
        subassembly = assembly;
        // Obtain first_featID, the ID of the first assembled part:
        // list the component features under the assembly; since there is only one part, take index 0
        var components = assembly.ListFeaturesByType(true, pfcCreate("pfcFeatureType").FEATTYPE_COMPONENT);
        // This featID is the number of the first part in the assembly
        var first_featID = components.Item(0).Id;
    }
    var constrs = pfcCreate("pfcComponentConstraints");
    var asmDatums = new Array(plane1, plane2, plane3);
    var compDatums = new Array(plane4, plane5, plane6);
    var MpfcSelect = pfcCreate("MpfcSelect");
    for (var i = 0; i < 3; i++)
    {
        var asmItem = subassembly.GetItemByName(pfcCreate("pfcModelItemType").ITEM_SURFACE, asmDatums[i]);
        if (asmItem == void null)
        {
            interactFlag = true;
            continue;
        }
        var compItem = componentModel.GetItemByName(pfcCreate("pfcModelItemType").ITEM_SURFACE, compDatums[i]);
        if (compItem == void null)
        {
            interactFlag = true;
            continue;
        }
        var asmSel = MpfcSelect.CreateModelItemSelection(asmItem, subPath);
        var compSel = MpfcSelect.CreateModelItemSelection(compItem, void null);
        var constr = pfcCreate("pfcComponentConstraint").Create(pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
        constr.AssemblyReference = asmSel;
        constr.ComponentReference = compSel;
        constr.Attributes = pfcCreate("pfcConstraintAttributes").Create(false, false);
        constrs.Append(constr);
    }
    asmcomp.SetConstraints(constrs, void null);
    // If featID == 0, return first_featID
    if (featID == 0)
        return first_featID;
}
// End of the three_plane_assembly2() function, which mates three planes
//
// If the operating system hosting Creo is not Windows
if (!pfcIsWindows())
    // enable the corresponding UniversalXPConnect privilege (the equivalent of ActiveX on Windows)
    netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect");
// pfcGetProESession() is a function in pfcUtils.js; it confirms this JavaScript is running in the embedded browser
var session = pfcGetProESession();
// Set a config option: do not use the placement assumptions built into the component-assembly workflow
session.SetConfigOption("comp_placement_assumptions", "no");
// Create the placement matrix for the parts; Pro/Web.Link variables cannot be created directly and must be built with pfcCreate()
var identityMatrix = pfcCreate("pfcMatrix3D");
// Build the identity placement matrix
for (var x = 0; x < 4; x++)
    for (var y = 0; y < 4; y++)
    {
        if (x == y)
            identityMatrix.Set(x, y, 1.0);
        else
            identityMatrix.Set(x, y, 0.0);
    }
// Build the transf coordinate-transformation matrix from identityMatrix
var transf = pfcCreate("pfcTransform3D").Create(identityMatrix);
// Get the current working directory
var currentDir = session.getCurrentDirectory();
// Use the blank assembly file that is currently open as the model
var model = session.CurrentModel;
// Check that a model exists and that it is an assembly; otherwise throw an error
if (model == void null || model.Type != pfcCreate("pfcModelType").MDL_ASSEMBLY)
    throw new Error(0, "Current model is not an assembly.");
// Treat this model as the assembly object
var assembly = model;
/////////////////////////////////////////////////////////////////
// Start the assembly, performed entirely through function calls
/////////////////////////////////////////////////////////////////
// The body is joined to the empty assembly file with three plane constraints
// The empty assembly's planes are ASM_TOP, ASM_FRONT, ASM_RIGHT
// The body's planes are TOP, FRONT, RIGHT
// featID=0 means an empty assembly file, and the function returns the featID of the first assembled part
var featID = three_plane_assembly(session, assembly, transf, 0, 0, "BEAM_ANGLE.prt", "ASM_TOP", "ASM_FRONT", "ASM_RIGHT", "DTM2", "FRONT", "RIGHT");
alert("The first part's feature ID is: " + featID);
// BEAM_ANGLE.prt's middle plane is middle_green; the other locating planes are red and blue
// AXLE_10.prt's middle plane is DTM1; Right and Front are its locating planes
// featID, 0 refers to the BEAM_ANGLE.prt part; "middle_green", "red", "blue" are its locating planes
// AXLE_10.prt's locating planes are "DTM1" (green), "RIGHT" (red), "FRONT" (blue)
three_plane_assembly(session, assembly, transf, featID, 0, "AXLE_10.prt", "MIDDLE_GREEN", "RED", "BLUE", "DTM1", "RIGHT", "FRONT");
alert("AXLE_10.prt has been positioned!");
// Regenerate and repaint the assembly file
assembly.Regenerate(void null);
session.GetModelWindow(assembly).Repaint();
</script>
</body>
</html>
'''
        return outstring
| gpl-2.0 |
OsirisSPS/osiris-sps | client/share/plugins/AF9A4C281070FDB0F34CF417CDB168AB38C8A388/lib/plat-irix6/cdplayer.py | 33 | 3112 |
# This file implements a class which forms an interface to the .cdplayerrc
# file that is maintained by SGI's cdplayer program.
#
# Usage is as follows:
#
# import readcd
# r = readcd.Readcd()
# c = Cdplayer(r.gettrackinfo())
#
# Now you can use c.artist, c.title and c.track[trackno] (where trackno
# starts at 1). When the CD is not recognized, all values will be the empty
# string.
# It is also possible to set the above mentioned variables to new values.
# You can then use c.write() to write out the changed values to the
# .cdplayerrc file.
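#
# A sketch of that round trip (hedged: assumes an SGI system where the
# readcd module is available; it mirrors the comments above rather than a
# tested run):
#
#   import readcd
#   r = readcd.Readcd()
#   c = Cdplayer(r.gettrackinfo())
#   c.artist = 'Some Artist'
#   c.write()                  # rewrites ~/.cdplayerrc with the new value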
from warnings import warnpy3k
warnpy3k("the cdplayer module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
cdplayerrc = '.cdplayerrc'
class Cdplayer:
    def __init__(self, tracklist):
        import string
        self.artist = ''
        self.title = ''
        if type(tracklist) == type(''):
            t = []
            for i in range(2, len(tracklist), 4):
                t.append((None,
                          (int(tracklist[i:i+2]),
                           int(tracklist[i+2:i+4]))))
            tracklist = t
        self.track = [None] + [''] * len(tracklist)
        self.id = 'd' + string.zfill(len(tracklist), 2)
        for track in tracklist:
            start, length = track
            self.id = self.id + string.zfill(length[0], 2) + \
                      string.zfill(length[1], 2)
        try:
            import posix
            f = open(posix.environ['HOME'] + '/' + cdplayerrc, 'r')
        except IOError:
            return
        import re
        reg = re.compile(r'^([^:]*):\t(.*)')
        s = self.id + '.'
        l = len(s)
        while 1:
            line = f.readline()
            if line == '':
                break
            if line[:l] == s:
                line = line[l:]
                match = reg.match(line)
                if not match:
                    print 'syntax error in ~/' + cdplayerrc
                    continue
                name, value = match.group(1, 2)
                if name == 'title':
                    self.title = value
                elif name == 'artist':
                    self.artist = value
                elif name[:5] == 'track':
                    trackno = int(name[6:])
                    self.track[trackno] = value
        f.close()
    def write(self):
        import posix
        filename = posix.environ['HOME'] + '/' + cdplayerrc
        try:
            old = open(filename, 'r')
        except IOError:
            old = open('/dev/null', 'r')
        new = open(filename + '.new', 'w')
        s = self.id + '.'
        l = len(s)
        while 1:
            line = old.readline()
            if line == '':
                break
            if line[:l] != s:
                new.write(line)
        new.write(self.id + '.title:\t' + self.title + '\n')
        new.write(self.id + '.artist:\t' + self.artist + '\n')
        for i in range(1, len(self.track)):
            new.write('%s.track.%r:\t%s\n' % (self.id, i, self.track[i]))
        old.close()
        new.close()
        posix.rename(filename + '.new', filename)
| gpl-3.0 |
redhat-openstack/nova | tools/xenserver/destroy_cached_images.py | 42 | 1897 |
"""
destroy_cached_images.py
This script is used to clean up Glance images that are cached in the SR. By
default, this script will only cleanup unused cached images.
Options:
--dry_run - Don't actually destroy the VDIs
--all_cached - Destroy all cached images instead of just unused cached
images.
"""
import eventlet
eventlet.monkey_patch()
import os
import sys
from oslo.config import cfg
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
                                                os.pardir,
                                                os.pardir,
                                                os.pardir))
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
    sys.path.insert(0, POSSIBLE_TOPDIR)
from nova import config
from nova import utils
from nova.virt.xenapi import driver as xenapi_driver
from nova.virt.xenapi import vm_utils
destroy_opts = [
    cfg.BoolOpt('all_cached',
                default=False,
                help='Destroy all cached images instead of just unused cached'
                     ' images.'),
    cfg.BoolOpt('dry_run',
                default=False,
                help='Don\'t actually delete the VDIs.')
]
CONF = cfg.CONF
CONF.register_cli_opts(destroy_opts)
def main():
    config.parse_args(sys.argv)
    utils.monkey_patch()

    xenapi = xenapi_driver.XenAPIDriver()
    session = xenapi._session

    sr_ref = vm_utils.safe_find_sr(session)
    destroyed = vm_utils.destroy_cached_images(
        session, sr_ref, all_cached=CONF.all_cached,
        dry_run=CONF.dry_run)

    if '--verbose' in sys.argv:
        print '\n'.join(destroyed)

    print "Destroyed %d cached VDIs" % len(destroyed)


if __name__ == "__main__":
    main()
| apache-2.0 |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/numpy/polynomial/chebyshev.py | 22 | 62902 |
"""
Objects for dealing with Chebyshev series.
This module provides a number of objects (mostly functions) useful for
dealing with Chebyshev series, including a `Chebyshev` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `chebdomain` -- Chebyshev series default domain, [-1,1].
- `chebzero` -- (Coefficients of the) Chebyshev series that evaluates
identically to 0.
- `chebone` -- (Coefficients of the) Chebyshev series that evaluates
identically to 1.
- `chebx` -- (Coefficients of the) Chebyshev series for the identity map,
``f(x) = x``.
Arithmetic
----------
- `chebadd` -- add two Chebyshev series.
- `chebsub` -- subtract one Chebyshev series from another.
- `chebmul` -- multiply two Chebyshev series.
- `chebdiv` -- divide one Chebyshev series by another.
- `chebpow` -- raise a Chebyshev series to a positive integer power.
- `chebval` -- evaluate a Chebyshev series at given points.
- `chebval2d` -- evaluate a 2D Chebyshev series at given points.
- `chebval3d` -- evaluate a 3D Chebyshev series at given points.
- `chebgrid2d` -- evaluate a 2D Chebyshev series on a Cartesian product.
- `chebgrid3d` -- evaluate a 3D Chebyshev series on a Cartesian product.
Calculus
--------
- `chebder` -- differentiate a Chebyshev series.
- `chebint` -- integrate a Chebyshev series.
Misc Functions
--------------
- `chebfromroots` -- create a Chebyshev series with specified roots.
- `chebroots` -- find the roots of a Chebyshev series.
- `chebvander` -- Vandermonde-like matrix for Chebyshev polynomials.
- `chebvander2d` -- Vandermonde-like matrix for 2D Chebyshev series.
- `chebvander3d` -- Vandermonde-like matrix for 3D Chebyshev series.
- `chebgauss` -- Gauss-Chebyshev quadrature, points and weights.
- `chebweight` -- Chebyshev weight function.
- `chebcompanion` -- symmetrized companion matrix in Chebyshev form.
- `chebfit` -- least-squares fit returning a Chebyshev series.
- `chebpts1` -- Chebyshev points of the first kind.
- `chebpts2` -- Chebyshev points of the second kind.
- `chebtrim` -- trim leading coefficients from a Chebyshev series.
- `chebline` -- Chebyshev series representing given straight line.
- `cheb2poly` -- convert a Chebyshev series to a polynomial.
- `poly2cheb` -- convert a polynomial to a Chebyshev series.
Classes
-------
- `Chebyshev` -- A Chebyshev series class.
See also
--------
`numpy.polynomial`
Notes
-----
The implementations of multiplication, division, integration, and
differentiation use the algebraic identities [1]_:
.. math ::
T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\
z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}.
where
.. math :: x = \\frac{z + z^{-1}}{2}.
These identities allow a Chebyshev series to be expressed as a finite,
symmetric Laurent series. In this module, this sort of Laurent series
is referred to as a "z-series."
References
----------
.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev
Polynomials," *Journal of Statistical Planning and Inference 14*, 2008
(preprint: http://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4)
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline', 'chebadd',
'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', 'chebval',
'chebder', 'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots',
'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1',
'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d',
'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion',
'chebgauss', 'chebweight']
chebtrim = pu.trimcoef
#
# A collection of functions for manipulating z-series. These are private
# functions and do minimal error checking.
#
def _cseries_to_zseries(c):
"""Covert Chebyshev series to z-series.
Covert a Chebyshev series to the equivalent z-series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
Parameters
----------
c : 1-D ndarray
Chebyshev coefficients, ordered from low to high
Returns
-------
zs : 1-D ndarray
Odd length symmetric z-series, ordered from low to high.
"""
    n = c.size
    zs = np.zeros(2*n-1, dtype=c.dtype)
    zs[n-1:] = c/2
    return zs + zs[::-1]
def _zseries_to_cseries(zs):
"""Covert z-series to a Chebyshev series.
Covert a z series to the equivalent Chebyshev series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
Parameters
----------
zs : 1-D ndarray
Odd length symmetric z-series, ordered from low to high.
Returns
-------
c : 1-D ndarray
Chebyshev coefficients, ordered from low to high.
"""
    n = (zs.size + 1)//2
    c = zs[n-1:].copy()
    c[1:n] *= 2
    return c
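# Round-trip illustration of the two helpers above (values follow directly
# from the definitions): the c-series [1., 2., 3.] maps to the symmetric
# z-series [1.5, 1., 1., 1., 1.5], and _zseries_to_cseries maps it back.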
def _zseries_mul(z1, z2):
"""Multiply two z-series.
Multiply two z-series to produce a z-series.
Parameters
----------
z1, z2 : 1-D ndarray
The arrays must be 1-D but this is not checked.
Returns
-------
product : 1-D ndarray
The product z-series.
Notes
-----
This is simply convolution. If symmetric/anti-symmetric z-series are
denoted by S/A then the following rules apply:
S*S, A*A -> S
S*A, A*S -> A
"""
    return np.convolve(z1, z2)
def _zseries_div(z1, z2):
"""Divide the first z-series by the second.
Divide `z1` by `z2` and return the quotient and remainder as z-series.
Warning: this implementation only applies when both z1 and z2 have the
same symmetry, which is sufficient for present purposes.
Parameters
----------
z1, z2 : 1-D ndarray
The arrays must be 1-D and have the same symmetry, but this is not
checked.
Returns
-------
(quotient, remainder) : 1-D ndarrays
Quotient and remainder as z-series.
Notes
-----
This is not the same as polynomial division on account of the desired form
of the remainder. If symmetric/anti-symmetric z-series are denoted by S/A
then the following rules apply:
S/S -> S,S
A/A -> S,A
The restriction to types of the same symmetry could be fixed but seems like
unneeded generality. There is no natural form for the remainder in the case
where there is no symmetry.
"""
    z1 = z1.copy()
    z2 = z2.copy()
    len1 = len(z1)
    len2 = len(z2)
    if len2 == 1:
        z1 /= z2
        return z1, z1[:1]*0
    elif len1 < len2:
        return z1[:1]*0, z1
    else:
        dlen = len1 - len2
        scl = z2[0]
        z2 /= scl
        quo = np.empty(dlen + 1, dtype=z1.dtype)
        i = 0
        j = dlen
        while i < j:
            r = z1[i]
            quo[i] = z1[i]
            quo[dlen - i] = r
            tmp = r*z2
            z1[i:i+len2] -= tmp
            z1[j:j+len2] -= tmp
            i += 1
            j -= 1
        r = z1[i]
        quo[i] = r
        tmp = r*z2
        z1[i:i+len2] -= tmp
        quo /= scl
        rem = z1[i+1:i-1+len2].copy()
        return quo, rem
def _zseries_der(zs):
"""Differentiate a z-series.
The derivative is with respect to x, not z. This is achieved using the
chain rule and the value of dx/dz given in the module notes.
Parameters
----------
zs : z-series
The z-series to differentiate.
Returns
-------
derivative : z-series
The derivative
Notes
-----
The zseries for x (ns) has been multiplied by two in order to avoid
using floats that are incompatible with Decimal and likely other
specialized scalar types. This scaling has been compensated by
multiplying the value of zs by two also so that the two cancels in the
division.
"""
    n = len(zs)//2
    ns = np.array([-1, 0, 1], dtype=zs.dtype)
    zs *= np.arange(-n, n+1)*2
    d, r = _zseries_div(zs, ns)
    return d
def _zseries_int(zs):
"""Integrate a z-series.
The integral is with respect to x, not z. This is achieved by a change
of variable using dx/dz given in the module notes.
Parameters
----------
zs : z-series
The z-series to integrate
Returns
-------
integral : z-series
The indefinite integral
Notes
-----
The zseries for x (ns) has been multiplied by two in order to avoid
using floats that are incompatible with Decimal and likely other
specialized scalar types. This scaling has been compensated by
dividing the resulting zs by two.
"""
    n = 1 + len(zs)//2
    ns = np.array([-1, 0, 1], dtype=zs.dtype)
    zs = _zseries_mul(zs, ns)
    div = np.arange(-n, n+1)*2
    zs[:n] /= div[:n]
    zs[n+1:] /= div[n+1:]
    zs[n] = 0
    return zs
#
# Chebyshev series functions
#
def poly2cheb(pol):
"""
Convert a polynomial to a Chebyshev series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Chebyshev series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Chebyshev
series.
See Also
--------
cheb2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> p = P.Polynomial(range(4))
>>> p
Polynomial([ 0., 1., 2., 3.], [-1., 1.])
>>> c = p.convert(kind=P.Chebyshev)
>>> c
Chebyshev([ 1. , 3.25, 1. , 0.75], [-1., 1.])
>>> P.poly2cheb(range(4))
array([ 1. , 3.25, 1. , 0.75])
"""
    [pol] = pu.as_series([pol])
    deg = len(pol) - 1
    res = 0
    for i in range(deg, -1, -1):
        res = chebadd(chebmulx(res), pol[i])
    return res
def cheb2poly(c):
"""
Convert a Chebyshev series to a polynomial.
Convert an array representing the coefficients of a Chebyshev series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Chebyshev series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2cheb
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> c = P.Chebyshev(range(4))
>>> c
Chebyshev([ 0., 1., 2., 3.], [-1., 1.])
>>> p = c.convert(kind=P.Polynomial)
>>> p
Polynomial([ -2., -8., 4., 12.], [-1., 1.])
>>> P.cheb2poly(range(4))
array([ -2., -8., 4., 12.])
"""
    from .polynomial import polyadd, polysub, polymulx

    [c] = pu.as_series([c])
    n = len(c)
    if n < 3:
        return c
    else:
        c0 = c[-2]
        c1 = c[-1]
        # i is the current degree of c1
        for i in range(n - 1, 1, -1):
            tmp = c0
            c0 = polysub(c[i - 2], c1)
            c1 = polyadd(tmp, polymulx(c1)*2)
        return polyadd(c0, polymulx(c1))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Chebyshev default domain.
chebdomain = np.array([-1, 1])
# Chebyshev coefficients representing zero.
chebzero = np.array([0])
# Chebyshev coefficients representing one.
chebone = np.array([1])
# Chebyshev coefficients representing the identity x.
chebx = np.array([0, 1])
def chebline(off, scl):
"""
Chebyshev series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Chebyshev series for
``off + scl*x``.
See Also
--------
polyline
Examples
--------
>>> import numpy.polynomial.chebyshev as C
>>> C.chebline(3,2)
array([3, 2])
>>> C.chebval(-3, C.chebline(3,2)) # should be -3
-3.0
"""
    if scl != 0:
        return np.array([off, scl])
    else:
        return np.array([off])
def chebfromroots(roots):
"""
Generate a Chebyshev series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in Chebyshev form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in Chebyshev form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, legfromroots, lagfromroots, hermfromroots,
hermefromroots.
Examples
--------
>>> import numpy.polynomial.chebyshev as C
>>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis
array([ 0. , -0.25, 0. , 0.25])
>>> j = complex(0,1)
>>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis
array([ 1.5+0.j, 0.0+0.j, 0.5+0.j])
"""
    if len(roots) == 0:
        return np.ones(1)
    else:
        [roots] = pu.as_series([roots], trim=False)
        roots.sort()
        p = [chebline(-r, 1) for r in roots]
        n = len(p)
        while n > 1:
            m, r = divmod(n, 2)
            tmp = [chebmul(p[i], p[i+m]) for i in range(m)]
            if r:
                tmp[0] = chebmul(tmp[0], p[-1])
            p = tmp
            n = m
        return p[0]
def chebadd(c1, c2):
"""
Add one Chebyshev series to another.
Returns the sum of two Chebyshev series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Chebyshev series of their sum.
See Also
--------
chebsub, chebmul, chebdiv, chebpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Chebyshev series
is a Chebyshev series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebadd(c1,c2)
array([ 4., 4., 4.])
"""
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    if len(c1) > len(c2):
        c1[:c2.size] += c2
        ret = c1
    else:
        c2[:c1.size] += c1
        ret = c2
    return pu.trimseq(ret)
def chebsub(c1, c2):
"""
Subtract one Chebyshev series from another.
Returns the difference of two Chebyshev series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Chebyshev series coefficients representing their difference.
See Also
--------
chebadd, chebmul, chebdiv, chebpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Chebyshev
series is a Chebyshev series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebsub(c1,c2)
array([-2., 0., 2.])
>>> C.chebsub(c2,c1) # -C.chebsub(c1,c2)
array([ 2., 0., -2.])
"""
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    if len(c1) > len(c2):
        c1[:c2.size] -= c2
        ret = c1
    else:
        c2 = -c2
        c2[:c1.size] += c1
        ret = c2
    return pu.trimseq(ret)
def chebmulx(c):
"""Multiply a Chebyshev series by x.
Multiply the polynomial `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
.. versionadded:: 1.5.0
"""
    # c is a trimmed copy
    [c] = pu.as_series([c])
    # The zero series needs special treatment
    if len(c) == 1 and c[0] == 0:
        return c

    prd = np.empty(len(c) + 1, dtype=c.dtype)
    prd[0] = c[0]*0
    prd[1] = c[0]
    if len(c) > 1:
        tmp = c[1:]/2
        prd[2:] = tmp
        prd[0:-2] += tmp
    return prd
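# For example, by the recurrence x*T_n = (T_{n+1} + T_{n-1})/2, multiplying
# the series [1, 2, 3] by x gives chebmulx([1, 2, 3]) -> [1., 2.5, 1., 1.5].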
def chebmul(c1, c2):
"""
Multiply one Chebyshev series by another.
Returns the product of two Chebyshev series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Chebyshev series coefficients representing their product.
See Also
--------
chebadd, chebsub, chebdiv, chebpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Chebyshev polynomial basis set. Thus, to express
the product as a C-series, it is typically necessary to "reproject"
the product onto said basis set, which typically produces
"unintuitive live" (but correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebmul(c1,c2) # multiplication requires "reprojection"
array([ 6.5, 12. , 12. , 4. , 1.5])
"""
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    z1 = _cseries_to_zseries(c1)
    z2 = _cseries_to_zseries(c2)
    prd = _zseries_mul(z1, z2)
    ret = _zseries_to_cseries(prd)
    return pu.trimseq(ret)
def chebdiv(c1, c2):
"""
Divide one Chebyshev series by another.
Returns the quotient-with-remainder of two Chebyshev series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
[quo, rem] : ndarrays
Of Chebyshev series coefficients representing the quotient and
remainder.
See Also
--------
chebadd, chebsub, chebmul, chebpow
Notes
-----
In general, the (polynomial) division of one C-series by another
results in quotient and remainder terms that are not in the Chebyshev
polynomial basis set. Thus, to express these results as C-series, it
is typically necessary to "reproject" the results onto said basis
set, which typically produces "unintuitive" (but correct) results;
see Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not
(array([ 3.]), array([-8., -4.]))
>>> c2 = (0,1,2,3)
>>> C.chebdiv(c2,c1) # neither "intuitive"
(array([ 0., 2.]), array([-2., -4.]))
"""
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    if c2[-1] == 0:
        raise ZeroDivisionError()

    lc1 = len(c1)
    lc2 = len(c2)
    if lc1 < lc2:
        return c1[:1]*0, c1
    elif lc2 == 1:
        return c1/c2[-1], c1[:1]*0
    else:
        z1 = _cseries_to_zseries(c1)
        z2 = _cseries_to_zseries(c2)
        quo, rem = _zseries_div(z1, z2)
        quo = pu.trimseq(_zseries_to_cseries(quo))
        rem = pu.trimseq(_zseries_to_cseries(rem))
        return quo, rem
def chebpow(c, pow, maxpower=16):
"""Raise a Chebyshev series to a power.
Returns the Chebyshev series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2.``
Parameters
----------
c : array_like
1-D array of Chebyshev series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Chebyshev series of power.
See Also
--------
chebadd, chebsub, chebmul, chebdiv
Examples
--------
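A hedged illustration (value computed by hand from the z-series product
used in this module; exact array formatting varies by numpy version):

>>> from numpy.polynomial import chebyshev as C
>>> C.chebpow((1, 2, 3), 2)
array([  7.5,  11. ,   8. ,   6. ,   4.5])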
"""
    # c is a trimmed copy
    [c] = pu.as_series([c])
    power = int(pow)
    if power != pow or power < 0:
        raise ValueError("Power must be a non-negative integer.")
    elif maxpower is not None and power > maxpower:
        raise ValueError("Power is too large")
    elif power == 0:
        return np.array([1], dtype=c.dtype)
    elif power == 1:
        return c
    else:
        # This can be made more efficient by using powers of two
        # in the usual way.
        zs = _cseries_to_zseries(c)
        prd = zs
        for i in range(2, power + 1):
            prd = np.convolve(prd, zs)
        return _zseries_to_cseries(prd)
def chebder(c, m=1, scl=1, axis=0):
"""
Differentiate a Chebyshev series.
Returns the Chebyshev series coefficients `c` differentiated `m` times
along `axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*T_0 + 2*T_1 + 3*T_2``
while [[1,2],[1,2]] represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) +
2*T_0(x)*T_1(y) + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
Array of Chebyshev series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Chebyshev series of the derivative.
See Also
--------
chebint
Notes
-----
In general, the result of differentiating a C-series needs to be
"reprojected" onto the C-series basis set. Thus, typically, the
result of this function is "unintuitive," albeit correct; see Examples
section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c = (1,2,3,4)
>>> C.chebder(c)
array([ 14., 12., 24.])
>>> C.chebder(c,3)
array([ 96.])
>>> C.chebder(c,scl=-1)
array([-14., -12., -24.])
>>> C.chebder(c,2,-1)
array([ 12., 96.])
"""
    c = np.array(c, ndmin=1, copy=1)
    if c.dtype.char in '?bBhHiIlLqQpP':
        c = c.astype(np.double)
    cnt, iaxis = [int(t) for t in [m, axis]]

    if cnt != m:
        raise ValueError("The order of derivation must be integer")
    if cnt < 0:
        raise ValueError("The order of derivation must be non-negative")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    if not -c.ndim <= iaxis < c.ndim:
        raise ValueError("The axis is out of range")
    if iaxis < 0:
        iaxis += c.ndim

    if cnt == 0:
        return c

    c = np.rollaxis(c, iaxis)
    n = len(c)
    if cnt >= n:
        c = c[:1]*0
    else:
        for i in range(cnt):
            n = n - 1
            c *= scl
            der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
            for j in range(n, 2, -1):
                der[j - 1] = (2*j)*c[j]
                c[j - 2] += (j*c[j])/(j - 2)
            if n > 1:
                der[1] = 4*c[2]
            der[0] = c[1]
            c = der
    c = np.rollaxis(c, 0, iaxis + 1)
    return c
def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Chebyshev series.
Returns the Chebyshev series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``T_0 + 2*T_1 + 3*T_2`` while [[1,2],[1,2]]
represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + 2*T_0(x)*T_1(y) +
2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Chebyshev series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at zero
is the first value in the list, the value of the second integral
at zero is the second value, etc. If ``k == []`` (the default),
all constants are set to zero. If ``m == 1``, a single scalar can
be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
C-series coefficients of the integral.
Raises
------
ValueError
If ``m < 1``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
chebder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c = (1,2,3)
>>> C.chebint(c)
array([ 0.5, -0.5, 0.5, 0.5])
>>> C.chebint(c,3)
array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667,
0.00625 ])
>>> C.chebint(c, k=3)
array([ 3.5, -0.5, 0.5, 0.5])
>>> C.chebint(c,lbnd=-2)
array([ 8.5, -0.5, 0.5, 0.5])
>>> C.chebint(c,scl=-2)
array([-1., 1., -1., -1.])
"""
    c = np.array(c, ndmin=1, copy=1)
    if c.dtype.char in '?bBhHiIlLqQpP':
        c = c.astype(np.double)
    if not np.iterable(k):
        k = [k]
    cnt, iaxis = [int(t) for t in [m, axis]]

    if cnt != m:
        raise ValueError("The order of integration must be integer")
    if cnt < 0:
        raise ValueError("The order of integration must be non-negative")
    if len(k) > cnt:
        raise ValueError("Too many integration constants")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    if not -c.ndim <= iaxis < c.ndim:
        raise ValueError("The axis is out of range")
    if iaxis < 0:
        iaxis += c.ndim

    if cnt == 0:
        return c

    c = np.rollaxis(c, iaxis)
    k = list(k) + [0]*(cnt - len(k))
    for i in range(cnt):
        n = len(c)
        c *= scl
        if n == 1 and np.all(c[0] == 0):
            c[0] += k[i]
        else:
            tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
            tmp[0] = c[0]*0
            tmp[1] = c[0]
            if n > 1:
                tmp[2] = c[1]/4
            for j in range(2, n):
                t = c[j]/(2*j + 1)  # note: t is computed but never used
                tmp[j + 1] = c[j]/(2*(j + 1))
                tmp[j - 1] -= c[j]/(2*(j - 1))
            tmp[0] += k[i] - chebval(lbnd, tmp)
            c = tmp
    c = np.rollaxis(c, 0, iaxis + 1)
    return c
def chebval(x, c, tensor=True):
"""
Evaluate a Chebyshev series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
scalars have shape (,).
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
chebval2d, chebgrid2d, chebval3d, chebgrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
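A hedged illustration (T_0(2)=1, T_1(2)=2, T_2(2)=7, so the series
(1, 2, 3) evaluates to 1 + 4 + 21):

>>> from numpy.polynomial import chebyshev as C
>>> C.chebval(2, (1, 2, 3))
26.0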
"""
    c = np.array(c, ndmin=1, copy=1)
    if c.dtype.char in '?bBhHiIlLqQpP':
        c = c.astype(np.double)
    if isinstance(x, (tuple, list)):
        x = np.asarray(x)
    if isinstance(x, np.ndarray) and tensor:
        c = c.reshape(c.shape + (1,)*x.ndim)

    if len(c) == 1:
        c0 = c[0]
        c1 = 0
    elif len(c) == 2:
        c0 = c[0]
        c1 = c[1]
    else:
        x2 = 2*x
        c0 = c[-2]
        c1 = c[-1]
        for i in range(3, len(c) + 1):
            tmp = c0
            c0 = c[-i] - c1
            c1 = tmp + c1*x2
    return c0 + c1*x
def chebval2d(x, y, c):
"""
Evaluate a 2-D Chebyshev series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * T_i(x) * T_j(y)
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than 2 the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional Chebyshev series at points formed
from pairs of corresponding values from `x` and `y`.
See Also
--------
chebval, chebgrid2d, chebval3d, chebgrid3d
Notes
-----
.. versionadded::1.7.0
"""
    try:
        x, y = np.array((x, y), copy=0)
    except:
        raise ValueError('x, y are incompatible')

    c = chebval(x, c)
    c = chebval(y, c, tensor=False)
    return c
def chebgrid2d(x, y, c):
"""
Evaluate a 2-D Chebyshev series on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \\sum_{i,j} c_{i,j} * T_i(a) * T_j(b),
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional Chebyshev series at points in the
Cartesian product of `x` and `y`.
See Also
--------
chebval, chebval2d, chebval3d, chebgrid3d
Notes
-----
.. versionadded::1.7.0
"""
    c = chebval(x, c)
    c = chebval(y, c)
    return c
def chebval3d(x, y, z, c):
"""
Evaluate a 3-D Chebyshev series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * T_i(x) * T_j(y) * T_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
they are tuples or lists, otherwise they are treated as scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
chebval, chebval2d, chebgrid2d, chebgrid3d
Notes
-----
.. versionadded::1.7.0
"""
    try:
        x, y, z = np.array((x, y, z), copy=0)
    except:
        raise ValueError('x, y, z are incompatible')

    c = chebval(x, c)
    c = chebval(y, c, tensor=False)
    c = chebval(z, c, tensor=False)
    return c
def chebgrid3d(x, y, z, c):
"""
Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * T_i(a) * T_j(b) * T_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
are tuples or lists, otherwise they are treated as scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than three the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the three dimensional Chebyshev series at points in the
Cartesian product of `x`, `y`, and `z`.
See Also
--------
chebval, chebval2d, chebgrid2d, chebval3d
Notes
-----
.. versionadded::1.7.0
"""
    c = chebval(x, c)
    c = chebval(y, c)
    c = chebval(z, c)
    return c
def chebvander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = T_i(x),
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the Chebyshev polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
matrix ``V = chebvander(x, n)``, then ``np.dot(V, c)`` and
``chebval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of Chebyshev series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding Chebyshev polynomial. The dtype will be the same as
the converted `x`.
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
# Use forward recursion to generate the entries.
v[0] = x*0 + 1
if ideg > 0:
x2 = 2*x
v[1] = x
for i in range(2, ideg + 1):
v[i] = v[i-1]*x2 - v[i-2]
return np.rollaxis(v, 0, v.ndim)
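# For example, at sample points [-1, 0, 1] with deg=2 the columns hold T_0,
# T_1 and T_2 evaluated at x:
# chebvander([-1., 0., 1.], 2) -> array([[ 1., -1.,  1.],
#                                        [ 1.,  0., -1.],
#                                        [ 1.,  1.,  1.]])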
def chebvander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., deg[1]*i + j] = T_i(x) * T_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the Chebyshev polynomials.
If ``V = chebvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``chebval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D Chebyshev
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
chebvander, chebvander3d, chebval2d, chebval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = chebvander(x, degx)
vy = chebvander(y, degy)
v = vx[..., None]*vy[..., None,:]
return v.reshape(v.shape[:-2] + (-1,))
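# Shape check (hedged): for x = y = np.ones(5) and deg = [2, 3], the result
# has shape (5, 12), since order = (2+1)*(3+1) = 12.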
def chebvander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
then the pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = T_i(x)*T_j(y)*T_k(z),
where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the Chebyshev polynomials.
If ``V = chebvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``chebval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D Chebyshev
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
    chebvander, chebvander2d, chebval2d, chebval3d
Notes
-----
    .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = chebvander(x, degx)
vy = chebvander(y, degy)
vz = chebvander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
return v.reshape(v.shape[:-3] + (-1,))
def chebfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Chebyshev series to data.
    Return the coefficients of a Chebyshev series of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int or 1-D array_like
Degree(s) of the fitting polynomials. If `deg` is a single integer
all terms up to and including the `deg`'th term are included in the
fit. For NumPy versions >= 1.11.0 a list of integers specifying the
degrees of the terms to include may be used instead.
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
.. versionadded:: 1.5.0
Returns
-------
    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Chebyshev coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
polyfit, legfit, lagfit, hermfit, hermefit
chebval : Evaluates a Chebyshev series.
chebvander : Vandermonde matrix of Chebyshev series.
chebweight : Chebyshev weight function.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the Chebyshev series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where :math:`w_j` are the weights. This problem is solved by setting up
as the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Chebyshev series are usually better conditioned than fits
using power series, but much can depend on the distribution of the
sample points and the smoothness of the data. If the quality of the fit
is inadequate splines may be a good alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
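    A minimal illustrative fit: recover the coefficients of
    ``x**3 - x``, which equals ``0.25*T_3(x) - 0.25*T_1(x)``.
    >>> x = np.linspace(-1, 1, 51)
    >>> c = chebfit(x, x**3 - x, 3)
    >>> np.allclose(c, [0, -0.25, 0, 0.25])
    True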
"""
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
deg = np.asarray(deg)
# check arguments.
if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
raise TypeError("deg must be an int or non-empty 1-D array of int")
if deg.min() < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
if deg.ndim == 0:
lmax = deg
order = lmax + 1
van = chebvander(x, lmax)
else:
deg = np.sort(deg)
lmax = deg[-1]
order = len(deg)
van = chebvander(x, lmax)[:, deg]
# set up the least squares matrices in transposed form
lhs = van.T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# Expand c to include non-fitted coefficients which are set to zero
if deg.ndim > 0:
if c.ndim == 2:
cc = np.zeros((lmax + 1, c.shape[1]), dtype=c.dtype)
else:
cc = np.zeros(lmax + 1, dtype=c.dtype)
cc[deg] = c
c = cc
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning, stacklevel=2)
if full:
return c, [resids, rank, s, rcond]
else:
return c
def chebcompanion(c):
"""Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
symmetric when `c` is a Chebyshev basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of Chebyshev series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
Notes
-----
    .. versionadded:: 1.7.0
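    Examples
    --------
    For a basis polynomial the scaled companion matrix is symmetric, so
    its eigenvalues (the roots) come out real (illustrative sketch):
    >>> m = chebcompanion([0, 0, 0, 1])  # coefficients of T_3
    >>> np.allclose(m, m.T)
    True
    >>> np.allclose(np.sort(np.linalg.eigvalsh(m)),
    ...             [-np.sqrt(3)/2, 0, np.sqrt(3)/2])
    True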
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = np.array([1.] + [np.sqrt(.5)]*(n-1))
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[0] = np.sqrt(.5)
top[1:] = 1/2
bot[...] = top
mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5
return mat
def chebroots(c):
"""
Compute the roots of a Chebyshev series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * T_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, legroots, lagroots, hermroots, hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
The Chebyshev series basis polynomials aren't powers of `x` so the
results of this function may seem unintuitive.
Examples
--------
>>> import numpy.polynomial.chebyshev as cheb
>>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots
array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0]/c[1]])
m = chebcompanion(c)
r = la.eigvals(m)
r.sort()
return r
def chebgauss(deg):
"""
Gauss-Chebyshev quadrature.
Computes the sample points and weights for Gauss-Chebyshev quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
the weight function :math:`f(x) = 1/\\sqrt{1 - x^2}`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
.. versionadded:: 1.7.0
The results have only been tested up to degree 100, higher degrees may
be problematic. For Gauss-Chebyshev there are closed form solutions for
the sample points and weights. If n = `deg`, then
.. math:: x_i = \\cos(\\pi (2 i - 1) / (2 n))
.. math:: w_i = \\pi / n
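    Examples
    --------
    A sketch with four nodes; the weights sum to pi and degree-2
    integrands are handled exactly:
    >>> x, w = chebgauss(4)
    >>> np.allclose(w.sum(), np.pi)
    True
    >>> np.allclose((w*x**2).sum(), np.pi/2)  # integral of x**2/sqrt(1 - x**2)
    True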
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
raise ValueError("deg must be a non-negative integer")
x = np.cos(np.pi * np.arange(1, 2*ideg, 2) / (2.0*ideg))
w = np.ones(ideg)*(np.pi/ideg)
return x, w
def chebweight(x):
"""
The weight function of the Chebyshev polynomials.
The weight function is :math:`1/\\sqrt{1 - x^2}` and the interval of
integration is :math:`[-1, 1]`. The Chebyshev polynomials are
orthogonal, but not normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
.. versionadded:: 1.7.0
"""
w = 1./(np.sqrt(1. + x) * np.sqrt(1. - x))
return w
def chebpts1(npts):
"""
Chebyshev points of the first kind.
The Chebyshev points of the first kind are the points ``cos(x)``,
where ``x = [pi*(k + .5)/npts for k in range(npts)]``.
Parameters
----------
npts : int
Number of sample points desired.
Returns
-------
pts : ndarray
The Chebyshev points of the first kind.
See Also
--------
chebpts2
Notes
-----
.. versionadded:: 1.5.0
"""
_npts = int(npts)
if _npts != npts:
raise ValueError("npts must be integer")
if _npts < 1:
raise ValueError("npts must be >= 1")
x = np.linspace(-np.pi, 0, _npts, endpoint=False) + np.pi/(2*_npts)
return np.cos(x)
def chebpts2(npts):
"""
Chebyshev points of the second kind.
The Chebyshev points of the second kind are the points ``cos(x)``,
where ``x = [pi*k/(npts - 1) for k in range(npts)]``.
Parameters
----------
npts : int
Number of sample points desired.
Returns
-------
pts : ndarray
The Chebyshev points of the second kind.
Notes
-----
.. versionadded:: 1.5.0
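    Examples
    --------
    The four second-kind points include the interval endpoints
    (illustrative):
    >>> np.allclose(chebpts2(4), [-1, -0.5, 0.5, 1])
    True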
"""
_npts = int(npts)
if _npts != npts:
raise ValueError("npts must be integer")
if _npts < 2:
raise ValueError("npts must be >= 2")
x = np.linspace(-np.pi, 0, _npts)
return np.cos(x)
#
# Chebyshev series class
#
class Chebyshev(ABCPolyBase):
"""A Chebyshev series class.
The Chebyshev class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
methods listed below.
Parameters
----------
coef : array_like
Chebyshev coefficients in order of increasing degree, i.e.,
``(1, 2, 3)`` gives ``1*T_0(x) + 2*T_1(x) + 3*T_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [-1, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
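    Examples
    --------
    A brief sketch of arithmetic and evaluation in the Chebyshev basis:
    >>> from numpy.polynomial import Chebyshev
    >>> p = Chebyshev([1, 2, 3])
    >>> (p + p).coef
    array([ 2.,  4.,  6.])
    >>> p(0)  # 1*T_0(0) + 2*T_1(0) + 3*T_2(0)
    -2.0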
"""
# Virtual Functions
_add = staticmethod(chebadd)
_sub = staticmethod(chebsub)
_mul = staticmethod(chebmul)
_div = staticmethod(chebdiv)
_pow = staticmethod(chebpow)
_val = staticmethod(chebval)
_int = staticmethod(chebint)
_der = staticmethod(chebder)
_fit = staticmethod(chebfit)
_line = staticmethod(chebline)
_roots = staticmethod(chebroots)
_fromroots = staticmethod(chebfromroots)
# Virtual properties
nickname = 'cheb'
domain = np.array(chebdomain)
window = np.array(chebdomain)
|
apache-2.0
|
nelango/ViralityAnalysis
|
model/lib/sklearn/tests/test_kernel_ridge.py
|
342
|
3027
|
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to a lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
|
mit
|
Pafcholini/emotion_tw_COI3
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
|
12527
|
1935
|
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
    if key not in dict:
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
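        # midpoint smoothing: average the previous average with the new
        # value, rather than maintaining a true running mean of all samples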
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
|
gpl-2.0
|
wfxiang08/django197
|
django/core/management/utils.py
|
405
|
2590
|
from __future__ import unicode_literals
import os
import sys
from subprocess import PIPE, Popen
from django.utils import six
from django.utils.encoding import DEFAULT_LOCALE_ENCODING, force_text
from .base import CommandError
def popen_wrapper(args, os_err_exc_type=CommandError, universal_newlines=True):
"""
Friendly wrapper around Popen.
Returns stdout output, stderr output and OS status code.
"""
try:
p = Popen(args, shell=False, stdout=PIPE, stderr=PIPE,
close_fds=os.name != 'nt', universal_newlines=universal_newlines)
except OSError as e:
strerror = force_text(e.strerror, DEFAULT_LOCALE_ENCODING,
strings_only=True)
six.reraise(os_err_exc_type, os_err_exc_type('Error executing %s: %s' %
(args[0], strerror)), sys.exc_info()[2])
output, errors = p.communicate()
return (
output,
force_text(errors, DEFAULT_LOCALE_ENCODING, strings_only=True),
p.returncode
)
def handle_extensions(extensions):
"""
Organizes multiple extensions that are separated with commas or passed by
using --extension/-e multiple times.
For example: running 'django-admin makemessages -e js,txt -e xhtml -a'
would result in an extension list: ['.js', '.txt', '.xhtml']
>>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py'])
{'.html', '.js', '.py'}
>>> handle_extensions(['.html, txt,.tpl'])
{'.html', '.tpl', '.txt'}
"""
ext_list = []
for ext in extensions:
ext_list.extend(ext.replace(' ', '').split(','))
for i, ext in enumerate(ext_list):
if not ext.startswith('.'):
ext_list[i] = '.%s' % ext_list[i]
return set(ext_list)
def find_command(cmd, path=None, pathext=None):
if path is None:
path = os.environ.get('PATH', '').split(os.pathsep)
if isinstance(path, six.string_types):
path = [path]
# check if there are funny path extensions for executables, e.g. Windows
if pathext is None:
pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD').split(os.pathsep)
# don't use extensions if the command ends with one of them
for ext in pathext:
if cmd.endswith(ext):
pathext = ['']
break
# check if we find the command on PATH
for p in path:
f = os.path.join(p, cmd)
if os.path.isfile(f):
return f
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
return fext
return None
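# Illustrative usage sketch (results depend on the local PATH):
#   find_command('django-admin')   # e.g. '/usr/local/bin/django-admin', or None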
|
bsd-3-clause
|
YinongLong/scikit-learn
|
sklearn/decomposition/tests/test_kernel_pca.py
|
74
|
8472
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed.size, 0)
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_consistent_transform():
# X_fit_ needs to retain the old, unmodified copy of X
state = np.random.RandomState(0)
X = state.rand(10, 10)
kpca = KernelPCA(random_state=state).fit(X)
transformed1 = kpca.transform(X)
X_copy = X.copy()
X[:, 0] = 666
transformed2 = kpca.transform(X_copy)
assert_array_almost_equal(transformed1, transformed2)
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
|
bsd-3-clause
|
havard024/prego
|
sales/urls.py
|
3
|
7025
|
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Sales module URLs
"""
from django.conf.urls import patterns, url
urlpatterns = patterns('treeio.sales.views',
url(r'^(\.(?P<response_format>\w+))?$', 'index', name='sales'),
url(r'^index(\.(?P<response_format>\w+))?/?$', 'index',
name='sales_index'),
url(r'^index/open(\.(?P<response_format>\w+))?/?$', 'index_open',
name='sales_index_open'),
url(r'^index/assigned(\.(?P<response_format>\w+))?/?$', 'index_assigned',
name='sales_index_assigned'),
# Orders
url(r'^order/add(\.(?P<response_format>\w+))?/?$', 'order_add',
name='sales_order_add'),
url(r'^order/add/lead/(?P<lead_id>\w+)(\.(?P<response_format>\w+))?/?$',
'order_add', name='sales_order_add_with_lead'),
url(r'^order/add/opportunity/(?P<opportunity_id>\w+)(\.(?P<response_format>\w+))?/?$',
'order_add', name='sales_order_add_with_opportunity'),
url(r'^order/edit/(?P<order_id>\d+)(\.(?P<response_format>\w+))?/?$',
'order_edit', name='sales_order_edit'),
url(r'^order/view/(?P<order_id>\d+)(\.(?P<response_format>\w+))?/?$',
'order_view', name='sales_order_view'),
url(r'^order/invoice/(?P<order_id>\d+)(\.(?P<response_format>\w+))?/?$',
'order_invoice_view', name='sales_order_invoice_view'),
url(r'^order/delete/(?P<order_id>\d+)(\.(?P<response_format>\w+))?/?$',
'order_delete', name='sales_order_delete'),
# Products
url(r'^product/index(\.(?P<response_format>\w+))?/?$',
'product_index', name='sales_product_index'),
url(r'^product/add/(?P<parent_id>\d+)(\.(?P<response_format>\w+))?/?$',
'product_add', name='sales_product_add'),
url(r'^product/add(\.(?P<response_format>\w+))?/?$',
'product_add', name='sales_product_add'),
url(r'^product/edit/(?P<product_id>\d+)(\.(?P<response_format>\w+))?/?$',
'product_edit', name='sales_product_edit'),
url(r'^product/view/(?P<product_id>\d+)(\.(?P<response_format>\w+))?/?$',
'product_view', name='sales_product_view'),
url(r'^product/delete/(?P<product_id>\d+)(\.(?P<response_format>\w+))?/?$',
'product_delete', name='sales_product_delete'),
# Settings
url(r'^settings/view(\.(?P<response_format>\w+))?/?$',
'settings_view', name='sales_settings_view'),
url(r'^settings/edit(\.(?P<response_format>\w+))?/?$',
'settings_edit', name='sales_settings_edit'),
# Statuses
url(r'^status/add(\.(?P<response_format>\w+))?/?$',
'status_add', name='sales_status_add'),
url(r'^status/edit/(?P<status_id>\d+)(\.(?P<response_format>\w+))?/?$',
'status_edit', name='sales_status_edit'),
url(r'^status/view/(?P<status_id>\d+)(\.(?P<response_format>\w+))?/?$',
'status_view', name='sales_status_view'),
url(r'^status/delete/(?P<status_id>\d+)(\.(?P<response_format>\w+))?/?$',
'status_delete', name='sales_status_delete'),
# Subscriptions
url(r'^subscription/add(\.(?P<response_format>\w+))?/?$',
'subscription_add', name='sales_subscription_add'),
url(r'^subscription/add/order/(?P<order_id>\w+)/product/(?P<product_id>\w+)(\.(?P<response_format>\w+))?/?$',
'subscription_add', name='sales_subscription_add_with_order_and_product'),
url(r'^subscription/add/(?P<productset_id>\w+)(\.(?P<response_format>\w+))?/?$',
'subscription_add', name='sales_subscription_add_with_product'),
url(r'^subscription/edit/(?P<subscription_id>\d+)(\.(?P<response_format>\w+))?/?$',
'subscription_edit', name='sales_subscription_edit'),
url(r'^subscription/view/(?P<subscription_id>\d+)(\.(?P<response_format>\w+))?/?$',
'subscription_view', name='sales_subscription_view'),
url(r'^subscription/delete/(?P<subscription_id>\d+)(\.(?P<response_format>\w+))?/?$',
'subscription_delete', name='sales_subscription_delete'),
# Ordered Products
url(r'^ordered_product/add/(?P<order_id>\d+)(\.(?P<response_format>\w+))?/?$',
'ordered_product_add', name='sales_ordered_product_add'),
url(r'^ordered_product/edit/(?P<ordered_product_id>\d+)(\.(?P<response_format>\w+))?/?$',
'ordered_product_edit', name='sales_ordered_product_edit'),
url(r'^ordered_product/view/(?P<ordered_product_id>\d+)(\.(?P<response_format>\w+))?/?$',
'ordered_product_view', name='sales_ordered_product_view'),
url(r'^ordered_product/delete/(?P<ordered_product_id>\d+)(\.(?P<response_format>\w+))?/?$',
'ordered_product_delete', name='sales_ordered_product_delete'),
# Sources
url(r'^source/add(\.(?P<response_format>\w+))?/?$',
'source_add', name='sales_source_add'),
url(r'^source/edit/(?P<source_id>\d+)(\.(?P<response_format>\w+))?/?$',
'source_edit', name='sales_source_edit'),
url(r'^source/view/(?P<source_id>\d+)(\.(?P<response_format>\w+))?/?$',
'source_view', name='sales_source_view'),
url(r'^source/delete/(?P<source_id>\d+)(\.(?P<response_format>\w+))?/?$',
'source_delete', name='sales_source_delete'),
# Leads
url(r'^lead/index(\.(?P<response_format>\w+))?/?$',
'lead_index', name='sales_lead_index'),
url(r'^lead/index/assigned(\.(?P<response_format>\w+))?/?$',
'lead_index_assigned', name='sales_lead_index_assigned'),
url(r'^lead/add(\.(?P<response_format>\w+))?/?$',
'lead_add', name='sales_lead_add'),
url(r'^lead/edit/(?P<lead_id>\d+)(\.(?P<response_format>\w+))?/?$',
'lead_edit', name='sales_lead_edit'),
url(r'^lead/view/(?P<lead_id>\d+)(\.(?P<response_format>\w+))?/?$',
'lead_view', name='sales_lead_view'),
url(r'^lead/delete/(?P<lead_id>\d+)(\.(?P<response_format>\w+))?/?$',
'lead_delete', name='sales_lead_delete'),
# Opportunities
url(r'^opportunity/index(\.(?P<response_format>\w+))?/?$',
'opportunity_index', name='sales_opportunity_index'),
url(r'^opportunity/index/assigned(\.(?P<response_format>\w+))?/?$',
'opportunity_index_assigned', name='sales_opportunity_index_assigned'),
url(r'^opportunity/add(\.(?P<response_format>\w+))?/?$',
'opportunity_add', name='sales_opportunity_add'),
url(r'^opportunity/add/lead/(?P<lead_id>\w+)(\.(?P<response_format>\w+))?/?$',
'opportunity_add', name='sales_opportunity_add_with_lead'),
url(r'^opportunity/edit/(?P<opportunity_id>\d+)(\.(?P<response_format>\w+))?/?$',
'opportunity_edit', name='sales_opportunity_edit'),
url(r'^opportunity/view/(?P<opportunity_id>\d+)(\.(?P<response_format>\w+))?/?$',
'opportunity_view', name='sales_opportunity_view'),
url(r'^opportunity/delete/(?P<opportunity_id>\d+)(\.(?P<response_format>\w+))?/?$',
'opportunity_delete', name='sales_opportunity_delete'),
# AJAX lookups
url(r'^ajax/subscription(\.(?P<response_format>\w+))?/?$',
'ajax_subscription_lookup', name='sales_ajax_subscription_lookup'),
)
|
mit
|
denisshockwave/image_processing_ocr_server
|
venv/lib/python2.7/site-packages/pip/vendor/html5lib/serializer/htmlserializer.py
|
79
|
12467
|
from __future__ import absolute_import, division, unicode_literals
from pip.vendor.six import text_type
import gettext
_ = gettext.gettext
try:
from functools import reduce
except ImportError:
pass
from ..constants import voidElements, booleanAttributes, spaceCharacters
from ..constants import rcdataElements, entities, xmlEntities
from .. import utils
from xml.sax.saxutils import escape
spaceCharacters = "".join(spaceCharacters)
try:
from codecs import register_error, xmlcharrefreplace_errors
except ImportError:
unicode_encode_errors = "strict"
else:
unicode_encode_errors = "htmlentityreplace"
encode_entity_map = {}
is_ucs4 = len("\U0010FFFF") == 1
for k, v in list(entities.items()):
# skip multi-character entities
if ((is_ucs4 and len(v) > 1) or
(not is_ucs4 and len(v) > 2)):
continue
if v != "&":
if len(v) == 2:
v = utils.surrogatePairToCodepoint(v)
else:
v = ord(v)
if not v in encode_entity_map or k.islower():
                # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
encode_entity_map[v] = k
def htmlentityreplace_errors(exc):
if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
res = []
codepoints = []
skip = False
for i, c in enumerate(exc.object[exc.start:exc.end]):
if skip:
skip = False
continue
index = i + exc.start
if utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
codepoint = utils.surrogatePairToCodepoint(exc.object[index:index + 2])
skip = True
else:
codepoint = ord(c)
codepoints.append(codepoint)
for cp in codepoints:
e = encode_entity_map.get(cp)
if e:
res.append("&")
res.append(e)
if not e.endswith(";"):
res.append(";")
else:
res.append("&#x%s;" % (hex(cp)[2:]))
return ("".join(res), exc.end)
else:
return xmlcharrefreplace_errors(exc)
register_error(unicode_encode_errors, htmlentityreplace_errors)
del register_error
class HTMLSerializer(object):
# attribute quoting options
quote_attr_values = False
quote_char = '"'
use_best_quote_char = True
# tag syntax options
omit_optional_tags = True
minimize_boolean_attributes = True
use_trailing_solidus = False
space_before_trailing_solidus = True
# escaping options
escape_lt_in_attrs = False
escape_rcdata = False
resolve_entities = True
# miscellaneous options
inject_meta_charset = True
strip_whitespace = False
sanitize = False
options = ("quote_attr_values", "quote_char", "use_best_quote_char",
"minimize_boolean_attributes", "use_trailing_solidus",
"space_before_trailing_solidus", "omit_optional_tags",
"strip_whitespace", "inject_meta_charset", "escape_lt_in_attrs",
"escape_rcdata", "resolve_entities", "sanitize")
def __init__(self, **kwargs):
"""Initialize HTMLSerializer.
Keyword options (default given first unless specified) include:
inject_meta_charset=True|False
          Whether to insert a meta element to define the character set of the
document.
quote_attr_values=True|False
Whether to quote attribute values that don't require quoting
per HTML5 parsing rules.
quote_char=u'"'|u"'"
Use given quote character for attribute quoting. Default is to
use double quote unless attribute value contains a double quote,
in which case single quotes are used instead.
escape_lt_in_attrs=False|True
Whether to escape < in attribute values.
escape_rcdata=False|True
Whether to escape characters that need to be escaped within normal
elements within rcdata elements such as style.
resolve_entities=True|False
Whether to resolve named character entities that appear in the
          source tree. The XML predefined entities &lt; &gt; &amp; &quot; &apos;
are unaffected by this setting.
strip_whitespace=False|True
Whether to remove semantically meaningless whitespace. (This
compresses all whitespace to a single space except within pre.)
minimize_boolean_attributes=True|False
Shortens boolean attributes to give just the attribute value,
for example <input disabled="disabled"> becomes <input disabled>.
use_trailing_solidus=False|True
Includes a close-tag slash at the end of the start tag of void
elements (empty elements whose end tag is forbidden). E.g. <hr/>.
space_before_trailing_solidus=True|False
Places a space immediately before the closing slash in a tag
using a trailing solidus. E.g. <hr />. Requires use_trailing_solidus.
sanitize=False|True
Strip all unsafe or unknown constructs from output.
See `html5lib user documentation`_
omit_optional_tags=True|False
Omit start/end tags that are optional.
.. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation
"""
if 'quote_char' in kwargs:
self.use_best_quote_char = False
for attr in self.options:
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
self.errors = []
self.strict = False
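    # Illustrative usage sketch (assumes the sibling html5lib parse and
    # treewalker APIs from the full package layout):
    #   import html5lib
    #   dom = html5lib.parse("<p class=foo>Hi", treebuilder="dom")
    #   walker = html5lib.getTreeWalker("dom")
    #   html = HTMLSerializer(omit_optional_tags=False).render(walker(dom))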
def encode(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, unicode_encode_errors)
else:
return string
def encodeStrict(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "strict")
else:
return string
def serialize(self, treewalker, encoding=None):
self.encoding = encoding
in_cdata = False
self.errors = []
if encoding and self.inject_meta_charset:
from ..filters.inject_meta_charset import Filter
treewalker = Filter(treewalker, encoding)
# XXX: WhitespaceFilter should be used before OptionalTagFilter
        # for maximum efficiency of this latter filter
if self.strip_whitespace:
from ..filters.whitespace import Filter
treewalker = Filter(treewalker)
if self.sanitize:
from ..filters.sanitizer import Filter
treewalker = Filter(treewalker)
if self.omit_optional_tags:
from ..filters.optionaltags import Filter
treewalker = Filter(treewalker)
for token in treewalker:
type = token["type"]
if type == "Doctype":
doctype = "<!DOCTYPE %s" % token["name"]
if token["publicId"]:
doctype += ' PUBLIC "%s"' % token["publicId"]
elif token["systemId"]:
doctype += " SYSTEM"
if token["systemId"]:
if token["systemId"].find('"') >= 0:
if token["systemId"].find("'") >= 0:
self.serializeError(_("System identifer contains both single and double quote characters"))
quote_char = "'"
else:
quote_char = '"'
doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)
doctype += ">"
yield self.encodeStrict(doctype)
elif type in ("Characters", "SpaceCharacters"):
if type == "SpaceCharacters" or in_cdata:
if in_cdata and token["data"].find("</") >= 0:
self.serializeError(_("Unexpected </ in CDATA"))
yield self.encode(token["data"])
else:
yield self.encode(escape(token["data"]))
elif type in ("StartTag", "EmptyTag"):
name = token["name"]
yield self.encodeStrict("<%s" % name)
if name in rcdataElements and not self.escape_rcdata:
in_cdata = True
elif in_cdata:
self.serializeError(_("Unexpected child element of a CDATA element"))
for (attr_namespace, attr_name), attr_value in token["data"].items():
# TODO: Add namespace support here
k = attr_name
v = attr_value
yield self.encodeStrict(' ')
yield self.encodeStrict(k)
if not self.minimize_boolean_attributes or \
(k not in booleanAttributes.get(name, tuple())
and k not in booleanAttributes.get("", tuple())):
yield self.encodeStrict("=")
if self.quote_attr_values or not v:
quote_attr = True
else:
quote_attr = reduce(lambda x, y: x or (y in v),
spaceCharacters + ">\"'=", False)
v = v.replace("&", "&")
if self.escape_lt_in_attrs:
v = v.replace("<", "<")
if quote_attr:
quote_char = self.quote_char
if self.use_best_quote_char:
if "'" in v and '"' not in v:
quote_char = '"'
elif '"' in v and "'" not in v:
quote_char = "'"
if quote_char == "'":
v = v.replace("'", "'")
else:
v = v.replace('"', """)
yield self.encodeStrict(quote_char)
yield self.encode(v)
yield self.encodeStrict(quote_char)
else:
yield self.encode(v)
if name in voidElements and self.use_trailing_solidus:
if self.space_before_trailing_solidus:
yield self.encodeStrict(" /")
else:
yield self.encodeStrict("/")
yield self.encode(">")
elif type == "EndTag":
name = token["name"]
if name in rcdataElements:
in_cdata = False
elif in_cdata:
self.serializeError(_("Unexpected child element of a CDATA element"))
yield self.encodeStrict("</%s>" % name)
elif type == "Comment":
data = token["data"]
if data.find("--") >= 0:
self.serializeError(_("Comment contains --"))
yield self.encodeStrict("<!--%s-->" % token["data"])
elif type == "Entity":
name = token["name"]
key = name + ";"
                if key not in entities:
self.serializeError(_("Entity %s not recognized" % name))
if self.resolve_entities and key not in xmlEntities:
data = entities[key]
else:
data = "&%s;" % name
yield self.encodeStrict(data)
else:
self.serializeError(token["data"])
def render(self, treewalker, encoding=None):
if encoding:
return b"".join(list(self.serialize(treewalker, encoding)))
else:
return "".join(list(self.serialize(treewalker)))
def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
# XXX The idea is to make data mandatory.
self.errors.append(data)
if self.strict:
raise SerializeError
class SerializeError(Exception):
"""Error in serialized tree"""
pass
|
gpl-3.0
|
azaghal/ansible
|
test/support/integration/plugins/modules/sefcontext.py
|
32
|
9795
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Dag Wieers (@dagwieers) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: sefcontext
short_description: Manages SELinux file context mapping definitions
description:
- Manages SELinux file context mapping definitions.
- Similar to the C(semanage fcontext) command.
version_added: '2.2'
options:
target:
description:
- Target path (expression).
type: str
required: yes
aliases: [ path ]
ftype:
description:
- The file type that should have SELinux contexts applied.
- "The following file type options are available:"
- C(a) for all files,
- C(b) for block devices,
- C(c) for character devices,
- C(d) for directories,
- C(f) for regular files,
- C(l) for symbolic links,
- C(p) for named pipes,
- C(s) for socket files.
type: str
choices: [ a, b, c, d, f, l, p, s ]
default: a
setype:
description:
- SELinux type for the specified target.
type: str
required: yes
seuser:
description:
- SELinux user for the specified target.
type: str
selevel:
description:
- SELinux range for the specified target.
type: str
aliases: [ serange ]
state:
description:
- Whether the SELinux file context must be C(absent) or C(present).
type: str
choices: [ absent, present ]
default: present
reload:
description:
- Reload SELinux policy after commit.
- Note that this does not apply SELinux file contexts to existing files.
type: bool
default: yes
ignore_selinux_state:
description:
- Useful for scenarios (chrooted environment) that you can't get the real SELinux state.
type: bool
default: no
version_added: '2.8'
notes:
- The changes are persistent across reboots.
- The M(sefcontext) module does not modify existing files to the new
SELinux context(s), so it is advisable to first create the SELinux
file contexts before creating files, or run C(restorecon) manually
for the existing files that require the new SELinux file contexts.
- Not applying SELinux fcontexts to existing files is a deliberate
  decision, as it would be unclear what the reported changes would
  entail, and there is no guarantee that applying the SELinux fcontext
  does not pick up other unrelated prior changes.
requirements:
- libselinux-python
- policycoreutils-python
author:
- Dag Wieers (@dagwieers)
'''
EXAMPLES = r'''
- name: Allow apache to modify files in /srv/git_repos
sefcontext:
target: '/srv/git_repos(/.*)?'
setype: httpd_git_rw_content_t
state: present
- name: Apply new SELinux file context to filesystem
command: restorecon -irv /srv/git_repos
'''
RETURN = r'''
# Default return values
'''
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
SELINUX_IMP_ERR = None
try:
import selinux
HAVE_SELINUX = True
except ImportError:
SELINUX_IMP_ERR = traceback.format_exc()
HAVE_SELINUX = False
SEOBJECT_IMP_ERR = None
try:
import seobject
HAVE_SEOBJECT = True
except ImportError:
SEOBJECT_IMP_ERR = traceback.format_exc()
HAVE_SEOBJECT = False
# Add missing entries (backward compatible)
if HAVE_SEOBJECT:
seobject.file_types.update(
a=seobject.SEMANAGE_FCONTEXT_ALL,
b=seobject.SEMANAGE_FCONTEXT_BLOCK,
c=seobject.SEMANAGE_FCONTEXT_CHAR,
d=seobject.SEMANAGE_FCONTEXT_DIR,
f=seobject.SEMANAGE_FCONTEXT_REG,
l=seobject.SEMANAGE_FCONTEXT_LINK,
p=seobject.SEMANAGE_FCONTEXT_PIPE,
s=seobject.SEMANAGE_FCONTEXT_SOCK,
)
# Make backward compatible
option_to_file_type_str = dict(
a='all files',
b='block device',
c='character device',
d='directory',
f='regular file',
l='symbolic link',
p='named pipe',
s='socket',
)
def get_runtime_status(ignore_selinux_state=False):
return True if ignore_selinux_state is True else selinux.is_selinux_enabled()
def semanage_fcontext_exists(sefcontext, target, ftype):
''' Get the SELinux file context mapping definition from policy. Return None if it does not exist. '''
# Beware that records comprise of a string representation of the file_type
record = (target, option_to_file_type_str[ftype])
records = sefcontext.get_all()
try:
return records[record]
except KeyError:
return None
def semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser, sestore=''):
''' Add or modify SELinux file context mapping definition to the policy. '''
changed = False
prepared_diff = ''
try:
sefcontext = seobject.fcontextRecords(sestore)
sefcontext.set_reload(do_reload)
exists = semanage_fcontext_exists(sefcontext, target, ftype)
if exists:
# Modify existing entry
orig_seuser, orig_serole, orig_setype, orig_serange = exists
if seuser is None:
seuser = orig_seuser
if serange is None:
serange = orig_serange
if setype != orig_setype or seuser != orig_seuser or serange != orig_serange:
if not module.check_mode:
sefcontext.modify(target, setype, ftype, serange, seuser)
changed = True
if module._diff:
prepared_diff += '# Change to semanage file context mappings\n'
prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange)
prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, orig_serole, setype, serange)
else:
# Add missing entry
if seuser is None:
seuser = 'system_u'
if serange is None:
serange = 's0'
if not module.check_mode:
sefcontext.add(target, setype, ftype, serange, seuser)
changed = True
if module._diff:
prepared_diff += '# Addition to semanage file context mappings\n'
prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, 'object_r', setype, serange)
except Exception as e:
module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))
if module._diff and prepared_diff:
result['diff'] = dict(prepared=prepared_diff)
module.exit_json(changed=changed, seuser=seuser, serange=serange, **result)
def semanage_fcontext_delete(module, result, target, ftype, do_reload, sestore=''):
''' Delete SELinux file context mapping definition from the policy. '''
changed = False
prepared_diff = ''
try:
sefcontext = seobject.fcontextRecords(sestore)
sefcontext.set_reload(do_reload)
exists = semanage_fcontext_exists(sefcontext, target, ftype)
if exists:
# Remove existing entry
orig_seuser, orig_serole, orig_setype, orig_serange = exists
if not module.check_mode:
sefcontext.delete(target, ftype)
changed = True
if module._diff:
prepared_diff += '# Deletion to semanage file context mappings\n'
prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, exists[0], exists[1], exists[2], exists[3])
except Exception as e:
module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))
if module._diff and prepared_diff:
result['diff'] = dict(prepared=prepared_diff)
module.exit_json(changed=changed, **result)
def main():
module = AnsibleModule(
argument_spec=dict(
ignore_selinux_state=dict(type='bool', default=False),
target=dict(type='str', required=True, aliases=['path']),
ftype=dict(type='str', default='a', choices=option_to_file_type_str.keys()),
setype=dict(type='str', required=True),
seuser=dict(type='str'),
selevel=dict(type='str', aliases=['serange']),
state=dict(type='str', default='present', choices=['absent', 'present']),
reload=dict(type='bool', default=True),
),
supports_check_mode=True,
)
if not HAVE_SELINUX:
module.fail_json(msg=missing_required_lib("libselinux-python"), exception=SELINUX_IMP_ERR)
if not HAVE_SEOBJECT:
module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR)
ignore_selinux_state = module.params['ignore_selinux_state']
if not get_runtime_status(ignore_selinux_state):
module.fail_json(msg="SELinux is disabled on this host.")
target = module.params['target']
ftype = module.params['ftype']
setype = module.params['setype']
seuser = module.params['seuser']
serange = module.params['selevel']
state = module.params['state']
do_reload = module.params['reload']
result = dict(target=target, ftype=ftype, setype=setype, state=state)
if state == 'present':
semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser)
elif state == 'absent':
semanage_fcontext_delete(module, result, target, ftype, do_reload)
else:
module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
if __name__ == '__main__':
main()
|
gpl-3.0
|
CSF-JH/crossbarexamples
|
work/call_from_handler/callee.py
|
11
|
2237
|
###############################################################################
##
## Copyright (C) 2014-2015, Tavendo GmbH and/or collaborators. All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## 1. Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
##
## 2. Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
## ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
## LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
## CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
## SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
## INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
## CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
## ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
## POSSIBILITY OF SUCH DAMAGE.
##
###############################################################################
from twisted.internet.defer import inlineCallbacks, returnValue
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession
from autobahn.wamp.exception import ApplicationError
class MyCallee(ApplicationSession):
@inlineCallbacks
def onJoin(self, details):
## REGISTER a procedure for remote calling
##
@inlineCallbacks
def slowsquare(x):
print("slowsquare() called with {}".format(x))
yield sleep(3)
returnValue(x * x)
reg = yield self.register(slowsquare, 'com.example.slowsquare')
print("procedure slowsquare() registered")
|
apache-2.0
|
eaplatanios/tensorflow
|
tensorflow/python/eager/graph_callable_test.py
|
31
|
8775
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import backprop
from tensorflow.python.eager import graph_callable
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
class GraphCallableTest(test.TestCase):
def testBasic(self):
@graph_callable.graph_callable(
[graph_callable.ShapeAndDtype(shape=(), dtype=dtypes.float32)])
def my_function(x):
v = variable_scope.get_variable(
"v", initializer=init_ops.zeros_initializer(), shape=())
return v + x
self.assertEqual(
2, my_function(constant_op.constant(2, dtype=dtypes.float32)).numpy())
my_function.variables[0].assign(1.)
self.assertEqual(
3, my_function(constant_op.constant(2, dtype=dtypes.float32)).numpy())
def testFunctionWithoutReturnValue(self):
@graph_callable.graph_callable(
[graph_callable.ShapeAndDtype(shape=(), dtype=dtypes.float32)])
def my_function(x):
v = variable_scope.get_variable(
"v", initializer=init_ops.zeros_initializer(), shape=())
v.assign(x)
my_function(constant_op.constant(4, dtype=dtypes.float32))
self.assertAllEqual(4, my_function.variables[0].read_value())
def testFunctionWithoutReturnValueAndArgs(self):
@graph_callable.graph_callable([])
def my_function():
v = variable_scope.get_variable(
"v", initializer=init_ops.zeros_initializer(), shape=())
v.assign(4)
my_function()
self.assertAllEqual(4, my_function.variables[0].read_value())
def testVariableAPI(self):
@graph_callable.graph_callable(
[graph_callable.ShapeAndDtype(shape=(), dtype=dtypes.float32)])
def my_function(x):
v = variable_scope.get_variable(
"v", initializer=init_ops.zeros_initializer(), shape=())
return v.read_value() + x
self.assertEqual(
2, my_function(constant_op.constant(2, dtype=dtypes.float32)).numpy())
my_function.variables[0].assign(1.)
self.assertEqual(
3, my_function(constant_op.constant(2, dtype=dtypes.float32)).numpy())
def testTensorShape(self):
@graph_callable.graph_callable(
[graph_callable.ShapeAndDtype(shape=(1), dtype=dtypes.float32)])
def my_function(x):
_ = x.get_shape()
v = variable_scope.get_variable(
"v", initializer=init_ops.zeros_initializer(), shape=[x.shape[0]])
self.assertEqual(v.shape[0], x.shape[0])
return v + x
self.assertEqual([2.],
my_function(
constant_op.constant([2.],
dtype=dtypes.float32)).numpy())
def testUpdatesAreOrdered(self):
@graph_callable.graph_callable(
[graph_callable.ShapeAndDtype(shape=(), dtype=dtypes.float32)])
def my_function(x):
v = variable_scope.get_variable(
"v", initializer=init_ops.zeros_initializer(), shape=())
v.assign(x + 1)
v.assign(v * x)
return v.read_value()
self.assertAllEqual(my_function(constant_op.constant(2.0)), 6.0)
def testEmptyInitializer(self):
@graph_callable.graph_callable(
[graph_callable.ShapeAndDtype(shape=(1), dtype=dtypes.float32)])
def my_function(x):
v = variable_scope.get_variable("v", shape=[1])
return x + 0 * v
self.assertEqual([2.],
my_function(
constant_op.constant([2.],
dtype=dtypes.float32)).numpy())
def testMismatchingNumArgs(self):
# pylint: disable=anomalous-backslash-in-string
with self.assertRaisesRegexp(TypeError,
"The number of arguments accepted by the "
"decorated function `my_function` \(2\) must "
"match the number of ShapeAndDtype objects "
"passed to the graph_callable\(\) decorator "
"\(1\)."):
@graph_callable.graph_callable([
graph_callable.ShapeAndDtype(shape=(), dtype=dtypes.float32)])
def my_function(x, y): # pylint: disable=unused-variable
return x + y
# pylint: enable=anomalous-backslash-in-string
def testPureFunction(self):
@graph_callable.graph_callable(
[graph_callable.ShapeAndDtype(shape=(), dtype=dtypes.int32)])
def f(x):
return math_ops.add(x, constant_op.constant(3))
self.assertAllEqual(5, f(constant_op.constant(2)))
def testNestedFunction(self):
# TensorFlow function (which is what would be used in TensorFlow graph
# construction).
@function.Defun(dtypes.int32, dtypes.int32)
def add(a, b):
return math_ops.add(a, b)
# A graph_callable that will invoke the TensorFlow function.
@graph_callable.graph_callable(
[graph_callable.ShapeAndDtype(shape=(), dtype=dtypes.int32)])
def add_one(x):
return add(x, 1)
self.assertAllEqual(3, add_one(constant_op.constant(2)))
# TODO(ashankar): Make this work.
# The problem is that the two graph_callables (for add_one and add_two)
# are both trying to register the FunctionDef corresponding to "add".
def DISABLED_testRepeatedUseOfSubFunction(self):
@function.Defun(dtypes.int32, dtypes.int32)
def add(a, b):
return math_ops.add(a, b)
@graph_callable.graph_callable(
[graph_callable.ShapeAndDtype(shape=(), dtype=dtypes.int32)])
def add_one(x):
return add(x, 1)
@graph_callable.graph_callable(
[graph_callable.ShapeAndDtype(shape=(), dtype=dtypes.int32)])
def add_two(x):
return add(x, 2)
two = constant_op.constant(2)
self.assertAllEqual(3, add_one(two))
self.assertAllEqual(4, add_two(two))
def testNestedSequenceInputs(self):
sd = graph_callable.ShapeAndDtype(shape=(), dtype=dtypes.float32)
@graph_callable.graph_callable([[sd, tuple([sd, sd]), sd]])
def my_op(inputs):
a, b, c = inputs
e, f = b
v = variable_scope.get_variable(
"my_v", initializer=init_ops.zeros_initializer(), shape=())
return [a + a + v, tuple([e + e, f + f]), c + c], a + e + f + c + v
inputs = [constant_op.constant(1.),
[constant_op.constant(2.), constant_op.constant(3.)],
constant_op.constant(4.)]
ret = my_op(inputs)
self.assertEqual(len(ret), 2.)
self.assertAllEqual(ret[1], 10.)
my_op.variables[0].assign(1.)
ret = my_op(inputs)
self.assertAllEqual(ret[1], 11.)
def testVariableShapeIsTensorShape(self):
@graph_callable.graph_callable([])
def my_function():
v = variable_scope.get_variable(
"v", initializer=init_ops.zeros_initializer(), shape=())
self.assertIsInstance(v.get_shape(), tensor_shape.TensorShape)
my_function()
def testIncorrectlyShapedInputs(self):
@graph_callable.graph_callable(
[graph_callable.ShapeAndDtype(shape=(3), dtype=dtypes.float32)])
def my_function(x):
v = variable_scope.get_variable(
"v", initializer=init_ops.zeros_initializer(), shape=())
return v + x
with self.assertRaises(ValueError):
my_function([1, 2])
self.assertTrue(([1, 2, 3] == my_function(
constant_op.constant([1, 2, 3], dtype=dtypes.float32)).numpy()).all())
def testGradients(self):
@graph_callable.graph_callable([])
def my_function():
v = variable_scope.get_variable(
"v", initializer=init_ops.constant_initializer(3.), shape=())
return v * v
grad_fn = backprop.implicit_grad(my_function)
grads_and_vars = list(zip(*grad_fn()))
self.assertAllEqual(6., grads_and_vars[0][0])
if __name__ == "__main__":
test.main()
|
apache-2.0
|
Belxjander/Kirito
|
Python-3.5.0-Amiga/Lib/test/test_cprofile.py
|
8
|
5557
|
"""Test suite for the cProfile module."""
import sys
from test.support import run_unittest, TESTFN, unlink
# rip off all interesting stuff from test_profile
import cProfile
from test.test_profile import ProfileTest, regenerate_expected_output
from test.profilee import testfunc
class CProfileTest(ProfileTest):
profilerclass = cProfile.Profile
profilermodule = cProfile
expected_max_output = "{built-in method builtins.max}"
def get_expected_output(self):
return _ProfileOutput
# Issue 3895.
def test_bad_counter_during_dealloc(self):
import _lsprof
# Must use a file as StringIO doesn't trigger the bug.
orig_stderr = sys.stderr
try:
with open(TESTFN, 'w') as file:
sys.stderr = file
try:
obj = _lsprof.Profiler(lambda: int)
obj.enable()
obj = _lsprof.Profiler(1)
obj.disable()
obj.clear()
finally:
sys.stderr = orig_stderr
finally:
unlink(TESTFN)
def test_main():
run_unittest(CProfileTest)
def main():
if '-r' not in sys.argv:
test_main()
else:
regenerate_expected_output(__file__, CProfileTest)
# Don't remove this comment. Everything below it is auto-generated.
#--cut--------------------------------------------------------------------------
_ProfileOutput = {}
_ProfileOutput['print_stats'] = """\
28 0.028 0.001 0.028 0.001 profilee.py:110(__getattr__)
1 0.270 0.270 1.000 1.000 profilee.py:25(testfunc)
23/3 0.150 0.007 0.170 0.057 profilee.py:35(factorial)
20 0.020 0.001 0.020 0.001 profilee.py:48(mul)
2 0.040 0.020 0.600 0.300 profilee.py:55(helper)
4 0.116 0.029 0.120 0.030 profilee.py:73(helper1)
2 0.000 0.000 0.140 0.070 profilee.py:84(helper2_indirect)
8 0.312 0.039 0.400 0.050 profilee.py:88(helper2)
8 0.064 0.008 0.080 0.010 profilee.py:98(subhelper)"""
_ProfileOutput['print_callers'] = """\
profilee.py:110(__getattr__) <- 16 0.016 0.016 profilee.py:98(subhelper)
profilee.py:25(testfunc) <- 1 0.270 1.000 <string>:1(<module>)
profilee.py:35(factorial) <- 1 0.014 0.130 profilee.py:25(testfunc)
20/3 0.130 0.147 profilee.py:35(factorial)
2 0.006 0.040 profilee.py:84(helper2_indirect)
profilee.py:48(mul) <- 20 0.020 0.020 profilee.py:35(factorial)
profilee.py:55(helper) <- 2 0.040 0.600 profilee.py:25(testfunc)
profilee.py:73(helper1) <- 4 0.116 0.120 profilee.py:55(helper)
profilee.py:84(helper2_indirect) <- 2 0.000 0.140 profilee.py:55(helper)
profilee.py:88(helper2) <- 6 0.234 0.300 profilee.py:55(helper)
2 0.078 0.100 profilee.py:84(helper2_indirect)
profilee.py:98(subhelper) <- 8 0.064 0.080 profilee.py:88(helper2)
{built-in method builtins.hasattr} <- 4 0.000 0.004 profilee.py:73(helper1)
8 0.000 0.008 profilee.py:88(helper2)
{built-in method sys.exc_info} <- 4 0.000 0.000 profilee.py:73(helper1)
{method 'append' of 'list' objects} <- 4 0.000 0.000 profilee.py:73(helper1)"""
_ProfileOutput['print_callees'] = """\
<string>:1(<module>) -> 1 0.270 1.000 profilee.py:25(testfunc)
profilee.py:110(__getattr__) ->
profilee.py:25(testfunc) -> 1 0.014 0.130 profilee.py:35(factorial)
2 0.040 0.600 profilee.py:55(helper)
profilee.py:35(factorial) -> 20/3 0.130 0.147 profilee.py:35(factorial)
20 0.020 0.020 profilee.py:48(mul)
profilee.py:48(mul) ->
profilee.py:55(helper) -> 4 0.116 0.120 profilee.py:73(helper1)
2 0.000 0.140 profilee.py:84(helper2_indirect)
6 0.234 0.300 profilee.py:88(helper2)
profilee.py:73(helper1) -> 4 0.000 0.004 {built-in method builtins.hasattr}
profilee.py:84(helper2_indirect) -> 2 0.006 0.040 profilee.py:35(factorial)
2 0.078 0.100 profilee.py:88(helper2)
profilee.py:88(helper2) -> 8 0.064 0.080 profilee.py:98(subhelper)
profilee.py:98(subhelper) -> 16 0.016 0.016 profilee.py:110(__getattr__)
{built-in method builtins.hasattr} -> 12 0.012 0.012 profilee.py:110(__getattr__)"""
if __name__ == "__main__":
main()
|
gpl-3.0
|
gxx/lettuce
|
tests/integration/lib/Django-1.3/tests/modeltests/custom_columns/tests.py
|
92
|
2224
|
from django.core.exceptions import FieldError
from django.test import TestCase
from models import Author, Article
class CustomColumnsTests(TestCase):
def test_db_column(self):
a1 = Author.objects.create(first_name="John", last_name="Smith")
a2 = Author.objects.create(first_name="Peter", last_name="Jones")
art = Article.objects.create(headline="Django lets you build Web apps easily")
art.authors = [a1, a2]
# Although the table and column names on Author have been set to custom
# values, nothing about using the Author model has changed...
# Query the available authors
self.assertQuerysetEqual(
Author.objects.all(), [
"Peter Jones", "John Smith",
],
unicode
)
self.assertQuerysetEqual(
Author.objects.filter(first_name__exact="John"), [
"John Smith",
],
unicode
)
self.assertEqual(
Author.objects.get(first_name__exact="John"),
a1,
)
self.assertRaises(FieldError,
lambda: Author.objects.filter(firstname__exact="John")
)
a = Author.objects.get(last_name__exact="Smith")
a.first_name = "John"
a.last_name = "Smith"
self.assertRaises(AttributeError, lambda: a.firstname)
self.assertRaises(AttributeError, lambda: a.last)
# Although the Article table uses a custom m2m table,
# nothing about using the m2m relationship has changed...
# Get all the authors for an article
self.assertQuerysetEqual(
art.authors.all(), [
"Peter Jones",
"John Smith",
],
unicode
)
# Get the articles for an author
self.assertQuerysetEqual(
a.article_set.all(), [
"Django lets you build Web apps easily",
],
lambda a: a.headline
)
# Query the authors across the m2m relation
self.assertQuerysetEqual(
art.authors.filter(last_name='Jones'), [
"Peter Jones"
],
unicode
)
|
gpl-3.0
|
konstruktoid/ansible-upstream
|
test/units/modules/network/f5/test_bigip_sys_db.py
|
27
|
4070
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_sys_db import Parameters
from library.modules.bigip_sys_db import ModuleManager
from library.modules.bigip_sys_db import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_sys_db import Parameters
from ansible.modules.network.f5.bigip_sys_db import ModuleManager
from ansible.modules.network.f5.bigip_sys_db import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
key='foo',
value='bar',
password='password',
server='localhost',
user='admin'
)
p = Parameters(params=args)
assert p.key == 'foo'
assert p.value == 'bar'
def test_api_parameters(self):
args = dict(
key='foo',
value='bar',
password='password',
server='localhost',
defaultValue='baz',
user='admin'
)
p = Parameters(params=args)
assert p.key == 'foo'
assert p.value == 'bar'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_blackhole(self, *args):
set_module_args(dict(
key='provision.cpu.afm',
value='1',
password='admin',
server='localhost',
user='admin',
state='present'
))
# Configure the parameters that would be returned by querying the
# remote device
current = Parameters(
dict(
kind="tm:sys:db:dbstate",
name="provision.cpu.afm",
fullPath="provision.cpu.afm",
generation=1,
selfLink="https://localhost/mgmt/tm/sys/db/provision.cpu.afm?ver=11.6.1",
defaultValue="0",
scfConfig="false",
value="0",
valueRange="integer min:0 max:100"
)
)
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
|
gpl-3.0
|
jolevq/odoopub
|
addons/base_import/models.py
|
90
|
13824
|
import csv
import itertools
import logging
import operator
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import psycopg2
from openerp.osv import orm, fields
from openerp.tools.translate import _
FIELDS_RECURSION_LIMIT = 2
ERROR_PREVIEW_BYTES = 200
_logger = logging.getLogger(__name__)
class ir_import(orm.TransientModel):
_name = 'base_import.import'
# allow imports to survive for 12h in case user is slow
_transient_max_hours = 12.0
_columns = {
'res_model': fields.char('Model'),
'file': fields.binary(
'File', help="File to check and/or import, raw binary (not base64)"),
'file_name': fields.char('File Name'),
'file_type': fields.char('File Type'),
}
def get_fields(self, cr, uid, model, context=None,
depth=FIELDS_RECURSION_LIMIT):
""" Recursively get fields for the provided model (through
fields_get) and filter them according to importability
The output format is a list of ``Field``, with ``Field``
defined as:
.. class:: Field
.. attribute:: id (str)
A non-unique identifier for the field, used to compute
the span of the ``required`` attribute: if multiple
``required`` fields have the same id, only one of them
is necessary.
.. attribute:: name (str)
The field's logical (Odoo) name within the scope of
its parent.
.. attribute:: string (str)
The field's human-readable name (``@string``)
.. attribute:: required (bool)
Whether the field is marked as required in the
model. Clients must provide non-empty import values
for all required fields or the import will error out.
.. attribute:: fields (list(Field))
The current field's subfields. The database and
external identifiers for m2o and m2m fields; a
filtered and transformed fields_get for o2m fields (to
a variable depth defined by ``depth``).
Fields with no sub-fields will have an empty list of
sub-fields.
        :param str model: name of the model to get fields from
        :param int depth: depth of recursion into o2m fields
"""
model_obj = self.pool[model]
fields = [{
'id': 'id',
'name': 'id',
'string': _("External ID"),
'required': False,
'fields': [],
}]
fields_got = model_obj.fields_get(cr, uid, context=context)
blacklist = orm.MAGIC_COLUMNS + [model_obj.CONCURRENCY_CHECK_FIELD]
for name, field in fields_got.iteritems():
if name in blacklist:
continue
# an empty string means the field is deprecated, @deprecated must
# be absent or False to mean not-deprecated
if field.get('deprecated', False) is not False:
continue
if field.get('readonly'):
states = field.get('states')
if not states:
continue
# states = {state: [(attr, value), (attr2, value2)], state2:...}
if not any(attr == 'readonly' and value is False
for attr, value in itertools.chain.from_iterable(
states.itervalues())):
continue
f = {
'id': name,
'name': name,
'string': field['string'],
# Y U NO ALWAYS HAS REQUIRED
'required': bool(field.get('required')),
'fields': [],
}
if field['type'] in ('many2many', 'many2one'):
f['fields'] = [
dict(f, name='id', string=_("External ID")),
dict(f, name='.id', string=_("Database ID")),
]
elif field['type'] == 'one2many' and depth:
f['fields'] = self.get_fields(
cr, uid, field['relation'], context=context, depth=depth-1)
if self.pool['res.users'].has_group(cr, uid, 'base.group_no_one'):
f['fields'].append({'id' : '.id', 'name': '.id', 'string': _("Database ID"), 'required': False, 'fields': []})
fields.append(f)
# TODO: cache on model?
return fields
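    # Illustrative sketch (not part of the original code): for a model with
    # a required char field 'name', get_fields() yields entries shaped like
    #   {'id': 'name', 'name': 'name', 'string': 'Name',
    #    'required': True, 'fields': []}
    # in addition to the leading 'id' (External ID) pseudo-field above.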
def _read_csv(self, record, options):
""" Returns a CSV-parsed iterator of all empty lines in the file
:throws csv.Error: if an error is detected during CSV parsing
:throws UnicodeDecodeError: if ``options.encoding`` is incorrect
"""
csv_iterator = csv.reader(
StringIO(record.file),
quotechar=str(options['quoting']),
delimiter=str(options['separator']))
csv_nonempty = itertools.ifilter(None, csv_iterator)
# TODO: guess encoding with chardet? Or https://github.com/aadsm/jschardet
encoding = options.get('encoding', 'utf-8')
return itertools.imap(
lambda row: [item.decode(encoding) for item in row],
csv_nonempty)
def _match_header(self, header, fields, options):
""" Attempts to match a given header to a field of the
imported model.
:param str header: header name from the CSV file
:param fields:
:param dict options:
:returns: an empty list if the header couldn't be matched, or
all the fields to traverse
:rtype: list(Field)
"""
for field in fields:
# FIXME: should match all translations & original
# TODO: use string distance (levenshtein? hamming?)
if header == field['name'] \
or header.lower() == field['string'].lower():
return [field]
if '/' not in header:
return []
# relational field path
traversal = []
subfields = fields
# Iteratively dive into fields tree
for section in header.split('/'):
# Strip section in case spaces are added around '/' for
# readability of paths
match = self._match_header(section.strip(), subfields, options)
# Any match failure, exit
if not match: return []
# prep subfields for next iteration within match[0]
field = match[0]
subfields = field['fields']
traversal.append(field)
return traversal
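    # Illustrative sketch (not part of the original code): with a m2o field
    # 'partner_id' whose subfields include 'id', a header such as
    # 'partner_id / id' is matched section by section and returns the
    # traversal [<Field partner_id>, <Field id>]; an unmatchable header
    # returns [].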
def _match_headers(self, rows, fields, options):
""" Attempts to match the imported model's fields to the
titles of the parsed CSV file, if the file is supposed to have
headers.
Will consume the first line of the ``rows`` iterator.
        Returns a pair of (None, None) if headers were not requested,
        or the list of headers and a dict mapping cell indices
        to key paths in the ``fields`` tree
:param Iterator rows:
:param dict fields:
:param dict options:
:rtype: (None, None) | (list(str), dict(int: list(str)))
"""
if not options.get('headers'):
return None, None
headers = next(rows)
return headers, dict(
(index, [field['name'] for field in self._match_header(header, fields, options)] or None)
for index, header in enumerate(headers)
)
def parse_preview(self, cr, uid, id, options, count=10, context=None):
""" Generates a preview of the uploaded files, and performs
fields-matching between the import's file data and the model's
columns.
If the headers are not requested (not options.headers),
``matches`` and ``headers`` are both ``False``.
:param id: identifier of the import
:param int count: number of preview lines to generate
:param options: format-specific options.
CSV: {encoding, quoting, separator, headers}
:type options: {str, str, str, bool}
:returns: {fields, matches, headers, preview} | {error, preview}
:rtype: {dict(str: dict(...)), dict(int, list(str)), list(str), list(list(str))} | {str, str}
"""
(record,) = self.browse(cr, uid, [id], context=context)
fields = self.get_fields(cr, uid, record.res_model, context=context)
try:
rows = self._read_csv(record, options)
headers, matches = self._match_headers(rows, fields, options)
            # Match should have consumed the first row (iff headers), get
# the ``count`` next rows for preview
preview = list(itertools.islice(rows, count))
assert preview, "CSV file seems to have no content"
return {
'fields': fields,
'matches': matches or False,
'headers': headers or False,
'preview': preview,
}
except Exception, e:
# Due to lazy generators, UnicodeDecodeError (for
# instance) may only be raised when serializing the
# preview to a list in the return.
_logger.debug("Error during CSV parsing preview", exc_info=True)
return {
'error': str(e),
# iso-8859-1 ensures decoding will always succeed,
# even if it yields non-printable characters. This is
# in case of UnicodeDecodeError (or csv.Error
# compounded with UnicodeDecodeError)
'preview': record.file[:ERROR_PREVIEW_BYTES]
                    .decode('iso-8859-1'),
}
def _convert_import_data(self, record, fields, options, context=None):
""" Extracts the input browse_record and fields list (with
``False``-y placeholders for fields to *not* import) into a
format Model.import_data can use: a fields list without holes
and the precisely matching data matrix
:param browse_record record:
:param list(str|bool): fields
:returns: (data, fields)
:rtype: (list(list(str)), list(str))
:raises ValueError: in case the import data could not be converted
"""
# Get indices for non-empty fields
indices = [index for index, field in enumerate(fields) if field]
if not indices:
raise ValueError(_("You must configure at least one field to import"))
# If only one index, itemgetter will return an atom rather
# than a 1-tuple
if len(indices) == 1: mapper = lambda row: [row[indices[0]]]
else: mapper = operator.itemgetter(*indices)
# Get only list of actually imported fields
import_fields = filter(None, fields)
rows_to_import = self._read_csv(record, options)
if options.get('headers'):
rows_to_import = itertools.islice(
rows_to_import, 1, None)
data = [
row for row in itertools.imap(mapper, rows_to_import)
# don't try inserting completely empty rows (e.g. from
# filtering out o2m fields)
if any(row)
]
return data, import_fields
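    # Illustrative sketch (not part of the original code): with
    # fields = ['name', False, 'login'] and a CSV row
    # ['Bob', 'ignored', 'bob'], the mapper keeps columns 0 and 2, so
    # data == [[u'Bob', u'bob']] and import_fields == ['name', 'login'].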
def do(self, cr, uid, id, fields, options, dryrun=False, context=None):
""" Actual execution of the import
:param fields: import mapping: maps each column to a field,
``False`` for the columns to ignore
:type fields: list(str|bool)
:param dict options:
:param bool dryrun: performs all import operations (and
validations) but rollbacks writes, allows
getting as much errors as possible without
the risk of clobbering the database.
:returns: A list of errors. If the list is empty the import
executed fully and correctly. If the list is
non-empty it contains dicts with 3 keys ``type`` the
type of error (``error|warning``); ``message`` the
error message associated with the error (a string)
and ``record`` the data which failed to import (or
``false`` if that data isn't available or provided)
:rtype: list({type, message, record})
"""
cr.execute('SAVEPOINT import')
(record,) = self.browse(cr, uid, [id], context=context)
try:
data, import_fields = self._convert_import_data(
record, fields, options, context=context)
except ValueError, e:
return [{
'type': 'error',
'message': unicode(e),
'record': False,
}]
_logger.info('importing %d rows...', len(data))
import_result = self.pool[record.res_model].load(
cr, uid, import_fields, data, context=context)
_logger.info('done')
# If transaction aborted, RELEASE SAVEPOINT is going to raise
# an InternalError (ROLLBACK should work, maybe). Ignore that.
# TODO: to handle multiple errors, create savepoint around
# write and release it in case of write error (after
# adding error to errors array) => can keep on trying to
# import stuff, and rollback at the end if there is any
# error in the results.
try:
if dryrun:
cr.execute('ROLLBACK TO SAVEPOINT import')
else:
cr.execute('RELEASE SAVEPOINT import')
except psycopg2.InternalError:
pass
return import_result['messages']
|
agpl-3.0
|
harshilasu/GraphicMelon
|
y/google-cloud-sdk/lib/googlecloudapis/apitools/base/py/transfer.py
|
5
|
25853
|
"""Upload and download support for apitools."""
import email.generator as email_generator
import email.mime.multipart as mime_multipart
import email.mime.nonmultipart as mime_nonmultipart
import httplib
import io
import json
import mimetypes
import os
import StringIO
import threading
from googlecloudapis.apitools.base.py import exceptions
from googlecloudapis.apitools.base.py import http_wrapper
from googlecloudapis.apitools.base.py import util
__all__ = [
'Download',
'Upload',
]
_RESUMABLE_UPLOAD_THRESHOLD = 5 << 20
_SIMPLE_UPLOAD = 'simple'
_RESUMABLE_UPLOAD = 'resumable'
class _Transfer(object):
"""Generic bits common to Uploads and Downloads."""
def __init__(self, stream, close_stream=False, chunksize=None,
auto_transfer=True, http=None):
self.__bytes_http = None
self.__close_stream = close_stream
self.__http = http
self.__stream = stream
self.__url = None
self.auto_transfer = auto_transfer
self.chunksize = chunksize or 1048576L
def __repr__(self):
return str(self)
@property
def close_stream(self):
return self.__close_stream
@property
def http(self):
return self.__http
@property
def bytes_http(self):
return self.__bytes_http or self.http
@bytes_http.setter
def bytes_http(self, value):
self.__bytes_http = value
@property
def stream(self):
return self.__stream
@property
def url(self):
return self.__url
def _Initialize(self, http, url):
"""Initialize this download by setting self.http and self.url.
We want the user to be able to override self.http by having set
the value in the constructor; in that case, we ignore the provided
http.
Args:
http: An httplib2.Http instance or None.
url: The url for this transfer.
Returns:
None. Initializes self.
"""
self.EnsureUninitialized()
if self.http is None:
self.__http = http or http_wrapper.GetHttp()
self.__url = url
@property
def initialized(self):
return self.url is not None and self.http is not None
@property
def _type_name(self):
return type(self).__name__
def EnsureInitialized(self):
if not self.initialized:
raise exceptions.TransferInvalidError(
'Cannot use uninitialized %s', self._type_name)
def EnsureUninitialized(self):
if self.initialized:
raise exceptions.TransferInvalidError(
'Cannot re-initialize %s', self._type_name)
def __del__(self):
if self.__close_stream:
self.__stream.close()
def _ExecuteCallback(self, callback, response):
# TODO(user): Push these into a queue.
if callback is not None:
threading.Thread(target=callback, args=(response, self)).start()
class Download(_Transfer):
"""Data for a single download.
Public attributes:
chunksize: default chunksize to use for transfers.
"""
_ACCEPTABLE_STATUSES = set((
httplib.OK,
httplib.NO_CONTENT,
httplib.PARTIAL_CONTENT,
httplib.REQUESTED_RANGE_NOT_SATISFIABLE,
))
_REQUIRED_SERIALIZATION_KEYS = set((
'auto_transfer', 'progress', 'total_size', 'url'))
def __init__(self, *args, **kwds):
super(Download, self).__init__(*args, **kwds)
self.__initial_response = None
self.__progress = 0
self.__total_size = None
@property
def progress(self):
return self.__progress
@classmethod
def FromFile(cls, filename, overwrite=False, auto_transfer=True):
"""Create a new download object from a filename."""
path = os.path.expanduser(filename)
if os.path.exists(path) and not overwrite:
raise exceptions.InvalidUserInputError(
'File %s exists and overwrite not specified' % path)
return cls(open(path, 'wb'), close_stream=True, auto_transfer=auto_transfer)
@classmethod
def FromStream(cls, stream, auto_transfer=True):
"""Create a new Download object from a stream."""
return cls(stream, auto_transfer=auto_transfer)
@classmethod
def FromData(cls, stream, json_data, http=None, auto_transfer=None):
"""Create a new Download object from a stream and serialized data."""
info = json.loads(json_data)
missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys())
if missing_keys:
raise exceptions.InvalidDataError(
'Invalid serialization data, missing keys: %s' % (
', '.join(missing_keys)))
download = cls.FromStream(stream)
if auto_transfer is not None:
download.auto_transfer = auto_transfer
else:
download.auto_transfer = info['auto_transfer']
setattr(download, '_Download__progress', info['progress'])
setattr(download, '_Download__total_size', info['total_size'])
download._Initialize(http, info['url']) # pylint: disable=protected-access
return download
@property
def serialization_data(self):
self.EnsureInitialized()
return {
'auto_transfer': self.auto_transfer,
'progress': self.progress,
'total_size': self.total_size,
'url': self.url,
}
@property
def total_size(self):
return self.__total_size
def __str__(self):
if not self.initialized:
return 'Download (uninitialized)'
else:
return 'Download with %d/%s bytes transferred from url %s' % (
self.progress, self.total_size, self.url)
def ConfigureRequest(self, http_request, url_builder):
url_builder.query_params['alt'] = 'media'
http_request.headers['Range'] = 'bytes=0-%d' % (self.chunksize - 1,)
def __SetTotal(self, info):
if 'content-range' in info:
_, _, total = info['content-range'].rpartition('/')
if total != '*':
self.__total_size = int(total)
# Note "total_size is None" means we don't know it; if no size
# info was returned on our initial range request, that means we
# have a 0-byte file. (That last statement has been verified
# empirically, but is not clearly documented anywhere.)
if self.total_size is None:
self.__total_size = 0
def InitializeDownload(self, http_request, http=None, client=None):
"""Initialize this download by making a request.
Args:
http_request: The HttpRequest to use to initialize this download.
http: The httplib2.Http instance for this request.
client: If provided, let this client process the final URL before
sending any additional requests. If client is provided and
http is not, client.http will be used instead.
"""
self.EnsureUninitialized()
if http is None and client is None:
raise exceptions.UserError('Must provide client or http.')
http = http or client.http
if client is not None:
http_request.url = client.FinalizeTransferUrl(http_request.url)
response = http_wrapper.MakeRequest(self.bytes_http or http, http_request)
if response.status_code not in self._ACCEPTABLE_STATUSES:
raise exceptions.HttpError.FromResponse(response)
self.__initial_response = response
self.__SetTotal(response.info)
url = response.info.get('content-location', response.request_url)
if client is not None:
url = client.FinalizeTransferUrl(url)
self._Initialize(http, url)
# Unless the user has requested otherwise, we want to just
# go ahead and pump the bytes now.
if self.auto_transfer:
self.StreamInChunks()
@staticmethod
def _ArgPrinter(response, unused_download):
if 'content-range' in response.info:
print 'Received %s' % response.info['content-range']
else:
print 'Received %d bytes' % len(response)
@staticmethod
def _CompletePrinter(*unused_args):
print 'Download complete'
def __NormalizeStartEnd(self, start, end=None):
if end is not None:
if start < 0:
raise exceptions.TransferInvalidError(
'Cannot have end index with negative start index')
elif start >= self.total_size:
raise exceptions.TransferInvalidError(
'Cannot have start index greater than total size')
end = min(end, self.total_size - 1)
if end < start:
raise exceptions.TransferInvalidError(
'Range requested with end[%s] < start[%s]' % (end, start))
return start, end
else:
if start < 0:
start = max(0, start + self.total_size)
return start, self.total_size
def __SetRangeHeader(self, request, start, end=None):
if start < 0:
request.headers['range'] = 'bytes=%d' % start
elif end is None:
request.headers['range'] = 'bytes=%d-' % start
else:
request.headers['range'] = 'bytes=%d-%d' % (start, end)
def __GetChunk(self, start, end=None, additional_headers=None):
"""Retrieve a chunk, and return the full response."""
self.EnsureInitialized()
end_byte = min(end or start + self.chunksize, self.total_size)
request = http_wrapper.Request(url=self.url)
self.__SetRangeHeader(request, start, end=end_byte)
if additional_headers is not None:
request.headers.update(additional_headers)
return http_wrapper.MakeRequest(self.bytes_http, request)
def __ProcessResponse(self, response):
"""Process this response (by updating self and writing to self.stream)."""
if response.status_code not in self._ACCEPTABLE_STATUSES:
raise exceptions.TransferInvalidError(response.content)
if response.status_code in (httplib.OK, httplib.PARTIAL_CONTENT):
self.stream.write(response.content)
self.__progress += len(response)
elif response.status_code == httplib.NO_CONTENT:
# It's important to write something to the stream for the case
# of a 0-byte download to a file, as otherwise python won't
# create the file.
self.stream.write('')
return response
def GetRange(self, start, end=None, additional_headers=None):
"""Retrieve a given byte range from this download, inclusive.
Range must be of one of these three forms:
* 0 <= start, end = None: Fetch from start to the end of the file.
* 0 <= start <= end: Fetch the bytes from start to end.
* start < 0, end = None: Fetch the last -start bytes of the file.
(These variations correspond to those described in the HTTP 1.1
protocol for range headers in RFC 2616, sec. 14.35.1.)
Args:
start: (int) Where to start fetching bytes. (See above.)
end: (int, optional) Where to stop fetching bytes. (See above.)
      additional_headers: (dict, optional) Any additional headers to
pass with the request.
Returns:
None. Streams bytes into self.stream.
"""
self.EnsureInitialized()
progress, end = self.__NormalizeStartEnd(start, end)
while progress < end:
chunk_end = min(progress + self.chunksize, end)
response = self.__GetChunk(progress, end=chunk_end,
additional_headers=additional_headers)
response = self.__ProcessResponse(response)
progress += len(response)
if not response:
raise exceptions.TransferInvalidError(
'Zero bytes unexpectedly returned in download response')
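  # Hedged usage sketch (illustrative; not part of the original module):
  #   download = Download.FromFile('/tmp/blob', overwrite=True,
  #                                auto_transfer=False)
  #   ...initialize the download via a client request...
  #   download.GetRange(0, 1023)   # fetch the first KiB
  #   download.GetRange(-256)      # fetch the last 256 bytes of the file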
def StreamInChunks(self, callback=None, finish_callback=None,
additional_headers=None):
"""Stream the entire download."""
callback = callback or self._ArgPrinter
finish_callback = finish_callback or self._CompletePrinter
self.EnsureInitialized()
while True:
if self.__initial_response is not None:
response = self.__initial_response
self.__initial_response = None
else:
response = self.__GetChunk(self.progress,
additional_headers=additional_headers)
response = self.__ProcessResponse(response)
self._ExecuteCallback(callback, response)
if (response.status_code == httplib.OK or
self.progress >= self.total_size):
break
self._ExecuteCallback(finish_callback, response)
class Upload(_Transfer):
"""Data for a single Upload.
Fields:
stream: The stream to upload.
mime_type: MIME type of the upload.
total_size: (optional) Total upload size for the stream.
close_stream: (default: False) Whether or not we should close the
stream when finished with the upload.
auto_transfer: (default: True) If True, stream all bytes as soon as
the upload is created.
"""
_REQUIRED_SERIALIZATION_KEYS = set((
'auto_transfer', 'mime_type', 'total_size', 'url'))
def __init__(self, stream, mime_type, total_size=None, http=None,
close_stream=False, chunksize=None, auto_transfer=True):
super(Upload, self).__init__(
stream, close_stream=close_stream, chunksize=chunksize,
auto_transfer=auto_transfer, http=http)
self.__complete = False
self.__mime_type = mime_type
self.__progress = 0
self.__server_chunk_granularity = None
self.__strategy = None
self.total_size = total_size
@property
def progress(self):
return self.__progress
@classmethod
def FromFile(cls, filename, mime_type=None, auto_transfer=True):
"""Create a new Upload object from a filename."""
path = os.path.expanduser(filename)
if not os.path.exists(path):
raise exceptions.NotFoundError('Could not find file %s' % path)
if not mime_type:
mime_type, _ = mimetypes.guess_type(path)
if mime_type is None:
raise exceptions.InvalidUserInputError(
'Could not guess mime type for %s' % path)
size = os.stat(path).st_size
return cls(open(path, 'rb'), mime_type, total_size=size, close_stream=True,
auto_transfer=auto_transfer)
@classmethod
def FromStream(cls, stream, mime_type, total_size=None, auto_transfer=True):
"""Create a new Upload object from a stream."""
if mime_type is None:
raise exceptions.InvalidUserInputError(
'No mime_type specified for stream')
return cls(stream, mime_type, total_size=total_size, close_stream=False,
auto_transfer=auto_transfer)
@classmethod
def FromData(cls, stream, json_data, http, auto_transfer=None):
"""Create a new Upload of stream from serialized json_data using http."""
info = json.loads(json_data)
missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys())
if missing_keys:
raise exceptions.InvalidDataError(
'Invalid serialization data, missing keys: %s' % (
', '.join(missing_keys)))
upload = cls.FromStream(stream, info['mime_type'],
total_size=info.get('total_size'))
if isinstance(stream, io.IOBase) and not stream.seekable():
raise exceptions.InvalidUserInputError(
'Cannot restart resumable upload on non-seekable stream')
if auto_transfer is not None:
upload.auto_transfer = auto_transfer
else:
upload.auto_transfer = info['auto_transfer']
upload.strategy = _RESUMABLE_UPLOAD
upload._Initialize(http, info['url']) # pylint: disable=protected-access
upload._RefreshResumableUploadState() # pylint: disable=protected-access
upload.EnsureInitialized()
if upload.auto_transfer:
upload.StreamInChunks()
return upload
@property
def serialization_data(self):
self.EnsureInitialized()
if self.strategy != _RESUMABLE_UPLOAD:
raise exceptions.InvalidDataError(
'Serialization only supported for resumable uploads')
return {
'auto_transfer': self.auto_transfer,
'mime_type': self.mime_type,
'total_size': self.total_size,
'url': self.url,
}
@property
def complete(self):
return self.__complete
@property
def mime_type(self):
return self.__mime_type
def __str__(self):
if not self.initialized:
return 'Upload (uninitialized)'
else:
return 'Upload with %d/%s bytes transferred for url %s' % (
self.progress, self.total_size or '???', self.url)
@property
def strategy(self):
return self.__strategy
@strategy.setter
def strategy(self, value):
if value not in (_SIMPLE_UPLOAD, _RESUMABLE_UPLOAD):
raise exceptions.UserError((
'Invalid value "%s" for upload strategy, must be one of '
'"simple" or "resumable".') % value)
self.__strategy = value
@property
def total_size(self):
return self.__total_size
@total_size.setter
def total_size(self, value):
self.EnsureUninitialized()
self.__total_size = value
def __SetDefaultUploadStrategy(self, upload_config, http_request):
"""Determine and set the default upload strategy for this upload.
We generally prefer simple or multipart, unless we're forced to
use resumable. This happens when any of (1) the upload is too
large, (2) the simple endpoint doesn't support multipart requests
and we have metadata, or (3) there is no simple upload endpoint.
Args:
upload_config: Configuration for the upload endpoint.
http_request: The associated http request.
Returns:
None.
"""
if self.strategy is not None:
return
strategy = _SIMPLE_UPLOAD
if (self.total_size is not None and
self.total_size > _RESUMABLE_UPLOAD_THRESHOLD):
strategy = _RESUMABLE_UPLOAD
if http_request.body and not upload_config.simple_multipart:
strategy = _RESUMABLE_UPLOAD
if not upload_config.simple_path:
strategy = _RESUMABLE_UPLOAD
self.strategy = strategy
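  # Illustrative outcomes (not part of the original module): a 10 MiB
  # upload exceeds _RESUMABLE_UPLOAD_THRESHOLD (5 MiB) and is forced to
  # 'resumable'; a small upload with no metadata body and a configured
  # simple upload path stays 'simple'.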
def ConfigureRequest(self, upload_config, http_request, url_builder):
"""Configure the request and url for this upload."""
# Validate total_size vs. max_size
if (self.total_size and upload_config.max_size and
self.total_size > upload_config.max_size):
raise exceptions.InvalidUserInputError(
'Upload too big: %s larger than max size %s' % (
self.total_size, upload_config.max_size))
# Validate mime type
if not util.AcceptableMimeType(upload_config.accept, self.mime_type):
raise exceptions.InvalidUserInputError(
'MIME type %s does not match any accepted MIME ranges %s' % (
self.mime_type, upload_config.accept))
self.__SetDefaultUploadStrategy(upload_config, http_request)
if self.strategy == _SIMPLE_UPLOAD:
url_builder.relative_path = upload_config.simple_path
if http_request.body:
url_builder.query_params['uploadType'] = 'multipart'
self.__ConfigureMultipartRequest(http_request)
else:
url_builder.query_params['uploadType'] = 'media'
self.__ConfigureMediaRequest(http_request)
else:
url_builder.relative_path = upload_config.resumable_path
url_builder.query_params['uploadType'] = 'resumable'
self.__ConfigureResumableRequest(http_request)
def __ConfigureMediaRequest(self, http_request):
"""Configure http_request as a simple request for this upload."""
http_request.headers['content-type'] = self.mime_type
http_request.body = self.stream.read()
def __ConfigureMultipartRequest(self, http_request):
"""Configure http_request as a multipart request for this upload."""
# This is a multipart/related upload.
msg_root = mime_multipart.MIMEMultipart('related')
# msg_root should not write out its own headers
setattr(msg_root, '_write_headers', lambda self: None)
# attach the body as one part
msg = mime_nonmultipart.MIMENonMultipart(
*http_request.headers['content-type'].split('/'))
msg.set_payload(http_request.body)
msg_root.attach(msg)
# attach the media as the second part
msg = mime_nonmultipart.MIMENonMultipart(*self.mime_type.split('/'))
msg['Content-Transfer-Encoding'] = 'binary'
msg.set_payload(self.stream.read())
msg_root.attach(msg)
# encode the body: note that we can't use `as_string`, because
# it plays games with `From ` lines.
fp = StringIO.StringIO()
g = email_generator.Generator(fp, mangle_from_=False)
g.flatten(msg_root, unixfrom=False)
http_request.body = fp.getvalue()
multipart_boundary = msg_root.get_boundary()
http_request.headers['content-type'] = (
'multipart/related; boundary=%r' % multipart_boundary)
def __ConfigureResumableRequest(self, http_request):
http_request.headers['X-Upload-Content-Type'] = self.mime_type
if self.total_size is not None:
http_request.headers['X-Upload-Content-Length'] = str(self.total_size)
def _RefreshResumableUploadState(self):
"""Talk to the server and refresh the state of this resumable upload."""
if self.strategy != _RESUMABLE_UPLOAD:
return
self.EnsureInitialized()
refresh_request = http_wrapper.Request(
url=self.url, http_method='PUT', headers={'Content-Range': 'bytes */*'})
refresh_response = http_wrapper.MakeRequest(
self.http, refresh_request, redirections=0)
range_header = refresh_response.info.get(
'Range', refresh_response.info.get('range'))
if refresh_response.status_code in (httplib.OK, httplib.CREATED):
self.__complete = True
elif refresh_response.status_code == http_wrapper.RESUME_INCOMPLETE:
if range_header is None:
self.__progress = 0
else:
self.__progress = self.__GetLastByte(range_header) + 1
self.stream.seek(self.progress)
else:
raise exceptions.HttpError.FromResponse(refresh_response)
def InitializeUpload(self, http_request, http=None, client=None):
"""Initialize this upload from the given http_request."""
if self.strategy is None:
raise exceptions.UserError(
'No upload strategy set; did you call ConfigureRequest?')
if http is None and client is None:
raise exceptions.UserError('Must provide client or http.')
if self.strategy != _RESUMABLE_UPLOAD:
return
if self.total_size is None:
raise exceptions.InvalidUserInputError(
'Cannot stream upload without total size')
http = http or client.http
if client is not None:
http_request.url = client.FinalizeTransferUrl(http_request.url)
self.EnsureUninitialized()
http_response = http_wrapper.MakeRequest(http, http_request)
if http_response.status_code != httplib.OK:
raise exceptions.HttpError.FromResponse(http_response)
self.__server_chunk_granularity = http_response.info.get(
'X-Goog-Upload-Chunk-Granularity')
self.__ValidateChunksize()
url = http_response.info['location']
if client is not None:
url = client.FinalizeTransferUrl(url)
self._Initialize(http, url)
# Unless the user has requested otherwise, we want to just
# go ahead and pump the bytes now.
if self.auto_transfer:
return self.StreamInChunks()
def __GetLastByte(self, range_header):
_, _, end = range_header.partition('-')
# TODO(user): Validate start == 0?
return int(end)
def __ValidateChunksize(self, chunksize=None):
if self.__server_chunk_granularity is None:
return
chunksize = chunksize or self.chunksize
if chunksize % self.__server_chunk_granularity:
raise exceptions.ConfigurationValueError(
'Server requires chunksize to be a multiple of %d',
self.__server_chunk_granularity)
@staticmethod
def _ArgPrinter(response, unused_upload):
print 'Sent %s' % response.info['range']
@staticmethod
def _CompletePrinter(*unused_args):
print 'Upload complete'
def StreamInChunks(self, callback=None, finish_callback=None,
additional_headers=None):
"""Send this (resumable) upload in chunks."""
if self.strategy != _RESUMABLE_UPLOAD:
raise exceptions.InvalidUserInputError(
'Cannot stream non-resumable upload')
if self.total_size is None:
raise exceptions.InvalidUserInputError(
'Cannot stream upload without total size')
callback = callback or self._ArgPrinter
finish_callback = finish_callback or self._CompletePrinter
response = None
self.__ValidateChunksize(self.chunksize)
self.EnsureInitialized()
while not self.complete:
response = self.__SendChunk(self.stream.tell(),
additional_headers=additional_headers)
if response.status_code in (httplib.OK, httplib.CREATED):
self.__complete = True
break
self.__progress = self.__GetLastByte(response.info['range'])
if self.progress + 1 != self.stream.tell():
# TODO(user): Add a better way to recover here.
raise exceptions.CommunicationError(
'Failed to transfer all bytes in chunk, upload paused at byte '
'%d' % self.progress)
self._ExecuteCallback(callback, response)
self._ExecuteCallback(finish_callback, response)
return response
def __SendChunk(self, start, additional_headers=None, data=None):
"""Send the specified chunk."""
self.EnsureInitialized()
if data is None:
data = self.stream.read(self.chunksize)
end = start + len(data)
request = http_wrapper.Request(url=self.url, http_method='PUT', body=data)
request.headers['Content-Type'] = self.mime_type
if data:
request.headers['Content-Range'] = 'bytes %s-%s/%s' % (
start, end - 1, self.total_size)
if additional_headers:
request.headers.update(additional_headers)
response = http_wrapper.MakeRequest(self.bytes_http, request)
if response.status_code not in (httplib.OK, httplib.CREATED,
http_wrapper.RESUME_INCOMPLETE):
raise exceptions.HttpError.FromResponse(response)
if response.status_code in (httplib.OK, httplib.CREATED):
return response
# TODO(user): Add retries on no progress?
last_byte = self.__GetLastByte(response.info['range'])
if last_byte + 1 != end:
new_start = last_byte + 1 - start
response = self.__SendChunk(last_byte + 1, data=data[new_start:])
return response
|
gpl-3.0
|
TeachAtTUM/edx-platform
|
openedx/core/djangoapps/schedules/tests/test_resolvers.py
|
18
|
2866
|
import datetime
from unittest import skipUnless
import ddt
from django.conf import settings
from mock import Mock
from openedx.core.djangoapps.schedules.resolvers import BinnedSchedulesBaseResolver
from openedx.core.djangoapps.schedules.tests.factories import ScheduleConfigFactory
from openedx.core.djangoapps.site_configuration.tests.factories import SiteFactory, SiteConfigurationFactory
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase, skip_unless_lms
@ddt.ddt
@skip_unless_lms
@skipUnless('openedx.core.djangoapps.schedules.apps.SchedulesConfig' in settings.INSTALLED_APPS,
"Can't test schedules if the app isn't installed")
class TestBinnedSchedulesBaseResolver(CacheIsolationTestCase):
def setUp(self):
super(TestBinnedSchedulesBaseResolver, self).setUp()
self.site = SiteFactory.create()
self.site_config = SiteConfigurationFactory(site=self.site)
self.schedule_config = ScheduleConfigFactory.create(site=self.site)
self.resolver = BinnedSchedulesBaseResolver(
async_send_task=Mock(name='async_send_task'),
site=self.site,
target_datetime=datetime.datetime.now(),
day_offset=3,
bin_num=2,
)
@ddt.data(
'course1'
)
def test_get_course_org_filter_equal(self, course_org_filter):
self.site_config.values['course_org_filter'] = course_org_filter
self.site_config.save()
mock_query = Mock()
result = self.resolver.filter_by_org(mock_query)
self.assertEqual(result, mock_query.filter.return_value)
mock_query.filter.assert_called_once_with(enrollment__course__org=course_org_filter)
@ddt.unpack
@ddt.data(
(['course1', 'course2'], ['course1', 'course2'])
)
def test_get_course_org_filter_include__in(self, course_org_filter, expected_org_list):
self.site_config.values['course_org_filter'] = course_org_filter
self.site_config.save()
mock_query = Mock()
result = self.resolver.filter_by_org(mock_query)
self.assertEqual(result, mock_query.filter.return_value)
mock_query.filter.assert_called_once_with(enrollment__course__org__in=expected_org_list)
@ddt.unpack
@ddt.data(
(None, set([])),
('course1', set([u'course1'])),
(['course1', 'course2'], set([u'course1', u'course2']))
)
def test_get_course_org_filter_exclude__in(self, course_org_filter, expected_org_list):
SiteConfigurationFactory.create(
values={'course_org_filter': course_org_filter},
)
mock_query = Mock()
result = self.resolver.filter_by_org(mock_query)
mock_query.exclude.assert_called_once_with(enrollment__course__org__in=expected_org_list)
self.assertEqual(result, mock_query.exclude.return_value)
|
agpl-3.0
|
afaheem88/rally
|
tests/unit/common/test_fileutils.py
|
18
|
2944
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from rally.common import fileutils
from tests.unit import test
class FileUtilsTestCase(test.TestCase):
@mock.patch("os.path.exists", return_value=True)
@mock.patch.dict("os.environ", values={}, clear=True)
    def test_load_env_file(self, mock_exists):
file_data = ["FAKE_ENV=fake_env\n"]
with mock.patch("rally.common.fileutils.open", mock.mock_open(
read_data=file_data), create=True) as mock_file:
mock_file.return_value.readlines.return_value = file_data
fileutils.load_env_file("path_to_file")
self.assertIn("FAKE_ENV", os.environ)
mock_file.return_value.readlines.assert_called_once_with()
@mock.patch("os.path.exists", return_value=True)
def test_update_env_file(self, mock_exists):
file_data = ["FAKE_ENV=old_value\n", "FAKE_ENV2=any\n"]
with mock.patch("rally.common.fileutils.open", mock.mock_open(
read_data=file_data), create=True) as mock_file:
mock_file.return_value.readlines.return_value = file_data
fileutils.update_env_file("path_to_file", "FAKE_ENV", "new_value")
calls = [mock.call("FAKE_ENV2=any\n"), mock.call(
"FAKE_ENV=new_value")]
mock_file.return_value.readlines.assert_called_once_with()
mock_file.return_value.write.assert_has_calls(calls)
class PackDirTestCase(test.TestCase):
@mock.patch("os.walk")
@mock.patch("zipfile.ZipFile")
def test_pack_dir(self, mock_zip_file, mock_walk):
mock_walk.side_effect = [
[("foo_root", [], ["file1", "file2", "file3"])]]
fileutils.pack_dir("rally-jobs/extra/murano/HelloReporter",
"fake_dir/package.zip")
mock_zip_file.assert_called_once_with("fake_dir/package.zip",
mode="w")
mock_walk.assert_called_once_with(
"rally-jobs/extra/murano/HelloReporter")
mock_zip_file.return_value.assert_has_calls(
[mock.call.write("foo_root/file1", "../../../../foo_root/file1"),
mock.call.write("foo_root/file2", "../../../../foo_root/file2"),
mock.call.write("foo_root/file3", "../../../../foo_root/file3"),
mock.call.close()])
|
apache-2.0
|
clstl/servo
|
tests/wpt/web-platform-tests/tools/py/py/_process/cmdexec.py
|
273
|
1814
|
import sys
import subprocess
import py
from subprocess import Popen, PIPE
def cmdexec(cmd):
""" return unicode output of executing 'cmd' in a separate process.
raise cmdexec.Error exeception if the command failed.
the exception will provide an 'err' attribute containing
the error-output from the command.
if the subprocess module does not provide a proper encoding/unicode strings
sys.getdefaultencoding() will be used, if that does not exist, 'UTF-8'.
"""
process = subprocess.Popen(cmd, shell=True,
universal_newlines=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
if sys.version_info[0] < 3: # on py3 we get unicode strings, on py2 not
try:
default_encoding = sys.getdefaultencoding() # jython may not have it
except AttributeError:
default_encoding = sys.stdout.encoding or 'UTF-8'
out = unicode(out, process.stdout.encoding or default_encoding)
err = unicode(err, process.stderr.encoding or default_encoding)
status = process.poll()
if status:
raise ExecutionFailed(status, status, cmd, out, err)
return out
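# Hedged usage sketch (illustrative; not part of the original module):
#   out = cmdexec('echo hello')
#   assert out.rstrip() == 'hello'
# A failing command raises cmdexec.Error, whose 'err' attribute holds
# the command's stderr output.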
class ExecutionFailed(py.error.Error):
def __init__(self, status, systemstatus, cmd, out, err):
Exception.__init__(self)
self.status = status
self.systemstatus = systemstatus
self.cmd = cmd
self.err = err
self.out = out
def __str__(self):
return "ExecutionFailed: %d %s\n%s" %(self.status, self.cmd, self.err)
# export the exception under the name 'py.process.cmdexec.Error'
cmdexec.Error = ExecutionFailed
try:
ExecutionFailed.__module__ = 'py.process.cmdexec'
ExecutionFailed.__name__ = 'Error'
except (AttributeError, TypeError):
pass
|
mpl-2.0
|
tgsd96/gargnotes
|
venv/lib/python2.7/site-packages/django/contrib/gis/geos/prototypes/geom.py
|
114
|
4443
|
from ctypes import c_char_p, c_int, c_size_t, c_ubyte, POINTER
from django.contrib.gis.geos.libgeos import CS_PTR, GEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_minus_one, check_sized_string, check_string, check_zero)
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
# This is the return type used by binary output (WKB, HEX) routines.
c_uchar_p = POINTER(c_ubyte)
# We create a simple subclass of c_char_p here because when the response
# type is set to c_char_p, you get a _Python_ string and there's no way
# to access the string's address inside the error checking function.
# In other words, you can't free the memory allocated inside GEOS. Previously,
# the return type would just be omitted and the integer address would be
# used -- but this allows us to be specific in the function definition and
# keeps the reference so it may be free'd.
class geos_char_p(c_char_p):
pass
### ctypes generation functions ###
def bin_constructor(func):
"Generates a prototype for binary construction (HEX, WKB) GEOS routines."
func.argtypes = [c_char_p, c_size_t]
func.restype = GEOM_PTR
func.errcheck = check_geom
return func
# HEX & WKB output
def bin_output(func):
"Generates a prototype for the routines that return a sized string."
func.argtypes = [GEOM_PTR, POINTER(c_size_t)]
func.errcheck = check_sized_string
func.restype = c_uchar_p
return func
def geom_output(func, argtypes):
"For GEOS routines that return a geometry."
if argtypes:
func.argtypes = argtypes
func.restype = GEOM_PTR
func.errcheck = check_geom
return func
def geom_index(func):
"For GEOS routines that return geometries from an index."
return geom_output(func, [GEOM_PTR, c_int])
def int_from_geom(func, zero=False):
"Argument is a geometry, return type is an integer."
func.argtypes = [GEOM_PTR]
func.restype = c_int
if zero:
func.errcheck = check_zero
else:
func.errcheck = check_minus_one
return func
def string_from_geom(func):
"Argument is a Geometry, return type is a string."
func.argtypes = [GEOM_PTR]
func.restype = geos_char_p
func.errcheck = check_string
return func
### ctypes prototypes ###
# Deprecated creation routines from WKB, HEX, WKT
from_hex = bin_constructor(GEOSFunc('GEOSGeomFromHEX_buf'))
from_wkb = bin_constructor(GEOSFunc('GEOSGeomFromWKB_buf'))
from_wkt = geom_output(GEOSFunc('GEOSGeomFromWKT'), [c_char_p])
# Deprecated output routines
to_hex = bin_output(GEOSFunc('GEOSGeomToHEX_buf'))
to_wkb = bin_output(GEOSFunc('GEOSGeomToWKB_buf'))
to_wkt = string_from_geom(GEOSFunc('GEOSGeomToWKT'))
# The GEOS geometry type, typeid, num_coordinates and number of geometries
geos_normalize = int_from_geom(GEOSFunc('GEOSNormalize'))
geos_type = string_from_geom(GEOSFunc('GEOSGeomType'))
geos_typeid = int_from_geom(GEOSFunc('GEOSGeomTypeId'))
get_dims = int_from_geom(GEOSFunc('GEOSGeom_getDimensions'), zero=True)
get_num_coords = int_from_geom(GEOSFunc('GEOSGetNumCoordinates'))
get_num_geoms = int_from_geom(GEOSFunc('GEOSGetNumGeometries'))
# Geometry creation factories
create_point = geom_output(GEOSFunc('GEOSGeom_createPoint'), [CS_PTR])
create_linestring = geom_output(GEOSFunc('GEOSGeom_createLineString'), [CS_PTR])
create_linearring = geom_output(GEOSFunc('GEOSGeom_createLinearRing'), [CS_PTR])
# Polygon and collection creation routines are special and will not
# have their argument types defined.
create_polygon = geom_output(GEOSFunc('GEOSGeom_createPolygon'), None)
create_collection = geom_output(GEOSFunc('GEOSGeom_createCollection'), None)
# Ring routines
get_extring = geom_output(GEOSFunc('GEOSGetExteriorRing'), [GEOM_PTR])
get_intring = geom_index(GEOSFunc('GEOSGetInteriorRingN'))
get_nrings = int_from_geom(GEOSFunc('GEOSGetNumInteriorRings'))
# Collection Routines
get_geomn = geom_index(GEOSFunc('GEOSGetGeometryN'))
# Cloning
geom_clone = GEOSFunc('GEOSGeom_clone')
geom_clone.argtypes = [GEOM_PTR]
geom_clone.restype = GEOM_PTR
# Destruction routine.
destroy_geom = GEOSFunc('GEOSGeom_destroy')
destroy_geom.argtypes = [GEOM_PTR]
destroy_geom.restype = None
# SRID routines
geos_get_srid = GEOSFunc('GEOSGetSRID')
geos_get_srid.argtypes = [GEOM_PTR]
geos_get_srid.restype = c_int
geos_set_srid = GEOSFunc('GEOSSetSRID')
geos_set_srid.argtypes = [GEOM_PTR, c_int]
geos_set_srid.restype = None
|
mit
|
abstract-open-solutions/account-financial-tools
|
account_invoice_currency/models/account_invoice.py
|
27
|
3754
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2004-2011
# Pexego Sistemas Informáticos. (http://pexego.es)
# Zikzakmedia S.L. (http://zikzakmedia.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
import openerp.addons.decimal_precision as dp
class AccountInvoice(models.Model):
_inherit = "account.invoice"
@api.one
@api.depends('amount_total', 'amount_untaxed', 'amount_tax',
'currency_id', 'move_id')
def _cc_amount_all(self):
if self.company_id.currency_id == self.currency_id:
self.cc_amount_untaxed = self.amount_untaxed
self.cc_amount_tax = self.amount_tax
self.cc_amount_total = self.amount_total
else:
self.cc_amount_untaxed = 0.0
self.cc_amount_tax = 0.0
self.cc_amount_total = 0.0
            # It can only be computed for open or paid invoices that
            # have a generated account move.
if self.move_id:
# Accounts to compute amount_untaxed
line_accounts = set([x.account_id.id for x in
self.invoice_line])
# Accounts to compute amount_tax
tax_accounts = set([x.account_id.id for x in
self.tax_line if x.amount != 0])
# The company currency amounts are the debit-credit
# amounts in the account moves
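                # For example (illustrative): a move line on an invoice
                # line account with debit 100.0 / credit 0.0 adds +100.0
                # to cc_amount_untaxed; the sign is flipped below for
                # customer invoices and supplier refunds.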
for line in self.move_id.line_id:
if line.account_id.id in line_accounts:
self.cc_amount_untaxed += line.debit - line.credit
if line.account_id.id in tax_accounts:
self.cc_amount_tax += line.debit - line.credit
if self.type in ('out_invoice', 'in_refund'):
self.cc_amount_untaxed = -self.cc_amount_untaxed
self.cc_amount_tax = -self.cc_amount_tax
self.cc_amount_total = (self.cc_amount_tax +
self.cc_amount_untaxed)
cc_amount_untaxed = fields.Float(
compute="_cc_amount_all", digits_compute=dp.get_precision('Account'),
string='Company Cur. Untaxed',
help="Invoice untaxed amount in the company currency (useful when "
"invoice currency is different from company currency).")
cc_amount_tax = fields.Float(
compute="_cc_amount_all", digits_compute=dp.get_precision('Account'),
string='Company Cur. Tax',
help="Invoice tax amount in the company currency (useful when invoice "
"currency is different from company currency).")
cc_amount_total = fields.Float(
compute="_cc_amount_all", digits_compute=dp.get_precision('Account'),
string='Company Cur. Total',
help="Invoice total amount in the company currency (useful when "
"invoice currency is different from company currency).")
|
agpl-3.0
|
dirn/ansible
|
contrib/inventory/vmware.py
|
59
|
16907
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
VMware Inventory Script
=======================
Retrieve information about virtual machines from a vCenter server or
standalone ESX host. When `guests_only=false` (in the INI file), host systems
are also returned in addition to VMs.
This script will attempt to read configuration from an INI file with the same
base filename if present, or `vmware.ini` if not. It is possible to create
symlinks to the inventory script to support multiple configurations, e.g.:
* `vmware.py` (this script)
* `vmware.ini` (default configuration, will be read by `vmware.py`)
* `vmware_test.py` (symlink to `vmware.py`)
* `vmware_test.ini` (test configuration, will be read by `vmware_test.py`)
* `vmware_other.py` (symlink to `vmware.py`, will read `vmware.ini` since no
`vmware_other.ini` exists)
The path to an INI file may also be specified via the `VMWARE_INI` environment
variable, in which case the filename matching rules above will not apply.
Host and authentication parameters may be specified via the `VMWARE_HOST`,
`VMWARE_USER` and `VMWARE_PASSWORD` environment variables; these options will
take precedence over options present in the INI file. An INI file is not
required if these options are specified using environment variables.
'''
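# An illustrative vmware.ini (option names taken from the code below; all
# values are hypothetical):
#
#   [defaults]
#   guests_only = True
#   cache_dir = ~/.cache/vmware
#   cache_max_age = 300
#
#   [auth]
#   host = vcenter.example.com
#   user = inventory
#   password = secret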
import collections
import json
import logging
import optparse
import os
import sys
import time
import ConfigParser
# Disable logging messages triggered by pSphere/suds.
try:
from logging import NullHandler
except ImportError:
from logging import Handler
class NullHandler(Handler):
def emit(self, record):
pass
logging.getLogger('psphere').addHandler(NullHandler())
logging.getLogger('suds').addHandler(NullHandler())
from psphere.client import Client
from psphere.errors import ObjectNotFoundError
from psphere.managedobjects import HostSystem, VirtualMachine, ManagedObject, Network
from suds.sudsobject import Object as SudsObject
class VMwareInventory(object):
def __init__(self, guests_only=None):
self.config = ConfigParser.SafeConfigParser()
if os.environ.get('VMWARE_INI', ''):
config_files = [os.environ['VMWARE_INI']]
else:
            config_files = [os.path.splitext(os.path.abspath(sys.argv[0]))[0] + '.ini', 'vmware.ini']
for config_file in config_files:
if os.path.exists(config_file):
self.config.read(config_file)
break
# Retrieve only guest VMs, or include host systems?
if guests_only is not None:
self.guests_only = guests_only
elif self.config.has_option('defaults', 'guests_only'):
self.guests_only = self.config.getboolean('defaults', 'guests_only')
else:
self.guests_only = True
# Read authentication information from VMware environment variables
# (if set), otherwise from INI file.
auth_host = os.environ.get('VMWARE_HOST')
if not auth_host and self.config.has_option('auth', 'host'):
auth_host = self.config.get('auth', 'host')
auth_user = os.environ.get('VMWARE_USER')
if not auth_user and self.config.has_option('auth', 'user'):
auth_user = self.config.get('auth', 'user')
auth_password = os.environ.get('VMWARE_PASSWORD')
if not auth_password and self.config.has_option('auth', 'password'):
auth_password = self.config.get('auth', 'password')
# Create the VMware client connection.
self.client = Client(auth_host, auth_user, auth_password)
def _put_cache(self, name, value):
'''
Saves the value to cache with the name given.
'''
if self.config.has_option('defaults', 'cache_dir'):
cache_dir = os.path.expanduser(self.config.get('defaults', 'cache_dir'))
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
cache_file = os.path.join(cache_dir, name)
with open(cache_file, 'w') as cache:
json.dump(value, cache)
def _get_cache(self, name, default=None):
'''
Retrieves the value from cache for the given name.
'''
if self.config.has_option('defaults', 'cache_dir'):
cache_dir = self.config.get('defaults', 'cache_dir')
cache_file = os.path.join(cache_dir, name)
if os.path.exists(cache_file):
if self.config.has_option('defaults', 'cache_max_age'):
cache_max_age = self.config.getint('defaults', 'cache_max_age')
else:
cache_max_age = 0
cache_stat = os.stat(cache_file)
if (cache_stat.st_mtime + cache_max_age) >= time.time():
with open(cache_file) as cache:
return json.load(cache)
return default
def _flatten_dict(self, d, parent_key='', sep='_'):
'''
Flatten nested dicts by combining keys with a separator. Lists with
only string items are included as is; any other lists are discarded.
'''
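        # For example (illustrative), with the default separator:
        #   {'a': {'b': 1}, 'c': ('x', 'y')}  ->  {'a_b': 1, 'c': ('x', 'y')}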
items = []
for k, v in d.items():
if k.startswith('_'):
continue
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(self._flatten_dict(v, new_key, sep).items())
elif isinstance(v, (list, tuple)):
if all([isinstance(x, basestring) for x in v]):
items.append((new_key, v))
else:
items.append((new_key, v))
return dict(items)
def _get_obj_info(self, obj, depth=99, seen=None):
'''
Recursively build a data structure for the given pSphere object (depth
only applies to ManagedObject instances).
'''
seen = seen or set()
if isinstance(obj, ManagedObject):
try:
obj_unicode = unicode(getattr(obj, 'name'))
except AttributeError:
obj_unicode = ()
if obj in seen:
return obj_unicode
seen.add(obj)
if depth <= 0:
return obj_unicode
d = {}
for attr in dir(obj):
if attr.startswith('_'):
continue
try:
val = getattr(obj, attr)
obj_info = self._get_obj_info(val, depth - 1, seen)
if obj_info != ():
d[attr] = obj_info
except Exception, e:
pass
return d
elif isinstance(obj, SudsObject):
d = {}
for key, val in iter(obj):
obj_info = self._get_obj_info(val, depth, seen)
if obj_info != ():
d[key] = obj_info
return d
elif isinstance(obj, (list, tuple)):
l = []
for val in iter(obj):
obj_info = self._get_obj_info(val, depth, seen)
if obj_info != ():
l.append(obj_info)
return l
elif isinstance(obj, (type(None), bool, int, long, float, basestring)):
return obj
else:
return ()
def _get_host_info(self, host, prefix='vmware'):
'''
Return a flattened dict with info about the given host system.
'''
host_info = {
'name': host.name,
}
for attr in ('datastore', 'network', 'vm'):
try:
value = getattr(host, attr)
host_info['%ss' % attr] = self._get_obj_info(value, depth=0)
except AttributeError:
host_info['%ss' % attr] = []
for k, v in self._get_obj_info(host.summary, depth=0).items():
if isinstance(v, collections.MutableMapping):
for k2, v2 in v.items():
host_info[k2] = v2
elif k != 'host':
host_info[k] = v
try:
host_info['ipAddress'] = host.config.network.vnic[0].spec.ip.ipAddress
except Exception, e:
print >> sys.stderr, e
host_info = self._flatten_dict(host_info, prefix)
if ('%s_ipAddress' % prefix) in host_info:
host_info['ansible_ssh_host'] = host_info['%s_ipAddress' % prefix]
return host_info
def _get_vm_info(self, vm, prefix='vmware'):
'''
Return a flattened dict with info about the given virtual machine.
'''
vm_info = {
'name': vm.name,
}
for attr in ('datastore', 'network'):
try:
value = getattr(vm, attr)
vm_info['%ss' % attr] = self._get_obj_info(value, depth=0)
except AttributeError:
vm_info['%ss' % attr] = []
try:
vm_info['resourcePool'] = self._get_obj_info(vm.resourcePool, depth=0)
except AttributeError:
vm_info['resourcePool'] = ''
try:
vm_info['guestState'] = vm.guest.guestState
except AttributeError:
vm_info['guestState'] = ''
for k, v in self._get_obj_info(vm.summary, depth=0).items():
if isinstance(v, collections.MutableMapping):
for k2, v2 in v.items():
if k2 == 'host':
k2 = 'hostSystem'
vm_info[k2] = v2
elif k != 'vm':
vm_info[k] = v
vm_info = self._flatten_dict(vm_info, prefix)
if ('%s_ipAddress' % prefix) in vm_info:
vm_info['ansible_ssh_host'] = vm_info['%s_ipAddress' % prefix]
return vm_info
def _add_host(self, inv, parent_group, host_name):
'''
Add the host to the parent group in the given inventory.
'''
p_group = inv.setdefault(parent_group, [])
if isinstance(p_group, dict):
group_hosts = p_group.setdefault('hosts', [])
else:
group_hosts = p_group
if host_name not in group_hosts:
group_hosts.append(host_name)
def _add_child(self, inv, parent_group, child_group):
'''
Add a child group to a parent group in the given inventory.
'''
if parent_group != 'all':
p_group = inv.setdefault(parent_group, {})
if not isinstance(p_group, dict):
inv[parent_group] = {'hosts': p_group}
p_group = inv[parent_group]
group_children = p_group.setdefault('children', [])
if child_group not in group_children:
group_children.append(child_group)
inv.setdefault(child_group, [])
def get_inventory(self, meta_hostvars=True):
'''
Reads the inventory from cache or VMware API via pSphere.
'''
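        # The result follows Ansible's dynamic-inventory JSON layout,
        # roughly (illustrative):
        #   {'all': {'hosts': [...]},
        #    '<group>': {'hosts': [...], 'children': [...]},
        #    '_meta': {'hostvars': {'<name>': {...}}}}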
# Use different cache names for guests only vs. all hosts.
if self.guests_only:
cache_name = '__inventory_guests__'
else:
cache_name = '__inventory_all__'
inv = self._get_cache(cache_name, None)
if inv is not None:
return inv
inv = {'all': {'hosts': []}}
if meta_hostvars:
inv['_meta'] = {'hostvars': {}}
        default_group = os.path.splitext(os.path.basename(sys.argv[0]))[0]
if not self.guests_only:
if self.config.has_option('defaults', 'hw_group'):
hw_group = self.config.get('defaults', 'hw_group')
else:
hw_group = default_group + '_hw'
if self.config.has_option('defaults', 'vm_group'):
vm_group = self.config.get('defaults', 'vm_group')
else:
vm_group = default_group + '_vm'
if self.config.has_option('defaults', 'prefix_filter'):
prefix_filter = self.config.get('defaults', 'prefix_filter')
else:
prefix_filter = None
# Loop through physical hosts:
for host in HostSystem.all(self.client):
if not self.guests_only:
self._add_host(inv, 'all', host.name)
self._add_host(inv, hw_group, host.name)
host_info = self._get_host_info(host)
if meta_hostvars:
inv['_meta']['hostvars'][host.name] = host_info
self._put_cache(host.name, host_info)
# Loop through all VMs on physical host.
for vm in host.vm:
if prefix_filter:
if vm.name.startswith( prefix_filter ):
continue
self._add_host(inv, 'all', vm.name)
self._add_host(inv, vm_group, vm.name)
vm_info = self._get_vm_info(vm)
if meta_hostvars:
inv['_meta']['hostvars'][vm.name] = vm_info
self._put_cache(vm.name, vm_info)
# Group by resource pool.
vm_resourcePool = vm_info.get('vmware_resourcePool', None)
if vm_resourcePool:
self._add_child(inv, vm_group, 'resource_pools')
self._add_child(inv, 'resource_pools', vm_resourcePool)
self._add_host(inv, vm_resourcePool, vm.name)
# Group by datastore.
for vm_datastore in vm_info.get('vmware_datastores', []):
self._add_child(inv, vm_group, 'datastores')
self._add_child(inv, 'datastores', vm_datastore)
self._add_host(inv, vm_datastore, vm.name)
# Group by network.
for vm_network in vm_info.get('vmware_networks', []):
self._add_child(inv, vm_group, 'networks')
self._add_child(inv, 'networks', vm_network)
self._add_host(inv, vm_network, vm.name)
# Group by guest OS.
vm_guestId = vm_info.get('vmware_guestId', None)
if vm_guestId:
self._add_child(inv, vm_group, 'guests')
self._add_child(inv, 'guests', vm_guestId)
self._add_host(inv, vm_guestId, vm.name)
# Group all VM templates.
vm_template = vm_info.get('vmware_template', False)
if vm_template:
self._add_child(inv, vm_group, 'templates')
self._add_host(inv, 'templates', vm.name)
self._put_cache(cache_name, inv)
return inv
def get_host(self, hostname):
'''
Read info about a specific host or VM from cache or VMware API.
'''
inv = self._get_cache(hostname, None)
if inv is not None:
return inv
if not self.guests_only:
try:
host = HostSystem.get(self.client, name=hostname)
inv = self._get_host_info(host)
except ObjectNotFoundError:
pass
if inv is None:
try:
vm = VirtualMachine.get(self.client, name=hostname)
inv = self._get_vm_info(vm)
except ObjectNotFoundError:
pass
if inv is not None:
self._put_cache(hostname, inv)
return inv or {}
def main():
parser = optparse.OptionParser()
parser.add_option('--list', action='store_true', dest='list',
default=False, help='Output inventory groups and hosts')
parser.add_option('--host', dest='host', default=None, metavar='HOST',
help='Output variables only for the given hostname')
# Additional options for use when running the script standalone, but never
# used by Ansible.
parser.add_option('--pretty', action='store_true', dest='pretty',
default=False, help='Output nicely-formatted JSON')
parser.add_option('--include-host-systems', action='store_true',
dest='include_host_systems', default=False,
help='Include host systems in addition to VMs')
parser.add_option('--no-meta-hostvars', action='store_false',
dest='meta_hostvars', default=True,
help='Exclude [\'_meta\'][\'hostvars\'] with --list')
options, args = parser.parse_args()
if options.include_host_systems:
vmware_inventory = VMwareInventory(guests_only=False)
else:
vmware_inventory = VMwareInventory()
if options.host is not None:
inventory = vmware_inventory.get_host(options.host)
else:
inventory = vmware_inventory.get_inventory(options.meta_hostvars)
json_kwargs = {}
if options.pretty:
json_kwargs.update({'indent': 4, 'sort_keys': True})
json.dump(inventory, sys.stdout, **json_kwargs)
if __name__ == '__main__':
main()
|
gpl-3.0
|
ladyquartermaine/POSTMan-Chrome-Extension
|
tests/selenium/pmtests/postman_tests_requests.py
|
104
|
23191
|
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
import selenium.webdriver.chrome.service as service
import traceback
import inspect
import time
from postman_tests import PostmanTests
class PostmanTestsRequests(PostmanTests):
def test_1_get_basic(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/get")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("get") > 0:
return True
else:
return False
def test_2_get_only_key(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/get?start")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("/get?start") > 0:
return True
else:
return False
def test_3_delete_basic(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/delete")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("DELETE")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("delete") > 0:
return True
else:
return False
return True
def test_4_head_basic(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/html")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("HEAD")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("div") > 0:
return True
else:
return False
def test_5_options_basic(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/html")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("OPTIONS")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("div") > 0:
return True
else:
return False
def test_6_post_basic(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/post")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("POST")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("post") > 0:
return True
else:
return False
def test_7_put_basic(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/put")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("PUT")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("put") > 0:
return True
else:
return False
def test_8_init_environment(self):
environment_selector = self.browser.find_element_by_id("environment-selector")
environment_selector.click()
time.sleep(0.1)
manage_env_link = self.browser.find_element_by_css_selector("#environment-selector .dropdown-menu li:last-child a")
manage_env_link.click()
time.sleep(1)
add_env_button = self.browser.find_element_by_css_selector("#environments-list-wrapper .toolbar .environments-actions-add")
add_env_button.click()
time.sleep(0.3)
environment_name = self.browser.find_element_by_id("environment-editor-name")
environment_name.clear()
environment_name.send_keys("Requests environment")
first_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:first-child .keyvalueeditor-key")
first_key.clear()
first_key.send_keys("path_get")
first_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:first-child .keyvalueeditor-value")
first_val.clear()
first_val.send_keys("get?start=something")
second_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-key")
second_key.clear()
second_key.send_keys("path_post")
second_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-value")
second_val.clear()
second_val.send_keys("post")
third_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(3) .keyvalueeditor-key")
third_key.clear()
third_key.send_keys("Foo")
third_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(3) .keyvalueeditor-value")
third_val.clear()
third_val.send_keys("Bar")
fourth_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(4) .keyvalueeditor-key")
fourth_key.clear()
fourth_key.send_keys("Name")
fourth_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(4) .keyvalueeditor-value")
fourth_val.clear()
fourth_val.send_keys("John Appleseed")
fifth_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(5) .keyvalueeditor-key")
fifth_key.clear()
fifth_key.send_keys("nonce")
fifth_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(5) .keyvalueeditor-value")
fifth_val.clear()
fifth_val.send_keys("kllo9940pd9333jh")
sixth_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(6) .keyvalueeditor-key")
sixth_key.clear()
sixth_key.send_keys("timestamp")
sixth_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(6) .keyvalueeditor-value")
sixth_val.clear()
sixth_val.send_keys("1191242096")
seventh_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(7) .keyvalueeditor-key")
seventh_key.clear()
seventh_key.send_keys("url")
seventh_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(7) .keyvalueeditor-value")
seventh_val.clear()
seventh_val.send_keys("http://photos.example.net/photos")
eigth_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(8) .keyvalueeditor-key")
eigth_key.clear()
eigth_key.send_keys("file")
eigth_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(8) .keyvalueeditor-value")
eigth_val.clear()
eigth_val.send_keys("vacation.jpg")
ninth_key = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(9) .keyvalueeditor-key")
ninth_key.clear()
ninth_key.send_keys("size")
ninth_val = self.browser.find_element_by_css_selector("#environment-keyvaleditor .keyvalueeditor-row:nth-of-type(9) .keyvalueeditor-value")
ninth_val.clear()
ninth_val.send_keys("original")
submit_button = self.browser.find_element_by_css_selector("#modal-environments .environments-actions-add-submit")
submit_button.click()
time.sleep(0.3)
close_button = self.browser.find_element_by_css_selector("#modal-environments .modal-header .close")
close_button.click()
time.sleep(1)
environment_selector = self.browser.find_element_by_id("environment-selector")
environment_selector.click()
# Select the environment
manage_env_link = self.browser.find_element_by_css_selector("#environment-selector .dropdown-menu li:nth-of-type(1) a")
manage_env_link.click()
return True
def test_9_get_environment(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/{{path_get}}")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("get?start=something") > 0:
return True
else:
return False
def test_10_post_formdata_environment(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/{{path_post}}")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("POST")
first_formdata_key = self.browser.find_element_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row:nth-of-type(1) .keyvalueeditor-key")
first_formdata_key.clear()
first_formdata_key.send_keys("size")
first_formdata_value = self.browser.find_element_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row:nth-of-type(1) .keyvalueeditor-value")
first_formdata_value.clear()
first_formdata_value.send_keys("{{size}}")
second_formdata_key = self.browser.find_element_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-key")
second_formdata_key.clear()
second_formdata_key.send_keys("file")
second_formdata_value = self.browser.find_element_by_css_selector("#formdata-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-value")
second_formdata_value.clear()
second_formdata_value.send_keys("{{file}}")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("original") > 0:
return True
else:
return False
def test_11_post_urlencoded_environment(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/{{path_post}}")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("POST")
# Select urlencoded
self.browser.find_element_by_css_selector("#data-mode-selector a:nth-of-type(2)").click()
first_formdata_key = self.browser.find_element_by_css_selector("#urlencoded-keyvaleditor .keyvalueeditor-row:nth-of-type(1) .keyvalueeditor-key")
first_formdata_key.clear()
first_formdata_key.send_keys("size")
first_formdata_value = self.browser.find_element_by_css_selector("#urlencoded-keyvaleditor .keyvalueeditor-row:nth-of-type(1) .keyvalueeditor-value")
first_formdata_value.clear()
first_formdata_value.send_keys("{{size}}")
second_formdata_key = self.browser.find_element_by_css_selector("#urlencoded-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-key")
second_formdata_key.clear()
second_formdata_key.send_keys("file")
second_formdata_value = self.browser.find_element_by_css_selector("#urlencoded-keyvaleditor .keyvalueeditor-row:nth-of-type(2) .keyvalueeditor-value")
second_formdata_value.clear()
second_formdata_value.send_keys("{{file}}")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("original") > 0:
return True
else:
return False
def test_12_post_raw_environment(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/{{path_post}}")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("POST")
# Select urlencoded
self.browser.find_element_by_css_selector("#data-mode-selector a:nth-of-type(3)").click()
self.set_code_mirror_raw_value("{{Foo}}={{Name}}")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("John Appleseed") > 0:
return True
else:
return False
def test_13_post_raw_json_environment(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/{{path_post}}")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("POST")
self.browser.find_element_by_css_selector("#data-mode-selector a:nth-of-type(3)").click()
self.set_code_mirror_raw_value("{\"{{Foo}}\":\"{{Name}}\"")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("John Appleseed") > 0:
return True
else:
return False
# https://github.com/a85/POSTMan-Chrome-Extension/issues/174
def test_14_url_with_semicolon(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/get?some=start;val")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("/get?some=start;val") > 0:
return True
else:
return False
# https://github.com/a85/POSTMan-Chrome-Extension/issues/165
def test_15_odata_url(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/Resource(code1='1',code2='1')")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("Not Found") > 0:
first_history_item = self.browser.find_element_by_css_selector("#history-items li:nth-of-type(1) .request .request-name")
value = self.browser.execute_script("return arguments[0].innerHTML", first_history_item)
if value.find("http://localhost:5000/Resource(code1='1'<br>,code2='1')") > 0:
return True
else:
return False
else:
return False
def test_16_with_no_cache(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/get")
settings_button = self.browser.find_element_by_css_selector(".preferences a:nth-of-type(2)")
settings_button.click()
time.sleep(1)
no_cache_select = self.browser.find_element_by_id("send-no-cache-header")
Select(no_cache_select).select_by_value("true")
close_button = self.browser.find_element_by_css_selector("#modal-settings .modal-header .close")
close_button.click()
time.sleep(1)
self.set_url_field(self.browser, "http://localhost:5000/get")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("no-cache") > 0:
return True
else:
return False
def test_17_without_no_cache(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/get")
settings_button = self.browser.find_element_by_css_selector(".preferences a:nth-of-type(2)")
settings_button.click()
time.sleep(1)
no_cache_select = self.browser.find_element_by_id("send-no-cache-header")
Select(no_cache_select).select_by_value("false")
close_button = self.browser.find_element_by_css_selector("#modal-settings .modal-header .close")
close_button.click()
time.sleep(1)
self.set_url_field(self.browser, "http://localhost:5000/get")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("no-cache") < 0:
return True
else:
return False
def test_18_raw_json_type(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/post")
self.browser.find_element_by_id("headers-keyvaleditor-actions-open").click()
time.sleep(0.1)
first_key = self.browser.find_element_by_css_selector("#headers-keyvaleditor .keyvalueeditor-row:first-child .keyvalueeditor-key")
first_key.clear()
first_key.send_keys("Content-Type")
first_val = self.browser.find_element_by_css_selector("#headers-keyvaleditor .keyvalueeditor-row:first-child .keyvalueeditor-value")
first_val.clear()
first_val.send_keys("text/json")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("POST")
self.browser.find_element_by_css_selector("#data-mode-selector a:nth-of-type(3)").click()
self.set_code_mirror_raw_value("{\"{{Foo}}\":\"{{Name}}\"")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("text/json") > 0:
            self.reset_request()
first_history_item = self.browser.find_element_by_css_selector("#history-items li:nth-of-type(1) .request")
first_history_item.click()
try:
w = WebDriverWait(self.browser, 10)
w.until(lambda browser: self.browser.find_element_by_id("url").get_attribute("value") == "http://localhost:5000/post")
selected_mode_element = self.browser.find_element_by_id("body-editor-mode-item-selected")
selected_mode_element_value = self.browser.execute_script("return arguments[0].innerHTML", selected_mode_element)
if selected_mode_element_value.find("JSON") == 0:
return True
else:
return False
except:
return False
else:
return False
def test_19_raw_xml_type(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/post")
self.browser.find_element_by_id("headers-keyvaleditor-actions-open").click()
time.sleep(0.1)
self.browser.find_element_by_id("headers-keyvaleditor-actions-open").click()
time.sleep(0.1)
first_key = self.browser.find_element_by_css_selector("#headers-keyvaleditor .keyvalueeditor-row:first-child .keyvalueeditor-key")
first_key.clear()
first_key.send_keys("Content-Type")
first_val = self.browser.find_element_by_css_selector("#headers-keyvaleditor .keyvalueeditor-row:first-child .keyvalueeditor-value")
first_val.clear()
first_val.send_keys("text/xml")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("POST")
self.browser.find_element_by_css_selector("#data-mode-selector a:nth-of-type(3)").click()
self.set_code_mirror_raw_value("{\"{{Foo}}\":\"{{Name}}\"")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("text/xml") > 0:
            self.reset_request()
first_history_item = self.browser.find_element_by_css_selector("#history-items li:nth-of-type(1) .request")
first_history_item.click()
try:
w = WebDriverWait(self.browser, 10)
w.until(lambda browser: self.browser.find_element_by_id("url").get_attribute("value") == "http://localhost:5000/post")
selected_mode_element = self.browser.find_element_by_id("body-editor-mode-item-selected")
selected_mode_element_value = self.browser.execute_script("return arguments[0].innerHTML", selected_mode_element)
if selected_mode_element_value.find("XML") == 0:
return True
else:
return False
except:
return False
else:
return False
def na_test_20_raw_large_request(self):
self.reset_request()
self.set_url_field(self.browser, "http://localhost:5000/post")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("POST")
self.browser.find_element_by_css_selector("#data-mode-selector a:nth-of-type(3)").click()
try:
raw_json = open("large_json.json").read()
self.set_code_mirror_raw_value(raw_json)
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("images/user_1.png") > 0:
return True
else:
return False
except:
print traceback.format_exc()
return False
PostmanTestsRequests().run()
|
apache-2.0
|
camradal/ansible
|
lib/ansible/modules/network/sros/sros_rollback.py
|
21
|
7400
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: sros_rollback
version_added: "2.2"
author: "Peter Sprygada (@privateip)"
short_description: Configure Nokia SR OS rollback
description:
- Configure the rollback feature on remote Nokia devices running
      the SR OS operating system. This module provides a stateful
      implementation for managing the configuration of the rollback
      feature.
extends_documentation_fragment: sros
options:
rollback_location:
description:
- The I(rollback_location) specifies the location and filename
of the rollback checkpoint files. This argument supports any
valid local or remote URL as specified in SR OS
required: false
default: null
remote_max_checkpoints:
description:
- The I(remote_max_checkpoints) argument configures the maximum
number of rollback files that can be transferred and saved to
a remote location. Valid values for this argument are in the
range of 1 to 50
required: false
default: null
local_max_checkpoints:
description:
- The I(local_max_checkpoints) argument configures the maximum
number of rollback files that can be saved on the devices local
compact flash. Valid values for this argument are in the range
of 1 to 50
required: false
default: null
rescue_location:
description:
- The I(rescue_location) specifies the location of the
rescue file. This argument supports any valid local
or remote URL as specified in SR OS
required: false
default: null
state:
description:
- The I(state) argument specifies the state of the configuration
      entries in the device's active configuration. When the state
      value is set to C(present) the configuration is present in the
      device's active configuration. When the state value is set to
      C(absent) the configuration values are removed from the device's
      active configuration.
required: false
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
transport: cli
- name: configure rollback location
sros_rollback:
rollback_location: "cb3:/ansible"
provider: "{{ cli }}"
- name: remove all rollback configuration
sros_rollback:
state: absent
provider: "{{ cli }}"
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['...', '...']
"""
from ansible.module_utils.basic import get_exception
from ansible.module_utils.sros import NetworkModule, NetworkError
from ansible.module_utils.netcfg import NetworkConfig, dumps
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
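# For example (illustrative): invoke('present', module, commands) dispatches
# to present(module, commands); main() below uses this to call whichever
# function matches the 'state' parameter.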
def sanitize_config(lines):
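    # Keep only the most specific command when one line extends another,
    # e.g. (illustrative) ['configure system', 'configure system rollback']
    # collapses to ['configure system rollback'].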
commands = list()
for line in lines:
for index, entry in enumerate(commands):
if line.startswith(entry):
del commands[index]
break
commands.append(line)
return commands
def present(module, commands):
setters = set()
for key, value in module.argument_spec.items():
if module.params[key] is not None:
setter = value.get('setter') or 'set_%s' % key
if setter not in setters:
setters.add(setter)
invoke(setter, module, commands)
def absent(module, commands):
config = module.config.get_config()
if 'rollback-location' in config:
commands.append('configure system rollback no rollback-location')
if 'rescue-location' in config:
commands.append('configure system rollback no rescue-location')
if 'remote-max-checkpoints' in config:
commands.append('configure system rollback no remote-max-checkpoints')
if 'local-max-checkpoints' in config:
        commands.append('configure system rollback no local-max-checkpoints')
def set_rollback_location(module, commands):
value = module.params['rollback_location']
commands.append('configure system rollback rollback-location "%s"' % value)
def set_local_max_checkpoints(module, commands):
value = module.params['local_max_checkpoints']
if not 1 <= value <= 50:
module.fail_json(msg='local_max_checkpoints must be between 1 and 50')
commands.append('configure system rollback local-max-checkpoints %s' % value)
def set_remote_max_checkpoints(module, commands):
value = module.params['remote_max_checkpoints']
if not 1 <= value <= 50:
module.fail_json(msg='remote_max_checkpoints must be between 1 and 50')
commands.append('configure system rollback remote-max-checkpoints %s' % value)
def set_rescue_location(module, commands):
value = module.params['rescue_location']
commands.append('configure system rollback rescue-location "%s"' % value)
def get_config(module):
contents = module.config.get_config()
return NetworkConfig(device_os='sros', contents=contents)
def load_config(module, commands, result):
candidate = NetworkConfig(device_os='sros', contents='\n'.join(commands))
config = get_config(module)
configobjs = candidate.difference(config)
if configobjs:
commands = dumps(configobjs, 'lines')
commands = sanitize_config(commands.split('\n'))
result['updates'] = commands
# send the configuration commands to the device and merge
# them with the current running config
if not module.check_mode:
module.config(commands)
result['changed'] = True
def main():
""" main entry point for module execution
"""
argument_spec = dict(
rollback_location=dict(),
local_max_checkpoints=dict(type='int'),
remote_max_checkpoints=dict(type='int'),
rescue_location=dict(),
state=dict(default='present', choices=['present', 'absent'])
)
module = NetworkModule(argument_spec=argument_spec,
connect_on_load=False,
supports_check_mode=True)
state = module.params['state']
result = dict(changed=False)
commands = list()
invoke(state, module, commands)
try:
load_config(module, commands, result)
except NetworkError:
exc = get_exception()
module.fail_json(msg=str(exc), **exc.kwargs)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
syhost/android_kernel_oppo_find7a
|
tools/perf/util/setup.py
|
4998
|
1330
|
#!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
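# Illustrative invocation (paths hypothetical); the surrounding perf build is
# expected to export these variables before running this script:
#   PYTHON_EXTBUILD_LIB=/tmp/ext/lib/ PYTHON_EXTBUILD_TMP=/tmp/ext/tmp/ \
#       python2 util/setup.py build_ext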
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='[email protected]',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
|
gpl-2.0
|
pra85/calibre
|
src/cherrypy/_cpchecker.py
|
87
|
14739
|
import os
import warnings
import cherrypy
from cherrypy._cpcompat import iteritems, copykeys, builtins
class Checker(object):
"""A checker for CherryPy sites and their mounted applications.
When this object is called at engine startup, it executes each
of its own methods whose names start with ``check_``. If you wish
to disable selected checks, simply add a line in your global
config which sets the appropriate method to False::
[global]
checker.check_skipped_app_config = False
You may also dynamically add or replace ``check_*`` methods in this way.
"""
on = True
"""If True (the default), run all checks; if False, turn off all checks."""
def __init__(self):
self._populate_known_types()
def __call__(self):
"""Run all check_* methods."""
if self.on:
oldformatwarning = warnings.formatwarning
warnings.formatwarning = self.formatwarning
try:
for name in dir(self):
if name.startswith("check_"):
method = getattr(self, name)
if method and hasattr(method, '__call__'):
method()
finally:
warnings.formatwarning = oldformatwarning
def formatwarning(self, message, category, filename, lineno, line=None):
"""Function to format a warning."""
return "CherryPy Checker:\n%s\n\n" % message
# This value should be set inside _cpconfig.
global_config_contained_paths = False
def check_app_config_entries_dont_start_with_script_name(self):
"""Check for Application config with sections that repeat script_name."""
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
if not app.config:
continue
if sn == '':
continue
sn_atoms = sn.strip("/").split("/")
for key in app.config.keys():
key_atoms = key.strip("/").split("/")
if key_atoms[:len(sn_atoms)] == sn_atoms:
warnings.warn(
"The application mounted at %r has config " \
"entries that start with its script name: %r" % (sn, key))
def check_site_config_entries_in_app_config(self):
"""Check for mounted Applications that have site-scoped config."""
for sn, app in iteritems(cherrypy.tree.apps):
if not isinstance(app, cherrypy.Application):
continue
msg = []
for section, entries in iteritems(app.config):
if section.startswith('/'):
for key, value in iteritems(entries):
for n in ("engine.", "server.", "tree.", "checker."):
if key.startswith(n):
msg.append("[%s] %s = %s" % (section, key, value))
if msg:
msg.insert(0,
"The application mounted at %r contains the following "
"config entries, which are only allowed in site-wide "
"config. Move them to a [global] section and pass them "
"to cherrypy.config.update() instead of tree.mount()." % sn)
warnings.warn(os.linesep.join(msg))
def check_skipped_app_config(self):
"""Check for mounted Applications that have no config."""
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
if not app.config:
msg = "The Application mounted at %r has an empty config." % sn
if self.global_config_contained_paths:
msg += (" It looks like the config you passed to "
"cherrypy.config.update() contains application-"
"specific sections. You must explicitly pass "
"application config via "
"cherrypy.tree.mount(..., config=app_config)")
warnings.warn(msg)
return
def check_app_config_brackets(self):
"""Check for Application config with extraneous brackets in section names."""
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
if not app.config:
continue
for key in app.config.keys():
if key.startswith("[") or key.endswith("]"):
warnings.warn(
"The application mounted at %r has config " \
"section names with extraneous brackets: %r. "
"Config *files* need brackets; config *dicts* "
"(e.g. passed to tree.mount) do not." % (sn, key))
def check_static_paths(self):
"""Check Application config for incorrect static paths."""
# Use the dummy Request object in the main thread.
request = cherrypy.request
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
request.app = app
for section in app.config:
# get_resource will populate request.config
request.get_resource(section + "/dummy.html")
conf = request.config.get
if conf("tools.staticdir.on", False):
msg = ""
root = conf("tools.staticdir.root")
dir = conf("tools.staticdir.dir")
if dir is None:
msg = "tools.staticdir.dir is not set."
else:
fulldir = ""
if os.path.isabs(dir):
fulldir = dir
if root:
msg = ("dir is an absolute path, even "
"though a root is provided.")
testdir = os.path.join(root, dir[1:])
if os.path.exists(testdir):
msg += ("\nIf you meant to serve the "
"filesystem folder at %r, remove "
"the leading slash from dir." % testdir)
else:
if not root:
msg = "dir is a relative path and no root provided."
else:
fulldir = os.path.join(root, dir)
if not os.path.isabs(fulldir):
msg = "%r is not an absolute path." % fulldir
if fulldir and not os.path.exists(fulldir):
if msg:
msg += "\n"
msg += ("%r (root + dir) is not an existing "
"filesystem path." % fulldir)
if msg:
warnings.warn("%s\nsection: [%s]\nroot: %r\ndir: %r"
% (msg, section, root, dir))
# -------------------------- Compatibility -------------------------- #
obsolete = {
'server.default_content_type': 'tools.response_headers.headers',
'log_access_file': 'log.access_file',
'log_config_options': None,
'log_file': 'log.error_file',
'log_file_not_found': None,
'log_request_headers': 'tools.log_headers.on',
'log_to_screen': 'log.screen',
'show_tracebacks': 'request.show_tracebacks',
'throw_errors': 'request.throw_errors',
'profiler.on': ('cherrypy.tree.mount(profiler.make_app('
'cherrypy.Application(Root())))'),
}
deprecated = {}
def _compat(self, config):
"""Process config and warn on each obsolete or deprecated entry."""
for section, conf in config.items():
if isinstance(conf, dict):
for k, v in conf.items():
if k in self.obsolete:
warnings.warn("%r is obsolete. Use %r instead.\n"
"section: [%s]" %
(k, self.obsolete[k], section))
elif k in self.deprecated:
warnings.warn("%r is deprecated. Use %r instead.\n"
"section: [%s]" %
(k, self.deprecated[k], section))
else:
if section in self.obsolete:
warnings.warn("%r is obsolete. Use %r instead."
% (section, self.obsolete[section]))
elif section in self.deprecated:
warnings.warn("%r is deprecated. Use %r instead."
% (section, self.deprecated[section]))
def check_compatibility(self):
"""Process config and warn on each obsolete or deprecated entry."""
self._compat(cherrypy.config)
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
self._compat(app.config)
# ------------------------ Known Namespaces ------------------------ #
extra_config_namespaces = []
def _known_ns(self, app):
ns = ["wsgi"]
ns.extend(copykeys(app.toolboxes))
ns.extend(copykeys(app.namespaces))
ns.extend(copykeys(app.request_class.namespaces))
ns.extend(copykeys(cherrypy.config.namespaces))
ns += self.extra_config_namespaces
for section, conf in app.config.items():
is_path_section = section.startswith("/")
if is_path_section and isinstance(conf, dict):
for k, v in conf.items():
atoms = k.split(".")
if len(atoms) > 1:
if atoms[0] not in ns:
# Spit out a special warning if a known
# namespace is preceded by "cherrypy."
if (atoms[0] == "cherrypy" and atoms[1] in ns):
msg = ("The config entry %r is invalid; "
"try %r instead.\nsection: [%s]"
% (k, ".".join(atoms[1:]), section))
else:
msg = ("The config entry %r is invalid, because "
"the %r config namespace is unknown.\n"
"section: [%s]" % (k, atoms[0], section))
warnings.warn(msg)
elif atoms[0] == "tools":
if atoms[1] not in dir(cherrypy.tools):
msg = ("The config entry %r may be invalid, "
"because the %r tool was not found.\n"
"section: [%s]" % (k, atoms[1], section))
warnings.warn(msg)
def check_config_namespaces(self):
"""Process config and warn on each unknown config namespace."""
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
self._known_ns(app)
# -------------------------- Config Types -------------------------- #
known_config_types = {}
def _populate_known_types(self):
b = [x for x in vars(builtins).values()
if type(x) is type(str)]
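        # (illustrative) b collects the builtin classes themselves, e.g.
        # int, str and float, since type(int) is type(str) is type.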
def traverse(obj, namespace):
for name in dir(obj):
# Hack for 3.2's warning about body_params
if name == 'body_params':
continue
vtype = type(getattr(obj, name, None))
if vtype in b:
self.known_config_types[namespace + "." + name] = vtype
traverse(cherrypy.request, "request")
traverse(cherrypy.response, "response")
traverse(cherrypy.server, "server")
traverse(cherrypy.engine, "engine")
traverse(cherrypy.log, "log")
def _known_types(self, config):
msg = ("The config entry %r in section %r is of type %r, "
"which does not match the expected type %r.")
for section, conf in config.items():
if isinstance(conf, dict):
for k, v in conf.items():
if v is not None:
expected_type = self.known_config_types.get(k, None)
vtype = type(v)
if expected_type and vtype != expected_type:
warnings.warn(msg % (k, section, vtype.__name__,
expected_type.__name__))
else:
k, v = section, conf
if v is not None:
expected_type = self.known_config_types.get(k, None)
vtype = type(v)
if expected_type and vtype != expected_type:
warnings.warn(msg % (k, section, vtype.__name__,
expected_type.__name__))
def check_config_types(self):
"""Assert that config values are of the same type as default values."""
self._known_types(cherrypy.config)
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
self._known_types(app.config)
# -------------------- Specific config warnings -------------------- #
def check_localhost(self):
"""Warn if any socket_host is 'localhost'. See #711."""
for k, v in cherrypy.config.items():
if k == 'server.socket_host' and v == 'localhost':
warnings.warn("The use of 'localhost' as a socket host can "
"cause problems on newer systems, since 'localhost' can "
"map to either an IPv4 or an IPv6 address. You should "
"use '127.0.0.1' or '[::1]' instead.")
|
gpl-3.0
|
Kazade/NeHe-Website
|
google_appengine/lib/pyasn1/pyasn1/codec/ber/decoder.py
|
15
|
32174
|
# BER decoder
from pyasn1.type import tag, base, univ, char, useful, tagmap
from pyasn1.codec.ber import eoo
from pyasn1.compat.octets import oct2int, octs2ints
from pyasn1 import debug, error
class AbstractDecoder:
protoComponent = None
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun):
raise error.PyAsn1Error('Decoder not implemented for %s' % (tagSet,))
def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun):
raise error.PyAsn1Error('Indefinite length mode decoder not implemented for %s' % (tagSet,))
class AbstractSimpleDecoder(AbstractDecoder):
def _createComponent(self, asn1Spec, tagSet, value=None):
if asn1Spec is None:
return self.protoComponent.clone(value, tagSet)
elif value is None:
return asn1Spec
else:
return asn1Spec.clone(value)
class AbstractConstructedDecoder(AbstractDecoder):
def _createComponent(self, asn1Spec, tagSet, value=None):
if asn1Spec is None:
return self.protoComponent.clone(tagSet)
else:
return asn1Spec.clone()
class EndOfOctetsDecoder(AbstractSimpleDecoder):
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun):
return eoo.endOfOctets, substrate[:length]
class ExplicitTagDecoder(AbstractSimpleDecoder):
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun):
return decodeFun(substrate[:length], asn1Spec, tagSet, length)
def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun):
value, substrate = decodeFun(substrate, asn1Spec, tagSet, length)
terminator, substrate = decodeFun(substrate)
if terminator == eoo.endOfOctets:
return value, substrate
else:
raise error.PyAsn1Error('Missing end-of-octets terminator')
explicitTagDecoder = ExplicitTagDecoder()
class IntegerDecoder(AbstractSimpleDecoder):
protoComponent = univ.Integer(0)
precomputedValues = {
'\x00': 0,
'\x01': 1,
'\x02': 2,
'\x03': 3,
'\x04': 4,
'\x05': 5,
'\x06': 6,
'\x07': 7,
'\x08': 8,
'\x09': 9,
'\xff': -1,
'\xfe': -2,
'\xfd': -3,
'\xfc': -4,
'\xfb': -5
}
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet, length,
state, decodeFun):
substrate = substrate[:length]
if not substrate:
raise error.PyAsn1Error('Empty substrate')
if substrate in self.precomputedValues:
value = self.precomputedValues[substrate]
else:
firstOctet = oct2int(substrate[0])
if firstOctet & 0x80:
value = -1
else:
value = 0
for octet in substrate:
value = value << 8 | oct2int(octet)
return self._createComponent(asn1Spec, tagSet, value), substrate
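        # Worked example (editor's note): the substrate '\x00\x80' decodes
        # to 128 (the leading zero octet keeps the sign bit clear), while a
        # lone '\x80' decodes to -128: value starts at -1 because the sign
        # bit is set, and -1 << 8 | 0x80 == -128.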
class BooleanDecoder(IntegerDecoder):
protoComponent = univ.Boolean(0)
def _createComponent(self, asn1Spec, tagSet, value=None):
return IntegerDecoder._createComponent(self, asn1Spec, tagSet, value and 1 or 0)
class BitStringDecoder(AbstractSimpleDecoder):
protoComponent = univ.BitString(())
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet, length,
state, decodeFun):
substrate = substrate[:length]
if tagSet[0][1] == tag.tagFormatSimple: # XXX what tag to check?
if not substrate:
raise error.PyAsn1Error('Missing initial octet')
trailingBits = oct2int(substrate[0])
if trailingBits > 7:
raise error.PyAsn1Error(
'Trailing bits overflow %s' % trailingBits
)
substrate = substrate[1:]
lsb = p = 0; l = len(substrate)-1; b = ()
while p <= l:
if p == l:
lsb = trailingBits
j = 7
o = oct2int(substrate[p])
while j >= lsb:
b = b + ((o>>j)&0x01,)
j = j - 1
p = p + 1
return self._createComponent(asn1Spec, tagSet, b), ''
r = self._createComponent(asn1Spec, tagSet, ())
if not decodeFun:
return r, substrate
while substrate:
component, substrate = decodeFun(substrate)
r = r + component
return r, substrate
def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun):
r = self._createComponent(asn1Spec, tagSet, '')
if not decodeFun:
return r, substrate
while substrate:
component, substrate = decodeFun(substrate)
if component == eoo.endOfOctets:
break
r = r + component
else:
raise error.SubstrateUnderrunError(
'No EOO seen before substrate ends'
)
return r, substrate
class OctetStringDecoder(AbstractSimpleDecoder):
protoComponent = univ.OctetString('')
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet, length,
state, decodeFun):
substrate = substrate[:length]
if tagSet[0][1] == tag.tagFormatSimple: # XXX what tag to check?
return self._createComponent(asn1Spec, tagSet, substrate), ''
r = self._createComponent(asn1Spec, tagSet, '')
if not decodeFun:
return r, substrate
while substrate:
component, substrate = decodeFun(substrate)
r = r + component
return r, substrate
def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun):
r = self._createComponent(asn1Spec, tagSet, '')
if not decodeFun:
return r, substrate
while substrate:
component, substrate = decodeFun(substrate)
if component == eoo.endOfOctets:
break
r = r + component
else:
raise error.SubstrateUnderrunError(
'No EOO seen before substrate ends'
)
return r, substrate
class NullDecoder(AbstractSimpleDecoder):
protoComponent = univ.Null('')
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun):
substrate = substrate[:length]
r = self._createComponent(asn1Spec, tagSet)
if length:
raise error.PyAsn1Error('Unexpected %d-octet substrate for Null' % length)
return r, substrate
class ObjectIdentifierDecoder(AbstractSimpleDecoder):
protoComponent = univ.ObjectIdentifier(())
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet, length,
state, decodeFun):
substrate = substrate[:length]
if not substrate:
raise error.PyAsn1Error('Empty substrate')
# Get the first subid
subId = oct2int(substrate[0])
oid = divmod(subId, 40)
index = 1
substrateLen = len(substrate)
while index < substrateLen:
subId = oct2int(substrate[index])
index = index + 1
if subId > 127:
# Construct subid from a number of octets
nextSubId = subId
subId = 0
while nextSubId >= 128:
subId = (subId << 7) + (nextSubId & 0x7F)
if index >= substrateLen:
raise error.SubstrateUnderrunError(
'Short substrate for sub-OID past %s' % (oid,)
)
nextSubId = oct2int(substrate[index])
index = index + 1
subId = (subId << 7) + nextSubId
oid = oid + (subId,)
return self._createComponent(asn1Spec, tagSet, oid), substrate[index:]
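        # Worked example (editor's note): the substrate '\x2a\x86\x48'
        # decodes to the OID (1, 2, 840): divmod(0x2a, 40) yields the first
        # two arcs (1, 2), and the continuation octets 0x86 0x48 assemble
        # 6 * 128 + 72 == 840.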
class RealDecoder(AbstractSimpleDecoder):
protoComponent = univ.Real()
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun):
substrate = substrate[:length]
if not length:
raise error.SubstrateUnderrunError('Short substrate for Real')
fo = oct2int(substrate[0]); substrate = substrate[1:]
if fo & 0x40: # infinite value
value = fo & 0x01 and '-inf' or 'inf'
        elif fo & 0x80: # binary encoding
if fo & 0x11 == 0:
n = 1
elif fo & 0x01:
n = 2
elif fo & 0x02:
n = 3
else:
n = oct2int(substrate[0])
eo, substrate = substrate[:n], substrate[n:]
if not eo or not substrate:
raise error.PyAsn1Error('Real exponent screwed')
e = 0
while eo: # exponent
e <<= 8
e |= oct2int(eo[0])
eo = eo[1:]
p = 0
while substrate: # value
p <<= 8
p |= oct2int(substrate[0])
substrate = substrate[1:]
if fo & 0x40: # sign bit
p = -p
value = (p, 2, e)
elif fo & 0xc0 == 0: # character encoding
try:
if fo & 0x3 == 0x1: # NR1
value = (int(substrate), 10, 0)
elif fo & 0x3 == 0x2: # NR2
value = float(substrate)
elif fo & 0x3 == 0x3: # NR3
value = float(substrate)
else:
raise error.SubstrateUnderrunError(
'Unknown NR (tag %s)' % fo
)
except ValueError:
raise error.SubstrateUnderrunError(
'Bad character Real syntax'
)
elif fo & 0xc0 == 0x40: # special real value
pass
else:
raise error.SubstrateUnderrunError(
'Unknown encoding (tag %s)' % fo
)
return self._createComponent(asn1Spec, tagSet, value), substrate
class SequenceDecoder(AbstractConstructedDecoder):
protoComponent = univ.Sequence()
def _getComponentTagMap(self, r, idx):
try:
return r.getComponentTagMapNearPosition(idx)
except error.PyAsn1Error:
return
def _getComponentPositionByType(self, r, t, idx):
return r.getComponentPositionNearType(t, idx)
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun):
substrate = substrate[:length]
r = self._createComponent(asn1Spec, tagSet)
idx = 0
if not decodeFun:
return r, substrate
while substrate:
asn1Spec = self._getComponentTagMap(r, idx)
component, substrate = decodeFun(
substrate, asn1Spec
)
idx = self._getComponentPositionByType(
r, component.getEffectiveTagSet(), idx
)
r.setComponentByPosition(idx, component, asn1Spec is None)
idx = idx + 1
r.setDefaultComponents()
r.verifySizeSpec()
return r, substrate
def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun):
r = self._createComponent(asn1Spec, tagSet)
idx = 0
while substrate:
asn1Spec = self._getComponentTagMap(r, idx)
if not decodeFun:
return r, substrate
component, substrate = decodeFun(substrate, asn1Spec)
if component == eoo.endOfOctets:
break
idx = self._getComponentPositionByType(
r, component.getEffectiveTagSet(), idx
)
r.setComponentByPosition(idx, component, asn1Spec is None)
idx = idx + 1
else:
raise error.SubstrateUnderrunError(
'No EOO seen before substrate ends'
)
r.setDefaultComponents()
r.verifySizeSpec()
return r, substrate
class SequenceOfDecoder(AbstractConstructedDecoder):
protoComponent = univ.SequenceOf()
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun):
substrate = substrate[:length]
r = self._createComponent(asn1Spec, tagSet)
asn1Spec = r.getComponentType()
idx = 0
if not decodeFun:
return r, substrate
while substrate:
component, substrate = decodeFun(
substrate, asn1Spec
)
r.setComponentByPosition(idx, component, asn1Spec is None)
idx = idx + 1
r.verifySizeSpec()
return r, substrate
def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun):
r = self._createComponent(asn1Spec, tagSet)
asn1Spec = r.getComponentType()
idx = 0
if not decodeFun:
return r, substrate
while substrate:
component, substrate = decodeFun(substrate, asn1Spec)
if component == eoo.endOfOctets:
break
r.setComponentByPosition(idx, component, asn1Spec is None)
idx = idx + 1
else:
raise error.SubstrateUnderrunError(
'No EOO seen before substrate ends'
)
r.verifySizeSpec()
return r, substrate
class SetDecoder(SequenceDecoder):
protoComponent = univ.Set()
def _getComponentTagMap(self, r, idx):
return r.getComponentTagMap()
def _getComponentPositionByType(self, r, t, idx):
nextIdx = r.getComponentPositionByType(t)
if nextIdx is None:
return idx
else:
return nextIdx
class SetOfDecoder(SequenceOfDecoder):
protoComponent = univ.SetOf()
class ChoiceDecoder(AbstractConstructedDecoder):
protoComponent = univ.Choice()
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun):
substrate = substrate[:length]
r = self._createComponent(asn1Spec, tagSet)
if not decodeFun:
return r, substrate
if r.getTagSet() == tagSet: # explicitly tagged Choice
component, substrate = decodeFun(
substrate, r.getComponentTagMap()
)
else:
component, substrate = decodeFun(
substrate, r.getComponentTagMap(), tagSet, length, state
)
if isinstance(component, univ.Choice):
effectiveTagSet = component.getEffectiveTagSet()
else:
effectiveTagSet = component.getTagSet()
r.setComponentByType(effectiveTagSet, component, 0, asn1Spec is None)
return r, substrate
indefLenValueDecoder = valueDecoder
class AnyDecoder(AbstractSimpleDecoder):
protoComponent = univ.Any()
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun):
if asn1Spec is None or \
asn1Spec is not None and tagSet != asn1Spec.getTagSet():
# untagged Any container, recover inner header substrate
length = length + len(fullSubstrate) - len(substrate)
substrate = fullSubstrate
substrate = substrate[:length]
return self._createComponent(asn1Spec, tagSet, value=substrate), ''
def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun):
if asn1Spec is not None and tagSet == asn1Spec.getTagSet():
# tagged Any type -- consume header substrate
header = ''
else:
# untagged Any, recover header substrate
header = fullSubstrate[:-len(substrate)]
r = self._createComponent(asn1Spec, tagSet, header)
# Any components do not inherit initial tag
asn1Spec = self.protoComponent
if not decodeFun:
return r, substrate
while substrate:
component, substrate = decodeFun(substrate, asn1Spec)
if component == eoo.endOfOctets:
break
r = r + component
else:
raise error.SubstrateUnderrunError(
'No EOO seen before substrate ends'
)
return r, substrate
# character string types
class UTF8StringDecoder(OctetStringDecoder):
protoComponent = char.UTF8String()
class NumericStringDecoder(OctetStringDecoder):
protoComponent = char.NumericString()
class PrintableStringDecoder(OctetStringDecoder):
protoComponent = char.PrintableString()
class TeletexStringDecoder(OctetStringDecoder):
protoComponent = char.TeletexString()
class VideotexStringDecoder(OctetStringDecoder):
protoComponent = char.VideotexString()
class IA5StringDecoder(OctetStringDecoder):
protoComponent = char.IA5String()
class GraphicStringDecoder(OctetStringDecoder):
protoComponent = char.GraphicString()
class VisibleStringDecoder(OctetStringDecoder):
protoComponent = char.VisibleString()
class GeneralStringDecoder(OctetStringDecoder):
protoComponent = char.GeneralString()
class UniversalStringDecoder(OctetStringDecoder):
protoComponent = char.UniversalString()
class BMPStringDecoder(OctetStringDecoder):
protoComponent = char.BMPString()
# "useful" types
class GeneralizedTimeDecoder(OctetStringDecoder):
protoComponent = useful.GeneralizedTime()
class UTCTimeDecoder(OctetStringDecoder):
protoComponent = useful.UTCTime()
tagMap = {
eoo.endOfOctets.tagSet: EndOfOctetsDecoder(),
univ.Integer.tagSet: IntegerDecoder(),
univ.Boolean.tagSet: BooleanDecoder(),
univ.BitString.tagSet: BitStringDecoder(),
univ.OctetString.tagSet: OctetStringDecoder(),
univ.Null.tagSet: NullDecoder(),
univ.ObjectIdentifier.tagSet: ObjectIdentifierDecoder(),
univ.Enumerated.tagSet: IntegerDecoder(),
univ.Real.tagSet: RealDecoder(),
univ.Sequence.tagSet: SequenceDecoder(), # conflicts with SequenceOf
univ.Set.tagSet: SetDecoder(), # conflicts with SetOf
univ.Choice.tagSet: ChoiceDecoder(), # conflicts with Any
# character string types
char.UTF8String.tagSet: UTF8StringDecoder(),
char.NumericString.tagSet: NumericStringDecoder(),
char.PrintableString.tagSet: PrintableStringDecoder(),
char.TeletexString.tagSet: TeletexStringDecoder(),
char.VideotexString.tagSet: VideotexStringDecoder(),
char.IA5String.tagSet: IA5StringDecoder(),
char.GraphicString.tagSet: GraphicStringDecoder(),
char.VisibleString.tagSet: VisibleStringDecoder(),
char.GeneralString.tagSet: GeneralStringDecoder(),
char.UniversalString.tagSet: UniversalStringDecoder(),
char.BMPString.tagSet: BMPStringDecoder(),
# useful types
useful.GeneralizedTime.tagSet: GeneralizedTimeDecoder(),
useful.UTCTime.tagSet: UTCTimeDecoder()
}
# Type-to-codec map for ambiguous ASN.1 types
typeMap = {
univ.Set.typeId: SetDecoder(),
univ.SetOf.typeId: SetOfDecoder(),
univ.Sequence.typeId: SequenceDecoder(),
univ.SequenceOf.typeId: SequenceOfDecoder(),
univ.Choice.typeId: ChoiceDecoder(),
univ.Any.typeId: AnyDecoder()
}
( stDecodeTag, stDecodeLength, stGetValueDecoder, stGetValueDecoderByAsn1Spec,
stGetValueDecoderByTag, stTryAsExplicitTag, stDecodeValue,
stDumpRawValue, stErrorCondition, stStop ) = [x for x in range(10)]
class Decoder:
defaultErrorState = stErrorCondition
# defaultErrorState = stDumpRawValue
defaultRawDecoder = AnyDecoder()
def __init__(self, tagMap, typeMap={}):
self.__tagMap = tagMap
self.__typeMap = typeMap
self.__endOfOctetsTagSet = eoo.endOfOctets.getTagSet()
# Tag & TagSet objects caches
self.__tagCache = {}
self.__tagSetCache = {}
def __call__(self, substrate, asn1Spec=None, tagSet=None,
length=None, state=stDecodeTag, recursiveFlag=1):
debug.logger & debug.flagDecoder and debug.logger('decoder called with state %d, working with up to %d octets of substrate: %s' % (state, len(substrate), debug.hexdump(substrate)))
fullSubstrate = substrate
while state != stStop:
if state == stDecodeTag:
# Decode tag
if not substrate:
raise error.SubstrateUnderrunError(
'Short octet stream on tag decoding'
)
firstOctet = substrate[0]
substrate = substrate[1:]
if firstOctet in self.__tagCache:
lastTag = self.__tagCache[firstOctet]
else:
t = oct2int(firstOctet)
tagClass = t&0xC0
tagFormat = t&0x20
tagId = t&0x1F
if tagId == 0x1F:
tagId = 0
while 1:
if not substrate:
raise error.SubstrateUnderrunError(
'Short octet stream on long tag decoding'
)
t = oct2int(substrate[0])
tagId = tagId << 7 | (t&0x7F)
substrate = substrate[1:]
if not t&0x80:
break
lastTag = tag.Tag(
tagClass=tagClass, tagFormat=tagFormat, tagId=tagId
)
if tagId < 31:
# cache short tags
self.__tagCache[firstOctet] = lastTag
if tagSet is None:
if firstOctet in self.__tagSetCache:
tagSet = self.__tagSetCache[firstOctet]
else:
# base tag not recovered
tagSet = tag.TagSet((), lastTag)
if firstOctet in self.__tagCache:
self.__tagSetCache[firstOctet] = tagSet
else:
tagSet = lastTag + tagSet
state = stDecodeLength
debug.logger and debug.logger & debug.flagDecoder and debug.logger('tag decoded into %r, decoding length' % tagSet)
if state == stDecodeLength:
# Decode length
if not substrate:
raise error.SubstrateUnderrunError(
'Short octet stream on length decoding'
)
firstOctet = oct2int(substrate[0])
if firstOctet == 128:
size = 1
length = -1
elif firstOctet < 128:
length, size = firstOctet, 1
else:
size = firstOctet & 0x7F
# encoded in size bytes
length = 0
lengthString = substrate[1:size+1]
# missing check on maximum size, which shouldn't be a
# problem, we can handle more than is possible
if len(lengthString) != size:
raise error.SubstrateUnderrunError(
'%s<%s at %s' %
(size, len(lengthString), tagSet)
)
for char in lengthString:
length = (length << 8) | oct2int(char)
size = size + 1
substrate = substrate[size:]
if length != -1 and len(substrate) < length:
raise error.SubstrateUnderrunError(
'%d-octet short' % (length - len(substrate))
)
state = stGetValueDecoder
debug.logger and debug.logger & debug.flagDecoder and debug.logger('value length decoded into %d, payload substrate is: %s' % (length, debug.hexdump(substrate)))
if state == stGetValueDecoder:
if asn1Spec is None:
state = stGetValueDecoderByTag
else:
state = stGetValueDecoderByAsn1Spec
#
            # There are two ways of creating subtypes in ASN.1, and they
            # influence decoder operation. These methods are:
            # 1) Either base types are used, or no IMPLICIT tagging has
            #    been applied in subtyping.
            # 2) Subtype syntax drops base type information (by means of
            #    IMPLICIT tagging).
# The first case allows for complete tag recovery from substrate
# while the second one requires original ASN.1 type spec for
# decoding.
#
# In either case a set of tags (tagSet) is coming from substrate
# in an incremental, tag-by-tag fashion (this is the case of
# EXPLICIT tag which is most basic). Outermost tag comes first
# from the wire.
#
if state == stGetValueDecoderByTag:
if tagSet in self.__tagMap:
concreteDecoder = self.__tagMap[tagSet]
else:
concreteDecoder = None
if concreteDecoder:
state = stDecodeValue
else:
_k = tagSet[:1]
if _k in self.__tagMap:
concreteDecoder = self.__tagMap[_k]
else:
concreteDecoder = None
if concreteDecoder:
state = stDecodeValue
else:
state = stTryAsExplicitTag
debug.logger and debug.logger & debug.flagDecoder and debug.logger('codec %s chosen by a built-in type, decoding %s' % (concreteDecoder and concreteDecoder.__class__.__name__ or "<none>", state == stDecodeValue and 'value' or 'explicit tag'))
if state == stGetValueDecoderByAsn1Spec:
if isinstance(asn1Spec, (dict, tagmap.TagMap)):
if tagSet in asn1Spec:
__chosenSpec = asn1Spec[tagSet]
else:
__chosenSpec = None
else:
__chosenSpec = asn1Spec
if __chosenSpec is not None and (
tagSet == __chosenSpec.getTagSet() or \
tagSet in __chosenSpec.getTagMap()
):
# use base type for codec lookup to recover untagged types
baseTagSet = __chosenSpec.baseTagSet
if __chosenSpec.typeId is not None and \
__chosenSpec.typeId in self.__typeMap:
# ambiguous type
concreteDecoder = self.__typeMap[__chosenSpec.typeId]
elif baseTagSet in self.__tagMap:
# base type or tagged subtype
concreteDecoder = self.__tagMap[baseTagSet]
else:
concreteDecoder = None
if concreteDecoder:
asn1Spec = __chosenSpec
state = stDecodeValue
else:
state = stTryAsExplicitTag
elif tagSet == self.__endOfOctetsTagSet:
concreteDecoder = self.__tagMap[tagSet]
state = stDecodeValue
else:
state = stTryAsExplicitTag
if debug.logger and debug.logger & debug.flagDecoder:
if isinstance(asn1Spec, base.Asn1Item):
debug.logger('choosing value codec by ASN.1 spec:\n %r -> %r' % (asn1Spec.getTagSet(), asn1Spec.__class__.__name__))
else:
debug.logger('choosing value codec by ASN.1 spec that offers either of the following: ')
for t, v in asn1Spec.getPosMap().items():
debug.logger(' %r -> %s' % (t, v.__class__.__name__))
debug.logger('but neither of: ')
                        for t, v in asn1Spec.getNegMap().items():
debug.logger(' %r -> %s' % (t, v.__class__.__name__))
debug.logger('codec %s chosen by ASN.1 spec, decoding %s' % (state == stDecodeValue and concreteDecoder.__class__.__name__ or "<none>", state == stDecodeValue and 'value' or 'explicit tag'))
if state == stTryAsExplicitTag:
if tagSet and \
tagSet[0][1] == tag.tagFormatConstructed and \
tagSet[0][0] != tag.tagClassUniversal:
# Assume explicit tagging
concreteDecoder = explicitTagDecoder
state = stDecodeValue
else:
state = self.defaultErrorState
debug.logger and debug.logger & debug.flagDecoder and debug.logger('codec %s chosen, decoding %s' % (concreteDecoder and concreteDecoder.__class__.__name__ or "<none>", state == stDecodeValue and 'value' or 'as failure'))
if state == stDumpRawValue:
concreteDecoder = self.defaultRawDecoder
debug.logger and debug.logger & debug.flagDecoder and debug.logger('codec %s chosen, decoding value' % concreteDecoder.__class__.__name__)
state = stDecodeValue
if state == stDecodeValue:
if recursiveFlag:
decodeFun = self
else:
decodeFun = None
if length == -1: # indef length
value, substrate = concreteDecoder.indefLenValueDecoder(
fullSubstrate, substrate, asn1Spec, tagSet, length,
stGetValueDecoder, decodeFun
)
else:
value, _substrate = concreteDecoder.valueDecoder(
fullSubstrate, substrate, asn1Spec, tagSet, length,
stGetValueDecoder, decodeFun
)
if recursiveFlag:
substrate = substrate[length:]
else:
substrate = _substrate
state = stStop
debug.logger and debug.logger & debug.flagDecoder and debug.logger('codec %s yields type %s, value:\n%s\n...remaining substrate is: %s' % (concreteDecoder.__class__.__name__, value.__class__.__name__, value.prettyPrint(), substrate and debug.hexdump(substrate) or '<none>'))
if state == stErrorCondition:
raise error.PyAsn1Error(
'%r not in asn1Spec: %r' % (tagSet, asn1Spec)
)
debug.logger and debug.logger & debug.flagDecoder and debug.logger('decoder call completed')
return value, substrate
decode = Decoder(tagMap, typeMap)
# XXX
# non-recursive decoding; return position rather than substrate
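# Editor's usage sketch (an assumption, not part of the original module):
# decoding the DER TLV 02 01 0c yields the INTEGER 12 and no remainder.
#
#     value, rest = decode('\x02\x01\x0c')
#     assert int(value) == 12 and rest == ''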
|
bsd-3-clause
|
AyoubZahid/odoo
|
addons/purchase/migrations/9.0.1.2/pre-create-properties.py
|
101
|
1408
|
# -*- coding: utf-8 -*-
def convert_field(cr, model, field, target_model):
table = model.replace('.', '_')
cr.execute("""SELECT 1
FROM information_schema.columns
WHERE table_name = %s
AND column_name = %s
""", (table, field))
if not cr.fetchone():
return
cr.execute("SELECT id FROM ir_model_fields WHERE model=%s AND name=%s", (model, field))
[fields_id] = cr.fetchone()
cr.execute("""
INSERT INTO ir_property(name, type, fields_id, company_id, res_id, value_reference)
SELECT %(field)s, 'many2one', %(fields_id)s, company_id, CONCAT('{model},', id),
CONCAT('{target_model},', {field})
FROM {table} t
WHERE {field} IS NOT NULL
AND NOT EXISTS(SELECT 1
FROM ir_property
WHERE fields_id=%(fields_id)s
AND company_id=t.company_id
AND res_id=CONCAT('{model},', t.id))
""".format(**locals()), locals())
cr.execute('ALTER TABLE "{0}" DROP COLUMN "{1}" CASCADE'.format(table, field))
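# Editor's note (assumption): the INSERT above turns each plain many2one
# column into per-company ir_property records keyed as
# '<model>,<id>' -> '<target_model>,<value>', after which the original
# column can safely be dropped.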
def migrate(cr, version):
convert_field(cr, 'res.partner', 'property_purchase_currency_id', 'res.currency')
convert_field(cr, 'product.template',
'property_account_creditor_price_difference', 'account.account')
|
gpl-3.0
|
seleniumbase/SeleniumBase
|
examples/chart_maker/pie_charts.py
|
1
|
2140
|
from seleniumbase import BaseCase
class PieCharts(BaseCase):
def test_pie_charts(self):
self.create_presentation(theme="serif", transition="convex")
self.create_pie_chart(labels=False)
self.add_data_point("Meringue Cream", 3, color="#f1eeea")
self.add_data_point("Lemon Filling", 3, color="#e9d655")
self.add_data_point("Graham Cracker Crust", 1, color="#9d5b34")
self.add_slide("<p>Lemon Meringue Pie</p>" + self.extract_chart())
self.create_pie_chart(labels=False)
self.add_data_point("Blueberries", 1, color="#5c81b7")
self.add_data_point("Blueberry Filling", 2, color="#12405e")
self.add_data_point("Golden Brown Crust", 1, color="#cd7b54")
self.add_slide("<p>Blueberry Pie</p>" + self.extract_chart())
self.create_pie_chart(labels=False)
self.add_data_point("Strawberries", 1, color="#ff282c")
self.add_data_point("Kiwis", 1, color="#a9c208")
self.add_data_point("Apricots", 1, color="#f47a14")
self.add_data_point("Raspberries", 1, color="#b10019")
self.add_data_point("Black Berries", 1, color="#44001e")
self.add_data_point("Blueberries", 1, color="#5c81b7")
self.add_data_point("Custard", 3, color="#eee896")
self.add_data_point("Golden Crust", 4, color="#dca422")
self.add_slide("<p>Fruit Tart Pie</p>" + self.extract_chart())
self.create_pie_chart(labels=False)
self.add_data_point("Apple Crust", 4, color="#b66327")
self.add_data_point("Apple Filling", 5, color="#c5903e")
self.add_data_point("Cinnamon", 1, color="#76210d")
self.add_data_point("Whipped Cream", 2, color="#f2f2f2")
self.add_slide("<p>Apple Pie</p>" + self.extract_chart())
self.create_pie_chart(labels=False)
self.add_data_point("Sponge Cake", 4, color="#e0d5a0")
self.add_data_point("Custard", 3, color="#eee896")
self.add_data_point("Chocolate", 1, color="#5c3625")
self.add_slide("<p>Boston Cream Pie</p>" + self.extract_chart())
self.begin_presentation(filename="pie_charts.html")
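# Editor's note (assumption): SeleniumBase test classes are collected and
# run with pytest, e.g. "pytest pie_charts.py"; begin_presentation() then
# serves the generated slide deck in the browser.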
|
mit
|
sam-m888/gramps
|
gramps/gui/editors/displaytabs/buttontab.py
|
5
|
12717
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2009-2011 Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Python classes
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# GTK libraries
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import Gdk
#-------------------------------------------------------------------------
#
# Gramps classes
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from ...widgets import SimpleButton
from .grampstab import GrampsTab
from gramps.gen.errors import WindowActiveError
from ...utils import match_primary_mask
_KP_ENTER = Gdk.keyval_from_name("KP_Enter")
_RETURN = Gdk.keyval_from_name("Return")
_DEL = Gdk.keyval_from_name("Delete")
_ADD = Gdk.keyval_from_name("Insert")
_OPEN = Gdk.keyval_from_name("o")
_LEFT = Gdk.keyval_from_name("Left")
_RIGHT = Gdk.keyval_from_name("Right")
#-------------------------------------------------------------------------
#
# Classes
#
#-------------------------------------------------------------------------
class ButtonTab(GrampsTab):
"""
This class derives from the base GrampsTab, yet is not a usable Tab. It
serves as another base tab for classes which need an Add/Edit/Remove button
combination.
"""
_MSG = {
'add' : _('Add'),
'del' : _('Remove'),
'edit' : _('Edit'),
'share' : _('Share'),
'jump' : _('Jump To'),
'up' : _('Move Up'),
'down' : _('Move Down'),
'left' : _('Move Left'),
'right' : _('Move right')
}
L_R = 2 # indicator for left/right move buttons
def __init__(self, dbstate, uistate, track, name, share_button=False,
move_buttons=False, jump_button=False, top_label=None):
"""
        Similar to the base class, except that the button box is created after the tab is built.
@param dbstate: The database state. Contains a reference to
the database, along with other state information. The GrampsTab
uses this to access the database and to pass to and created
child windows (such as edit dialogs).
@type dbstate: DbState
@param uistate: The UI state. Used primarily to pass to any created
subwindows.
@type uistate: DisplayState
@param track: The window tracking mechanism used to manage windows.
        This is only used to pass to generated child windows.
@type track: list
@param name: Notebook label name
@type name: str/unicode
@param share_button: Add a share button to the Notebook tab or not
        @type share_button: bool
@param move_buttons: Add up and down button to the Notebook tab or not
        @type move_buttons: bool
@param jump_button: Add a goto button
        @type jump_button: bool
@param top_label: Add a label in front of the buttons if given
@type top_label: string or None for no label
"""
self.dirty_selection = False
GrampsTab.__init__(self,dbstate, uistate, track, name)
self._create_buttons(share_button, move_buttons, jump_button, top_label)
def _create_buttons(self, share_button, move_buttons, jump_button,
top_label):
"""
Create a button box consisting of three buttons, one for Add,
one for Edit, and one for Delete.
Add buttons for Share, Move and Jump depending on parameters. This
        button box is then appended to the hbox (self).
        Prepend a label if top_label is given.
Note: some ButtonTab subclasses override this method.
"""
if top_label:
self.top_label = Gtk.Label(label=top_label)
self.top_label.set_use_markup(True)
self.track_ref_for_deletion("top_label")
self.add_btn = SimpleButton('list-add', self.add_button_clicked)
self.edit_btn = SimpleButton('gtk-edit', self.edit_button_clicked)
self.del_btn = SimpleButton('list-remove', self.del_button_clicked)
self.track_ref_for_deletion("add_btn")
self.track_ref_for_deletion("edit_btn")
self.track_ref_for_deletion("del_btn")
self.add_btn.set_tooltip_text(self._MSG['add'])
self.edit_btn.set_tooltip_text(self._MSG['edit'])
self.del_btn.set_tooltip_text(self._MSG['del'])
if share_button:
self.share_btn = SimpleButton('gtk-index', self.share_button_clicked)
self.share_btn.set_tooltip_text(self._MSG['share'])
self.track_ref_for_deletion("share_btn")
else:
self.share_btn = None
if move_buttons:
l_r = move_buttons == self.L_R
self.up_btn = SimpleButton('go-previous' if l_r else 'go-up',
self.up_button_clicked)
self.up_btn.set_tooltip_text(self._MSG['left' if l_r else 'up'])
self.down_btn = SimpleButton('go-next' if l_r else 'go-down',
self.down_button_clicked)
self.down_btn.set_tooltip_text(
self._MSG['right' if l_r else 'down'])
self.track_ref_for_deletion("up_btn")
self.track_ref_for_deletion("down_btn")
else:
self.up_btn = None
self.down_btn = None
if jump_button:
self.jump_btn = SimpleButton('go-jump', self.jump_button_clicked)
self.track_ref_for_deletion("jump_btn")
self.jump_btn.set_tooltip_text(self._MSG['jump'])
else:
self.jump_btn = None
hbox = Gtk.Box()
hbox.set_spacing(6)
if top_label:
hbox.pack_start(self.top_label, False, True, 0)
hbox.pack_start(self.add_btn, False, True, 0)
if share_button:
hbox.pack_start(self.share_btn, False, True, 0)
hbox.pack_start(self.edit_btn, False, True, 0)
hbox.pack_start(self.del_btn, False, True, 0)
if move_buttons:
hbox.pack_start(self.up_btn, False, True, 0)
hbox.pack_start(self.down_btn, False, True, 0)
if self.jump_btn:
hbox.pack_start(self.jump_btn, False, True, 0)
hbox.show_all()
self.pack_start(hbox, False, True, 0)
if self.dbstate.db.readonly:
self.add_btn.set_sensitive(False)
self.del_btn.set_sensitive(False)
if share_button:
self.share_btn.set_sensitive(False)
if jump_button and self.jump_btn:
self.jump_btn.set_sensitive(False)
if move_buttons:
self.up_btn.set_sensitive(False)
self.down_btn.set_sensitive(False)
def double_click(self, obj, event):
"""
Handles the double click on list. If the double click occurs,
the Edit button handler is called
"""
if (event.type == Gdk.EventType.DOUBLE_BUTTON_PRESS
and event.button == 1):
try:
self.edit_button_clicked(obj)
except WindowActiveError:
pass
def key_pressed(self, obj, event):
"""
Handles the return key being pressed on list. If the key is pressed,
the Edit button handler is called
"""
if event.type == Gdk.EventType.KEY_PRESS:
#print 'key pressed', event.keyval, event.get_state(), _ADD
if event.keyval in (_RETURN, _KP_ENTER):
try:
self.edit_button_clicked(obj)
except WindowActiveError:
pass
elif event.keyval in (_DEL,) and self.del_btn:
if self.dirty_selection or self.dbstate.db.readonly:
return
self.del_button_clicked(obj)
elif event.keyval in (_ADD,) and self.add_btn:
if self.dirty_selection or self.dbstate.db.readonly:
return
self.add_button_clicked(obj)
elif event.keyval in (_OPEN,) and self.share_btn and \
match_primary_mask(event.get_state()):
self.share_button_clicked(obj)
elif event.keyval in (_LEFT,) and \
(event.get_state() & Gdk.ModifierType.MOD1_MASK):
self.prev_page()
elif event.keyval in (_RIGHT,) and \
(event.get_state() & Gdk.ModifierType.MOD1_MASK):
self.next_page()
else:
return
return True
def add_button_clicked(self, obj):
"""
        Function called when the Add button is clicked. This function
should be overridden by the derived class.
"""
print("Uncaught Add clicked")
def share_button_clicked(self, obj):
"""
        Function called when the Share button is clicked. This function
should be overridden by the derived class.
"""
print("Uncaught Share clicked")
def jump_button_clicked(self, obj):
"""
        Function called when the Jump button is clicked. This function
should be overridden by the derived class.
"""
print("Uncaught Jump clicked")
def del_button_clicked(self, obj):
"""
        Function called when the Delete button is clicked. This function
should be overridden by the derived class.
"""
print("Uncaught Delete clicked")
def edit_button_clicked(self, obj):
"""
        Function called when the Edit button is clicked or a double
click is caught. This function should be overridden by the derived
class.
"""
print("Uncaught Edit clicked")
def up_button_clicked(self, obj):
"""
        Function called when the Up button is clicked.
This function should be overridden by the derived class.
"""
print("Uncaught Up clicked")
def down_button_clicked(self, obj):
"""
        Function called when the Down button is clicked.
This function should be overridden by the derived class.
"""
print("Uncaught Down clicked")
def _selection_changed(self, obj=None):
"""
Attached to the selection's 'changed' signal. Checks
to see if anything is selected. If it is, the edit and
        delete buttons are enabled; otherwise they are disabled.
"""
# Comparing to None is important, as empty strings
# and 0 can be returned
        # This method is called as a callback on change, and can also be
        # called explicitly; dirty_selection must ensure the two do not
        # interact.
if self.dirty_selection:
return
if self.get_selected() is not None:
self.edit_btn.set_sensitive(True)
if self.jump_btn:
self.jump_btn.set_sensitive(True)
if not self.dbstate.db.readonly:
self.del_btn.set_sensitive(True)
# note: up and down cannot be set unsensitive after clicked
# or they do not respond to a next click
#if self.up_btn :
# self.up_btn.set_sensitive(True)
# self.down_btn.set_sensitive(True)
else:
self.edit_btn.set_sensitive(False)
if self.jump_btn:
self.jump_btn.set_sensitive(False)
if not self.dbstate.db.readonly:
self.del_btn.set_sensitive(False)
# note: up and down cannot be set unsensitive after clicked
# or they do not respond to a next click
#if self.up_btn :
# self.up_btn.set_sensitive(False)
# self.down_btn.set_sensitive(False)
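    # Editor's sketch (an assumption, not Gramps code): concrete tabs are
    # expected to subclass ButtonTab and override the "Uncaught ..."
    # handlers above, e.g.:
    #
    #     class NoteTab(ButtonTab):
    #         def add_button_clicked(self, obj):
    #             ...open an editor and append the result to the model...
    #
    # The base class only wires up the buttons, key bindings and
    # sensitivity handling.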
|
gpl-2.0
|
glneo/gnuradio-davisaf
|
gr-audio/examples/python/noise.py
|
10
|
1929
|
#!/usr/bin/env python
#
# Copyright 2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import audio
from gnuradio.eng_option import eng_option
from optparse import OptionParser
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
parser = OptionParser(option_class=eng_option)
parser.add_option("-O", "--audio-output", type="string", default="",
help="pcm output device name. E.g., hw:0,0 or /dev/dsp")
parser.add_option("-r", "--sample-rate", type="eng_float", default=48000,
help="set sample rate to RATE (48000)")
(options, args) = parser.parse_args ()
if len(args) != 0:
parser.print_help()
raise SystemExit, 1
sample_rate = int(options.sample_rate)
ampl = 0.1
src = gr.glfsr_source_b(32) # Pseudorandom noise source
b2f = gr.chunks_to_symbols_bf([ampl, -ampl], 1)
dst = audio.sink(sample_rate, options.audio_output)
self.connect(src, b2f, dst)
if __name__ == '__main__':
try:
my_top_block().run()
except KeyboardInterrupt:
pass
|
gpl-3.0
|
retr0h/ansible
|
lib/ansible/runner/lookup_plugins/nested.py
|
174
|
2285
|
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ansible.utils as utils
from ansible.utils import safe_eval
import ansible.errors as errors
def flatten(terms):
ret = []
for term in terms:
if isinstance(term, list):
ret.extend(term)
elif isinstance(term, tuple):
ret.extend(term)
else:
ret.append(term)
return ret
def combine(a,b):
results = []
for x in a:
for y in b:
results.append(flatten([x,y]))
return results
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
def __lookup_injects(self, terms, inject):
results = []
for x in terms:
intermediate = utils.listify_lookup_plugin_terms(x, self.basedir, inject)
results.append(intermediate)
return results
def run(self, terms, inject=None, **kwargs):
        # this code is common with 'items.py'; consider moving to utils if we need it again
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
terms = self.__lookup_injects(terms, inject)
my_list = terms[:]
my_list.reverse()
result = []
if len(my_list) == 0:
raise errors.AnsibleError("with_nested requires at least one element in the nested list")
result = my_list.pop()
while len(my_list) > 0:
result2 = combine(result, my_list.pop())
result = result2
new_result = []
for x in result:
new_result.append(flatten(x))
return new_result
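# Worked example (editor's note): combine([[1], [2]], ['a', 'b']) returns
# [[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b']] -- each pair is flattened, which
# is how with_nested builds the cross product of its term lists.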
|
gpl-3.0
|
vinodpanicker/scancode-toolkit
|
tests/cluecode/data/finder/email/thomas.py
|
12
|
10471
|
# This module is part of the Divmod project and is Copyright 2003 Amir Bakhtiar:
# [email protected]. This is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
import operator
import re
import math
from sets import Set
class BayesData(dict):
def __init__(self, name='', pool=None):
self.name = name
self.training = []
self.pool = pool
self.tokenCount = 0
self.trainCount = 0
def trainedOn(self, item):
return item in self.training
def __repr__(self):
return '<BayesDict: %s, %s tokens>' % (self.name, self.tokenCount)
class Bayes(object):
def __init__(self, tokenizer=None, combiner=None, dataClass=None):
if dataClass is None:
self.dataClass = BayesData
else:
self.dataClass = dataClass
self.corpus = self.dataClass('__Corpus__')
self.pools = {}
self.pools['__Corpus__'] = self.corpus
self.trainCount = 0
self.dirty = True
# The tokenizer takes an object and returns
# a list of strings
if tokenizer is None:
self._tokenizer = Tokenizer()
else:
self._tokenizer = tokenizer
# The combiner combines probabilities
if combiner is None:
self.combiner = self.robinson
else:
self.combiner = combiner
def commit(self):
self.save()
def newPool(self, poolName):
"""Create a new pool, without actually doing any
training.
"""
self.dirty = True # not always true, but it's simple
return self.pools.setdefault(poolName, self.dataClass(poolName))
def removePool(self, poolName):
del(self.pools[poolName])
self.dirty = True
def renamePool(self, poolName, newName):
self.pools[newName] = self.pools[poolName]
self.pools[newName].name = newName
self.removePool(poolName)
self.dirty = True
def mergePools(self, destPool, sourcePool):
"""Merge an existing pool into another.
The data from sourcePool is merged into destPool.
The arguments are the names of the pools to be merged.
        The pool named sourcePool is left intact and you may
want to call removePool() to get rid of it.
"""
sp = self.pools[sourcePool]
dp = self.pools[destPool]
for tok, count in sp.items():
if dp.get(tok):
dp[tok] += count
else:
dp[tok] = count
dp.tokenCount += 1
self.dirty = True
def poolData(self, poolName):
"""Return a list of the (token, count) tuples.
"""
return self.pools[poolName].items()
def poolTokens(self, poolName):
"""Return a list of the tokens in this pool.
"""
return [tok for tok, count in self.poolData(poolName)]
def save(self, fname='bayesdata.dat'):
from cPickle import dump
fp = open(fname, 'wb')
dump(self.pools, fp)
fp.close()
def load(self, fname='bayesdata.dat'):
from cPickle import load
fp = open(fname, 'rb')
self.pools = load(fp)
fp.close()
self.corpus = self.pools['__Corpus__']
self.dirty = True
def poolNames(self):
"""Return a sorted list of Pool names.
Does not include the system pool '__Corpus__'.
"""
pools = self.pools.keys()
pools.remove('__Corpus__')
pools = [pool for pool in pools]
pools.sort()
return pools
def buildCache(self):
""" merges corpora and computes probabilities
"""
self.cache = {}
for pname, pool in self.pools.items():
# skip our special pool
if pname == '__Corpus__':
continue
poolCount = pool.tokenCount
themCount = max(self.corpus.tokenCount - poolCount, 1)
cacheDict = self.cache.setdefault(pname, self.dataClass(pname))
for word, totCount in self.corpus.items():
                # for every word in the corpus
# check to see if this pool contains this word
thisCount = float(pool.get(word, 0.0))
if (thisCount == 0.0):
continue
otherCount = float(totCount) - thisCount
if not poolCount:
goodMetric = 1.0
else:
goodMetric = min(1.0, otherCount/poolCount)
badMetric = min(1.0, thisCount/themCount)
f = badMetric / (goodMetric + badMetric)
# PROBABILITY_THRESHOLD
if abs(f-0.5) >= 0.1 :
# GOOD_PROB, BAD_PROB
cacheDict[word] = max(0.0001, min(0.9999, f))
def poolProbs(self):
if self.dirty:
self.buildCache()
self.dirty = False
return self.cache
def getTokens(self, obj):
"""By default, we expect obj to be a screen and split
it on whitespace.
Note that this does not change the case.
        In some applications you may want to lowercase everything
so that "king" and "King" generate the same token.
Override this in your subclass for objects other
than text.
Alternatively, you can pass in a tokenizer as part of
instance creation.
"""
return self._tokenizer.tokenize(obj)
def getProbs(self, pool, words):
""" extracts the probabilities of tokens in a message
"""
probs = [(word, pool[word]) for word in words if word in pool]
probs.sort(lambda x,y: cmp(y[1],x[1]))
return probs[:2048]
def train(self, pool, item, uid=None):
"""Train Bayes by telling him that item belongs
in pool. uid is optional and may be used to uniquely
identify the item that is being trained on.
"""
tokens = self.getTokens(item)
pool = self.pools.setdefault(pool, self.dataClass(pool))
self._train(pool, tokens)
self.corpus.trainCount += 1
pool.trainCount += 1
if uid:
pool.training.append(uid)
self.dirty = True
def untrain(self, pool, item, uid=None):
tokens = self.getTokens(item)
pool = self.pools.get(pool, None)
if not pool:
return
self._untrain(pool, tokens)
# I guess we want to count this as additional training?
self.corpus.trainCount += 1
pool.trainCount += 1
if uid:
pool.training.remove(uid)
self.dirty = True
def _train(self, pool, tokens):
wc = 0
for token in tokens:
count = pool.get(token, 0)
pool[token] = count + 1
count = self.corpus.get(token, 0)
self.corpus[token] = count + 1
wc += 1
pool.tokenCount += wc
self.corpus.tokenCount += wc
def _untrain(self, pool, tokens):
for token in tokens:
count = pool.get(token, 0)
if count:
if count == 1:
del(pool[token])
else:
pool[token] = count - 1
pool.tokenCount -= 1
count = self.corpus.get(token, 0)
if count:
if count == 1:
del(self.corpus[token])
else:
self.corpus[token] = count - 1
self.corpus.tokenCount -= 1
def trainedOn(self, msg):
for p in self.cache.values():
if msg in p.training:
return True
return False
def guess(self, msg):
tokens = Set(self.getTokens(msg))
pools = self.poolProbs()
res = {}
for pname, pprobs in pools.items():
p = self.getProbs(pprobs, tokens)
if len(p) != 0:
res[pname]=self.combiner(p, pname)
res = res.items()
res.sort(lambda x,y: cmp(y[1], x[1]))
return res
def robinson(self, probs, ignore):
""" computes the probability of a message being spam (Robinson's method)
P = 1 - prod(1-p)^(1/n)
Q = 1 - prod(p)^(1/n)
S = (1 + (P-Q)/(P+Q)) / 2
Courtesy of http://christophe.delord.free.fr/en/index.html
"""
nth = 1./len(probs)
P = 1.0 - reduce(operator.mul, map(lambda p: 1.0-p[1], probs), 1.0) ** nth
Q = 1.0 - reduce(operator.mul, map(lambda p: p[1], probs)) ** nth
S = (P - Q) / (P + Q)
return (1 + S) / 2
def robinsonFisher(self, probs, ignore):
""" computes the probability of a message being spam (Robinson-Fisher method)
H = C-1( -2.ln(prod(p)), 2*n )
S = C-1( -2.ln(prod(1-p)), 2*n )
I = (1 + H - S) / 2
Courtesy of http://christophe.delord.free.fr/en/index.html
"""
n = len(probs)
try: H = chi2P(-2.0 * math.log(reduce(operator.mul, map(lambda p: p[1], probs), 1.0)), 2*n)
except OverflowError: H = 0.0
try: S = chi2P(-2.0 * math.log(reduce(operator.mul, map(lambda p: 1.0-p[1], probs), 1.0)), 2*n)
except OverflowError: S = 0.0
return (1 + H - S) / 2
def __repr__(self):
return '<Bayes: %s>' % [self.pools[p] for p in self.poolNames()]
def __len__(self):
return len(self.corpus)
class Tokenizer:
"""A simple regex-based whitespace tokenizer.
It expects a string and can return all tokens lower-cased
or in their existing case.
"""
WORD_RE = re.compile('\\w+', re.U)
def __init__(self, lower=False):
self.lower = lower
def tokenize(self, obj):
for match in self.WORD_RE.finditer(obj):
if self.lower:
yield match.group().lower()
else:
yield match.group()
def chi2P(chi, df):
""" return P(chisq >= chi, with df degree of freedom)
df must be even
"""
assert df & 1 == 0
m = chi / 2.0
sum = term = math.exp(-m)
for i in range(1, df/2):
term *= m/i
sum += term
return min(sum, 1.0)
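# Editor's usage sketch (an assumption, not part of the original module):
#
#     b = Bayes()
#     b.train('spam', 'buy cheap pills now')
#     b.train('ham', 'minutes of the weekly meeting')
#     print b.guess('cheap pills')   # pools ranked by combined probability
#
# guess() tokenizes the message, pulls per-pool token probabilities from
# the cache and combines them with robinson() by default.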
|
apache-2.0
|
rhgong/itk-with-dom
|
Wrapping/Generators/Python/Tests/BinaryErodeImageFilter.py
|
11
|
1231
|
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
#
# Example on the use of the BinaryErodeImageFilter
#
import itk
from sys import argv
itk.auto_progress(2)
dim = 2
IType = itk.Image[itk.UC, dim]
reader = itk.ImageFileReader[IType].New( FileName=argv[1] )
kernel = itk.strel(dim, 5)
filter = itk.BinaryErodeImageFilter[IType, IType, kernel].New( reader,
ErodeValue=200,
Kernel=kernel )
writer = itk.ImageFileWriter[IType].New( filter, FileName=argv[2] )
writer.Update()
|
apache-2.0
|
nnethercote/servo
|
tests/wpt/web-platform-tests/tools/pywebsocket/mod_pywebsocket/_stream_hybi.py
|
23
|
32387
|
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file provides classes and helper functions for parsing/building frames
of the WebSocket protocol (RFC 6455).
Specification:
http://tools.ietf.org/html/rfc6455
"""
from collections import deque
import logging
import os
import struct
import time
from mod_pywebsocket import common
from mod_pywebsocket import util
from mod_pywebsocket._stream_base import BadOperationException
from mod_pywebsocket._stream_base import ConnectionTerminatedException
from mod_pywebsocket._stream_base import InvalidFrameException
from mod_pywebsocket._stream_base import InvalidUTF8Exception
from mod_pywebsocket._stream_base import StreamBase
from mod_pywebsocket._stream_base import UnsupportedFrameException
_NOOP_MASKER = util.NoopMasker()
class Frame(object):
def __init__(self, fin=1, rsv1=0, rsv2=0, rsv3=0,
opcode=None, payload=''):
self.fin = fin
self.rsv1 = rsv1
self.rsv2 = rsv2
self.rsv3 = rsv3
self.opcode = opcode
self.payload = payload
# Helper functions made public to be used for writing unittests for WebSocket
# clients.
def create_length_header(length, mask):
"""Creates a length header.
Args:
length: Frame length. Must be less than 2^63.
mask: Mask bit. Must be boolean.
Raises:
ValueError: when bad data is given.
"""
if mask:
mask_bit = 1 << 7
else:
mask_bit = 0
if length < 0:
        raise ValueError('length must be a non-negative integer')
elif length <= 125:
return chr(mask_bit | length)
elif length < (1 << 16):
return chr(mask_bit | 126) + struct.pack('!H', length)
elif length < (1 << 63):
return chr(mask_bit | 127) + struct.pack('!Q', length)
else:
raise ValueError('Payload is too big for one frame')
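# Worked examples (editor's note): create_length_header(5, False) returns
# '\x05'; create_length_header(300, False) returns '\x7e\x01\x2c' (the 126
# marker plus a 2-octet big-endian length); lengths of 2**16 and above use
# the 127 marker plus an 8-octet length.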
def create_header(opcode, payload_length, fin, rsv1, rsv2, rsv3, mask):
"""Creates a frame header.
Raises:
Exception: when bad data is given.
"""
if opcode < 0 or 0xf < opcode:
raise ValueError('Opcode out of range')
if payload_length < 0 or (1 << 63) <= payload_length:
raise ValueError('payload_length out of range')
if (fin | rsv1 | rsv2 | rsv3) & ~1:
raise ValueError('FIN bit and Reserved bit parameter must be 0 or 1')
header = ''
first_byte = ((fin << 7)
| (rsv1 << 6) | (rsv2 << 5) | (rsv3 << 4)
| opcode)
header += chr(first_byte)
header += create_length_header(payload_length, mask)
return header
def _build_frame(header, body, mask):
if not mask:
return header + body
masking_nonce = os.urandom(4)
masker = util.RepeatedXorMasker(masking_nonce)
return header + masking_nonce + masker.mask(body)
def _filter_and_format_frame_object(frame, mask, frame_filters):
for frame_filter in frame_filters:
frame_filter.filter(frame)
header = create_header(
frame.opcode, len(frame.payload), frame.fin,
frame.rsv1, frame.rsv2, frame.rsv3, mask)
return _build_frame(header, frame.payload, mask)
def create_binary_frame(
message, opcode=common.OPCODE_BINARY, fin=1, mask=False, frame_filters=[]):
"""Creates a simple binary frame with no extension, reserved bit."""
frame = Frame(fin=fin, opcode=opcode, payload=message)
return _filter_and_format_frame_object(frame, mask, frame_filters)
def create_text_frame(
message, opcode=common.OPCODE_TEXT, fin=1, mask=False, frame_filters=[]):
"""Creates a simple text frame with no extension, reserved bit."""
encoded_message = message.encode('utf-8')
return create_binary_frame(encoded_message, opcode, fin, mask,
frame_filters)
def parse_frame(receive_bytes, logger=None,
ws_version=common.VERSION_HYBI_LATEST,
unmask_receive=True):
"""Parses a frame. Returns a tuple containing each header field and
payload.
Args:
receive_bytes: a function that reads frame data from a stream or
something similar. The function takes length of the bytes to be
read. The function must raise ConnectionTerminatedException if
there is not enough data to be read.
logger: a logging object.
ws_version: the version of WebSocket protocol.
        unmask_receive: unmask received frames. If an unmasked frame is
            received while this is set, InvalidFrameException is raised.
Raises:
ConnectionTerminatedException: when receive_bytes raises it.
InvalidFrameException: when the frame contains invalid data.
"""
if not logger:
logger = logging.getLogger()
logger.log(common.LOGLEVEL_FINE, 'Receive the first 2 octets of a frame')
received = receive_bytes(2)
first_byte = ord(received[0])
fin = (first_byte >> 7) & 1
rsv1 = (first_byte >> 6) & 1
rsv2 = (first_byte >> 5) & 1
rsv3 = (first_byte >> 4) & 1
opcode = first_byte & 0xf
second_byte = ord(received[1])
mask = (second_byte >> 7) & 1
payload_length = second_byte & 0x7f
logger.log(common.LOGLEVEL_FINE,
'FIN=%s, RSV1=%s, RSV2=%s, RSV3=%s, opcode=%s, '
'Mask=%s, Payload_length=%s',
fin, rsv1, rsv2, rsv3, opcode, mask, payload_length)
if (mask == 1) != unmask_receive:
raise InvalidFrameException(
            'Mask bit on the received frame didn\'t match masking '
'configuration for received frames')
# The HyBi and later specs disallow putting a value in 0x0-0xFFFF
# into the 8-octet extended payload length field (or 0x0-0xFD in
# 2-octet field).
valid_length_encoding = True
length_encoding_bytes = 1
if payload_length == 127:
logger.log(common.LOGLEVEL_FINE,
'Receive 8-octet extended payload length')
extended_payload_length = receive_bytes(8)
payload_length = struct.unpack(
'!Q', extended_payload_length)[0]
if payload_length > 0x7FFFFFFFFFFFFFFF:
raise InvalidFrameException(
'Extended payload length >= 2^63')
if ws_version >= 13 and payload_length < 0x10000:
valid_length_encoding = False
length_encoding_bytes = 8
logger.log(common.LOGLEVEL_FINE,
'Decoded_payload_length=%s', payload_length)
elif payload_length == 126:
logger.log(common.LOGLEVEL_FINE,
'Receive 2-octet extended payload length')
extended_payload_length = receive_bytes(2)
payload_length = struct.unpack(
'!H', extended_payload_length)[0]
if ws_version >= 13 and payload_length < 126:
valid_length_encoding = False
length_encoding_bytes = 2
logger.log(common.LOGLEVEL_FINE,
'Decoded_payload_length=%s', payload_length)
if not valid_length_encoding:
logger.warning(
'Payload length is not encoded using the minimal number of '
'bytes (%d is encoded using %d bytes)',
payload_length,
length_encoding_bytes)
if mask == 1:
logger.log(common.LOGLEVEL_FINE, 'Receive mask')
masking_nonce = receive_bytes(4)
masker = util.RepeatedXorMasker(masking_nonce)
logger.log(common.LOGLEVEL_FINE, 'Mask=%r', masking_nonce)
else:
masker = _NOOP_MASKER
logger.log(common.LOGLEVEL_FINE, 'Receive payload data')
if logger.isEnabledFor(common.LOGLEVEL_FINE):
receive_start = time.time()
raw_payload_bytes = receive_bytes(payload_length)
if logger.isEnabledFor(common.LOGLEVEL_FINE):
logger.log(
common.LOGLEVEL_FINE,
'Done receiving payload data at %s MB/s',
payload_length / (time.time() - receive_start) / 1000 / 1000)
logger.log(common.LOGLEVEL_FINE, 'Unmask payload data')
if logger.isEnabledFor(common.LOGLEVEL_FINE):
unmask_start = time.time()
unmasked_bytes = masker.mask(raw_payload_bytes)
if logger.isEnabledFor(common.LOGLEVEL_FINE):
logger.log(
common.LOGLEVEL_FINE,
'Done unmasking payload data at %s MB/s',
payload_length / (time.time() - unmask_start) / 1000 / 1000)
return opcode, unmasked_bytes, fin, rsv1, rsv2, rsv3
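# Worked example (editor's note, with assumed inputs): for the unmasked
# text frame '\x81\x05Hello' (FIN=1, opcode=0x1, length=5), parse_frame
# called with a reader over those bytes and unmask_receive=False returns
# (0x1, 'Hello', 1, 0, 0, 0).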
class FragmentedFrameBuilder(object):
"""A stateful class to send a message as fragments."""
def __init__(self, mask, frame_filters=[], encode_utf8=True):
"""Constructs an instance."""
self._mask = mask
self._frame_filters = frame_filters
# This is for skipping UTF-8 encoding when building text type frames
# from compressed data.
self._encode_utf8 = encode_utf8
self._started = False
# Hold opcode of the first frame in messages to verify types of other
# frames in the message are all the same.
self._opcode = common.OPCODE_TEXT
def build(self, payload_data, end, binary):
if binary:
frame_type = common.OPCODE_BINARY
else:
frame_type = common.OPCODE_TEXT
if self._started:
if self._opcode != frame_type:
raise ValueError('Message types are different in frames for '
'the same message')
opcode = common.OPCODE_CONTINUATION
else:
opcode = frame_type
self._opcode = frame_type
if end:
self._started = False
fin = 1
else:
self._started = True
fin = 0
if binary or not self._encode_utf8:
return create_binary_frame(
payload_data, opcode, fin, self._mask, self._frame_filters)
else:
return create_text_frame(
payload_data, opcode, fin, self._mask, self._frame_filters)
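# --- Illustrative usage (not part of the original module) ---
# A hedged sketch of how FragmentedFrameBuilder splits one logical message
# across frames: the first fragment goes out as a TEXT frame with FIN=0,
# intermediate fragments as CONTINUATION frames, and the final call (end=True)
# sets FIN=1. `write` stands in for whatever sends bytes on the socket.
def _example_fragmented_send(write):
    builder = FragmentedFrameBuilder(mask=True)
    write(builder.build(u'Hel', end=False, binary=False))    # TEXT, FIN=0
    write(builder.build(u'lo, ', end=False, binary=False))   # CONTINUATION, FIN=0
    write(builder.build(u'world', end=True, binary=False))   # CONTINUATION, FIN=1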
def _create_control_frame(opcode, body, mask, frame_filters):
frame = Frame(opcode=opcode, payload=body)
for frame_filter in frame_filters:
frame_filter.filter(frame)
if len(frame.payload) > 125:
raise BadOperationException(
'Payload data size of control frames must be 125 bytes or less')
header = create_header(
frame.opcode, len(frame.payload), frame.fin,
frame.rsv1, frame.rsv2, frame.rsv3, mask)
return _build_frame(header, frame.payload, mask)
def create_ping_frame(body, mask=False, frame_filters=[]):
return _create_control_frame(common.OPCODE_PING, body, mask, frame_filters)
def create_pong_frame(body, mask=False, frame_filters=[]):
return _create_control_frame(common.OPCODE_PONG, body, mask, frame_filters)
def create_close_frame(body, mask=False, frame_filters=[]):
return _create_control_frame(
common.OPCODE_CLOSE, body, mask, frame_filters)
def create_closing_handshake_body(code, reason):
body = ''
if code is not None:
if (code > common.STATUS_USER_PRIVATE_MAX or
code < common.STATUS_NORMAL_CLOSURE):
raise BadOperationException('Status code is out of range')
if (code == common.STATUS_NO_STATUS_RECEIVED or
code == common.STATUS_ABNORMAL_CLOSURE or
code == common.STATUS_TLS_HANDSHAKE):
raise BadOperationException('Status code is a reserved pseudo '
'code')
encoded_reason = reason.encode('utf-8')
body = struct.pack('!H', code) + encoded_reason
return body
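# --- Illustrative usage (not part of the original module) ---
# A small sketch: build a closing-handshake body for a normal closure, then
# unpack it again the same way _process_close_message does below.
def _example_close_body():
    body = create_closing_handshake_body(common.STATUS_NORMAL_CLOSURE, u'bye')
    code = struct.unpack('!H', body[0:2])[0]
    reason = body[2:].decode('utf-8')
    assert (code, reason) == (common.STATUS_NORMAL_CLOSURE, u'bye')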
class StreamOptions(object):
"""Holds option values to configure Stream objects."""
def __init__(self):
"""Constructs StreamOptions."""
# Filters applied to frames.
self.outgoing_frame_filters = []
self.incoming_frame_filters = []
# Filters applied to messages. Control frames are not affected by them.
self.outgoing_message_filters = []
self.incoming_message_filters = []
self.encode_text_message_to_utf8 = True
self.mask_send = False
self.unmask_receive = True
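# --- Illustrative usage (not part of the original module) ---
# Options as a browser-style client would set them: outgoing frames are
# masked, and incoming frames are expected to arrive unmasked, per RFC 6455.
def _example_client_options():
    options = StreamOptions()
    options.mask_send = True        # clients must mask frames they send
    options.unmask_receive = False  # servers never mask, so reject masked input
    return options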
class Stream(StreamBase):
"""A class for parsing/building frames of the WebSocket protocol
(RFC 6455).
"""
def __init__(self, request, options):
"""Constructs an instance.
Args:
request: mod_python request.
"""
StreamBase.__init__(self, request)
self._logger = util.get_class_logger(self)
self._options = options
self._request.client_terminated = False
self._request.server_terminated = False
# Holds body of received fragments.
self._received_fragments = []
# Holds the opcode of the first fragment.
self._original_opcode = None
self._writer = FragmentedFrameBuilder(
self._options.mask_send, self._options.outgoing_frame_filters,
self._options.encode_text_message_to_utf8)
self._ping_queue = deque()
def _receive_frame(self):
"""Receives a frame and return data in the frame as a tuple containing
each header field and payload separately.
Raises:
ConnectionTerminatedException: when read returns empty
string.
InvalidFrameException: when the frame contains invalid data.
"""
def _receive_bytes(length):
return self.receive_bytes(length)
return parse_frame(receive_bytes=_receive_bytes,
logger=self._logger,
ws_version=self._request.ws_version,
unmask_receive=self._options.unmask_receive)
def _receive_frame_as_frame_object(self):
opcode, unmasked_bytes, fin, rsv1, rsv2, rsv3 = self._receive_frame()
return Frame(fin=fin, rsv1=rsv1, rsv2=rsv2, rsv3=rsv3,
opcode=opcode, payload=unmasked_bytes)
def receive_filtered_frame(self):
"""Receives a frame and applies frame filters and message filters.
The frame to be received must satisfy the following conditions:
- The frame is not fragmented.
- The opcode of the frame is TEXT or BINARY.
DO NOT USE this method except for testing purposes.
"""
frame = self._receive_frame_as_frame_object()
if not frame.fin:
raise InvalidFrameException(
'Segmented frames must not be received via '
'receive_filtered_frame()')
if (frame.opcode != common.OPCODE_TEXT and
frame.opcode != common.OPCODE_BINARY):
raise InvalidFrameException(
'Control frames must not be received via '
'receive_filtered_frame()')
for frame_filter in self._options.incoming_frame_filters:
frame_filter.filter(frame)
for message_filter in self._options.incoming_message_filters:
frame.payload = message_filter.filter(frame.payload)
return frame
def send_message(self, message, end=True, binary=False):
"""Send message.
Args:
message: text in unicode or binary in str to send.
binary: send message as binary frame.
Raises:
BadOperationException: when called on a server-terminated
connection or called with inconsistent message type or
binary parameter.
"""
if self._request.server_terminated:
raise BadOperationException(
'Requested send_message after sending out a closing handshake')
if binary and isinstance(message, unicode):
raise BadOperationException(
'Message for binary frame must be instance of str')
for message_filter in self._options.outgoing_message_filters:
message = message_filter.filter(message, end, binary)
try:
# Set this to any positive integer to limit maximum size of data in
# payload data of each frame.
MAX_PAYLOAD_DATA_SIZE = -1
if MAX_PAYLOAD_DATA_SIZE <= 0:
self._write(self._writer.build(message, end, binary))
return
bytes_written = 0
while True:
end_for_this_frame = end
bytes_to_write = len(message) - bytes_written
if (MAX_PAYLOAD_DATA_SIZE > 0 and
bytes_to_write > MAX_PAYLOAD_DATA_SIZE):
end_for_this_frame = False
bytes_to_write = MAX_PAYLOAD_DATA_SIZE
frame = self._writer.build(
message[bytes_written:bytes_written + bytes_to_write],
end_for_this_frame,
binary)
self._write(frame)
bytes_written += bytes_to_write
# This if must be placed here (at the end of the while block) so that
# at least one frame is sent.
if len(message) <= bytes_written:
break
except ValueError as e:
raise BadOperationException(e)
def _get_message_from_frame(self, frame):
"""Gets a message from frame. If the message is composed of fragmented
frames and the frame is not the last fragmented frame, this method
returns None. The whole message will be returned when the last
fragmented frame is passed to this method.
Raises:
InvalidFrameException: when the frame doesn't match defragmentation
context, or the frame contains invalid data.
"""
if frame.opcode == common.OPCODE_CONTINUATION:
if not self._received_fragments:
if frame.fin:
raise InvalidFrameException(
'Received a termination frame but fragmentation '
'not started')
else:
raise InvalidFrameException(
'Received an intermediate frame but '
'fragmentation not started')
if frame.fin:
# End of fragmentation frame
self._received_fragments.append(frame.payload)
message = ''.join(self._received_fragments)
self._received_fragments = []
return message
else:
# Intermediate frame
self._received_fragments.append(frame.payload)
return None
else:
if self._received_fragments:
if frame.fin:
raise InvalidFrameException(
'Received an unfragmented frame without '
'terminating existing fragmentation')
else:
raise InvalidFrameException(
'New fragmentation started without terminating '
'existing fragmentation')
if frame.fin:
# Unfragmented frame
self._original_opcode = frame.opcode
return frame.payload
else:
# Start of fragmentation frame
if common.is_control_opcode(frame.opcode):
raise InvalidFrameException(
'Control frames must not be fragmented')
self._original_opcode = frame.opcode
self._received_fragments.append(frame.payload)
return None
def _process_close_message(self, message):
"""Processes close message.
Args:
message: close message.
Raises:
InvalidFrameException: when the message is invalid.
"""
self._request.client_terminated = True
# Status code is optional. We can have status reason only if we
# have status code. Status reason can be empty string. So,
# allowed cases are
# - no application data: no code no reason
# - 2 octet of application data: has code but no reason
# - 3 or more octet of application data: both code and reason
if len(message) == 0:
self._logger.debug('Received close frame (empty body)')
self._request.ws_close_code = (
common.STATUS_NO_STATUS_RECEIVED)
elif len(message) == 1:
raise InvalidFrameException(
'If a close frame has status code, the length of '
'status code must be 2 octet')
elif len(message) >= 2:
self._request.ws_close_code = struct.unpack(
'!H', message[0:2])[0]
self._request.ws_close_reason = message[2:].decode(
'utf-8', 'replace')
self._logger.debug(
'Received close frame (code=%d, reason=%r)',
self._request.ws_close_code,
self._request.ws_close_reason)
# As we've received a close frame, no more data is coming over the
# socket. We can now safely close the socket without worrying about
# RST sending.
if self._request.server_terminated:
self._logger.debug(
'Received ack for server-initiated closing handshake')
return
self._logger.debug(
'Received client-initiated closing handshake')
code = common.STATUS_NORMAL_CLOSURE
reason = ''
if hasattr(self._request, '_dispatcher'):
dispatcher = self._request._dispatcher
code, reason = dispatcher.passive_closing_handshake(
self._request)
if code is None and reason is not None and len(reason) > 0:
self._logger.warning(
'Handler specified reason despite code being None')
reason = ''
if reason is None:
reason = ''
self._send_closing_handshake(code, reason)
self._logger.debug(
'Acknowledged closing handshake initiated by the peer '
'(code=%r, reason=%r)', code, reason)
def _process_ping_message(self, message):
"""Processes ping message.
Args:
message: ping message.
"""
try:
handler = self._request.on_ping_handler
if handler:
handler(self._request, message)
return
except AttributeError as e:
pass
self._send_pong(message)
def _process_pong_message(self, message):
"""Processes pong message.
Args:
message: pong message.
"""
# TODO(tyoshino): Add ping timeout handling.
inflight_pings = deque()
while True:
try:
expected_body = self._ping_queue.popleft()
if expected_body == message:
# inflight_pings contains pings ignored by the
# other peer. Just forget them.
self._logger.debug(
'Ping %r is acked (%d pings were ignored)',
expected_body, len(inflight_pings))
break
else:
inflight_pings.append(expected_body)
except IndexError as e:
# The received pong was unsolicited pong. Keep the
# ping queue as is.
self._ping_queue = inflight_pings
self._logger.debug('Received an unsolicited pong')
break
try:
handler = self._request.on_pong_handler
if handler:
handler(self._request, message)
except AttributeError as e:
pass
def receive_message(self):
"""Receive a WebSocket frame and return its payload as a text in
unicode or a binary in str.
Returns:
payload data of the frame
- as unicode instance if received text frame
- as str instance if received binary frame
or None iff received closing handshake.
Raises:
BadOperationException: when called on a client-terminated
connection.
ConnectionTerminatedException: when read returns empty
string.
InvalidFrameException: when the frame contains invalid
data.
UnsupportedFrameException: when the received frame has
flags, opcode we cannot handle. You can ignore this
exception and continue receiving the next frame.
"""
if self._request.client_terminated:
raise BadOperationException(
'Requested receive_message after receiving a closing '
'handshake')
while True:
# mp_conn.read will block if no bytes are available.
# Timeout is controlled by TimeOut directive of Apache.
frame = self._receive_frame_as_frame_object()
# Check the constraint on the payload size for control frames
# before extension processes the frame.
# See also http://tools.ietf.org/html/rfc6455#section-5.5
if (common.is_control_opcode(frame.opcode) and
len(frame.payload) > 125):
raise InvalidFrameException(
'Payload data size of control frames must be 125 bytes or '
'less')
for frame_filter in self._options.incoming_frame_filters:
frame_filter.filter(frame)
if frame.rsv1 or frame.rsv2 or frame.rsv3:
raise UnsupportedFrameException(
'Unsupported flag is set (rsv = %d%d%d)' %
(frame.rsv1, frame.rsv2, frame.rsv3))
message = self._get_message_from_frame(frame)
if message is None:
continue
for message_filter in self._options.incoming_message_filters:
message = message_filter.filter(message)
if self._original_opcode == common.OPCODE_TEXT:
# The WebSocket protocol section 4.4 specifies that invalid
# characters must be replaced with U+fffd REPLACEMENT
# CHARACTER.
try:
return message.decode('utf-8')
except UnicodeDecodeError as e:
raise InvalidUTF8Exception(e)
elif self._original_opcode == common.OPCODE_BINARY:
return message
elif self._original_opcode == common.OPCODE_CLOSE:
self._process_close_message(message)
return None
elif self._original_opcode == common.OPCODE_PING:
self._process_ping_message(message)
elif self._original_opcode == common.OPCODE_PONG:
self._process_pong_message(message)
else:
raise UnsupportedFrameException(
'Opcode %d is not supported' % self._original_opcode)
def _send_closing_handshake(self, code, reason):
body = create_closing_handshake_body(code, reason)
frame = create_close_frame(
body, mask=self._options.mask_send,
frame_filters=self._options.outgoing_frame_filters)
self._request.server_terminated = True
self._write(frame)
def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason='',
wait_response=True):
"""Closes a WebSocket connection. Note that this method blocks until
it receives acknowledgement to the closing handshake.
Args:
code: Status code for close frame. If code is None, a close
frame with empty body will be sent.
reason: string representing close reason.
wait_response: True when the caller wants to wait for the response.
Raises:
BadOperationException: when reason is specified with code None
or reason is not an instance of either str or unicode.
"""
if self._request.server_terminated:
self._logger.debug(
'Requested close_connection but server is already terminated')
return
# When we receive a close frame, we call _process_close_message().
# _process_close_message() immediately acknowledges to the
# server-initiated closing handshake and sets server_terminated to
# True. So, here we can assume that we haven't received any close
# frame. We're initiating a closing handshake.
if code is None:
if reason is not None and len(reason) > 0:
raise BadOperationException(
'close reason must not be specified if code is None')
reason = ''
else:
if not isinstance(reason, str) and not isinstance(reason, unicode):
raise BadOperationException(
'close reason must be an instance of str or unicode')
self._send_closing_handshake(code, reason)
self._logger.debug(
'Initiated closing handshake (code=%r, reason=%r)',
code, reason)
if (code == common.STATUS_GOING_AWAY or
code == common.STATUS_PROTOCOL_ERROR) or not wait_response:
# It doesn't make sense to wait for a close frame if the reason is
# protocol error or that the server is going away. For some of
# other reasons, it might not make sense to wait for a close frame,
# but it's not clear, yet.
return
# TODO(ukai): 2. wait until the /client terminated/ flag has been set,
# or until a server-defined timeout expires.
#
# For now, we expect receiving closing handshake right after sending
# out closing handshake.
message = self.receive_message()
if message is not None:
raise ConnectionTerminatedException(
'Didn\'t receive valid ack for closing handshake')
# TODO: 3. close the WebSocket connection.
# note: mod_python Connection (mp_conn) doesn't have close method.
def send_ping(self, body=''):
frame = create_ping_frame(
body,
self._options.mask_send,
self._options.outgoing_frame_filters)
self._write(frame)
self._ping_queue.append(body)
def _send_pong(self, body):
frame = create_pong_frame(
body,
self._options.mask_send,
self._options.outgoing_frame_filters)
self._write(frame)
def get_last_received_opcode(self):
"""Returns the opcode of the WebSocket message which the last received
frame belongs to. The return value is valid iff immediately after
receive_message call.
"""
return self._original_opcode
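# --- Illustrative usage (not part of the original module) ---
# A minimal echo-loop sketch; `request` is whatever connection object the
# surrounding framework supplies and is assumed, not defined here.
def _example_echo_loop(request):
    stream = Stream(request, StreamOptions())
    while True:
        message = stream.receive_message()
        if message is None:  # peer completed the closing handshake
            return
        stream.send_message(message, binary=not isinstance(message, unicode))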
# vi:sts=4 sw=4 et
|
mpl-2.0
|
scafield/Wayfinder
|
plugins/ti.alloy/plugin.py
|
1729
|
5251
|
import os, sys, subprocess, hashlib
import subprocess
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
Backported from Python 2.7, where it is implemented in pure Python in the stdlib.
>>> check_output(['/usr/bin/python', '--version'])
Python 2.6.2
"""
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
error = subprocess.CalledProcessError(retcode, cmd)
error.output = output
raise error
return output
def compile(config):
paths = {}
binaries = ["alloy","node"]
dotAlloy = os.path.abspath(os.path.join(config['project_dir'], 'build', '.alloynewcli'))
if os.path.exists(dotAlloy):
print "[DEBUG] build/.alloynewcli file found, skipping plugin..."
os.remove(dotAlloy)
else:
for binary in binaries:
try:
# see if the environment variable is defined
paths[binary] = os.environ["ALLOY_" + ("NODE_" if binary == "node" else "") + "PATH"]
except KeyError as ex:
# next try PATH, and then our guess paths
if sys.platform == "darwin" or sys.platform.startswith('linux'):
userPath = os.environ["HOME"]
guessPaths = [
"/usr/local/bin/"+binary,
"/opt/local/bin/"+binary,
userPath+"/local/bin/"+binary,
"/opt/bin/"+binary,
"/usr/bin/"+binary,
"/usr/local/share/npm/bin/"+binary
]
try:
binaryPath = check_output(["which",binary], stderr=subprocess.STDOUT).strip()
print "[DEBUG] %s installed at '%s'" % (binary,binaryPath)
except:
print "[WARN] Couldn't find %s on your PATH:" % binary
print "[WARN] %s" % os.environ["PATH"]
print "[WARN]"
print "[WARN] Checking for %s in a few default locations:" % binary
for p in guessPaths:
sys.stdout.write("[WARN] %s -> " % p)
if os.path.exists(p):
binaryPath = p
print "FOUND"
break
else:
print "not found"
binaryPath = None
if binaryPath is None:
print "[ERROR] Couldn't find %s" % binary
sys.exit(1)
else:
paths[binary] = binaryPath
# no guesses on windows, just use the PATH
elif sys.platform == "win32":
paths["alloy"] = "alloy.cmd"
f = os.path.abspath(os.path.join(config['project_dir'], 'app'))
if os.path.exists(f):
print "[INFO] alloy app found at %s" % f
rd = os.path.abspath(os.path.join(config['project_dir'], 'Resources'))
devicefamily = 'none'
simtype = 'none'
version = '0'
deploytype = 'development'
if config['platform']==u'ios':
version = config['iphone_version']
devicefamily = config['devicefamily']
deploytype = config['deploytype']
if config['platform']==u'android':
builder = config['android_builder']
version = builder.tool_api_level
deploytype = config['deploy_type']
if config['platform']==u'mobileweb':
builder = config['mobileweb_builder']
deploytype = config['deploytype']
cfg = "platform=%s,version=%s,simtype=%s,devicefamily=%s,deploytype=%s," % (config['platform'],version,simtype,devicefamily,deploytype)
if sys.platform == "win32":
cmd = [paths["alloy"], "compile", f, "--no-colors", "--config", cfg]
else:
cmd = [paths["node"], paths["alloy"], "compile", f, "--no-colors", "--config", cfg]
print "[INFO] Executing Alloy compile:"
print "[INFO] %s" % " ".join(cmd)
try:
print check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
if hasattr(ex, 'output'):
print ex.output
print "[ERROR] Alloy compile failed"
retcode = 1
if hasattr(ex, 'returncode'):
retcode = ex.returncode
sys.exit(retcode)
except EnvironmentError as ex:
print "[ERROR] Unexpected error with Alloy compiler plugin: %s" % ex.strerror
sys.exit(2)
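# --- Illustrative usage (not part of the original plugin) ---
# A hedged sketch of the `config` dict the Titanium build pipeline is expected
# to pass in, inferred from the keys read above; all values are hypothetical.
# compile({
#     'project_dir': '/path/to/project',
#     'platform': u'ios',
#     'iphone_version': '9.0',
#     'devicefamily': 'iphone',
#     'deploytype': 'development',
# })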
|
apache-2.0
|
CyanogenMod/android_external_chromium_org
|
third_party/protobuf/python/google/protobuf/internal/service_reflection_test.py
|
560
|
5127
|
#! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.internal.service_reflection."""
__author__ = '[email protected] (Petar Petrov)'
import unittest
from google.protobuf import unittest_pb2
from google.protobuf import service_reflection
from google.protobuf import service
class FooUnitTest(unittest.TestCase):
def testService(self):
class MockRpcChannel(service.RpcChannel):
def CallMethod(self, method, controller, request, response, callback):
self.method = method
self.controller = controller
self.request = request
callback(response)
class MockRpcController(service.RpcController):
def SetFailed(self, msg):
self.failure_message = msg
self.callback_response = None
class MyService(unittest_pb2.TestService):
pass
self.callback_response = None
def MyCallback(response):
self.callback_response = response
rpc_controller = MockRpcController()
channel = MockRpcChannel()
srvc = MyService()
srvc.Foo(rpc_controller, unittest_pb2.FooRequest(), MyCallback)
self.assertEqual('Method Foo not implemented.',
rpc_controller.failure_message)
self.assertEqual(None, self.callback_response)
rpc_controller.failure_message = None
service_descriptor = unittest_pb2.TestService.GetDescriptor()
srvc.CallMethod(service_descriptor.methods[1], rpc_controller,
unittest_pb2.BarRequest(), MyCallback)
self.assertEqual('Method Bar not implemented.',
rpc_controller.failure_message)
self.assertEqual(None, self.callback_response)
class MyServiceImpl(unittest_pb2.TestService):
def Foo(self, rpc_controller, request, done):
self.foo_called = True
def Bar(self, rpc_controller, request, done):
self.bar_called = True
srvc = MyServiceImpl()
rpc_controller.failure_message = None
srvc.Foo(rpc_controller, unittest_pb2.FooRequest(), MyCallback)
self.assertEqual(None, rpc_controller.failure_message)
self.assertEqual(True, srvc.foo_called)
rpc_controller.failure_message = None
srvc.CallMethod(service_descriptor.methods[1], rpc_controller,
unittest_pb2.BarRequest(), MyCallback)
self.assertEqual(None, rpc_controller.failure_message)
self.assertEqual(True, srvc.bar_called)
def testServiceStub(self):
class MockRpcChannel(service.RpcChannel):
def CallMethod(self, method, controller, request,
response_class, callback):
self.method = method
self.controller = controller
self.request = request
callback(response_class())
self.callback_response = None
def MyCallback(response):
self.callback_response = response
channel = MockRpcChannel()
stub = unittest_pb2.TestService_Stub(channel)
rpc_controller = 'controller'
request = 'request'
# GetDescriptor is now static; it still works as an instance method for compatibility
self.assertEqual(unittest_pb2.TestService_Stub.GetDescriptor(),
stub.GetDescriptor())
# Invoke method.
stub.Foo(rpc_controller, request, MyCallback)
self.assertTrue(isinstance(self.callback_response,
unittest_pb2.FooResponse))
self.assertEqual(request, channel.request)
self.assertEqual(rpc_controller, channel.controller)
self.assertEqual(stub.GetDescriptor().methods[0], channel.method)
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
rayners/offlineimap
|
offlineimap/CustomConfig.py
|
2
|
5038
|
# Copyright (C) 2003 John Goerzen
# <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from ConfigParser import SafeConfigParser
from offlineimap.localeval import LocalEval
import os
class CustomConfigParser(SafeConfigParser):
def getdefault(self, section, option, default, *args, **kwargs):
"""Same as config.get, but returns the "default" option if there
is no such option specified."""
if self.has_option(section, option):
return apply(self.get, [section, option] + list(args), kwargs)
else:
return default
def getdefaultint(self, section, option, default, *args, **kwargs):
if self.has_option(section, option):
return apply(self.getint, [section, option] + list(args), kwargs)
else:
return default
def getdefaultfloat(self, section, option, default, *args, **kwargs):
if self.has_option(section, option):
return apply(self.getfloat, [section, option] + list(args), kwargs)
else:
return default
def getdefaultboolean(self, section, option, default, *args, **kwargs):
if self.has_option(section, option):
return apply(self.getboolean, [section, option] + list(args),
kwargs)
else:
return default
def getmetadatadir(self):
metadatadir = os.path.expanduser(self.getdefault("general", "metadata", "~/.offlineimap"))
if not os.path.exists(metadatadir):
os.mkdir(metadatadir, 0700)
return metadatadir
def getlocaleval(self):
if self.has_option("general", "pythonfile"):
path = os.path.expanduser(self.get("general", "pythonfile"))
else:
path = None
return LocalEval(path)
def getsectionlist(self, key):
"""Returns a list of sections that start with key + " ". That is,
if key is "Account", returns all section names that start with
"Account ", but strips off the "Account ". For instance, for
"Account Test", returns "Test"."""
key = key + ' '
return [x[len(key):] for x in self.sections() \
if x.startswith(key)]
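# --- Illustrative usage (not part of the original module) ---
# A short sketch of the getdefault* helpers: they fall back to the supplied
# default when the option is absent, instead of raising NoOptionError.
def _example_getdefault():
    config = CustomConfigParser()
    config.add_section('Account Test')
    config.set('Account Test', 'maxage', '30')
    assert config.getdefaultint('Account Test', 'maxage', 7) == 30
    assert config.getdefaultint('Account Test', 'maxsize', 7) == 7
    assert config.getsectionlist('Account') == ['Test']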
def CustomConfigDefault():
"""Just a constant that won't occur anywhere else.
This allows us to detect whether the user has passed in any
default value to the getconf* functions in ConfigHelperMixin
derived classes."""
pass
class ConfigHelperMixin:
"""Allow comfortable retrieving of config values pertaining to a section.
If a class inherits from this cls:`ConfigHelperMixin`, it needs
to provide 2 functions: meth:`getconfig` (returning a
ConfigParser object) and meth:`getsection` (returning a string
which represents the section to look up). All calls to getconf*
will then return the configuration values for the ConfigParser
object in the specific section."""
def _confighelper_runner(self, option, default, defaultfunc, mainfunc):
"""Return config value for getsection()"""
if default == CustomConfigDefault:
return apply(mainfunc, [self.getsection(), option])
else:
return apply(defaultfunc, [self.getsection(), option, default])
def getconf(self, option,
default = CustomConfigDefault):
return self._confighelper_runner(option, default,
self.getconfig().getdefault,
self.getconfig().get)
def getconfboolean(self, option, default = CustomConfigDefault):
return self._confighelper_runner(option, default,
self.getconfig().getdefaultboolean,
self.getconfig().getboolean)
def getconfint(self, option, default = CustomConfigDefault):
return self._confighelper_runner(option, default,
self.getconfig().getdefaultint,
self.getconfig().getint)
def getconffloat(self, option, default = CustomConfigDefault):
return self._confighelper_runner(option, default,
self.getconfig().getdefaultfloat,
self.getconfig().getfloat)
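# --- Illustrative usage (not part of the original module) ---
# A hedged sketch of the two hooks a ConfigHelperMixin user must provide:
# getconfig() returning the parser and getsection() naming its section.
class _ExampleAccount(ConfigHelperMixin):
    def __init__(self, config, name):
        self.config = config
        self.name = name
    def getconfig(self):
        return self.config
    def getsection(self):
        return 'Account ' + self.name
# _ExampleAccount(config, 'Test').getconfint('maxage', 7) would then return
# the [Account Test] maxage value, or 7 when the option is missing.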
|
gpl-2.0
|
WorldBank-Transport/DRIVER
|
app/driver/settings.py
|
2
|
15418
|
"""
Django settings for driver project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEVELOP = True if os.environ.get('DJANGO_ENV', 'development') == 'development' else False
STAGING = True if os.environ.get('DJANGO_ENV', 'staging') == 'staging' else False
PRODUCTION = not DEVELOP and not STAGING
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = DEVELOP
ALLOWED_HOST = os.environ.get('ALLOWED_HOST', None)
if ALLOWED_HOST:
ALLOWED_HOSTS = [ALLOWED_HOST]
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.gis',
'django.contrib.postgres',
'corsheaders',
'rest_framework',
'rest_framework.authtoken',
'storages',
'django_extensions',
'djangooidc',
'django_filters',
'rest_framework_gis',
'grout',
'driver',
'driver_auth',
'data',
'user_filters',
'black_spots',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'corsheaders.middleware.CorsMiddleware',
)
if DEBUG:
# Perform setup for Django Debug Toolbar
INSTALLED_APPS += (
'debug_toolbar',
)
# Prepend the Debug Toolbar middleware class to the beginning of the list
MIDDLEWARE_CLASSES = (
'debug_toolbar.middleware.DebugToolbarMiddleware',
) + MIDDLEWARE_CLASSES
# Show toolbar in local dev
DEBUG_TOOLBAR_CONFIG = {
# Since REMOTE_HOST gets overloaded by routing through Docker and Nginx, we can't rely on
# it like DDT normally does internally.
# Until an alternative is available, we have to trust that DEBUG=True is safe enough
'SHOW_TOOLBAR_CALLBACK': lambda(request): True
}
ROOT_URLCONF = 'driver.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'driver.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': os.environ.get('DRIVER_DB_NAME', 'postgres'),
'HOST': os.environ.get('DRIVER_DB_HOST', 'localhost'),
'PORT': os.environ.get('DRIVER_DB_PORT', 5432),
'USER': os.environ.get('DRIVER_DB_USER', 'postgres'),
'PASSWORD': os.environ.get('DRIVER_DB_PASSWORD', 'postgres'),
'CONN_MAX_AGE': 3600, # in seconds
'OPTIONS': {
'sslmode': 'require'
}
}
}
POSTGIS_VERSION = tuple(
map(int, os.environ.get('DJANGO_POSTGIS_VERSION', '2.1.3').split("."))
)
# File storage
DEFAULT_FILE_STORAGE = 'storages.backends.overwrite.OverwriteStorage'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = os.environ.get("DRIVER_LOCAL_TIME_ZONE", 'Asia/Manila')
USE_I18N = True
USE_L10N = True
USE_TZ = True
OSM_EXTRACT_URL = os.environ.get('DRIVER_OSM_EXTRACT_URL',
'https://download.geofabrik.de/asia/philippines-latest.osm.pbf')
BLACKSPOT_RECORD_TYPE_LABEL = os.environ.get('BLACKSPOT_RECORD_TYPE_LABEL', 'Incident')
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.environ['DJANGO_STATIC_ROOT']
# Media files (uploaded via API)
# https://docs.djangoproject.com/en/1.8/topics/files/
MEDIA_ROOT = os.environ['DJANGO_MEDIA_ROOT']
MEDIA_URL = '/media/'
# use cookie-based sessions
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
SESSION_COOKIE_HTTPONLY = True
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple',
}
},
'loggers': {
'django': {
'handlers': ['console'],
'propagate': True,
'level': 'INFO',
},
'grout': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
'driver_auth': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
'data': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
'user_filters': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
'black_spots': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
'oic': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
'djangooidc': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
}
}
# user and group settings
DEFAULT_ADMIN_EMAIL = os.environ.get("DRIVER_ADMIN_EMAIL", '[email protected]')
DEFAULT_ADMIN_USERNAME = os.environ.get("DRIVER_ADMIN_USERNAME", 'admin')
DEFAULT_ADMIN_PASSWORD = os.environ.get("DRIVER_ADMIN_PASSWORD", 'admin')
# the client keeps these group names in the editor's config.js
DRIVER_GROUPS = {
'READ_ONLY': os.environ.get('DRIVER_READ_ONLY_GROUP', 'public'),
'READ_WRITE': os.environ.get('DRIVER_READ_WRITE_GROUP', 'analyst'),
'ADMIN': os.environ.get('DRIVER_ADMIN_GROUP', 'admin')
}
# Django Rest Framework
# http://www.django-rest-framework.org/
# TODO: Switch to CORS_ORIGIN_REGEX_WHITELIST when we have a domain in place
CORS_ORIGIN_ALLOW_ALL = True
REST_FRAMEWORK = {
# NB: session auth must appear before token auth for both to work.
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
'rest_framework.filters.OrderingFilter',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 10,
}
# django-redis cache configuration
# https://niwinz.github.io/django-redis/latest/
# https://docs.djangoproject.com/en/1.8/topics/cache/#cache-arguments
REDIS_HOST = os.environ.get('DRIVER_REDIS_HOST', '127.0.0.1')
REDIS_PORT = os.environ.get('DRIVER_REDIS_PORT', '6379')
# JAR file cache TTL (keep in redis for this many seconds since creation or last retrieval)
JARFILE_REDIS_TTL_SECONDS = os.environ.get('DRIVER_JAR_TTL_SECONDS', 60 * 60 * 24 * 30) # 30 days
CACHES = {
"default": {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': 'redis://' + REDIS_HOST + ':' + REDIS_PORT + '/2',
'TIMEOUT': None, # never expire
'KEY_PREFIX': 'DJANGO',
'VERSION': 1,
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'SOCKET_CONNECT_TIMEOUT': 5, # seconds
'SOCKET_TIMEOUT': 5, # seconds
'MAX_ENTRIES': 900, # defaults to 300
'CULL_FREQUENCY': 4, # fraction culled when max reached (1 / CULL_FREQ); default: 3
# 'COMPRESS_MIN_LEN': 0, # set to value > 0 to enable compression
}
},
"jars": {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': 'redis://' + REDIS_HOST + ':' + REDIS_PORT + '/3',
'TIMEOUT': JARFILE_REDIS_TTL_SECONDS,
'KEY_PREFIX': None,
'VERSION': 1,
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'SOCKET_CONNECT_TIMEOUT': 5, # seconds
'SOCKET_TIMEOUT': 5, # seconds
'MAX_ENTRIES': 300, # defaults to 300
'CULL_FREQUENCY': 4, # fraction culled when max reached (1 / CULL_FREQ); default: 3
# 'COMPRESS_MIN_LEN': 0, # set to value > 0 to enable compression
}
},
"boundaries": {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': 'redis://{host}:{port}/4'.format(host=REDIS_HOST, port=REDIS_PORT),
# Timeout is set and renewed at the individual key level in data/filters.py
'TIMEOUT': None,
'KEY_PREFIX': 'boundary',
'VERSION': 1,
}
}
# Celery
BROKER_URL = 'redis://{}:{}/0'.format(REDIS_HOST, REDIS_PORT)
CELERY_RESULT_BACKEND = 'redis://{}:{}/1'.format(REDIS_HOST, REDIS_PORT)
CELERY_TASK_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ROUTES = {
'black_spots.tasks.calculate_black_spots.calculate_black_spots': {'queue': 'taskworker'},
'black_spots.tasks.get_segments.cleanup': {'queue': 'taskworker'},
'black_spots.tasks.get_segments.create_segments_tar': {'queue': 'taskworker'},
'black_spots.tasks.get_segments.get_segments_shp': {'queue': 'taskworker'},
'black_spots.tasks.load_road_network.load_road_network': {'queue': 'taskworker'},
'black_spots.tasks.load_blackspot_geoms.load_blackspot_geoms': {'queue': 'taskworker'},
'black_spots.tasks.generate_training_input.get_training_noprecip': {'queue': 'taskworker'},
'black_spots.tasks.generate_training_input.get_training_precip': {'queue': 'taskworker'},
'data.tasks.remove_duplicates.remove_duplicates': {'queue': 'taskworker'},
'data.tasks.export_csv.export_csv': {'queue': 'taskworker'},
'data.tasks.fetch_record_csv.export_records': {'queue': 'taskworker'}
}
# This needs to match the proxy configuration in nginx so that requests for files generated
# by celery jobs go to the right place.
CELERY_DOWNLOAD_PREFIX = '/download/'
CELERY_EXPORTS_FILE_PATH = '/var/www/media'
# Deduplication settings
DEDUPE_TIME_RANGE_HOURS = float(os.environ.get('DRIVER_DEDUPE_TIME_RANGE_HOURS', '12'))
# .001 ~= 110m
DEDUPE_DISTANCE_DEGREES = float(os.environ.get('DRIVER_DEDUPE_DISTANCE_DEGREES', '0.0008'))
GROUT = {
# It is suggested to change this if you know that your data will be limited to
# a certain part of the world, for example to a UTM Grid projection or a state
# plane.
'SRID': 4326,
}
## django-oidc settings
HOST_URL = os.environ.get('DRIVER_APP_HOST', os.environ.get('HOSTNAME'))
# TODO: conditionally set for GLUU in production
GOOGLE_OAUTH_CLIENT_ID = os.environ.get('OAUTH_CLIENT_ID', '')
GOOGLE_OAUTH_CLIENT_SECRET = os.environ.get('OAUTH_CLIENT_SECRET', '')
# Forecast.io settings
FORECAST_IO_API_KEY = os.environ.get('FORECAST_IO_API_KEY', '')
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
if GOOGLE_OAUTH_CLIENT_ID:
AUTHENTICATION_BACKENDS += ('djangooidc.backends.OpenIdConnectBackend',)
LOGIN_URL = 'openid'
OIDC_ALLOW_DYNAMIC_OP = False
OIDC_CREATE_UNKNOWN_USER = True
OIDC_VERIFY_SSL = True
# Information used when registering the client; this may be the same for all OPs
# Ignored if auto registration is not used.
OIDC_DYNAMIC_CLIENT_REGISTRATION_DATA = {
"application_type": "web",
"contacts": ["[email protected]", "[email protected]"],
"redirect_uris": [HOST_URL + "/openid/callback/login/", ],
"post_logout_redirect_uris": [HOST_URL + "/openid/callback/logout/", ]
}
# Default is using the 'code' workflow, which requires direct connectivity from your website to the OP.
OIDC_DEFAULT_BEHAVIOUR = {
"response_type": "code",
"scope": ["openid", "email"],
}
OIDC_PROVIDERS = { }
if len(GOOGLE_OAUTH_CLIENT_ID) > 0:
# see: https://developers.google.com/identity/protocols/OpenIDConnect?hl=en
# example config towards bottom of page
OIDC_PROVIDERS['google.com'] = {
"provider_info": {
"issuer": "https://accounts.google.com",
"authorization_endpoint": "https://accounts.google.com/o/oauth2/v2/auth",
"token_endpoint": "https://www.googleapis.com/oauth2/v4/token",
"userinfo_endpoint": "https://www.googleapis.com/oauth2/v3/userinfo",
"revocation_endpoint": "https://accounts.google.com/o/oauth2/revoke",
"jwks_uri": "https://www.googleapis.com/oauth2/v3/certs",
"response_types_supported": [
"code",
"token",
"id_token",
"code token",
"code id_token",
"token id_token",
"code token id_token",
"none"
], "subject_types_supported": [
"public"
], "id_token_signing_alg_values_supported": [
"RS256"
], "scopes_supported": [
"openid",
"email",
"profile"
], "token_endpoint_auth_methods_supported": [
"client_secret_post",
"client_secret_basic"
], "claims_supported": [
"aud",
"email",
"email_verified",
"exp",
"family_name",
"given_name",
"iat",
"iss",
"locale",
"name",
"picture",
"sub"
]
},
"behaviour": OIDC_DEFAULT_BEHAVIOUR,
"client_registration": {
"client_id": GOOGLE_OAUTH_CLIENT_ID,
"client_secret": GOOGLE_OAUTH_CLIENT_SECRET,
"redirect_uris": [HOST_URL + "/openid/callback/login/"],
"post_logout_redirect_uris": [HOST_URL + "/openid/callback/logout/"],
}
}
# These fields will be visible to read-only users
READ_ONLY_FIELDS_REGEX = r'Details$'
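# --- Illustrative usage (not part of the original settings module) ---
# A hedged sketch of reaching the named cache aliases defined above through
# Django's cache framework; the key names here are hypothetical.
def _example_cache_usage():
    from django.core.cache import caches
    caches['jars'].set('some-jar-key', 'jar-bytes')  # honors JARFILE_REDIS_TTL_SECONDS
    return caches['boundaries'].get('boundary-key')  # per-key TTLs set in data/filters.py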
|
gpl-3.0
|
RaviTezu/yowsup
|
yowsup/layers/interface/interface.py
|
22
|
2607
|
from yowsup.layers import YowLayer, YowLayerEvent
from yowsup.layers.protocol_iq.protocolentities import IqProtocolEntity
from yowsup.layers.network import YowNetworkLayer
from yowsup.layers.auth import YowAuthenticationProtocolLayer
import inspect
class ProtocolEntityCallback(object):
def __init__(self, entityType):
self.entityType = entityType
def __call__(self, fn):
fn.callback = self.entityType
return fn
class YowInterfaceLayer(YowLayer):
def __init__(self):
self.callbacks = {}
self.iqRegistry = {}
members = inspect.getmembers(self, predicate=inspect.ismethod)
for m in members:
if hasattr(m[1], "callback"):
fname = m[0]
fn = m[1]
self.callbacks[fn.callback] = getattr(self, fname)
def _sendIq(self, iqEntity, onSuccess = None, onError = None):
assert iqEntity.getTag() == "iq", "Expected *IqProtocolEntity in _sendIq, got %s" % iqEntity.getTag()
self.iqRegistry[iqEntity.getId()] = (iqEntity, onSuccess, onError)
self.toLower(iqEntity)
def processIqRegistry(self, entity):
"""
:type entity: IqProtocolEntity
"""
if entity.getTag() == "iq":
iq_id = entity.getId()
if iq_id in self.iqRegistry:
originalIq, successClbk, errorClbk = self.iqRegistry[iq_id]
del self.iqRegistry[iq_id]
if entity.getType() == IqProtocolEntity.TYPE_RESULT and successClbk:
successClbk(entity, originalIq)
elif entity.getType() == IqProtocolEntity.TYPE_ERROR and errorClbk:
errorClbk(entity, originalIq)
return True
return False
def getOwnJid(self, full = True):
jid = self.getProp(YowAuthenticationProtocolLayer.PROP_CREDENTIALS)[0]
if jid:
return jid + "@s.whatsapp.net" if full else jid
return None
def connect(self):
loginEvent = YowLayerEvent(YowNetworkLayer.EVENT_STATE_CONNECT)
self.broadcastEvent(loginEvent)
def disconnect(self):
disconnectEvent = YowLayerEvent(YowNetworkLayer.EVENT_STATE_DISCONNECT)
self.broadcastEvent(disconnectEvent)
def send(self, data):
self.toLower(data)
def receive(self, entity):
if not self.processIqRegistry(entity):
entityType = entity.getTag()
if entityType in self.callbacks:
self.callbacks[entityType](entity)
def __str__(self):
return "Interface Layer"
|
gpl-3.0
|
dsiddharth/access-keys
|
keystone/tests/fixtures/appserver.py
|
2
|
2693
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import fixtures
from paste import deploy
from keystone.common import environment
from keystone import config
CONF = config.CONF
MAIN = 'main'
ADMIN = 'admin'
class AppServer(fixtures.Fixture):
"""A fixture for managing an application server instance.
"""
def __init__(self, config, name, cert=None, key=None, ca=None,
cert_required=False, host='127.0.0.1', port=0):
super(AppServer, self).__init__()
self.config = config
self.name = name
self.cert = cert
self.key = key
self.ca = ca
self.cert_required = cert_required
self.host = host
self.port = port
def setUp(self):
super(AppServer, self).setUp()
app = deploy.loadapp(self.config, name=self.name)
self.server = environment.Server(app, self.host, self.port)
self._setup_SSL_if_requested()
self.server.start(key='socket')
# some tests need to know the port we ran on.
self.port = self.server.socket_info['socket'][1]
self._update_config_opt()
self.addCleanup(self.server.kill)
def _setup_SSL_if_requested(self):
# TODO(dstanek): fix environment.Server to take a SSLOpts instance
# so that the params are either always set or not
if (self.cert is not None and
self.ca is not None and
self.key is not None):
self.server.set_ssl(certfile=self.cert,
keyfile=self.key,
ca_certs=self.ca,
cert_required=self.cert_required)
def _update_config_opt(self):
"""Updates the config with the actual port used."""
opt_name = self._get_config_option_for_section_name()
CONF.set_override(opt_name, self.port)
def _get_config_option_for_section_name(self):
"""Maps Paster config section names to port option names."""
return {'admin': 'admin_port', 'main': 'public_port'}[self.name]
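# --- Illustrative usage (not part of the original module) ---
# A hedged sketch of activating the fixture from a testtools-style test case;
# the paste config URI below is hypothetical.
import testtools

class _ExampleServerTest(testtools.TestCase):
    def test_spins_up_admin_server(self):
        # setUp() binds to port 0 and reads the actual port back.
        server = self.useFixture(
            AppServer('config:/etc/keystone/keystone-paste.ini', ADMIN))
        self.assertNotEqual(0, server.port)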
|
apache-2.0
|
happyboy310/keras
|
tests/auto/test_tasks.py
|
41
|
6067
|
from __future__ import print_function
import numpy as np
np.random.seed(1337)
from keras.utils.test_utils import get_test_data
from keras.models import Sequential
from keras.layers.core import Dense, Activation, TimeDistributedDense, Flatten
from keras.layers.recurrent import GRU
from keras.layers.convolutional import Convolution2D
from keras.utils.np_utils import to_categorical
import unittest
class TestRegularizers(unittest.TestCase):
def test_vector_clf(self):
nb_hidden = 10
print('vector classification data:')
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(10,),
classification=True, nb_class=2)
print('X_train:', X_train.shape)
print('X_test:', X_test.shape)
print('y_train:', y_train.shape)
print('y_test:', y_test.shape)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
model = Sequential()
model.add(Dense(X_train.shape[-1], nb_hidden))
model.add(Activation('relu'))
model.add(Dense(nb_hidden, y_train.shape[-1]))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), show_accuracy=True, verbose=2)
print(history.history)
self.assertTrue(history.history['val_acc'][-1] > 0.9)
def test_vector_reg(self):
nb_hidden = 10
print('vector regression data:')
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(10,), output_shape=(2,),
classification=False)
print('X_train:', X_train.shape)
print('X_test:', X_test.shape)
print('y_train:', y_train.shape)
print('y_test:', y_test.shape)
model = Sequential()
model.add(Dense(X_train.shape[-1], nb_hidden))
model.add(Activation('tanh'))
model.add(Dense(nb_hidden, y_train.shape[-1]))
model.compile(loss='hinge', optimizer='adagrad')
history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), verbose=2)
self.assertTrue(history.history['val_loss'][-1] < 0.9)
def test_temporal_clf(self):
print('temporal classification data:')
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(5,10),
classification=True, nb_class=2)
print('X_train:', X_train.shape)
print('X_test:', X_test.shape)
print('y_train:', y_train.shape)
print('y_test:', y_test.shape)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
model = Sequential()
model.add(GRU(X_train.shape[-1], y_train.shape[-1]))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adadelta')
history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), show_accuracy=True, verbose=2)
self.assertTrue(history.history['val_acc'][-1] > 0.9)
def test_temporal_reg(self):
print('temporal regression data:')
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(5, 10), output_shape=(2,),
classification=False)
print('X_train:', X_train.shape)
print('X_test:', X_test.shape)
print('y_train:', y_train.shape)
print('y_test:', y_test.shape)
model = Sequential()
model.add(GRU(X_train.shape[-1], y_train.shape[-1]))
model.compile(loss='hinge', optimizer='adam')
history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), verbose=2)
self.assertTrue(history.history['val_loss'][-1] < 0.8)
def test_seq_to_seq(self):
print('sequence to sequence data:')
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(5, 10), output_shape=(5, 10),
classification=False)
print('X_train:', X_train.shape)
print('X_test:', X_test.shape)
print('y_train:', y_train.shape)
print('y_test:', y_test.shape)
model = Sequential()
model.add(TimeDistributedDense(X_train.shape[-1], y_train.shape[-1]))
model.compile(loss='hinge', optimizer='rmsprop')
history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), verbose=2)
self.assertTrue(history.history['val_loss'][-1] < 0.75)
def test_img_clf(self):
print('image classification data:')
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(3, 32, 32),
classification=True, nb_class=2)
print('X_train:', X_train.shape)
print('X_test:', X_test.shape)
print('y_train:', y_train.shape)
print('y_test:', y_test.shape)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
model = Sequential()
model.add(Convolution2D(32, 3, 32, 32))
model.add(Activation('sigmoid'))
model.add(Flatten())
model.add(Dense(32, y_test.shape[-1]))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='sgd')
history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), show_accuracy=True, verbose=2)
self.assertTrue(history.history['val_acc'][-1] > 0.9)
if __name__ == '__main__':
print('Test different types of classification and regression tasks')
unittest.main()
|
mit
|
oinopion/django
|
tests/template_tests/filter_tests/test_length_is.py
|
360
|
3204
|
from django.template.defaultfilters import length_is
from django.test import SimpleTestCase
from ..utils import setup
class LengthIsTests(SimpleTestCase):
@setup({'length_is01': '{% if some_list|length_is:"4" %}Four{% endif %}'})
def test_length_is01(self):
output = self.engine.render_to_string('length_is01', {'some_list': ['4', None, True, {}]})
self.assertEqual(output, 'Four')
@setup({'length_is02': '{% if some_list|length_is:"4" %}Four{% else %}Not Four{% endif %}'})
def test_length_is02(self):
output = self.engine.render_to_string('length_is02', {'some_list': ['4', None, True, {}, 17]})
self.assertEqual(output, 'Not Four')
@setup({'length_is03': '{% if mystring|length_is:"4" %}Four{% endif %}'})
def test_length_is03(self):
output = self.engine.render_to_string('length_is03', {'mystring': 'word'})
self.assertEqual(output, 'Four')
@setup({'length_is04': '{% if mystring|length_is:"4" %}Four{% else %}Not Four{% endif %}'})
def test_length_is04(self):
output = self.engine.render_to_string('length_is04', {'mystring': 'Python'})
self.assertEqual(output, 'Not Four')
@setup({'length_is05': '{% if mystring|length_is:"4" %}Four{% else %}Not Four{% endif %}'})
def test_length_is05(self):
output = self.engine.render_to_string('length_is05', {'mystring': ''})
self.assertEqual(output, 'Not Four')
@setup({'length_is06': '{% with var|length as my_length %}{{ my_length }}{% endwith %}'})
def test_length_is06(self):
output = self.engine.render_to_string('length_is06', {'var': 'django'})
self.assertEqual(output, '6')
# Boolean return value from length_is should not be coerced to a string
@setup({'length_is07': '{% if "X"|length_is:0 %}Length is 0{% else %}Length not 0{% endif %}'})
def test_length_is07(self):
output = self.engine.render_to_string('length_is07', {})
self.assertEqual(output, 'Length not 0')
@setup({'length_is08': '{% if "X"|length_is:1 %}Length is 1{% else %}Length not 1{% endif %}'})
def test_length_is08(self):
output = self.engine.render_to_string('length_is08', {})
self.assertEqual(output, 'Length is 1')
# Invalid uses that should fail silently.
@setup({'length_is09': '{{ var|length_is:"fish" }}'})
def test_length_is09(self):
output = self.engine.render_to_string('length_is09', {'var': 'django'})
self.assertEqual(output, '')
@setup({'length_is10': '{{ int|length_is:"1" }}'})
def test_length_is10(self):
output = self.engine.render_to_string('length_is10', {'int': 7})
self.assertEqual(output, '')
@setup({'length_is11': '{{ none|length_is:"1" }}'})
def test_length_is11(self):
output = self.engine.render_to_string('length_is11', {'none': None})
self.assertEqual(output, '')
class FunctionTests(SimpleTestCase):
def test_empty_list(self):
self.assertEqual(length_is([], 0), True)
self.assertEqual(length_is([], 1), False)
def test_string(self):
self.assertEqual(length_is('a', 1), True)
self.assertEqual(length_is('a', 10), False)
|
bsd-3-clause
|
xq262144/hue
|
desktop/core/ext-py/Django-1.6.10/tests/raw_query/models.py
|
55
|
1030
|
from django.db import models
class Author(models.Model):
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
dob = models.DateField()
def __init__(self, *args, **kwargs):
super(Author, self).__init__(*args, **kwargs)
# Protect against annotations being passed to __init__ --
# this'll make the test suite get angry if annotations aren't
# treated differently than fields.
for k in kwargs:
assert k in [f.attname for f in self._meta.fields], \
"Author.__init__ got an unexpected parameter: %s" % k
class Book(models.Model):
title = models.CharField(max_length=255)
author = models.ForeignKey(Author)
paperback = models.BooleanField(default=False)
opening_line = models.TextField()
class Coffee(models.Model):
brand = models.CharField(max_length=255, db_column="name")
class Reviewer(models.Model):
reviewed = models.ManyToManyField(Book)
class FriendlyAuthor(Author):
pass
|
apache-2.0
|
nicolas998/Op_Interpolated
|
06_Codigos/Genera_kml.py
|
2
|
10704
|
#!/usr/bin/env python
import argparse
import textwrap
import os
from wmf import wmf
from osgeo import ogr
import pickle
import numpy as np
# Input parameters for the tracer
parser=argparse.ArgumentParser(
prog='Consulta_Caudal',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
Updates the kml produced by the model so that it is readable on the SIATA web page.
'''))
# Required parameters
parser.add_argument("cuencaNC",help="Binary file with the basin for which the kml will be generated")
parser.add_argument("umbral",help="Threshold for generating the stream network in the basin", type = float)
parser.add_argument("dx",help="Planar delta x used to estimate channel lengths", type = float)
parser.add_argument("ruta",help="path where the kml is saved")
parser.add_argument("precip",help="path to a precipitation map or a constant precipitation value")
parser.add_argument("-g","--geomorfo",
help = "path to the dictionary with the geomorphological properties of each element",
default = None)
parser.add_argument("-t","--tiempoC",
help = "path to the time-of-concentration figures displayed on the page (provided by the systems team)",
default = None)
parser.add_argument('-1','--coefMax', nargs='+',
help='Coefficients for maximum-flow regionalization, default: [6.71, 3.29]', default = [6.71, 3.29], type = float)
parser.add_argument('-2','--expoMax', nargs='+',
help='Exponents for maximum-flow regionalization, default: [0.82, 0.64]', default = [0.82, 0.64], type = float)
parser.add_argument('-3','--coefMin', nargs='+',
help='Coefficients for minimum-flow regionalization, default: [0.4168, 0.2]', default = [0.4168, 0.2], type = float)
parser.add_argument('-4','--expoMin', nargs='+',
help='Exponents for minimum-flow regionalization, default: [1.058, 0.98]', default = [1.058, 0.98], type = float)
parser.add_argument('-s','--shpinfo', help ="Shapefile with names of channels, rural districts (veredas), municipalities and more")
parser.add_argument('-p','--posiciones',
help="Positions of these fields inside the shp: 'Municipio','Tipo_Canal', 'N_Cauce', 'Barrio', 'Vereda'",
nargs='+', default = [10,11,12,13,14], type = int)
# Read all the arguments
args=parser.parse_args()
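# Example invocation (hypothetical paths and values, for illustration only):
#   python Genera_kml.py cuenca.nc 120 12.7 red_hidrica.kml precip.tif \
#       -g geomorfo.pkl -t figuras/ -s info_cauces.shp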
print args.expoMax
#-----------------------------------------------------------------------------------------------------
# Load the basin and write a kml
#-----------------------------------------------------------------------------------------------------
# Load the basin
cu = wmf.SimuBasin(0,0,0,0, rute=args.cuencaNC)
# Check whether the precipitation is a path or a number
try:
Precip = float(args.precip)
except:
Precip, p = wmf.read_map_raster(args.precip)
Precip = cu.Transform_Map2Basin(Precip, p)
# Compute mean flows and regionalization of maxima
cu.GetQ_Balance(Precip)
Qmax = cu.GetQ_Max(cu.CellQmed, args.coefMax, args.expoMax)
Qmin = cu.GetQ_Min(cu.CellQmed, args.coefMin, args.expoMin)
Qmin[Qmin<0] = 0
DictQ = {'Qmed': cu.CellQmed}
for c,k in enumerate(['2.33', '5', '10', '25', '50', '100']):
DictQ.update({'Qmin'+k:Qmin[c]})
DictQ.update({'Qmax'+k:Qmax[c]})
# Save the base kml
cu.Save_Net2Map(args.ruta, umbral=args.umbral, DriverFormat = 'kml',
NumTramo=True,
dx = args.dx,
Dict = DictQ,
formato = '%.3f')
#-----------------------------------------------------------------------------------------------------
# Fix the header and change colors
#-----------------------------------------------------------------------------------------------------
# Read the kml
f = open(args.ruta,'r')
LinKml = f.readlines()
f.close()
# Dictionary with colors for Horton order
DictColorHort = {'viejo':{'1': 'ffF1AE53', '2': 'ffC77E26', '3': 'ff9E4C22', '4': 'ff712010', '5': '03234D'},
'nuevo':{'1': '#53AEF1', '2': '#267EC7', '3': '#224C9E', '4': '#102071', '5': '#03234D'}}
flag = True
while flag:
try:
# Set the style of the stream
pos = LinKml.index('\t<Style><LineStyle><color>ff0000ff</color></LineStyle><PolyStyle><fill>0</fill></PolyStyle></Style>\n')
horton = LinKml[pos+3].split('>')[1].split('<')[0]
LinKml[pos] ='\t<Style><LineStyle><color>'+DictColorHort['viejo'][horton]+'</color><width>'+horton+'</width></LineStyle><PolyStyle><fill>0</fill></PolyStyle></Style>\n'
# Set the description
codigo = str(int(float(LinKml[pos+4].split('>')[1].split('<')[0])))
Longitud = '%.4f' % float(LinKml[pos+2].split('>')[1].split('<')[0])
LinKml[pos+2] = '\t\t<SimpleData name="Long[km]">'+Longitud+'</SimpleData>\n'
LinKml[pos+4] = '\t\t<SimpleData name="Codigo">'+codigo+'</SimpleData>\n'
LinKml.insert(pos+4,'\t\t<SimpleData name="color_linea">'+DictColorHort['nuevo'][horton]+'</SimpleData>\n')
LinKml.insert(pos+4,'\t\t<SimpleData name="grosor_linea">'+horton+'</SimpleData>\n')
nombre = '<name> Resultados Simulacion Hidrologica Tramo '+codigo+' </name>\n'
LinKml.insert(pos,nombre)
except:
flag = False
# Static header
LinKml[4] = LinKml[4].replace('float', 'string')
LinKml[5] = LinKml[5].replace('int', 'string')
LinKml[6] = LinKml[6].replace('int', 'string')
LinKml[6] = LinKml[6].replace('Tramo', 'Codigo')
LinKml.insert(7,'\t<SimpleField name="color_linea" type="string"></SimpleField>\n', )
LinKml.insert(7,'\t<SimpleField name="grosor_linea" type="string"></SimpleField>\n', )
LinKml.insert(7,'\t<SimpleField name="Municipio" type="string"></SimpleField>\n', )
LinKml.insert(7,'\t<SimpleField name="Tipo_Canal" type="string"></SimpleField>\n', )
LinKml.insert(7,'\t<SimpleField name="N_Cauce" type="string"></SimpleField>\n', )
LinKml.insert(7,'\t<SimpleField name="Barrio" type="string"></SimpleField>\n', )
LinKml.insert(7,'\t<SimpleField name="Vereda" type="string"></SimpleField>\n', )
LinKml.insert(7,'\t<SimpleField name="Comuna" type="string"></SimpleField>\n', )
LinKml.insert(7,'\t<SimpleField name="Resolucion" type="string"></SimpleField>\n', )
LinKml.insert(7,'\t<SimpleField name="AreaCuenca" type="string"></SimpleField>\n', )
LinKml.insert(7,'\t<SimpleField name="CentroX" type="string"></SimpleField>\n', )
LinKml.insert(7,'\t<SimpleField name="CentroY" type="string"></SimpleField>\n', )
LinKml.insert(7,'\t<SimpleField name="AlturaMax" type="string"></SimpleField>\n', )
LinKml.insert(7,'\t<SimpleField name="AlturaMin" type="string"></SimpleField>\n', )
LinKml.insert(7,'\t<SimpleField name="LongCauce" type="string"></SimpleField>\n', )
LinKml.insert(7,'\t<SimpleField name="LongCuenca" type="string"></SimpleField>\n', )
LinKml.insert(7,'\t<SimpleField name="PendCauce" type="string"></SimpleField>\n', )
LinKml.insert(7,'\t<SimpleField name="PendCuenca" type="string"></SimpleField>\n', )
LinKml.insert(7,'\t<SimpleField name="TiempoC" type="string"></SimpleField>\n', )
LinKml.insert(7,'\t<SimpleField name="FiguraTiempoC" type="string"></SimpleField>\n', )
for i in range(7,40):
LinKml[i] = LinKml[i].replace('float','string')
# Find the nodes and their location
Flag = True
Nodos = []
NodosSolos = []
cont = 1
while Flag:
try:
pos = LinKml.index( '\t\t<SimpleData name="Codigo">'+str(cont)+'</SimpleData>\n')
Nodos.append([cont, pos])
NodosSolos.append(cont)
cont += 1
except:
if cont == 1:
cont = 2
else:
Flag = False
#-----------------------------------------------------------------------------------------------------
# Attach the information on neighborhoods, veredas, etc.
#-----------------------------------------------------------------------------------------------------
if args.shpinfo != None:
# Read the shapefile
D = ogr.Open(args.shpinfo)
L = D.GetLayer()
Nodos = []
for i in range(L.GetFeatureCount()):
f = L.GetFeature(i)
Nodos.append(int(f.GetFieldAsString(1).split(' ')[-1]))
# Put the values into the kml
Flag = True
cont = 1
try:
pos = LinKml.index( '\t\t<SimpleData name="Codigo">'+str(cont)+'</SimpleData>\n')
except:
cont += 1
pos = LinKml.index( '\t\t<SimpleData name="Codigo">'+str(cont)+'</SimpleData>\n')
while Flag:
try:
# Find the position for that index
pos = LinKml.index( '\t\t<SimpleData name="Codigo">'+str(cont)+'</SimpleData>\n')
# Include info on neighborhoods and the rest
posShp = Nodos.index(cont)
f = L.GetFeature(posShp)
# Take the fields for the description
ListaNombres = ['Municipio','Tipo_Canal', 'N_Cauce', 'Barrio', 'Vereda']
for i,n in zip(range(10, 15), ListaNombres):
if f.GetFieldAsString(i) != '':
LinKml.insert(pos, '\t\t<SimpleData name="'+n+'">'+f.GetFieldAsString(i)+'</SimpleData>\n')
# Update to look for the next one
cont += 1
except:
Flag = False
#-----------------------------------------------------------------------------------------------------
# Attach the geomorphology information and the Tc figure
#-----------------------------------------------------------------------------------------------------
if args.geomorfo != None:
# Read the dictionary with the geomorphology info
f = open(args.geomorfo,'r')
DictGeo = pickle.load(f)
f.close()
# Insert the geomorphology
Flag = True
cont = 1
try:
pos = LinKml.index( '\t\t<SimpleData name="Codigo">'+str(cont)+'</SimpleData>\n')
except:
cont += 1
pos = LinKml.index( '\t\t<SimpleData name="Codigo">'+str(cont)+'</SimpleData>\n')
while Flag:
try:
# Find the position for that index
pos = LinKml.index( '\t\t<SimpleData name="Codigo">'+str(cont)+'</SimpleData>\n')
# Get the geomorphology and Tc for the node
DG = DictGeo[str(cont)]['Geo']
Tc = np.median(DictGeo[str(cont)]['Tc'].values())
# Geomorphological parameters
L1 = ['Area[km2]','Centro_[X]','Centro_[Y]','Hmax_[m]','Hmin_[m]',
'Long_Cau [km]', 'Long_Cuenca [km]', 'Pend_Cauce [%]',
'Pend_Cuenca [%]']
L2 = ['AreaCuenca','CentroX','CentroY','AlturaMax','AlturaMin',
'LongCauce','LongCuenca','PendCauce','PendCuenca']
for k1,k2 in zip(L1, L2):
var = '%.3f' % DG[k1]
LinKml.insert(pos, '\t\t<SimpleData name="'+k2+'">'+var+'</SimpleData>\n')
# Time of concentration
var = '%.3f' % Tc
LinKml.insert(pos, '\t\t<SimpleData name="TiempoC">'+var+'</SimpleData>\n')
var = args.tiempoC + 'Tc_'+str(cont)+'.html'
LinKml.insert(pos, '\t\t<SimpleData name="FiguraTiempoC">'+var+'</SimpleData>\n')
# Update to look for the next one
cont += 1
except:
Flag = False
#-----------------------------------------------------------------------------------------------------
# Write the final kml
#-----------------------------------------------------------------------------------------------------
# Write the new kml
f = open(args.ruta,'w')
f.writelines(LinKml)
f.close()
print wmf.cu.dxp
|
gpl-3.0
|
shayneholmes/plover
|
plover/dictionary/test_rtfcre_dict.py
|
7
|
7651
|
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
from plover.dictionary.rtfcre_dict import load_dictionary, TranslationConverter, format_translation, save_dictionary
import mock
import re
import unittest
from cStringIO import StringIO
class TestCase(unittest.TestCase):
def test_converter(self):
styles = {1: 'Normal', 2: 'Continuation'}
convert = TranslationConverter(styles)
cases = (
('', ''),
(r'\-', '-'),
(r'\\', '\\'),
(r'\{', '{'),
(r'\}', '}'),
(r'\~', '{^ ^}'),
(r'\_', '-'),
('\\\r', '{#Return}{#Return}'),
('\\\n', '{#Return}{#Return}'),
(r'\cxds', '{^}'),
(r'pre\cxds ', '{pre^}'),
(r'pre\cxds ', '{pre^} '),
(r'pre\cxds', '{pre^}'),
(r'\cxds post', '{^post}'),
(r'\cxds in\cxds', '{^in^}'),
(r'\cxds in\cxds ', '{^in^}'),
(r'\cxfc', '{-|}'),
(r'\cxfl', '{>}'),
(r'pre\cxfl', 'pre{>}'),
(r'{\*\cxsvatdictflags N}', '{-|}'),
(r'{\*\cxsvatdictflags LN1}', '{-|}'),
(r'\par', '{#Return}{#Return}'),
# caseCATalyst declares new styles without a preceding \par so we treat
# it as an implicit par.
(r'\s1', '{#Return}{#Return}'),
# But if the \par is present we don't treat \s as an implicit par.
(r'\par\s1', '{#Return}{#Return}'),
# Continuation styles are indented too.
(r'\par\s2', '{#Return}{#Return}{^ ^}'),
# caseCATalyst punctuation.
(r'.', '{.}'),
(r'. ', '{.} '),
(r' . ', ' . '),
(r'{\cxa Q.}.', 'Q..'),
(r'Mr.', 'Mr.'), # Don't mess with period that is part of a word.
(r'.attribute', '.attribute'),
(r'{\cxstit contents}', 'contents'),
(r'{\cxfing c}', '{&c}'),
(r'{\cxp.}', '{.}'),
(r'{\cxp .}', '{.}'),
(r'{\cxp . }', '{.}'),
(r'{\cxp . }', '{.}'),
(r'{\cxp !}', '{!}'),
(r'{\cxp ?}', '{?}'),
(r'{\cxp ,}', '{,}'),
(r'{\cxp ;}', '{;}'),
(r'{\cxp :}', '{:}'),
('{\\cxp \'}', '{^\'}'),
('{\\cxp -}', '{^-^}'),
('{\\cxp /}', '{^/^}'),
('{\\cxp... }', '{^... ^}'),
('{\\cxp ") }', '{^") ^}'),
('{\\nonexistant }', ''),
('{\\nonexistant contents}', 'contents'),
('{\\nonexistant cont\\_ents}', 'cont-ents'),
('{\\*\\nonexistant }', ''),
('{\\*\\nonexistant contents}', ''),
('{eclipse command}', '{eclipse command}'),
('test text', 'test text'),
('test text', 'test{^ ^}text'),
(r'{\cxconf [{\cxc abc}]}', 'abc'),
(r'{\cxconf [{\cxc abc}|{\cxc def}]}', 'def'),
(r'{\cxconf [{\cxc abc}|{\cxc def}|{\cxc ghi}]}', 'ghi'),
(r'{\cxconf [{\cxc abc}|{\cxc {\cxp... }}]}', '{^... ^}'),
(r'be\cxds{\*\cxsvatdictentrydate\yr2006\mo5\dy10}', '{be^}'),
(r'{\nonexistant {\cxp .}}', '{.}'),
(r'{\*\nonexistant {\cxp .}}', ''),
)
failed = []
for before, after in cases:
if convert(before) != after:
failed.append((before, after))
for before, after in failed:
print 'convert(%s) != %s: %s' % (before, after, convert(before))
self.assertEqual(len(failed), 0)
def test_load_dict(self):
"""Test the load_dict function.
This test just tests load_dict so it mocks out the converters and just
verifies that they are called.
"""
expected_styles = {
0: 'Normal',
1: 'Question',
2: 'Answer',
3: 'Colloquy',
4: 'Continuation Q',
5: 'Continuation A',
6: 'Continuation Col',
7: 'Paren',
8: 'Centered',
}
header = '\r\n'.join(
[r'{\rtf1\ansi\cxdict{\*\cxrev100}{\*\cxsystem Fake Software}'] +
[r'{\s%d %s;}' % (k, v) for k, v in expected_styles.items()] +
['}'])
footer = '\r\n}'
def make_dict(s):
return ''.join((header, s, footer))
def assertEqual(a, b):
self.assertEqual(a._dict, b)
this = self
class Converter(object):
def __init__(self, styles):
this.assertEqual(styles, expected_styles)
def __call__(self, s):
if s == 'return_none':
return None
return 'converted(%s)' % s
convert = Converter(expected_styles)
normalize = lambda x: 'normalized(%s)' % x
cases = (
# Empty dictionary.
('', {}),
# Only one translation.
('{\\*\\cxs SP}translation', {'SP': 'translation'}),
# Multiple translations no newlines.
('{\\*\\cxs SP}translation{\\*\\cxs S}translation2',
{'SP': 'translation', 'S': 'translation2'}),
# Multiple translations on separate lines.
('{\\*\\cxs SP}translation\r\n{\\*\\cxs S}translation2',
{'SP': 'translation', 'S': 'translation2'}),
('{\\*\\cxs SP}translation\n{\\*\\cxs S}translation2',
{'SP': 'translation', 'S': 'translation2'}),
# Escaped \r and \n handled
('{\\*\\cxs SP}trans\\\r\\\n', {'SP': 'trans\\\r\\\n'}),
# Escaped \r\n handled in mid translation
('{\\*\\cxs SP}trans\\\r\\\nlation', {'SP': 'trans\\\r\\\nlation'}),
# Whitespace is preserved in various situations.
('{\\*\\cxs S}t ', {'S': 't '}),
('{\\*\\cxs S}t {\\*\\cxs T}t ', {'S': 't ', 'T': 't '}),
('{\\*\\cxs S}t \r\n{\\*\\cxs T}t ', {'S': 't ', 'T': 't '}),
('{\\*\\cxs S}t \r\n{\\*\\cxs T} t \r\n', {'S': 't ', 'T': ' t '}),
# Translations are ignored if converter returns None
('{\\*\\cxs S}return_none', {}),
('{\\*\\cxs T}t t t ', {'T': 't t t '}),
# Conflicts result on only the last one kept.
('{\\*\\cxs T}t{\\*\\cxs T}g', {'T': 'g'}),
('{\\*\\cxs T}t{\\*\\cxs T}return_none', {'T': 't'}),
)
patch_path = 'plover.dictionary.rtfcre_dict'
with mock.patch.multiple(patch_path, normalize_steno=normalize,
TranslationConverter=Converter):
for s, expected in cases:
expected = dict((normalize(k), convert(v))
for k, v in expected.iteritems())
assertEqual(load_dictionary(make_dict(s)), expected)
def test_format_translation(self):
cases = (
('', ''),
('{^in^}', '\cxds in\cxds '),
('{pre^}', 'pre\cxds '),
('{pre^} ', 'pre\cxds '),
('{pre^} ', 'pre\cxds ')
)
failed = False
format_str = "format({}) != {}: {}"
for before, expected in cases:
result = format_translation(before)
if result != expected:
failed = True
print format_str.format(before, expected, result)
self.assertFalse(failed)
def test_save_dictionary(self):
f = StringIO()
d = {
'S/T': '{pre^}',
}
save_dictionary(d, f)
expected = '{\\rtf1\\ansi{\\*\\cxrev100}\\cxdict{\\*\\cxsystem Plover}{\\stylesheet{\\s0 Normal;}}\r\n{\\*\\cxs S///T}pre\\cxds \r\n}\r\n'
self.assertEqual(f.getvalue(), expected)
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
|
italomaia/turtle-linux
|
games/BubbleKing/lib/bubble.py
|
1
|
1336
|
import pygame
from pygame.locals import *
import sprite
import capsule
def init(g,r,p, big=False):
if not hasattr(g,'bubble_count'):
g.bubble_count = 0
if g.bubble_count >= 3:
return None
g.bubble_count += 1
print 'new bubble', g.bubble_count
if not big:
s = sprite.Sprite3(g,r,'bubble',(0,0,7,7))
else:
s = sprite.Sprite3(g,r,'big-bubble',(0,0,16,16))
s.big = big
s.rect.centerx = r.centerx
s.rect.centery = r.centery
s.groups.add('solid')
s.groups.add('bubble')
s.hit_groups.add('enemy')
s.hit = hit
g.sprites.append(s)
s.loop = loop
s.life = 30
s.strength = 1
s.deinit = deinit
if big: s.strength = 3
s.vx = 1
if p.facing == 'left':
s.vx = -1
s.vy = 0
s.rect.centerx += s.vx*(6+s.rect.width/2)
s.rect.centery -= 4
g.game.sfx['bubble'].play()
return s
def deinit(g,s):
print "bubble deinit"
g.bubble_count -= 1
def loop(g,s):
s.rect.x += s.vx*5
s.life -= 1
if s.life == 0:
s.active = False
def hit(g,a,b):
a.active = False
b.strength -= a.strength
if b.strength <= 0:
b.active = False
#if a.big:
capsule.init(g,b.rect)
else:
g.game.sfx['hit'].play()
#print 'bubble hit!'
|
gpl-3.0
|
supertom/ansible
|
lib/ansible/cli/playbook.py
|
21
|
9325
|
#!/usr/bin/env python
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
########################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import stat
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.inventory import Inventory
from ansible.parsing.dataloader import DataLoader
from ansible.playbook.block import Block
from ansible.playbook.play_context import PlayContext
from ansible.utils.vars import load_extra_vars
from ansible.utils.vars import load_options_vars
from ansible.vars import VariableManager
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
#---------------------------------------------------------------------------------------------------
class PlaybookCLI(CLI):
''' code behind ansible playbook cli'''
def parse(self):
# create parser for CLI options
parser = CLI.base_parser(
usage = "%prog playbook.yml",
connect_opts=True,
meta_opts=True,
runas_opts=True,
subset_opts=True,
check_opts=True,
inventory_opts=True,
runtask_opts=True,
vault_opts=True,
fork_opts=True,
module_opts=True,
)
# ansible playbook specific opts
parser.add_option('--list-tasks', dest='listtasks', action='store_true',
help="list all tasks that would be executed")
parser.add_option('--list-tags', dest='listtags', action='store_true',
help="list all available tags")
parser.add_option('--step', dest='step', action='store_true',
help="one-step-at-a-time: confirm each task before running")
parser.add_option('--start-at-task', dest='start_at_task',
help="start the playbook at the task matching this name")
self.options, self.args = parser.parse_args(self.args[1:])
self.parser = parser
if len(self.args) == 0:
raise AnsibleOptionsError("You must specify a playbook file to run")
display.verbosity = self.options.verbosity
self.validate_conflicts(runas_opts=True, vault_opts=True, fork_opts=True)
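# Example invocations this parser accepts (hypothetical playbook name):
#   ansible-playbook site.yml --list-tasks
#   ansible-playbook site.yml --step --start-at-task "some task name"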
def run(self):
super(PlaybookCLI, self).run()
# Manage passwords
sshpass = None
becomepass = None
vault_pass = None
passwords = {}
# don't deal with privilege escalation or passwords when we don't need to
if not self.options.listhosts and not self.options.listtasks and not self.options.listtags and not self.options.syntax:
self.normalize_become_options()
(sshpass, becomepass) = self.ask_passwords()
passwords = { 'conn_pass': sshpass, 'become_pass': becomepass }
loader = DataLoader()
if self.options.vault_password_file:
# read vault_pass from a file
vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader=loader)
loader.set_vault_password(vault_pass)
elif self.options.ask_vault_pass:
vault_pass = self.ask_vault_passwords()[0]
loader.set_vault_password(vault_pass)
# initial error check, to make sure all specified playbooks are accessible
# before we start running anything through the playbook executor
for playbook in self.args:
if not os.path.exists(playbook):
raise AnsibleError("the playbook: %s could not be found" % playbook)
if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
raise AnsibleError("the playbook: %s does not appear to be a file" % playbook)
# create the variable manager, which will be shared throughout
# the code, ensuring a consistent view of global variables
variable_manager = VariableManager()
variable_manager.extra_vars = load_extra_vars(loader=loader, options=self.options)
variable_manager.options_vars = load_options_vars(self.options)
# create the inventory, and filter it based on the subset specified (if any)
inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=self.options.inventory)
variable_manager.set_inventory(inventory)
# Note: slightly wrong, this is written so that implicit localhost
# (which is not returned in list_hosts()) is taken into account for
# warning if inventory is empty. But it can't be taken into account for
# checking if limit doesn't match any hosts. Instead we don't worry about
# limit if only implicit localhost was in inventory to start with.
#
# Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts())
no_hosts = False
if len(inventory.list_hosts()) == 0:
# Empty inventory
display.warning("provided hosts list is empty, only localhost is available")
no_hosts = True
inventory.subset(self.options.subset)
if len(inventory.list_hosts()) == 0 and no_hosts is False:
# Invalid limit
raise AnsibleError("Specified --limit does not match any hosts")
# create the playbook executor, which manages running the plays via a task queue manager
pbex = PlaybookExecutor(playbooks=self.args, inventory=inventory, variable_manager=variable_manager, loader=loader, options=self.options, passwords=passwords)
results = pbex.run()
if isinstance(results, list):
for p in results:
display.display('\nplaybook: %s' % p['playbook'])
for idx, play in enumerate(p['plays']):
msg = "\n play #%d (%s): %s" % (idx + 1, ','.join(play.hosts), play.name)
mytags = set(play.tags)
msg += '\tTAGS: [%s]' % (','.join(mytags))
if self.options.listhosts:
playhosts = set(inventory.get_hosts(play.hosts))
msg += "\n pattern: %s\n hosts (%d):" % (play.hosts, len(playhosts))
for host in playhosts:
msg += "\n %s" % host
display.display(msg)
all_tags = set()
if self.options.listtags or self.options.listtasks:
taskmsg = ''
if self.options.listtasks:
taskmsg = ' tasks:\n'
def _process_block(b):
taskmsg = ''
for task in b.block:
if isinstance(task, Block):
taskmsg += _process_block(task)
else:
if task.action == 'meta':
continue
all_tags.update(task.tags)
if self.options.listtasks:
cur_tags = list(mytags.union(set(task.tags)))
cur_tags.sort()
if task.name:
taskmsg += " %s" % task.get_name()
else:
taskmsg += " %s" % task.action
taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags)
return taskmsg
all_vars = variable_manager.get_vars(loader=loader, play=play)
play_context = PlayContext(play=play, options=self.options)
for block in play.compile():
block = block.filter_tagged_tasks(play_context, all_vars)
if not block.has_tasks():
continue
taskmsg += _process_block(block)
if self.options.listtags:
cur_tags = list(mytags.union(all_tags))
cur_tags.sort()
taskmsg += " TASK TAGS: [%s]\n" % ', '.join(cur_tags)
display.display(taskmsg)
return 0
else:
return results
|
gpl-3.0
|
creasyw/IMTAphy
|
wnsbase/playground/builtins/Push/Push.py
|
3
|
3957
|
###############################################################################
# This file is part of openWNS (open Wireless Network Simulator)
# _____________________________________________________________________________
#
# Copyright (C) 2004-2007
# Chair of Communication Networks (ComNets)
# Kopernikusstr. 16, D-52074 Aachen, Germany
# phone: ++49-241-80-27910,
# fax: ++49-241-80-22242
# email: [email protected]
# www: http://www.openwns.org
# _____________________________________________________________________________
#
# openWNS is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License version 2 as published by the
# Free Software Foundation;
#
# openWNS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sys
from wnsbase.playground.Tools import *
import wnsbase.playground.plugins.Command
from wnsbase.playground.builtins.Update.Update import UpdateCommand
import wnsbase.playground.Core
core = wnsbase.playground.Core.getCore()
class PushCommand(wnsbase.playground.plugins.Command.Command):
def __init__(self):
usage = "\n%prog push\n\n"
rationale = "Push the whole project tree and all its modules to remote location from projects.py."
usage += rationale
usage += """
Push uploads all projects to the URL provided in projects.py. Use --noAsk option to suppress questions.
Use together with --configFile to supply a different projects.py file containing different target locations.
Use --create-prefix option if target directory does not exist.
See ./playground --help for more information.
"""
wnsbase.playground.plugins.Command.Command.__init__(self, "push", rationale, usage)
self.optParser.add_option("", "--create-prefix",
dest = "createPrefix", default = False,
action = "store_true",
help = "create new remote repository if not present")
def run(self):
def push(project, otherProjects):
otherURL = None
if otherProjects is None:
otherURL = project.getRCSUrl()
else:
print "Checking for another branch"
otherProject = None
for p in otherProjects.all:
if project.getDir() == p.getDir():
otherProject = p
if otherProject is None:
print "WARNING: The alternate projects file does not contain %s" % project.getDir()
print "Skipping %s" % project.getDir()
else:
otherURL = otherProject.getRCSUrl()
rcs = project.getRCS()
if rcs.isPinned():
sys.stdout.write("\nSkipping module in %s, because it is pinned to %s\n\n"
% (project.getDir(), rcs.getPinnedPatchLevel()))
return
checkForConflictsAndExit(".")
core = wnsbase.playground.Core.getCore()
if otherURL is not None:
warning = "Do you really want to push " + project.getDir() + " to " + otherURL
if (core.userFeedback.askForConfirmation(warning)):
print "\nPushing '" + project.getDir() + " to " + otherURL + "' ..."
rcs.push(otherURL, self.options.createPrefix).realtimePrint()
core._process_hooks("_pre_push")
core.foreachProject(push, otherProjects = core.otherProjects)
|
gpl-2.0
|
google/earthengine-community
|
samples/python/guides/images09.py
|
1
|
1565
|
# Copyright 2020 The Google Earth Engine Community Authors
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Earth Engine Developer's Guide examples for 'Images - Relational, conditional and Boolean operations'."""
# [START earthengine__images09__where_operator]
# Load a cloudy Landsat 8 image.
image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20130603')
# Load another image to replace the cloudy pixels.
replacement = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20130416')
# Compute a cloud score band.
cloud = ee.Algorithms.Landsat.simpleCloudScore(image).select('cloud')
# Set cloudy pixels to the other image.
replaced = image.where(cloud.gt(10), replacement)
# Define a map centered on San Francisco Bay.
map_replaced = folium.Map(location=[37.4675, -122.1363], zoom_start=9)
# Add the image layer to the map and display it.
map_replaced.add_ee_layer(replaced,
{'bands': ['B5', 'B4', 'B3'], 'min': 0, 'max': 0.5},
'clouds replaced')
display(map_replaced)
# [END earthengine__images09__where_operator]
|
apache-2.0
|
andresailer/DIRAC
|
Resources/Catalog/FileCatalogFactory.py
|
9
|
2630
|
""" FileCatalogFactory class to create file catalog client objects according to the
configuration description
"""
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getCatalogPath
from DIRAC.Resources.Catalog.FileCatalogProxyClient import FileCatalogProxyClient
from DIRAC.Core.Utilities import ObjectLoader
__RCSID__ = "$Id$"
class FileCatalogFactory(object):
""" Factory of file catalog objects. Only exposes createCatalog() method
"""
def __init__( self ):
self.log = gLogger.getSubLogger( 'FileCatalogFactory' )
self.catalogPath = ''
def createCatalog( self, catalogName, useProxy = False ):
""" Create a file catalog object from its name and CS description
"""
catalogPath = getCatalogPath( catalogName )
catalogType = gConfig.getValue( catalogPath + '/CatalogType', catalogName )
catalogURL = gConfig.getValue( catalogPath + '/CatalogURL', "DataManagement/" + catalogType )
optionsDict = {}
result = gConfig.getOptionsDict( catalogPath )
if result['OK']:
optionsDict = result['Value']
if useProxy:
result = self.__getCatalogClass( catalogType )
if not result['OK']:
return result
catalogClass = result['Value']
methods = catalogClass.getInterfaceMethods()
catalog = FileCatalogProxyClient( catalogName )
catalog.setInterfaceMethods( methods )
return S_OK( catalog )
return self.__createCatalog( catalogName, catalogType, catalogURL, optionsDict )
def __getCatalogClass( self, catalogType ):
objectLoader = ObjectLoader.ObjectLoader()
result = objectLoader.loadObject( 'Resources.Catalog.%sClient' % catalogType, catalogType + 'Client' )
if not result['OK']:
gLogger.error( 'Failed to load catalog object', '%s' % result['Message'] )
return result
def __createCatalog( self, catalogName, catalogType, catalogURL, optionsDict ):
self.log.debug( 'Creating %s client of type %s' % ( catalogName, catalogType ) )
result = self.__getCatalogClass( catalogType )
if not result['OK']:
return result
catalogClass = result['Value']
try:
optionsDict['url'] = catalogURL
catalog = catalogClass( **optionsDict )
self.log.debug( 'Loaded module %sClient' % catalogType )
return S_OK( catalog )
except Exception as x:
errStr = "Failed to instantiate %s()" % ( catalogType )
gLogger.exception( errStr, lException = x )
return S_ERROR( errStr )
# Catalog module was not loaded
return S_ERROR( 'No suitable client found for %s' % catalogName )
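# Minimal usage sketch (hypothetical catalog name; result is the S_OK/S_ERROR dict used above):
#   result = FileCatalogFactory().createCatalog( 'SomeCatalog' )
#   if result['OK']:
#     catalog = result['Value']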
|
gpl-3.0
|
joemicro/Manufacturing
|
productionform.py
|
1
|
61536
|
#// Libraries
import sys
import os
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from sqlalchemy import *
from sqlalchemy.orm import *
#// My Imports
import ui_forms.ui_productionform
from databaseschema import *
from genericdelegates import *
from functions import *
import modelsandviews
import itemform
import receiveform
from reports import *
import reporting
localTITLE = 'Production'
""" Story!!!!!!!
Step 1) get total FG Volume by Batch, by looping through base assembly, and getting the volume of the items attached to it
Step 2) update cost of batch details namely RM for batch, via running an update query.
Step 3) get cost of RM and divide by FG liters being produced (can't divide by rm liters, because FG liters are inflated)
will get cost by running a sum query on the batch details in the DB.
Step 4) sum Up the packaging cost, for each item.
Step 5) in the base assembly get the total cost for each batch based on volume and percentage of batch used.
ex. 301 has 50ltrs, has batch BCRM 100%, FRVN 100%, FORG 50%.
a liter BCRM = .25, FRVN = .10, FORG = .15. then RM for 301 will equal (.25 + .10 +(.15 * .5)) * 50
"""
def updateCostForBatch(session, date, journalID, batchID=None):
""" update cost in the batch detail table in the DB, based on a batchID,
need date and journal_id to pass to avgCost calculator """
batchIDFilter = BatchDetail.base_id==batchID
if not batchID:
batchIDFilter = ""
batchDetailQuery = session.query(BatchDetail).filter(batchIDFilter)
for item in batchDetailQuery:
bom_id = item.bom_id
cost = avgCost(bom_id, date, journalID)
item.cost = cost
session.flush()
session.commit()
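# Example call (hypothetical session, date and ids): refresh the RM costs of batch 7
#   updateCostForBatch(session, '2013-05-01', journalID=42, batchID=7)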
#==============================================
### Setup Batches Model========================
class Batch(object):
def __init__(self, base_id=None, batch_id=None, batch_num=None, batch_desc=None, rm_cost=None,
fg_volume=None, per_lt=None, used=None):
self.base_id = base_id
self.batch_id = batch_id
self.batch_num = batch_num
self.batch_desc = batch_desc
self.rm_cost = rm_cost
self.fg_volume = fg_volume
self.per_lt = per_lt
self.used = used
BATCH, NUM, BATCH_DESC, BATCH_COST, FG_VOLUME, PER_LT, USED = range(7)
class ProductionBatchesModel(QAbstractTableModel):
### Model Initializer =====================
def __init__(self, baseModel, parent=None):
super(ProductionBatchesModel, self).__init__(parent)
self.records = []
self.records.append(Batch())
self.baseAssembly = baseModel
self.productionDetail = None
### Base Implementation ===================
def rowCount(self, index=QModelIndex()):
return len(self.records)
def columnCount(self, index=QModelIndex()):
return 7
def headerData(self, section, orientation, role):
if role != Qt.DisplayRole:
return QVariant()
if orientation == Qt.Horizontal:
if section == BATCH:
return QVariant('Batch ID')
elif section == NUM:
return QVariant('Batch No.')
elif section == BATCH_DESC:
return QVariant('Description')
elif section == BATCH_COST:
return QVariant('RM Cost')
elif section == FG_VOLUME:
return QVariant('FG Volume')
elif section == PER_LT:
return QVariant('Per Lt.')
elif section == USED:
return QVariant('Unused')
return QVariant(section + 1)
def flags(self, index):
flag = QAbstractTableModel.flags(self, index)
if index.column() == BATCH:
flag |= Qt.ItemIsEditable
return flag
def data(self, index, role=Qt.DisplayRole):
if not index.isValid() or not (0 <= index.row() < len(self.records)):
return QVariant()
record = self.records[index.row()]
column = index.column()
if role == Qt.DisplayRole:
if column == BATCH:
return QVariant(record.batch_id)
elif column == NUM:
return QVariant(record.batch_num)
elif column == BATCH_DESC:
return QVariant(record.batch_desc)
elif column == BATCH_COST:
if not record.rm_cost:
return QVariant(record.rm_cost)
return QVariant(round(record.rm_cost, 4))
elif column == FG_VOLUME:
if not record.fg_volume:
return QVariant(record.fg_volume)
return QVariant(round(record.fg_volume, 4))
elif column == PER_LT:
cost = float(getType(record.rm_cost))
volume = float(getType(record.fg_volume))
per_lt = nonZero(cost, 0) / nonZero(volume, 1)
return QVariant(round(per_lt, 4))
elif column == USED:
if not record.used:
return QVariant()
return QVariant('{:.0%}'.format(record.used))
elif role == Qt.BackgroundColorRole:
if (abs(nonZero(record.used, 0)) * 100) > 2:
return QVariant(QColor(255, 130, 130))
return QVariant()
def setData(self, index, value, role=Qt.EditRole):
if index.isValid() and role == Qt.EditRole:
row = index.row()
record = self.records[row]
column = index.column()
if column == BATCH:
batch_id = value.toInt()[0]
record.batch_id = batch_id
base_id = dLookup(BatchHeader.base_id, BatchHeader.batch_id==batch_id)
record.base_id = base_id
base_num = dLookup(BaseHeader.base_no, BaseHeader.base_id==base_id)
record.batch_num = base_num
desc = dLookup(BaseHeader.base_desc, BaseHeader.base_id==base_id)
record.batch_desc = desc
cost = self.calcCost(record)
record.rm_cost = cost
volume = self.calcVolume(record)
record.fg_volume = volume
self.emit(SIGNAL('dataChanged(QModelIndex, QModelIndex)'), index, index)
return True
return False
def insertRows(self, position, rows=1, index=QModelIndex()):
self.beginInsertRows(QModelIndex(), position, position + rows - 1)
for row in range(rows):
self.records.insert(position + row + 1, Batch())
self.endInsertRows()
return True
def removeRows(self, position, rows=1, index=QModelIndex()):
self.beginRemoveRows(QModelIndex(), position, position + rows - 1)
self.records = self.records[:position] + self.records[position + rows:]
self.endRemoveRows()
return True
### Data and Calculations =====================
def calcUsed(self, record):
base_id = record.base_id
base_assembly_list = self.baseAssembly.filteredList(base_id)
base_value = 0
for base in base_assembly_list:
if base.base_id:
base_value += getType(base.value)
batch_value = record.rm_cost
percent = (float(getType(batch_value)) - float(getType(base_value))) / float(nonZero(batch_value, 1))
return percent
def calcVolume(self, record):
base_id = record.base_id
baseAssemblyList = self.baseAssembly.filteredList(base_id)
volume = 0
for base in baseAssemblyList:
fgd_id = base.item_id
iterList = self.productionDetail.filteredList(fgd_id)
if iterList:
volume += self.productionDetail.getSumVolume(iterList)
volume = float(getType(volume))
return volume
def calcCost(self, record):
batch_id = record.batch_id
costQuery = Session().query(func.sum(BatchDetail.cost * BatchDetail.bom_qty).label('sumCost')).filter(BatchDetail.base_id == batch_id)
for i in costQuery:
sum_cost = i.sumCost
sum_cost = float(getType(sum_cost))
return sum_cost
def sumUpCost(self, record):
assert isinstance(record, Batch)
rm_cost = float(getType(record.rm_cost))
return rm_cost
def getSumCost(self):
sumCost = sum(map(self.sumUpCost, self.records), 0.0)
return sumCost
def updateModel(self):
self.beginResetModel()
for record in self.records:
record.rm_cost = self.calcCost(record)
record.fg_volume = self.calcVolume(record)
record.used = self.calcUsed(record)
self.endResetModel()
def recordBatchDetailsToRMD(self, session, journal_id, date):
records_ = []
adjustments = []
for record in self.records:
batch_id = record.batch_id
batch = session.query(BatchHeader).filter(BatchHeader.batch_id==batch_id)
batch.update({'journal_id': journal_id})
batchDetails = session.query(BatchDetail).filter(BatchDetail.base_id==batch_id)
for item in batchDetails:
bom_id = item.bom_id
qty = item.bom_qty
cost = item.cost
total = getType(qty) * getType(cost)
records_ += [BatchRMD(journal_id, bom_id, qty, cost, total, batch_id)]
adjRmd = adjustAvgCost(session, bom_id, str(date), journal_id)
if adjRmd:
adjustments += adjRmd
return (records_, adjustments)
def load(self, batchList):
self.beginResetModel()
self.records = []
self.endResetModel()
for record in batchList:
batch_id = record.batch_id
base_id = dLookup(BatchHeader.base_id, BatchHeader.batch_id==batch_id)
base_num = dLookup(BaseHeader.base_no, BaseHeader.base_id==base_id)
desc = dLookup(BaseHeader.base_desc, BaseHeader.base_id==base_id)
self.records.append(Batch(base_id, batch_id, base_num, desc))
self.updateModel()
def clear(self):
self.beginResetModel()
self.records = []
self.records.append(Batch())
self.endResetModel()
def copy(self, indexList):
clipboard = QApplication.clipboard()
clipText = QString()
indexList.sort()
previous = indexList[0]
for current in indexList:
text = self.data(current, Qt.DisplayRole).toString()
if current.row() != previous.row():
clipText.append('\n')
else:
clipText.append('\t')
clipText.append(text)
previous = current
clipText.remove(0, 1)
clipboard.setText(clipText)
def checkForExtraBatches(self):
for rec in self.records:
if not rec.batch_id:
continue
base_id = rec.base_id
base_num = rec.batch_num
baseList = self.baseAssembly.filteredList(base_id)
if not baseList:
QMessageBox.warning(None, 'Calculating Production', 'There is no base-assembly for Batch: %s' % base_num, QMessageBox.Ok)
return False
return True
#==============================================
### Setup Detail Model ========================
FGD_ID, ITEM, QTY, DESC, VOLUME, PACK, RM_COST, DIRECT_COST, COST, TOTAL, QTY_PK = range(11)
class ProductionDetailModel(QAbstractTableModel):
### Model Initializer ============
def __init__(self, batchModel, baseModel, bomModel, parent=None):
super(ProductionDetailModel, self).__init__(parent)
self.records = []
self.records.append(FGD())
self.headerList = ('ID', 'Item No', 'Qty', 'Description', 'Volume', 'Pack', 'RM Cost',
'Direct Cost', 'Cost', 'Total', 'PK Qty')
self.directCost = 0
self.baseAssembly = baseModel
self.bomAssembly = bomModel
self.batches = batchModel
### Base Implementation ============
def rowCount(self, index=QModelIndex()):
return len(self.records)
def columnCount(self, index=QModelIndex()):
return 10
def headerData(self, section, orientation, role):
if role != Qt.DisplayRole:
return QVariant()
if orientation == Qt.Horizontal:
return QVariant(self.headerList[section])
return QVariant(section + 1)
def flags(self, index):
flag = QAbstractTableModel.flags(self, index)
if index.column() in (ITEM, QTY, DESC):
flag |= Qt.ItemIsEditable
return flag
def data(self, index, role=Qt.DisplayRole):
if not index.isValid() or not (0 <= index.row() < len(self.records)):
return QVariant()
record = self.records[index.row()]
column = index.column()
if role == Qt.DisplayRole:
if column == FGD_ID:
return QVariant(record.fgd_id)
elif column == ITEM:
item_no = dLookup(Items.item_no, Items.item_id==record.item_id)
return QVariant(item_no)
elif column == QTY:
return QVariant(record.fgd_qty)
elif column == DESC:
return QVariant(record.fgd_desc)
elif column == VOLUME:
volume = self.volume(record.item_id)
return QVariant(volume)
elif column == PACK:
pack = self.pack(record.item_id)
return QVariant(pack)
elif column == RM_COST:
return QVariant(record.rm_cost)
elif column == DIRECT_COST:
return QVariant(record.direct_cost)
elif column == COST:
return QVariant(record.cost)
elif column == TOTAL:
cost = getType(record.cost)
qty = getType(record.fgd_qty)
total = qty * cost
return QVariant(total)
elif column == QTY_PK:
pack = self.pack(record.item_id)
if not pack:
return
pack = float(pack)
qty = record.fgd_qty
qty_pk = round(qty / pack, 0)
return QVariant(qty_pk)
elif role == Qt.EditRole:
if column == ITEM:
return QVariant(record.item_id)
return QVariant()
def setData(self, index, value, role=Qt.EditRole):
if index.isValid() and role == Qt.EditRole:
row = index.row()
record = self.records[row]
column = index.column()
fg_id = dMax(FGD.fgd_id) + 1
if column == ITEM:
item = value.toInt()[0]
record.item_id = item
if not record.fgd_id:
record.fgd_id = fg_id + row
desc = dLookup(Items.item_desc, Items.item_id==item)
record.fgd_desc = str(desc)
elif column == QTY:
qty, ok = value.toFloat()
if not ok:
return
record.fgd_qty = qty
record.rm_cost = self.calc_rmCost(record)
record.direct_cost = self.calc_directCost(record)
record.cost = self.calc_cost(record)
elif column == DESC:
record.fgd_desc = value.toString()
self.emit(SIGNAL('dataChanged(QModelIndex, QModelIndex)'), index, index)
return True
return False
def insertRows(self, position, rows=1, index=QModelIndex()):
self.beginInsertRows(QModelIndex(), position, position + rows - 1)
for row in range(rows):
self.records.insert(position + row + 1, FGD())
self.endInsertRows()
return True
def removeRows(self, position, rows=1, index=QModelIndex()):
self.beginRemoveRows(QModelIndex(), position, position + rows - 1)
self.records = self.records[:position] + self.records[position + rows:]
self.endRemoveRows()
return True
### Functions, Data and calculations
def volume(self, item_id):
volume = dLookup(Items.volume, Items.item_id==item_id)
volume = getType(volume)
return volume
def pack(self, item_id):
pack = dLookup(Items.pack, Items.item_id==item_id)
return pack
def sumUpTotal(self, record):
qty = getType(record.fgd_qty)
cost = getType(record.cost)
total = qty * cost
return float(total)
def sumUpVolume(self, record):
assert isinstance(record, FGD)
qty = record.fgd_qty
item_id = record.item_id
volume = dLookup(Items.volume, Items.item_id==item_id)
sumVolume = getType(qty) * getType(volume)
return float(sumVolume)
def getSumTotal(self, iterable=None):
if not iterable:
iterable = self.records
sumTotal = sum(map(self.sumUpTotal, iterable), 0.0)
return sumTotal
def getSumVolume(self, iterable=None):
if not iterable:
iterable = self.records
sumVolume = sum(map(self.sumUpVolume, iterable), 0.0)
return sumVolume
def calc_rmCost(self, record):
#// sum up the value of the batches associated with fgd_id, and divide it by qty, because it's per_lt * item_volume
#// the percentage is already accounted for in the value
fgd_id = record.fgd_id
baseAssemblyList = self.baseAssembly.filteredList(None, fgd_id)
batch_cost = sum(map(self.baseAssembly.sumUpValue, baseAssemblyList), 0.0)
rm_cost = nonZero(batch_cost, 0) / getType(record.fgd_qty)
#// simply enter the sum of bom cost, and will be multiplied by qty in the total column
bomAssemblyList = self.bomAssembly.filteredList(fgd_id)
bom_cost = sum(map(self.bomAssembly.sumUpCost, bomAssemblyList))
rm_cost += nonZero(bom_cost, 0)
rm_cost = round(rm_cost, 4)
return rm_cost
def calc_directCost(self, record):
""" calculate direct cost for item
the formula to calculate direct cost: directCost = volume((filing + labour) / sum(volume))
"""
_directCost_ = self.directCost
item_id = record.item_id
volume = self.volume(item_id)
sumVolume = self.getSumVolume(self.records)
directCost = nonZero(volume, 0) * (nonZero(_directCost_, 0) / nonZero(sumVolume, 1))
directCost = round(nonZero(directCost, 0), 4)
return directCost
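# e.g. (assumed numbers): with 100.0 of filling/labour spread over a total of
# 500 L, a 50 L item absorbs 50 * (100.0 / 500) = 10.0 of direct cost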
def calc_cost(self, record):
rmCost = record.rm_cost
directCost = record.direct_cost
cost = rmCost + directCost
return cost
def updateModel(self, directCost=0):
self.beginResetModel()
self.directCost = directCost
for record in self.records:
if record.item_id:
record.rm_cost = self.calc_rmCost(record)
record.direct_cost = self.calc_directCost(record)
record.cost = self.calc_cost(record)
self.endResetModel()
def checkForExtraBatches(self):
pass
def updateBaseAssemblyValue(self):
""" Goes through all base assemblies attached to the FG item, and calculates the value base on the batch,
it basically looks up the per liter cost for the batch and multiplies it by total volume used by fg item
multiplied by percentage"""
for record in self.records:
fgd_id = record.fgd_id # // get the fgd_id, because that's the unique key for the base assembly
if not record.item_id:
return
fg_volume = getType(self.volume(record.item_id)) * getType(record.fgd_qty) # // get total volume for item
# // create a list of indexes that contain base assemblies associated with this fgd_id
bIndex = self.batches.index(0, 1) # // specify an address to search in
beginIndex = self.baseAssembly.index(0, 0) # // specify a beginning index
baseIndexList = self.baseAssembly.match(beginIndex, Qt.DisplayRole, QVariant(fgd_id), hits=-1) # // create the list
# // now go through the base assembly list and enter the value
for i in baseIndexList:
row = i.row() # // get row number
base_num_index = self.baseAssembly.index(row, 1) # // specify index, to find the base_num
base_num = self.baseAssembly.data(base_num_index).toString() # // get base_num, so we could match it to batch
percentage_index = self.baseAssembly.index(row, 2)
percentage = self.baseAssembly.data(percentage_index).toFloat()[0]# // the percentage to be used in calculating value * percent
index_list = self.batches.match(bIndex, Qt.DisplayRole, QVariant(base_num)) # // get the index of the batch to get the per-lt cost
if not index_list:
QMessageBox.information(None, 'Calculating Production', 'Missing batch for Base: %s' % base_num, QMessageBox.Ok)
continue
per_lt_row = index_list[0].row()
per_lt_index = self.batches.index(per_lt_row, 5)
per_lt = self.batches.data(per_lt_index, Qt.DisplayRole).toFloat()[0]
value = round(fg_volume * per_lt * percentage, 4)
value_index = self.baseAssembly.index(row, 4)
if base_num:
self.baseAssembly.setData(value_index, QVariant(value), Qt.EditRole)
# print 'ID: %s, Base: %s, fgVolume: %s, volum: %s, qty: %s, per_lt: %s, percent: %s, value: %s' \
# % (fgd_id, base_num, fg_volume, self.volume(record.item_id), record.fgd_qty, per_lt, percentage, value)
# return
## Operations setup ============
def clear(self):
self.beginResetModel()
self.records = []
self.records.append(FGD())
self.endResetModel()
def filteredList(self, fgdId):
records_ = []
for record in self.records:
if record.item_id:
if str(record.fgd_id) == str(fgdId):
fgd_id = record.fgd_id
item_id = record.item_id
fgd_desc = record.fgd_desc
fgd_qty = record.fgd_qty
cost = record.cost
records_ += [FGD(fgd_id, item_id, fgd_desc, fgd_qty, cost)]
return records_
def save(self, journal_id):
records_ = []
for record in self.records:
if record.item_id:
fgd_id = int(record.fgd_id)
item_id = int(record.item_id)
fgd_desc = str(record.fgd_desc)
fgd_qty = unicode(record.fgd_qty)
rm_cost = unicode(record.rm_cost)
direct_cost = unicode(record.direct_cost)
cost = unicode(record.cost)
total = getType(fgd_qty) * getType(cost)
mix = dLookup(Items.mix_item, Items.item_id==item_id)
item_no = dLookup(Items.item_no, Items.item_id==item_id)
bom_id = dLookup(BOM.bom_id, BOM.bom_no==item_no)
if eval(mix):
records_ += [ReceiveRMD(journal_id, bom_id, fgd_desc, fgd_qty, cost, cost, 0, 'Production Detail', total)]
records_ += [FGD(fgd_id, item_id, fgd_desc, fgd_qty, cost, journal_id, rm_cost, direct_cost)]
else:
records_ += [FGD(fgd_id, item_id, fgd_desc, fgd_qty, cost, journal_id, rm_cost, direct_cost)]
return records_
def load(self, objectList, directCost=0):
self.beginResetModel()
self.records = []
self.endResetModel()
incr = 1
row_id_list = []
for item in objectList:
if isinstance(item, PrepDetail):
fg_id = dMax(FGD.fgd_id) + incr
item_id = item.item_id
fgd_qty = item.qty
fgd_desc = dLookup(Items.item_desc, Items.item_id==item_id)
self.records.append(FGD(fg_id, item_id, fgd_desc, fgd_qty))
incr += 1
row_id_list += [(item.pd_id, fg_id, item_id)]
elif isinstance(item, FGD):
fg_id = item.fgd_id
item_id = item.item_id
fgd_desc = item.fgd_desc
fgd_qty = item.fgd_qty
cost = item.cost
journal_id = item.journal_id
rm_cost = item.rm_cost
direct_cost = item.direct_cost
self.records.append(FGD(fg_id, item_id, fgd_desc, fgd_qty, cost, journal_id, rm_cost, direct_cost))
row_id_list = None
self.records.append(FGD())
return row_id_list
def copy(self, indexList):
clipboard = QApplication.clipboard()
clipText = QString()
indexList.sort()
previous = indexList[0]
for current in indexList:
text = self.data(current, Qt.DisplayRole).toString()
if current.row() != previous.row():
clipText.append('\n')
else:
clipText.append('\t')
clipText.append(text)
previous = current
clipText.remove(0, 1)
clipboard.setText(clipText)
def paste(self, position, index=QModelIndex()):
myList = []
clipboard = QApplication.clipboard()
text = clipboard.text()
rows = text.split('\n')
fgInc = position
for rec in rows:
col = rec.split('\t')
fg_id = dMax(FGD.fgd_id) + 1 + fgInc
item_id = dLookup(Items.item_id, Items.item_no==str(col[0]))
if item_id:
qty = float(getType(col[1])) if len(col) >= 2 else None
desc = col[2] if len(col) >= 3 else dLookup(Items.item_desc, Items.item_id==item_id)
myList += [FGD(fg_id, item_id, desc, qty)]
fgInc += 1
rowCount = len(myList)
self.beginInsertRows(QModelIndex(), position, position + rowCount - 1)
for row in range(rowCount):
self.records.insert(position + row, myList[row])
self.endInsertRows()
self.updateModel()
return rowCount
#=======================================================================
### Receive Form Setup =====
class ReceiveFormDialog(QDialog):
def __init__(self, model, bomModel, session, journal_id, parent=None):
super(ReceiveFormDialog, self).__init__(parent)
self.journal_id = journal_id
self.journal_date = self.parent().getDate()
self.session = session
self.model = model
saveButton = QPushButton('&Save')
saveButton.setVisible(False)
clearButton = QPushButton('C&lear')
closeButton = QPushButton('Close')
self.date_dateEdit = QDateEdit()
self.date_dateEdit.setCalendarPopup(True)
self.date_dateEdit.setDate(self.journal_date)
label = QLabel('Total')
self.v_total_label = QLabel()
self.v_total_label.setMinimumSize(QSize(96, 25))
self.v_total_label.setFrameShape(QFrame.Box)
self.detailView = QTableView()
self.detailView.setModel(self.model)
itemModel = bomModel
delegate = GenericDelegate(self)
delegate.insertDelegate(receiveform.ITEM, ComboDelegate(itemModel, True))
delegate.insertDelegate(receiveform.DESCRIPTION, PlainTextDelegate())
delegate.insertDelegate(receiveform.QTY, NumberDelegate())
delegate.insertDelegate(receiveform.PRICE, NumberDelegate())
delegate.insertDelegate(receiveform.MEMO, PlainTextDelegate())
self.detailView.setItemDelegate(delegate)
self.detailView.setColumnHidden(receiveform.SHIPPING, True)
self.detailView.setColumnHidden(receiveform.COST, True)
self.detailView.setColumnHidden(receiveform.MEMO, True)
spacer = QSpacerItem(20, 10, QSizePolicy.Expanding, QSizePolicy.Minimum)
buttonLayout = QHBoxLayout()
buttonLayout.addWidget(saveButton)
buttonLayout.addWidget(clearButton)
buttonLayout.addWidget(closeButton)
buttonLayout.addItem(spacer)
buttonLayout.addWidget(self.date_dateEdit)
buttonLayout.addItem(spacer)
buttonLayout.addWidget(label)
buttonLayout.addWidget(self.v_total_label)
layout = QVBoxLayout()
layout.addLayout(buttonLayout)
layout.addWidget(self.detailView)
self.setLayout(layout)
self.updateSumTotal()
self.date_dateEdit.dateChanged.connect(self.setParentDate)
saveButton.clicked.connect(lambda: self.save(self.journal_id, self.journal_date))
clearButton.clicked.connect(self.clear)
closeButton.clicked.connect(self.reject)
self.resize(QSize(600, 400))
self.setWindowTitle(localTITLE)
self.model.dataChanged.connect(self.setDirty)
self.model.dataChanged.connect(self.autoAddRow)
def setParentDate(self):
date = self.date_dateEdit.date()
self.parent().dateDateEdit.setDate(date)
self.journal_date = date.toPyDate()
def setDirty(self):
self.parent().setDirty()
self.updateSumTotal()
def setJournalInfo(self, journal_id, journal_date):
self.journal_date = journal_date
self.journal_id = journal_id
def contextMenuEvent(self, event):
menu = QMenu(self)
if self.detailView.hasFocus():
copyAction = menu.addAction('Copy', QObject, 'Ctrl+C')
pasteAction = menu.addAction('Paste', QObject, 'Ctrl+V')
insertAction = menu.addAction("Insert Line", QObject, "Ctrl+I")
deleteAction = menu.addAction("Delete Line", QObject, "Ctrl+D")
copyAction.triggered.connect(self.copy)
pasteAction.triggered.connect(self.paste)
self.connect(insertAction, SIGNAL("triggered()"), self.insertRow)
self.connect(deleteAction, SIGNAL("triggered()"), self.removeRow)
addActions(self, self.detailView, (copyAction, pasteAction, insertAction, deleteAction))
menu.exec_(event.globalPos())
def updateSumTotal(self):
sum_total = QString('%L1').arg(self.model.getSumTotal(), 0, 'f', 2)
self.v_total_label.setText(sum_total)
def copy(self):
if self.model.rowCount() <= 1:
return
selectedItems = self.detailView.selectionModel().selectedIndexes()
self.model.copy(selectedItems)
def paste(self):
row = self.detailView.currentIndex().row()
self.model.paste(row)
self.updateSumTotal()
def autoAddRow(self):
view = self.detailView
row = view.currentIndex().row()
if self.model.rowCount() == row + 1:
self.insertRow()
def insertRow(self):
view = self.detailView
index = view.currentIndex()
row = index.row()
self.model.insertRows(row)
view.setFocus()
view.setCurrentIndex(index)
def removeRow(self):
if self.model.rowCount() <= 1:
return
view = self.detailView
rowsSelected = view.selectionModel().selectedRows()
if not rowsSelected:
row = view.currentIndex().row()
rows = 1
else:
for i in rowsSelected:
row = i.row()
rows = len(rowsSelected)
row = row - rows + 1
self.model.removeRows(row, rows)
def save(self, journal_id, journal_date):
details, adjustments = self.model.save(journal_id, journal_date)
self.session.add_all(details)
self.session.add_all(adjustments)
self.sendToDB()
self.accept()
def delete(self):
self.session.query(ReceiveRMD).filter(ReceiveRMD.journal_id==self.journal_id).delete()
self.sendToDB()
def sendToDB(self):
try:
self.session.flush()
self.session.commit()
except Exception, e:
self.session.rollback()
raise e
def clear(self):
self.model.clear()
#=======================================================================
### Form Setup =============
class ProductionForm(QDialog, ui_forms.ui_productionform.Ui_ProductionForm):
def __init__(self, baseListModel, batchListModel, itemModel, bomModel, prepModel, parent=None):
super(ProductionForm, self).__init__(parent)
self.setupUi(self)
self.session = Session()
self.dirty = False
self.editing = False
self.record_id = None
self.current_record = None
self.directCost = 0
self.my_parent = parent
### setup texts and labels
self.v_prodID_label.setText(str(dMax(JournalHeader.journal_id) + 1))
self.dateDateEdit.setDate(self.my_parent.getDate())
### Prep combo box setup ###
prep_view = modelsandviews.ItemView(prepModel, False, self.prep_comboBox)
self.prep_comboBox.setModel(prepModel)
self.prep_comboBox.setView(prep_view)
self.prep_comboBox.setFixedWidth(150)
self.prep_comboBox.setCurrentIndex(-1)
self.prep_comboBox.setModelColumn(0)
self.prep_comboBox.setEditable(True)
### instantiate models
self.baseAssemblyModel = itemform.BaseAssemblyModel()
self.bomAssemblyModel = itemform.ItemAssemblyModel()
self.batchesModel = ProductionBatchesModel(self.baseAssemblyModel)
self.detailModel = ProductionDetailModel(self.batchesModel, self.baseAssemblyModel, self.bomAssemblyModel)
self.batchesModel.productionDetail = self.detailModel
self.receiveModel = receiveform.ReceivingDetailModel(self.session)
### setup detail view ###
self.itemModel = itemModel
tblView = self.productionTableView
tblView.setModel(self.detailModel)
delegate = GenericDelegate(self)
delegate.insertDelegate(ITEM, ComboDelegate(self.itemModel, True))
delegate.insertDelegate(QTY, NumberDelegate())
delegate.insertDelegate(DESC, PlainTextDelegate())
tblView.setItemDelegate(delegate)
tblView.setColumnWidth(ITEM, 50)
tblView.setColumnWidth(QTY, 75)
tblView.setColumnWidth(DESC, 250)
tblView.setColumnWidth(VOLUME, 50)
tblView.setColumnWidth(PACK, 50)
tblView.setColumnWidth(RM_COST, 75)
tblView.setColumnWidth(DIRECT_COST, 75)
tblView.setColumnWidth(COST, 75)
tblView.setColumnHidden(FGD_ID, True)
### setup Assembly views ###
self.bsView.setVisible(False)
baseModel = baseListModel
self.baseAssemblyProxy = QSortFilterProxyModel()
self.baseAssemblyProxy.setFilterKeyColumn(0)
self.baseAssemblyProxy.setSourceModel(self.baseAssemblyModel)
self.baseAssembly_view = modelsandviews.AssemblyTableView(self.baseAssemblyProxy, baseModel, self)
self.verticalLayout.addWidget(self.baseAssembly_view)
self.bmView.setVisible(False)
self.bomModel = bomModel
self.bomAssemblyProxy = QSortFilterProxyModel()
self.bomAssemblyProxy.setSourceModel(self.bomAssemblyModel)
self.bomAssemblyProxy.setFilterKeyColumn(0)
self.bomAssembly_view = modelsandviews.AssemblyTableView(self.bomAssemblyProxy, self.bomModel, self)
self.verticalLayout.addWidget(self.label_15)
self.verticalLayout.addWidget(self.bomAssembly_view)
### setup batches view ###
self.batchList = batchListModel
self.batchesTableView.setModel(self.batchesModel)
delegate = GenericDelegate(self)
delegate.insertDelegate(BATCH, ComboDelegate(self.batchList, False))
self.batchesTableView.setItemDelegate(delegate)
self.batchesTableView.setColumnWidth(BATCH, 50)
self.batchesTableView.setColumnWidth(NUM, 75)
self.batchesTableView.setColumnWidth(BATCH_DESC, 150)
self.batchesTableView.setColumnWidth(FG_VOLUME, 75)
self.batchesTableView.setColumnWidth(PER_LT, 75)
self.batchesTableView.setColumnWidth(USED, 50)
### setup receive form
self.dialog = ReceiveFormDialog(self.receiveModel, self.bomModel, self.session, None, self)
### Signal and Slot Setup ###
self.dateDateEdit.dateChanged.connect(self.setParentDate)
self.detailModel.dataChanged.connect(lambda: self.autoAddRow(self.productionTableView, self.detailModel))
self.batchesModel.dataChanged.connect(lambda: self.autoAddRow(self.batchesTableView, self.batchesModel))
self.detailModel.dataChanged.connect(self.addAssemblies)
self.productionTableView.selectionModel().currentRowChanged.connect(self.setFilter)
self.productionTableView.doubleClicked.connect(self.editItem)
self.prep_comboBox.currentIndexChanged.connect(self.fillWithPrep)
self.calcButton.clicked.connect(self.recalc)
self.saveButton.clicked.connect(self.save)
self.newButton.clicked.connect(self.clear)
self.receiveButton.clicked.connect(self.openReceive)
self.printButton.clicked.connect(self.printReport)
self.deleteButton.clicked.connect(lambda: self.delete(header=True))
self.closeButton.clicked.connect(self.accept)
self.setupConnection()
## == Form Behaviour Setup
def setupConnection(self):
""" connect every widget on form to the data changed function,
to set the form to dirty """
widgets = self.findChildren(QWidget)
for widget in widgets:
if isinstance(widget, (QLineEdit, QTextEdit)):
self.connect(widget, SIGNAL("textEdited(QString)"), self.setDirty)
elif isinstance(widget, QComboBox):
self.connect(widget, SIGNAL("activated(int)"), self.setDirty)
elif isinstance(widget, QCheckBox):
self.connect(widget, SIGNAL("stateChanged(int)"), self.setDirty)
def setDirty(self):
self.updateSumTotals()
self.dirty = True
self.setWindowTitle("%s - Editing..." % localTITLE)
def setParentDate(self):
date = self.dateDateEdit.date().toPyDate()
self.my_parent.setDate(date)
def getDate(self):
date = self.dateDateEdit.date()
date = date.toPyDate()
return date
def contextMenuEvent(self, event):
menu = QMenu(self)
if self.productionTableView.hasFocus():
view = self.productionTableView
model = self.detailModel
copyAction = menu.addAction('Copy', QObject)
pasteAction = menu.addAction('Paste', QObject)
insertAction = menu.addAction("Insert Line", QObject)
deleteAction = menu.addAction("Delete Line", QObject)
copyAction.triggered.connect(lambda: self.copy(view, model))
pasteAction.triggered.connect(lambda: self.paste(view, model))
insertAction.triggered.connect(lambda: self.insertRow(view, model))
deleteAction.triggered.connect(lambda: self.removeRow(view, model))
elif self.batchesTableView.hasFocus():
view = self.batchesTableView
model = self.batchesModel
copyAction = menu.addAction('Copy', QObject)
insertAction = menu.addAction("Insert Line", QObject)
deleteAction = menu.addAction("Delete Line", QObject)
copyAction.triggered.connect(lambda: self.copy(view, model))
insertAction.triggered.connect(lambda: self.insertRow(view, model))
deleteAction.triggered.connect(lambda: self.removeRow(view, model))
elif self.bomAssembly_view.hasFocus():
view = self.bomAssembly_view
model = self.bomAssemblyModel
copyAction = menu.addAction('Copy', QObject)
insertAction = menu.addAction("Insert Line", QObject)
deleteAction = menu.addAction("Delete Line", QObject)
copyAction.triggered.connect(lambda: self.copy(view, model))
insertAction.triggered.connect(lambda: self.insertRow(view, model))
deleteAction.triggered.connect(lambda: self.removeRow(view, model))
elif self.baseAssembly_view.hasFocus():
view = self.baseAssembly_view
model = self.baseAssemblyModel
copyAction = menu.addAction('Copy', QObject)
insertAction = menu.addAction("Insert Line", QObject)
deleteAction = menu.addAction("Delete Line", QObject)
copyAction.triggered.connect(lambda: self.copy(view, model))
insertAction.triggered.connect(lambda: self.insertRow(view, model))
deleteAction.triggered.connect(lambda: self.removeRow(view, model))
menu.exec_(event.globalPos())
def fgd_id(self):
index = self.productionTableView.currentIndex()
row = index.row()
idIndex = self.detailModel.index(row, 0)
fgd_id, ok = self.detailModel.data(idIndex, Qt.DisplayRole).toInt()
return (fgd_id, ok)
def copy(self, view, model):
if model.rowCount() <= 1:
return
selectedItems = view.selectionModel().selectedIndexes()
model.copy(selectedItems)
def paste(self, view, model):
row = view.currentIndex().row()
rows = model.paste(row)
return
for i in range(rows):
index = view.model().index(i + row, 1)
view.setCurrentIndex(index)
self.addAssemblies(recall=False)
self.updateSumTotals()
def autoAddRow(self, view, model):
self.setDirty()
row = view.currentIndex().row()
if model.rowCount() == row + 1:
self.insertRow(view, model)
def insertRow(self, view, model):
if view is not None:
index = view.currentIndex()
row = index.row() + 1
if model in (self.bomAssemblyModel, self.baseAssemblyModel):
fgd_id, ok = self.fgd_id()
if ok:
model.insertRows(row, item_id=fgd_id)
self.setFilter()
else:
model.insertRows(row)
view.setFocus()
view.setCurrentIndex(index)
def removeRow(self, view, model):
rowsSelected = view.selectionModel().selectedRows()
if not rowsSelected:
row = view.currentIndex().row()
rows = 1
else:
for i in rowsSelected:
row = i.row()
rows = len(rowsSelected)
row = row - rows + 1
if model in (self.baseAssemblyModel, self.bomAssemblyModel):
if model == self.baseAssemblyModel:
proxy_index = self.baseAssemblyProxy.index(view.currentIndex().row(), 0)
row = self.baseAssemblyProxy.mapToSource(proxy_index).row()
elif model == self.bomAssemblyModel:
proxy_index = self.bomAssemblyProxy.index(view.currentIndex().row(), 0)
row = self.bomAssemblyProxy.mapToSource(proxy_index).row()
rows = 1
model.removeRows(row, rows)
if model.rowCount() < 1:
self.insertRow(view, model)
self.updateSumTotals()
self.setDirty()
def deleteAssemblies(self, fgd_id):
beginIndex = self.baseAssemblyModel.index(0, 0)
baseIndexList = self.baseAssemblyModel.match(beginIndex, Qt.DisplayRole, QVariant(fgd_id), hits=-1)
beginIndex = self.bomAssemblyModel.index(0, 0)
bomIndexList = self.bomAssemblyModel.match(beginIndex, Qt.DisplayRole, QVariant(fgd_id), hits=-1)
if not baseIndexList:
return
if not bomIndexList:
return
while baseIndexList:
position = baseIndexList[0].row()
self.baseAssemblyModel.removeRows(position)
baseIndexList = self.baseAssemblyModel.match(beginIndex, Qt.DisplayRole, QVariant(fgd_id))
while bomIndexList:
position = bomIndexList[0].row()
self.bomAssemblyModel.removeRows(position)
bomIndexList = self.bomAssemblyModel.match(beginIndex, Qt.DisplayRole, QVariant(fgd_id))
def addAssemblies(self, recall=False):
if recall == True:
baseList = self.session.query(FGDBatchAssembly).join(FGD).filter(FGD.journal_id==self.record_id)
itemList = self.session.query(FGDBOMAssembly).join(FGD).join(AssemblyRMD) \
.filter(FGD.journal_id==self.record_id)
clear = True
fgd_id = None
else:
index = self.productionTableView.currentIndex()
if not index.column() == 1:
return
row = index.row()
myIndex = self.detailModel.index(row, 1)
fgd_id = self.fgd_id()[0]
fg_num = str(self.detailModel.data(myIndex, Qt.DisplayRole).toString())
fg_id = dLookup(Items.item_id, Items.item_no==fg_num)
self.deleteAssemblies(fgd_id)
baseList = self.session.query(BaseAssembly).filter(BaseAssembly.item_id==fg_id)
itemList = self.session.query(ItemAssembly).filter(ItemAssembly.item_id==fg_id)
clear = False
self.baseAssemblyModel.load(baseList, clear, fgd_id)
self.bomAssemblyModel.load(itemList, clear, fgd_id)
self.baseAssemblyProxy.reset()
self.bomAssemblyProxy.reset()
def setFilter(self):
fgd_id = self.fgd_id()[0]
self.bomAssemblyProxy.setFilterFixedString(str(fgd_id))
self.baseAssemblyProxy.setFilterFixedString(str(fgd_id))
def editItem(self):
row = self.productionTableView.currentIndex().row()
index = self.detailModel.index(row, ITEM)
item_id = self.detailModel.data(index, Qt.EditRole).toString()
if not item_id:
return
form = self.my_parent.itemForm()
form.recall(1, str(item_id))
def fillWithPrep(self):
self.clearModels()
#// get prep id
prep_id = str(self.prep_comboBox.currentText())
#// get batch list, and load it into batch model
batch_list = self.session.query(BatchHeader).filter(BatchHeader.prep_id==prep_id)
self.batchesModel.load(batch_list)
#// now let's load items and assemblies
#// first we'll load the items, then we'll loop through them to get the fgd and load assemblies for each one
details = self.session.query(PrepDetail).filter(PrepDetail.header_id==prep_id)
row_id_list = self.detailModel.load(details)
for row in row_id_list:
pd_id, fgd_id, fg_id = row
clear = False
base_list = self.session.query(PrepAssembly).filter(PrepAssembly.pd_id==pd_id)
itemList = self.session.query(ItemAssembly).filter(ItemAssembly.item_id==fg_id)
self.baseAssemblyModel.load(base_list, clear, fgd_id)
self.bomAssemblyModel.load(itemList, clear, fgd_id)
self.baseAssemblyProxy.reset()
self.bomAssemblyProxy.reset()
self.setFilter()
self.editing = False
### Calculations
def updateSumTotals(self):
fg_volume = round(nonZero(self.detailModel.getSumVolume(), 0), 2)
rm_cost = round(nonZero(self.batchesModel.getSumCost(), 0), 2)
fg_total_cost = self.detailModel.getSumTotal()
filing = float(getType(self.filingChargeLineEdit.text()))
labour = float(getType(self.labourChargeLineEdit.text()))
self.directCost = filing + labour
per_lt = round(nonZero(filing, 0) / nonZero(fg_volume, 1), 2)
self.v_fgVolume_label.setText(str('{:,.2f}'.format(fg_volume)))
self.v_rmCost_label.setText(str('{:,.2f}'.format(rm_cost)))
self.v_totalFGCost_label.setText(str('{:,.2f}'.format(fg_total_cost)))
self.v_costPerLT_label.setText(str('{:,.2f}'.format(per_lt)))
self.v_totalFees_label.setText(str('{:,.2f}'.format(self.directCost)))
def recalc(self):
date = self.dateDateEdit.date()
date = date.toPyDate()
date = str(date)
journal_id = str(self.v_prodID_label.text())
updateCostForBatch(self.session, date, journal_id)
self.bomAssemblyModel.updateCost(journal_id, date)
self.batchesModel.updateModel()
self.detailModel.updateBaseAssemblyValue()
self.detailModel.updateModel(self.directCost)
self.batchesModel.updateModel() #// need to update batches model again because the used column is based on data from baseAssembly
self.updateSumTotals()
### Form operations
def reject(self):
self.accept()
def accept(self):
if self.dirty:
answer = QMessageBox.question(self, "Editing - %s" % localTITLE, "Would you like to save your data?",
QMessageBox.Yes| QMessageBox.No| QMessageBox.Cancel)
if answer == QMessageBox.Cancel:
return
elif answer == QMessageBox.No:
QDialog.accept(self)
elif answer == QMessageBox.Yes:
self.save()
QDialog.accept(self)
self.my_parent.formClosed()
def openReceive(self):
journal_id = int(self.v_prodID_label.text()) if self.v_prodID_label.text() else None
journal_date = self.dateDateEdit.date().toPyDate()
self.dialog.setJournalInfo(journal_id, journal_date)
self.dialog.updateSumTotal()
self.dialog.exec_()
def save(self):
#// make sure there are details to be recorded
if self.detailModel.rowCount() <= 1:
QMessageBox.information(self, 'Save Production - %s' % localTITLE, 'No details found', QMessageBox.Ok)
return
#// check if all batches are being used
ok = self.batchesModel.checkForExtraBatches()
if not ok:
return
#// prepare the values to be saved for the header
journal_type = 'Production'
qDate = self.dateDateEdit.date()
journal_date = qDate.toPyDate()
journal_no = str(self.refnoLineEdit.text())
journal_memo = str(self.notesTextEdit.toPlainText())
filing_charge = unicode(self.filingChargeLineEdit.text())
labour_charge = unicode(self.labourChargeLineEdit.text())
mix = False
modified_date = QDateTime().currentDateTime().toPyDateTime()
log_memo = 'Created'
progress = QProgressDialog('Recording Production...', 'Abort Record', 0, 4)
progress.setCancelButton(None)
progress.setValue(1)
progress.show()
progress.setLabelText('Calculating')
QApplication.processEvents()
#// if the journal is being edited, update the header, delete the old details and repost new ones.
if self.editing:
#// check for closing date issues
old_date = dLookup(JournalHeader.journal_date, JournalHeader.journal_id==self.record_id)
if not closingDate(old_date):
return
if not closingDate(journal_date):
return
log_memo = 'Modified'
#// update journal header info
journal_id = self.record_id
self.current_record = self.session.query(ProductionHeader).filter(ProductionHeader.journal_id==self.record_id)
self.current_record.update({'journal_date': journal_date, 'journal_no': journal_no, 'journal_memo': journal_memo,
'filing_charge': filing_charge, 'labour_charge': labour_charge, 'mix': mix,
'modified_date': modified_date})
#// delete old details from db
self.delete(header=False)
#// if it's a new journal
else:
#// check for closing issues
if not closingDate(journal_date):
return
#// make sure to get correct journal_id
journal_id = dMax(JournalHeader.journal_id) + 1
#// post header info
self.session.add(ProductionHeader(journal_id, journal_type, journal_date, journal_no, journal_memo,
filing_charge, labour_charge, mix, modified_date))
#// update the journal_id on the prep
prep_id = str(self.prep_comboBox.currentText())
if prep_id:
self.session.query(PrepHeader).filter(PrepHeader.prep_id==prep_id).update({'prod_id': journal_id})
#// but before reposting details, recalculate
self.recalc()
progress.setLabelText('Posting')
progress.setValue(2)
#// post-repost details
if self.receiveModel.rowCount() > 1:
self.dialog.save(journal_id, journal_date)
details = self.detailModel.save(journal_id)
rmDetails, batchAdjustments = self.batchesModel.recordBatchDetailsToRMD(self.session, journal_id, journal_date)
baseAssemblies = self.baseAssemblyModel.save(None, 'FGDBatchAssembly')
bomAssemblies = self.bomAssemblyModel.save(journal_id, 'FGDBOMAssembly', None, journal_date)
bomRM, smAdjustments = self.bomAssemblyModel.save(journal_id, 'AssemblyRMD', self.session, journal_date, self.detailModel)
progress.setLabelText('Saving')
progress.setValue(3)
self.session.add_all(details)
self.session.add_all(rmDetails)
self.session.add_all(batchAdjustments)
self.session.add_all(baseAssemblies)
self.session.add_all(bomAssemblies)
self.session.add_all(bomRM)
self.session.add_all(smAdjustments)
self.session.add(Logs(journal_id, self.my_parent.user_id, modified_date, log_memo))
self.sendToDB()
self.v_prodID_label.setText(str(journal_id))
self.editing = True
self.dirty = False
self.record_id = journal_id
progress.setValue(4)
self.setWindowTitle('%s - (Data Saved)' % localTITLE)
self.session.close()
def delete(self, header):
if not self.record_id:
return
journal_id = self.record_id
if header:
#// check for closing date issues
old_date = dLookup(JournalHeader.journal_date, JournalHeader.journal_id==self.record_id)
if not closingDate(old_date):
return
answer = QMessageBox.question(self, "Delete - %s" % localTITLE, "Are you sure you " \
"want to delete Production: %s:, %s" % (self.v_prodID_label.text(),
self.dateDateEdit.date().toPyDate()),
QMessageBox.Yes| QMessageBox.No, QMessageBox.NoButton)
if answer == QMessageBox.No:
return
self.session.query(ProductionHeader).filter(ProductionHeader.journal_id==journal_id).delete()
self.session.query(BatchHeader).filter(BatchHeader.journal_id==journal_id).update({'journal_id': None})
prep_id = str(self.prep_comboBox.currentText())
if prep_id:
self.session.query(PrepHeader).filter(PrepHeader.prod_id==journal_id).update({'prod_id': None})
self.deleteDBAssemblies(journal_id)
self.session.query(FGD).filter(FGD.journal_id==journal_id).delete()
self.session.query(RMD).filter(RMD.journal_id==journal_id).delete()
if header:
log_memo = 'Deleted - ProdID: %s, Date: %s, ProdNo: %s' % (self.record_id,
self.dateDateEdit.date().toPyDate(),
str(self.refnoLineEdit.text()))
self.session.add(Logs(self.record_id, self.my_parent.user_id, QDateTime().currentDateTime().toPyDateTime(), log_memo))
self.sendToDB()
self.clear()
def deleteDBAssemblies(self, journal_id):
fgdQry = self.session.query(FGD.fgd_id).filter(FGD.journal_id==journal_id).subquery()
self.session.query(FGDBatchAssembly).filter(FGDBatchAssembly.item_id.in_(fgdQry)).delete('fetch')
self.session.query(FGDBOMAssembly).filter(FGDBOMAssembly.item_id.in_(fgdQry)).delete('fetch')
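#// Note: delete('fetch') passes synchronize_session='fetch' positionally;
#// SQLAlchemy runs a SELECT for the matched rows first so objects already in
#// the session stay consistent with the bulk DELETE.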
def sendToDB(self):
try:
self.session.flush()
self.session.commit()
except Exception, e:
self.session.rollback()
raise e
def recall(self, journal_id):
# // first find out if the user is in the middle of entering data.
if self.dirty:
answer = QMessageBox.question(self, "Editing - %s" % localTITLE, "Would you like to save your data?",
QMessageBox.Yes| QMessageBox.Discard| QMessageBox.Cancel)
if answer == QMessageBox.Cancel:
return
elif answer == QMessageBox.Yes:
self.save()
self.record_id = journal_id
self.current_record = self.session.query(ProductionHeader).filter(ProductionHeader.journal_id==journal_id)
for record in self.current_record:
self.v_prodID_label.setText(str(journal_id))
self.dateDateEdit.setDate(record.journal_date)
self.refnoLineEdit.setText(str(record.journal_no))
self.notesTextEdit.setText(str(record.journal_memo))
self.filingChargeLineEdit.setText(str(record.filing_charge))
self.labourChargeLineEdit.setText(str(record.labour_charge))
self.directCost = getType(record.filing_charge) + getType(record.labour_charge)
prep_id = dLookup(PrepHeader.prep_id, PrepHeader.prod_id==journal_id)
if prep_id:
self.prep_comboBox.lineEdit().setText(prep_id)
batch_list = self.session.query(BatchHeader).filter(BatchHeader.journal_id==journal_id)
details = self.session.query(FGD).filter(FGD.journal_id==journal_id)
receive = self.session.query(ReceiveRMD).join(BOM).filter(ReceiveRMD.journal_id==journal_id).filter(BOM.mix_item==False)
self.detailModel.load(details)
self.batchesModel.load(batch_list)
self.receiveModel.load(receive)
self.addAssemblies(True)
self.setFilter()
self.updateSumTotals()
self.editing = True
def clearModels(self):
self.batchesModel.clear()
self.detailModel.clear()
self.baseAssemblyModel.clear()
self.bomAssemblyModel.clear()
self.receiveModel.clear()
def clear(self):
self.prep_comboBox.blockSignals(True)
self.clearModels()
self.my_parent.refreshModels()
widgets = self.findChildren(QWidget)
for widget in widgets:
if isinstance(widget, (QLineEdit, QTextEdit)):
widget.clear()
elif isinstance(widget, QComboBox):
widget.setCurrentIndex(-1)
elif isinstance(widget, QCheckBox):
widget.setChecked(False)
elif isinstance(widget, QLabel):
if widget.objectName()[:2] == 'v_':
widget.clear()
if defaultDate() == 'current':
self.dateDateEdit.setDate(QDate.currentDate())
self.v_prodID_label.setText(str(dMax(JournalHeader.journal_id) + 1))
self.editing = False
self.dirty = False
self.setWindowTitle(localTITLE)
self.prep_comboBox.blockSignals(False)
def printReport(self):
journal_id = str(self.v_prodID_label.text())
if not journal_id:
return
reportModel = reporting.ReportModel('Production')
self.refreshReport(reportModel)
self.my_parent.reportForm(reportModel, self)
def refreshReport(self, model, report=None):
journal_id = str(self.v_prodID_label.text())
productionQuery(self.session, journal_id, model)
def formClosed(self):
self.my_parent.formClosed()
if __name__ == '__main__':
app = QApplication(sys.argv)
setupDatabase("Production.sqlite")
form = ProductionForm()
form.show()
app.exec_()
|
mit
|
5t111111/markdown-preview.vim
|
markdownpreview_lib/pygments/styles/trac.py
|
364
|
1933
|
# -*- coding: utf-8 -*-
"""
pygments.styles.trac
~~~~~~~~~~~~~~~~~~~~
Port of the default trac highlighter design.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class TracStyle(Style):
"""
Port of the default trac highlighter design.
"""
default_style = ''
styles = {
Whitespace: '#bbbbbb',
Comment: 'italic #999988',
Comment.Preproc: 'bold noitalic #999999',
Comment.Special: 'bold #999999',
Operator: 'bold',
String: '#bb8844',
String.Regex: '#808000',
Number: '#009999',
Keyword: 'bold',
Keyword.Type: '#445588',
Name.Builtin: '#999999',
Name.Function: 'bold #990000',
Name.Class: 'bold #445588',
Name.Exception: 'bold #990000',
Name.Namespace: '#555555',
Name.Variable: '#008080',
Name.Constant: '#008080',
Name.Tag: '#000080',
Name.Attribute: '#008080',
Name.Entity: '#800080',
Generic.Heading: '#999999',
Generic.Subheading: '#aaaaaa',
Generic.Deleted: 'bg:#ffdddd #000000',
Generic.Inserted: 'bg:#ddffdd #000000',
Generic.Error: '#aa0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#555555',
Generic.Output: '#888888',
Generic.Traceback: '#aa0000',
Error: 'bg:#e3d2d2 #a61717'
}
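# A minimal usage sketch (assumes a stock Pygments install, where this style
# is registered under the name 'trac'):
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
# Render a small snippet to HTML with the Trac colors defined above.
print(highlight("print('hi')", PythonLexer(), HtmlFormatter(style='trac')))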
|
lgpl-2.1
|
sebrandon1/neutron
|
neutron/objects/address_scope.py
|
4
|
1292
|
# Copyright (c) 2016 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects import base as obj_base
from oslo_versionedobjects import fields as obj_fields
from neutron.db.models import address_scope as models
from neutron.objects import base
from neutron.objects import common_types
@obj_base.VersionedObjectRegistry.register
class AddressScope(base.NeutronDbObject):
# Version 1.0: Initial version
VERSION = '1.0'
db_model = models.AddressScope
fields = {
'id': obj_fields.UUIDField(),
'tenant_id': obj_fields.StringField(nullable=True),
'name': obj_fields.StringField(),
'shared': obj_fields.BooleanField(),
'ip_version': common_types.IPVersionEnumField(),
}
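# A minimal usage sketch (assumes a neutron admin context; only runnable
# inside a neutron deployment). NeutronDbObject supplies the persistence
# methods, so creating a scope typically looks like:
#   scope = AddressScope(context, name='global-v4', shared=True, ip_version=4)
#   scope.create()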
|
apache-2.0
|
3dfxsoftware/cbss-addons
|
account_voucher_move_id/__openerp__.py
|
1
|
1448
|
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Show Journal Entry in voucher',
'version': '0.1',
'author': 'Vauxoo',
'category': 'Accounting',
'website': 'http://www.vauxoo.com',
'description': """
This module shows the Journal Entry in:
========================================
* Sales Receipts
* Customer Payments
* Purchase Receipts
* Supplier Payments
""",
'depends': [
'account_voucher'],
'data': [
"account_voucher.xml",
],
'auto_install': False,
'installable': True,
}
|
gpl-2.0
|
jerryz1982/neutron
|
neutron/plugins/cisco/db/l3/l3_models.py
|
48
|
4506
|
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.db import agents_db
from neutron.db import l3_db
from neutron.db import model_base
from neutron.db import models_v2
class HostingDevice(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents an appliance hosting Neutron router(s).
When the hosting device is a Nova VM 'id' is uuid of that VM.
"""
__tablename__ = 'cisco_hosting_devices'
# complementary id to enable identification of associated Neutron resources
complementary_id = sa.Column(sa.String(36))
# manufacturer id of the device, e.g., its serial number
device_id = sa.Column(sa.String(255))
admin_state_up = sa.Column(sa.Boolean, nullable=False, default=True)
# 'management_port_id' is the Neutron Port used for management interface
management_port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id',
ondelete="SET NULL"))
management_port = orm.relationship(models_v2.Port)
# 'protocol_port' is udp/tcp port of hosting device. May be empty.
protocol_port = sa.Column(sa.Integer)
cfg_agent_id = sa.Column(sa.String(36),
sa.ForeignKey('agents.id'),
nullable=True)
cfg_agent = orm.relationship(agents_db.Agent)
# Service VMs take time to boot so we store creation time
# so we can give preference to older ones when scheduling
created_at = sa.Column(sa.DateTime, nullable=False)
status = sa.Column(sa.String(16))
class HostedHostingPortBinding(model_base.BASEV2):
"""Represents binding of logical resource's port to its hosting port."""
__tablename__ = 'cisco_port_mappings'
logical_resource_id = sa.Column(sa.String(36), primary_key=True)
logical_port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id',
ondelete="CASCADE"),
primary_key=True)
logical_port = orm.relationship(
models_v2.Port,
primaryjoin='Port.id==HostedHostingPortBinding.logical_port_id',
backref=orm.backref('hosting_info', cascade='all', uselist=False))
# type of hosted port, e.g., router_interface, ..._gateway, ..._floatingip
port_type = sa.Column(sa.String(32))
# type of network the router port belongs to
network_type = sa.Column(sa.String(32))
hosting_port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id',
ondelete='CASCADE'))
hosting_port = orm.relationship(
models_v2.Port,
primaryjoin='Port.id==HostedHostingPortBinding.hosting_port_id')
# VLAN tag for trunk ports
segmentation_id = sa.Column(sa.Integer, autoincrement=False)
class RouterHostingDeviceBinding(model_base.BASEV2):
"""Represents binding between Neutron routers and their hosting devices."""
__tablename__ = 'cisco_router_mappings'
router_id = sa.Column(sa.String(36),
sa.ForeignKey('routers.id', ondelete='CASCADE'),
primary_key=True)
router = orm.relationship(
l3_db.Router,
backref=orm.backref('hosting_info', cascade='all', uselist=False))
# If 'auto_schedule' is True then router is automatically scheduled
# if it lacks a hosting device or its hosting device fails.
auto_schedule = sa.Column(sa.Boolean, default=True, nullable=False)
# id of hosting device hosting this router, None/NULL if unscheduled.
hosting_device_id = sa.Column(sa.String(36),
sa.ForeignKey('cisco_hosting_devices.id',
ondelete='SET NULL'))
hosting_device = orm.relationship(HostingDevice)
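# A minimal query sketch (assumes a SQLAlchemy session bound to the neutron
# DB; 'router_id' is a placeholder). Resolving a router's hosting device goes
# through the binding table defined above:
#   binding = (session.query(RouterHostingDeviceBinding)
#              .filter_by(router_id=router_id).first())
#   device = binding.hosting_device if binding else None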
|
apache-2.0
|
luoguizhou/gooderp_addons
|
sell/__openerp__.py
|
6
|
3544
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 上海开阖软件有限公司 (http://www.osbzr.com).
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
'name': 'GOODERP Sales Module',
'author': '[email protected],[email protected]',
'website': 'https://www.osbzr.com',
'category': 'gooderp',
'description':
'''
This module makes it easy to manage sales.
Create a sales order; once approved, the goods on its lines are sold to the customer, completing the sales workflow.
Create a sales return order; once approved, the goods on its lines are taken back, completing the sales-return workflow.
Create a sales change order against an original sales order; once approved, the goods on its lines are adjusted into the original order lines, completing the sales-adjustment workflow.
Sales reports include:
sales order tracking;
sales details;
sales summary (by product, by customer, by salesperson);
sales receipts overview;
top ten selling products.
''',
'version': '11.11',
'depends': ['warehouse', 'partner_address', 'staff'],
'data': [
'data/sell_data.xml',
'security/groups.xml',
'security/rules.xml',
'views/sell_view.xml',
'views/customer_view.xml',
'views/approve_multi_sale_order_view.xml',
'report/customer_statements_view.xml',
'report/sell_order_track_view.xml',
'report/sell_order_detail_view.xml',
'report/sell_summary_goods_view.xml',
'report/sell_summary_partner_view.xml',
'report/sell_summary_staff_view.xml',
'report/sell_receipt_view.xml',
'report/sell_top_ten_view.xml',
'report/sell_summary_view.xml',
'wizard/customer_statements_wizard_view.xml',
'wizard/sell_order_track_wizard_view.xml',
'wizard/sell_order_detail_wizard_view.xml',
'wizard/sell_summary_goods_wizard_view.xml',
'wizard/sell_summary_partner_wizard_view.xml',
'wizard/sell_summary_staff_wizard_view.xml',
'wizard/sell_receipt_wizard_view.xml',
'wizard/sell_top_ten_wizard_view.xml',
'security/ir.model.access.csv',
'report/report_data.xml',
'data/home_page_data.xml'
],
'demo': [
'data/sell_demo.xml',
],
'installable': True,
'auto_install': False,
}
|
agpl-3.0
|
dlazz/ansible
|
lib/ansible/modules/storage/netapp/na_ontap_snapmirror.py
|
16
|
19961
|
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
author: NetApp Ansible Team (@carchi8py) <[email protected]>
description:
- Create/Delete/Initialize SnapMirror volume/vserver relationships
- Modify schedule for a SnapMirror relationship
extends_documentation_fragment:
- netapp.na_ontap
module: na_ontap_snapmirror
options:
state:
choices: ['present', 'absent']
description:
- Whether the specified relationship should exist or not.
default: present
source_volume:
description:
- Specifies the name of the source volume for the SnapMirror.
destination_volume:
description:
- Specifies the name of the destination volume for the SnapMirror.
source_vserver:
description:
- Name of the source vserver for the SnapMirror.
destination_vserver:
description:
- Name of the destination vserver for the SnapMirror.
source_path:
description:
- Specifies the source endpoint of the SnapMirror relationship.
destination_path:
description:
- Specifies the destination endpoint of the SnapMirror relationship.
relationship_type:
choices: ['data_protection', 'load_sharing', 'vault', 'restore', 'transition_data_protection',
'extended_data_protection']
description:
- Specify the type of SnapMirror relationship.
schedule:
description:
- Specify the name of the current schedule, which is used to update the SnapMirror relationship.
- Optional for create, modifiable.
source_hostname:
description:
- Source hostname or IP address.
- Required for SnapMirror delete
source_username:
description:
- Source username.
- Optional if this is same as destination username.
source_password:
description:
- Source password.
- Optional if this is same as destination password.
short_description: "NetApp ONTAP Manage SnapMirror"
version_added: "2.7"
'''
EXAMPLES = """
- name: Create SnapMirror
na_ontap_snapmirror:
state: present
source_volume: test_src
destination_volume: test_dest
source_vserver: ansible_src
destination_vserver: ansible_dest
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Delete SnapMirror
na_ontap_snapmirror:
state: absent
destination_path: <path>
source_hostname: "{{ source_hostname }}"
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Set schedule to NULL
na_ontap_snapmirror:
state: present
destination_path: <path>
schedule: ""
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Release SnapMirror
na_ontap_snapmirror:
state: release
destination_path: <path>
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppONTAPSnapmirror(object):
"""
Class with Snapmirror methods
"""
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
source_vserver=dict(required=False, type='str'),
destination_vserver=dict(required=False, type='str'),
source_volume=dict(required=False, type='str'),
destination_volume=dict(required=False, type='str'),
source_path=dict(required=False, type='str'),
destination_path=dict(required=False, type='str'),
schedule=dict(required=False, type='str'),
relationship_type=dict(required=False, type='str',
choices=['data_protection', 'load_sharing',
'vault', 'restore',
'transition_data_protection',
'extended_data_protection']
),
source_hostname=dict(required=False, type='str'),
source_username=dict(required=False, type='str'),
source_password=dict(required=False, type='str', no_log=True)
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_together=(['source_volume', 'destination_volume'],
['source_vserver', 'destination_vserver']),
supports_check_mode=True
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
# setup later if required
self.source_server = None
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
def snapmirror_get_iter(self):
"""
Compose NaElement object to query current SnapMirror relations using destination-path
SnapMirror relation for a destination path is unique
:return: NaElement object for SnapMirror-get-iter
"""
snapmirror_get_iter = netapp_utils.zapi.NaElement('snapmirror-get-iter')
query = netapp_utils.zapi.NaElement('query')
snapmirror_info = netapp_utils.zapi.NaElement('snapmirror-info')
snapmirror_info.add_new_child('destination-location', self.parameters['destination_path'])
query.add_child_elem(snapmirror_info)
snapmirror_get_iter.add_child_elem(query)
return snapmirror_get_iter
def snapmirror_get(self):
"""
Get current SnapMirror relations
:return: Dictionary of current SnapMirror details if query successful, else None
"""
snapmirror_get_iter = self.snapmirror_get_iter()
snap_info = dict()
try:
result = self.server.invoke_successfully(snapmirror_get_iter, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error fetching snapmirror info: %s' % to_native(error),
exception=traceback.format_exc())
if result.get_child_by_name('num-records') and \
int(result.get_child_content('num-records')) > 0:
snapmirror_info = result.get_child_by_name('attributes-list').get_child_by_name(
'snapmirror-info')
snap_info['mirror_state'] = snapmirror_info.get_child_content('mirror-state')
snap_info['status'] = snapmirror_info.get_child_content('relationship-status')
snap_info['schedule'] = snapmirror_info.get_child_content('schedule')
if snap_info['schedule'] is None:
snap_info['schedule'] = ""
return snap_info
return None
def snapmirror_create(self):
"""
Create a SnapMirror relationship
"""
options = {'source-location': self.parameters['source_path'],
'destination-location': self.parameters['destination_path']}
snapmirror_create = netapp_utils.zapi.NaElement.create_node_with_children('snapmirror-create', **options)
if self.parameters.get('relationship_type'):
snapmirror_create.add_new_child('relationship-type', self.parameters['relationship_type'])
if self.parameters.get('schedule'):
snapmirror_create.add_new_child('schedule', self.parameters['schedule'])
try:
self.server.invoke_successfully(snapmirror_create, enable_tunneling=True)
self.snapmirror_initialize()
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error creating SnapMirror %s' % to_native(error),
exception=traceback.format_exc())
def delete_snapmirror(self):
"""
Delete a SnapMirror relationship
#1. Quiesce the SnapMirror relationship at destination
#2. Break the SnapMirror relationship at the destination
#3. Release the SnapMirror at source
#4. Delete SnapMirror at destination
"""
if not self.parameters.get('source_hostname'):
self.module.fail_json(msg='Missing parameters for delete: Please specify the '
'source cluster hostname to release the SnapMirror relation')
if self.parameters.get('source_username'):
self.module.params['username'] = self.parameters['source_username']
if self.parameters.get('source_password'):
self.module.params['password'] = self.parameters['source_password']
self.module.params['hostname'] = self.parameters['source_hostname']
self.source_server = netapp_utils.setup_ontap_zapi(module=self.module)
self.snapmirror_quiesce()
if self.parameters.get('relationship_type') and \
self.parameters.get('relationship_type') not in ['load_sharing', 'vault']:
self.snapmirror_break()
if self.get_destination():
self.snapmirror_release()
self.snapmirror_delete()
def snapmirror_quiesce(self):
"""
Quiesce SnapMirror relationship - disable all future transfers to this destination
"""
options = {'destination-location': self.parameters['destination_path']}
snapmirror_quiesce = netapp_utils.zapi.NaElement.create_node_with_children(
'snapmirror-quiesce', **options)
try:
self.server.invoke_successfully(snapmirror_quiesce,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error Quiescing SnapMirror : %s'
% (to_native(error)),
exception=traceback.format_exc())
def snapmirror_delete(self):
"""
Delete SnapMirror relationship at destination cluster
"""
options = {'destination-location': self.parameters['destination_path']}
snapmirror_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'snapmirror-destroy', **options)
try:
self.server.invoke_successfully(snapmirror_delete,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error deleting SnapMirror : %s'
% (to_native(error)),
exception=traceback.format_exc())
def snapmirror_break(self):
"""
Break SnapMirror relationship at destination cluster
"""
options = {'destination-location': self.parameters['destination_path']}
snapmirror_break = netapp_utils.zapi.NaElement.create_node_with_children(
'snapmirror-break', **options)
try:
self.server.invoke_successfully(snapmirror_break,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error breaking SnapMirror relationship : %s'
% (to_native(error)),
exception=traceback.format_exc())
def snapmirror_release(self):
"""
Release SnapMirror relationship from source cluster
"""
options = {'destination-location': self.parameters['destination_path']}
snapmirror_release = netapp_utils.zapi.NaElement.create_node_with_children(
'snapmirror-release', **options)
try:
self.source_server.invoke_successfully(snapmirror_release,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error releasing SnapMirror relationship : %s'
% (to_native(error)),
exception=traceback.format_exc())
def snapmirror_abort(self):
"""
Abort a SnapMirror relationship in progress
"""
options = {'destination-location': self.parameters['destination_path']}
snapmirror_abort = netapp_utils.zapi.NaElement.create_node_with_children(
'snapmirror-abort', **options)
try:
self.server.invoke_successfully(snapmirror_abort,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error aborting SnapMirror relationship : %s'
% (to_native(error)),
exception=traceback.format_exc())
def snapmirror_initialize(self):
"""
Initialize SnapMirror based on relationship type
"""
current = self.snapmirror_get()
if current['mirror_state'] != 'snapmirrored':
initialize_zapi = 'snapmirror-initialize'
if self.parameters.get('relationship_type') and self.parameters['relationship_type'] == 'load_sharing':
initialize_zapi = 'snapmirror-initialize-ls-set'
options = {'source-location': self.parameters['source_path']}
else:
options = {'destination-location': self.parameters['destination_path']}
snapmirror_init = netapp_utils.zapi.NaElement.create_node_with_children(
initialize_zapi, **options)
try:
self.server.invoke_successfully(snapmirror_init,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error initializing SnapMirror : %s'
% (to_native(error)),
exception=traceback.format_exc())
def snapmirror_modify(self, modify):
"""
Modify SnapMirror schedule
"""
options = {'destination-location': self.parameters['destination_path'],
'schedule': modify.get('schedule')}
snapmirror_modify = netapp_utils.zapi.NaElement.create_node_with_children(
'snapmirror-modify', **options)
try:
self.server.invoke_successfully(snapmirror_modify,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error modifying SnapMirror schedule : %s'
% (to_native(error)),
exception=traceback.format_exc())
def snapmirror_update(self):
"""
Update data in destination endpoint
"""
options = {'destination-location': self.parameters['destination_path']}
snapmirror_update = netapp_utils.zapi.NaElement.create_node_with_children(
'snapmirror-update', **options)
try:
result = self.server.invoke_successfully(snapmirror_update,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error updating SnapMirror : %s'
% (to_native(error)),
exception=traceback.format_exc())
def check_parameters(self):
"""
Validate parameters and fail if one or more required params are missing
Update source and destination path from vserver and volume parameters
"""
if self.parameters['state'] == 'present'\
and (self.parameters.get('source_path') or self.parameters.get('destination_path')):
if not self.parameters.get('destination_path') or not self.parameters.get('source_path'):
self.module.fail_json(msg='Missing parameters: Source path or Destination path')
elif self.parameters.get('source_volume'):
if not self.parameters.get('source_vserver') or not self.parameters.get('destination_vserver'):
self.module.fail_json(msg='Missing parameters: source vserver or destination vserver or both')
self.parameters['source_path'] = self.parameters['source_vserver'] + ":" + self.parameters['source_volume']
self.parameters['destination_path'] = self.parameters['destination_vserver'] + ":" +\
self.parameters['destination_volume']
elif self.parameters.get('source_vserver'):
self.parameters['source_path'] = self.parameters['source_vserver'] + ":"
self.parameters['destination_path'] = self.parameters['destination_vserver'] + ":"
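# For illustration: source_vserver='svm1' with source_volume='vol1' yields
# source_path 'svm1:vol1'; giving only the vservers yields the SVM-scoped
# form 'svm1:'.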
def get_destination(self):
release_get = netapp_utils.zapi.NaElement('snapmirror-get-destination-iter')
query = netapp_utils.zapi.NaElement('query')
snapmirror_dest_info = netapp_utils.zapi.NaElement('snapmirror-destination-info')
snapmirror_dest_info.add_new_child('destination-location', self.parameters['destination_path'])
query.add_child_elem(snapmirror_dest_info)
release_get.add_child_elem(query)
try:
result = self.source_server.invoke_successfully(release_get, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error fetching snapmirror destinations info: %s' % to_native(error),
exception=traceback.format_exc())
if result.get_child_by_name('num-records') and \
int(result.get_child_content('num-records')) > 0:
return True
return None
def apply(self):
"""
Apply action to SnapMirror
"""
self.check_parameters()
current = self.snapmirror_get()
cd_action = self.na_helper.get_cd_action(current, self.parameters)
modify = self.na_helper.get_modified_attributes(current, self.parameters)
if cd_action == 'create':
self.snapmirror_create()
elif cd_action == 'delete':
if current['status'] == 'transferring':
self.snapmirror_abort()
else:
self.delete_snapmirror()
else:
if modify:
self.snapmirror_modify(modify)
# check for initialize
if current and current['mirror_state'] != 'snapmirrored':
self.snapmirror_initialize()
# set changed explicitly for initialize
self.na_helper.changed = True
# Update when create is called again, or modify is being called
if self.parameters['state'] == 'present':
self.snapmirror_update()
self.module.exit_json(changed=self.na_helper.changed)
def main():
"""Execute action"""
community_obj = NetAppONTAPSnapmirror()
community_obj.apply()
if __name__ == '__main__':
main()
|
gpl-3.0
|
rootml/YouCompleteMe
|
third_party/pythonfutures/concurrent/futures/_base.py
|
89
|
19642
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
from __future__ import with_statement
import logging
import threading
import time
try:
from collections import namedtuple
except ImportError:
from concurrent.futures._compat import namedtuple
__author__ = 'Brian Quinlan ([email protected])'
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
_AS_COMPLETED = '_AS_COMPLETED'
# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'
_FUTURE_STATES = [
PENDING,
RUNNING,
CANCELLED,
CANCELLED_AND_NOTIFIED,
FINISHED
]
_STATE_TO_DESCRIPTION_MAP = {
PENDING: "pending",
RUNNING: "running",
CANCELLED: "cancelled",
CANCELLED_AND_NOTIFIED: "cancelled",
FINISHED: "finished"
}
# Logger for internal use by the futures package.
LOGGER = logging.getLogger("concurrent.futures")
class Error(Exception):
"""Base class for all future-related exceptions."""
pass
class CancelledError(Error):
"""The Future was cancelled."""
pass
class TimeoutError(Error):
"""The operation exceeded the given deadline."""
pass
class _Waiter(object):
"""Provides the event that wait() and as_completed() block on."""
def __init__(self):
self.event = threading.Event()
self.finished_futures = []
def add_result(self, future):
self.finished_futures.append(future)
def add_exception(self, future):
self.finished_futures.append(future)
def add_cancelled(self, future):
self.finished_futures.append(future)
class _AsCompletedWaiter(_Waiter):
"""Used by as_completed()."""
def __init__(self):
super(_AsCompletedWaiter, self).__init__()
self.lock = threading.Lock()
def add_result(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _FirstCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_COMPLETED)."""
def add_result(self, future):
super(_FirstCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
super(_FirstCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
super(_FirstCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _AllCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""
def __init__(self, num_pending_calls, stop_on_exception):
self.num_pending_calls = num_pending_calls
self.stop_on_exception = stop_on_exception
self.lock = threading.Lock()
super(_AllCompletedWaiter, self).__init__()
def _decrement_pending_calls(self):
with self.lock:
self.num_pending_calls -= 1
if not self.num_pending_calls:
self.event.set()
def add_result(self, future):
super(_AllCompletedWaiter, self).add_result(future)
self._decrement_pending_calls()
def add_exception(self, future):
super(_AllCompletedWaiter, self).add_exception(future)
if self.stop_on_exception:
self.event.set()
else:
self._decrement_pending_calls()
def add_cancelled(self, future):
super(_AllCompletedWaiter, self).add_cancelled(future)
self._decrement_pending_calls()
class _AcquireFutures(object):
"""A context manager that does an ordered acquire of Future conditions."""
def __init__(self, futures):
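# Sorting by id() gives every caller the same global acquisition order,
# which prevents deadlock when threads lock overlapping sets of futures.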
self.futures = sorted(futures, key=id)
def __enter__(self):
for future in self.futures:
future._condition.acquire()
def __exit__(self, *args):
for future in self.futures:
future._condition.release()
def _create_and_install_waiters(fs, return_when):
if return_when == _AS_COMPLETED:
waiter = _AsCompletedWaiter()
elif return_when == FIRST_COMPLETED:
waiter = _FirstCompletedWaiter()
else:
pending_count = sum(
f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)
if return_when == FIRST_EXCEPTION:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True)
elif return_when == ALL_COMPLETED:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False)
else:
raise ValueError("Invalid return condition: %r" % return_when)
for f in fs:
f._waiters.append(waiter)
return waiter
def as_completed(fs, timeout=None):
"""An iterator over the given futures that yields each as it completes.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
iterate over.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator that yields the given Futures as they complete (finished or
cancelled).
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
"""
if timeout is not None:
end_time = timeout + time.time()
with _AcquireFutures(fs):
finished = set(
f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
pending = set(fs) - finished
waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
try:
for future in finished:
yield future
while pending:
if timeout is None:
wait_timeout = None
else:
wait_timeout = end_time - time.time()
if wait_timeout < 0:
raise TimeoutError(
'%d (of %d) futures unfinished' % (
len(pending), len(fs)))
waiter.event.wait(wait_timeout)
with waiter.lock:
finished = waiter.finished_futures
waiter.finished_futures = []
waiter.event.clear()
for future in finished:
yield future
pending.remove(future)
finally:
for f in fs:
f._waiters.remove(waiter)
DoneAndNotDoneFutures = namedtuple(
'DoneAndNotDoneFutures', 'done not_done')
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
"""Wait for the futures in the given sequence to complete.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
wait upon.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
return_when: Indicates when this function should return. The options
are:
FIRST_COMPLETED - Return when any future finishes or is
cancelled.
FIRST_EXCEPTION - Return when any future finishes by raising an
exception. If no future raises an exception
then it is equivalent to ALL_COMPLETED.
ALL_COMPLETED - Return when all futures finish or are cancelled.
Returns:
A named 2-tuple of sets. The first set, named 'done', contains the
futures that completed (finished or cancelled) before the wait
completed. The second set, named 'not_done', contains uncompleted
futures.
"""
with _AcquireFutures(fs):
done = set(f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
not_done = set(fs) - done
if (return_when == FIRST_COMPLETED) and done:
return DoneAndNotDoneFutures(done, not_done)
elif (return_when == FIRST_EXCEPTION) and done:
if any(f for f in done
if not f.cancelled() and f.exception() is not None):
return DoneAndNotDoneFutures(done, not_done)
if len(done) == len(fs):
return DoneAndNotDoneFutures(done, not_done)
waiter = _create_and_install_waiters(fs, return_when)
waiter.event.wait(timeout)
for f in fs:
f._waiters.remove(waiter)
done.update(waiter.finished_futures)
return DoneAndNotDoneFutures(done, set(fs) - done)
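# A minimal usage sketch (assumes this package's ThreadPoolExecutor; 'fetch'
# and 'urls' are placeholders):
#   from concurrent.futures import ThreadPoolExecutor
#   with ThreadPoolExecutor(max_workers=4) as executor:
#       futures = [executor.submit(fetch, url) for url in urls]
#       done, not_done = wait(futures, timeout=5.0,
#                             return_when=FIRST_EXCEPTION)
#       for f in as_completed(done):
#           print(f.result())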
class Future(object):
"""Represents the result of an asynchronous computation."""
def __init__(self):
"""Initializes the future. Should not be called by clients."""
self._condition = threading.Condition()
self._state = PENDING
self._result = None
self._exception = None
self._waiters = []
self._done_callbacks = []
def _invoke_callbacks(self):
for callback in self._done_callbacks:
try:
callback(self)
except Exception:
LOGGER.exception('exception calling callback for %r', self)
def __repr__(self):
with self._condition:
if self._state == FINISHED:
if self._exception:
return '<Future at %s state=%s raised %s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._exception.__class__.__name__)
else:
return '<Future at %s state=%s returned %s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._result.__class__.__name__)
return '<Future at %s state=%s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state])
def cancel(self):
"""Cancel the future if possible.
Returns True if the future was cancelled, False otherwise. A future
cannot be cancelled if it is running or has already completed.
"""
with self._condition:
if self._state in [RUNNING, FINISHED]:
return False
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
return True
self._state = CANCELLED
self._condition.notify_all()
self._invoke_callbacks()
return True
def cancelled(self):
"""Return True if the future has cancelled."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
def running(self):
"""Return True if the future is currently executing."""
with self._condition:
return self._state == RUNNING
def done(self):
"""Return True of the future was cancelled or finished executing."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
def __get_result(self):
if self._exception:
raise self._exception
else:
return self._result
def add_done_callback(self, fn):
"""Attaches a callable that will be called when the future finishes.
Args:
fn: A callable that will be called with this future as its only
argument when the future completes or is cancelled. The callable
will always be called by a thread in the same process in which
it was added. If the future has already completed or been
cancelled then the callable will be called immediately. These
callables are called in the order that they were added.
"""
with self._condition:
if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
self._done_callbacks.append(fn)
return
fn(self)
def result(self, timeout=None):
"""Return the result of the call that the future represents.
Args:
timeout: The number of seconds to wait for the result if the future
isn't done. If None, then there is no limit on the wait time.
Returns:
The result of the call that the future represents.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
Exception: If the call raised then that exception will be raised.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
else:
raise TimeoutError()
def exception(self, timeout=None):
"""Return the exception raised by the call that the future represents.
Args:
timeout: The number of seconds to wait for the exception if the
future isn't done. If None, then there is no limit on the wait
time.
Returns:
The exception raised by the call that the future represents or None
if the call completed without raising.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception
else:
raise TimeoutError()
# The following methods should only be used by Executors and in tests.
def set_running_or_notify_cancel(self):
"""Mark the future as running or process any cancel notifications.
Should only be used by Executor implementations and unit tests.
If the future has been cancelled (cancel() was called and returned
        True) then any threads waiting on the future completing (through calls
        to as_completed() or wait()) are notified and False is returned.
If the future was not cancelled then it is put in the running state
(future calls to running() will return True) and True is returned.
This method should be called by Executor implementations before
executing the work associated with this future. If this method returns
False then the work should not be executed.
Returns:
False if the Future was cancelled, True otherwise.
Raises:
RuntimeError: if this method was already called or if set_result()
or set_exception() was called.
"""
with self._condition:
if self._state == CANCELLED:
self._state = CANCELLED_AND_NOTIFIED
for waiter in self._waiters:
waiter.add_cancelled(self)
# self._condition.notify_all() is not necessary because
# self.cancel() triggers a notification.
return False
elif self._state == PENDING:
self._state = RUNNING
return True
else:
                LOGGER.critical('Future %s in unexpected state: %s',
                                id(self),
                                self._state)
raise RuntimeError('Future in unexpected state')
def set_result(self, result):
"""Sets the return value of work associated with the future.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._result = result
self._state = FINISHED
for waiter in self._waiters:
waiter.add_result(self)
self._condition.notify_all()
self._invoke_callbacks()
def set_exception(self, exception):
"""Sets the result of the future as being the given exception.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._exception = exception
self._state = FINISHED
for waiter in self._waiters:
waiter.add_exception(self)
self._condition.notify_all()
self._invoke_callbacks()
class Executor(object):
"""This is an abstract base class for concrete asynchronous executors."""
def submit(self, fn, *args, **kwargs):
"""Submits a callable to be executed with the given arguments.
Schedules the callable to be executed as fn(*args, **kwargs) and returns
a Future instance representing the execution of the callable.
Returns:
A Future representing the given call.
"""
raise NotImplementedError()
def map(self, fn, *iterables, **kwargs):
"""Returns a iterator equivalent to map(fn, iter).
Args:
fn: A callable that will take as many arguments as there are
passed iterables.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator equivalent to: map(func, *iterables) but the calls may
be evaluated out-of-order.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
Exception: If fn(*args) raises for any values.
"""
timeout = kwargs.get('timeout')
if timeout is not None:
end_time = timeout + time.time()
fs = [self.submit(fn, *args) for args in zip(*iterables)]
try:
for future in fs:
if timeout is None:
yield future.result()
else:
yield future.result(end_time - time.time())
finally:
for future in fs:
future.cancel()
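    # Illustrative note (added; the worker function and URL list below are
    # assumptions): with a concrete executor from this package, such as
    # ThreadPoolExecutor, map() is typically used as
    #
    #   with ThreadPoolExecutor(max_workers=4) as executor:
    #       for page in executor.map(fetch, urls, timeout=60):
    #           process(page)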
def shutdown(self, wait=True):
"""Clean-up the resources associated with the Executor.
        It is safe to call this method several times. Once it has been
        called, no other methods may be called on this executor.
Args:
wait: If True then shutdown will not return until all running
futures have finished executing and the resources used by the
executor have been reclaimed.
"""
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown(wait=True)
return False
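# Illustrative demo (added; not part of the original module). It drives a
# Future by hand, the way an Executor implementation would, to show the
# PENDING -> RUNNING -> FINISHED life cycle together with wait() and
# as_completed().
if __name__ == '__main__':
    f = Future()
    assert f.set_running_or_notify_cancel()  # PENDING -> RUNNING
    f.set_result(42)                         # RUNNING -> FINISHED
    done, not_done = wait([f], timeout=1.0)
    assert f in done and not not_done
    for completed in as_completed([f]):
        print(completed.result())            # prints 42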
|
gpl-3.0
|
flyfei/python-for-android
|
python3-alpha/python3-src/Lib/lib2to3/fixes/fix_renames.py
|
203
|
2221
|
"""Fix incompatible renames
Fixes:
* sys.maxint -> sys.maxsize
"""
# Author: Christian Heimes
# based on Collin Winter's fix_import
# Local imports
from .. import fixer_base
from ..fixer_util import Name, attr_chain
MAPPING = {"sys": {"maxint" : "maxsize"},
}
LOOKUP = {}
def alternates(members):
return "(" + "|".join(map(repr, members)) + ")"
def build_pattern():
#bare = set()
for module, replace in list(MAPPING.items()):
for old_attr, new_attr in list(replace.items()):
LOOKUP[(module, old_attr)] = new_attr
#bare.add(module)
#bare.add(old_attr)
#yield """
# import_name< 'import' (module=%r
# | dotted_as_names< any* module=%r any* >) >
# """ % (module, module)
yield """
import_from< 'from' module_name=%r 'import'
( attr_name=%r | import_as_name< attr_name=%r 'as' any >) >
""" % (module, old_attr, old_attr)
yield """
power< module_name=%r trailer< '.' attr_name=%r > any* >
""" % (module, old_attr)
#yield """bare_name=%s""" % alternates(bare)
class FixRenames(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "|".join(build_pattern())
order = "pre" # Pre-order tree traversal
# Don't match the node if it's within another match
def match(self, node):
match = super(FixRenames, self).match
results = match(node)
if results:
if any(match(obj) for obj in attr_chain(node, "parent")):
return False
return results
return False
#def start_tree(self, tree, filename):
# super(FixRenames, self).start_tree(tree, filename)
# self.replace = {}
def transform(self, node, results):
mod_name = results.get("module_name")
attr_name = results.get("attr_name")
#bare_name = results.get("bare_name")
#import_mod = results.get("module")
if mod_name and attr_name:
new_attr = LOOKUP[(mod_name.value, attr_name.value)]
attr_name.replace(Name(new_attr, prefix=attr_name.prefix))
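# Illustrative example (added; not part of the fixer) of the rewrite this
# fixer performs on Python 2 source, per the MAPPING table above:
#
#   before:  import sys; n = sys.maxint
#   after:   import sys; n = sys.maxsize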
|
apache-2.0
|
ssundarraj/grip
|
tests/mocks.py
|
2
|
4810
|
from __future__ import print_function, unicode_literals
import json
import requests
import responses
from grip import DEFAULT_API_URL, GitHubAssetManager, Grip, StdinReader
from helpers import USER_CONTEXT, input_file, output_file
class GitHubRequestsMock(responses.RequestsMock):
auth = ('test-username', 'test-password')
bad_auth = ('bad-username', 'bad-password')
def __init__(self, assert_all_requests_are_fired=False):
super(GitHubRequestsMock, self).__init__(
assert_all_requests_are_fired=assert_all_requests_are_fired)
self._response_map = {
input_file('zero.md'): {
'markdown': output_file('raw', 'zero.html'),
'user-content': output_file('raw', 'zero-user-content.html'),
'user-context': output_file('raw', 'zero-user-context.html'),
},
input_file('simple.md'): {
'markdown': output_file('raw', 'simple.html'),
'user-content': output_file('raw', 'simple-user-content.html'),
'user-context': output_file('raw', 'simple-user-context.html'),
},
input_file('gfm-test.md'): {
'markdown': output_file('raw', 'gfm-test.html'),
'user-content': output_file(
'raw', 'gfm-test-user-content.html'),
'user-context': output_file(
'raw', 'gfm-test-user-context.html'),
},
}
self.add_callback(
responses.POST, '{0}/markdown'.format(DEFAULT_API_URL),
callback=self._markdown_request)
self.add_callback(
responses.POST, '{0}/markdown/raw'.format(DEFAULT_API_URL),
callback=self._markdown_raw_request)
def _authenticate(self, request):
if 'Authorization' not in request.headers:
return None
dummy = requests.Request()
requests.auth.HTTPBasicAuth(*self.auth)(dummy)
if request.headers['Authorization'] != dummy.headers['Authorization']:
return (401, {'content-type': 'application/json; charset=utf-8'},
'{"message":"Bad credentials"}')
return None
def _output_for(self, content, mode=None, context=None):
for request_content in self._response_map:
if request_content != content:
continue
responses = self._response_map[request_content]
if mode is None or mode == 'markdown':
return responses['markdown']
elif context is None:
return responses['user-content']
elif context == USER_CONTEXT:
return responses['user-context']
else:
                raise ValueError(
                    'Markdown group not found for user context: {0}'.format(
                        context))
raise ValueError('Markdown group not found for: {!r}'.format(content))
def _decode_body(self, request):
if 'charset=UTF-8' not in request.headers['content-type']:
raise ValueError('Expected UTF-8 charset, got: {!r}'.format(
request.headers['content-type']))
return request.body.decode('utf-8') if request.body else ''
def _markdown_request(self, request):
r = self._authenticate(request)
if r:
return r
payload = json.loads(self._decode_body(request))
return (200, {'content-type': 'text/html'}, self._output_for(
payload['text'], payload['mode'], payload.get('context', None)))
def _markdown_raw_request(self, request):
r = self._authenticate(request)
if r:
return r
return (200, {'content-type': 'text/html'}, self._output_for(
self._decode_body(request)))
class StdinReaderMock(StdinReader):
def __init__(self, mock_stdin, *args, **kwargs):
super(StdinReaderMock, self).__init__(*args, **kwargs)
self._mock_stdin = mock_stdin
def read_stdin(self):
return self._mock_stdin
class GitHubAssetManagerMock(GitHubAssetManager):
def __init__(self, cache_path=None, style_urls=None):
if cache_path is None:
cache_path = 'dummy-path'
super(GitHubAssetManagerMock, self).__init__(cache_path, style_urls)
self.clear_calls = 0
self.cache_filename_calls = 0
self.retrieve_styles_calls = 0
def clear(self):
self.clear_calls += 1
def cache_filename(self, url):
self.cache_filename_calls += 1
return super(GitHubAssetManagerMock, self).cache_filename(url)
def retrieve_styles(self, asset_url_path):
self.retrieve_styles_calls += 1
class GripMock(Grip):
def default_asset_manager(self):
return GitHubAssetManagerMock()
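# Illustrative usage sketch (added; not part of the test helpers; the exact
# request shape is an assumption). GitHubRequestsMock is a
# responses.RequestsMock, so it can wrap any code that POSTs to the GitHub
# markdown API without touching the network:
#
#   with GitHubRequestsMock():
#       r = requests.post(
#           '{0}/markdown/raw'.format(DEFAULT_API_URL),
#           data=open(input_file('simple.md')).read(),
#           headers={'content-type': 'text/x-markdown; charset=UTF-8'},
#           auth=GitHubRequestsMock.auth)
#       assert r.status_code == 200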
|
mit
|
BbiKkuMi/heekscnc
|
area_funcs.py
|
24
|
15418
|
import area
from nc.nc import *
import math
import kurve_funcs
# some globals, to save passing variables as parameters too much
area_for_feed_possible = None
tool_radius_for_pocket = None
def cut_curve(curve, need_rapid, p, rapid_safety_space, current_start_depth, final_depth):
prev_p = p
first = True
for vertex in curve.getVertices():
if need_rapid and first:
# rapid across
rapid(vertex.p.x, vertex.p.y)
##rapid down
rapid(z = current_start_depth + rapid_safety_space)
#feed down
feed(z = final_depth)
first = False
else:
if vertex.type == 1:
arc_ccw(vertex.p.x, vertex.p.y, i = vertex.c.x, j = vertex.c.y)
elif vertex.type == -1:
arc_cw(vertex.p.x, vertex.p.y, i = vertex.c.x, j = vertex.c.y)
else:
feed(vertex.p.x, vertex.p.y)
prev_p = vertex.p
return prev_p
def area_distance(a, old_area):
best_dist = None
for curve in a.getCurves():
for vertex in curve.getVertices():
c = old_area.NearestPoint(vertex.p)
d = c.dist(vertex.p)
if best_dist == None or d < best_dist:
best_dist = d
for curve in old_area.getCurves():
for vertex in curve.getVertices():
c = a.NearestPoint(vertex.p)
d = c.dist(vertex.p)
if best_dist == None or d < best_dist:
best_dist = d
return best_dist
def make_obround(p0, p1, radius):
dir = p1 - p0
d = dir.length()
dir.normalize()
right = area.Point(dir.y, -dir.x)
obround = area.Area()
c = area.Curve()
vt0 = p0 + right * radius
vt1 = p1 + right * radius
vt2 = p1 - right * radius
vt3 = p0 - right * radius
c.append(area.Vertex(0, vt0, area.Point(0, 0)))
c.append(area.Vertex(0, vt1, area.Point(0, 0)))
c.append(area.Vertex(1, vt2, p1))
c.append(area.Vertex(0, vt3, area.Point(0, 0)))
c.append(area.Vertex(1, vt0, p0))
obround.append(c)
return obround
def feed_possible(p0, p1):
if p0 == p1:
return True
obround = make_obround(p0, p1, tool_radius_for_pocket)
a = area.Area(area_for_feed_possible)
obround.Subtract(a)
if obround.num_curves() > 0:
return False
return True
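# Illustrative note (added): feed_possible() sweeps the tool between p0 and p1
# as an obround of tool radius and subtracts the allowed pocket area from it;
# if any curve is left over, part of the move lies outside the pocket, so the
# caller must retract and rapid instead of feeding across.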
def cut_curvelist1(curve_list, rapid_safety_space, current_start_depth, depth, clearance_height, keep_tool_down_if_poss):
p = area.Point(0, 0)
first = True
for curve in curve_list:
need_rapid = True
if first == False:
s = curve.FirstVertex().p
if keep_tool_down_if_poss == True:
# see if we can feed across
if feed_possible(p, s):
need_rapid = False
elif s.x == p.x and s.y == p.y:
need_rapid = False
if need_rapid:
rapid(z = clearance_height)
p = cut_curve(curve, need_rapid, p, rapid_safety_space, current_start_depth, depth)
first = False
rapid(z = clearance_height)
def cut_curvelist2(curve_list, rapid_safety_space, current_start_depth, depth, clearance_height, keep_tool_down_if_poss,start_point):
p = area.Point(0, 0)
start_x,start_y=start_point
first = True
for curve in curve_list:
need_rapid = True
if first == True:
direction = "on";radius = 0.0;offset_extra = 0.0; roll_radius = 0.0;roll_on = 0.0; roll_off = 0.0; rapid_safety_space; step_down = math.fabs(depth);extend_at_start = 0.0;extend_at_end = 0.0
kurve_funcs.make_smaller( curve, start = area.Point(start_x,start_y))
kurve_funcs.profile(curve, direction, radius , offset_extra, roll_radius, roll_on, roll_off, rapid_safety_space , clearance_height, current_start_depth, step_down , depth, extend_at_start, extend_at_end)
else:
s = curve.FirstVertex().p
if keep_tool_down_if_poss == True:
# see if we can feed across
if feed_possible(p, s):
need_rapid = False
elif s.x == p.x and s.y == p.y:
need_rapid = False
cut_curve(curve, need_rapid, p, rapid_safety_space, current_start_depth, depth)
        first = False  # change to True to rapid back to the start side before zigging again when unidirectional is set
rapid(z = clearance_height)
def recur(arealist, a1, stepover, from_center):
# this makes arealist by recursively offsetting a1 inwards
if a1.num_curves() == 0:
return
if from_center:
arealist.insert(0, a1)
else:
arealist.append(a1)
a_offset = area.Area(a1)
a_offset.Offset(stepover)
# split curves into new areas
if area.holes_linked():
for curve in a_offset.getCurves():
a2 = area.Area()
a2.append(curve)
recur(arealist, a2, stepover, from_center)
else:
# split curves into new areas
a_offset.Reorder()
a2 = None
for curve in a_offset.getCurves():
if curve.IsClockwise():
if a2 != None:
a2.append(curve)
else:
if a2 != None:
recur(arealist, a2, stepover, from_center)
a2 = area.Area()
a2.append(curve)
if a2 != None:
recur(arealist, a2, stepover, from_center)
def get_curve_list(arealist, reverse_curves = False):
curve_list = list()
for a in arealist:
for curve in a.getCurves():
if reverse_curves == True:
curve.Reverse()
curve_list.append(curve)
return curve_list
curve_list_for_zigs = []
rightward_for_zigs = True
sin_angle_for_zigs = 0.0
cos_angle_for_zigs = 1.0
sin_minus_angle_for_zigs = 0.0
cos_minus_angle_for_zigs = 1.0
one_over_units = 1.0
def make_zig_curve(curve, y0, y, zig_unidirectional):
if rightward_for_zigs:
curve.Reverse()
# find a high point to start looking from
high_point = None
for vertex in curve.getVertices():
if high_point == None:
high_point = vertex.p
elif vertex.p.y > high_point.y:
# use this as the new high point
high_point = vertex.p
elif math.fabs(vertex.p.y - high_point.y) < 0.002 * one_over_units:
# equal high point
if rightward_for_zigs:
# use the furthest left point
if vertex.p.x < high_point.x:
high_point = vertex.p
else:
# use the furthest right point
if vertex.p.x > high_point.x:
high_point = vertex.p
zig = area.Curve()
high_point_found = False
zig_started = False
zag_found = False
for i in range(0, 2): # process the curve twice because we don't know where it will start
prev_p = None
for vertex in curve.getVertices():
if zag_found: break
if prev_p != None:
if zig_started:
zig.append(unrotated_vertex(vertex))
if math.fabs(vertex.p.y - y) < 0.002 * one_over_units:
zag_found = True
break
elif high_point_found:
if math.fabs(vertex.p.y - y0) < 0.002 * one_over_units:
if zig_started:
zig.append(unrotated_vertex(vertex))
elif math.fabs(prev_p.y - y0) < 0.002 * one_over_units and vertex.type == 0:
zig.append(area.Vertex(0, unrotated_point(prev_p), area.Point(0, 0)))
zig.append(unrotated_vertex(vertex))
zig_started = True
elif vertex.p.x == high_point.x and vertex.p.y == high_point.y:
high_point_found = True
prev_p = vertex.p
if zig_started:
if zig_unidirectional == True:
# remove the last bit of zig
if math.fabs(zig.LastVertex().p.y - y) < 0.002 * one_over_units:
vertices = zig.getVertices()
while len(vertices) > 0:
v = vertices[len(vertices)-1]
if math.fabs(v.p.y - y0) < 0.002 * one_over_units:
break
else:
vertices.pop()
zig = area.Curve()
for v in vertices:
zig.append(v)
curve_list_for_zigs.append(zig)
def make_zig(a, y0, y, zig_unidirectional):
for curve in a.getCurves():
make_zig_curve(curve, y0, y, zig_unidirectional)
reorder_zig_list_list = []
def add_reorder_zig(curve):
global reorder_zig_list_list
# look in existing lists
s = curve.FirstVertex().p
for curve_list in reorder_zig_list_list:
last_curve = curve_list[len(curve_list) - 1]
e = last_curve.LastVertex().p
if math.fabs(s.x - e.x) < 0.002 * one_over_units and math.fabs(s.y - e.y) < 0.002 * one_over_units:
curve_list.append(curve)
return
# else add a new list
curve_list = []
curve_list.append(curve)
reorder_zig_list_list.append(curve_list)
def reorder_zigs():
global curve_list_for_zigs
global reorder_zig_list_list
reorder_zig_list_list = []
for curve in curve_list_for_zigs:
add_reorder_zig(curve)
curve_list_for_zigs = []
for curve_list in reorder_zig_list_list:
for curve in curve_list:
curve_list_for_zigs.append(curve)
def rotated_point(p):
return area.Point(p.x * cos_angle_for_zigs - p.y * sin_angle_for_zigs, p.x * sin_angle_for_zigs + p.y * cos_angle_for_zigs)
def unrotated_point(p):
return area.Point(p.x * cos_minus_angle_for_zigs - p.y * sin_minus_angle_for_zigs, p.x * sin_minus_angle_for_zigs + p.y * cos_minus_angle_for_zigs)
def rotated_vertex(v):
if v.type:
return area.Vertex(v.type, rotated_point(v.p), rotated_point(v.c))
return area.Vertex(v.type, rotated_point(v.p), area.Point(0, 0))
def unrotated_vertex(v):
if v.type:
return area.Vertex(v.type, unrotated_point(v.p), unrotated_point(v.c))
return area.Vertex(v.type, unrotated_point(v.p), area.Point(0, 0))
def rotated_area(a):
an = area.Area()
for curve in a.getCurves():
curve_new = area.Curve()
for v in curve.getVertices():
curve_new.append(rotated_vertex(v))
an.append(curve_new)
return an
def zigzag(a, stepover, zig_unidirectional):
if a.num_curves() == 0:
return
global rightward_for_zigs
global curve_list_for_zigs
global sin_angle_for_zigs
global cos_angle_for_zigs
global sin_minus_angle_for_zigs
global cos_minus_angle_for_zigs
global one_over_units
one_over_units = 1 / area.get_units()
a = rotated_area(a)
b = area.Box()
a.GetBox(b)
x0 = b.MinX() - 1.0
x1 = b.MaxX() + 1.0
height = b.MaxY() - b.MinY()
num_steps = int(height / stepover + 1)
y = b.MinY() + 0.1 * one_over_units
null_point = area.Point(0, 0)
rightward_for_zigs = True
curve_list_for_zigs = []
for i in range(0, num_steps):
y0 = y
y = y + stepover
p0 = area.Point(x0, y0)
p1 = area.Point(x0, y)
p2 = area.Point(x1, y)
p3 = area.Point(x1, y0)
c = area.Curve()
c.append(area.Vertex(0, p0, null_point, 0))
c.append(area.Vertex(0, p1, null_point, 0))
c.append(area.Vertex(0, p2, null_point, 1))
c.append(area.Vertex(0, p3, null_point, 0))
c.append(area.Vertex(0, p0, null_point, 1))
a2 = area.Area()
a2.append(c)
a2.Intersect(a)
make_zig(a2, y0, y, zig_unidirectional)
if zig_unidirectional == False:
rightward_for_zigs = (rightward_for_zigs == False)
reorder_zigs()
def pocket(a,tool_radius, extra_offset, stepover, depthparams, from_center, keep_tool_down_if_poss, use_zig_zag, zig_angle, zig_unidirectional = False,start_point=None, cut_mode = 'conventional'):
global tool_radius_for_pocket
global area_for_feed_possible
#if len(a.getCurves()) > 1:
# for crv in a.getCurves():
# ar = area.Area()
# ar.append(crv)
# pocket(ar, tool_radius, extra_offset, rapid_safety_space, start_depth, final_depth, stepover, stepdown, clearance_height, from_center, keep_tool_down_if_poss, use_zig_zag, zig_angle, zig_unidirectional)
# return
tool_radius_for_pocket = tool_radius
if keep_tool_down_if_poss:
area_for_feed_possible = area.Area(a)
area_for_feed_possible.Offset(extra_offset - 0.01)
use_internal_function = (area.holes_linked() == False) # use internal function, if area module is the Clipper library
if use_internal_function:
curve_list = a.MakePocketToolpath(tool_radius, extra_offset, stepover, from_center, use_zig_zag, zig_angle)
else:
global sin_angle_for_zigs
global cos_angle_for_zigs
global sin_minus_angle_for_zigs
global cos_minus_angle_for_zigs
radians_angle = zig_angle * math.pi / 180
sin_angle_for_zigs = math.sin(-radians_angle)
cos_angle_for_zigs = math.cos(-radians_angle)
sin_minus_angle_for_zigs = math.sin(radians_angle)
cos_minus_angle_for_zigs = math.cos(radians_angle)
arealist = list()
a_offset = area.Area(a)
current_offset = tool_radius + extra_offset
a_offset.Offset(current_offset)
do_recursive = True
if use_zig_zag:
zigzag(a_offset, stepover, zig_unidirectional)
curve_list = curve_list_for_zigs
else:
if do_recursive:
recur(arealist, a_offset, stepover, from_center)
else:
while(a_offset.num_curves() > 0):
if from_center:
arealist.insert(0, a_offset)
else:
arealist.append(a_offset)
current_offset = current_offset + stepover
a_offset = area.Area(a)
a_offset.Offset(current_offset)
curve_list = get_curve_list(arealist, cut_mode == 'climb')
depths = depthparams.get_depths()
current_start_depth = depthparams.start_depth
if start_point==None:
for depth in depths:
cut_curvelist1(curve_list, depthparams.rapid_safety_space, current_start_depth, depth, depthparams.clearance_height, keep_tool_down_if_poss)
current_start_depth = depth
else:
for depth in depths:
cut_curvelist2(curve_list, depthparams.rapid_safety_space, current_start_depth, depth, depthparams.clearance_height, keep_tool_down_if_poss, start_point)
current_start_depth = depth
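# Illustrative call sketch (added; parameter values and the depthparams object
# are assumptions supplied by the HeeksCNC post-processor):
#
#   pocket(a, tool_radius=3.0, extra_offset=0.0, stepover=2.0,
#          depthparams=depthparams, from_center=True,
#          keep_tool_down_if_poss=True, use_zig_zag=False, zig_angle=0.0)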
|
bsd-3-clause
|
Donkyhotay/MoonPy
|
twisted/persisted/dirdbm.py
|
61
|
10317
|
# -*- test-case-name: twisted.test.test_dirdbm -*-
#
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
DBM-style interface to a directory.
Each key is stored as a single file. This is not expected to be very fast or
efficient, but it's good for easy debugging.
DirDBMs are *not* thread-safe; they should only be accessed by one thread at
a time.
No files should be placed in the working directory of a DirDBM save those
created by the DirDBM itself!
Maintainer: Itamar Shtull-Trauring
"""
import os
import types
import base64
import glob
try:
import cPickle as pickle
except ImportError:
import pickle
try:
_open
except NameError:
_open = open
class DirDBM:
"""A directory with a DBM interface.
This class presents a hash-like interface to a directory of small,
flat files. It can only use strings as keys or values.
"""
def __init__(self, name):
"""
@type name: str
@param name: Base path to use for the directory storage.
"""
self.dname = os.path.abspath(name)
if not os.path.isdir(self.dname):
os.mkdir(self.dname)
else:
# Run recovery, in case we crashed. we delete all files ending
# with ".new". Then we find all files who end with ".rpl". If a
# corresponding file exists without ".rpl", we assume the write
# failed and delete the ".rpl" file. If only a ".rpl" exist we
# assume the program crashed right after deleting the old entry
# but before renaming the replacement entry.
#
# NOTE: '.' is NOT in the base64 alphabet!
for f in glob.glob(os.path.join(self.dname, "*.new")):
os.remove(f)
replacements = glob.glob(os.path.join(self.dname, "*.rpl"))
for f in replacements:
old = f[:-4]
if os.path.exists(old):
os.remove(f)
else:
os.rename(f, old)
def _encode(self, k):
"""Encode a key so it can be used as a filename.
"""
# NOTE: '_' is NOT in the base64 alphabet!
return base64.encodestring(k).replace('\n', '_').replace("/", "-")
def _decode(self, k):
"""Decode a filename to get the key.
"""
return base64.decodestring(k.replace('_', '\n').replace("-", "/"))
def _readFile(self, path):
"""Read in the contents of a file.
Override in subclasses to e.g. provide transparently encrypted dirdbm.
"""
f = _open(path, "rb")
s = f.read()
f.close()
return s
def _writeFile(self, path, data):
"""Write data to a file.
Override in subclasses to e.g. provide transparently encrypted dirdbm.
"""
f = _open(path, "wb")
f.write(data)
f.flush()
f.close()
def __len__(self):
"""
        @return: The number of key/value pairs in this DirDBM
"""
return len(os.listdir(self.dname))
def __setitem__(self, k, v):
"""
C{dirdbm[k] = v}
Create or modify a textfile in this directory
@type k: str
@param k: key to set
@type v: str
@param v: value to associate with C{k}
"""
assert type(k) == types.StringType, "DirDBM key must be a string"
assert type(v) == types.StringType, "DirDBM value must be a string"
k = self._encode(k)
# we create a new file with extension .new, write the data to it, and
# if the write succeeds delete the old file and rename the new one.
old = os.path.join(self.dname, k)
if os.path.exists(old):
new = old + ".rpl" # replacement entry
else:
new = old + ".new" # new entry
try:
self._writeFile(new, v)
except:
os.remove(new)
raise
else:
if os.path.exists(old): os.remove(old)
os.rename(new, old)
def __getitem__(self, k):
"""
C{dirdbm[k]}
Get the contents of a file in this directory as a string.
@type k: str
@param k: key to lookup
@return: The value associated with C{k}
@raise KeyError: Raised when there is no such key
"""
assert type(k) == types.StringType, "DirDBM key must be a string"
path = os.path.join(self.dname, self._encode(k))
try:
return self._readFile(path)
except:
raise KeyError, k
def __delitem__(self, k):
"""
C{del dirdbm[foo]}
Delete a file in this directory.
@type k: str
@param k: key to delete
@raise KeyError: Raised when there is no such key
"""
assert type(k) == types.StringType, "DirDBM key must be a string"
k = self._encode(k)
try: os.remove(os.path.join(self.dname, k))
except (OSError, IOError): raise KeyError(self._decode(k))
def keys(self):
"""
@return: a C{list} of filenames (keys).
"""
return map(self._decode, os.listdir(self.dname))
def values(self):
"""
@return: a C{list} of file-contents (values).
"""
vals = []
keys = self.keys()
for key in keys:
vals.append(self[key])
return vals
def items(self):
"""
@return: a C{list} of 2-tuples containing key/value pairs.
"""
items = []
keys = self.keys()
for key in keys:
items.append((key, self[key]))
return items
def has_key(self, key):
"""
@type key: str
@param key: The key to test
        @return: A true value if this dirdbm has the specified key, a false
        value otherwise.
"""
assert type(key) == types.StringType, "DirDBM key must be a string"
key = self._encode(key)
return os.path.isfile(os.path.join(self.dname, key))
def setdefault(self, key, value):
"""
@type key: str
@param key: The key to lookup
@param value: The value to associate with key if key is not already
associated with a value.
"""
if not self.has_key(key):
self[key] = value
return value
return self[key]
def get(self, key, default = None):
"""
@type key: str
@param key: The key to lookup
@param default: The value to return if the given key does not exist
@return: The value associated with C{key} or C{default} if not
C{self.has_key(key)}
"""
if self.has_key(key):
return self[key]
else:
return default
def __contains__(self, key):
"""
C{key in dirdbm}
@type key: str
@param key: The key to test
@return: A true value if C{self.has_key(key)}, a false value otherwise.
"""
assert type(key) == types.StringType, "DirDBM key must be a string"
key = self._encode(key)
return os.path.isfile(os.path.join(self.dname, key))
def update(self, dict):
"""
Add all the key/value pairs in C{dict} to this dirdbm. Any conflicting
keys will be overwritten with the values from C{dict}.
@type dict: mapping
@param dict: A mapping of key/value pairs to add to this dirdbm.
"""
for key, val in dict.items():
self[key]=val
def copyTo(self, path):
"""
Copy the contents of this dirdbm to the dirdbm at C{path}.
@type path: C{str}
@param path: The path of the dirdbm to copy to. If a dirdbm
exists at the destination path, it is cleared first.
@rtype: C{DirDBM}
@return: The dirdbm this dirdbm was copied to.
"""
path = os.path.abspath(path)
assert path != self.dname
d = self.__class__(path)
d.clear()
for k in self.keys():
d[k] = self[k]
return d
def clear(self):
"""
Delete all key/value pairs in this dirdbm.
"""
for k in self.keys():
del self[k]
def close(self):
"""
Close this dbm: no-op, for dbm-style interface compliance.
"""
def getModificationTime(self, key):
"""
Returns modification time of an entry.
@return: Last modification date (seconds since epoch) of entry C{key}
@raise KeyError: Raised when there is no such key
"""
assert type(key) == types.StringType, "DirDBM key must be a string"
path = os.path.join(self.dname, self._encode(key))
if os.path.isfile(path):
return os.path.getmtime(path)
else:
raise KeyError, key
class Shelf(DirDBM):
"""A directory with a DBM shelf interface.
This class presents a hash-like interface to a directory of small,
flat files. Keys must be strings, but values can be any given object.
"""
def __setitem__(self, k, v):
"""
C{shelf[foo] = bar}
Create or modify a textfile in this directory.
@type k: str
@param k: The key to set
@param v: The value to associate with C{key}
"""
v = pickle.dumps(v)
DirDBM.__setitem__(self, k, v)
def __getitem__(self, k):
"""
C{dirdbm[foo]}
Get and unpickle the contents of a file in this directory.
@type k: str
@param k: The key to lookup
@return: The value associated with the given key
@raise KeyError: Raised if the given key does not exist
"""
return pickle.loads(DirDBM.__getitem__(self, k))
def open(file, flag = None, mode = None):
"""
This is for 'anydbm' compatibility.
@param file: The parameter to pass to the DirDBM constructor.
@param flag: ignored
@param mode: ignored
"""
return DirDBM(file)
__all__ = ["open", "DirDBM", "Shelf"]
|
gpl-3.0
|
sakanaou/storm
|
storm-client/src/py/storm/constants.py
|
22
|
1061
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Autogenerated by Thrift Compiler (0.9.3)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:utf8strings
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from ttypes import *
|
apache-2.0
|
Abhinav117/pymtl
|
pisa/elf_test.py
|
4
|
1407
|
#=========================================================================
# elf_test.py
#=========================================================================
import elf
import os
import random
import struct
from SparseMemoryImage import SparseMemoryImage
#-------------------------------------------------------------------------
# test_basic
#-------------------------------------------------------------------------
def test_basic( tmpdir ):
# Create a sparse memory image
mem_image = SparseMemoryImage()
section_names = [ ".text", ".data" ]
for i in xrange(4):
section = SparseMemoryImage.Section()
section.name = section_names[ random.randint(0,1) ]
section.addr = i * 0x00001000
data_ints = [ random.randint(0,1000) for r in xrange(10) ]
data_bytes = bytearray()
for data_int in data_ints:
data_bytes.extend(struct.pack("<I",data_int))
section.data = data_bytes
mem_image.add_section( section )
# Write the sparse memory image to an ELF file
with tmpdir.join("elf-test").open('wb') as file_obj:
elf.elf_writer( mem_image, file_obj )
# Read the ELF file back into a new sparse memory image
mem_image_test = None
with tmpdir.join("elf-test").open('rb') as file_obj:
mem_image_test = elf.elf_reader( file_obj )
# Check that the original and new sparse memory images are equal
assert mem_image == mem_image_test
|
bsd-3-clause
|
justinjoy/cerbero
|
test/test_cerbero_build_build.py
|
23
|
3397
|
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import unittest
import os
from test.test_common import DummyConfig
from cerbero.build import build
class MakefilesBase(build.MakefilesBase):
srcdir = ''
build_dir = ''
def __init__(self, config):
self.config = config
build.MakefilesBase.__init__(self)
@build.modify_environment
def get_env_var(self, var):
if var not in os.environ:
return None
return os.environ[var]
@build.modify_environment
def get_env_var_nested(self, var):
return self.get_env_var(var)
class ModifyEnvTest(unittest.TestCase):
def setUp(self):
self.var = 'TEST_VAR'
self.val1 = 'test'
self.val2 = 'test2'
self.mk = MakefilesBase(DummyConfig())
def testAppendEnv(self):
os.environ[self.var] = self.val1
self.mk.append_env = {self.var: self.val2}
val = self.mk.get_env_var(self.var)
self.assertEquals(val, "%s %s" % (self.val1, self.val2))
def testAppendNonExistentEnv(self):
if self.var in os.environ:
del os.environ[self.var]
self.mk.append_env = {self.var: self.val2}
val = self.mk.get_env_var(self.var)
self.assertEquals(val, ' %s' % self.val2)
def testNewEnv(self):
os.environ[self.var] = self.val1
self.mk.new_env = {self.var: self.val2}
val = self.mk.get_env_var(self.var)
self.assertEquals(val, self.val2)
def testAppendAndNewEnv(self):
os.environ[self.var] = ''
self.mk.append_env = {self.var: self.val1}
self.mk.new_env = {self.var: self.val2}
val = self.mk.get_env_var(self.var)
self.assertEquals(val, self.val2)
def testSystemLibs(self):
os.environ['PKG_CONFIG_PATH'] = '/path/1'
os.environ['PKG_CONFIG_LIBDIR'] = '/path/2'
self.mk.config.allow_system_libs = True
self.mk.use_system_libs = True
val = self.mk.get_env_var('PKG_CONFIG_PATH')
self.assertEquals(val,'/path/2:/usr/lib/pkgconfig:'
'/usr/share/pkgconfig:/usr/lib/i386-linux-gnu/pkgconfig')
val = self.mk.get_env_var('PKG_CONFIG_LIBDIR')
self.assertEquals(val,'/path/2')
def testNestedModif(self):
os.environ[self.var] = self.val1
self.mk.append_env = {self.var: self.val2}
val = self.mk.get_env_var(self.var)
self.assertEquals(val, "%s %s" % (self.val1, self.val2))
val = self.mk.get_env_var_nested(self.var)
self.assertEquals(val, "%s %s" % (self.val1, self.val2))
|
lgpl-2.1
|
Yukinoshita47/Yuki-Chan-The-Auto-Pentest
|
Module/theHarvester/discovery/googlesets.py
|
23
|
1177
|
import string
import httplib
import sys
import myparser
import re
import time
class search_google_labs:
def __init__(self, list):
self.results = ""
self.totalresults = ""
self.server = "labs.google.com"
self.hostname = "labs.google.com"
self.userAgent = "(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6"
id = 0
self.set = ""
for x in list:
id += 1
if id == 1:
self.set = self.set + "q" + str(id) + "=" + str(x)
else:
self.set = self.set + "&q" + str(id) + "=" + str(x)
def do_search(self):
h = httplib.HTTP(self.server)
h.putrequest('GET', "/sets?hl=en&" + self.set)
h.putheader('Host', self.hostname)
h.putheader('User-agent', self.userAgent)
h.endheaders()
returncode, returnmsg, headers = h.getreply()
self.results = h.getfile().read()
self.totalresults += self.results
def get_set(self):
        rawres = myparser.parser(self.totalresults, self.list)
return rawres.set()
def process(self):
self.do_search()
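# Illustrative usage sketch (added; not part of the original module):
#
#   s = search_google_labs(['debian', 'ubuntu'])
#   s.process()
#   print s.get_set()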
|
mit
|
rhattersley/iris
|
lib/iris/tests/unit/fileformats/grib/load_convert/test_ellipsoid.py
|
13
|
3478
|
# (C) British Crown Copyright 2014 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test function :func:`iris.fileformats.grib._load_convert.ellipsoid`.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised
# before importing anything else.
import iris.tests as tests
import numpy.ma as ma
import iris.coord_systems as icoord_systems
from iris.exceptions import TranslationError
from iris.fileformats.grib._load_convert import ellipsoid
# Reference GRIB2 Code Table 3.2 - Shape of the Earth.
MDI = ma.masked
class Test(tests.IrisTest):
def test_shape_unsupported(self):
unsupported = [2, 4, 5, 8, 9, 10, MDI]
emsg = 'unsupported shape of the earth'
for shape in unsupported:
with self.assertRaisesRegexp(TranslationError, emsg):
ellipsoid(shape, MDI, MDI, MDI)
def test_spherical_default_supported(self):
cs_by_shape = {0: icoord_systems.GeogCS(6367470),
6: icoord_systems.GeogCS(6371229)}
for shape, expected in cs_by_shape.items():
result = ellipsoid(shape, MDI, MDI, MDI)
self.assertEqual(result, expected)
def test_spherical_shape_1_no_radius(self):
shape = 1
emsg = 'radius to be specified'
with self.assertRaisesRegexp(ValueError, emsg):
ellipsoid(shape, MDI, MDI, MDI)
def test_spherical_shape_1(self):
shape = 1
radius = 10
result = ellipsoid(shape, MDI, MDI, radius)
expected = icoord_systems.GeogCS(radius)
self.assertEqual(result, expected)
def test_oblate_shape_3_7_no_axes(self):
for shape in [3, 7]:
emsg = 'axis to be specified'
with self.assertRaisesRegexp(ValueError, emsg):
ellipsoid(shape, MDI, MDI, MDI)
def test_oblate_shape_3_7_no_major(self):
for shape in [3, 7]:
emsg = 'major axis to be specified'
with self.assertRaisesRegexp(ValueError, emsg):
ellipsoid(shape, MDI, 1, MDI)
def test_oblate_shape_3_7_no_minor(self):
for shape in [3, 7]:
emsg = 'minor axis to be specified'
with self.assertRaisesRegexp(ValueError, emsg):
ellipsoid(shape, 1, MDI, MDI)
def test_oblate_shape_3_7(self):
for shape in [3, 7]:
major, minor = 1, 10
scale = 1
result = ellipsoid(shape, major, minor, MDI)
if shape == 3:
# Convert km to m.
scale = 1000
expected = icoord_systems.GeogCS(major * scale, minor * scale)
self.assertEqual(result, expected)
if __name__ == '__main__':
tests.main()
|
lgpl-3.0
|
Giswater/giswater_qgis_plugin
|
map_tools/cad_api_info.py
|
1
|
3531
|
"""
This file is part of Giswater 3
The program is free software: you can redistribute it and/or modify it under the terms of the GNU
General Public License as published by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version.
"""
# -*- coding: utf-8 -*-
from qgis.core import QgsMapToPixel
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtGui import QCursor
from qgis.PyQt.QtWidgets import QAction
from .parent import ParentMapTool
from ..actions.api_cf import ApiCF
class CadApiInfo(ParentMapTool):
""" Button 37: Info """
def __init__(self, iface, settings, action, index_action):
""" Class constructor """
# Call ParentMapTool constructor
super(CadApiInfo, self).__init__(iface, settings, action, index_action)
self.index_action = index_action
self.tab_type = None
        # :var self.block_signal: set when 'signal_activate' is emitted from the info form, so another form is not opened
self.block_signal = False
def create_point(self, event):
x = event.pos().x()
y = event.pos().y()
try:
point = QgsMapToPixel.toMapCoordinates(self.canvas.getCoordinateTransform(), x, y)
except(TypeError, KeyError):
self.iface.actionPan().trigger()
return False
return point
""" QgsMapTools inherited event functions """
def keyPressEvent(self, event):
if event.key() == Qt.Key_Escape:
for rb in self.rubberband_list:
rb.reset()
self.info_cf.resetRubberbands()
self.action().trigger()
return
def canvasMoveEvent(self, event):
pass
def canvasReleaseEvent(self, event):
for rb in self.rubberband_list:
rb.reset()
if self.block_signal:
self.block_signal = False
return
self.info_cf = ApiCF(self.iface, self.settings, self.controller, self.controller.plugin_dir, self.tab_type)
self.info_cf.signal_activate.connect(self.reactivate_map_tool)
complet_result = None
if event.button() == Qt.LeftButton:
point = self.create_point(event)
if point is False:
return
complet_result, dialog = self.info_cf.open_form(point, tab_type=self.tab_type)
if complet_result is False:
return
elif event.button() == Qt.RightButton:
point = self.create_point(event)
if point is False:
return
self.info_cf.hilight_feature(point, rb_list=self.rubberband_list, tab_type=self.tab_type)
def reactivate_map_tool(self):
""" Reactivate tool """
self.block_signal = True
info_action = self.iface.mainWindow().findChild(QAction, 'map_tool_api_info_data')
info_action.trigger()
def activate(self):
# Check button
self.action().setChecked(True)
# Change map tool cursor
self.cursor = QCursor()
self.cursor.setShape(Qt.WhatsThisCursor)
self.canvas.setCursor(self.cursor)
self.rubberband_list = []
if self.index_action == '37':
self.tab_type = 'data'
elif self.index_action == '199':
self.tab_type = 'inp'
def deactivate(self):
for rb in self.rubberband_list:
rb.reset()
if hasattr(self, 'info_cf'):
self.info_cf.resetRubberbands()
ParentMapTool.deactivate(self)
|
gpl-3.0
|
MonicaHsu/truvaluation
|
venv/lib/python2.7/curses/__init__.py
|
108
|
1817
|
"""curses
The main package for curses support for Python. Normally used by importing
the package, and perhaps a particular module inside it.
import curses
from curses import textpad
curses.initscr()
...
"""
__revision__ = "$Id$"
from _curses import *
from curses.wrapper import wrapper
import os as _os
import sys as _sys
# Some constants, most notably the ACS_* ones, are only added to the C
# _curses module's dictionary after initscr() is called. (Some
# versions of SGI's curses don't define values for those constants
# until initscr() has been called.) This wrapper function calls the
# underlying C initscr(), and then copies the constants from the
# _curses module to the curses package's dictionary. Don't do 'from
# curses import *' if you'll be needing the ACS_* constants.
def initscr():
import _curses, curses
# we call setupterm() here because it raises an error
# instead of calling exit() in error cases.
setupterm(term=_os.environ.get("TERM", "unknown"),
fd=_sys.__stdout__.fileno())
stdscr = _curses.initscr()
for key, value in _curses.__dict__.items():
if key[0:4] == 'ACS_' or key in ('LINES', 'COLS'):
setattr(curses, key, value)
return stdscr
# This is a similar wrapper for start_color(), which adds the COLORS and
# COLOR_PAIRS variables which are only available after start_color() is
# called.
def start_color():
import _curses, curses
retval = _curses.start_color()
if hasattr(_curses, 'COLORS'):
curses.COLORS = _curses.COLORS
if hasattr(_curses, 'COLOR_PAIRS'):
curses.COLOR_PAIRS = _curses.COLOR_PAIRS
return retval
# Import Python has_key() implementation if _curses doesn't contain has_key()
try:
has_key
except NameError:
from has_key import has_key
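# Illustrative usage sketch (added; not part of the package): the wrapper()
# helper imported above handles initscr()/endwin() and terminal restoration:
#
#   def main(stdscr):
#       stdscr.addstr(0, 0, "hello from curses")
#       stdscr.refresh()
#       stdscr.getkey()
#
#   wrapper(main)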
|
mit
|
abdulbaqi/quranf
|
venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/poolmanager.py
|
550
|
8977
|
# urllib3/poolmanager.py
# Copyright 2008-2014 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
try: # Python 3
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import port_by_scheme
from .request import RequestMethods
from .util import parse_url
__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
pool_classes_by_scheme = {
'http': HTTPConnectionPool,
'https': HTTPSConnectionPool,
}
log = logging.getLogger(__name__)
SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
'ssl_version')
class PoolManager(RequestMethods):
"""
Allows for arbitrary requests while transparently keeping track of
necessary connection pools for you.
:param num_pools:
Number of connection pools to cache before discarding the least
recently used pool.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param \**connection_pool_kw:
Additional parameters are used to create fresh
:class:`urllib3.connectionpool.ConnectionPool` instances.
Example: ::
>>> manager = PoolManager(num_pools=2)
>>> r = manager.request('GET', 'http://google.com/')
>>> r = manager.request('GET', 'http://google.com/mail')
>>> r = manager.request('GET', 'http://yahoo.com/')
>>> len(manager.pools)
2
"""
proxy = None
def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
RequestMethods.__init__(self, headers)
self.connection_pool_kw = connection_pool_kw
self.pools = RecentlyUsedContainer(num_pools,
dispose_func=lambda p: p.close())
def _new_pool(self, scheme, host, port):
"""
Create a new :class:`ConnectionPool` based on host, port and scheme.
This method is used to actually create the connection pools handed out
by :meth:`connection_from_url` and companion methods. It is intended
to be overridden for customization.
"""
pool_cls = pool_classes_by_scheme[scheme]
kwargs = self.connection_pool_kw
if scheme == 'http':
kwargs = self.connection_pool_kw.copy()
for kw in SSL_KEYWORDS:
kwargs.pop(kw, None)
return pool_cls(host, port, **kwargs)
def clear(self):
"""
Empty our store of pools and direct them all to close.
This will not affect in-flight connections, but they will not be
re-used after completion.
"""
self.pools.clear()
def connection_from_host(self, host, port=None, scheme='http'):
"""
Get a :class:`ConnectionPool` based on the host, port, and scheme.
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``.
"""
scheme = scheme or 'http'
port = port or port_by_scheme.get(scheme, 80)
pool_key = (scheme, host, port)
with self.pools.lock:
# If the scheme, host, or port doesn't match existing open
# connections, open a new ConnectionPool.
pool = self.pools.get(pool_key)
if pool:
return pool
# Make a fresh ConnectionPool of the desired type
pool = self._new_pool(scheme, host, port)
self.pools[pool_key] = pool
return pool
def connection_from_url(self, url):
"""
Similar to :func:`urllib3.connectionpool.connection_from_url` but
doesn't pass any additional parameters to the
:class:`urllib3.connectionpool.ConnectionPool` constructor.
Additional parameters are taken from the :class:`.PoolManager`
constructor.
"""
u = parse_url(url)
return self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
def urlopen(self, method, url, redirect=True, **kw):
"""
Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
u = parse_url(url)
conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
kw['assert_same_host'] = False
kw['redirect'] = False
if 'headers' not in kw:
kw['headers'] = self.headers
if self.proxy is not None and u.scheme == "http":
response = conn.urlopen(method, url, **kw)
else:
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
# Support relative URLs for redirecting.
redirect_location = urljoin(url, redirect_location)
# RFC 2616, Section 10.3.4
if response.status == 303:
method = 'GET'
log.info("Redirecting %s -> %s" % (url, redirect_location))
kw['retries'] = kw.get('retries', 3) - 1 # Persist retries countdown
kw['redirect'] = redirect
return self.urlopen(method, redirect_location, **kw)
class ProxyManager(PoolManager):
"""
Behaves just like :class:`PoolManager`, but sends all requests through
the defined proxy, using the CONNECT method for HTTPS URLs.
:param proxy_url:
The URL of the proxy to be used.
:param proxy_headers:
        A dictionary containing headers that will be sent to the proxy. In case
of HTTP they are being sent with each request, while in the
HTTPS/CONNECT case they are sent only once. Could be used for proxy
authentication.
Example:
>>> proxy = urllib3.ProxyManager('http://localhost:3128/')
>>> r1 = proxy.request('GET', 'http://google.com/')
>>> r2 = proxy.request('GET', 'http://httpbin.org/')
>>> len(proxy.pools)
1
>>> r3 = proxy.request('GET', 'https://httpbin.org/')
>>> r4 = proxy.request('GET', 'https://twitter.com/')
>>> len(proxy.pools)
3
"""
def __init__(self, proxy_url, num_pools=10, headers=None,
proxy_headers=None, **connection_pool_kw):
if isinstance(proxy_url, HTTPConnectionPool):
proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
proxy_url.port)
proxy = parse_url(proxy_url)
if not proxy.port:
port = port_by_scheme.get(proxy.scheme, 80)
proxy = proxy._replace(port=port)
self.proxy = proxy
self.proxy_headers = proxy_headers or {}
assert self.proxy.scheme in ("http", "https"), \
'Not supported proxy scheme %s' % self.proxy.scheme
connection_pool_kw['_proxy'] = self.proxy
connection_pool_kw['_proxy_headers'] = self.proxy_headers
super(ProxyManager, self).__init__(
num_pools, headers, **connection_pool_kw)
def connection_from_host(self, host, port=None, scheme='http'):
if scheme == "https":
return super(ProxyManager, self).connection_from_host(
host, port, scheme)
return super(ProxyManager, self).connection_from_host(
self.proxy.host, self.proxy.port, self.proxy.scheme)
def _set_proxy_headers(self, url, headers=None):
"""
Sets headers needed by proxies: specifically, the Accept and Host
headers. Only sets headers not provided by the user.
"""
headers_ = {'Accept': '*/*'}
netloc = parse_url(url).netloc
if netloc:
headers_['Host'] = netloc
if headers:
headers_.update(headers)
return headers_
def urlopen(self, method, url, redirect=True, **kw):
"Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
u = parse_url(url)
if u.scheme == "http":
# For proxied HTTPS requests, httplib sets the necessary headers
# on the CONNECT to the proxy. For HTTP, we'll definitely
# need to set 'Host' at the very least.
kw['headers'] = self._set_proxy_headers(url, kw.get('headers',
self.headers))
return super(ProxyManager, self).urlopen(method, url, redirect, **kw)
def proxy_from_url(url, **kw):
return ProxyManager(proxy_url=url, **kw)
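# Illustrative usage sketch (added; not part of the module; the proxy URL is
# an assumption):
#
#   manager = proxy_from_url('http://localhost:3128/')
#   r = manager.request('GET', 'http://example.com/')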
|
mit
|
hmen89/odoo
|
addons/mail/static/scripts/openerp_mailgate.py
|
316
|
7640
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-TODAY OpenERP S.A. (http://www.openerp.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
"""
openerp_mailgate.py
"""
import cgitb
import time
import optparse
import sys
import xmlrpclib
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
from email import Encoders
class DefaultConfig(object):
"""
Default configuration
"""
OPENERP_DEFAULT_USER_ID = 1
OPENERP_DEFAULT_PASSWORD = 'admin'
OPENERP_HOSTNAME = 'localhost'
OPENERP_PORT = 8069
OPENERP_DEFAULT_DATABASE = 'openerp'
MAIL_ERROR = '[email protected]'
MAIL_SERVER = 'smtp.example.com'
MAIL_SERVER_PORT = 25
MAIL_ADMINS = ('[email protected]',)
config = DefaultConfig()
def send_mail(_from_, to_, subject, text, files=None, server=config.MAIL_SERVER, port=config.MAIL_SERVER_PORT):
assert isinstance(to_, (list, tuple))
if files is None:
files = []
msg = MIMEMultipart()
msg['From'] = _from_
msg['To'] = COMMASPACE.join(to_)
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = subject
msg.attach( MIMEText(text) )
for file_name, file_content in files:
part = MIMEBase('application', "octet-stream")
part.set_payload( file_content )
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"'
% file_name)
msg.attach(part)
smtp = smtplib.SMTP(server, port=port)
smtp.sendmail(_from_, to_, msg.as_string() )
smtp.close()
class RPCProxy(object):
def __init__(self, uid, passwd,
host=config.OPENERP_HOSTNAME,
port=config.OPENERP_PORT,
path='object',
dbname=config.OPENERP_DEFAULT_DATABASE):
self.rpc = xmlrpclib.ServerProxy('http://%s:%s/xmlrpc/%s' % (host, port, path), allow_none=True)
self.user_id = uid
self.passwd = passwd
self.dbname = dbname
def __call__(self, *request, **kwargs):
return self.rpc.execute(self.dbname, self.user_id, self.passwd, *request, **kwargs)
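# Usage sketch (illustrative; credentials are hypothetical): an RPCProxy
# instance is called like a function and forwards to
# execute(dbname, uid, passwd, model, method, *args) on the server.
#
#     rpc = RPCProxy(1, 'admin', dbname='openerp')
#     lead_ids = rpc('crm.lead', 'search', [])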
class EmailParser(object):
def __init__(self, uid, password, dbname, host, port, model=False, email_default=False):
self.rpc = RPCProxy(uid, password, host=host, port=port, dbname=dbname)
if model:
try:
self.model_id = int(model)
self.model = str(model)
            except ValueError:
                # `model` is a model name like 'crm.lead'; resolve its id via RPC.
self.model_id = self.rpc('ir.model', 'search', [('model', '=', model)])[0]
self.model = str(model)
self.email_default = email_default
def parse(self, message, custom_values=None, save_original=None):
# pass message as bytes because we don't know its encoding until we parse its headers
# and hence can't convert it to utf-8 for transport
return self.rpc('mail.thread',
'message_process',
self.model,
xmlrpclib.Binary(message),
custom_values or {},
save_original or False)
def configure_parser():
parser = optparse.OptionParser(usage='usage: %prog [options]', version='%prog v1.1')
group = optparse.OptionGroup(parser, "Note",
"This program parse a mail from standard input and communicate "
"with the Odoo server for case management in the CRM module.")
parser.add_option_group(group)
parser.add_option("-u", "--user", dest="userid",
help="Odoo user id to connect with",
default=config.OPENERP_DEFAULT_USER_ID, type='int')
parser.add_option("-p", "--password", dest="password",
help="Odoo user password",
default=config.OPENERP_DEFAULT_PASSWORD)
parser.add_option("-o", "--model", dest="model",
help="Name or ID of destination model",
default="crm.lead")
parser.add_option("-m", "--default", dest="default",
help="Admin email for error notifications.",
default=None)
parser.add_option("-d", "--dbname", dest="dbname",
help="Odoo database name (default: %default)",
default=config.OPENERP_DEFAULT_DATABASE)
parser.add_option("--host", dest="host",
help="Odoo Server hostname",
default=config.OPENERP_HOSTNAME)
parser.add_option("--port", dest="port",
help="Odoo Server XML-RPC port number",
default=config.OPENERP_PORT)
parser.add_option("--custom-values", dest="custom_values",
help="Dictionary of extra values to pass when creating records",
default=None)
parser.add_option("-s", dest="save_original",
action="store_true",
help="Keep a full copy of the email source attached to each message",
default=False)
return parser
def main():
"""
Receive the email via the stdin and send it to the OpenERP Server
"""
parser = configure_parser()
(options, args) = parser.parse_args()
email_parser = EmailParser(options.userid,
options.password,
options.dbname,
options.host,
options.port,
model=options.model,
email_default= options.default)
msg_txt = sys.stdin.read()
custom_values = {}
try:
        custom_values = dict(eval(options.custom_values or "{}"))
    except Exception:
import traceback
traceback.print_exc()
try:
email_parser.parse(msg_txt, custom_values, options.save_original or False)
except Exception:
msg = '\n'.join([
'parameters',
'==========',
'%r' % (options,),
'traceback',
'=========',
'%s' % (cgitb.text(sys.exc_info())),
])
subject = '[Odoo]:ERROR: Mailgateway - %s' % time.strftime('%Y-%m-%d %H:%M:%S')
send_mail(
config.MAIL_ERROR,
config.MAIL_ADMINS,
subject, msg, files=[('message.txt', msg_txt)]
)
sys.stderr.write("Failed to deliver email to Odoo Server, sending error notification to %s\n" % config.MAIL_ADMINS)
if __name__ == '__main__':
main()
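    # Deployment sketch (an assumption, not from the original file): the script
    # reads one message from standard input, so it is typically wired up as a
    # mail alias or invoked by hand for testing. The database name and saved
    # message file are hypothetical.
    #
    #     python openerp_mailgate.py -u 1 -p admin -d mydb -o crm.lead < message.eml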
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
imsut/commons
|
src/python/twitter/pants/tasks/eclipse_gen.py
|
1
|
8147
|
# ==================================================================================================
# Copyright 2012 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import os
import pkgutil
from collections import defaultdict
from twitter.common.collections import OrderedSet
from twitter.common.dirutil import safe_mkdir, safe_open
from twitter.pants import get_buildroot
from twitter.pants.base.generator import TemplateData, Generator
from twitter.pants.tasks.ide_gen import IdeGen
__author__ = 'John Sirois'
_TEMPLATE_BASEDIR = os.path.join('eclipse', 'templates')
_VERSIONS = {
'3.5': '3.7', # 3.5-3.7 are .project/.classpath compatible
'3.6': '3.7',
'3.7': '3.7',
}
_SETTINGS = (
'org.eclipse.core.resources.prefs',
'org.eclipse.jdt.core.prefs',
'org.eclipse.jdt.ui.prefs',
)
class EclipseGen(IdeGen):
@classmethod
def setup_parser(cls, option_group, args, mkflag):
IdeGen.setup_parser(option_group, args, mkflag)
supported_versions = sorted(list(_VERSIONS.keys()))
option_group.add_option(mkflag("eclipse-version"), dest = "eclipse_gen_version",
default = '3.6', type = "choice", choices = supported_versions,
help = "[%%default] The Eclipse version the project "
"configuration should be generated for; can be one of: "
"%s" % supported_versions)
def __init__(self, context):
IdeGen.__init__(self, context)
eclipse_version = _VERSIONS[context.options.eclipse_gen_version]
self.project_template = os.path.join(_TEMPLATE_BASEDIR, 'project-%s.mustache' % eclipse_version)
self.classpath_template = os.path.join(_TEMPLATE_BASEDIR, 'classpath-%s.mustache' % eclipse_version)
self.apt_template = os.path.join(_TEMPLATE_BASEDIR, 'factorypath-%s.mustache' % eclipse_version)
self.pydev_template = os.path.join(_TEMPLATE_BASEDIR, 'pydevproject-%s.mustache' % eclipse_version)
self.debug_template = os.path.join(_TEMPLATE_BASEDIR, 'debug-launcher-%s.mustache' % eclipse_version)
self.project_filename = os.path.join(self.cwd, '.project')
self.classpath_filename = os.path.join(self.cwd, '.classpath')
self.apt_filename = os.path.join(self.cwd, '.factorypath')
self.pydev_filename = os.path.join(self.cwd, '.pydevproject')
def generate_project(self, project):
def linked_folder_id(path):
return path.replace(os.path.sep, '.')
def base_path(source_set):
return os.path.join(source_set.root_dir, source_set.source_base)
source_bases = {}
def add_source_base(path, id):
source_bases[path] = id
for source_set in project.sources:
add_source_base(base_path(source_set), linked_folder_id(source_set.source_base))
if project.has_python:
for source_set in project.py_sources:
add_source_base(base_path(source_set), linked_folder_id(source_set.source_base))
for source_set in project.py_libs:
add_source_base(base_path(source_set), linked_folder_id(source_set.source_base))
def create_source_template(base, includes=None, excludes=None):
return TemplateData(
base=source_bases[base],
includes=includes or [],
excludes=excludes or [],
joined_includes = '|'.join(includes) if includes else '',
joined_excludes = '|'.join(excludes) if excludes else '',
)
def create_sourcepath(base, sources):
def normalize_path_pattern(path):
return '%s/' % path if not path.endswith('/') else path
includes = [normalize_path_pattern(src_set.path) for src_set in sources if src_set.path]
excludes = []
for source_set in sources:
excludes.extend(normalize_path_pattern(exclude) for exclude in source_set.excludes)
return create_source_template(base, includes, excludes)
pythonpaths = []
if project.has_python:
for source_set in project.py_sources:
pythonpaths.append(create_source_template(base_path(source_set)))
for source_set in project.py_libs:
lib_path = source_set.path if source_set.path.endswith('.egg') else '%s/' % source_set.path
pythonpaths.append(create_source_template(base_path(source_set), includes=[lib_path]))
source_bases_list = [{'path': path, 'id': id} for (path, id) in source_bases.items()]
configured_project = TemplateData(
name=self.project_name,
has_python=project.has_python,
has_scala=project.has_scala and not project.skip_scala,
source_bases=source_bases_list,
pythonpaths=pythonpaths,
debug_port=project.debug_port,
)
outdir = os.path.abspath(os.path.join(self.work_dir, 'bin'))
safe_mkdir(outdir)
source_sets = defaultdict(OrderedSet) # base -> source_set
for source_set in project.sources:
source_sets[base_path(source_set)].add(source_set)
sourcepaths = [create_sourcepath(base, sources) for base, sources in source_sets.items()]
libs = []
def add_jarlibs(classpath_entries):
for classpath_entry in classpath_entries:
jar = classpath_entry.jar
source_jar = classpath_entry.source_jar
libs.append(TemplateData(
jar=os.path.relpath(jar, self.cwd),
source_jar=os.path.relpath(source_jar, self.cwd) if source_jar else None
))
add_jarlibs(project.internal_jars)
add_jarlibs(project.external_jars)
configured_classpath = TemplateData(
sourcepaths=sourcepaths,
has_tests=project.has_tests,
libs=libs,
      has_scala=project.has_scala,
outdir=os.path.relpath(outdir, get_buildroot()),
)
with safe_open(self.project_filename, 'w') as output:
Generator(pkgutil.get_data(__name__, self.project_template),
project=configured_project).write(output)
with safe_open(self.classpath_filename, 'w') as output:
Generator(pkgutil.get_data(__name__, self.classpath_template),
classpath=configured_classpath).write(output)
debug_filename = os.path.join(self.work_dir, 'Debug on port %d.launch' % project.debug_port)
with safe_open(debug_filename, 'w') as output:
Generator(pkgutil.get_data(__name__, self.debug_template),
project=configured_project).write(output)
for resource in _SETTINGS:
with safe_open(os.path.join(self.cwd, '.settings', resource), 'w') as prefs:
prefs.write(pkgutil.get_data(__name__, os.path.join('eclipse', 'files', resource)))
factorypath = TemplateData(
project_name=self.project_name,
# The easiest way to make sure eclipse sees all annotation processors is to put all libs on
# the apt factorypath - this does not seem to hurt eclipse performance in any noticeable way.
jarpaths=["('%s', %s)" % (lib.jar, "'%s'" % lib.source_jar if lib.source_jar else 'None') for lib in libs]
)
with open(self.apt_filename, 'w') as output:
Generator(pkgutil.get_data(__name__, self.apt_template),
                factorypath=factorypath).write(output)
if project.has_python:
with safe_open(self.pydev_filename, 'w') as output:
Generator(pkgutil.get_data(__name__, self.pydev_template),
project=configured_project).write(output)
else:
if os.path.exists(self.pydev_filename):
os.remove(self.pydev_filename)
print('\nGenerated project at %s%s' % (self.work_dir, os.sep))
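    # Illustrative sketch (not from the original file): the render pattern used
    # throughout generate_project, shown standalone. The inline template string
    # is hypothetical; Generator and TemplateData are the imports above.
    #
    #     data = TemplateData(name='demo', has_python=True)
    #     Generator('name={{project.name}} python={{project.has_python}}',
    #               project=data).write(sys.stdout)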
|
apache-2.0
|
rabipanda/tensorflow
|
tensorflow/python/client/timeline_test.py
|
25
|
8552
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.Timeline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.client import timeline
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class TimelineTest(test.TestCase):
def _validateTrace(self, chrome_trace_format):
# Check that the supplied string is valid JSON.
trace = json.loads(chrome_trace_format)
# It should have a top-level key containing events.
self.assertTrue('traceEvents' in trace)
# Every event in the list should have a 'ph' field.
for event in trace['traceEvents']:
self.assertTrue('ph' in event)
def testSimpleTimeline(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
sess.run(constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
tl = timeline.Timeline(run_metadata.step_stats)
ctf = tl.generate_chrome_trace_format()
self._validateTrace(ctf)
def testTimelineCpu(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with self.test_session(use_gpu=False) as sess:
const1 = constant_op.constant(1.0, name='const1')
const2 = constant_op.constant(2.0, name='const2')
result = math_ops.add(const1, const2) + const1 * const2
sess.run(result, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
step_stats = run_metadata.step_stats
devices = [d.device for d in step_stats.dev_stats]
self.assertTrue('/job:localhost/replica:0/task:0/device:CPU:0' in devices)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format()
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_dataflow=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(
show_memory=False, show_dataflow=False)
self._validateTrace(ctf)
def testTimelineGpu(self):
if not test.is_gpu_available(cuda_only=True):
return
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with self.test_session(force_gpu=True) as sess:
const1 = constant_op.constant(1.0, name='const1')
const2 = constant_op.constant(2.0, name='const2')
result = math_ops.add(const1, const2) + const1 * const2
sess.run(result, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
step_stats = run_metadata.step_stats
devices = [d.device for d in step_stats.dev_stats]
self.assertTrue('/job:localhost/replica:0/task:0/device:GPU:0' in devices)
self.assertTrue('/device:GPU:0/stream:all' in devices)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format()
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_dataflow=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(
show_memory=False, show_dataflow=False)
self._validateTrace(ctf)
def testTimelineWithRPCs(self):
"""Tests that Timeline can handle RPC tracing."""
metadata = config_pb2.RunMetadata()
step_stats = metadata.step_stats
dev_stats = step_stats.dev_stats.add()
dev_stats.device = '/job:worker/replica:0/task:0/cpu:0'
node_stats = dev_stats.node_stats.add()
node_stats.node_name = 'RecvTensor'
node_stats.all_start_micros = 12345
node_stats.op_end_rel_micros = 42
node_stats.timeline_label = ('[1024B] edge_160_conv2/biases/read from '
'/job:ps/replica:0/task:3/cpu:0 to '
'/job:worker/replica:0/task:0/cpu:0')
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format()
self._validateTrace(ctf)
def testAnalysisAndAllocations(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
config = config_pb2.ConfigProto(device_count={'CPU': 3})
with session.Session(config=config) as sess:
with ops.device('/cpu:0'):
num1 = variables.Variable(1.0, name='num1')
with ops.device('/cpu:1'):
num2 = variables.Variable(2.0, name='num2')
with ops.device('/cpu:2'):
result = num1 + num2 + num1 * num2
sess.run(variables.global_variables_initializer())
sess.run(result, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
tl = timeline.Timeline(run_metadata.step_stats)
step_analysis = tl.analyze_step_stats()
ctf = step_analysis.chrome_trace.format_to_string()
self._validateTrace(ctf)
maximums = step_analysis.allocator_maximums
self.assertTrue('cpu' in maximums)
cpu_max = maximums[
'cuda_host_bfc'] if 'cuda_host_bfc' in maximums else maximums['cpu']
# At least num1 + num2, both float32s (4 bytes each)
self.assertGreater(cpu_max.num_bytes, 8)
self.assertGreater(cpu_max.timestamp, 0)
self.assertTrue('num1' in cpu_max.tensors or 'num1/read' in cpu_max.tensors)
self.assertTrue('num2' in cpu_max.tensors or 'num2/read' in cpu_max.tensors)
def testManyCPUs(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
config = config_pb2.ConfigProto(device_count={'CPU': 3})
with session.Session(config=config) as sess:
with ops.device('/cpu:0'):
num1 = variables.Variable(1.0, name='num1')
with ops.device('/cpu:1'):
num2 = variables.Variable(2.0, name='num2')
with ops.device('/cpu:2'):
result = num1 + num2 + num1 * num2
sess.run(variables.global_variables_initializer())
sess.run(result, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
step_stats = run_metadata.step_stats
devices = [d.device for d in step_stats.dev_stats]
self.assertTrue('/job:localhost/replica:0/task:0/device:CPU:0' in devices)
self.assertTrue('/job:localhost/replica:0/task:0/device:CPU:1' in devices)
self.assertTrue('/job:localhost/replica:0/task:0/device:CPU:2' in devices)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format()
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_dataflow=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=False)
self._validateTrace(ctf)
tl = timeline.Timeline(step_stats)
ctf = tl.generate_chrome_trace_format(
show_memory=False, show_dataflow=False)
self._validateTrace(ctf)
if __name__ == '__main__':
test.main()
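# Usage sketch (illustrative, mirroring the pattern the tests above exercise):
# trace a run, then dump a Chrome trace viewable at chrome://tracing. The
# session variable `sess`, the fetches, and the output path are hypothetical.
#
#     run_options = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
#     run_metadata = config_pb2.RunMetadata()
#     sess.run(fetches, options=run_options, run_metadata=run_metadata)
#     ctf = timeline.Timeline(run_metadata.step_stats).generate_chrome_trace_format()
#     with open('/tmp/timeline.json', 'w') as f:
#         f.write(ctf)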
|
apache-2.0
|
avneesh91/django
|
django/contrib/admin/options.py
|
10
|
82904
|
import copy
import json
import operator
from collections import OrderedDict
from functools import partial, reduce, update_wrapper
from urllib.parse import quote as urlquote
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.admin import helpers, widgets
from django.contrib.admin.checks import (
BaseModelAdminChecks, InlineModelAdminChecks, ModelAdminChecks,
)
from django.contrib.admin.exceptions import DisallowedModelAdminToField
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.utils import (
NestedObjects, construct_change_message, flatten_fieldsets,
get_deleted_objects, lookup_needs_distinct, model_format_dict,
model_ngettext, quote, unquote,
)
from django.contrib.auth import get_permission_codename
from django.core.exceptions import (
FieldDoesNotExist, FieldError, PermissionDenied, ValidationError,
)
from django.core.paginator import Paginator
from django.db import models, router, transaction
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import BLANK_CHOICE_DASH
from django.forms.formsets import DELETION_FIELD_NAME, all_valid
from django.forms.models import (
BaseInlineFormSet, inlineformset_factory, modelform_defines_fields,
modelform_factory, modelformset_factory,
)
from django.forms.widgets import CheckboxSelectMultiple, SelectMultiple
from django.http import HttpResponseRedirect
from django.http.response import HttpResponseBase
from django.template.response import SimpleTemplateResponse, TemplateResponse
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils.text import capfirst, format_lazy, get_text_list
from django.utils.translation import gettext as _, ngettext
from django.views.decorators.csrf import csrf_protect
from django.views.generic import RedirectView
IS_POPUP_VAR = '_popup'
TO_FIELD_VAR = '_to_field'
HORIZONTAL, VERTICAL = 1, 2
def get_content_type_for_model(obj):
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level.
from django.contrib.contenttypes.models import ContentType
return ContentType.objects.get_for_model(obj, for_concrete_model=False)
def get_ul_class(radio_style):
return 'radiolist' if radio_style == VERTICAL else 'radiolist inline'
class IncorrectLookupParameters(Exception):
pass
# Defaults for formfield_overrides. ModelAdmin subclasses can change this
# by adding to ModelAdmin.formfield_overrides.
FORMFIELD_FOR_DBFIELD_DEFAULTS = {
models.DateTimeField: {
'form_class': forms.SplitDateTimeField,
'widget': widgets.AdminSplitDateTime
},
models.DateField: {'widget': widgets.AdminDateWidget},
models.TimeField: {'widget': widgets.AdminTimeWidget},
models.TextField: {'widget': widgets.AdminTextareaWidget},
models.URLField: {'widget': widgets.AdminURLFieldWidget},
models.IntegerField: {'widget': widgets.AdminIntegerFieldWidget},
models.BigIntegerField: {'widget': widgets.AdminBigIntegerFieldWidget},
models.CharField: {'widget': widgets.AdminTextInputWidget},
models.ImageField: {'widget': widgets.AdminFileWidget},
models.FileField: {'widget': widgets.AdminFileWidget},
models.EmailField: {'widget': widgets.AdminEmailInputWidget},
}
csrf_protect_m = method_decorator(csrf_protect)
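# Illustrative sketch (not part of this module): a project-level ModelAdmin
# subclass adding to formfield_overrides; BaseModelAdmin.__init__ below merges
# the entries with FORMFIELD_FOR_DBFIELD_DEFAULTS rather than replacing them.
# Article and RichTextEditorWidget are hypothetical.
#
#     from django.contrib import admin
#     from django.db import models
#
#     class ArticleAdmin(admin.ModelAdmin):
#         formfield_overrides = {
#             models.TextField: {'widget': RichTextEditorWidget},
#         }
#     admin.site.register(Article, ArticleAdmin)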
class BaseModelAdmin(metaclass=forms.MediaDefiningClass):
"""Functionality common to both ModelAdmin and InlineAdmin."""
raw_id_fields = ()
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
formfield_overrides = {}
readonly_fields = ()
ordering = None
view_on_site = True
show_full_result_count = True
checks_class = BaseModelAdminChecks
def check(self, **kwargs):
return self.checks_class().check(self, **kwargs)
def __init__(self):
# Merge FORMFIELD_FOR_DBFIELD_DEFAULTS with the formfield_overrides
# rather than simply overwriting.
overrides = copy.deepcopy(FORMFIELD_FOR_DBFIELD_DEFAULTS)
for k, v in self.formfield_overrides.items():
overrides.setdefault(k, {}).update(v)
self.formfield_overrides = overrides
def formfield_for_dbfield(self, db_field, request, **kwargs):
"""
Hook for specifying the form Field instance for a given database Field
instance.
If kwargs are given, they're passed to the form Field's constructor.
"""
# If the field specifies choices, we don't need to look for special
# admin widgets - we just need to use a select widget of some kind.
if db_field.choices:
return self.formfield_for_choice_field(db_field, request, **kwargs)
# ForeignKey or ManyToManyFields
if isinstance(db_field, models.ManyToManyField) or isinstance(db_field, models.ForeignKey):
# Combine the field kwargs with any options for formfield_overrides.
# Make sure the passed in **kwargs override anything in
# formfield_overrides because **kwargs is more specific, and should
# always win.
if db_field.__class__ in self.formfield_overrides:
kwargs = dict(self.formfield_overrides[db_field.__class__], **kwargs)
# Get the correct formfield.
if isinstance(db_field, models.ForeignKey):
formfield = self.formfield_for_foreignkey(db_field, request, **kwargs)
elif isinstance(db_field, models.ManyToManyField):
formfield = self.formfield_for_manytomany(db_field, request, **kwargs)
# For non-raw_id fields, wrap the widget with a wrapper that adds
# extra HTML -- the "add other" interface -- to the end of the
# rendered output. formfield can be None if it came from a
# OneToOneField with parent_link=True or a M2M intermediary.
if formfield and db_field.name not in self.raw_id_fields:
related_modeladmin = self.admin_site._registry.get(db_field.remote_field.model)
wrapper_kwargs = {}
if related_modeladmin:
wrapper_kwargs.update(
can_add_related=related_modeladmin.has_add_permission(request),
can_change_related=related_modeladmin.has_change_permission(request),
can_delete_related=related_modeladmin.has_delete_permission(request),
)
formfield.widget = widgets.RelatedFieldWidgetWrapper(
formfield.widget, db_field.remote_field, self.admin_site, **wrapper_kwargs
)
return formfield
# If we've got overrides for the formfield defined, use 'em. **kwargs
# passed to formfield_for_dbfield override the defaults.
for klass in db_field.__class__.mro():
if klass in self.formfield_overrides:
kwargs = dict(copy.deepcopy(self.formfield_overrides[klass]), **kwargs)
return db_field.formfield(**kwargs)
# For any other type of field, just call its formfield() method.
return db_field.formfield(**kwargs)
def formfield_for_choice_field(self, db_field, request, **kwargs):
"""
Get a form Field for a database Field that has declared choices.
"""
# If the field is named as a radio_field, use a RadioSelect
if db_field.name in self.radio_fields:
# Avoid stomping on custom widget/choices arguments.
if 'widget' not in kwargs:
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
if 'choices' not in kwargs:
kwargs['choices'] = db_field.get_choices(
include_blank=db_field.blank,
blank_choice=[('', _('None'))]
)
return db_field.formfield(**kwargs)
def get_field_queryset(self, db, db_field, request):
"""
If the ModelAdmin specifies ordering, the queryset should respect that
ordering. Otherwise don't specify the queryset, let the field decide
(return None in that case).
"""
related_admin = self.admin_site._registry.get(db_field.remote_field.model)
if related_admin is not None:
ordering = related_admin.get_ordering(request)
if ordering is not None and ordering != ():
return db_field.remote_field.model._default_manager.using(db).order_by(*ordering)
return None
def formfield_for_foreignkey(self, db_field, request, **kwargs):
"""
Get a form Field for a ForeignKey.
"""
db = kwargs.get('using')
if db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ForeignKeyRawIdWidget(db_field.remote_field, self.admin_site, using=db)
elif db_field.name in self.radio_fields:
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
kwargs['empty_label'] = _('None') if db_field.blank else None
if 'queryset' not in kwargs:
queryset = self.get_field_queryset(db, db_field, request)
if queryset is not None:
kwargs['queryset'] = queryset
return db_field.formfield(**kwargs)
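    # Illustrative sketch (hypothetical admin class): the widget branches above
    # are selected by declarations like these on a ModelAdmin subclass.
    #
    #     class BookAdmin(admin.ModelAdmin):
    #         raw_id_fields = ('publisher',)            # ForeignKeyRawIdWidget
    #         radio_fields = {'genre': admin.VERTICAL}  # AdminRadioSelect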
def formfield_for_manytomany(self, db_field, request, **kwargs):
"""
Get a form Field for a ManyToManyField.
"""
# If it uses an intermediary model that isn't auto created, don't show
# a field in admin.
if not db_field.remote_field.through._meta.auto_created:
return None
db = kwargs.get('using')
if db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.remote_field, self.admin_site, using=db)
elif db_field.name in (list(self.filter_vertical) + list(self.filter_horizontal)):
kwargs['widget'] = widgets.FilteredSelectMultiple(
db_field.verbose_name,
db_field.name in self.filter_vertical
)
if 'queryset' not in kwargs:
queryset = self.get_field_queryset(db, db_field, request)
if queryset is not None:
kwargs['queryset'] = queryset
form_field = db_field.formfield(**kwargs)
if isinstance(form_field.widget, SelectMultiple) and not isinstance(form_field.widget, CheckboxSelectMultiple):
msg = _('Hold down "Control", or "Command" on a Mac, to select more than one.')
help_text = form_field.help_text
form_field.help_text = format_lazy('{} {}', help_text, msg) if help_text else msg
return form_field
def get_view_on_site_url(self, obj=None):
if obj is None or not self.view_on_site:
return None
if callable(self.view_on_site):
return self.view_on_site(obj)
elif self.view_on_site and hasattr(obj, 'get_absolute_url'):
# use the ContentType lookup if view_on_site is True
return reverse('admin:view_on_site', kwargs={
'content_type_id': get_content_type_for_model(obj).pk,
'object_id': obj.pk
})
def get_empty_value_display(self):
"""
Return the empty_value_display set on ModelAdmin or AdminSite.
"""
try:
return mark_safe(self.empty_value_display)
except AttributeError:
return mark_safe(self.admin_site.empty_value_display)
def get_exclude(self, request, obj=None):
"""
Hook for specifying exclude.
"""
return self.exclude
def get_fields(self, request, obj=None):
"""
Hook for specifying fields.
"""
return self.fields
def get_fieldsets(self, request, obj=None):
"""
Hook for specifying fieldsets.
"""
if self.fieldsets:
return self.fieldsets
return [(None, {'fields': self.get_fields(request, obj)})]
def get_ordering(self, request):
"""
Hook for specifying field ordering.
"""
return self.ordering or () # otherwise we might try to *None, which is bad ;)
def get_readonly_fields(self, request, obj=None):
"""
Hook for specifying custom readonly fields.
"""
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
"""
Hook for specifying custom prepopulated fields.
"""
return self.prepopulated_fields
def get_queryset(self, request):
"""
Return a QuerySet of all model instances that can be edited by the
admin site. This is used by changelist_view.
"""
qs = self.model._default_manager.get_queryset()
# TODO: this should be handled by some parameter to the ChangeList.
ordering = self.get_ordering(request)
if ordering:
qs = qs.order_by(*ordering)
return qs
def lookup_allowed(self, lookup, value):
from django.contrib.admin.filters import SimpleListFilter
model = self.model
# Check FKey lookups that are allowed, so that popups produced by
# ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
# are allowed to work.
for fk_lookup in model._meta.related_fkey_lookups:
# As ``limit_choices_to`` can be a callable, invoke it here.
if callable(fk_lookup):
fk_lookup = fk_lookup()
for k, v in widgets.url_params_from_lookup_dict(fk_lookup).items():
if k == lookup and v == value:
return True
relation_parts = []
prev_field = None
for part in lookup.split(LOOKUP_SEP):
try:
field = model._meta.get_field(part)
except FieldDoesNotExist:
# Lookups on nonexistent fields are ok, since they're ignored
# later.
break
# It is allowed to filter on values that would be found from local
# model anyways. For example, if you filter on employee__department__id,
# then the id value would be found already from employee__department_id.
if not prev_field or (prev_field.concrete and
field not in prev_field.get_path_info()[-1].target_fields):
relation_parts.append(part)
if not getattr(field, 'get_path_info', None):
# This is not a relational field, so further parts
# must be transforms.
break
prev_field = field
model = field.get_path_info()[-1].to_opts.model
if len(relation_parts) <= 1:
# Either a local field filter, or no fields at all.
return True
valid_lookups = {self.date_hierarchy}
for filter_item in self.list_filter:
if isinstance(filter_item, type) and issubclass(filter_item, SimpleListFilter):
valid_lookups.add(filter_item.parameter_name)
elif isinstance(filter_item, (list, tuple)):
valid_lookups.add(filter_item[0])
else:
valid_lookups.add(filter_item)
# Is it a valid relational lookup?
return not {
LOOKUP_SEP.join(relation_parts),
LOOKUP_SEP.join(relation_parts + [part])
}.isdisjoint(valid_lookups)
def to_field_allowed(self, request, to_field):
"""
Return True if the model associated with this admin should be
allowed to be referenced by the specified field.
"""
opts = self.model._meta
try:
field = opts.get_field(to_field)
except FieldDoesNotExist:
return False
# Always allow referencing the primary key since it's already possible
# to get this information from the change view URL.
if field.primary_key:
return True
# Allow reverse relationships to models defining m2m fields if they
# target the specified field.
for many_to_many in opts.many_to_many:
if many_to_many.m2m_target_field_name() == to_field:
return True
# Make sure at least one of the models registered for this site
# references this field through a FK or a M2M relationship.
registered_models = set()
for model, admin in self.admin_site._registry.items():
registered_models.add(model)
for inline in admin.inlines:
registered_models.add(inline.model)
related_objects = (
f for f in opts.get_fields(include_hidden=True)
if (f.auto_created and not f.concrete)
)
for related_object in related_objects:
related_model = related_object.related_model
remote_field = related_object.field.remote_field
if (any(issubclass(model, related_model) for model in registered_models) and
hasattr(remote_field, 'get_related_field') and
remote_field.get_related_field() == field):
return True
return False
def has_add_permission(self, request):
"""
Return True if the given request has permission to add an object.
Can be overridden by the user in subclasses.
"""
opts = self.opts
codename = get_permission_codename('add', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_change_permission(self, request, obj=None):
"""
        Return True if the given request has permission to change the given
        Django model instance. The default implementation doesn't examine the
        `obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to change the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to change *any* object of the given type.
"""
opts = self.opts
codename = get_permission_codename('change', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_delete_permission(self, request, obj=None):
"""
        Return True if the given request has permission to delete the given
        Django model instance. The default implementation doesn't examine the
        `obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to delete the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to delete *any* object of the given type.
"""
opts = self.opts
codename = get_permission_codename('delete', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_module_permission(self, request):
"""
Return True if the given request has any permission in the given
app label.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to view the module on
the admin index page and access the module's index page. Overriding it
does not restrict access to the add, change or delete views. Use
`ModelAdmin.has_(add|change|delete)_permission` for that.
"""
return request.user.has_module_perms(self.opts.app_label)
class ModelAdmin(BaseModelAdmin):
"""Encapsulate all admin options and functionality for a given model."""
list_display = ('__str__',)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
list_max_show_all = 200
list_editable = ()
search_fields = ()
date_hierarchy = None
save_as = False
save_as_continue = True
save_on_top = False
paginator = Paginator
preserve_filters = True
inlines = []
# Custom templates (designed to be over-ridden in subclasses)
add_form_template = None
change_form_template = None
change_list_template = None
delete_confirmation_template = None
delete_selected_confirmation_template = None
object_history_template = None
popup_response_template = None
# Actions
actions = []
action_form = helpers.ActionForm
actions_on_top = True
actions_on_bottom = False
actions_selection_counter = True
checks_class = ModelAdminChecks
def __init__(self, model, admin_site):
self.model = model
self.opts = model._meta
self.admin_site = admin_site
super().__init__()
def __str__(self):
return "%s.%s" % (self.model._meta.app_label, self.__class__.__name__)
def get_inline_instances(self, request, obj=None):
inline_instances = []
for inline_class in self.inlines:
inline = inline_class(self.model, self.admin_site)
if request:
if not (inline.has_add_permission(request) or
inline.has_change_permission(request, obj) or
inline.has_delete_permission(request, obj)):
continue
if not inline.has_add_permission(request):
inline.max_num = 0
inline_instances.append(inline)
return inline_instances
def get_urls(self):
from django.conf.urls import url
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
wrapper.model_admin = self
return update_wrapper(wrapper, view)
info = self.model._meta.app_label, self.model._meta.model_name
urlpatterns = [
url(r'^$', wrap(self.changelist_view), name='%s_%s_changelist' % info),
url(r'^add/$', wrap(self.add_view), name='%s_%s_add' % info),
url(r'^(.+)/history/$', wrap(self.history_view), name='%s_%s_history' % info),
url(r'^(.+)/delete/$', wrap(self.delete_view), name='%s_%s_delete' % info),
url(r'^(.+)/change/$', wrap(self.change_view), name='%s_%s_change' % info),
# For backwards compatibility (was the change url before 1.9)
url(r'^(.+)/$', wrap(RedirectView.as_view(
pattern_name='%s:%s_%s_change' % ((self.admin_site.name,) + info)
))),
]
return urlpatterns
@property
def urls(self):
return self.get_urls()
@property
def media(self):
extra = '' if settings.DEBUG else '.min'
js = [
'core.js',
'vendor/jquery/jquery%s.js' % extra,
'jquery.init.js',
'admin/RelatedObjectLookups.js',
'actions%s.js' % extra,
'urlify.js',
'prepopulate%s.js' % extra,
'vendor/xregexp/xregexp%s.js' % extra,
]
return forms.Media(js=['admin/js/%s' % url for url in js])
def get_model_perms(self, request):
"""
Return a dict of all perms for this model. This dict has the keys
``add``, ``change``, and ``delete`` mapping to the True/False for each
of those actions.
"""
return {
'add': self.has_add_permission(request),
'change': self.has_change_permission(request),
'delete': self.has_delete_permission(request),
}
def get_fields(self, request, obj=None):
if self.fields:
return self.fields
form = self.get_form(request, obj, fields=None)
return list(form.base_fields) + list(self.get_readonly_fields(request, obj))
def get_form(self, request, obj=None, **kwargs):
"""
Return a Form class for use in the admin add view. This is used by
add_view and change_view.
"""
if 'fields' in kwargs:
fields = kwargs.pop('fields')
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
excluded = self.get_exclude(request, obj)
exclude = [] if excluded is None else list(excluded)
readonly_fields = self.get_readonly_fields(request, obj)
exclude.extend(readonly_fields)
if excluded is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# ModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
# if exclude is an empty list we pass None to be consistent with the
# default on modelform_factory
exclude = exclude or None
# Remove declared form fields which are in readonly_fields.
new_attrs = OrderedDict(
(f, None) for f in readonly_fields
if f in self.form.declared_fields
)
form = type(self.form.__name__, (self.form,), new_attrs)
defaults = {
"form": form,
"fields": fields,
"exclude": exclude,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
defaults['fields'] = forms.ALL_FIELDS
try:
return modelform_factory(self.model, **defaults)
except FieldError as e:
raise FieldError(
'%s. Check fields/fieldsets/exclude attributes of class %s.'
% (e, self.__class__.__name__)
)
def get_changelist(self, request, **kwargs):
"""
Return the ChangeList class for use on the changelist page.
"""
from django.contrib.admin.views.main import ChangeList
return ChangeList
def get_object(self, request, object_id, from_field=None):
"""
        Return an instance matching the field and value provided; the primary
        key is used if no field is provided. Return ``None`` if no match is
found or the object_id fails validation.
"""
queryset = self.get_queryset(request)
model = queryset.model
field = model._meta.pk if from_field is None else model._meta.get_field(from_field)
try:
object_id = field.to_python(object_id)
return queryset.get(**{field.name: object_id})
except (model.DoesNotExist, ValidationError, ValueError):
return None
def get_changelist_form(self, request, **kwargs):
"""
Return a Form class for use in the Formset on the changelist page.
"""
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
if defaults.get('fields') is None and not modelform_defines_fields(defaults.get('form')):
defaults['fields'] = forms.ALL_FIELDS
return modelform_factory(self.model, **defaults)
def get_changelist_formset(self, request, **kwargs):
"""
Return a FormSet class for use on the changelist page if list_editable
is used.
"""
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
return modelformset_factory(
self.model, self.get_changelist_form(request), extra=0,
fields=self.list_editable, **defaults
)
def get_formsets_with_inlines(self, request, obj=None):
"""
Yield formsets and the corresponding inlines.
"""
for inline in self.get_inline_instances(request, obj):
yield inline.get_formset(request, obj), inline
def get_paginator(self, request, queryset, per_page, orphans=0, allow_empty_first_page=True):
return self.paginator(queryset, per_page, orphans, allow_empty_first_page)
def log_addition(self, request, object, message):
"""
Log that an object has been successfully added.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, ADDITION
return LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(object).pk,
object_id=object.pk,
object_repr=force_text(object),
action_flag=ADDITION,
change_message=message,
)
def log_change(self, request, object, message):
"""
Log that an object has been successfully changed.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, CHANGE
return LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(object).pk,
object_id=object.pk,
object_repr=force_text(object),
action_flag=CHANGE,
change_message=message,
)
def log_deletion(self, request, object, object_repr):
"""
Log that an object will be deleted. Note that this method must be
called before the deletion.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, DELETION
return LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(object).pk,
object_id=object.pk,
object_repr=object_repr,
action_flag=DELETION,
)
def action_checkbox(self, obj):
"""
A list_display column containing a checkbox widget.
"""
return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME, force_text(obj.pk))
action_checkbox.short_description = mark_safe('<input type="checkbox" id="action-toggle" />')
def get_actions(self, request):
"""
Return a dictionary mapping the names of all actions for this
ModelAdmin to a tuple of (callable, name, description) for each action.
"""
# If self.actions is explicitly set to None that means that we don't
# want *any* actions enabled on this page.
if self.actions is None or IS_POPUP_VAR in request.GET:
return OrderedDict()
actions = []
# Gather actions from the admin site first
for (name, func) in self.admin_site.actions:
description = getattr(func, 'short_description', name.replace('_', ' '))
actions.append((func, name, description))
# Then gather them from the model admin and all parent classes,
# starting with self and working back up.
for klass in self.__class__.mro()[::-1]:
class_actions = getattr(klass, 'actions', [])
# Avoid trying to iterate over None
if not class_actions:
continue
actions.extend(self.get_action(action) for action in class_actions)
# get_action might have returned None, so filter any of those out.
actions = filter(None, actions)
# Convert the actions into an OrderedDict keyed by name.
actions = OrderedDict(
(name, (func, name, desc))
for func, name, desc in actions
)
return actions
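    # Illustrative sketch (hypothetical action): each callable gathered here
    # follows the (modeladmin, request, queryset) convention, and an optional
    # short_description attribute supplies the label used above.
    #
    #     def make_published(modeladmin, request, queryset):
    #         queryset.update(status='p')
    #     make_published.short_description = 'Mark selected items as published'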
def get_action_choices(self, request, default_choices=BLANK_CHOICE_DASH):
"""
Return a list of choices for use in a form object. Each choice is a
tuple (name, description).
"""
choices = [] + default_choices
for func, name, description in self.get_actions(request).values():
choice = (name, description % model_format_dict(self.opts))
choices.append(choice)
return choices
def get_action(self, action):
"""
Return a given action from a parameter, which can either be a callable,
or the name of a method on the ModelAdmin. Return is a tuple of
(callable, name, description).
"""
# If the action is a callable, just use it.
if callable(action):
func = action
action = action.__name__
# Next, look for a method. Grab it off self.__class__ to get an unbound
# method instead of a bound one; this ensures that the calling
# conventions are the same for functions and methods.
elif hasattr(self.__class__, action):
func = getattr(self.__class__, action)
# Finally, look for a named method on the admin site
else:
try:
func = self.admin_site.get_action(action)
except KeyError:
return None
if hasattr(func, 'short_description'):
description = func.short_description
else:
description = capfirst(action.replace('_', ' '))
return func, action, description
def get_list_display(self, request):
"""
Return a sequence containing the fields to be displayed on the
changelist.
"""
return self.list_display
def get_list_display_links(self, request, list_display):
"""
Return a sequence containing the fields to be displayed as links
on the changelist. The list_display parameter is the list of fields
returned by get_list_display().
"""
if self.list_display_links or self.list_display_links is None or not list_display:
return self.list_display_links
else:
# Use only the first item in list_display as link
return list(list_display)[:1]
def get_list_filter(self, request):
"""
Return a sequence containing the fields to be displayed as filters in
the right sidebar of the changelist page.
"""
return self.list_filter
def get_list_select_related(self, request):
"""
Return a list of fields to add to the select_related() part of the
changelist items query.
"""
return self.list_select_related
def get_search_fields(self, request):
"""
Return a sequence containing the fields to be searched whenever
somebody submits a search query.
"""
return self.search_fields
def get_search_results(self, request, queryset, search_term):
"""
Return a tuple containing a queryset to implement the search
and a boolean indicating if the results may contain duplicates.
"""
# Apply keyword searches.
def construct_search(field_name):
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
elif field_name.startswith('@'):
return "%s__search" % field_name[1:]
else:
return "%s__icontains" % field_name
use_distinct = False
search_fields = self.get_search_fields(request)
if search_fields and search_term:
orm_lookups = [construct_search(str(search_field))
for search_field in search_fields]
for bit in search_term.split():
or_queries = [models.Q(**{orm_lookup: bit})
for orm_lookup in orm_lookups]
queryset = queryset.filter(reduce(operator.or_, or_queries))
if not use_distinct:
for search_spec in orm_lookups:
if lookup_needs_distinct(self.opts, search_spec):
use_distinct = True
break
return queryset, use_distinct
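    # Illustrative sketch (hypothetical field names): the prefixes handled by
    # construct_search above map to ORM lookups as follows.
    #
    #     search_fields = ('^name',   # name__istartswith
    #                      '=sku',    # sku__iexact
    #                      '@body',   # body__search (full-text)
    #                      'email')   # email__icontains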
def get_preserved_filters(self, request):
"""
Return the preserved filters querystring.
"""
match = request.resolver_match
if self.preserve_filters and match:
opts = self.model._meta
current_url = '%s:%s' % (match.app_name, match.url_name)
changelist_url = 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name)
if current_url == changelist_url:
preserved_filters = request.GET.urlencode()
else:
preserved_filters = request.GET.get('_changelist_filters')
if preserved_filters:
return urlencode({'_changelist_filters': preserved_filters})
return ''
def construct_change_message(self, request, form, formsets, add=False):
"""
Construct a JSON structure describing changes from a changed object.
"""
return construct_change_message(form, formsets, add)
def message_user(self, request, message, level=messages.INFO, extra_tags='',
fail_silently=False):
"""
Send a message to the user. The default implementation
posts a message using the django.contrib.messages backend.
Exposes almost the same API as messages.add_message(), but accepts the
positional arguments in a different order to maintain backwards
compatibility. For convenience, it accepts the `level` argument as
a string rather than the usual level number.
"""
if not isinstance(level, int):
# attempt to get the level if passed a string
try:
level = getattr(messages.constants, level.upper())
except AttributeError:
levels = messages.constants.DEFAULT_TAGS.values()
levels_repr = ', '.join('`%s`' % l for l in levels)
raise ValueError(
'Bad message level string: `%s`. Possible values are: %s'
% (level, levels_repr)
)
messages.add_message(request, level, message, extra_tags=extra_tags, fail_silently=fail_silently)
def save_form(self, request, form, change):
"""
Given a ModelForm return an unsaved instance. ``change`` is True if
the object is being changed, and False if it's being added.
"""
return form.save(commit=False)
def save_model(self, request, obj, form, change):
"""
Given a model instance save it to the database.
"""
obj.save()
def delete_model(self, request, obj):
"""
Given a model instance delete it from the database.
"""
obj.delete()
def save_formset(self, request, form, formset, change):
"""
Given an inline formset save it to the database.
"""
formset.save()
def save_related(self, request, form, formsets, change):
"""
Given the ``HttpRequest``, the parent ``ModelForm`` instance, the
list of inline formsets and a boolean value based on whether the
parent is being added or changed, save the related objects to the
database. Note that at this point save_form() and save_model() have
already been called.
"""
form.save_m2m()
for formset in formsets:
self.save_formset(request, form, formset, change=change)
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
opts = self.model._meta
app_label = opts.app_label
preserved_filters = self.get_preserved_filters(request)
form_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, form_url)
view_on_site_url = self.get_view_on_site_url(obj)
context.update({
'add': add,
'change': change,
'has_add_permission': self.has_add_permission(request),
'has_change_permission': self.has_change_permission(request, obj),
'has_delete_permission': self.has_delete_permission(request, obj),
'has_file_field': True, # FIXME - this should check if form or formsets have a FileField,
'has_absolute_url': view_on_site_url is not None,
'absolute_url': view_on_site_url,
'form_url': form_url,
'opts': opts,
'content_type_id': get_content_type_for_model(self.model).pk,
'save_as': self.save_as,
'save_on_top': self.save_on_top,
'to_field_var': TO_FIELD_VAR,
'is_popup_var': IS_POPUP_VAR,
'app_label': app_label,
})
if add and self.add_form_template is not None:
form_template = self.add_form_template
else:
form_template = self.change_form_template
request.current_app = self.admin_site.name
return TemplateResponse(request, form_template or [
"admin/%s/%s/change_form.html" % (app_label, opts.model_name),
"admin/%s/change_form.html" % app_label,
"admin/change_form.html"
], context)
def response_add(self, request, obj, post_url_continue=None):
"""
Determine the HttpResponse for the add_view stage.
"""
opts = obj._meta
pk_value = obj._get_pk_val()
preserved_filters = self.get_preserved_filters(request)
obj_url = reverse(
'admin:%s_%s_change' % (opts.app_label, opts.model_name),
args=(quote(pk_value),),
current_app=self.admin_site.name,
)
# Add a link to the object's change form if the user can edit the obj.
if self.has_change_permission(request, obj):
obj_repr = format_html('<a href="{}">{}</a>', urlquote(obj_url), obj)
else:
obj_repr = force_text(obj)
msg_dict = {
'name': opts.verbose_name,
'obj': obj_repr,
}
# Here, we distinguish between different save types by checking for
# the presence of keys in request.POST.
if IS_POPUP_VAR in request.POST:
to_field = request.POST.get(TO_FIELD_VAR)
if to_field:
attr = str(to_field)
else:
attr = obj._meta.pk.attname
value = obj.serializable_value(attr)
popup_response_data = json.dumps({
'value': str(value),
'obj': str(obj),
})
return TemplateResponse(request, self.popup_response_template or [
'admin/%s/%s/popup_response.html' % (opts.app_label, opts.model_name),
'admin/%s/popup_response.html' % opts.app_label,
'admin/popup_response.html',
], {
'popup_response_data': popup_response_data,
})
elif "_continue" in request.POST or (
# Redirecting after "Save as new".
"_saveasnew" in request.POST and self.save_as_continue and
self.has_change_permission(request, obj)
):
msg = format_html(
_('The {name} "{obj}" was added successfully. You may edit it again below.'),
**msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
if post_url_continue is None:
post_url_continue = obj_url
post_url_continue = add_preserved_filters(
{'preserved_filters': preserved_filters, 'opts': opts},
post_url_continue
)
return HttpResponseRedirect(post_url_continue)
elif "_addanother" in request.POST:
msg = format_html(
_('The {name} "{obj}" was added successfully. You may add another {name} below.'),
**msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.path
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
else:
msg = format_html(
_('The {name} "{obj}" was added successfully.'),
**msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
return self.response_post_save_add(request, obj)
def response_change(self, request, obj):
"""
Determine the HttpResponse for the change_view stage.
"""
if IS_POPUP_VAR in request.POST:
opts = obj._meta
to_field = request.POST.get(TO_FIELD_VAR)
attr = str(to_field) if to_field else opts.pk.attname
# Retrieve the `object_id` from the resolved pattern arguments.
value = request.resolver_match.args[0]
new_value = obj.serializable_value(attr)
popup_response_data = json.dumps({
'action': 'change',
'value': str(value),
'obj': str(obj),
'new_value': str(new_value),
})
return TemplateResponse(request, self.popup_response_template or [
'admin/%s/%s/popup_response.html' % (opts.app_label, opts.model_name),
'admin/%s/popup_response.html' % opts.app_label,
'admin/popup_response.html',
], {
'popup_response_data': popup_response_data,
})
opts = self.model._meta
pk_value = obj._get_pk_val()
preserved_filters = self.get_preserved_filters(request)
msg_dict = {
'name': opts.verbose_name,
'obj': format_html('<a href="{}">{}</a>', urlquote(request.path), obj),
}
if "_continue" in request.POST:
msg = format_html(
_('The {name} "{obj}" was changed successfully. You may edit it again below.'),
**msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.path
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
elif "_saveasnew" in request.POST:
msg = format_html(
_('The {name} "{obj}" was added successfully. You may edit it again below.'),
**msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = reverse('admin:%s_%s_change' %
(opts.app_label, opts.model_name),
args=(pk_value,),
current_app=self.admin_site.name)
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
elif "_addanother" in request.POST:
msg = format_html(
_('The {name} "{obj}" was changed successfully. You may add another {name} below.'),
**msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = reverse('admin:%s_%s_add' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
else:
msg = format_html(
_('The {name} "{obj}" was changed successfully.'),
**msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
return self.response_post_save_change(request, obj)
def response_post_save_add(self, request, obj):
"""
Figure out where to redirect after the 'Save' button has been pressed
when adding a new object.
"""
opts = self.model._meta
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def response_post_save_change(self, request, obj):
"""
Figure out where to redirect after the 'Save' button has been pressed
when editing an existing object.
"""
opts = self.model._meta
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def response_action(self, request, queryset):
"""
Handle an admin action. This is called if a request is POSTed to the
changelist; it returns an HttpResponse if the action was handled, and
None otherwise.
"""
# There can be multiple action forms on the page (at the top
# and bottom of the change list, for example). Get the action
# whose button was pushed.
try:
action_index = int(request.POST.get('index', 0))
except ValueError:
action_index = 0
# Construct the action form.
data = request.POST.copy()
data.pop(helpers.ACTION_CHECKBOX_NAME, None)
data.pop("index", None)
# Use the action whose button was pushed
try:
data.update({'action': data.getlist('action')[action_index]})
except IndexError:
# If we didn't get an action from the chosen form, that's invalid
# POST data, so by deleting 'action' it'll fail the validation check
# below. No need to do anything else here.
pass
action_form = self.action_form(data, auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
# If the form's valid we can handle the action.
if action_form.is_valid():
action = action_form.cleaned_data['action']
select_across = action_form.cleaned_data['select_across']
func = self.get_actions(request)[action][0]
# Get the list of selected PKs. If nothing's selected, we can't
# perform an action on it, so bail. Except we want to perform
# the action explicitly on all objects.
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
if not selected and not select_across:
# Reminder that something needs to be selected or nothing will happen
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg, messages.WARNING)
return None
if not select_across:
# Perform the action only on the selected objects
queryset = queryset.filter(pk__in=selected)
response = func(self, request, queryset)
# Actions may return an HttpResponse-like object, which will be
# used as the response from the POST. If not, we'll be a good
# little HTTP citizen and redirect back to the changelist page.
if isinstance(response, HttpResponseBase):
return response
else:
return HttpResponseRedirect(request.get_full_path())
else:
msg = _("No action selected.")
self.message_user(request, msg, messages.WARNING)
return None
def response_delete(self, request, obj_display, obj_id):
"""
Determine the HttpResponse for the delete_view stage.
"""
opts = self.model._meta
if IS_POPUP_VAR in request.POST:
popup_response_data = json.dumps({
'action': 'delete',
'value': str(obj_id),
})
return TemplateResponse(request, self.popup_response_template or [
'admin/%s/%s/popup_response.html' % (opts.app_label, opts.model_name),
'admin/%s/popup_response.html' % opts.app_label,
'admin/popup_response.html',
], {
'popup_response_data': popup_response_data,
})
self.message_user(
request,
_('The %(name)s "%(obj)s" was deleted successfully.') % {
'name': opts.verbose_name,
'obj': obj_display,
},
messages.SUCCESS,
)
if self.has_change_permission(request, None):
post_url = reverse(
'admin:%s_%s_changelist' % (opts.app_label, opts.model_name),
current_app=self.admin_site.name,
)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters(
{'preserved_filters': preserved_filters, 'opts': opts}, post_url
)
else:
post_url = reverse('admin:index', current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def render_delete_form(self, request, context):
opts = self.model._meta
app_label = opts.app_label
request.current_app = self.admin_site.name
context.update(
to_field_var=TO_FIELD_VAR,
is_popup_var=IS_POPUP_VAR,
media=self.media,
)
return TemplateResponse(
request,
self.delete_confirmation_template or [
"admin/{}/{}/delete_confirmation.html".format(app_label, opts.model_name),
"admin/{}/delete_confirmation.html".format(app_label),
"admin/delete_confirmation.html",
],
context,
)
def get_inline_formsets(self, request, formsets, inline_instances, obj=None):
inline_admin_formsets = []
for inline, formset in zip(inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request, obj))
readonly = list(inline.get_readonly_fields(request, obj))
prepopulated = dict(inline.get_prepopulated_fields(request, obj))
inline_admin_formset = helpers.InlineAdminFormSet(
inline, formset, fieldsets, prepopulated, readonly,
model_admin=self,
)
inline_admin_formsets.append(inline_admin_formset)
return inline_admin_formsets
def get_changeform_initial_data(self, request):
"""
Get the initial form data from the request's GET params.
"""
initial = dict(request.GET.items())
for k in initial:
try:
f = self.model._meta.get_field(k)
except FieldDoesNotExist:
continue
# We have to special-case M2Ms as a list of comma-separated PKs.
if isinstance(f, models.ManyToManyField):
initial[k] = initial[k].split(",")
return initial
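# Editor's note (illustrative): the ManyToManyField special case above means
# a URL such as /admin/app/model/add/?tags=1,2,3 pre-populates a
# hypothetical "tags" M2M field from the comma-separated primary keys,
# while scalar fields take their GET value verbatim.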
def _get_obj_does_not_exist_redirect(self, request, opts, object_id):
"""
Create a message informing the user that the object doesn't exist
and return a redirect to the admin index page.
"""
msg = _("""%(name)s with ID "%(key)s" doesn't exist. Perhaps it was deleted?""") % {
'name': opts.verbose_name,
'key': unquote(object_id),
}
self.message_user(request, msg, messages.WARNING)
url = reverse('admin:index', current_app=self.admin_site.name)
return HttpResponseRedirect(url)
@csrf_protect_m
def changeform_view(self, request, object_id=None, form_url='', extra_context=None):
with transaction.atomic(using=router.db_for_write(self.model)):
return self._changeform_view(request, object_id, form_url, extra_context)
def _changeform_view(self, request, object_id, form_url, extra_context):
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
model = self.model
opts = model._meta
if request.method == 'POST' and '_saveasnew' in request.POST:
object_id = None
add = object_id is None
if add:
if not self.has_add_permission(request):
raise PermissionDenied
obj = None
else:
obj = self.get_object(request, unquote(object_id), to_field)
if not self.has_change_permission(request, obj):
raise PermissionDenied
if obj is None:
return self._get_obj_does_not_exist_redirect(request, opts, object_id)
ModelForm = self.get_form(request, obj)
if request.method == 'POST':
form = ModelForm(request.POST, request.FILES, instance=obj)
if form.is_valid():
form_validated = True
new_object = self.save_form(request, form, change=not add)
else:
form_validated = False
new_object = form.instance
formsets, inline_instances = self._create_formsets(request, new_object, change=not add)
if all_valid(formsets) and form_validated:
self.save_model(request, new_object, form, not add)
self.save_related(request, form, formsets, not add)
change_message = self.construct_change_message(request, form, formsets, add)
if add:
self.log_addition(request, new_object, change_message)
return self.response_add(request, new_object)
else:
self.log_change(request, new_object, change_message)
return self.response_change(request, new_object)
else:
form_validated = False
else:
if add:
initial = self.get_changeform_initial_data(request)
form = ModelForm(initial=initial)
formsets, inline_instances = self._create_formsets(request, form.instance, change=False)
else:
form = ModelForm(instance=obj)
formsets, inline_instances = self._create_formsets(request, obj, change=True)
adminForm = helpers.AdminForm(
form,
list(self.get_fieldsets(request, obj)),
self.get_prepopulated_fields(request, obj),
self.get_readonly_fields(request, obj),
model_admin=self)
media = self.media + adminForm.media
inline_formsets = self.get_inline_formsets(request, formsets, inline_instances, obj)
for inline_formset in inline_formsets:
media = media + inline_formset.media
context = dict(
self.admin_site.each_context(request),
title=(_('Add %s') if add else _('Change %s')) % opts.verbose_name,
adminform=adminForm,
object_id=object_id,
original=obj,
is_popup=(IS_POPUP_VAR in request.POST or
IS_POPUP_VAR in request.GET),
to_field=to_field,
media=media,
inline_admin_formsets=inline_formsets,
errors=helpers.AdminErrorList(form, formsets),
preserved_filters=self.get_preserved_filters(request),
)
# Hide the "Save" and "Save and continue" buttons if "Save as New" was
# previously chosen to prevent the interface from getting confusing.
if request.method == 'POST' and not form_validated and "_saveasnew" in request.POST:
context['show_save'] = False
context['show_save_and_continue'] = False
# Use the change template instead of the add template.
add = False
context.update(extra_context or {})
return self.render_change_form(request, context, add=add, change=not add, obj=obj, form_url=form_url)
def add_view(self, request, form_url='', extra_context=None):
return self.changeform_view(request, None, form_url, extra_context)
def change_view(self, request, object_id, form_url='', extra_context=None):
return self.changeform_view(request, object_id, form_url, extra_context)
@csrf_protect_m
def changelist_view(self, request, extra_context=None):
"""
The 'change list' admin view for this model.
"""
from django.contrib.admin.views.main import ERROR_FLAG
opts = self.model._meta
app_label = opts.app_label
if not self.has_change_permission(request, None):
raise PermissionDenied
list_display = self.get_list_display(request)
list_display_links = self.get_list_display_links(request, list_display)
list_filter = self.get_list_filter(request)
search_fields = self.get_search_fields(request)
list_select_related = self.get_list_select_related(request)
# Check actions to see if any are available on this changelist
actions = self.get_actions(request)
if actions:
# Add the action checkboxes if there are any actions available.
list_display = ['action_checkbox'] + list(list_display)
ChangeList = self.get_changelist(request)
try:
cl = ChangeList(
request, self.model, list_display,
list_display_links, list_filter, self.date_hierarchy,
search_fields, list_select_related, self.list_per_page,
self.list_max_show_all, self.list_editable, self,
)
except IncorrectLookupParameters:
# Wacky lookup parameters were given, so redirect to the main
# changelist page, without parameters, and pass an 'invalid=1'
# parameter via the query string. If wacky parameters were given
# and the 'invalid=1' parameter was already in the query string,
# something is screwed up with the database, so display an error
# page.
if ERROR_FLAG in request.GET.keys():
return SimpleTemplateResponse('admin/invalid_setup.html', {
'title': _('Database error'),
})
return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')
# If the request was POSTed, this might be a bulk action or a bulk
# edit. Try to look up an action or confirmation first, but if this
# isn't an action the POST will fall through to the bulk edit check,
# below.
action_failed = False
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
# Actions with no confirmation
if (actions and request.method == 'POST' and
'index' in request.POST and '_save' not in request.POST):
if selected:
response = self.response_action(request, queryset=cl.get_queryset(request))
if response:
return response
else:
action_failed = True
else:
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg, messages.WARNING)
action_failed = True
# Actions with confirmation
if (actions and request.method == 'POST' and
helpers.ACTION_CHECKBOX_NAME in request.POST and
'index' not in request.POST and '_save' not in request.POST):
if selected:
response = self.response_action(request, queryset=cl.get_queryset(request))
if response:
return response
else:
action_failed = True
if action_failed:
# Redirect back to the changelist page to avoid resubmitting the
# form if the user refreshes the browser or uses the "No, take
# me back" button on the action confirmation page.
return HttpResponseRedirect(request.get_full_path())
# If we're allowing changelist editing, we need to construct a formset
# for the changelist given all the fields to be edited. Then we'll
# use the formset to validate/process POSTed data.
formset = cl.formset = None
# Handle POSTed bulk-edit data.
if request.method == 'POST' and cl.list_editable and '_save' in request.POST:
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(request.POST, request.FILES, queryset=self.get_queryset(request))
if formset.is_valid():
changecount = 0
for form in formset.forms:
if form.has_changed():
obj = self.save_form(request, form, change=True)
self.save_model(request, obj, form, change=True)
self.save_related(request, form, formsets=[], change=True)
change_msg = self.construct_change_message(request, form, None)
self.log_change(request, obj, change_msg)
changecount += 1
if changecount:
msg = ngettext(
"%(count)s %(name)s was changed successfully.",
"%(count)s %(name)s were changed successfully.",
changecount
) % {
'count': changecount,
'name': model_ngettext(opts, changecount),
}
self.message_user(request, msg, messages.SUCCESS)
return HttpResponseRedirect(request.get_full_path())
# Handle GET -- construct a formset for display.
elif cl.list_editable:
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(queryset=cl.result_list)
# Build the list of media to be used by the formset.
if formset:
media = self.media + formset.media
else:
media = self.media
# Build the action form and populate it with available actions.
if actions:
action_form = self.action_form(auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
media += action_form.media
else:
action_form = None
selection_note_all = ngettext(
'%(total_count)s selected',
'All %(total_count)s selected',
cl.result_count
)
context = dict(
self.admin_site.each_context(request),
module_name=force_text(opts.verbose_name_plural),
selection_note=_('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},
selection_note_all=selection_note_all % {'total_count': cl.result_count},
title=cl.title,
is_popup=cl.is_popup,
to_field=cl.to_field,
cl=cl,
media=media,
has_add_permission=self.has_add_permission(request),
opts=cl.opts,
action_form=action_form,
actions_on_top=self.actions_on_top,
actions_on_bottom=self.actions_on_bottom,
actions_selection_counter=self.actions_selection_counter,
preserved_filters=self.get_preserved_filters(request),
)
context.update(extra_context or {})
request.current_app = self.admin_site.name
return TemplateResponse(request, self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_label, opts.model_name),
'admin/%s/change_list.html' % app_label,
'admin/change_list.html'
], context)
@csrf_protect_m
def delete_view(self, request, object_id, extra_context=None):
with transaction.atomic(using=router.db_for_write(self.model)):
return self._delete_view(request, object_id, extra_context)
def _delete_view(self, request, object_id, extra_context):
"The 'delete' admin view for this model."
opts = self.model._meta
app_label = opts.app_label
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
obj = self.get_object(request, unquote(object_id), to_field)
if not self.has_delete_permission(request, obj):
raise PermissionDenied
if obj is None:
return self._get_obj_does_not_exist_redirect(request, opts, object_id)
using = router.db_for_write(self.model)
# Populate deleted_objects, a data structure of all related objects that
# will also be deleted.
(deleted_objects, model_count, perms_needed, protected) = get_deleted_objects(
[obj], opts, request.user, self.admin_site, using)
if request.POST and not protected: # The user has confirmed the deletion.
if perms_needed:
raise PermissionDenied
obj_display = force_text(obj)
attr = str(to_field) if to_field else opts.pk.attname
obj_id = obj.serializable_value(attr)
self.log_deletion(request, obj, obj_display)
self.delete_model(request, obj)
return self.response_delete(request, obj_display, obj_id)
object_name = force_text(opts.verbose_name)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": object_name}
else:
title = _("Are you sure?")
context = dict(
self.admin_site.each_context(request),
title=title,
object_name=object_name,
object=obj,
deleted_objects=deleted_objects,
model_count=dict(model_count).items(),
perms_lacking=perms_needed,
protected=protected,
opts=opts,
app_label=app_label,
preserved_filters=self.get_preserved_filters(request),
is_popup=(IS_POPUP_VAR in request.POST or
IS_POPUP_VAR in request.GET),
to_field=to_field,
)
context.update(extra_context or {})
return self.render_delete_form(request, context)
def history_view(self, request, object_id, extra_context=None):
"The 'history' admin view for this model."
from django.contrib.admin.models import LogEntry
# First check if the user can see this history.
model = self.model
obj = self.get_object(request, unquote(object_id))
if obj is None:
return self._get_obj_does_not_exist_redirect(request, model._meta, object_id)
if not self.has_change_permission(request, obj):
raise PermissionDenied
# Then get the history for this object.
opts = model._meta
app_label = opts.app_label
action_list = LogEntry.objects.filter(
object_id=unquote(object_id),
content_type=get_content_type_for_model(model)
).select_related().order_by('action_time')
context = dict(
self.admin_site.each_context(request),
title=_('Change history: %s') % obj,
action_list=action_list,
module_name=capfirst(force_text(opts.verbose_name_plural)),
object=obj,
opts=opts,
preserved_filters=self.get_preserved_filters(request),
)
context.update(extra_context or {})
request.current_app = self.admin_site.name
return TemplateResponse(request, self.object_history_template or [
"admin/%s/%s/object_history.html" % (app_label, opts.model_name),
"admin/%s/object_history.html" % app_label,
"admin/object_history.html"
], context)
def _create_formsets(self, request, obj, change):
"Helper function to generate formsets for add/change_view."
formsets = []
inline_instances = []
prefixes = {}
get_formsets_args = [request]
if change:
get_formsets_args.append(obj)
for FormSet, inline in self.get_formsets_with_inlines(*get_formsets_args):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1 or not prefix:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset_params = {
'instance': obj,
'prefix': prefix,
'queryset': inline.get_queryset(request),
}
if request.method == 'POST':
formset_params.update({
'data': request.POST.copy(),
'files': request.FILES,
'save_as_new': '_saveasnew' in request.POST
})
formsets.append(FormSet(**formset_params))
inline_instances.append(inline)
return formsets, inline_instances
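# --- Editor's illustrative sketch (not part of Django) ----------------------
# response_action() above dispatches to callables with this signature: an
# admin action receives (modeladmin, request, queryset) and may return an
# HttpResponse, or None to fall back to a redirect to the changelist.  The
# "status" field below is hypothetical.
def publish_selected(modeladmin, request, queryset):
    updated = queryset.update(status='published')
    modeladmin.message_user(
        request,
        _("%(count)d item(s) marked as published.") % {'count': updated},
        messages.SUCCESS,
    )
publish_selected.short_description = _("Mark selected items as published")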
class InlineModelAdmin(BaseModelAdmin):
"""
Options for inline editing of ``model`` instances.
Provide ``fk_name`` to specify the attribute name of the ``ForeignKey``
from ``model`` to its parent. This is required if ``model`` has more than
one ``ForeignKey`` to its parent.
"""
model = None
fk_name = None
formset = BaseInlineFormSet
extra = 3
min_num = None
max_num = None
template = None
verbose_name = None
verbose_name_plural = None
can_delete = True
show_change_link = False
checks_class = InlineModelAdminChecks
classes = None
def __init__(self, parent_model, admin_site):
self.admin_site = admin_site
self.parent_model = parent_model
self.opts = self.model._meta
self.has_registered_model = admin_site.is_registered(self.model)
super().__init__()
if self.verbose_name is None:
self.verbose_name = self.model._meta.verbose_name
if self.verbose_name_plural is None:
self.verbose_name_plural = self.model._meta.verbose_name_plural
@property
def media(self):
extra = '' if settings.DEBUG else '.min'
js = ['vendor/jquery/jquery%s.js' % extra, 'jquery.init.js',
'inlines%s.js' % extra]
if self.filter_vertical or self.filter_horizontal:
js.extend(['SelectBox.js', 'SelectFilter2.js'])
if self.classes and 'collapse' in self.classes:
js.append('collapse%s.js' % extra)
return forms.Media(js=['admin/js/%s' % url for url in js])
def get_extra(self, request, obj=None, **kwargs):
"""Hook for customizing the number of extra inline forms."""
return self.extra
def get_min_num(self, request, obj=None, **kwargs):
"""Hook for customizing the min number of inline forms."""
return self.min_num
def get_max_num(self, request, obj=None, **kwargs):
"""Hook for customizing the max number of extra inline forms."""
return self.max_num
def get_formset(self, request, obj=None, **kwargs):
"""Return a BaseInlineFormSet class for use in admin add/change views."""
if 'fields' in kwargs:
fields = kwargs.pop('fields')
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
excluded = self.get_exclude(request, obj)
exclude = [] if excluded is None else list(excluded)
exclude.extend(self.get_readonly_fields(request, obj))
if excluded is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# InlineModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
# If exclude is an empty list we use None, since that's the actual
# default.
exclude = exclude or None
can_delete = self.can_delete and self.has_delete_permission(request, obj)
defaults = {
"form": self.form,
"formset": self.formset,
"fk_name": self.fk_name,
"fields": fields,
"exclude": exclude,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
"extra": self.get_extra(request, obj, **kwargs),
"min_num": self.get_min_num(request, obj, **kwargs),
"max_num": self.get_max_num(request, obj, **kwargs),
"can_delete": can_delete,
}
defaults.update(kwargs)
base_model_form = defaults['form']
class DeleteProtectedModelForm(base_model_form):
def hand_clean_DELETE(self):
"""
We don't validate the 'DELETE' field itself because in the
templates it's not rendered using the field information, but
only via a generic "deletion_field" of the InlineModelAdmin.
"""
if self.cleaned_data.get(DELETION_FIELD_NAME, False):
using = router.db_for_write(self._meta.model)
collector = NestedObjects(using=using)
if self.instance.pk is None:
return
collector.collect([self.instance])
if collector.protected:
objs = []
for p in collector.protected:
objs.append(
# Translators: Model verbose name and instance representation,
# suitable to be an item in a list.
_('%(class_name)s %(instance)s') % {
'class_name': p._meta.verbose_name,
'instance': p}
)
params = {'class_name': self._meta.model._meta.verbose_name,
'instance': self.instance,
'related_objects': get_text_list(objs, _('and'))}
msg = _("Deleting %(class_name)s %(instance)s would require "
"deleting the following protected related objects: "
"%(related_objects)s")
raise ValidationError(msg, code='deleting_protected', params=params)
def is_valid(self):
result = super().is_valid()
self.hand_clean_DELETE()
return result
defaults['form'] = DeleteProtectedModelForm
if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
defaults['fields'] = forms.ALL_FIELDS
return inlineformset_factory(self.parent_model, self.model, **defaults)
def get_fields(self, request, obj=None):
if self.fields:
return self.fields
form = self.get_formset(request, obj, fields=None).form
return list(form.base_fields) + list(self.get_readonly_fields(request, obj))
def get_queryset(self, request):
queryset = super().get_queryset(request)
if not self.has_change_permission(request):
queryset = queryset.none()
return queryset
def has_add_permission(self, request):
if self.opts.auto_created:
# We're checking the rights to an auto-created intermediate model,
# which doesn't have its own individual permissions. The user needs
# to have the change permission for the related model in order to
# be able to do anything with the intermediate model.
return self.has_change_permission(request)
return super().has_add_permission(request)
def has_change_permission(self, request, obj=None):
opts = self.opts
if opts.auto_created:
# The model was auto-created as intermediary for a
# ManyToMany-relationship, find the target model
for field in opts.fields:
if field.remote_field and field.remote_field.model != self.parent_model:
opts = field.remote_field.model._meta
break
codename = get_permission_codename('change', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_delete_permission(self, request, obj=None):
if self.opts.auto_created:
# We're checking the rights to an auto-created intermediate model,
# which doesn't have its own individual permissions. The user needs
# to have the change permission for the related model in order to
# be able to do anything with the intermediate model.
return self.has_change_permission(request, obj)
return super().has_delete_permission(request, obj)
class StackedInline(InlineModelAdmin):
template = 'admin/edit_inline/stacked.html'
class TabularInline(InlineModelAdmin):
template = 'admin/edit_inline/tabular.html'
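# --- Editor's illustrative sketch (not part of Django) ----------------------
# Minimal wiring of the classes above, assuming a hypothetical app whose
# Book model has a ForeignKey to Author; shown commented out because the
# imports are hypothetical:
#
#     from django.contrib import admin
#     from myapp.models import Author, Book
#
#     class BookInline(TabularInline):
#         model = Book
#         extra = 1          # one blank extra form instead of the default 3
#
#     class AuthorAdmin(ModelAdmin):
#         inlines = [BookInline]
#         save_as = True     # enables the "Save as new" flow handled above
#
#     admin.site.register(Author, AuthorAdmin)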
|
bsd-3-clause
|
huran2014/huran.github.io
|
wot_gateway/usr/lib/python2.7/sre_parse.py
|
156
|
26798
|
#
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# XXX: show string offset and offending character for all errors
import sys
from sre_constants import *
SPECIAL_CHARS = ".\\[{()*+?^$|"
REPEAT_CHARS = "*+?{"
DIGITS = set("0123456789")
OCTDIGITS = set("01234567")
HEXDIGITS = set("0123456789abcdefABCDEF")
WHITESPACE = set(" \t\n\r\v\f")
ESCAPES = {
r"\a": (LITERAL, ord("\a")),
r"\b": (LITERAL, ord("\b")),
r"\f": (LITERAL, ord("\f")),
r"\n": (LITERAL, ord("\n")),
r"\r": (LITERAL, ord("\r")),
r"\t": (LITERAL, ord("\t")),
r"\v": (LITERAL, ord("\v")),
r"\\": (LITERAL, ord("\\"))
}
CATEGORIES = {
r"\A": (AT, AT_BEGINNING_STRING), # start of string
r"\b": (AT, AT_BOUNDARY),
r"\B": (AT, AT_NON_BOUNDARY),
r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
r"\Z": (AT, AT_END_STRING), # end of string
}
FLAGS = {
# standard flags
"i": SRE_FLAG_IGNORECASE,
"L": SRE_FLAG_LOCALE,
"m": SRE_FLAG_MULTILINE,
"s": SRE_FLAG_DOTALL,
"x": SRE_FLAG_VERBOSE,
# extensions
"t": SRE_FLAG_TEMPLATE,
"u": SRE_FLAG_UNICODE,
}
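# Editor's note: FLAGS maps the letters accepted in inline flag groups
# (e.g. "(?im)" sets SRE_FLAG_IGNORECASE | SRE_FLAG_MULTILINE); the
# flag-parsing branch near the end of _parse() below ORs them into
# state.flags.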
class Pattern:
# master pattern object. keeps track of global attributes
def __init__(self):
self.flags = 0
self.open = []
self.groups = 1
self.groupdict = {}
def opengroup(self, name=None):
gid = self.groups
self.groups = gid + 1
if name is not None:
ogid = self.groupdict.get(name, None)
if ogid is not None:
raise error, ("redefinition of group name %s as group %d; "
"was group %d" % (repr(name), gid, ogid))
self.groupdict[name] = gid
self.open.append(gid)
return gid
def closegroup(self, gid):
self.open.remove(gid)
def checkgroup(self, gid):
return gid < self.groups and gid not in self.open
class SubPattern:
# a subpattern, in intermediate form
def __init__(self, pattern, data=None):
self.pattern = pattern
if data is None:
data = []
self.data = data
self.width = None
def dump(self, level=0):
nl = 1
seqtypes = type(()), type([])
for op, av in self.data:
print level*" " + op,; nl = 0
if op == "in":
# member sublanguage
print; nl = 1
for op, a in av:
print (level+1)*" " + op, a
elif op == "branch":
print; nl = 1
i = 0
for a in av[1]:
if i > 0:
print level*" " + "or"
a.dump(level+1); nl = 1
i = i + 1
elif type(av) in seqtypes:
for a in av:
if isinstance(a, SubPattern):
if not nl: print
a.dump(level+1); nl = 1
else:
print a, ; nl = 0
else:
print av, ; nl = 0
if not nl: print
def __repr__(self):
return repr(self.data)
def __len__(self):
return len(self.data)
def __delitem__(self, index):
del self.data[index]
def __getitem__(self, index):
if isinstance(index, slice):
return SubPattern(self.pattern, self.data[index])
return self.data[index]
def __setitem__(self, index, code):
self.data[index] = code
def insert(self, index, code):
self.data.insert(index, code)
def append(self, code):
self.data.append(code)
def getwidth(self):
# determine the width (min, max) for this subpattern
if self.width:
return self.width
lo = hi = 0L
UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY)
REPEATCODES = (MIN_REPEAT, MAX_REPEAT)
for op, av in self.data:
if op is BRANCH:
i = sys.maxint
j = 0
for av in av[1]:
l, h = av.getwidth()
i = min(i, l)
j = max(j, h)
lo = lo + i
hi = hi + j
elif op is CALL:
i, j = av.getwidth()
lo = lo + i
hi = hi + j
elif op is SUBPATTERN:
i, j = av[1].getwidth()
lo = lo + i
hi = hi + j
elif op in REPEATCODES:
i, j = av[2].getwidth()
lo = lo + long(i) * av[0]
hi = hi + long(j) * av[1]
elif op in UNITCODES:
lo = lo + 1
hi = hi + 1
elif op == SUCCESS:
break
self.width = int(min(lo, sys.maxint)), int(min(hi, sys.maxint))
return self.width
class Tokenizer:
def __init__(self, string):
self.string = string
self.index = 0
self.__next()
def __next(self):
if self.index >= len(self.string):
self.next = None
return
char = self.string[self.index]
if char[0] == "\\":
try:
c = self.string[self.index + 1]
except IndexError:
raise error, "bogus escape (end of line)"
char = char + c
self.index = self.index + len(char)
self.next = char
def match(self, char, skip=1):
if char == self.next:
if skip:
self.__next()
return 1
return 0
def get(self):
this = self.next
self.__next()
return this
def tell(self):
return self.index, self.next
def seek(self, index):
self.index, self.next = index
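# Editor's note (illustrative): the tokenizer yields one logical character
# per get(), keeping a backslash escape together as a single token:
#
#     t = Tokenizer(r"a\d+")
#     t.get()        # "a"
#     t.get()        # "\\d"  (escape kept as one token)
#     t.get()        # "+"
#     t.get()        # None   (end of input)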
def isident(char):
return "a" <= char <= "z" or "A" <= char <= "Z" or char == "_"
def isdigit(char):
return "0" <= char <= "9"
def isname(name):
# check that group name is a valid string
if not isident(name[0]):
return False
for char in name[1:]:
if not isident(char) and not isdigit(char):
return False
return True
def _class_escape(source, escape):
# handle escape code inside character class
code = ESCAPES.get(escape)
if code:
return code
code = CATEGORIES.get(escape)
if code:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape (exactly two digits)
while source.next in HEXDIGITS and len(escape) < 4:
escape = escape + source.get()
escape = escape[2:]
if len(escape) != 2:
raise error, "bogus escape: %s" % repr("\\" + escape)
return LITERAL, int(escape, 16) & 0xff
elif c in OCTDIGITS:
# octal escape (up to three digits)
while source.next in OCTDIGITS and len(escape) < 4:
escape = escape + source.get()
escape = escape[1:]
return LITERAL, int(escape, 8) & 0xff
elif c in DIGITS:
raise error, "bogus escape: %s" % repr(escape)
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error, "bogus escape: %s" % repr(escape)
def _escape(source, escape, state):
# handle escape code in expression
code = CATEGORIES.get(escape)
if code:
return code
code = ESCAPES.get(escape)
if code:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape
while source.next in HEXDIGITS and len(escape) < 4:
escape = escape + source.get()
if len(escape) != 4:
raise ValueError
return LITERAL, int(escape[2:], 16) & 0xff
elif c == "0":
# octal escape
while source.next in OCTDIGITS and len(escape) < 4:
escape = escape + source.get()
return LITERAL, int(escape[1:], 8) & 0xff
elif c in DIGITS:
# octal escape *or* decimal group reference (sigh)
if source.next in DIGITS:
escape = escape + source.get()
if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
source.next in OCTDIGITS):
# got three octal digits; this is an octal escape
escape = escape + source.get()
return LITERAL, int(escape[1:], 8) & 0xff
# not an octal escape, so this is a group reference
group = int(escape[1:])
if group < state.groups:
if not state.checkgroup(group):
raise error, "cannot refer to open group"
return GROUPREF, group
raise ValueError
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error, "bogus escape: %s" % repr(escape)
def _parse_sub(source, state, nested=1):
# parse an alternation: a|b|c
items = []
itemsappend = items.append
sourcematch = source.match
while 1:
itemsappend(_parse(source, state))
if sourcematch("|"):
continue
if not nested:
break
if not source.next or sourcematch(")", 0):
break
else:
raise error, "pattern not properly closed"
if len(items) == 1:
return items[0]
subpattern = SubPattern(state)
subpatternappend = subpattern.append
# check if all items share a common prefix
while 1:
prefix = None
for item in items:
if not item:
break
if prefix is None:
prefix = item[0]
elif item[0] != prefix:
break
else:
# all subitems start with a common "prefix".
# move it out of the branch
for item in items:
del item[0]
subpatternappend(prefix)
continue # check next one
break
# check if the branch can be replaced by a character set
for item in items:
if len(item) != 1 or item[0][0] != LITERAL:
break
else:
# we can store this as a character set instead of a
# branch (the compiler may optimize this even more)
set = []
setappend = set.append
for item in items:
setappend(item[0])
subpatternappend((IN, set))
return subpattern
subpattern.append((BRANCH, (None, items)))
return subpattern
def _parse_sub_cond(source, state, condgroup):
item_yes = _parse(source, state)
if source.match("|"):
item_no = _parse(source, state)
if source.match("|"):
raise error, "conditional backref with more than two branches"
else:
item_no = None
if source.next and not source.match(")", 0):
raise error, "pattern not properly closed"
subpattern = SubPattern(state)
subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
return subpattern
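# Editor's note: _parse_sub_cond() above implements conditional references
# of the form (?(group)yes-pattern|no-pattern); _parse() dispatches to it
# after reading "(?(" and resolving the group name or number (condgroup).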
_PATTERNENDERS = set("|)")
_ASSERTCHARS = set("=!<")
_LOOKBEHINDASSERTCHARS = set("=!")
_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])
def _parse(source, state):
# parse a simple pattern
subpattern = SubPattern(state)
# precompute constants into local variables
subpatternappend = subpattern.append
sourceget = source.get
sourcematch = source.match
_len = len
PATTERNENDERS = _PATTERNENDERS
ASSERTCHARS = _ASSERTCHARS
LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS
REPEATCODES = _REPEATCODES
while 1:
if source.next in PATTERNENDERS:
break # end of subpattern
this = sourceget()
if this is None:
break # end of pattern
if state.flags & SRE_FLAG_VERBOSE:
# skip whitespace and comments
if this in WHITESPACE:
continue
if this == "#":
while 1:
this = sourceget()
if this in (None, "\n"):
break
continue
if this and this[0] not in SPECIAL_CHARS:
subpatternappend((LITERAL, ord(this)))
elif this == "[":
# character set
set = []
setappend = set.append
## if sourcematch(":"):
## pass # handle character classes
if sourcematch("^"):
setappend((NEGATE, None))
# check remaining characters
start = set[:]
while 1:
this = sourceget()
if this == "]" and set != start:
break
elif this and this[0] == "\\":
code1 = _class_escape(source, this)
elif this:
code1 = LITERAL, ord(this)
else:
raise error, "unexpected end of regular expression"
if sourcematch("-"):
# potential range
this = sourceget()
if this == "]":
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
setappend((LITERAL, ord("-")))
break
elif this:
if this[0] == "\\":
code2 = _class_escape(source, this)
else:
code2 = LITERAL, ord(this)
if code1[0] != LITERAL or code2[0] != LITERAL:
raise error, "bad character range"
lo = code1[1]
hi = code2[1]
if hi < lo:
raise error, "bad character range"
setappend((RANGE, (lo, hi)))
else:
raise error, "unexpected end of regular expression"
else:
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
# XXX: <fl> should move set optimization to compiler!
if _len(set)==1 and set[0][0] is LITERAL:
subpatternappend(set[0]) # optimization
elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
subpatternappend((NOT_LITERAL, set[1][1])) # optimization
else:
# XXX: <fl> should add charmap optimization here
subpatternappend((IN, set))
elif this and this[0] in REPEAT_CHARS:
# repeat previous item
if this == "?":
min, max = 0, 1
elif this == "*":
min, max = 0, MAXREPEAT
elif this == "+":
min, max = 1, MAXREPEAT
elif this == "{":
if source.next == "}":
subpatternappend((LITERAL, ord(this)))
continue
here = source.tell()
min, max = 0, MAXREPEAT
lo = hi = ""
while source.next in DIGITS:
lo = lo + source.get()
if sourcematch(","):
while source.next in DIGITS:
hi = hi + sourceget()
else:
hi = lo
if not sourcematch("}"):
subpatternappend((LITERAL, ord(this)))
source.seek(here)
continue
if lo:
min = int(lo)
if hi:
max = int(hi)
if max < min:
raise error, "bad repeat interval"
else:
raise error, "not supported"
# figure out which item to repeat
if subpattern:
item = subpattern[-1:]
else:
item = None
if not item or (_len(item) == 1 and item[0][0] == AT):
raise error, "nothing to repeat"
if item[0][0] in REPEATCODES:
raise error, "multiple repeat"
if sourcematch("?"):
subpattern[-1] = (MIN_REPEAT, (min, max, item))
else:
subpattern[-1] = (MAX_REPEAT, (min, max, item))
elif this == ".":
subpatternappend((ANY, None))
elif this == "(":
group = 1
name = None
condgroup = None
if sourcematch("?"):
group = 0
# options
if sourcematch("P"):
# python extensions
if sourcematch("<"):
# named group: skip forward to end of name
name = ""
while 1:
char = sourceget()
if char is None:
raise error, "unterminated name"
if char == ">":
break
name = name + char
group = 1
if not isname(name):
raise error, "bad character in group name"
elif sourcematch("="):
# named backreference
name = ""
while 1:
char = sourceget()
if char is None:
raise error, "unterminated name"
if char == ")":
break
name = name + char
if not isname(name):
raise error, "bad character in group name"
gid = state.groupdict.get(name)
if gid is None:
raise error, "unknown group name"
subpatternappend((GROUPREF, gid))
continue
else:
char = sourceget()
if char is None:
raise error, "unexpected end of pattern"
raise error, "unknown specifier: ?P%s" % char
elif sourcematch(":"):
# non-capturing group
group = 2
elif sourcematch("#"):
# comment
while 1:
if source.next is None or source.next == ")":
break
sourceget()
if not sourcematch(")"):
raise error, "unbalanced parenthesis"
continue
elif source.next in ASSERTCHARS:
# lookahead assertions
char = sourceget()
dir = 1
if char == "<":
if source.next not in LOOKBEHINDASSERTCHARS:
raise error, "syntax error"
dir = -1 # lookbehind
char = sourceget()
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error, "unbalanced parenthesis"
if char == "=":
subpatternappend((ASSERT, (dir, p)))
else:
subpatternappend((ASSERT_NOT, (dir, p)))
continue
elif sourcematch("("):
# conditional backreference group
condname = ""
while 1:
char = sourceget()
if char is None:
raise error, "unterminated name"
if char == ")":
break
condname = condname + char
group = 2
if isname(condname):
condgroup = state.groupdict.get(condname)
if condgroup is None:
raise error, "unknown group name"
else:
try:
condgroup = int(condname)
except ValueError:
raise error, "bad character in group name"
else:
# flags
if not source.next in FLAGS:
raise error, "unexpected end of pattern"
while source.next in FLAGS:
state.flags = state.flags | FLAGS[sourceget()]
if group:
# parse group contents
if group == 2:
# anonymous group
group = None
else:
group = state.opengroup(name)
if condgroup:
p = _parse_sub_cond(source, state, condgroup)
else:
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error, "unbalanced parenthesis"
if group is not None:
state.closegroup(group)
subpatternappend((SUBPATTERN, (group, p)))
else:
while 1:
char = sourceget()
if char is None:
raise error, "unexpected end of pattern"
if char == ")":
break
raise error, "unknown extension"
elif this == "^":
subpatternappend((AT, AT_BEGINNING))
elif this == "$":
subpattern.append((AT, AT_END))
elif this and this[0] == "\\":
code = _escape(source, this, state)
subpatternappend(code)
else:
raise error, "parser error"
return subpattern
def parse(str, flags=0, pattern=None):
# parse 're' pattern into list of (opcode, argument) tuples
source = Tokenizer(str)
if pattern is None:
pattern = Pattern()
pattern.flags = flags
pattern.str = str
p = _parse_sub(source, pattern, 0)
tail = source.get()
if tail == ")":
raise error, "unbalanced parenthesis"
elif tail:
raise error, "bogus characters at end of regular expression"
if flags & SRE_FLAG_DEBUG:
p.dump()
if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
# the VERBOSE flag was switched on inside the pattern. to be
# on the safe side, we'll parse the whole thing again...
return parse(str, p.pattern.flags)
return p
def parse_template(source, pattern):
# parse 're' replacement string into list of literals and
# group references
s = Tokenizer(source)
sget = s.get
p = []
a = p.append
def literal(literal, p=p, pappend=a):
if p and p[-1][0] is LITERAL:
p[-1] = LITERAL, p[-1][1] + literal
else:
pappend((LITERAL, literal))
sep = source[:0]
if type(sep) is type(""):
makechar = chr
else:
makechar = unichr
while 1:
this = sget()
if this is None:
break # end of replacement string
if this and this[0] == "\\":
# group
c = this[1:2]
if c == "g":
name = ""
if s.match("<"):
while 1:
char = sget()
if char is None:
raise error, "unterminated group name"
if char == ">":
break
name = name + char
if not name:
raise error, "bad group name"
try:
index = int(name)
if index < 0:
raise error, "negative group number"
except ValueError:
if not isname(name):
raise error, "bad character in group name"
try:
index = pattern.groupindex[name]
except KeyError:
raise IndexError, "unknown group name"
a((MARK, index))
elif c == "0":
if s.next in OCTDIGITS:
this = this + sget()
if s.next in OCTDIGITS:
this = this + sget()
literal(makechar(int(this[1:], 8) & 0xff))
elif c in DIGITS:
isoctal = False
if s.next in DIGITS:
this = this + sget()
if (c in OCTDIGITS and this[2] in OCTDIGITS and
s.next in OCTDIGITS):
this = this + sget()
isoctal = True
literal(makechar(int(this[1:], 8) & 0xff))
if not isoctal:
a((MARK, int(this[1:])))
else:
try:
this = makechar(ESCAPES[this][1])
except KeyError:
pass
literal(this)
else:
literal(this)
# convert template to groups and literals lists
i = 0
groups = []
groupsappend = groups.append
literals = [None] * len(p)
for c, s in p:
if c is MARK:
groupsappend((i, s))
# literal[i] is already None
else:
literals[i] = s
i = i + 1
return groups, literals
def expand_template(template, match):
g = match.group
sep = match.string[:0]
groups, literals = template
literals = literals[:]
try:
for index, group in groups:
literals[index] = s = g(group)
if s is None:
raise error, "unmatched group"
except IndexError:
raise error, "invalid group reference"
return sep.join(literals)
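# --- Editor's illustrative sketch (not part of the original module) ---------
# parse() returns a SubPattern of (opcode, argument) pairs and dump()
# pretty-prints that tree.  Python 2 syntax, matching the module above:
if __name__ == "__main__":
    p = parse(r"(?P<word>[a-z]+)\d*")
    p.dump()                    # pretty-print the parsed tree
    print p.pattern.groupdict   # {'word': 1}
    print p.getwidth()          # (min, max) match width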
|
gpl-2.0
|
miketheman/opencomparison
|
searchv2/migrations/0004_auto.py
|
1
|
3142
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding index on 'SearchV2', fields ['title_no_prefix']
db.create_index('searchv2_searchv2', ['title_no_prefix'])
# Adding index on 'SearchV2', fields ['title']
db.create_index('searchv2_searchv2', ['title'])
# Adding index on 'SearchV2', fields ['clean_title']
db.create_index('searchv2_searchv2', ['clean_title'])
def backwards(self, orm):
# Removing index on 'SearchV2', fields ['clean_title']
db.delete_index('searchv2_searchv2', ['clean_title'])
# Removing index on 'SearchV2', fields ['title']
db.delete_index('searchv2_searchv2', ['title'])
# Removing index on 'SearchV2', fields ['title_no_prefix']
db.delete_index('searchv2_searchv2', ['title_no_prefix'])
models = {
'searchv2.searchv2': {
'Meta': {'ordering': "['-weight']", 'object_name': 'SearchV2'},
'absolute_url': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'category': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'clean_title': ('django.db.models.fields.CharField', [], {'max_length': "'100'", 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_type': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'last_committed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'participants': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'pypi_downloads': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'repo_forks': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'repo_watchers': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'slug_no_prefix': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': "'100'", 'db_index': 'True'}),
'title_no_prefix': ('django.db.models.fields.CharField', [], {'max_length': "'100'", 'db_index': 'True'}),
'usage': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['searchv2']
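# Editor's note (not part of the original migration): with South, this
# migration is applied with `./manage.py migrate searchv2` and reversed
# (dropping the three indexes again) with `./manage.py migrate searchv2 0003`.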
|
mit
|
suutari-ai/shoop
|
shuup/front/basket/objects.py
|
2
|
1414
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from shuup.core.basket.objects import BaseBasket as Basket
from shuup.core.basket.storage import BasketCompatibilityError
class BaseBasket(Basket):
def __init__(self, request, basket_name="basket"):
super(BaseBasket, self).__init__(request)
self.basket_name = basket_name
def _load(self):
"""
Get the currently persisted data for this basket.
This will only access the storage once per request in usual
circumstances.
:return: Data dict.
:rtype: dict
"""
if self._data is None:
try:
self._data = self.storage.load(basket=self)
except BasketCompatibilityError as error:
msg = _("Basket loading failed: Incompatible basket (%s)")
messages.error(self.request, msg % error)
self.storage.delete(basket=self)
self._data = self.storage.load(basket=self)
self.dirty = False
self.uncache()
return self._data
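# Editor's illustrative sketch (not part of Shuup): ``basket_name`` lets
# several independent baskets coexist in one session, e.g. a wishlist
# stored under its own key next to the main basket.
class WishlistBasket(BaseBasket):
    def __init__(self, request):
        super(WishlistBasket, self).__init__(request, basket_name="wishlist")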
|
agpl-3.0
|
broesamle/servo
|
tests/wpt/harness/wptrunner/hosts.py
|
196
|
3292
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import unicode_literals
class HostsLine(object):
def __init__(self, ip_address, canonical_hostname, aliases=None, comment=None):
self.ip_address = ip_address
self.canonical_hostname = canonical_hostname
self.aliases = aliases if aliases is not None else []
self.comment = comment
if self.ip_address is None:
assert self.canonical_hostname is None
assert not self.aliases
assert self.comment is not None
@classmethod
def from_string(cls, line):
if not line.strip():
return
line = line.strip()
ip_address = None
canonical_hostname = None
aliases = []
comment = None
comment_parts = line.split("#", 1)
if len(comment_parts) > 1:
comment = comment_parts[1]
data = comment_parts[0].strip()
if data:
fields = data.split()
if len(fields) < 2:
raise ValueError("Invalid hosts line")
ip_address = fields[0]
canonical_hostname = fields[1]
aliases = fields[2:]
return cls(ip_address, canonical_hostname, aliases, comment)
class HostsFile(object):
def __init__(self):
self.data = []
self.by_hostname = {}
def set_host(self, host):
if host.canonical_hostname is None:
self.data.append(host)
elif host.canonical_hostname in self.by_hostname:
old_host = self.by_hostname[host.canonical_hostname]
old_host.ip_address = host.ip_address
old_host.aliases = host.aliases
old_host.comment = host.comment
else:
self.data.append(host)
self.by_hostname[host.canonical_hostname] = host
@classmethod
def from_file(cls, f):
rv = cls()
for line in f:
host = HostsLine.from_string(line)
if host is not None:
rv.set_host(host)
return rv
def to_string(self):
field_widths = [0, 0]
for line in self.data:
if line.ip_address is not None:
field_widths[0] = max(field_widths[0], len(line.ip_address))
field_widths[1] = max(field_widths[1], len(line.canonical_hostname))
lines = []
for host in self.data:
line = ""
if host.ip_address is not None:
ip_string = host.ip_address.ljust(field_widths[0])
hostname_str = host.canonical_hostname
if host.aliases:
hostname_str = "%s %s" % (hostname_str.ljust(field_widths[1]),
" ".join(host.aliases))
line = "%s %s" % (ip_string, hostname_str)
if host.comment:
if line:
line += " "
line += "#%s" % host.comment
lines.append(line)
lines.append("")
return "\n".join(lines)
def to_file(self, f):
f.write(self.to_string().encode("utf8"))
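# --- Editor's illustrative sketch (not part of the original module) ---------
# Round-trip a couple of entries through the classes above; from_file()
# accepts any iterable of lines.
if __name__ == "__main__":
    hosts = HostsFile.from_file([
        "127.0.0.1 localhost  # loopback",
        "192.168.0.2 web web.local",
    ])
    # Setting a host whose canonical hostname already exists updates the
    # existing entry in place.
    hosts.set_host(HostsLine("192.168.0.3", "web", ["web.local"]))
    print(hosts.to_string())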
|
mpl-2.0
|
arabenjamin/scikit-learn
|
examples/linear_model/plot_sgd_comparison.py
|
167
|
1659
|
"""
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
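# Editor's note: sklearn.cross_validation was deprecated in scikit-learn
# 0.18 and removed in 0.20; on current releases the equivalent import is
#     from sklearn.model_selection import train_test_split
# and the example otherwise runs the same way.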
|
bsd-3-clause
|
V155/qutebrowser
|
tests/unit/config/test_configcommands.py
|
1
|
29376
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for qutebrowser.config.configcommands."""
import logging
import functools
import unittest.mock
import pytest
from PyQt5.QtCore import QUrl
from qutebrowser.config import configcommands, configutils
from qutebrowser.api import cmdutils
from qutebrowser.utils import usertypes, urlmatch
from qutebrowser.keyinput import keyutils
from qutebrowser.misc import objects
# Alias because we need this a lot in here.
def keyseq(s):
return keyutils.KeySequence.parse(s)
@pytest.fixture
def commands(config_stub, key_config_stub):
return configcommands.ConfigCommands(config_stub, key_config_stub)
@pytest.fixture
def yaml_value(config_stub):
"""Fixture which provides a getter for a YAML value."""
def getter(option):
return config_stub._yaml._values[option].get_for_url(fallback=False)
return getter
class TestSet:
"""Tests for :set."""
def test_set_no_args(self, commands, tabbed_browser_stubs):
"""Run ':set'.
Should open qute://settings."""
commands.set(win_id=0)
assert tabbed_browser_stubs[0].loaded_url == QUrl('qute://settings')
@pytest.mark.parametrize('option', ['url.auto_search?', 'url.auto_search'])
def test_get(self, config_stub, commands, message_mock, option):
"""Run ':set url.auto_search?' / ':set url.auto_search'.
Should show the value.
"""
config_stub.val.url.auto_search = 'never'
commands.set(win_id=0, option=option)
msg = message_mock.getmsg(usertypes.MessageLevel.info)
assert msg.text == 'url.auto_search = never'
@pytest.mark.parametrize('temp', [True, False])
@pytest.mark.parametrize('option, old_value, inp, new_value', [
('url.auto_search', 'naive', 'dns', 'dns'),
# https://github.com/qutebrowser/qutebrowser/issues/2962
('editor.command',
['gvim', '-f', '{file}', '-c', 'normal {line}G{column0}l'],
'[emacs, "{}"]', ['emacs', '{}']),
])
def test_set_simple(self, monkeypatch, commands, config_stub, yaml_value,
temp, option, old_value, inp, new_value):
"""Run ':set [-t] option value'.
Should set the setting accordingly.
"""
monkeypatch.setattr(objects, 'backend', usertypes.Backend.QtWebKit)
assert config_stub.get(option) == old_value
commands.set(0, option, inp, temp=temp)
assert config_stub.get(option) == new_value
assert yaml_value(option) == (configutils.UNSET if temp else new_value)
def test_set_with_pattern(self, monkeypatch, commands, config_stub):
monkeypatch.setattr(objects, 'backend', usertypes.Backend.QtWebKit)
option = 'content.javascript.enabled'
commands.set(0, option, 'false', pattern='*://example.com')
pattern = urlmatch.UrlPattern('*://example.com')
assert config_stub.get(option)
assert not config_stub.get_obj_for_pattern(option, pattern=pattern)
def test_set_invalid_pattern(self, monkeypatch, commands):
monkeypatch.setattr(objects, 'backend', usertypes.Backend.QtWebKit)
option = 'content.javascript.enabled'
with pytest.raises(cmdutils.CommandError,
match=('Error while parsing http://: Pattern '
'without host')):
commands.set(0, option, 'false', pattern='http://')
def test_set_no_pattern(self, monkeypatch, commands):
"""Run ':set --pattern=*://* colors.statusbar.normal.bg #abcdef.
Should show an error as patterns are unsupported.
"""
with pytest.raises(cmdutils.CommandError,
match='does not support URL patterns'):
commands.set(0, 'colors.statusbar.normal.bg', '#abcdef',
pattern='*://*')
@pytest.mark.parametrize('temp', [True, False])
def test_set_temp_override(self, commands, config_stub, yaml_value, temp):
"""Invoking :set twice.
:set url.auto_search dns
:set -t url.auto_search never
Should set the setting accordingly.
"""
assert config_stub.val.url.auto_search == 'naive'
commands.set(0, 'url.auto_search', 'dns')
commands.set(0, 'url.auto_search', 'never', temp=True)
assert config_stub.val.url.auto_search == 'never'
assert yaml_value('url.auto_search') == 'dns'
@pytest.mark.parametrize('pattern', [None, '*://example.com'])
def test_set_print(self, config_stub, commands, message_mock, pattern):
"""Run ':set -p [-u *://example.com] content.javascript.enabled false'.
Should set the value and show it.
"""
assert config_stub.val.content.javascript.enabled
commands.set(0, 'content.javascript.enabled', 'false', print_=True,
pattern=pattern)
value = config_stub.get_obj_for_pattern(
'content.javascript.enabled',
pattern=None if pattern is None else urlmatch.UrlPattern(pattern))
assert not value
expected = 'content.javascript.enabled = false'
if pattern is not None:
expected += ' for {}'.format(pattern)
msg = message_mock.getmsg(usertypes.MessageLevel.info)
assert msg.text == expected
def test_set_invalid_option(self, commands):
"""Run ':set foo bar'.
Should show an error.
"""
with pytest.raises(cmdutils.CommandError, match="No option 'foo'"):
commands.set(0, 'foo', 'bar')
def test_set_invalid_value(self, commands):
"""Run ':set auto_save.session blah'.
Should show an error.
"""
with pytest.raises(cmdutils.CommandError,
match="Invalid value 'blah' - must be a boolean!"):
commands.set(0, 'auto_save.session', 'blah')
def test_set_wrong_backend(self, commands, monkeypatch):
monkeypatch.setattr(objects, 'backend', usertypes.Backend.QtWebEngine)
with pytest.raises(cmdutils.CommandError,
match="The hints.find_implementation setting is "
"not available with the QtWebEngine backend!"):
commands.set(0, 'hints.find_implementation', 'javascript')
def test_empty(self, commands):
"""Run ':set ?'.
Should show an error.
See https://github.com/qutebrowser/qutebrowser/issues/1109
"""
with pytest.raises(cmdutils.CommandError, match="No option '?'"):
commands.set(win_id=0, option='?')
def test_toggle(self, commands):
"""Try toggling a value.
Should show a nicer error.
"""
with pytest.raises(cmdutils.CommandError,
match="Toggling values was moved to the "
":config-cycle command"):
commands.set(win_id=0, option='javascript.enabled!')
def test_invalid(self, commands):
"""Run ':set foo?'.
Should show an error.
"""
with pytest.raises(cmdutils.CommandError, match="No option 'foo'"):
commands.set(win_id=0, option='foo?')
class TestCycle:
"""Test :config-cycle."""
@pytest.mark.parametrize('initial, expected', [
# Normal cycling
('magenta', 'blue'),
# Through the end of the list
('yellow', 'green'),
# Value which is not in the list
('red', 'green'),
])
def test_cycling(self, commands, config_stub, yaml_value,
initial, expected):
"""Run ':set' with multiple values."""
opt = 'colors.statusbar.normal.bg'
config_stub.set_obj(opt, initial)
commands.config_cycle(opt, 'green', 'magenta', 'blue', 'yellow')
assert config_stub.get(opt) == expected
assert yaml_value(opt) == expected
def test_different_representation(self, commands, config_stub):
"""When using a different representation, cycling should work.
For example, we use [foo] which is represented as ["foo"].
"""
opt = 'qt.args'
config_stub.set_obj(opt, ['foo'])
commands.config_cycle(opt, '[foo]', '[bar]')
assert config_stub.get(opt) == ['bar']
commands.config_cycle(opt, '[foo]', '[bar]')
assert config_stub.get(opt) == ['foo']
def test_toggle(self, commands, config_stub, yaml_value):
"""Run ':config-cycle auto_save.session'.
Should toggle the value.
"""
assert not config_stub.val.auto_save.session
commands.config_cycle('auto_save.session')
assert config_stub.val.auto_save.session
assert yaml_value('auto_save.session')
@pytest.mark.parametrize('args', [
['url.auto_search'], ['url.auto_search', 'foo']
])
def test_toggle_nonbool(self, commands, config_stub, args):
"""Run :config-cycle without a bool and 0/1 value.
:config-cycle url.auto_search
:config-cycle url.auto_search foo
Should show an error.
"""
assert config_stub.val.url.auto_search == 'naive'
with pytest.raises(cmdutils.CommandError, match="Need at least "
"two values for non-boolean settings."):
commands.config_cycle(*args)
assert config_stub.val.url.auto_search == 'naive'
def test_set_toggle_print(self, commands, config_stub, message_mock):
"""Run ':config-cycle -p auto_save.session'.
Should toggle the value and show the new value.
"""
commands.config_cycle('auto_save.session', print_=True)
msg = message_mock.getmsg(usertypes.MessageLevel.info)
assert msg.text == 'auto_save.session = true'
class TestAdd:
"""Test :config-list-add and :config-dict-add."""
@pytest.mark.parametrize('temp', [True, False])
@pytest.mark.parametrize('value', ['test1', 'test2'])
def test_list_add(self, commands, config_stub, yaml_value, temp, value):
name = 'content.host_blocking.whitelist'
commands.config_list_add(name, value, temp=temp)
assert str(config_stub.get(name)[-1]) == value
if temp:
assert yaml_value(name) == configutils.UNSET
else:
assert yaml_value(name)[-1] == value
def test_list_add_non_list(self, commands):
with pytest.raises(
cmdutils.CommandError,
match=":config-list-add can only be used for lists"):
commands.config_list_add('history_gap_interval', 'value')
@pytest.mark.parametrize('value', ['', None, 42])
def test_list_add_invalid_values(self, commands, value):
with pytest.raises(
cmdutils.CommandError,
match="Invalid value '{}'".format(value)):
commands.config_list_add('content.host_blocking.whitelist', value)
@pytest.mark.parametrize('value', ['test1', 'test2'])
@pytest.mark.parametrize('temp', [True, False])
def test_dict_add(self, commands, config_stub, yaml_value, value, temp):
name = 'aliases'
key = 'missingkey'
commands.config_dict_add(name, key, value, temp=temp)
assert str(config_stub.get(name)[key]) == value
if temp:
assert yaml_value(name) == configutils.UNSET
else:
assert yaml_value(name)[key] == value
@pytest.mark.parametrize('replace', [True, False])
def test_dict_add_replace(self, commands, config_stub, replace):
name = 'aliases'
key = 'w'
value = 'anything'
if replace:
commands.config_dict_add(name, key, value, replace=True)
assert str(config_stub.get(name)[key]) == value
else:
with pytest.raises(
cmdutils.CommandError,
match="w already exists in aliases - use --replace to "
"overwrite!"):
commands.config_dict_add(name, key, value, replace=False)
def test_dict_add_non_dict(self, commands):
with pytest.raises(
cmdutils.CommandError,
match=":config-dict-add can only be used for dicts"):
commands.config_dict_add('history_gap_interval', 'key', 'value')
@pytest.mark.parametrize('value', ['', None, 42])
def test_dict_add_invalid_values(self, commands, value):
with pytest.raises(cmdutils.CommandError,
match="Invalid value '{}'".format(value)):
commands.config_dict_add('aliases', 'missingkey', value)
class TestRemove:
"""Test :config-list-remove and :config-dict-remove."""
@pytest.mark.parametrize('value', ['25%', '50%'])
@pytest.mark.parametrize('temp', [True, False])
def test_list_remove(self, commands, config_stub, yaml_value, value, temp):
name = 'zoom.levels'
commands.config_list_remove(name, value, temp=temp)
assert value not in config_stub.get(name)
if temp:
assert yaml_value(name) == configutils.UNSET
else:
assert value not in yaml_value(name)
def test_list_remove_non_list(self, commands):
with pytest.raises(
cmdutils.CommandError,
match=":config-list-remove can only be used for lists"):
commands.config_list_remove('content.javascript.enabled',
'never')
def test_list_remove_no_value(self, commands):
with pytest.raises(
cmdutils.CommandError,
match="never is not in colors.completion.fg!"):
commands.config_list_remove('colors.completion.fg', 'never')
@pytest.mark.parametrize('key', ['w', 'q'])
@pytest.mark.parametrize('temp', [True, False])
def test_dict_remove(self, commands, config_stub, yaml_value, key, temp):
name = 'aliases'
commands.config_dict_remove(name, key, temp=temp)
assert key not in config_stub.get(name)
if temp:
assert yaml_value(name) == configutils.UNSET
else:
assert key not in yaml_value(name)
def test_dict_remove_non_dict(self, commands):
with pytest.raises(
cmdutils.CommandError,
match=":config-dict-remove can only be used for dicts"):
commands.config_dict_remove('content.javascript.enabled',
'never')
def test_dict_remove_no_value(self, commands):
with pytest.raises(
cmdutils.CommandError,
match="never is not in aliases!"):
commands.config_dict_remove('aliases', 'never')
class TestUnsetAndClear:
"""Test :config-unset and :config-clear."""
@pytest.mark.parametrize('temp', [True, False])
def test_unset(self, commands, config_stub, yaml_value, temp):
name = 'tabs.show'
config_stub.set_obj(name, 'never', save_yaml=True)
commands.config_unset(name, temp=temp)
assert config_stub.get(name) == 'always'
assert yaml_value(name) == ('never' if temp else configutils.UNSET)
def test_unset_unknown_option(self, commands):
with pytest.raises(cmdutils.CommandError, match="No option 'tabs'"):
commands.config_unset('tabs')
@pytest.mark.parametrize('save', [True, False])
def test_clear(self, commands, config_stub, yaml_value, save):
name = 'tabs.show'
config_stub.set_obj(name, 'never', save_yaml=True)
commands.config_clear(save=save)
assert config_stub.get(name) == 'always'
assert yaml_value(name) == (configutils.UNSET if save else 'never')
class TestSource:
"""Test :config-source."""
pytestmark = pytest.mark.usefixtures('config_tmpdir', 'data_tmpdir',
'config_stub', 'key_config_stub')
@pytest.mark.parametrize('use_default_dir', [True, False])
@pytest.mark.parametrize('clear', [True, False])
def test_config_source(self, tmpdir, commands, config_stub, config_tmpdir,
use_default_dir, clear):
assert config_stub.val.content.javascript.enabled
config_stub.val.search.ignore_case = 'always'
if use_default_dir:
pyfile = config_tmpdir / 'config.py'
arg = None
else:
pyfile = tmpdir / 'sourced.py'
arg = str(pyfile)
pyfile.write_text('c.content.javascript.enabled = False\n',
encoding='utf-8')
commands.config_source(arg, clear=clear)
assert not config_stub.val.content.javascript.enabled
ignore_case = config_stub.val.search.ignore_case
assert ignore_case == (usertypes.IgnoreCase.smart if clear
else usertypes.IgnoreCase.always)
def test_errors(self, commands, config_tmpdir):
pyfile = config_tmpdir / 'config.py'
pyfile.write_text('c.foo = 42', encoding='utf-8')
with pytest.raises(cmdutils.CommandError) as excinfo:
commands.config_source()
expected = ("Errors occurred while reading config.py:\n"
" While setting 'foo': No option 'foo'")
assert str(excinfo.value) == expected
def test_invalid_source(self, commands, config_tmpdir):
pyfile = config_tmpdir / 'config.py'
pyfile.write_text('1/0', encoding='utf-8')
with pytest.raises(cmdutils.CommandError) as excinfo:
commands.config_source()
expected = ("Errors occurred while reading config.py:\n"
" Unhandled exception - ZeroDivisionError:"
" division by zero")
assert str(excinfo.value) == expected
class TestEdit:
"""Tests for :config-edit."""
pytestmark = pytest.mark.usefixtures('config_tmpdir', 'data_tmpdir',
'config_stub', 'key_config_stub',
'qapp')
def test_no_source(self, commands, mocker):
mock = mocker.patch('qutebrowser.config.configcommands.editor.'
'ExternalEditor._start_editor', autospec=True)
commands.config_edit(no_source=True)
mock.assert_called_once_with(unittest.mock.ANY)
@pytest.fixture
def patch_editor(self, mocker):
"""Write a config.py file."""
def do_patch(text):
def _write_file(editor_self):
with open(editor_self._filename, 'w', encoding='utf-8') as f:
f.write(text)
editor_self.file_updated.emit(text)
return mocker.patch('qutebrowser.config.configcommands.editor.'
'ExternalEditor._start_editor', autospec=True,
side_effect=_write_file)
return do_patch
def test_with_sourcing(self, commands, config_stub, patch_editor):
assert config_stub.val.content.javascript.enabled
mock = patch_editor('c.content.javascript.enabled = False')
commands.config_edit()
mock.assert_called_once_with(unittest.mock.ANY)
assert not config_stub.val.content.javascript.enabled
def test_error(self, commands, config_stub, patch_editor, message_mock,
caplog):
patch_editor('c.foo = 42')
with caplog.at_level(logging.ERROR):
commands.config_edit()
msg = message_mock.getmsg()
expected = ("Errors occurred while reading config.py:\n"
" While setting 'foo': No option 'foo'")
assert msg.text == expected
class TestWritePy:
"""Tests for :config-write-py."""
def test_custom(self, commands, config_stub, key_config_stub, tmpdir):
confpy = tmpdir / 'config.py'
config_stub.val.content.javascript.enabled = True
key_config_stub.bind(keyseq(',x'), 'message-info foo', mode='normal')
commands.config_write_py(str(confpy))
lines = confpy.read_text('utf-8').splitlines()
assert "c.content.javascript.enabled = True" in lines
assert "config.bind(',x', 'message-info foo')" in lines
def test_defaults(self, commands, tmpdir):
confpy = tmpdir / 'config.py'
commands.config_write_py(str(confpy), defaults=True)
lines = confpy.read_text('utf-8').splitlines()
assert "# c.content.javascript.enabled = True" in lines
assert "# config.bind('H', 'back')" in lines
def test_default_location(self, commands, config_tmpdir):
confpy = config_tmpdir / 'config.py'
commands.config_write_py()
lines = confpy.read_text('utf-8').splitlines()
assert '# Autogenerated config.py' in lines
def test_relative_path(self, commands, config_tmpdir):
confpy = config_tmpdir / 'config2.py'
commands.config_write_py('config2.py')
lines = confpy.read_text('utf-8').splitlines()
assert '# Autogenerated config.py' in lines
def test_existing_file(self, commands, tmpdir):
confpy = tmpdir / 'config.py'
confpy.ensure()
with pytest.raises(cmdutils.CommandError) as excinfo:
commands.config_write_py(str(confpy))
expected = " already exists - use --force to overwrite!"
assert str(excinfo.value).endswith(expected)
def test_existing_file_force(self, commands, tmpdir):
confpy = tmpdir / 'config.py'
confpy.ensure()
commands.config_write_py(str(confpy), force=True)
lines = confpy.read_text('utf-8').splitlines()
assert '# Autogenerated config.py' in lines
def test_oserror(self, commands, tmpdir):
"""Test writing to a directory which does not exist."""
with pytest.raises(cmdutils.CommandError):
commands.config_write_py(str(tmpdir / 'foo' / 'config.py'))
class TestBind:
"""Tests for :bind and :unbind."""
@pytest.fixture
def no_bindings(self):
"""Get a dict with no bindings."""
return {'normal': {}}
def test_bind_no_args(self, commands, config_stub, no_bindings,
tabbed_browser_stubs):
"""Run ':bind'.
Should open qute://bindings."""
config_stub.val.bindings.default = no_bindings
config_stub.val.bindings.commands = no_bindings
commands.bind(win_id=0)
assert tabbed_browser_stubs[0].loaded_url == QUrl('qute://bindings')
@pytest.mark.parametrize('command', ['nop', 'nope'])
def test_bind(self, commands, config_stub, no_bindings, key_config_stub,
yaml_value, command):
"""Simple :bind test (and aliases)."""
config_stub.val.aliases = {'nope': 'nop'}
config_stub.val.bindings.default = no_bindings
config_stub.val.bindings.commands = no_bindings
commands.bind(0, 'a', command)
assert key_config_stub.get_command(keyseq('a'), 'normal') == command
yaml_bindings = yaml_value('bindings.commands')['normal']
assert yaml_bindings['a'] == command
@pytest.mark.parametrize('key, mode, expected', [
# Simple
('a', 'normal', "a is bound to 'message-info a' in normal mode"),
# Alias
('b', 'normal', "b is bound to 'mib' in normal mode"),
# Custom binding
('c', 'normal', "c is bound to 'message-info c' in normal mode"),
# Special key
('<Ctrl-X>', 'normal',
"<Ctrl+x> is bound to 'message-info C-x' in normal mode"),
# unbound
('x', 'normal', "x is unbound in normal mode"),
# non-default mode
('x', 'caret', "x is bound to 'nop' in caret mode"),
])
def test_bind_print(self, commands, config_stub, message_mock,
key, mode, expected):
"""Run ':bind key'.
Should print the binding.
"""
config_stub.val.aliases = {'mib': 'message-info b'}
config_stub.val.bindings.default = {
'normal': {'a': 'message-info a',
'b': 'mib',
'<Ctrl+x>': 'message-info C-x'},
'caret': {'x': 'nop'}
}
config_stub.val.bindings.commands = {
'normal': {'c': 'message-info c'}
}
commands.bind(0, key, mode=mode)
msg = message_mock.getmsg(usertypes.MessageLevel.info)
assert msg.text == expected
@pytest.mark.parametrize('command, args, kwargs, expected', [
# :bind --mode=wrongmode a nop
('bind', ['a', 'nop'], {'mode': 'wrongmode'},
'Invalid mode wrongmode!'),
# :bind --mode=wrongmode a
('bind', ['a'], {'mode': 'wrongmode'},
'Invalid mode wrongmode!'),
# :bind --default --mode=wrongmode a
('bind', ['a'], {'mode': 'wrongmode', 'default': True},
'Invalid mode wrongmode!'),
# :bind --default foobar
('bind', ['foobar'], {'default': True},
"Can't find binding 'foobar' in normal mode"),
# :bind <blub> nop
('bind', ['<blub>', 'nop'], {},
"Could not parse '<blub>': Got invalid key!"),
# :unbind foobar
('unbind', ['foobar'], {},
"Can't find binding 'foobar' in normal mode"),
# :unbind --mode=wrongmode x
('unbind', ['x'], {'mode': 'wrongmode'},
'Invalid mode wrongmode!'),
# :unbind <blub>
('unbind', ['<blub>'], {},
"Could not parse '<blub>': Got invalid key!"),
])
def test_bind_invalid(self, commands,
command, args, kwargs, expected):
"""Run various wrong :bind/:unbind invocations.
Should show an error.
"""
if command == 'bind':
func = functools.partial(commands.bind, 0)
elif command == 'unbind':
func = commands.unbind
with pytest.raises(cmdutils.CommandError, match=expected):
func(*args, **kwargs)
@pytest.mark.parametrize('key', ['a', 'b', '<Ctrl-X>'])
def test_bind_duplicate(self, commands, config_stub, key_config_stub, key):
"""Run ':bind' with a key which already has been bound.'.
Also tests for https://github.com/qutebrowser/qutebrowser/issues/1544
"""
config_stub.val.bindings.default = {
'normal': {'a': 'nop', '<Ctrl+x>': 'nop'}
}
config_stub.val.bindings.commands = {
'normal': {'b': 'nop'},
}
commands.bind(0, key, 'message-info foo', mode='normal')
command = key_config_stub.get_command(keyseq(key), 'normal')
assert command == 'message-info foo'
def test_bind_none(self, commands, config_stub):
config_stub.val.bindings.commands = None
commands.bind(0, ',x', 'nop')
def test_bind_default(self, commands, key_config_stub, config_stub):
"""Bind a key to its default."""
default_cmd = 'message-info default'
bound_cmd = 'message-info bound'
config_stub.val.bindings.default = {'normal': {'a': default_cmd}}
config_stub.val.bindings.commands = {'normal': {'a': bound_cmd}}
command = key_config_stub.get_command(keyseq('a'), mode='normal')
assert command == bound_cmd
commands.bind(0, 'a', mode='normal', default=True)
command = key_config_stub.get_command(keyseq('a'), mode='normal')
assert command == default_cmd
def test_unbind_none(self, commands, config_stub):
config_stub.val.bindings.commands = None
commands.unbind('H')
@pytest.mark.parametrize('key, normalized', [
('a', 'a'), # default bindings
('b', 'b'), # custom bindings
('c', 'c'), # :bind then :unbind
('<Ctrl-X>', '<Ctrl+x>') # normalized special binding
])
def test_unbind(self, commands, key_config_stub, config_stub, yaml_value,
key, normalized):
config_stub.val.bindings.default = {
'normal': {'a': 'nop', '<ctrl+x>': 'nop'},
'caret': {'a': 'nop', '<ctrl+x>': 'nop'},
}
config_stub.val.bindings.commands = {
'normal': {'b': 'nop'},
'caret': {'b': 'nop'},
}
if key == 'c':
# Test :bind and :unbind
commands.bind(0, key, 'nop')
commands.unbind(key)
assert key_config_stub.get_command(keyseq(key), 'normal') is None
yaml_bindings = yaml_value('bindings.commands')['normal']
if key in 'bc':
# Custom binding
assert normalized not in yaml_bindings
else:
assert yaml_bindings[normalized] is None
|
gpl-3.0
|
Jgarcia-IAS/SITE
|
addons/account/wizard/account_statement_from_invoice.py
|
106
|
3626
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_statement_from_invoice_lines(osv.osv_memory):
"""
Generate Entries by Statement from Invoices
"""
_name = "account.statement.from.invoice.lines"
_description = "Entries by Statement from Invoices"
_columns = {
'line_ids': fields.many2many('account.move.line', 'account_move_line_relation', 'move_id', 'line_id', 'Invoices'),
}
def populate_statement(self, cr, uid, ids, context=None):
context = dict(context or {})
statement_id = context.get('statement_id', False)
if not statement_id:
return {'type': 'ir.actions.act_window_close'}
data = self.read(cr, uid, ids, context=context)[0]
line_ids = data['line_ids']
if not line_ids:
return {'type': 'ir.actions.act_window_close'}
line_obj = self.pool.get('account.move.line')
statement_obj = self.pool.get('account.bank.statement')
statement_line_obj = self.pool.get('account.bank.statement.line')
currency_obj = self.pool.get('res.currency')
line_date = time.strftime('%Y-%m-%d')
statement = statement_obj.browse(cr, uid, statement_id, context=context)
# for each selected move line
for line in line_obj.browse(cr, uid, line_ids, context=context):
ctx = context.copy()
# take the date for computation of currency => use payment date
ctx['date'] = line_date
amount = 0.0
if line.debit > 0:
amount = line.debit
elif line.credit > 0:
amount = -line.credit
if line.amount_currency:
amount = currency_obj.compute(cr, uid, line.currency_id.id,
statement.currency.id, line.amount_currency, context=ctx)
elif (line.invoice and line.invoice.currency_id.id != statement.currency.id):
amount = currency_obj.compute(cr, uid, line.invoice.currency_id.id,
statement.currency.id, amount, context=ctx)
context.update({'move_line_ids': [line.id],
'invoice_id': line.invoice.id})
statement_line_obj.create(cr, uid, {
'name': line.name or '?',
'amount': amount,
'partner_id': line.partner_id.id,
'statement_id': statement_id,
'ref': line.ref,
'date': statement.date,
}, context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
teichopsia-/conFusion
|
node_modules/browser-sync/node_modules/socket.io/node_modules/engine.io/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py
|
1788
|
1435
|
#!/usr/bin/env python
import re
import json
# https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
if codePoint >= 0x0000 and codePoint <= 0xFFFF:
return unichr(codePoint)
elif codePoint >= 0x010000 and codePoint <= 0x10FFFF:
highSurrogate = int((codePoint - 0x10000) / 0x400) + 0xD800
lowSurrogate = int((codePoint - 0x10000) % 0x400) + 0xDC00
return unichr(highSurrogate) + unichr(lowSurrogate)
else:
return 'Error'
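# Example usage (illustrative, not in the original file): unisymbol(0x41)
# returns u'A', while unisymbol(0x1F4A9) returns the surrogate pair
# u'\ud83d\udca9' (two UTF-16 code units).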
def hexify(codePoint):
return 'U+' + hex(codePoint)[2:].upper().zfill(6)
def writeFile(filename, contents):
print filename
with open(filename, 'w') as f:
f.write(contents.strip() + '\n')
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
# Skip non-scalar values.
if codePoint >= 0xD800 and codePoint <= 0xDFFF:
continue
symbol = unisymbol(codePoint)
# http://stackoverflow.com/a/17199950/96656
bytes = symbol.encode('utf8').decode('latin1')
data.append({
'codePoint': codePoint,
'decoded': symbol,
'encoded': bytes
});
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace(' ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
r'\\u([a-fA-F0-9]{4})',
lambda match: r'\u{}'.format(match.group(1).upper()),
jsonData
)
writeFile('data.json', jsonData)
|
mit
|
tody411/ImageViewerFramework
|
ivf/ui/tool/drag_tool.py
|
1
|
1163
|
# -*- coding: utf-8 -*-
## @package npr_sfs.ui.tool.drag_tool
#
# npr_sfs.ui.tool.drag_tool utility package.
# @author tody
# @date 2015/10/27
import numpy as np
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from npr_sfs.ui.tool.base_tool import BaseTool
class DragTool(BaseTool):
outPoint = pyqtSignal(object)
## Constructor
def __init__(self):
super(DragTool, self).__init__()
self._drag_point = None
self._pen = QPen(QColor(255, 0, 0, 100))
self._pen.setWidth(2)
self._pen.setCapStyle(Qt.RoundCap)
def setPointSize(self, point_size):
self._pen.setWidth(point_size)
def setPoint(self, p):
self._drag_point = p
self._view.update()
def mouseMoveEvent(self, e):
if e.buttons() & Qt.LeftButton:
self._drag_point = self._mousePosition(e)
self.outPoint.emit(self._drag_point)
self._view.update()
def _overlayFunc(self, painter):
if self._drag_point is None:
return
painter.setPen(self._pen)
p = self._drag_point
painter.drawPoint(QPoint(p[0], p[1]))
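# Usage sketch (illustrative; `on_drag` is a hypothetical handler): a view
# installs the tool and listens for drag positions via the signal, e.g.
#   tool = DragTool()
#   tool.outPoint.connect(on_drag)  # on_drag receives the mouse position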
|
mit
|
rockneurotiko/django
|
django/conf/urls/__init__.py
|
264
|
4592
|
import warnings
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import (
LocaleRegexURLResolver, RegexURLPattern, RegexURLResolver,
)
from django.utils import six
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango110Warning,
)
__all__ = ['handler400', 'handler403', 'handler404', 'handler500', 'include', 'patterns', 'url']
handler400 = 'django.views.defaults.bad_request'
handler403 = 'django.views.defaults.permission_denied'
handler404 = 'django.views.defaults.page_not_found'
handler500 = 'django.views.defaults.server_error'
def include(arg, namespace=None, app_name=None):
if app_name and not namespace:
raise ValueError('Must specify a namespace if specifying app_name.')
if app_name:
warnings.warn(
'The app_name argument to django.conf.urls.include() is deprecated. '
'Set the app_name in the included URLconf instead.',
RemovedInDjango20Warning, stacklevel=2
)
if isinstance(arg, tuple):
# callable returning a namespace hint
try:
urlconf_module, app_name = arg
except ValueError:
if namespace:
raise ImproperlyConfigured(
'Cannot override the namespace for a dynamic module that provides a namespace'
)
warnings.warn(
'Passing a 3-tuple to django.conf.urls.include() is deprecated. '
'Pass a 2-tuple containing the list of patterns and app_name, '
'and provide the namespace argument to include() instead.',
RemovedInDjango20Warning, stacklevel=2
)
urlconf_module, app_name, namespace = arg
else:
# No namespace hint - use manually provided namespace
urlconf_module = arg
if isinstance(urlconf_module, six.string_types):
urlconf_module = import_module(urlconf_module)
patterns = getattr(urlconf_module, 'urlpatterns', urlconf_module)
app_name = getattr(urlconf_module, 'app_name', app_name)
if namespace and not app_name:
warnings.warn(
'Specifying a namespace in django.conf.urls.include() without '
'providing an app_name is deprecated. Set the app_name attribute '
'in the included module, or pass a 2-tuple containing the list of '
'patterns and app_name instead.',
RemovedInDjango20Warning, stacklevel=2
)
namespace = namespace or app_name
# Make sure we can iterate through the patterns (without this, some
# testcases will break).
if isinstance(patterns, (list, tuple)):
for url_pattern in patterns:
# Test if the LocaleRegexURLResolver is used within the include;
# this should throw an error since this is not allowed!
if isinstance(url_pattern, LocaleRegexURLResolver):
raise ImproperlyConfigured(
'Using i18n_patterns in an included URLconf is not allowed.')
return (urlconf_module, app_name, namespace)
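# Typical usage (illustrative, in line with the deprecation notes above):
#   url(r'^blog/', include('blog.urls', namespace='blog'))
# where blog/urls.py defines urlpatterns and sets app_name = 'blog', or
#   url(r'^blog/', include(('blog.urls', 'blog'), namespace='blog'))
# to supply the (urlconf_module, app_name) 2-tuple directly.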
def patterns(prefix, *args):
warnings.warn(
'django.conf.urls.patterns() is deprecated and will be removed in '
'Django 1.10. Update your urlpatterns to be a list of '
'django.conf.urls.url() instances instead.',
RemovedInDjango110Warning, stacklevel=2
)
pattern_list = []
for t in args:
if isinstance(t, (list, tuple)):
t = url(prefix=prefix, *t)
elif isinstance(t, RegexURLPattern):
t.add_prefix(prefix)
pattern_list.append(t)
return pattern_list
def url(regex, view, kwargs=None, name=None, prefix=''):
if isinstance(view, (list, tuple)):
# For include(...) processing.
urlconf_module, app_name, namespace = view
return RegexURLResolver(regex, urlconf_module, kwargs, app_name=app_name, namespace=namespace)
else:
if isinstance(view, six.string_types):
warnings.warn(
'Support for string view arguments to url() is deprecated and '
'will be removed in Django 1.10 (got %s). Pass the callable '
'instead.' % view,
RemovedInDjango110Warning, stacklevel=2
)
if not view:
raise ImproperlyConfigured('Empty URL pattern view name not permitted (for pattern %r)' % regex)
if prefix:
view = prefix + '.' + view
return RegexURLPattern(regex, view, kwargs, name)
|
bsd-3-clause
|
poulp/flask-todolist
|
app/api/views.py
|
1
|
6050
|
# -*- coding: utf-8 -*-
from flask import jsonify, request, abort
from . import api
from ..models import User, Todo, TodoList
from ..decorators import admin_required
@api.route('/users/')
def get_users():
users = User.query.all()
return jsonify({
'users': [{'user': user.to_json()} for user in users]
})
@api.route('/user/<username>/')
def get_user(username):
user = User.query.filter_by(username=username).first()
if not user:
abort(404)
return jsonify({'user': user.to_json()})
@api.route('/user/', methods=['POST'])
def add_user():
try:
username = request.json.get('username')
email = request.json.get('email')
password = request.json.get('password')
if User.is_valid_username(username) and User.is_valid_email(email) \
and User.is_valid_password(password):
user = User(
username=username, email=email, password=password).save()
else:
abort(400)
except:
abort(400)
return jsonify({'user': user.to_json()}), 201
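# Example request (illustrative): POST /user/ with the JSON body
#   {"username": "alice", "email": "alice@example.com", "password": "s3cret"}
# returns {"user": {...}} with status 201, or aborts with 400 on invalid input.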
@api.route('/user/<username>/todolists/')
def get_user_todolists(username):
user = User.query.filter_by(username=username).first()
if not user:
abort(404)
todolists = user.todolists
return jsonify({
'todolists': [todolist.to_json() for todolist in todolists]
})
@api.route('/user/<username>/todolist/<int:todolist_id>/')
def get_user_todolist(username, todolist_id):
user = User.query.filter_by(username=username).first()
todolist = TodoList.query.get_or_404(todolist_id)
if not user or username != todolist.creator:
abort(404)
return jsonify({'todolist': todolist.to_json()})
@api.route('/user/<username>/todolist/', methods=['POST'])
def add_user_todolist(username):
try:
user = User.query.filter_by(username=username).one()
todolist = TodoList(title=request.json.get('title'),
creator=user.username).save()
except:
abort(400)
return jsonify({'todolist': todolist.to_json()}), 201
@api.route('/todolists/')
def get_todolists():
todolists = TodoList.query.all()
return jsonify({
'todolists': [todolist.to_json() for todolist in todolists]
})
@api.route('/todolist/<int:todolist_id>/')
def get_todolist(todolist_id):
todolist = TodoList.query.get_or_404(todolist_id)
return jsonify({'todolist': todolist.to_json()})
@api.route('/todolist/', methods=['POST'])
def add_todolist():
try:
title = request.json.get('title')
if title and TodoList.is_valid_title(title):
todolist = TodoList(title=title).save()
else:
abort(400)
except:
abort(400)
return jsonify({'todolist': todolist.to_json()}), 201
@api.route('/todolist/<int:todolist_id>/todos/')
def get_todolist_todos(todolist_id):
todolist = TodoList.query.get_or_404(todolist_id)
return jsonify({
'todos': [todo.to_json() for todo in todolist.todos]
})
@api.route('/user/<username>/todolist/<int:todolist_id>/todos/')
def get_user_todolist_todos(username, todolist_id):
todolist = TodoList.query.get_or_404(todolist_id)
if todolist.creator != username:
abort(404)
return jsonify({
'todos': [todo.to_json() for todo in todolist.todos]
})
@api.route('/user/<username>/todolist/<int:todolist_id>/', methods=['POST'])
def add_user_todolist_todo(username, todolist_id):
try:
user = User.query.filter_by(username=username).one()
# this way we check the existence of the todolist
todolist = TodoList.query.get(todolist_id)
todo = Todo(description=request.json.get('description'),
todolist_id=todolist.id, creator=username).save()
except:
abort(400)
return jsonify({'todo': todo.to_json()}), 201
@api.route('/todolist/<int:todolist_id>/', methods=['POST'])
def add_todolist_todo(todolist_id):
try:
todolist = TodoList.query.get(todolist_id)
todo = Todo(description=request.json.get('description'),
todolist_id=todolist.id).save()
except:
abort(400)
return jsonify({'todo': todo.to_json()}), 201
@api.route('/todo/<int:todo_id>/', methods=['PUT'])
def update_todo_status(todo_id):
try:
todo = Todo.query.get(todo_id)
if request.json.get('status') == 'finished':
todo.finished()
elif request.json.get('status') == 'reopen':
todo.reopen()
else:
abort(400)
except:
abort(400)
return jsonify({'todo': todo.to_json()})
@api.route('/todolist/<int:todolist_id>/', methods=['PUT'])
def change_todolist_title(todolist_id):
try:
todolist = TodoList.query.get(todolist_id)
title = request.json.get('title')
if TodoList.is_valid_title(title):
todolist.change_title(title)
else:
abort(400)
except:
abort(400)
return jsonify({'todolist': todolist.to_json()})
@api.route('/user/<int:user_id>/', methods=['DELETE'])
@admin_required
def delete_user(user_id):
try:
user = User.query.get(user_id)
if user_id == request.json.get('user_id'):
user.delete()
return jsonify()
else:
abort(400)
except:
abort(400)
@api.route('/todolist/<int:todolist_id>/', methods=['DELETE'])
@admin_required
def delete_todolist(todolist_id):
try:
todolist = TodoList.query.get(todolist_id)
if todolist_id == request.json.get('todolist_id'):
todolist.delete()
return jsonify()
else:
abort(400)
except:
abort(400)
@api.route('/todo/<int:todo_id>/', methods=['DELETE'])
@admin_required
def delete_todo(todo_id):
try:
todo = Todo.query.get(todo_id)
if todo_id == request.json.get('todo_id'):
todo.delete()
return jsonify()
else:
abort(400)
except:
abort(400)
|
mit
|
rohanp/Algorithm-Implementations
|
100_Doors_Problem/Python/kennyledet/100_Doors.py
|
25
|
2463
|
'''
Author: Kendrick Ledet 2014
Problem: You have 100 doors in a row that are all initially closed. You make 100 passes by the doors.
The first time through, you visit every door and toggle the door (if the door is closed, you open it; if it is open, you close it).
The second time you only visit every 2nd door (door #2, #4, #6, ...). The third time, every 3rd door (door #3, #6, #9, ...),
etc, until you only visit the 100th door.
Question: What state are the doors in after the last pass? Which are open, which are closed?
Note: The only doors that remain open are those whose numbers are perfect squares of integers. Opening only those doors is an optimization that may also be expressed.
See: http://rosettacode.org/wiki/100_doors
'''
import math
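# Illustrative aside (not part of the original solution): door n is toggled
# once per divisor of n, and divisors pair up as (d, n / d) except when
# d == n / d, that is, when n is a perfect square. So exactly the
# square-numbered doors are toggled an odd number of times and stay open.
def divisor_count(n):
    '''Count the divisors of n; doors with an odd count remain open.'''
    return sum(1 for d in xrange(1, n + 1) if n % d == 0)
# e.g. divisor_count(36) == 9 (odd: door 36 stays open), divisor_count(12) == 6.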
def test_doors(doors):
'''Check that all open door numbers are perfect squares'''
for i, door in enumerate(doors):
door_num = i + 1
if door and math.sqrt(door_num) % 1 > 0: # if door open and door number is not a perfect square
return False
return True
def print_doors(doors):
for door_num, door in enumerate(doors):
print "Door #{0}: {1}".format(door_num+1, "Open" if door else "Closed")
def pass_doors(doors):
for pass_num in xrange(1, 101): # Make 100 passes
current_door = pass_num-1 # Start current door offset at the current pass number ('-1' to account for lists/arrays starting count at 0)
while current_door <= 99:
doors[current_door] = not doors[current_door] # Open door if close, close if open (negate old value using 'not')
current_door += pass_num # Increment current door number by current pass number
return doors
pass_doors_optimized = lambda doors: [0 if math.sqrt(door_num+1) % 1 > 0 else 1 for door_num, door in enumerate(doors)]  # a list, not a generator, so the result can be printed and then re-checked below
def main():
# Create list of 100 elements initialized to 0 to represent all doors being closed.
doors = [0 for x in xrange(0, 100)]
# Run algorithm
doors = pass_doors(doors)
# Print final door states
print_doors(doors)
# Test algorithm
result = test_doors(doors)
print "Algorithm has {0}".format("passed" if result else "failed")
# Run optimized algorithm
doors = pass_doors_optimized(doors)
print_doors(doors)
# Test optimized algorithm
result = test_doors(doors)
print "Optimized algorithm has {0}".format("passed" if result else "failed")
if __name__ == "__main__":
main()
|
mit
|
kisonecat/sequences-and-series
|
logo/scene.py
|
3
|
3875
|
#! /usr/bin/python
import random
header="""
#version 3.7;
global_settings {
assumed_gamma 1
radiosity{
pretrace_start 0.08
pretrace_end 0.001
count 150
error_bound 0.2
nearest_count 20
recursion_limit 2
brightness 1
}
}
camera {
right x*600/340
location 1.5*<-3.2, 2.2, -3.4>
look_at <3.3, 0, 0>
angle 60
}
"""
plain_plane="""
plane {
y, 0
pigment {
color rgb <1,1,1>
}
finish { ambient 0 diffuse 0.8 }
}
"""
plane="""
plane {
y, 0
#declare BlurAmount = 0.15;
#declare BlurSamples = 20;
texture {
average
texture_map
{
#declare Ind = 0;
#declare S = seed(0);
#while(Ind < BlurSamples)
[1
pigment { color rgb 1.25 }
finish {
reflection {0.2, 1.0 fresnel on}
diffuse 0.7 ambient 0
}
normal {
bumps BlurAmount
translate <rand(S),rand(S),rand(S)>*10
scale 1000
}
]
#declare Ind = Ind+1;
#end
}
}
interior { ior 1.5 }
}
"""
sky="""
sky_sphere {
pigment {
gradient x
color_map {
[0.4 color rgb < 0.700, 0.715, 1.000>*0.5]
[0.85 color rgb < 1.500, 1.400, 1.250>*1.7]
}
scale 2
translate -x
rotate 20*y
}
}
"""
box="""
box {
0, <0.3, 0.3, 0.3>
texture {
pigment {
color rgb <0.9, 0.9, 0.9>
}
finish { ambient 0 diffuse 0.6 }
}
rotate<0, -30, 0>
}
"""
print(header)
print(plain_plane)
print(sky)
block_size = 1.0
block_spacing = 1.35
block_label = """
#declare block_{label} = union {{
box {{
0.5*<-1,0,-1>, 0.5*<1,2,1>
}}
text {{
ttf "cmunbsr.ttf" "{label}" 1, 0
translate <-{shift},0.2,0>
translate <0,0,-0.501>
texture {{
pigment {{
color rgb <20,20,20>/255
}}
finish {{ ambient 0 diffuse 0.8 }}
}}
}}
texture {{
pigment {{
color rgb <230,230,230>/255
}}
finish {{ ambient 0 diffuse 0.8 }} }}
}};
"""
print(block_label.format(label=1,shift=0.175))
print(block_label.format(label=2,shift=0.2))
sequence = '12'
for reps in range(0,15):
new_sequence = ''
flip_flop = 0
for x in sequence:
if x == '1':
new_sequence = new_sequence + str(flip_flop+1)
if x == '2':
new_sequence = new_sequence + str(flip_flop+1) + str(flip_flop+1)
flip_flop = 1 - flip_flop
sequence = new_sequence
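# Observation (not stated in the original source): this loop generates
# successive prefixes of the self-describing Kolakoski sequence; each pass
# reads the current string as run lengths and expands it with alternating
# 1s and 2s, e.g. '12' -> '122' -> '12211' -> '1221121' -> ...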
random.seed(0)
x = -3.0
z = 0.0
for index in range(0,len(sequence)):
x = x + block_spacing + random.uniform( -0.03, 0.03 )
z = z + random.uniform( -0.01, 0.01 )
print("""
object {{ block_{label} rotate y*{angle} translate <{x},0,{z}> }}
""".format(x=x,z=z, label=sequence[index], angle = random.uniform(4,10)))
#print(box)
slab_thickness = 0.036
slab_width = 1.9
print("""
#declare slab = box {{
0, <{width}, {thickness}, 1.0>
}};
""".format(width=slab_width, thickness=slab_thickness))
total = 80
print("""union {""")
x = - 3.0
for index in range(0,total):
# for i in range(0,index):
# x = x + slab_width/(2*(total - i - 1))
if index > 0:
x = x + slab_width / (2*(total - index))
x = x + (slab_width / (2*(total - index))) * random.uniform(-0.01,0)
z = 0
y = index * slab_thickness
z = z + random.uniform(-0.03,0.03)
red = random.uniform(0.65,0.95)
green = random.uniform(0.65,0.95)
blue = random.uniform(0.65,0.95)
print("""
object {{ slab rotate y*{angle} translate <{x},{y},{z}>
texture {{
pigment {{
color rgb <{red},{green},{blue}>
}}
finish {{ ambient 0 diffuse 0.8 }}
}}
}}
""".format(x=x,y=y, z=z, red=red, green=green, blue=blue, angle = random.uniform(-1.5,1.5), color= random.uniform(0.75,0.85)))
print("""rotate 270*y """)
print("""translate <1.7,0,-2.0> """)
print("""}""")
|
gpl-3.0
|
postlund/home-assistant
|
tests/components/simulated/test_sensor.py
|
10
|
1807
|
"""The tests for the simulated sensor."""
import unittest
from homeassistant.components.simulated.sensor import (
CONF_AMP,
CONF_FWHM,
CONF_MEAN,
CONF_PERIOD,
CONF_PHASE,
CONF_RELATIVE_TO_EPOCH,
CONF_SEED,
CONF_UNIT,
DEFAULT_AMP,
DEFAULT_FWHM,
DEFAULT_MEAN,
DEFAULT_NAME,
DEFAULT_PHASE,
DEFAULT_RELATIVE_TO_EPOCH,
DEFAULT_SEED,
)
from homeassistant.const import CONF_FRIENDLY_NAME
from homeassistant.setup import setup_component
from tests.common import get_test_home_assistant
class TestSimulatedSensor(unittest.TestCase):
"""Test the simulated sensor."""
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def test_default_config(self):
"""Test default config."""
config = {"sensor": {"platform": "simulated"}}
assert setup_component(self.hass, "sensor", config)
self.hass.block_till_done()
assert len(self.hass.states.entity_ids()) == 1
state = self.hass.states.get("sensor.simulated")
assert state.attributes.get(CONF_FRIENDLY_NAME) == DEFAULT_NAME
assert state.attributes.get(CONF_AMP) == DEFAULT_AMP
assert state.attributes.get(CONF_UNIT) is None
assert state.attributes.get(CONF_MEAN) == DEFAULT_MEAN
assert state.attributes.get(CONF_PERIOD) == 60.0
assert state.attributes.get(CONF_PHASE) == DEFAULT_PHASE
assert state.attributes.get(CONF_FWHM) == DEFAULT_FWHM
assert state.attributes.get(CONF_SEED) == DEFAULT_SEED
assert state.attributes.get(CONF_RELATIVE_TO_EPOCH) == DEFAULT_RELATIVE_TO_EPOCH
|
apache-2.0
|
muraliselva10/designate
|
designate/api/v1/extensions/sync.py
|
7
|
1759
|
# Copyright 2012 Hewlett-Packard Development Company, L.P. All Rights Reserved.
#
# Author: Kiall Mac Innes <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import flask
from oslo_log import log as logging
from designate.central import rpcapi as central_rpcapi
LOG = logging.getLogger(__name__)
central_api = central_rpcapi.CentralAPI()
blueprint = flask.Blueprint('sync', __name__)
@blueprint.route('/domains/sync', methods=['POST'])
def sync_domains():
context = flask.request.environ.get('context')
central_api.sync_domains(context)
return flask.Response(status=200)
@blueprint.route('/domains/<uuid:domain_id>/sync', methods=['POST'])
def sync_domain(domain_id):
context = flask.request.environ.get('context')
central_api.sync_domain(context, domain_id)
return flask.Response(status=200)
@blueprint.route('/domains/<uuid:domain_id>/records/<uuid:record_id>/sync',
methods=['POST'])
def sync_record(domain_id, record_id):
context = flask.request.environ.get('context')
record = central_api.find_record(context, {'id': record_id})
central_api.sync_record(context, domain_id, record['recordset_id'],
record_id)
return flask.Response(status=200)
|
apache-2.0
|
tylertian/Openstack
|
openstack F/python-keystoneclient/tests/v2_0/test_service_catalog.py
|
1
|
3069
|
import copy
from keystoneclient import access
from keystoneclient import exceptions
from tests import utils
from tests.v2_0 import client_fixtures
class ServiceCatalogTest(utils.TestCase):
def setUp(self):
super(ServiceCatalogTest, self).setUp()
self.AUTH_RESPONSE_BODY = client_fixtures.AUTH_RESPONSE_BODY
def test_building_a_service_catalog(self):
auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY)
sc = auth_ref.service_catalog
self.assertEquals(sc.url_for(service_type='compute'),
"https://compute.north.host/v1/1234")
self.assertEquals(sc.url_for('tenantId', '1', service_type='compute'),
"https://compute.north.host/v1/1234")
self.assertEquals(sc.url_for('tenantId', '2', service_type='compute'),
"https://compute.north.host/v1.1/3456")
self.assertRaises(exceptions.EndpointNotFound, sc.url_for, "region",
"South", service_type='compute')
def test_service_catalog_endpoints(self):
auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY)
sc = auth_ref.service_catalog
public_ep = sc.get_endpoints(service_type='compute',
endpoint_type='publicURL')
self.assertEquals(public_ep['compute'][1]['tenantId'], '2')
self.assertEquals(public_ep['compute'][1]['versionId'], '1.1')
self.assertEquals(public_ep['compute'][1]['internalURL'],
"https://compute.north.host/v1.1/3456")
def test_service_catalog_regions(self):
self.AUTH_RESPONSE_BODY['access']['region_name'] = "North"
auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY)
sc = auth_ref.service_catalog
url = sc.url_for(service_type='image', endpoint_type='publicURL')
self.assertEquals(url, "https://image.north.host/v1/")
self.AUTH_RESPONSE_BODY['access']['region_name'] = "South"
auth_ref = access.AccessInfo.factory(None, self.AUTH_RESPONSE_BODY)
sc = auth_ref.service_catalog
url = sc.url_for(service_type='image', endpoint_type='internalURL')
self.assertEquals(url, "https://image-internal.south.host/v1/")
def test_service_catalog_empty(self):
# We need to do a copy.deepcopy here since
# dict(self.AUTH_RESPONSE_BODY) or self.AUTH_RESPONSE_BODY.copy() will
# only do a shallow copy, and sc_empty['access']['serviceCatalog'] would
# still be a reference into self.AUTH_RESPONSE_BODY, so setting it to
# empty would break the other tests that need a service catalog.
sc_empty = copy.deepcopy(self.AUTH_RESPONSE_BODY)
sc_empty['access']['serviceCatalog'] = []
auth_ref = access.AccessInfo.factory(None, sc_empty)
self.assertRaises(exceptions.EmptyCatalog,
auth_ref.service_catalog.url_for,
service_type='image',
endpoint_type='internalURL')
|
apache-2.0
|
zhujzhuo/Sahara
|
sahara/plugins/mapr/services/mysql/mysql.py
|
5
|
8869
|
# Copyright (c) 2015, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections as c
from oslo_log import log as logging
import six
import sahara.plugins.mapr.domain.configuration_file as cf
import sahara.plugins.mapr.domain.service as s
import sahara.plugins.mapr.services.hive.hive as hive
from sahara.plugins.mapr.services.spark import spark
import sahara.plugins.mapr.util.general as g
import sahara.utils.files as f
LOG = logging.getLogger(__name__)
db_spec = c.namedtuple('DatabaseSpec', ['db_name', 'user', 'password'])
@six.add_metaclass(s.Single)
class MySQL(s.Service):
METRICS_SPECS = db_spec('metrics', 'maprmetrics', 'mapr')
HUE_SPECS = db_spec('hue', 'maprhue', 'mapr')
METASTORE_SPECS = db_spec('metastore', 'maprmetastore', 'mapr')
RDBMS_SPECS = db_spec('rdbms', 'maprrdbms', 'mapr')
OOZIE_SPECS = db_spec('oozie', 'maproozie', 'mapr')
SELECT_DATA = 'mysql -uroot --skip-column-names -e "%s"| grep -E "\w+"'
GET_DBS_LIST = SELECT_DATA % 'SHOW DATABASES'
GET_USERS_HOSTS = (
SELECT_DATA % "SELECT Host FROM mysql.user WHERE mysql.user.User='%s'"
)
SCHEMA_PATH = (
'/opt/mapr/hive/hive-{0}/scripts/metastore/upgrade/mysql/'
'hive-schema-{0}.0.mysql.sql')
DRIVER_CLASS = 'com.mysql.jdbc.Driver'
MYSQL_SERVER_PORT = 3306
MYSQL_INSTALL_SCRIPT = 'plugins/mapr/resources/install_mysql.sh'
INSTALL_PACKAGES_TIMEOUT = 1800
def __init__(self):
super(MySQL, self).__init__()
self._ui_name = 'MySQL'
@staticmethod
def _get_db_daemon_name(distro):
if distro.lower() == 'ubuntu':
return 'mysql'
if distro.lower() in ['centos', 'redhatenterpriseserver', 'suse']:
return 'mysqld'
return None
@staticmethod
def _execute_script(instance, script_path, script_text=None,
user='root', password=None):
with instance.remote() as r:
if script_text:
r.write_file_to(script_path, script_text, run_as_root=True)
LOG.debug('Executing SQL script {path}'.format(path=script_path))
r.execute_command(("mysql %s %s < %s" %
('-u' + user if user else '',
'-p' + password if password else '',
script_path)),
run_as_root=True)
@staticmethod
def _create_service_db(instance, specs):
f_name = 'create_db_%s.sql' % specs.db_name
script = MySQL._create_script_obj(f_name, 'create_database.sql',
db_name=specs.db_name,
user=specs.user,
password=specs.password)
MySQL._execute_script(instance, script.remote_path, script.render())
@staticmethod
def _create_metrics_db(instance, databases, instances):
if MySQL.METRICS_SPECS.db_name not in databases:
MySQL._create_service_db(instance, MySQL.METRICS_SPECS)
MySQL._execute_script(instance=instance,
script_path='/opt/mapr/bin/setup.sql')
MySQL._grant_access(instance, MySQL.METRICS_SPECS, instances)
@staticmethod
def _create_hue_db(instance, databases, instances):
if MySQL.HUE_SPECS.db_name not in databases:
MySQL._create_service_db(instance, MySQL.HUE_SPECS)
MySQL._grant_access(instance, MySQL.HUE_SPECS, instances)
@staticmethod
def _create_rdbms_db(instance, databases, instances):
if MySQL.RDBMS_SPECS.db_name not in databases:
MySQL._create_service_db(instance, MySQL.RDBMS_SPECS)
MySQL._grant_access(instance, MySQL.RDBMS_SPECS, instances)
@staticmethod
def _create_metastore_db(instance, cluster_context, databases, instances):
hive_meta = cluster_context.get_instance(hive.HIVE_METASTORE)
if not hive_meta:
return
db_name = MySQL.METASTORE_SPECS.db_name
if db_name not in databases:
MySQL._create_service_db(instance, MySQL.METASTORE_SPECS)
MySQL._grant_access(instance, MySQL.METASTORE_SPECS, instances)
with hive_meta.remote() as r:
hive_serv = cluster_context.get_service(hive.HIVE_METASTORE)
schema_path = MySQL.SCHEMA_PATH.format(hive_serv.version)
script = MySQL._create_script_obj('hive_schema.sql',
'hive_schema.sql',
db_name=db_name,
path=schema_path)
r.write_file_to(script.remote_path, script.render())
args = {
'user': MySQL.METASTORE_SPECS.user,
'password': MySQL.METASTORE_SPECS.password,
'host': instance.management_ip,
'path': script.remote_path
}
cmd = 'mysql -h{host} -u{user} -p{password} < {path}'
r.execute_command(cmd.format(**args), run_as_root=True)
else:
MySQL._grant_access(instance, MySQL.METASTORE_SPECS, instances)
@staticmethod
def _create_oozie_db(instance, databases, instances):
if MySQL.OOZIE_SPECS.db_name not in databases:
MySQL._create_service_db(instance, MySQL.OOZIE_SPECS)
MySQL._grant_access(instance, MySQL.OOZIE_SPECS, instances)
@staticmethod
def start_mysql_server(cluster_context):
LOG.debug('Starting MySQL Server')
instance = MySQL.get_db_instance(cluster_context)
distro = cluster_context.distro
with instance.remote() as r:
r.execute_command(('service %s restart' %
MySQL._get_db_daemon_name(distro.name)),
run_as_root=True)
LOG.debug('MySQL Server successfully started')
@staticmethod
def get_databases_list(db_instance):
with db_instance.remote() as r:
ec, out = r.execute_command(MySQL.GET_DBS_LIST)
if out:
return out.splitlines()
return list()
@staticmethod
def get_user_hosts(db_instance, username):
with db_instance.remote() as r:
ec, out = r.execute_command(MySQL.GET_USERS_HOSTS % username)
if out:
return out.splitlines()
return list()
@staticmethod
def get_db_instance(context):
return context.oozie_server or context.get_instance(spark.SPARK_MASTER)
@staticmethod
def create_databases(cluster_context, instances):
db_instance = MySQL.get_db_instance(cluster_context)
databases = MySQL.get_databases_list(db_instance)
MySQL._create_metrics_db(db_instance, databases, instances)
MySQL._create_hue_db(db_instance, databases, instances)
MySQL._create_rdbms_db(db_instance, databases, instances)
MySQL._create_oozie_db(db_instance, databases, instances)
MySQL._create_metastore_db(
db_instance, cluster_context, databases, instances)
@staticmethod
def _create_script_obj(filename, template, **kwargs):
script = cf.TemplateFile(filename)
script.remote_path = '/tmp/'
script.parse(f.get_file_text(
'plugins/mapr/services/mysql/resources/%s' % template))
for k, v in six.iteritems(kwargs):
script.add_property(k, v)
return script
@staticmethod
def _grant_access(instance, specs, instances):
f_name = 'grant_access_%s.sql' % specs.db_name
ips = [i.management_ip for i in instances]
user_hosts = MySQL.get_user_hosts(instance, specs.user)
script = MySQL._create_script_obj(f_name, 'grant_access.sql',
hosts=set(ips)-set(user_hosts),
db_name=specs.db_name,
user=specs.user,
password=specs.password)
MySQL._execute_script(instance, script.remote_path, script.render())
@staticmethod
def install_mysql(instance, distro_name):
g.run_script(instance, MySQL.MYSQL_INSTALL_SCRIPT, 'root', distro_name)
|
apache-2.0
|
cancro7/gem5
|
src/mem/HMCController.py
|
25
|
3819
|
# Copyright (c) 2012-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2015 The University of Bologna
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Erfan Azarkhish
from m5.params import *
from XBar import *
# References:
# [1] http://www.open-silicon.com/open-silicon-ips/hmc/
# [2] Ahn, J.; Yoo, S.; Choi, K., "Low-Power Hybrid Memory Cubes With Link
# Power Management and Two-Level Prefetching," TVLSI 2015
# The HMCController class highlights the fact that a component is required
# between host and HMC to convert the host protocol (AXI for example) to the
# serial links protocol. Moreover, this component should have large internal
# queueing to hide the access latency of the HMC.
# Plus, this controller can implement more advanced global scheduling policies
# and can reorder and steer transactions if required. A good example of such
# component is available in [1].
# Also in [2] there is a similar component which is connected to all serial
# links, and it schedules the requests to the ones which are not busy.
# These two references clarify two things:
# 1. The serial links support the same address range and packets can travel
# over any of them.
# 2. One host can be connected to more than 1 serial link simply to achieve
# higher bandwidth, and not for any other reason.
# In this model, we have used a round-robin counter, because it is the
# simplest way to schedule packets over the non-busy serial links. However,
# more advanced scheduling algorithms are possible and even host can dedicate
# each serial link to a portion of the address space and interleave packets
# over them. Yet in this model, we have not made any such assumptions on the
# address space.
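# A minimal sketch of such a round-robin pick (an illustration, not part of
# the gem5 model): given the list of links and a predicate telling whether a
# link is busy, advance a counter to the next link that can accept a packet.
def _round_robin_pick(links, counter, is_busy):
    """Return (link, next_counter), or (None, counter) if every link is busy."""
    for i in range(len(links)):
        idx = (counter + i) % len(links)
        if not is_busy(links[idx]):
            return links[idx], (idx + 1) % len(links)
    return None, counter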
class HMCController(NoncoherentXBar):
type = 'HMCController'
cxx_header = "mem/hmc_controller.hh"
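# --------------------------------------------------------------------------
# A minimal sketch (illustrative only, not part of gem5) of the round-robin
# policy described above: a packet is steered to the first non-busy serial
# link, searching from a rotating counter. The `is_busy` predicate is an
# assumption for illustration; real link state lives in the C++ model.
class RoundRobinPicker(object):
    def __init__(self, num_links):
        self.num_links = num_links
        self.counter = 0

    def pick(self, is_busy):
        # Scan every link once, starting at the counter; return the first
        # non-busy index, or None when all links are currently busy.
        for offset in range(self.num_links):
            idx = (self.counter + offset) % self.num_links
            if not is_busy(idx):
                self.counter = (idx + 1) % self.num_links
                return idx
        return None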
|
bsd-3-clause
|
yocome/zulip
|
zerver/filters.py
|
124
|
1058
|
from __future__ import absolute_import
from django.views.debug import SafeExceptionReporterFilter
from django.http import build_request_repr
class ZulipExceptionReporterFilter(SafeExceptionReporterFilter):
def get_post_parameters(self, request):
filtered_post = SafeExceptionReporterFilter.get_post_parameters(self, request).copy()
filtered_vars = ['content', 'secret', 'password', 'key', 'api-key', 'subject', 'stream',
'subscriptions', 'to', 'csrfmiddlewaretoken', 'api_key']
for var in filtered_vars:
if var in filtered_post:
filtered_post[var] = '**********'
return filtered_post
def get_request_repr(self, request):
if request is None:
return repr(None)
else:
return build_request_repr(request,
POST_override=self.get_post_parameters(request),
COOKIES_override="**********",
META_override="**********")
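# Hedged usage note (not part of this file): Django selects the reporter
# filter through settings, e.g.
#   DEFAULT_EXCEPTION_REPORTER_FILTER = 'zerver.filters.ZulipExceptionReporterFilter'
# so that tracebacks emailed to admins show '**********' for the sensitive
# fields filtered above.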
|
apache-2.0
|
emergence/suds-philpem
|
suds/cache.py
|
1
|
8967
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Contains basic caching classes.
"""
import os
import suds
from tempfile import gettempdir as tmp
from suds.transport import *
from suds.sax.parser import Parser
from suds.sax.element import Element
from datetime import datetime as dt
from datetime import timedelta
from cStringIO import StringIO
from logging import getLogger
try:
    import cPickle as pickle
except ImportError:
    import pickle
log = getLogger(__name__)
class Cache:
"""
    An object cache.
"""
def get(self, id):
"""
        Get an object from the cache by ID.
@param id: The object ID.
@type id: str
@return: The object, else None
@rtype: any
"""
raise Exception('not-implemented')
def getf(self, id):
"""
        Get a file pointer to an object in the cache by ID.
        @param id: The object ID.
        @type id: str
        @return: An open file for the object, else None
        @rtype: file-like object
"""
raise Exception('not-implemented')
def put(self, id, object):
"""
        Put an object into the cache.
@param id: The object ID.
@type id: str
@param object: The object to add.
@type object: any
"""
raise Exception('not-implemented')
def putf(self, id, fp):
"""
        Write the contents of a file pointer into the cache.
@param id: The object ID.
@type id: str
@param fp: File pointer.
@type fp: file-like object.
"""
raise Exception('not-implemented')
def purge(self, id):
"""
        Purge an object from the cache by ID.
        @param id: The object ID.
@type id: str
"""
raise Exception('not-implemented')
def clear(self):
"""
Clear all objects from the cache.
"""
raise Exception('not-implemented')
class NoCache(Cache):
"""
    The pass-through object cache; it stores nothing.
"""
def get(self, id):
return None
def getf(self, id):
return None
def put(self, id, object):
pass
def putf(self, id, fp):
pass
class FileCache(Cache):
"""
A file-based URL cache.
@cvar fnprefix: The file name prefix.
    @type fnprefix: str
@ivar duration: The cached file duration which defines how
long the file will be cached.
@type duration: (unit, value)
@ivar location: The directory for the cached files.
@type location: str
"""
fnprefix = 'suds'
units = ('months', 'weeks', 'days', 'hours', 'minutes', 'seconds')
def __init__(self, location=None, **duration):
"""
@param location: The directory for the cached files.
@type location: str
@param duration: The cached file duration which defines how
long the file will be cached. A duration=0 means forever.
The duration may be: (months|weeks|days|hours|minutes|seconds).
@type duration: {unit:value}
"""
if location is None:
location = os.path.join(tmp(), 'suds')
self.location = location
self.duration = (None, 0)
self.setduration(**duration)
self.checkversion()
def fnsuffix(self):
"""
Get the file name suffix
@return: The suffix
@rtype: str
"""
return 'gcf'
def setduration(self, **duration):
"""
Set the caching duration which defines how long the
file will be cached.
@param duration: The cached file duration which defines how
long the file will be cached. A duration=0 means forever.
The duration may be: (months|weeks|days|hours|minutes|seconds).
@type duration: {unit:value}
"""
if len(duration) == 1:
arg = duration.items()[0]
if not arg[0] in self.units:
raise Exception('must be: %s' % str(self.units))
self.duration = arg
return self
def setlocation(self, location):
"""
Set the location (directory) for the cached files.
@param location: The directory for the cached files.
@type location: str
"""
self.location = location
def mktmp(self):
"""
        Make the I{location} directory if it doesn't already exist.
"""
try:
if not os.path.isdir(self.location):
os.makedirs(self.location)
except:
log.debug(self.location, exc_info=1)
return self
def put(self, id, bfr):
try:
fn = self.__fn(id)
f = self.open(fn, 'w')
f.write(str(bfr))
f.close()
return bfr
except:
log.debug(id, exc_info=1)
return bfr
def putf(self, id, fp):
try:
fn = self.__fn(id)
f = self.open(fn, 'w')
f.write(fp.read())
fp.close()
f.close()
return open(fn)
except:
log.debug(id, exc_info=1)
return fp
def get(self, id):
try:
f = self.getf(id)
bfr = f.read()
f.close()
return bfr
except:
pass
def getf(self, id):
try:
fn = self.__fn(id)
self.validate(fn)
return self.open(fn)
except:
pass
def validate(self, fn):
"""
Validate that the file has not expired based on the I{duration}.
@param fn: The file name.
@type fn: str
"""
if self.duration[1] < 1:
return
created = dt.fromtimestamp(os.path.getctime(fn))
d = { self.duration[0]:self.duration[1] }
expired = created+timedelta(**d)
if expired < dt.now():
log.debug('%s expired, deleted', fn)
os.remove(fn)
def clear(self):
for fn in os.listdir(self.location):
path = os.path.join(self.location, fn)
if os.path.isdir(path):
continue
if fn.startswith(self.fnprefix):
os.remove(path)
log.debug('deleted: %s', path)
def purge(self, id):
fn = self.__fn(id)
try:
os.remove(fn)
except:
pass
def open(self, fn, *args):
"""
Open the cache file making sure the directory is created.
"""
self.mktmp()
return open(fn, *args)
def checkversion(self):
path = os.path.join(self.location, 'version')
try:
f = self.open(path)
version = f.read()
f.close()
if version != suds.__version__:
raise Exception()
except:
self.clear()
f = self.open(path, 'w')
f.write(suds.__version__)
f.close()
def __fn(self, id):
name = id
suffix = self.fnsuffix()
fn = '%s-%s.%s' % (self.fnprefix, name, suffix)
return os.path.join(self.location, fn)
class DocumentCache(FileCache):
"""
Provides xml document caching.
"""
def fnsuffix(self):
return 'xml'
def get(self, id):
try:
fp = FileCache.getf(self, id)
if fp is None:
return None
p = Parser()
return p.parse(fp)
except:
FileCache.purge(self, id)
def put(self, id, object):
if isinstance(object, Element):
FileCache.put(self, id, str(object))
return object
class ObjectCache(FileCache):
"""
Provides pickled object caching.
@cvar protocol: The pickling protocol.
@type protocol: int
"""
protocol = 2
def fnsuffix(self):
return 'px'
def get(self, id):
try:
fp = FileCache.getf(self, id)
if fp is None:
return None
else:
return pickle.load(fp)
except:
FileCache.purge(self, id)
def put(self, id, object):
bfr = pickle.dumps(object, self.protocol)
FileCache.put(self, id, bfr)
return object
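# --- Hedged usage sketch (illustrative, not part of the suds source) ---
# Exercises the duration-based FileCache defined above; assumes a writable
# temp directory. Guarded so nothing runs on import.
if __name__ == '__main__':
    cache = FileCache(days=1)            # entries expire after one day
    cache.put('example', 'hello cache')  # stored as suds-example.gcf
    print cache.get('example')           # -> 'hello cache'
    cache.purge('example')               # drop the single entry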
|
lgpl-3.0
|
galtys/odoo
|
addons/mail/ir_attachment.py
|
378
|
5643
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-TODAY OpenERP SA (http://www.openerp.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import os.path
class IrAttachment(osv.Model):
""" Update partner to add a field about notification preferences """
_name = "ir.attachment"
_inherit = 'ir.attachment'
_fileext_to_type = {
'7z': 'archive',
'aac': 'audio',
'ace': 'archive',
'ai': 'vector',
'aiff': 'audio',
'apk': 'archive',
'app': 'binary',
'as': 'script',
'asf': 'video',
'ass': 'text',
'avi': 'video',
'bat': 'script',
'bin': 'binary',
'bmp': 'image',
'bzip2': 'archive',
'c': 'script',
'cab': 'archive',
'cc': 'script',
'ccd': 'disk',
'cdi': 'disk',
'cdr': 'vector',
'cer': 'certificate',
'cgm': 'vector',
'cmd': 'script',
'coffee': 'script',
'com': 'binary',
'cpp': 'script',
'crl': 'certificate',
'crt': 'certificate',
'cs': 'script',
'csr': 'certificate',
'css': 'html',
'csv': 'spreadsheet',
'cue': 'disk',
'd': 'script',
'dds': 'image',
'deb': 'archive',
'der': 'certificate',
'djvu': 'image',
'dmg': 'archive',
'dng': 'image',
'doc': 'document',
'docx': 'document',
'dvi': 'print',
'eot': 'font',
'eps': 'vector',
'exe': 'binary',
'exr': 'image',
'flac': 'audio',
'flv': 'video',
'gif': 'webimage',
'gz': 'archive',
'gzip': 'archive',
'h': 'script',
'htm': 'html',
'html': 'html',
'ico': 'image',
'icon': 'image',
'img': 'disk',
'iso': 'disk',
'jar': 'archive',
'java': 'script',
'jp2': 'image',
'jpe': 'webimage',
'jpeg': 'webimage',
'jpg': 'webimage',
'jpx': 'image',
'js': 'script',
'key': 'presentation',
'keynote': 'presentation',
'lisp': 'script',
'lz': 'archive',
'lzip': 'archive',
'm': 'script',
'm4a': 'audio',
'm4v': 'video',
'mds': 'disk',
'mdx': 'disk',
'mid': 'audio',
'midi': 'audio',
'mkv': 'video',
'mng': 'image',
'mp2': 'audio',
'mp3': 'audio',
'mp4': 'video',
'mpe': 'video',
'mpeg': 'video',
'mpg': 'video',
'nrg': 'disk',
'numbers': 'spreadsheet',
'odg': 'vector',
'odm': 'document',
'odp': 'presentation',
'ods': 'spreadsheet',
'odt': 'document',
'ogg': 'audio',
'ogm': 'video',
'otf': 'font',
'p12': 'certificate',
'pak': 'archive',
'pbm': 'image',
'pdf': 'print',
'pem': 'certificate',
'pfx': 'certificate',
'pgf': 'image',
'pgm': 'image',
'pk3': 'archive',
'pk4': 'archive',
'pl': 'script',
'png': 'webimage',
'pnm': 'image',
'ppm': 'image',
'pps': 'presentation',
'ppt': 'presentation',
'ps': 'print',
'psd': 'image',
'psp': 'image',
'py': 'script',
'r': 'script',
'ra': 'audio',
'rar': 'archive',
'rb': 'script',
'rpm': 'archive',
'rtf': 'text',
'sh': 'script',
'sub': 'disk',
'svg': 'vector',
'sxc': 'spreadsheet',
'sxd': 'vector',
'tar': 'archive',
'tga': 'image',
'tif': 'image',
'tiff': 'image',
'ttf': 'font',
'txt': 'text',
'vbs': 'script',
'vc': 'spreadsheet',
'vml': 'vector',
'wav': 'audio',
'webp': 'image',
'wma': 'audio',
'wmv': 'video',
'woff': 'font',
'xar': 'vector',
'xbm': 'image',
'xcf': 'image',
'xhtml': 'html',
'xls': 'spreadsheet',
'xlsx': 'spreadsheet',
'xml': 'html',
'zip': 'archive'
}
def get_attachment_type(self, cr, uid, ids, name, args, context=None):
result = {}
for attachment in self.browse(cr, uid, ids, context=context):
fileext = os.path.splitext(attachment.datas_fname or '')[1].lower()[1:]
result[attachment.id] = self._fileext_to_type.get(fileext, 'unknown')
return result
_columns = {
'file_type_icon': fields.function(get_attachment_type, type='char', string='File Type Icon'),
'file_type': fields.related('file_type_icon', type='char'), # FIXME remove in trunk
}
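# Hedged illustration (not part of the odoo source): how a filename resolves
# to an icon type through get_attachment_type above.
#   os.path.splitext('report.PDF')[1].lower()[1:]        -> 'pdf'
#   IrAttachment._fileext_to_type.get('pdf', 'unknown')  -> 'print'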
|
agpl-3.0
|
google-research/tapas
|
tapas/utils/tabfact_utils.py
|
1
|
6847
|
# coding=utf-8
# Copyright 2019 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Utils for converting TabFact to SQA format."""
import collections
import csv
import json
import os
import re
from typing import Any, Iterable, List, Mapping, Set, Text, Tuple
from absl import logging
import six
from tapas.protos import interaction_pb2
import tensorflow.compat.v1 as tf
_TABLE_DIR_NAME = 'table_csv' # Name that the table folder has in SQA.
FEATURE_ALL = 'ALL'
FEATURE_TRUE = 'TRUE'
FEATURE_FALSE = 'FALSE'
FEATURE_SUPERLATIVE = 'SUPER'
FEATURE_COMPARATIVE = 'COMP'
FEATURE_NEGATIVE = 'NEG'
FEATURE_PERCENTAGE = 'PERC'
FEATURE_AGGREGATION = 'AGG'
_SUPERLATIVE_WORDS = {
'lowest', 'highest', 'best', 'worst', 'least', 'most', 'oldest', 'newest',
'fewest', 'last', 'first', 'earliest', 'latest', 'greatest', 'smallest'
}
_COMPARATIVE_WORDS = {
'than', 'less', 'more', 'better', 'worse', 'higher', 'lower', 'shorter',
'same'
}
_NEGATIVE_WORDS = {'not', 'any', 'none', 'no', 'never'}
_AGGREGATION_WORDS = {
'only', 'there', 'total', 'count', 'average', 'sum', 'amount'
}
_PERCENTAGE_RE = re.compile(r'\d[\d.]* ?%')
_DEFAULT_FEATURE_LIST = [
FEATURE_TRUE, FEATURE_FALSE, FEATURE_SUPERLATIVE, FEATURE_COMPARATIVE,
FEATURE_NEGATIVE, FEATURE_PERCENTAGE, FEATURE_AGGREGATION
]
_ExperimentMetric = Mapping[Text, Iterable[Tuple[Text, float]]]
def get_features_single(question, label):
"""Add a feature bag of words features as well as some hard-coded ones."""
tokens = question.split()
features = set(tokens)
features.add(FEATURE_ALL)
features.add(FEATURE_TRUE if label else FEATURE_FALSE)
if features & _NEGATIVE_WORDS:
features.add(FEATURE_NEGATIVE)
if features & _SUPERLATIVE_WORDS:
features.add(FEATURE_SUPERLATIVE)
if features & _COMPARATIVE_WORDS:
features.add(FEATURE_COMPARATIVE)
if features & _AGGREGATION_WORDS:
features.add(FEATURE_AGGREGATION)
if re.search(_PERCENTAGE_RE, question):
features.add(FEATURE_PERCENTAGE)
return features, label
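# Hedged example (illustrative): for the call below, the returned feature
# set holds the raw tokens plus FEATURE_ALL, FEATURE_TRUE, FEATURE_NEGATIVE
# (via 'no') and FEATURE_COMPARATIVE (via 'more'/'than').
#   get_features_single('no team scored more than 3 goals', True)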
def get_features(questions):
return [
get_features_single(question, label)
for question, label in zip(questions[0], questions[1])
]
def get_interesting_features(accuracy,
                             headroom,
                             num_features):
  """Find the most interesting features in terms of headroom and saliency."""
  def _get_feature_score(feature):
    # Compute a heuristic to rank features according to both the headroom and
    # the error rate in that particular group compared to the baseline.
    metrics = zip(accuracy[FEATURE_ALL], accuracy[feature], headroom[feature])
return max(exp_room * (overall_acc - exp_acc)
for (_, overall_acc), (_, exp_acc), (_, exp_room) in metrics)
non_default_features = (
feature for feature in accuracy if feature not in _DEFAULT_FEATURE_LIST)
results = sorted(non_default_features, key=_get_feature_score, reverse=True)
return (_DEFAULT_FEATURE_LIST + list(results))[:num_features]
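# Worked illustration (hedged, invented numbers): if a feature group scores
# accuracy 0.70 where the overall accuracy is 0.80 and the group headroom is
# 0.25, its score for that experiment is 0.25 * (0.80 - 0.70) = 0.025; the
# max over experiments favors features that are both sizeable and weak.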
def _log_stats(counter, file_name):
"""Logs counters to file."""
logging.info('-------- STATS for: %s --------', file_name)
logging.info('Questions: %d', counter['questions'])
logging.info('----------------')
def _convert_data(
all_questions,
input_file,
tables,
):
"""Converts TabFact data to interactions format."""
logging.info('Converting data from: %s...', input_file)
counter = collections.Counter() # Counter for stats.
with tf.io.gfile.GFile(input_file) as file_in:
for table_id in json.load(file_in):
questions, labels, _ = all_questions[table_id]
for i, (text, label) in enumerate(zip(questions, labels)):
# The extra zeros are there to match SQA id format.
question_id = f'{table_id}_{i}-0'
question = interaction_pb2.Question(
id=f'{question_id}_0',
original_text=text,
answer=interaction_pb2.Answer(class_index=label))
table = interaction_pb2.Table()
table.CopyFrom(tables[table_id])
yield interaction_pb2.Interaction(
id=question_id, questions=[question], table=table)
counter['questions'] += 1
if counter['questions'] % 1000 == 0:
logging.info('Processed %d questions...', counter['questions'])
_log_stats(counter, input_file)
def read_all_tables(input_dir):
"""Read tables from the original format."""
csv_path = os.path.join(input_dir, 'data', 'all_csv')
results = {}
for table_id in tf.io.gfile.listdir(csv_path):
with tf.io.gfile.GFile(os.path.join(csv_path, table_id)) as f:
results[table_id] = f.read()
return results
def _convert_table(table_id, table_text):
"""Parses a table from # separated values format into proto format."""
rows = []
with six.StringIO(table_text) as csv_in:
for index, row in enumerate(csv.reader(csv_in, delimiter='#')):
cells = [interaction_pb2.Cell(text=text) for text in row]
if index == 0:
columns = cells
else:
rows.append(interaction_pb2.Cells(cells=cells))
return interaction_pb2.Table(
table_id=f'{_TABLE_DIR_NAME}/{table_id}', columns=columns, rows=rows)
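# Hedged example (illustrative): parsing a two-line '#'-separated table.
#   _convert_table('t1.csv', 'name#score\nalice#3\n') yields a Table whose
#   columns are [Cell('name'), Cell('score')], whose single row holds
#   [Cell('alice'), Cell('3')], and whose table_id is 'table_csv/t1.csv'.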
def _convert_tables(input_dir):
"""Read tables the original format file and parse into proto format."""
return {
table_id: _convert_table(table_id, table_text)
for table_id, table_text in read_all_tables(input_dir).items()
}
def read_questions(input_dir):
collected_data = {}
for i in [1, 2]:
input_file = os.path.join(input_dir,
f'collected_data/r{i}_training_all.json')
with tf.gfile.Open(input_file) as f:
collected_data.update(json.load(f))
return collected_data
def convert(input_dir):
"""Converts from TabFact to SQA format.
Args:
input_dir: The original TabFact data.
Returns:
Iterables of interactions for each split in train, dev and test.
"""
tables = _convert_tables(input_dir)
questions = read_questions(input_dir)
splits = {
'train': 'train_id.json',
'test': 'test_id.json',
'dev': 'val_id.json',
}
return {
split: _convert_data(questions, os.path.join(input_dir, 'data', file),
tables) for split, file in splits.items()
}
|
apache-2.0
|