repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
grantsewell/nzbToMedia | libs/unidecode/x065.py | 252 | 4638 | data = (
'Pan ', # 0x00
'Yang ', # 0x01
'Lei ', # 0x02
'Sa ', # 0x03
'Shu ', # 0x04
'Zan ', # 0x05
'Nian ', # 0x06
'Xian ', # 0x07
'Jun ', # 0x08
'Huo ', # 0x09
'Li ', # 0x0a
'La ', # 0x0b
'Han ', # 0x0c
'Ying ', # 0x0d
'Lu ', # 0x0e
'Long ', # 0x0f
'Qian ', # 0x10
'Qian ', # 0x11
'Zan ', # 0x12
'Qian ', # 0x13
'Lan ', # 0x14
'San ', # 0x15
'Ying ', # 0x16
'Mei ', # 0x17
'Rang ', # 0x18
'Chan ', # 0x19
'[?] ', # 0x1a
'Cuan ', # 0x1b
'Xi ', # 0x1c
'She ', # 0x1d
'Luo ', # 0x1e
'Jun ', # 0x1f
'Mi ', # 0x20
'Li ', # 0x21
'Zan ', # 0x22
'Luan ', # 0x23
'Tan ', # 0x24
'Zuan ', # 0x25
'Li ', # 0x26
'Dian ', # 0x27
'Wa ', # 0x28
'Dang ', # 0x29
'Jiao ', # 0x2a
'Jue ', # 0x2b
'Lan ', # 0x2c
'Li ', # 0x2d
'Nang ', # 0x2e
'Zhi ', # 0x2f
'Gui ', # 0x30
'Gui ', # 0x31
'Qi ', # 0x32
'Xin ', # 0x33
'Pu ', # 0x34
'Sui ', # 0x35
'Shou ', # 0x36
'Kao ', # 0x37
'You ', # 0x38
'Gai ', # 0x39
'Yi ', # 0x3a
'Gong ', # 0x3b
'Gan ', # 0x3c
'Ban ', # 0x3d
'Fang ', # 0x3e
'Zheng ', # 0x3f
'Bo ', # 0x40
'Dian ', # 0x41
'Kou ', # 0x42
'Min ', # 0x43
'Wu ', # 0x44
'Gu ', # 0x45
'He ', # 0x46
'Ce ', # 0x47
'Xiao ', # 0x48
'Mi ', # 0x49
'Chu ', # 0x4a
'Ge ', # 0x4b
'Di ', # 0x4c
'Xu ', # 0x4d
'Jiao ', # 0x4e
'Min ', # 0x4f
'Chen ', # 0x50
'Jiu ', # 0x51
'Zhen ', # 0x52
'Duo ', # 0x53
'Yu ', # 0x54
'Chi ', # 0x55
'Ao ', # 0x56
'Bai ', # 0x57
'Xu ', # 0x58
'Jiao ', # 0x59
'Duo ', # 0x5a
'Lian ', # 0x5b
'Nie ', # 0x5c
'Bi ', # 0x5d
'Chang ', # 0x5e
'Dian ', # 0x5f
'Duo ', # 0x60
'Yi ', # 0x61
'Gan ', # 0x62
'San ', # 0x63
'Ke ', # 0x64
'Yan ', # 0x65
'Dun ', # 0x66
'Qi ', # 0x67
'Dou ', # 0x68
'Xiao ', # 0x69
'Duo ', # 0x6a
'Jiao ', # 0x6b
'Jing ', # 0x6c
'Yang ', # 0x6d
'Xia ', # 0x6e
'Min ', # 0x6f
'Shu ', # 0x70
'Ai ', # 0x71
'Qiao ', # 0x72
'Ai ', # 0x73
'Zheng ', # 0x74
'Di ', # 0x75
'Zhen ', # 0x76
'Fu ', # 0x77
'Shu ', # 0x78
'Liao ', # 0x79
'Qu ', # 0x7a
'Xiong ', # 0x7b
'Xi ', # 0x7c
'Jiao ', # 0x7d
'Sen ', # 0x7e
'Jiao ', # 0x7f
'Zhuo ', # 0x80
'Yi ', # 0x81
'Lian ', # 0x82
'Bi ', # 0x83
'Li ', # 0x84
'Xiao ', # 0x85
'Xiao ', # 0x86
'Wen ', # 0x87
'Xue ', # 0x88
'Qi ', # 0x89
'Qi ', # 0x8a
'Zhai ', # 0x8b
'Bin ', # 0x8c
'Jue ', # 0x8d
'Zhai ', # 0x8e
'[?] ', # 0x8f
'Fei ', # 0x90
'Ban ', # 0x91
'Ban ', # 0x92
'Lan ', # 0x93
'Yu ', # 0x94
'Lan ', # 0x95
'Wei ', # 0x96
'Dou ', # 0x97
'Sheng ', # 0x98
'Liao ', # 0x99
'Jia ', # 0x9a
'Hu ', # 0x9b
'Xie ', # 0x9c
'Jia ', # 0x9d
'Yu ', # 0x9e
'Zhen ', # 0x9f
'Jiao ', # 0xa0
'Wo ', # 0xa1
'Tou ', # 0xa2
'Chu ', # 0xa3
'Jin ', # 0xa4
'Chi ', # 0xa5
'Yin ', # 0xa6
'Fu ', # 0xa7
'Qiang ', # 0xa8
'Zhan ', # 0xa9
'Qu ', # 0xaa
'Zhuo ', # 0xab
'Zhan ', # 0xac
'Duan ', # 0xad
'Zhuo ', # 0xae
'Si ', # 0xaf
'Xin ', # 0xb0
'Zhuo ', # 0xb1
'Zhuo ', # 0xb2
'Qin ', # 0xb3
'Lin ', # 0xb4
'Zhuo ', # 0xb5
'Chu ', # 0xb6
'Duan ', # 0xb7
'Zhu ', # 0xb8
'Fang ', # 0xb9
'Xie ', # 0xba
'Hang ', # 0xbb
'Yu ', # 0xbc
'Shi ', # 0xbd
'Pei ', # 0xbe
'You ', # 0xbf
'Mye ', # 0xc0
'Pang ', # 0xc1
'Qi ', # 0xc2
'Zhan ', # 0xc3
'Mao ', # 0xc4
'Lu ', # 0xc5
'Pei ', # 0xc6
'Pi ', # 0xc7
'Liu ', # 0xc8
'Fu ', # 0xc9
'Fang ', # 0xca
'Xuan ', # 0xcb
'Jing ', # 0xcc
'Jing ', # 0xcd
'Ni ', # 0xce
'Zu ', # 0xcf
'Zhao ', # 0xd0
'Yi ', # 0xd1
'Liu ', # 0xd2
'Shao ', # 0xd3
'Jian ', # 0xd4
'Es ', # 0xd5
'Yi ', # 0xd6
'Qi ', # 0xd7
'Zhi ', # 0xd8
'Fan ', # 0xd9
'Piao ', # 0xda
'Fan ', # 0xdb
'Zhan ', # 0xdc
'Guai ', # 0xdd
'Sui ', # 0xde
'Yu ', # 0xdf
'Wu ', # 0xe0
'Ji ', # 0xe1
'Ji ', # 0xe2
'Ji ', # 0xe3
'Huo ', # 0xe4
'Ri ', # 0xe5
'Dan ', # 0xe6
'Jiu ', # 0xe7
'Zhi ', # 0xe8
'Zao ', # 0xe9
'Xie ', # 0xea
'Tiao ', # 0xeb
'Xun ', # 0xec
'Xu ', # 0xed
'Xu ', # 0xee
'Xu ', # 0xef
'Gan ', # 0xf0
'Han ', # 0xf1
'Tai ', # 0xf2
'Di ', # 0xf3
'Xu ', # 0xf4
'Chan ', # 0xf5
'Shi ', # 0xf6
'Kuang ', # 0xf7
'Yang ', # 0xf8
'Shi ', # 0xf9
'Wang ', # 0xfa
'Min ', # 0xfb
'Min ', # 0xfc
'Tun ', # 0xfd
'Chun ', # 0xfe
'Wu ', # 0xff
)
| gpl-3.0 |
ngannguyen/aimseqtk | src/normalize/normalize.py | 1 | 8169 | #Copyright (C) 2013 by Ngan Nguyen
#
#Released under the MIT license, see LICENSE.txt
'''
Using Bioconductor's metagenomeSeq package normalization to normalize
the samples
'''
import os
import sys
import rpy2.robjects as robjs
import rpy2.robjects.numpy2ri as rpyn
from rpy2.rinterface import NARealType
import gzip
import cPickle as pickle
from jobTree.scriptTree.target import Target
from sonLib.bioio import system
import aimseqtk.lib.statcommon as statcommon
import aimseqtk.lib.sample as libsample
import aimseqtk.lib.common as libcommon
def clone_matrix(colnames, clone2sample2size):
# Convert into a matrix. Cols = Samples, Rows = Clones, Cells = Sizes
# Sizes can be count or freq or normfreq (sizetype)
rownames = clone2sample2size.keys() # clone ids: v_cdr3_j
rows = []
for clone, sam2size in clone2sample2size.iteritems():
row = []
for sam in colnames:
size = 0
if sam in sam2size:
size = sam2size[sam]
row.append(size)
rows.extend(row)
return rows, rownames
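# Illustrative sketch (not part of the original module): clone_matrix returns a
# flat, row-major list that get_R_matrix below reshapes with byrow=True. The
# sample names and sizes here are hypothetical.
#
#   colnames = ['samA', 'samB']
#   clone2sample2size = {'v1_cdr3_j1': {'samA': 3, 'samB': 5},
#                        'v2_cdr3_j2': {'samB': 7}}
#   rows, rownames = clone_matrix(colnames, clone2sample2size)
#   # rows == [3, 5, 0, 7] (order follows dict iteration; missing samples get 0)
#   # rownames == ['v1_cdr3_j1', 'v2_cdr3_j2']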
def get_R_matrix(rows, colnames, rownames):
numcol = len(colnames)
numrow = len(rownames)
v = robjs.FloatVector(rows)
m = robjs.r['matrix'](v, ncol=numcol, nrow=numrow, byrow=True,
dimnames=[rownames, colnames])
return m
def get_meta_matrix(group2samples):
# Metadata information: rows = samples; cols = group
colnames = ['Group']
rownames = []
rows = []
for group, samples in group2samples.iteritems():
rownames.extend(samples)
rows.extend([group] * len(samples))
return rows, colnames, rownames
#def prepare_MRexp(samples, sizetype, group2samples):
## get phenotype matrix:
#r, cn, rn = get_meta_matrix(group2samples)
#pheno_matrix = get_R_matrix(r, cn, rn)
def normalize_MRexp(rows, colnames, rownames):
from rpy2.robjects.packages import importr
mgs = importr("metagenomeSeq")
count_matrix = get_R_matrix(rows, colnames, rownames)
# prepare MRexperiment object:
mrexp = mgs.newMRexperiment(count_matrix)
# normalized using CSS:
#normstat = mgs.cumNormStat(mrexp)
#mrexp2 = mgs.cumNorm(mrexp, p=normstat)
norm_count_matrix = mgs.MRcounts(mrexp, norm=True)
return norm_count_matrix
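# Usage sketch (illustrative only; the variable names are hypothetical): the
# helpers above are meant to be chained, with MRcounts(..., norm=True) returning
# counts scaled by metagenomeSeq's normalization factors (the CSS step noted in
# the comments above).
#
#   rows, rownames = clone_matrix(sample_names, clone2sample2size)
#   norm_matrix = normalize_MRexp(rows, sample_names, rownames)
#   # norm_matrix: R matrix of normalized sizes, rows = clones, cols = samples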
def matrix_to_normcount(matrix, samples):
# read normalized count from matrix (row=clone; col=sample) and
# update the samples' clones
samplenames = matrix.colnames
cloneids = matrix.rownames
for sample in samples:
if sample.name not in samplenames:
sys.stderr.write(("Warning: sample %s does not have normalized "
% sample.name + "count."))
continue
for clone in sample.clones:
normcount = 0.0
for id in clone.vjseq_ids:
assert id in cloneids
nc = matrix.rx[id, sample.name]
normcount = normcount + rpyn.ri2numpy(nc)[0]
clone.set_normcount(normcount)
return samples
#========== job Objs ====
class CloneMatrixAgg(Target):
def __init__(self, sams, indir, outfile):
Target.__init__(self)
self.sams = sams
self.indir = indir
self.outfile = outfile
def run(self):
objs = libcommon.load_pickledir(self.indir)
colnames = self.sams
rows = []
rownames = []
for obj in objs:
rs, rns = clone_matrix(colnames, obj)
rows.extend(rs)
rownames.extend(rns)
pickle.dump((rows, rownames, colnames), gzip.open(self.outfile, 'wb'))
system("rm -Rf %s" % self.indir)
class CloneMatrix(Target):
'''
Convert into a matrix. Cols = Samples, Rows = Clones, Cells = Sizes
Pickle matrix to outfile
Sizes can be count or freq or normfreq (sizetype)
'''
def __init__(self, indir, outfile, workdir, sizetype):
Target.__init__(self)
self.indir = indir
self.outfile = outfile
self.workdir = workdir
self.sizetype = sizetype
def run(self):
# get clone2sample2size
sams = os.listdir(self.indir)
self.addChildTarget(statcommon.GetClone2Samples(self.indir,
self.workdir, self.sizetype))
self.setFollowOnTarget(CloneMatrixAgg(sams, self.workdir,
self.outfile))
class UpdateNormCount(Target):
def __init__(self, infile, outfile, norm_col, names):
Target.__init__(self)
self.infile = infile
self.outfile = outfile
self.norm_col = norm_col
self.names = names
def run(self):
name2nc = {}
for i, name in enumerate(self.names):
name2nc[name] = self.norm_col[i]
clones = pickle.load(gzip.open(self.infile, "rb"))
cloneids = self.names
for clone in clones:
normcount = 0.0
id = clone.get_vseqj()
assert id in cloneids
#nc = self.norm_col.rx(id)
nc = name2nc[id]
if isinstance(nc, NARealType):
nc = 0.0
#clone.normcount = nc
#HACK!!
clone.count = nc
pickle.dump(clones, gzip.open(self.outfile, "wb"))
class NormalizeMRexp2(Target):
def __init__(self, matrix_file, samdir, outdir):
Target.__init__(self)
self.mx_file = matrix_file
self.samdir = samdir
self.outdir = outdir
def run(self):
self.logToMaster("NormalizeMRexp2")
(rows, rownames, colnames) = pickle.load(gzip.open(self.mx_file, "rb"))
#norm_matrix = normalize_MRexp(rows, rownames, colnames)
norm_matrix = normalize_MRexp(rows, colnames, rownames)
# get samples with normalized counts:
for sam in os.listdir(self.samdir):
#norm_col = norm_matrix.rx[sam, True]
norm_col = norm_matrix.rx[True, sam]
#DEBUG
#print sam
#print len(norm_col)
#print len(rownames)
#rowindex = 0
#for i, rn in enumerate(rownames):
# if rn == 'TCRBV12-03_CASSLGGVGAFF_TCRBJ01-01':
# rowindex = i
# print rowindex, rn
# break
#print norm_col[rowindex]
#col = []
#index = -1
#for i, colname in enumerate(colnames):
# if colname == sam:
# index = i
# print index, colname
# break
##print rows
#print rows[rowindex*len(colnames) + index]
#if sam == 'MH':
# sys.exit(1)
#END DEBUG
samdir = os.path.join(self.samdir, sam)
samout = os.path.join(self.outdir, sam)
system("mkdir -p %s" % samout)
for vj in os.listdir(samdir):
vjin = os.path.join(samdir, vj)
vjout = os.path.join(samout, vj)
if vj == sam: # sample file: out/sam/sam
system("cp %s %s" % (vjin, vjout))
else:
assert len(rownames) == len(norm_col)
self.addChildTarget(UpdateNormCount(vjin, vjout, norm_col,
rownames))
self.setFollowOnTarget(libcommon.CleanupFile(self.mx_file))
class NormalizeMRexp(Target):
def __init__(self, indir, outdir, sizetype):
Target.__init__(self)
self.indir = indir
self.outdir = outdir
self.sizetype = sizetype
def run(self):
matrix_tempdir = os.path.join(self.outdir, "matrix_temp")
matrix_file = os.path.join(self.outdir, "matrix_info.pickle")
system("mkdir -p %s" % matrix_tempdir)
self.addChildTarget(CloneMatrix(self.indir, matrix_file,
matrix_tempdir, self.sizetype))
self.setFollowOnTarget(NormalizeMRexp2(matrix_file, self.indir,
self.outdir))
| mit |
flyingwip/zoekdeverschillen | node_modules/node-gyp/gyp/pylib/gyp/MSVSVersion.py | 1509 | 17165 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import gyp
import glob
class VisualStudioVersion(object):
"""Information regarding a version of Visual Studio."""
def __init__(self, short_name, description,
solution_version, project_version, flat_sln, uses_vcxproj,
path, sdk_based, default_toolset=None):
self.short_name = short_name
self.description = description
self.solution_version = solution_version
self.project_version = project_version
self.flat_sln = flat_sln
self.uses_vcxproj = uses_vcxproj
self.path = path
self.sdk_based = sdk_based
self.default_toolset = default_toolset
def ShortName(self):
return self.short_name
def Description(self):
"""Get the full description of the version."""
return self.description
def SolutionVersion(self):
"""Get the version number of the sln files."""
return self.solution_version
def ProjectVersion(self):
"""Get the version number of the vcproj or vcxproj files."""
return self.project_version
def FlatSolution(self):
return self.flat_sln
def UsesVcxproj(self):
"""Returns true if this version uses a vcxproj file."""
return self.uses_vcxproj
def ProjectExtension(self):
"""Returns the file extension for the project."""
return self.uses_vcxproj and '.vcxproj' or '.vcproj'
def Path(self):
"""Returns the path to Visual Studio installation."""
return self.path
def ToolPath(self, tool):
"""Returns the path to a given compiler tool. """
return os.path.normpath(os.path.join(self.path, "VC/bin", tool))
def DefaultToolset(self):
"""Returns the msbuild toolset version that will be used in the absence
of a user override."""
return self.default_toolset
def SetupScript(self, target_arch):
"""Returns a command (with arguments) to be used to set up the
environment."""
# Check if we are running in the SDK command line environment and use
# the setup script from the SDK if so. |target_arch| should be either
# 'x86' or 'x64'.
assert target_arch in ('x86', 'x64')
sdk_dir = os.environ.get('WindowsSDKDir')
if self.sdk_based and sdk_dir:
return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
'/' + target_arch]
else:
# We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
# vcvars32, which it can only find if VS??COMNTOOLS is set, which it
# isn't always.
if target_arch == 'x86':
if self.short_name >= '2013' and self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
# VS2013 and later, non-Express have a x64-x86 cross that we want
# to prefer.
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), 'amd64_x86']
# Otherwise, the standard x86 compiler.
return [os.path.normpath(
os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
else:
assert target_arch == 'x64'
arg = 'x86_amd64'
# Use the 64-on-64 compiler if we're not using an express
# edition and we're running on a 64bit OS.
if self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
arg = 'amd64'
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
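# For example (illustrative; the actual path depends on the local VS install),
# SetupScript('x64') on a non-Express edition returns something like
# [r'C:\...\VC\vcvarsall.bat', 'amd64'] (or the 'x86_amd64' cross compiler on a
# 32-bit host), which callers prepend to a command line to set up the build
# environment.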
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to stay
Python-neutral; for instance, cygwin python lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
r"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up and available on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail if using 64-bit python due to it being a
virtual directory and System32 will work correctly in the first place.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure.
"""
text = None
try:
text = _RegistryQueryBase('Sysnative', key, value)
except OSError, e:
if e.errno == errno.ENOENT:
text = _RegistryQueryBase('System32', key, value)
else:
raise
return text
def _RegistryGetValueUsingWinReg(key, value):
"""Use the _winreg module to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure. Throws
ImportError if _winreg is unavailable.
"""
import _winreg
try:
root, subkey = key.split('\\', 1)
assert root == 'HKLM' # Only need HKLM for now.
with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
return _winreg.QueryValueEx(hkey, value)[0]
except WindowsError:
return None
def _RegistryGetValue(key, value):
"""Use _winreg or reg.exe to obtain the value of a registry key.
Using _winreg is preferable because it solves an issue in some corporate
environments where access to reg.exe is locked down. However, we still need
to fallback to reg.exe for the case where the _winreg module is not available
(for example in cygwin python).
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure.
"""
try:
return _RegistryGetValueUsingWinReg(key, value)
except ImportError:
pass
# Fallback to reg.exe if we fail to import _winreg.
text = _RegistryQuery(key, value)
if not text:
return None
# Extract value.
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
if not match:
return None
return match.group(1)
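# Illustrative call (the key and value mirror the detection code further below):
#   _RegistryGetValue(r'HKLM\Software\Microsoft\VisualStudio\12.0', 'InstallDir')
# returns the install directory string, or None if the key or value is absent.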
def _CreateVersion(name, path, sdk_based=False):
"""Sets up MSVS project generation.
Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
passed in that doesn't match a value in versions, Python will throw an error.
"""
if path:
path = os.path.normpath(path)
versions = {
'2015': VisualStudioVersion('2015',
'Visual Studio 2015',
solution_version='12.00',
project_version='14.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v140'),
'2013': VisualStudioVersion('2013',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2013e': VisualStudioVersion('2013e',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2012': VisualStudioVersion('2012',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2012e': VisualStudioVersion('2012e',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2010': VisualStudioVersion('2010',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2010e': VisualStudioVersion('2010e',
'Visual C++ Express 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2008': VisualStudioVersion('2008',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2008e': VisualStudioVersion('2008e',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005': VisualStudioVersion('2005',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005e': VisualStudioVersion('2005e',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
}
return versions[str(name)]
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
"""Collect the list of installed visual studio versions.
Returns:
A list of visual studio versions installed in descending order of
usage preference.
Base this on the registry and a quick check if devenv.exe exists.
Only versions 8-10 are considered.
Possibilities are:
2005(e) - Visual Studio 2005 (8)
2008(e) - Visual Studio 2008 (9)
2010(e) - Visual Studio 2010 (10)
2012(e) - Visual Studio 2012 (11)
2013(e) - Visual Studio 2013 (12)
2015 - Visual Studio 2015 (14)
Where (e) is e for express editions of MSVS and blank otherwise.
"""
version_to_year = {
'8.0': '2005',
'9.0': '2008',
'10.0': '2010',
'11.0': '2012',
'12.0': '2013',
'14.0': '2015',
}
versions = []
for version in versions_to_check:
# Old method of searching for which VS version is installed
# We don't use the 2010-encouraged-way because we also want to get the
# path to the binaries, which it doesn't offer.
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Microsoft\VCExpress\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], 'InstallDir')
if not path:
continue
path = _ConvertToCygpath(path)
# Check for full.
full_path = os.path.join(path, 'devenv.exe')
express_path = os.path.join(path, '*express.exe')
if not force_express and os.path.exists(full_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version],
os.path.join(path, '..', '..')))
# Check for express.
elif glob.glob(express_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..', '..')))
# The old method above does not work when only SDK is installed.
keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], version)
if not path:
continue
path = _ConvertToCygpath(path)
if version != '14.0': # There is no Express edition for 2015.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..'), sdk_based=True))
return versions
def SelectVisualStudioVersion(version='auto', allow_fallback=True):
"""Select which version of Visual Studio projects to generate.
Arguments:
version: Hook to allow caller to force a particular version (vs auto).
Returns:
An object representing a visual studio project format version.
"""
# In auto mode, check environment variable for override.
if version == 'auto':
version = os.environ.get('GYP_MSVS_VERSION', 'auto')
version_map = {
'auto': ('14.0', '12.0', '10.0', '9.0', '8.0', '11.0'),
'2005': ('8.0',),
'2005e': ('8.0',),
'2008': ('9.0',),
'2008e': ('9.0',),
'2010': ('10.0',),
'2010e': ('10.0',),
'2012': ('11.0',),
'2012e': ('11.0',),
'2013': ('12.0',),
'2013e': ('12.0',),
'2015': ('14.0',),
}
override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
if override_path:
msvs_version = os.environ.get('GYP_MSVS_VERSION')
if not msvs_version:
raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
'set to a particular version (e.g. 2010e).')
return _CreateVersion(msvs_version, override_path, sdk_based=True)
version = str(version)
versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
if not versions:
if not allow_fallback:
raise ValueError('Could not locate Visual Studio installation.')
if version == 'auto':
# Default to 2005 if we couldn't find anything
return _CreateVersion('2005', None)
else:
return _CreateVersion(version, None)
return versions[0]
| mit |
sudheesh001/oh-mainline | mysite/profile/migrations/0045_remove_static_slash_static_photos.py | 17 | 10858 | # This file is part of OpenHatch.
# Copyright (C) 2009 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.profile.models import *
import os
class Migration:
no_dry_run = True
def forwards(self, orm):
"Write your forwards migration here"
for person in orm['profile.Person'].objects.all():
if not person.photo:
continue
if not os.path.exists(person.photo.path):
print 'Clearing photo for', person.user.username,
print 'whose old path was', person.photo.path
person.photo = None
person.save()
def backwards(self, orm):
"Write your backwards migration here"
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'profile.dataimportattempt': {
'completed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}),
'person_wants_data': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'query': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'stale': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'profile.link_person_tag': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Tag']"})
},
'profile.link_project_tag': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Tag']"})
},
'profile.link_projectexp_tag': {
'Meta': {'unique_together': "[('tag', 'project_exp', 'source')]"},
'favorite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project_exp': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.ProjectExp']"}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Tag']"})
},
'profile.link_sf_proj_dude_fm': {
'Meta': {'unique_together': "[('person', 'project')]"},
'date_collected': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.SourceForgePerson']"}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.SourceForgeProject']"})
},
'profile.person': {
'gotten_name_from_ohloh': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interested_in_working_on': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
'last_polled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100'}),
'show_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'profile.projectexp': {
'data_import_attempt': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.DataImportAttempt']", 'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'man_months': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']", 'null': 'True'}),
'person_role': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'primary_language': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'should_show_this': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'profile.sourceforgeperson': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'profile.sourceforgeproject': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'unixname': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'profile.tag': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.TagType']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'profile.tagtype': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'prefix': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'search.project': {
'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
}
}
complete_apps = ['profile']
| agpl-3.0 |
xxhank/namebench | nb_third_party/dns/rdtypes/nsbase.py | 248 | 2995 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""NS-like base classes."""
import cStringIO
import dns.exception
import dns.rdata
import dns.name
class NSBase(dns.rdata.Rdata):
"""Base class for rdata that is like an NS record.
@ivar target: the target name of the rdata
@type target: dns.name.Name object"""
__slots__ = ['target']
def __init__(self, rdclass, rdtype, target):
super(NSBase, self).__init__(rdclass, rdtype)
self.target = target
def to_text(self, origin=None, relativize=True, **kw):
target = self.target.choose_relativity(origin, relativize)
return str(target)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
target = tok.get_name()
target = target.choose_relativity(origin, relativize)
tok.get_eol()
return cls(rdclass, rdtype, target)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
self.target.to_wire(file, compress, origin)
def to_digestable(self, origin = None):
return self.target.to_digestable(origin)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(target, cused) = dns.name.from_wire(wire[: current + rdlen],
current)
if cused != rdlen:
raise dns.exception.FormError
if not origin is None:
target = target.relativize(origin)
return cls(rdclass, rdtype, target)
from_wire = classmethod(from_wire)
def choose_relativity(self, origin = None, relativize = True):
self.target = self.target.choose_relativity(origin, relativize)
def _cmp(self, other):
return cmp(self.target, other.target)
class UncompressedNS(NSBase):
"""Base class for rdata that is like an NS record, but whose name
is not compressed when convert to DNS wire format, and whose
digestable form is not downcased."""
def to_wire(self, file, compress = None, origin = None):
super(UncompressedNS, self).to_wire(file, None, origin)
def to_digestable(self, origin = None):
f = cStringIO.StringIO()
self.to_wire(f, None, origin)
return f.getvalue()
| apache-2.0 |
lhilt/scipy | scipy/special/spfun_stats.py | 27 | 3499 | # Last Change: Sat Mar 21 02:00 PM 2009 J
# Copyright (c) 2001, 2002 Enthought, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# a. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# b. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# c. Neither the name of the Enthought nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""Some more special functions which may be useful for multivariate statistical
analysis."""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.special import gammaln as loggam
__all__ = ['multigammaln']
def multigammaln(a, d):
r"""Returns the log of multivariate gamma, also sometimes called the
generalized gamma.
Parameters
----------
a : ndarray
The multivariate gamma is computed for each item of `a`.
d : int
The dimension of the space of integration.
Returns
-------
res : ndarray
The values of the log multivariate gamma at the given points `a`.
Notes
-----
The formal definition of the multivariate gamma of dimension d for a real
`a` is
.. math::
\Gamma_d(a) = \int_{A>0} e^{-tr(A)} |A|^{a - (d+1)/2} dA
with the condition :math:`a > (d-1)/2`, and :math:`A > 0` being the set of
all the positive definite matrices of dimension `d`. Note that `a` is a
scalar: the integrand only is multivariate, the argument is not (the
function is defined over a subset of the real set).
This can be proven to be equal to the much friendlier equation
.. math::
\Gamma_d(a) = \pi^{d(d-1)/4} \prod_{i=1}^{d} \Gamma(a - (i-1)/2).
References
----------
R. J. Muirhead, Aspects of multivariate statistical theory (Wiley Series in
probability and mathematical statistics).
"""
a = np.asarray(a)
if not np.isscalar(d) or (np.floor(d) != d):
raise ValueError("d should be a positive integer (dimension)")
if np.any(a <= 0.5 * (d - 1)):
raise ValueError("condition a (%f) > 0.5 * (d-1) (%f) not met"
% (a, 0.5 * (d-1)))
res = (d * (d-1) * 0.25) * np.log(np.pi)
res += np.sum(loggam([(a - (j - 1.)/2) for j in range(1, d+1)]), axis=0)
return res
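# Sketch of intended use (not part of the original module): for d == 1 the
# product in the friendlier formula collapses to a single term, so
# multigammaln(a, 1) matches gammaln(a) (up to floating point).
#
#   from scipy.special import gammaln, multigammaln
#   multigammaln(2.5, 1)   # same value as gammaln(2.5)
#   multigammaln(2.5, 3)   # log of the 3-dimensional multivariate gamma at a = 2.5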
| bsd-3-clause |
winzard/django-seo2 | tests/settings.py | 3 | 2490 | """
Django settings for tests project.
"""
from __future__ import unicode_literals
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=)c(th7-3@w*n9mf9_b+2qg685lc6qgfars@yu1g516xu5&is)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.redirects',
'django.contrib.admin',
'django.contrib.flatpages',
'djangoseo',
'userapp',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or
# "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
'django.core.context_processors.request',
)
ROOT_URLCONF = 'tests.urls'
# WSGI_APPLICATION = 'tests.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'test.db',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
SITE_ID = 1
CACHE_BACKEND = 'dummy://'
# Enable when testing cache
# CACHE_BACKEND = "locmem://?timeout=30&max_entries=400"
SEO_MODELS = ('userapp',)
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
| mit |
ToonTownInfiniteRepo/ToontownInfinite | toontown/hood/MailboxTwoAnimatedProp.py | 5 | 1508 | from toontown.hood import ZeroAnimatedProp
from toontown.toonbase import ToontownGlobals
from direct.directnotify import DirectNotifyGlobal
class MailboxTwoAnimatedProp(ZeroAnimatedProp.ZeroAnimatedProp):
notify = DirectNotifyGlobal.directNotify.newCategory('MailboxTwoAnimatedProp')
PauseTimeMult = base.config.GetFloat('zero-pause-mult', 1.0)
PhaseInfo = {0: ('tt_a_ara_dod_mailbox_firstMoveFlagSpin1', 40 * PauseTimeMult),
1: (('tt_a_ara_dod_mailbox_firstMoveStruggle', 'tt_a_ara_dod_mailbox_firstMoveJump'), 20 * PauseTimeMult),
2: ('tt_a_ara_dod_mailbox_firstMoveFlagSpin2', 10 * PauseTimeMult),
3: ('tt_a_ara_dod_mailbox_firstMoveFlagSpin3', 8 * PauseTimeMult),
4: ('tt_a_ara_dod_mailbox_firstMoveJumpSummersault', 6 * PauseTimeMult),
5: ('tt_a_ara_dod_mailbox_firstMoveJumpFall', 4 * PauseTimeMult),
6: ('tt_a_ara_dod_mailbox_firstMoveJump3Summersaults', 2 * PauseTimeMult)}
PhaseWeStartAnimating = 5
def __init__(self, node):
ZeroAnimatedProp.ZeroAnimatedProp.__init__(self, node, 'mailbox', self.PhaseInfo, ToontownGlobals.MAILBOX_ZERO_HOLIDAY)
def startIfNeeded(self):
try:
self.curPhase = self.getPhaseToRun()
if self.curPhase >= self.PhaseWeStartAnimating:
self.request('DoAnim')
except:
pass
def handleNewPhase(self, newPhase):
if newPhase < self.PhaseWeStartAnimating:
self.request('Off')
else:
self.startIfNeeded()
| mit |
samuelchong/libcloud | libcloud/test/loadbalancer/test_slb.py | 4 | 25613 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.compute.base import Node
from libcloud.compute.types import NodeState
from libcloud.loadbalancer.base import Member, Algorithm
from libcloud.loadbalancer.drivers.slb import SLBDriver, \
SLBLoadBalancerHttpListener, SLBLoadBalancerHttpsListener, \
SLBLoadBalancerTcpListener, SLBLoadBalancerUdpListener
from libcloud.loadbalancer.types import State
from libcloud.test.file_fixtures import LoadBalancerFileFixtures
from libcloud.test import MockHttpTestCase
from libcloud.test.secrets import LB_SLB_PARAMS
from libcloud.utils.py3 import httplib
class SLBDriverTestCases(unittest.TestCase):
region = LB_SLB_PARAMS[2]
def setUp(self):
SLBMockHttp.test = self
SLBDriver.connectionCls.conn_class = SLBMockHttp
SLBMockHttp.type = None
SLBMockHttp.use_param = 'Action'
self.driver = SLBDriver(*LB_SLB_PARAMS)
def test_list_protocols(self):
protocols = self.driver.list_protocols()
self.assertEqual(4, len(protocols))
expected = ['tcp', 'udp', 'http', 'https']
diff = set(expected) - set(protocols)
self.assertEqual(0, len(diff))
def test_list_balancers(self):
balancers = self.driver.list_balancers()
self.assertEqual(len(balancers), 1)
balancer = balancers[0]
self.assertEqual('15229f88562-cn-hangzhou-dg-a01', balancer.id)
self.assertEqual('abc', balancer.name)
self.assertEqual(State.RUNNING, balancer.state)
self.assertEqual('120.27.186.149', balancer.ip)
self.assertTrue(balancer.port is None)
self.assertEqual(self.driver, balancer.driver)
expected_extra = {
'create_timestamp': 1452403099000,
'address_type': 'internet',
'region_id': 'cn-hangzhou-dg-a01',
'region_id_alias': 'cn-hangzhou',
'create_time': '2016-01-10T13:18Z',
'master_zone_id': 'cn-hangzhou-d',
'slave_zone_id': 'cn-hangzhou-b',
'network_type': 'classic'
}
self._validate_extras(expected_extra, balancer.extra)
def _validate_extras(self, expected, actual):
self.assertTrue(actual is not None)
for key, value in iter(expected.items()):
self.assertTrue(key in actual)
self.assertEqual(value, actual[key], ('extra %(key)s not equal, '
'expected: "%(expected)s", '
'actual: "%(actual)s"' %
{'key': key,
'expected': value,
'actual': actual[key]}))
def test_list_balancers_with_ids(self):
SLBMockHttp.type = 'list_balancers_ids'
self.balancer_ids = ['id1', 'id2']
balancers = self.driver.list_balancers(
ex_balancer_ids=self.balancer_ids)
self.assertTrue(balancers is not None)
def test_list_balancers_with_ex_filters(self):
SLBMockHttp.type = 'list_balancers_filters'
self.ex_filters = {'AddressType': 'internet'}
balancers = self.driver.list_balancers(ex_filters=self.ex_filters)
self.assertTrue(balancers is not None)
def test_get_balancer(self):
SLBMockHttp.type = 'get_balancer'
balancer = self.driver.get_balancer(balancer_id='tests')
self.assertEqual(balancer.id, '15229f88562-cn-hangzhou-dg-a01')
self.assertEqual(balancer.name, 'abc')
self.assertEqual(balancer.state, State.RUNNING)
def test_destroy_balancer(self):
balancer = self.driver.get_balancer(balancer_id='tests')
self.assertTrue(self.driver.destroy_balancer(balancer))
def test_create_balancer(self):
self.name = 'balancer1'
self.port = 80
self.protocol = 'http'
self.algorithm = Algorithm.WEIGHTED_ROUND_ROBIN
self.extra = {
'ex_address_type': 'internet',
'ex_internet_charge_type': 'paybytraffic',
'ex_bandwidth': 1,
'ex_master_zone_id': 'cn-hangzhou-d',
'ex_slave_zone_id': 'cn-hangzhou-b',
'StickySession': 'on',
'HealthCheck': 'on'}
self.members = [Member('node1', None, None)]
balancer = self.driver.create_balancer(name=self.name, port=self.port,
protocol=self.protocol,
algorithm=self.algorithm,
members=self.members,
**self.extra)
self.assertEqual(balancer.name, self.name)
self.assertEqual(balancer.port, self.port)
self.assertEqual(balancer.state, State.UNKNOWN)
def test_create_balancer_no_port_exception(self):
self.assertRaises(AttributeError, self.driver.create_balancer,
None, None, 'http', Algorithm.WEIGHTED_ROUND_ROBIN,
None)
def test_create_balancer_unsupport_protocol_exception(self):
self.assertRaises(AttributeError, self.driver.create_balancer,
None, 443, 'ssl', Algorithm.WEIGHTED_ROUND_ROBIN,
None)
def test_create_balancer_multiple_member_ports_exception(self):
members = [Member('m1', '1.2.3.4', 80),
Member('m2', '1.2.3.5', 81)]
self.assertRaises(AttributeError, self.driver.create_balancer,
None, 80, 'http', Algorithm.WEIGHTED_ROUND_ROBIN,
members)
def test_create_balancer_bandwidth_value_error(self):
self.assertRaises(AttributeError, self.driver.create_balancer,
None, 80, 'http', Algorithm.WEIGHTED_ROUND_ROBIN,
None, ex_bandwidth='NAN')
def test_create_balancer_paybybandwidth_without_bandwidth_exception(self):
self.assertRaises(AttributeError, self.driver.create_balancer,
None, 80, 'http', Algorithm.WEIGHTED_ROUND_ROBIN,
None, ex_internet_charge_type='paybybandwidth')
def test_balancer_list_members(self):
balancer = self.driver.get_balancer(balancer_id='tests')
members = balancer.list_members()
self.assertEqual(len(members), 1)
self.assertEqual(members[0].balancer, balancer)
self.assertEqual('i-23tshnsdq', members[0].id)
def test_balancer_list_listeners(self):
balancer = self.driver.get_balancer(balancer_id='tests')
listeners = self.driver.ex_list_listeners(balancer)
self.assertEqual(1, len(listeners))
listener = listeners[0]
self.assertEqual('80', listener.port)
def test_balancer_detach_member(self):
self.balancer = self.driver.get_balancer(balancer_id='tests')
self.member = Member('i-23tshnsdq', None, None)
self.assertTrue(self.balancer.detach_member(self.member))
def test_balancer_attach_compute_node(self):
SLBMockHttp.type = 'attach_compute_node'
self.balancer = self.driver.get_balancer(balancer_id='tests')
self.node = Node(id='node1', name='node-name',
state=NodeState.RUNNING,
public_ips=['1.2.3.4'], private_ips=['4.3.2.1'],
driver=self.driver)
member = self.driver.balancer_attach_compute_node(
self.balancer, self.node)
self.assertEqual(self.node.id, member.id)
self.assertEqual(self.node.public_ips[0], member.ip)
self.assertEqual(self.balancer.port, member.port)
def test_ex_create_listener(self):
SLBMockHttp.type = 'create_listener'
self.balancer = self.driver.get_balancer(balancer_id='tests')
self.backend_port = 80
self.protocol = 'http'
self.algorithm = Algorithm.WEIGHTED_ROUND_ROBIN
self.bandwidth = 1
self.extra = {'StickySession': 'off', 'HealthCheck': 'off'}
self.assertTrue(self.driver.ex_create_listener(self.balancer,
self.backend_port,
self.protocol,
self.algorithm,
self.bandwidth,
**self.extra))
def test_ex_create_listener_override_port(self):
SLBMockHttp.type = 'create_listener_override_port'
self.balancer = self.driver.get_balancer(balancer_id='tests')
self.backend_port = 80
self.protocol = 'http'
self.algorithm = Algorithm.WEIGHTED_ROUND_ROBIN
self.bandwidth = 1
self.extra = {'StickySession': 'off',
'HealthCheck': 'off',
'ListenerPort': 8080}
self.assertTrue(self.driver.ex_create_listener(self.balancer,
self.backend_port,
self.protocol,
self.algorithm,
self.bandwidth,
**self.extra))
def test_ex_start_listener(self):
SLBMockHttp.type = 'start_listener'
balancer = self.driver.get_balancer(balancer_id='tests')
self.port = 80
self.assertTrue(self.driver.ex_start_listener(balancer, self.port))
def test_ex_stop_listener(self):
SLBMockHttp.type = 'stop_listener'
balancer = self.driver.get_balancer(balancer_id='tests')
self.port = 80
self.assertTrue(self.driver.ex_stop_listener(balancer, self.port))
def test_ex_upload_certificate(self):
self.name = 'cert1'
self.cert = 'cert-data'
self.key = 'key-data'
certificate = self.driver.ex_upload_certificate(self.name, self.cert,
self.key)
self.assertEqual(self.name, certificate.name)
self.assertEqual('01:DF:AB:CD', certificate.fingerprint)
def test_ex_list_certificates(self):
certs = self.driver.ex_list_certificates()
self.assertEqual(2, len(certs))
cert = certs[0]
self.assertEqual('139a00604ad-cn-east-hangzhou-01', cert.id)
self.assertEqual('abe', cert.name)
self.assertEqual('A:B:E', cert.fingerprint)
def test_ex_list_certificates_ids(self):
SLBMockHttp.type = 'list_certificates_ids'
self.cert_ids = ['cert1', 'cert2']
certs = self.driver.ex_list_certificates(certificate_ids=self.cert_ids)
self.assertTrue(certs is not None)
def test_ex_delete_certificate(self):
self.cert_id = 'cert1'
self.assertTrue(self.driver.ex_delete_certificate(self.cert_id))
def test_ex_set_certificate_name(self):
self.cert_id = 'cert1'
self.cert_name = 'cert-name'
self.assertTrue(self.driver.ex_set_certificate_name(self.cert_id,
self.cert_name))
class SLBMockHttp(MockHttpTestCase):
fixtures = LoadBalancerFileFixtures('slb')
def _DescribeLoadBalancers(self, method, url, body, headers):
body = self.fixtures.load('describe_load_balancers.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _list_balancers_ids_DescribeLoadBalancers(self, method, url, body,
headers):
params = {'LoadBalancerId': ','.join(self.test.balancer_ids)}
self.assertUrlContainsQueryParams(url, params)
body = self.fixtures.load('describe_load_balancers.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _list_balancers_filters_DescribeLoadBalancers(self, method, url, body,
headers):
params = {'AddressType': 'internet'}
self.assertUrlContainsQueryParams(url, params)
body = self.fixtures.load('describe_load_balancers.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _get_balancer_DescribeLoadBalancers(self, method, url, body, headers):
params = {'LoadBalancerId': 'tests'}
self.assertUrlContainsQueryParams(url, params)
return self._DescribeLoadBalancers(method, url, body, headers)
def _DeleteLoadBalancer(self, method, url, body, headers):
params = {'LoadBalancerId': '15229f88562-cn-hangzhou-dg-a01'}
self.assertUrlContainsQueryParams(url, params)
body = self.fixtures.load('delete_load_balancer.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DescribeLoadBalancerAttribute(self, method, url, body, headers):
body = self.fixtures.load('describe_load_balancer_attribute.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _CreateLoadBalancer(self, method, url, body, headers):
params = {'RegionId': self.test.region,
'LoadBalancerName': self.test.name}
balancer_keys = {
'AddressType': 'ex_address_type',
'InternetChargeType': 'ex_internet_charge_type',
'Bandwidth': 'ex_bandwidth',
'MasterZoneId': 'ex_master_zone_id',
'SlaveZoneId': 'ex_slave_zone_id'
}
for key in balancer_keys:
params[key] = str(self.test.extra[balancer_keys[key]])
self.assertUrlContainsQueryParams(url, params)
body = self.fixtures.load('create_load_balancer.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _AddBackendServers(self, method, url, body, headers):
_id = self.test.members[0].id
self.assertTrue("ServerId" in url and _id in url)
self.assertTrue("Weight" in url and "100" in url)
body = self.fixtures.load('add_backend_servers.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _CreateLoadBalancerHTTPListener(self, method, url, body, headers):
body = self.fixtures.load('create_load_balancer_http_listener.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _StartLoadBalancerListener(self, method, url, body, headers):
body = self.fixtures.load('start_load_balancer_listener.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _RemoveBackendServers(self, method, url, body, headers):
_id = self.test.member.id
servers_json = '["%s"]' % _id
params = {'LoadBalancerId': self.test.balancer.id,
'BackendServers': servers_json}
self.assertUrlContainsQueryParams(url, params)
body = self.fixtures.load('add_backend_servers.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _attach_compute_node_DescribeLoadBalancers(self, method, url, body,
headers):
body = self.fixtures.load('describe_load_balancers.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _attach_compute_node_AddBackendServers(self, method, url, body,
headers):
_id = self.test.node.id
self.assertTrue("ServerId" in url and _id in url)
self.assertTrue("Weight" in url and "100" in url)
body = self.fixtures.load('add_backend_servers.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _create_listener_CreateLoadBalancerHTTPListener(self, method, url,
body, headers):
params = {'LoadBalancerId': self.test.balancer.id,
'RegionId': self.test.region,
'ListenerPort': str(self.test.balancer.port),
'BackendServerPort': str(self.test.backend_port),
'Scheduler': 'wrr',
'Bandwidth': '1',
'StickySession': 'off',
'HealthCheck': 'off'}
self.assertUrlContainsQueryParams(url, params)
body = self.fixtures.load('create_load_balancer_http_listener.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _create_listener_DescribeLoadBalancers(self, method, url, body,
headers):
body = self.fixtures.load('describe_load_balancers.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _create_listener_override_port_CreateLoadBalancerHTTPListener(
self, method, url, body, headers):
params = {'LoadBalancerId': self.test.balancer.id,
'RegionId': self.test.region,
'ListenerPort': str(self.test.extra['ListenerPort']),
'BackendServerPort': str(self.test.backend_port),
'Scheduler': 'wrr',
'Bandwidth': '1',
'StickySession': 'off',
'HealthCheck': 'off'}
self.assertUrlContainsQueryParams(url, params)
body = self.fixtures.load('create_load_balancer_http_listener.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _create_listener_override_port_DescribeLoadBalancers(
self, method, url, body, headers):
body = self.fixtures.load('describe_load_balancers.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _start_listener_DescribeLoadBalancers(self, method, url, body,
headers):
return self._DescribeLoadBalancers(method, url, body, headers)
def _start_listener_StartLoadBalancerListener(self, method, url, body,
headers):
params = {'ListenerPort': str(self.test.port)}
self.assertUrlContainsQueryParams(url, params)
return self._StartLoadBalancerListener(method, url, body, headers)
def _stop_listener_DescribeLoadBalancers(self, method, url, body,
headers):
return self._DescribeLoadBalancers(method, url, body, headers)
def _stop_listener_StopLoadBalancerListener(self, method, url, body,
headers):
params = {'ListenerPort': str(self.test.port)}
self.assertUrlContainsQueryParams(url, params)
return self._StartLoadBalancerListener(method, url, body, headers)
def _UploadServerCertificate(self, method, url, body, headers):
body = self.fixtures.load('upload_server_certificate.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DescribeServerCertificates(self, method, url, body, headers):
body = self.fixtures.load('describe_server_certificates.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _list_certificates_ids_DescribeServerCertificates(self, method, url,
body, headers):
params = {'RegionId': self.test.region,
'ServerCertificateId': ','.join(self.test.cert_ids)}
self.assertUrlContainsQueryParams(url, params)
body = self.fixtures.load('describe_server_certificates.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _DeleteServerCertificate(self, method, url, body, headers):
params = {'RegionId': self.test.region,
'ServerCertificateId': self.test.cert_id}
self.assertUrlContainsQueryParams(url, params)
body = self.fixtures.load('delete_server_certificate.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _SetServerCertificateName(self, method, url, body, headers):
params = {'RegionId': self.test.region,
'ServerCertificateId': self.test.cert_id,
'ServerCertificateName': self.test.cert_name}
self.assertUrlContainsQueryParams(url, params)
body = self.fixtures.load('set_server_certificate_name.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
class AssertDictMixin(object):
def assert_dict_equals(self, expected, actual):
expected_keys = set(expected.keys())
actual_keys = set(actual.keys())
self.assertEqual(len(expected_keys), len(actual_keys))
self.assertEqual(0, len(expected_keys - actual_keys))
for key in expected:
self.assertEqual(expected[key], actual[key])
class SLBLoadBalancerHttpListenerTestCase(unittest.TestCase, AssertDictMixin):
def setUp(self):
self.listener = SLBLoadBalancerHttpListener.create(
80, 8080, Algorithm.WEIGHTED_ROUND_ROBIN, 1,
extra={'StickySession': 'on',
'StickySessionType': 'insert',
'HealthCheck': 'on'}
)
def test_get_required_params(self):
expected = {'Action': 'CreateLoadBalancerHTTPListener',
'ListenerPort': 80,
'BackendServerPort': 8080,
'Scheduler': 'wrr',
'Bandwidth': 1,
'StickySession': 'on',
'HealthCheck': 'on'}
self.assert_dict_equals(expected,
self.listener.get_required_params())
def test_get_optional_params(self):
expected = {'StickySessionType': 'insert'}
self.assert_dict_equals(expected, self.listener.get_optional_params())
def test_repr(self):
self.assertTrue('SLBLoadBalancerHttpListener' in str(self.listener))
class SLBLoadBalancerHttpsListenerTestCase(unittest.TestCase, AssertDictMixin):
def setUp(self):
self.listener = SLBLoadBalancerHttpsListener.create(
80, 8080, Algorithm.WEIGHTED_ROUND_ROBIN, 1,
extra={'StickySession': 'on',
'StickySessionType': 'insert',
'HealthCheck': 'on',
'ServerCertificateId': 'fake-cert1'}
)
def test_get_required_params(self):
expected = {'Action': 'CreateLoadBalancerHTTPSListener',
'ListenerPort': 80,
'BackendServerPort': 8080,
'Scheduler': 'wrr',
'Bandwidth': 1,
'StickySession': 'on',
'HealthCheck': 'on',
'ServerCertificateId': 'fake-cert1'}
self.assert_dict_equals(expected,
self.listener.get_required_params())
def test_get_optional_params(self):
expected = {'StickySessionType': 'insert'}
self.assert_dict_equals(expected, self.listener.get_optional_params())
class SLBLoadBalancerTcpListenerTestCase(unittest.TestCase, AssertDictMixin):
def setUp(self):
self.listener = SLBLoadBalancerTcpListener.create(
80, 8080, Algorithm.WEIGHTED_ROUND_ROBIN, 1,
extra={'PersistenceTimeout': 0,
'HealthCheckDomain': ''}
)
def test_get_required_params(self):
expected = {'Action': 'CreateLoadBalancerTCPListener',
'ListenerPort': 80,
'BackendServerPort': 8080,
'Scheduler': 'wrr',
'Bandwidth': 1}
self.assert_dict_equals(expected,
self.listener.get_required_params())
def test_get_optional_params(self):
expected = {'PersistenceTimeout': 0,
'HealthCheckDomain': ''}
self.assert_dict_equals(expected, self.listener.get_optional_params())
class SLBLoadBalancerUdpListenerTestCase(unittest.TestCase, AssertDictMixin):
def setUp(self):
self.listener = SLBLoadBalancerUdpListener.create(
80, 8080, Algorithm.WEIGHTED_ROUND_ROBIN, 1,
extra={'PersistenceTimeout': 0}
)
def test_get_required_params(self):
expected = {'Action': 'CreateLoadBalancerUDPListener',
'ListenerPort': 80,
'BackendServerPort': 8080,
'Scheduler': 'wrr',
'Bandwidth': 1}
self.assert_dict_equals(expected,
self.listener.get_required_params())
def test_get_optional_params(self):
expected = {'PersistenceTimeout': 0}
self.assert_dict_equals(expected, self.listener.get_optional_params())
if __name__ == "__main__":
sys.exit(unittest.main())
| apache-2.0 |
ellio167/lammps | tools/i-pi/ipi/inputs/thermostats.py | 8 | 9093 | """Deals with creating the thermostats class.
Copyright (C) 2013, Joshua More and Michele Ceriotti
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Chooses between the different possible thermostat options and creates the
appropriate thermostat object, with suitable parameters.
Classes:
InputThermo: Deals with creating the thermostat object from a file, and
writing the checkpoints.
"""
__all__ = ['InputThermo']
import numpy as np
from ipi.utils.depend import *
from ipi.utils.inputvalue import *
from ipi.engine.thermostats import *
class InputThermo(Input):
"""Thermostat input class.
Handles generating the appropriate thermostat class from the xml input file,
   and generating the xml checkpoint tags and data from an instance of the
object.
Attributes:
mode: An optional string giving the type of the thermostat used. Defaults
to 'langevin'.
Fields:
ethermo: An optional float giving the amount of heat energy transferred
to the bath. Defaults to 0.0.
tau: An optional float giving the damping time scale. Defaults to 1.0.
pile_scale: Scaling for the PILE damping relative to the critical damping.
A: An optional array of floats giving the drift matrix. Defaults to 0.0.
C: An optional array of floats giving the static covariance matrix.
Defaults to 0.0.
s: An optional array of floats giving the additional momentum-scaled
momenta in GLE. Defaults to 0.0.
"""
attribs = { "mode": (InputAttribute, { "dtype" : str,
"options" : [ "", "langevin", "svr", "pile_l", "pile_g", "gle", "nm_gle", "nm_gle_g" ],
"help" : "The style of thermostatting. 'langevin' specifies a white noise langevin equation to be attached to the cartesian representation of the momenta. 'svr' attaches a velocity rescaling thermostat to the cartesian representation of the momenta. Both 'pile_l' and 'pile_g' attaches a white noise langevin thermostat to the normal mode representation, with 'pile_l' attaching a local langevin thermostat to the centroid mode and 'pile_g' instead attaching a global velocity rescaling thermostat. 'gle' attaches a colored noise langevin thermostat to the cartesian representation of the momenta, 'nm_gle' attaches a colored noise langevin thermostat to the normal mode representation of the momenta and a langevin thermostat to the centroid and 'nm_gle_g' attaches a gle thermostat to the normal modes and a svr thermostat to the centroid."
}) }
fields = { "ethermo" : (InputValue, { "dtype" : float,
"default" : 0.0,
"help" : "The initial value of the thermostat energy. Used when the simulation is restarted to guarantee continuity of the conserved quantity.",
"dimension" : "energy" }),
"tau" : (InputValue, { "dtype" : float,
"default" : 0.0,
"help" : "The friction coefficient for white noise thermostats.",
"dimension" : "time" }),
"pile_scale" : (InputValue, { "dtype" : float,
"default" : 1.0,
"help" : "Scaling for the PILE damping relative to the critical damping."} ),
"A" : (InputArray, { "dtype" : float,
"default" : input_default(factory=np.zeros, args = (0,)),
"help" : "The friction matrix for GLE thermostats.",
"dimension" : "frequency" }),
"C" : (InputArray, { "dtype" : float,
"default" : input_default(factory=np.zeros, args = (0,)),
"help" : "The covariance matrix for GLE thermostats.",
"dimension" : "temperature" }),
"s" : (InputArray, { "dtype" : float,
"default" : input_default(factory=np.zeros, args = (0,)),
"help" : "Input values for the additional momenta in GLE.",
"dimension" : "ms-momentum" })
}
default_help = "Simulates an external heat bath to keep the velocity distribution at the correct temperature."
default_label = "THERMOSTATS"
def store(self, thermo):
"""Takes a thermostat instance and stores a minimal representation of it.
Args:
thermo: A thermostat object.
Raises:
TypeError: Raised if the thermostat is not a recognized type.
"""
super(InputThermo,self).store(thermo)
if type(thermo) is ThermoLangevin:
self.mode.store("langevin")
self.tau.store(thermo.tau)
elif type(thermo) is ThermoSVR:
self.mode.store("svr")
self.tau.store(thermo.tau)
elif type(thermo) is ThermoPILE_L:
self.mode.store("pile_l")
self.tau.store(thermo.tau)
self.pile_scale.store(thermo.pilescale)
elif type(thermo) is ThermoPILE_G:
self.mode.store("pile_g")
self.tau.store(thermo.tau)
self.pile_scale.store(thermo.pilescale)
elif type(thermo) is ThermoGLE:
self.mode.store("gle")
self.A.store(thermo.A)
if dget(thermo,"C")._func is None:
self.C.store(thermo.C)
self.s.store(thermo.s)
elif type(thermo) is ThermoNMGLE:
self.mode.store("nm_gle")
self.A.store(thermo.A)
if dget(thermo,"C")._func is None:
self.C.store(thermo.C)
self.s.store(thermo.s)
elif type(thermo) is ThermoNMGLEG:
self.mode.store("nm_gle_g")
self.A.store(thermo.A)
self.tau.store(thermo.tau)
if dget(thermo,"C")._func is None:
self.C.store(thermo.C)
self.s.store(thermo.s)
elif type(thermo) is Thermostat:
self.mode.store("")
else:
raise TypeError("Unknown thermostat mode " + type(thermo).__name__)
self.ethermo.store(thermo.ethermo)
def fetch(self):
"""Creates a thermostat object.
Returns:
A thermostat object of the appropriate type and with the appropriate
parameters given the attributes of the InputThermo object.
Raises:
TypeError: Raised if the thermostat type is not a recognized option.
"""
super(InputThermo,self).fetch()
if self.mode.fetch() == "langevin":
thermo = ThermoLangevin(tau=self.tau.fetch())
elif self.mode.fetch() == "svr":
thermo = ThermoSVR(tau=self.tau.fetch())
elif self.mode.fetch() == "pile_l":
thermo = ThermoPILE_L(tau=self.tau.fetch(), scale=self.pile_scale.fetch())
elif self.mode.fetch() == "pile_g":
thermo = ThermoPILE_G(tau=self.tau.fetch(), scale=self.pile_scale.fetch())
elif self.mode.fetch() == "gle":
rC = self.C.fetch()
if len(rC) == 0:
rC = None
thermo = ThermoGLE(A=self.A.fetch(),C=rC)
thermo.s = self.s.fetch()
elif self.mode.fetch() == "nm_gle":
rC = self.C.fetch()
if len(rC) == 0:
rC = None
thermo = ThermoNMGLE(A=self.A.fetch(),C=rC)
thermo.s = self.s.fetch()
elif self.mode.fetch() == "nm_gle_g":
rC = self.C.fetch()
if len(rC) == 0:
rC = None
thermo = ThermoNMGLEG(A=self.A.fetch(),C=rC, tau=self.tau.fetch())
thermo.s = self.s.fetch()
elif self.mode.fetch() == "" :
thermo=Thermostat()
else:
raise TypeError("Invalid thermostat mode " + self.mode.fetch())
thermo.ethermo = self.ethermo.fetch()
return thermo
def check(self):
"""Checks that the parameter arrays represents a valid thermostat."""
super(InputThermo,self).check()
if self.mode.fetch() in ["langevin", "svr", "pile_l", "pile_g", "nm_gle_g"]:
if self.tau.fetch() <= 0:
raise ValueError("The thermostat friction coefficient must be set to a positive value")
if self.mode.fetch() in ["gle", "nm_gle", "nm_gle_g"]:
pass # PERHAPS DO CHECKS THAT MATRICES SATISFY REASONABLE CONDITIONS (POSITIVE-DEFINITENESS, ETC)
| gpl-2.0 |
girving/tensorflow | tensorflow/contrib/legacy_seq2seq/__init__.py | 165 | 2433 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deprecated library for creating sequence-to-sequence models in TensorFlow.
@@attention_decoder
@@basic_rnn_seq2seq
@@embedding_attention_decoder
@@embedding_attention_seq2seq
@@embedding_rnn_decoder
@@embedding_rnn_seq2seq
@@embedding_tied_rnn_seq2seq
@@model_with_buckets
@@one2many_rnn_seq2seq
@@rnn_decoder
@@sequence_loss
@@sequence_loss_by_example
@@tied_rnn_seq2seq
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import attention_decoder
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import basic_rnn_seq2seq
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_attention_decoder
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_attention_seq2seq
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_rnn_decoder
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_rnn_seq2seq
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_tied_rnn_seq2seq
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import model_with_buckets
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import one2many_rnn_seq2seq
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import rnn_decoder
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import sequence_loss
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import sequence_loss_by_example
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import tied_rnn_seq2seq
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = []
remove_undocumented(__name__, _allowed_symbols)
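# Illustrative usage sketch (not part of the original module; signature taken
# from the public API of the imported helpers): the seq2seq functions are
# called with lists of per-time-step tensors, e.g.
#
#   outputs, state = basic_rnn_seq2seq(encoder_inputs, decoder_inputs, cell)
#
# where `encoder_inputs` and `decoder_inputs` are lists of 2-D batch tensors
# and `cell` is an RNNCell instance.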
| apache-2.0 |
mdietrichc2c/carrier-delivery | __unported__/delivery_carrier_label_postlogistics/postlogistics/web_service.py | 2 | 14347 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Vaucher
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
from suds.client import Client, WebFault
from suds.transport.http import HttpAuthenticated
from PIL import Image
from StringIO import StringIO
from openerp.osv import orm
from openerp.tools.translate import _
_compile_itemid = re.compile('[^0-9A-Za-z+\-_]')
class PostlogisticsWebService(object):
""" Connector with PostLogistics for labels using post.ch Web Services
Handbook available here: http://www.poste.ch/post-barcode-cug.htm
    Allows generating labels
"""
def __init__(self, company):
self.init_connection(company)
def init_connection(self, company):
t = HttpAuthenticated(
username=company.postlogistics_username,
password=company.postlogistics_password)
self.client = Client(
company.postlogistics_wsdl_url,
transport=t)
def _send_request(self, request, **kwargs):
""" Wrapper for API requests
:param request: callback for API request
:param **kwargs: params forwarded to the callback
"""
res = {}
try:
res['value'] = request(**kwargs)
res['success'] = True
except WebFault as e:
res['success'] = False
res['errors'] = [e[0]]
except Exception as e:
# if authentification error
if isinstance(e[0], tuple) and e[0][0] == 401:
raise orm.except_orm(
_('Error 401'),
_('Authorization Required\n\n'
'Please verify postlogistics username and password in:\n'
'Configuration -> Postlogistics'))
raise
return res
def _get_language(self, lang):
""" Return a language to iso format from openerp format.
`iso_code` field in res.lang is not mandatory thus not always set.
Use partner language if available, otherwise use english
:param partner: partner browse record
:return: language code to use.
"""
available_languages = self.client.factory.create('ns0:Language')
lang_code = lang.split('_')[0]
if lang_code in available_languages:
return lang_code
return 'en'
def read_allowed_services_by_franking_license(self, license, company,
lang=None):
""" Get a list of allowed service for a postlogistics licence """
if not lang:
lang = company.partner_id.lang
lang = self._get_language(lang)
request = self.client.service.ReadAllowedServicesByFrankingLicense
return self._send_request(request, FrankingLicense=license, Language=lang)
def read_service_groups(self, company, lang):
""" Get group of services """
if not lang:
lang = company.partner_id.lang
lang = self._get_language(lang)
request = self.client.service.ReadServiceGroups
return self._send_request(request, Language=lang)
def read_basic_services(self, company, service_group_id, lang):
""" Get basic services for a given service group """
if not lang:
lang = company.partner_id.lang
lang = self._get_language(lang)
request = self.client.service.ReadBasicServices
return self._send_request(request, Language=lang,
ServiceGroupID=service_group_id)
def read_additional_services(self, company, service_code, lang):
""" Get additional services compatible with a basic services """
if not lang:
lang = company.partner_id.lang
lang = self._get_language(lang)
request = self.client.service.ReadAdditionalServices
return self._send_request(request, Language=lang, PRZL=service_code)
def read_delivery_instructions(self, company, service_code, lang):
""" Get delivery instruction 'ZAW' compatible with a base service """
if not lang:
lang = company.partner_id.lang
lang = self._get_language(lang)
request = self.client.service.ReadDeliveryInstructions
return self._send_request(request, Language=lang, PRZL=service_code)
def _prepare_recipient(self, picking):
""" Create a ns0:Recipient as a dict from a partner
:param partner: partner browse record
:return a dict containing data for ns0:Recipient
"""
partner = picking.partner_id
recipient = {
'Name1': partner.name,
'Street': partner.street,
'ZIP': partner.zip,
'City': partner.city,
'Country': partner.country_id.code,
'EMail': partner.email or None,
}
if partner.street2:
recipient['AddressSuffix'] = partner.street2
if partner.parent_id:
recipient['Name2'] = partner.parent_id.name
recipient['PersonallyAddressed'] = False
        # Phone and / or mobile should only be displayed if instruction to
# Notify delivery by telephone is set
is_phone_required = [option for option in picking.option_ids
if option.code == 'ZAW3213']
if is_phone_required:
if partner.phone:
recipient['Phone'] = partner.phone
if partner.mobile:
recipient['Mobile'] = partner.mobile
return recipient
def _prepare_customer(self, picking):
""" Create a ns0:Customer as a dict from picking
This is the Postlogistic Customer, thus the sender
:param picking: picking browse record
:return a dict containing data for ns0:Customer
"""
company = picking.company_id
partner = company.partner_id
customer = {
'Name1': partner.name,
'Street': partner.street,
'ZIP': partner.zip,
'City': partner.city,
'Country': partner.country_id.code,
'DomicilePostOffice': company.postlogistics_office,
}
logo_format = None
logo = company.postlogistics_logo
if logo:
logo_image = Image.open(StringIO(logo.decode('base64')))
logo_format = logo_image.format
customer['Logo'] = logo
customer['LogoFormat'] = logo_format
return customer
def _get_single_option(self, picking, option):
option = [opt.code for opt in picking.option_ids
if opt.postlogistics_type == option]
assert len(option) <= 1
return option and option[0]
def _get_label_layout(self, picking):
label_layout = self._get_single_option(picking, 'label_layout')
if not label_layout:
company = picking.company_id
label_layout = company.postlogistics_default_label_layout.code
return label_layout
def _get_output_format(self, picking):
output_format = self._get_single_option(picking, 'output_format')
if not output_format:
company = picking.company_id
output_format = company.postlogistics_default_output_format.code
return output_format
def _get_image_resolution(self, picking):
resolution = self._get_single_option(picking, 'resolution')
if not resolution:
company = picking.company_id
resolution = company.postlogistics_default_resolution.code
return resolution
def _get_license(self, picking):
""" Get the license
        Take it from the carrier and, if not defined, get the first license
        depending on the service group. This requires licenses to be
        associated with groups.
:return: license number
"""
license = picking.carrier_id.postlogistics_license_id
if not license:
company_licenses = picking.company_id.postlogistics_license_ids
group = picking.carrier_id.postlogistics_service_group_id
if not company_licenses or not group:
return None
group_license_ids = [l.id for l in group.postlogistics_license_ids]
if not group_license_ids:
return None
license = [l for l in company_licenses
if l.id in group_license_ids][0]
return license.number
def _prepare_attributes(self, picking):
services = [option.code.split(',') for option in picking.option_ids
if option.tmpl_option_id.postlogistics_type
in ('basic', 'additional', 'delivery')]
attributes = {
'PRZL': services,
}
return attributes
def _get_itemid(self, picking, pack_no):
""" Allowed characters are alphanumeric plus `+`, `-` and `_`
Last `+` separates picking name and package number (if any)
:return string: itemid
"""
name = _compile_itemid.sub('', picking.name)
if pack_no:
pack_no = _compile_itemid.sub('', pack_no)
codes = [name, pack_no]
return "+".join(c for c in codes if c)
def _prepare_item_list(self, picking, recipient, attributes, trackings):
""" Return a list of item made from the pickings """
item_list = []
for pack in trackings:
name = pack.name if pack else picking.name
itemid = self._get_itemid(picking, name)
item = {
'ItemID': itemid,
'Recipient': recipient,
'Attributes': attributes,
}
item_list.append(item)
return item_list
def _prepare_data(self, item_list):
sending = {
'Item': item_list,
}
provider = {
'Sending': sending
}
data = {
'Provider': provider,
}
return data
def _prepare_envelope(self, picking, post_customer, data):
""" Define envelope for label request """
label_layout = self._get_label_layout(picking)
output_format = self._get_output_format(picking)
image_resolution = self._get_image_resolution(picking)
label_definitions = {
'LabelLayout': label_layout,
'PrintAddresses': 'RecipientAndCustomer',
'ImageFileType': output_format,
'ImageResolution': image_resolution,
'PrintPreview': False,
}
license = self._get_license(picking)
file_infos = {
'FrankingLicense': license,
'PpFranking': False,
'CustomerSystem': 'OpenERP',
'Customer': post_customer,
}
envelope = {
'LabelDefinition': label_definitions,
'FileInfos': file_infos,
'Data': data,
}
return envelope
def generate_label(self, picking, trackings, user_lang='en_US'):
""" Generate a label for a picking
:param picking: picking browse record
:param user_lang: OpenERP language code
:param trackings: list of browse records of trackings to filter on
:return: {
value: [{item_id: pack id
binary: file returned by API
tracking_number: id number for tracking
file_type: str of file type
}
]
errors: list of error message if any
warnings: list of warning message if any
}
"""
# get options
lang = self._get_language(user_lang)
post_customer = self._prepare_customer(picking)
attributes = self._prepare_attributes(picking)
recipient = self._prepare_recipient(picking)
item_list = self._prepare_item_list(picking, recipient, attributes,
trackings)
data = self._prepare_data(item_list)
envelope = self._prepare_envelope(picking, post_customer, data)
output_format = self._get_output_format(picking).lower()
res = {'value': []}
request = self.client.service.GenerateLabel
response = self._send_request(request, Language=lang,
Envelope=envelope)
if not response['success']:
return response
error_messages = []
warning_messages = []
for item in response['value'].Data.Provider.Sending.Item:
if hasattr(item, 'Errors') and item.Errors:
for error in item.Errors.Error:
message = '[%s] %s' % (error.Code, error.Message)
error_messages.append(message)
else:
file_type = output_format if output_format != 'spdf' else 'pdf'
res['value'].append({
'item_id': item.ItemID,
'binary': item.Label,
'tracking_number': item.IdentCode,
'file_type': file_type,
})
if hasattr(item, 'Warnings') and item.Warnings:
for warning in item.Warnings.Warning:
message = '[%s] %s' % (warning.Code, warning.Message)
warning_messages.append(message)
if error_messages:
res['errors'] = error_messages
if warning_messages:
res['warnings'] = warning_messages
return res
| agpl-3.0 |
shankarathi07/linux_motorola_lollipop | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
cloudbase/nova-virtualbox | nova/tests/unit/network/security_group/test_neutron_driver.py | 1 | 13344 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from mox3 import mox
from neutronclient.common import exceptions as n_exc
from neutronclient.v2_0 import client
from nova import context
from nova import exception
from nova.network.neutronv2 import api as neutronapi
from nova.network.security_group import neutron_driver
from nova import test
class TestNeutronDriver(test.NoDBTestCase):
def setUp(self):
super(TestNeutronDriver, self).setUp()
self.mox.StubOutWithMock(neutronapi, 'get_client')
self.moxed_client = self.mox.CreateMock(client.Client)
neutronapi.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
self.context = context.RequestContext('userid', 'my_tenantid')
setattr(self.context,
'auth_token',
'bff4a5a6b9eb4ea2a6efec6eefb77936')
def test_list_with_project(self):
project_id = '0af70a4d22cf4652824ddc1f2435dd85'
security_groups_list = {'security_groups': []}
self.moxed_client.list_security_groups(tenant_id=project_id).AndReturn(
security_groups_list)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
sg_api.list(self.context, project=project_id)
def test_get_with_name_duplicated(self):
sg_name = 'web_server'
expected_sg_id = '85cc3048-abc3-43cc-89b3-377341426ac5'
list_security_groups = {'security_groups':
[{'name': sg_name,
'id': expected_sg_id,
'tenant_id': self.context.tenant,
'description': 'server',
'rules': []}
]}
self.moxed_client.list_security_groups(name=sg_name, fields='id',
tenant_id=self.context.tenant).AndReturn(list_security_groups)
expected_sg = {'security_group': {'name': sg_name,
'id': expected_sg_id,
'tenant_id': self.context.tenant,
'description': 'server', 'rules': []}}
self.moxed_client.show_security_group(expected_sg_id).AndReturn(
expected_sg)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
observed_sg = sg_api.get(self.context, name=sg_name)
expected_sg['security_group']['project_id'] = self.context.tenant
del expected_sg['security_group']['tenant_id']
self.assertEqual(expected_sg['security_group'], observed_sg)
def test_get_with_invalid_name(self):
sg_name = 'invalid_name'
expected_sg_id = '85cc3048-abc3-43cc-89b3-377341426ac5'
list_security_groups = {'security_groups':
[{'name': sg_name,
'id': expected_sg_id,
'tenant_id': self.context.tenant,
'description': 'server',
'rules': []}
]}
self.moxed_client.list_security_groups(name=sg_name, fields='id',
tenant_id=self.context.tenant).AndReturn(list_security_groups)
self.moxed_client.show_security_group(expected_sg_id).AndRaise(
TypeError)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
self.assertRaises(exception.SecurityGroupNotFound,
sg_api.get, self.context, name=sg_name)
def test_create_security_group_exceed_quota(self):
name = 'test-security-group'
description = 'test-security-group'
body = {'security_group': {'name': name,
'description': description}}
message = "Quota exceeded for resources: ['security_group']"
self.moxed_client.create_security_group(
body).AndRaise(n_exc.NeutronClientException(status_code=409,
message=message))
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
self.assertRaises(exception.SecurityGroupLimitExceeded,
sg_api.create_security_group, self.context, name,
description)
def test_create_security_group_rules_exceed_quota(self):
vals = {'protocol': 'tcp', 'cidr': '0.0.0.0/0',
'parent_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47',
'group_id': None, 'from_port': 1025, 'to_port': 1025}
body = {'security_group_rules': [{'remote_group_id': None,
'direction': 'ingress', 'protocol': 'tcp', 'ethertype': 'IPv4',
'port_range_max': 1025, 'port_range_min': 1025,
'security_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47',
'remote_ip_prefix': '0.0.0.0/0'}]}
name = 'test-security-group'
message = "Quota exceeded for resources: ['security_group_rule']"
self.moxed_client.create_security_group_rule(
body).AndRaise(n_exc.NeutronClientException(status_code=409,
message=message))
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
self.assertRaises(exception.SecurityGroupLimitExceeded,
sg_api.add_rules, self.context, None, name, [vals])
def test_create_security_group_rules_bad_request(self):
vals = {'protocol': 'icmp', 'cidr': '0.0.0.0/0',
'parent_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47',
'group_id': None, 'to_port': 255}
body = {'security_group_rules': [{'remote_group_id': None,
'direction': 'ingress', 'protocol': 'icmp',
'ethertype': 'IPv4', 'port_range_max': 255,
'security_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47',
'remote_ip_prefix': '0.0.0.0/0'}]}
name = 'test-security-group'
message = "ICMP code (port-range-max) 255 is provided but ICMP type" \
" (port-range-min) is missing"
self.moxed_client.create_security_group_rule(
body).AndRaise(n_exc.NeutronClientException(status_code=400,
message=message))
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
self.assertRaises(exception.Invalid, sg_api.add_rules,
self.context, None, name, [vals])
def test_list_security_group_with_no_port_range_and_not_tcp_udp_icmp(self):
sg1 = {'description': 'default',
'id': '07f1362f-34f6-4136-819a-2dcde112269e',
'name': 'default',
'tenant_id': 'c166d9316f814891bcb66b96c4c891d6',
'security_group_rules':
[{'direction': 'ingress',
'ethertype': 'IPv4',
'id': '0a4647f1-e1aa-488d-90e1-97a7d0293beb',
'port_range_max': None,
'port_range_min': None,
'protocol': '51',
'remote_group_id': None,
'remote_ip_prefix': None,
'security_group_id':
'07f1362f-34f6-4136-819a-2dcde112269e',
'tenant_id': 'c166d9316f814891bcb66b96c4c891d6'}]}
self.moxed_client.list_security_groups().AndReturn(
{'security_groups': [sg1]})
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
result = sg_api.list(self.context)
expected = [{'rules':
[{'from_port': -1, 'protocol': '51', 'to_port': -1,
'parent_group_id': '07f1362f-34f6-4136-819a-2dcde112269e',
'cidr': '0.0.0.0/0', 'group_id': None,
'id': '0a4647f1-e1aa-488d-90e1-97a7d0293beb'}],
'project_id': 'c166d9316f814891bcb66b96c4c891d6',
'id': '07f1362f-34f6-4136-819a-2dcde112269e',
'name': 'default', 'description': 'default'}]
self.assertEqual(expected, result)
def test_instances_security_group_bindings(self):
server_id = 'c5a20e8d-c4b0-47cf-9dca-ebe4f758acb1'
port1_id = '4c505aec-09aa-47bc-bcc0-940477e84dc0'
port2_id = 'b3b31a53-6e29-479f-ae5c-00b7b71a6d44'
sg1_id = '2f7ce969-1a73-4ef9-bbd6-c9a91780ecd4'
sg2_id = '20c89ce5-9388-4046-896e-64ffbd3eb584'
servers = [{'id': server_id}]
ports = [{'id': port1_id, 'device_id': server_id,
'security_groups': [sg1_id]},
{'id': port2_id, 'device_id': server_id,
'security_groups': [sg2_id]}]
port_list = {'ports': ports}
sg1 = {'id': sg1_id, 'name': 'wol'}
sg2 = {'id': sg2_id, 'name': 'eor'}
security_groups_list = {'security_groups': [sg1, sg2]}
sg_bindings = {server_id: [{'name': 'wol'}, {'name': 'eor'}]}
self.moxed_client.list_ports(device_id=[server_id]).AndReturn(
port_list)
self.moxed_client.list_security_groups(id=[sg2_id, sg1_id]).AndReturn(
security_groups_list)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
result = sg_api.get_instances_security_groups_bindings(
self.context, servers)
self.assertEqual(result, sg_bindings)
def _test_instances_security_group_bindings_scale(self, num_servers):
max_query = 150
sg1_id = '2f7ce969-1a73-4ef9-bbd6-c9a91780ecd4'
sg2_id = '20c89ce5-9388-4046-896e-64ffbd3eb584'
sg1 = {'id': sg1_id, 'name': 'wol'}
sg2 = {'id': sg2_id, 'name': 'eor'}
security_groups_list = {'security_groups': [sg1, sg2]}
servers = []
device_ids = []
ports = []
sg_bindings = {}
for i in xrange(0, num_servers):
server_id = "server-%d" % i
port_id = "port-%d" % i
servers.append({'id': server_id})
device_ids.append(server_id)
ports.append({'id': port_id,
'device_id': server_id,
'security_groups': [sg1_id, sg2_id]})
sg_bindings[server_id] = [{'name': 'wol'}, {'name': 'eor'}]
for x in xrange(0, num_servers, max_query):
self.moxed_client.list_ports(
device_id=device_ids[x:x + max_query]).\
AndReturn({'ports': ports[x:x + max_query]})
self.moxed_client.list_security_groups(id=[sg2_id, sg1_id]).AndReturn(
security_groups_list)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
result = sg_api.get_instances_security_groups_bindings(
self.context, servers)
self.assertEqual(result, sg_bindings)
def test_instances_security_group_bindings_less_than_max(self):
self._test_instances_security_group_bindings_scale(100)
def test_instances_security_group_bindings_max(self):
self._test_instances_security_group_bindings_scale(150)
def test_instances_security_group_bindings_more_then_max(self):
self._test_instances_security_group_bindings_scale(300)
def test_instances_security_group_bindings_with_hidden_sg(self):
servers = [{'id': 'server_1'}]
ports = [{'id': '1', 'device_id': 'dev_1', 'security_groups': ['1']},
{'id': '2', 'device_id': 'dev_1', 'security_groups': ['2']}]
port_list = {'ports': ports}
sg1 = {'id': '1', 'name': 'wol'}
# User doesn't have access to sg2
security_groups_list = {'security_groups': [sg1]}
sg_bindings = {'dev_1': [{'name': 'wol'}]}
self.moxed_client.list_ports(device_id=['server_1']).AndReturn(
port_list)
self.moxed_client.list_security_groups(id=['1', '2']).AndReturn(
security_groups_list)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
result = sg_api.get_instances_security_groups_bindings(
self.context, servers)
self.assertEqual(result, sg_bindings)
def test_instance_empty_security_groups(self):
port_list = {'ports': [{'id': 1, 'device_id': '1',
'security_groups': []}]}
self.moxed_client.list_ports(device_id=['1']).AndReturn(port_list)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
result = sg_api.get_instance_security_groups(self.context, '1')
self.assertEqual([], result)
| apache-2.0 |
onitake/ansible | test/units/parsing/test_unquote.py | 298 | 1602 | # coding: utf-8
# (c) 2015, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.parsing.quoting import unquote
import pytest
UNQUOTE_DATA = (
(u'1', u'1'),
(u'\'1\'', u'1'),
(u'"1"', u'1'),
(u'"1 \'2\'"', u'1 \'2\''),
(u'\'1 "2"\'', u'1 "2"'),
(u'\'1 \'2\'\'', u'1 \'2\''),
(u'"1\\"', u'"1\\"'),
(u'\'1\\\'', u'\'1\\\''),
(u'"1 \\"2\\" 3"', u'1 \\"2\\" 3'),
(u'\'1 \\\'2\\\' 3\'', u'1 \\\'2\\\' 3'),
(u'"', u'"'),
(u'\'', u'\''),
# Not entirely sure these are good but they match the current
# behaviour
(u'"1""2"', u'1""2'),
(u'\'1\'\'2\'', u'1\'\'2'),
(u'"1" 2 "3"', u'1" 2 "3'),
(u'"1"\'2\'"3"', u'1"\'2\'"3'),
)
@pytest.mark.parametrize("quoted, expected", UNQUOTE_DATA)
def test_unquote(quoted, expected):
assert unquote(quoted) == expected
| gpl-3.0 |
google/packet-queue | packet_queue/simulation.py | 1 | 3507 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import time
from twisted.internet import reactor
class PipePair(object):
"""Holds two Pipe instances sharing a parameter dictionary and event log."""
def __init__(self, params, event_log):
self.event_log = event_log
self.up = Pipe('up', params, event_log)
self.down = Pipe('down', params, event_log)
class Pipe(object):
"""Takes packets, represented by a callback and a size in bytes, and possibly
invokes the callback later.
Limits bandwidth by holding packets in a "buffer", and rejects packets when
the buffer is full.
Applies constant random packet loss prior to packets joining the buffer, and
constant delay after they are released.
"""
PARAMS = {
'bandwidth': -1, # bytes per second, defaults to infinity
'buffer': -1, # max bytes allowed, defaults to infinity
'delay': 0.0,
'loss': 0.0,
}
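  # Worked example (parameter values assumed, not from the original code):
  # with {'bandwidth': 125000, 'buffer': 250000, 'delay': 0.05, 'loss': 0.01}
  # a 25000-byte backlog drains in 25000 / 125000 = 0.2 s of throttle delay,
  # and each delivered packet then waits a further 0.05 s of constant delay.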
def __init__(self, name, params, event_log):
self.name = name
self.params = params
self.events = event_log
self.size = 0
def attempt(self, deliver_callback, drop_callback, size):
"""Possibly invoke a callback representing a packet.
The callback may be invoked later using the Twisted reactor, simulating
network latency, or it may be ignored entirely, simulating packet loss.
"""
attempt_time = time.time()
def deliver():
delivery_time = time.time()
latency = delivery_time - attempt_time
self.events.add(delivery_time, self.name, 'deliver', size)
self.events.add(delivery_time, self.name, 'latency', latency)
deliver_callback()
if self.params['buffer'] > 0 and self.size + size > self.params['buffer']:
self.events.add(attempt_time, self.name, 'drop', size)
drop_callback()
return
if random.random() < self.params['loss']:
self.events.add(attempt_time, self.name, 'drop', size)
drop_callback()
return
self.size += size
self.events.add(attempt_time, self.name, 'buffer', self.size)
def release_buffer():
release_time = time.time()
self.size -= size
self.events.add(release_time, self.name, 'buffer', self.size)
# Delay has two components: throttled (proportional to size) and constant.
#
# Throttle delay is calculated by estimating the time it will take all of
# the enqueued packets to be released, including the current one. Release
# the current packet (subtract its size) after this period of time.
#
# After the packet is released, there is an additional period of constant
# delay, so schedule a second event to finally call the packet's callback.
throttle_delay = 0
if self.params['bandwidth'] > 0:
throttle_delay = float(self.size) / self.params['bandwidth']
constant_delay = self.params['delay']
reactor.callLater(throttle_delay, release_buffer)
reactor.callLater(throttle_delay + constant_delay, deliver)
| apache-2.0 |
marcoantoniooliveira/labweb | oscar/lib/python2.7/site-packages/django/core/cache/backends/filebased.py | 114 | 4904 | "File-based cache backend"
import hashlib
import os
import shutil
import time
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
from django.core.cache.backends.base import BaseCache, DEFAULT_TIMEOUT
from django.utils.encoding import force_bytes
class FileBasedCache(BaseCache):
def __init__(self, dir, params):
BaseCache.__init__(self, params)
self._dir = dir
if not os.path.exists(self._dir):
self._createdir()
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
if self.has_key(key, version=version):
return False
self.set(key, value, timeout, version=version)
return True
def get(self, key, default=None, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
fname = self._key_to_file(key)
try:
with open(fname, 'rb') as f:
exp = pickle.load(f)
now = time.time()
if exp is not None and exp < now:
self._delete(fname)
else:
return pickle.load(f)
except (IOError, OSError, EOFError, pickle.PickleError):
pass
return default
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
fname = self._key_to_file(key)
dirname = os.path.dirname(fname)
if timeout == DEFAULT_TIMEOUT:
timeout = self.default_timeout
self._cull()
try:
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(fname, 'wb') as f:
expiry = None if timeout is None else time.time() + timeout
pickle.dump(expiry, f, pickle.HIGHEST_PROTOCOL)
pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
except (IOError, OSError):
pass
def delete(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
try:
self._delete(self._key_to_file(key))
except (IOError, OSError):
pass
def _delete(self, fname):
os.remove(fname)
try:
# Remove the 2 subdirs if they're empty
dirname = os.path.dirname(fname)
os.rmdir(dirname)
os.rmdir(os.path.dirname(dirname))
except (IOError, OSError):
pass
def has_key(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
fname = self._key_to_file(key)
try:
with open(fname, 'rb') as f:
exp = pickle.load(f)
now = time.time()
if exp < now:
self._delete(fname)
return False
else:
return True
except (IOError, OSError, EOFError, pickle.PickleError):
return False
def _cull(self):
if int(self._num_entries) < self._max_entries:
return
try:
filelist = sorted(os.listdir(self._dir))
except (IOError, OSError):
return
if self._cull_frequency == 0:
doomed = filelist
else:
doomed = [os.path.join(self._dir, k) for (i, k) in enumerate(filelist) if i % self._cull_frequency == 0]
for topdir in doomed:
try:
for root, _, files in os.walk(topdir):
for f in files:
self._delete(os.path.join(root, f))
except (IOError, OSError):
pass
def _createdir(self):
try:
os.makedirs(self._dir)
except OSError:
raise EnvironmentError("Cache directory '%s' does not exist and could not be created'" % self._dir)
def _key_to_file(self, key):
"""
Convert the filename into an md5 string. We'll turn the first couple
bits of the path into directory prefixes to be nice to filesystems
that have problems with large numbers of files in a directory.
Thus, a cache key of "foo" gets turnned into a file named
``{cache-dir}ac/bd/18db4cc2f85cedef654fccc4a4d8``.
"""
path = hashlib.md5(force_bytes(key)).hexdigest()
path = os.path.join(path[:2], path[2:4], path[4:])
return os.path.join(self._dir, path)
def _get_num_entries(self):
count = 0
for _,_,files in os.walk(self._dir):
count += len(files)
return count
_num_entries = property(_get_num_entries)
def clear(self):
try:
shutil.rmtree(self._dir)
except (IOError, OSError):
pass
# For backwards compatibility
class CacheClass(FileBasedCache):
pass
| bsd-3-clause |
feroda/gasistafelice | gasistafelice/lib/shortcuts.py | 6 | 1242 |
# Copyright (C) 2008 Laboratori Guglielmo Marconi S.p.A. <http://www.labs.it>
#
# This file is part of SANET
# SANET is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, version 3 of the License
#
# SANET is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with SANET. If not, see <http://www.gnu.org/licenses/>.
from django.template import loader
from django.shortcuts import *
from django.template import RequestContext
from http import SVGHttpResponse, XMLHttpResponse
def render_to_svg_response(*args, **kwargs):
return SVGHttpResponse(loader.render_to_string(*args, **kwargs))
def render_to_xml_response(*args, **kwargs):
return XMLHttpResponse(loader.render_to_string(*args, **kwargs))
def render_to_context_response(request, template, context={}):
return render_to_response(template, context, context_instance=RequestContext(request))
| agpl-3.0 |
p0cisk/Quantum-GIS | python/ext-libs/pygments/lexers/trafficscript.py | 23 | 1546 | # -*- coding: utf-8 -*-
"""
pygments.lexers.trafficscript
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for RiverBed's TrafficScript (RTS) language.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer
from pygments.token import String, Number, Name, Keyword, Operator, Text, Comment
__all__ = ['RtsLexer']
class RtsLexer(RegexLexer):
"""
For `Riverbed Stingray Traffic Manager <http://www.riverbed.com/stingray>`_
.. versionadded:: 2.1
"""
name = 'TrafficScript'
aliases = ['rts','trafficscript']
filenames = ['*.rts']
tokens = {
'root' : [
(r"'(\\\\|\\[^\\]|[^'\\])*'", String),
(r'"', String, 'escapable-string'),
(r'(0x[0-9a-fA-F]+|\d+)', Number),
(r'\d+\.\d+', Number.Float),
(r'\$[a-zA-Z](\w|_)*', Name.Variable),
(r'(if|else|for(each)?|in|while|do|break|sub|return|import)', Keyword),
(r'[a-zA-Z][\w.]*', Name.Function),
(r'[-+*/%=,;(){}<>^.!~|&\[\]\?\:]', Operator),
(r'(>=|<=|==|!=|'
r'&&|\|\||'
r'\+=|.=|-=|\*=|/=|%=|<<=|>>=|&=|\|=|\^=|'
r'>>|<<|'
r'\+\+|--|=>)', Operator),
(r'[ \t\r]+', Text),
(r'#[^\n]*', Comment),
],
'escapable-string' : [
(r'\\[tsn]', String.Escape),
(r'[^"]', String),
(r'"', String, '#pop'),
],
}
| gpl-2.0 |
sestrella/ansible | lib/ansible/modules/network/check_point/cp_mgmt_application_site_category.py | 20 | 5115 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage Check Point Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: cp_mgmt_application_site_category
short_description: Manages application-site-category objects on Check Point over Web Services API
description:
- Manages application-site-category objects on Check Point devices including creating, updating and removing objects.
- All operations are performed over Web Services API.
version_added: "2.9"
author: "Or Soffer (@chkp-orso)"
options:
name:
description:
- Object name.
type: str
required: True
description:
description:
- N/A
type: str
tags:
description:
- Collection of tag identifiers.
type: list
color:
description:
- Color of the object. Should be one of existing colors.
type: str
choices: ['aquamarine', 'black', 'blue', 'crete blue', 'burlywood', 'cyan', 'dark green', 'khaki', 'orchid', 'dark orange', 'dark sea green',
'pink', 'turquoise', 'dark blue', 'firebrick', 'brown', 'forest green', 'gold', 'dark gold', 'gray', 'dark gray', 'light green', 'lemon chiffon',
'coral', 'sea green', 'sky blue', 'magenta', 'purple', 'slate blue', 'violet red', 'navy blue', 'olive', 'orange', 'red', 'sienna', 'yellow']
comments:
description:
- Comments string.
type: str
details_level:
description:
- The level of detail for some of the fields in the response can vary from showing only the UID value of the object to a fully detailed
representation of the object.
type: str
choices: ['uid', 'standard', 'full']
groups:
description:
- Collection of group identifiers.
type: list
ignore_warnings:
description:
- Apply changes ignoring warnings.
type: bool
ignore_errors:
description:
- Apply changes ignoring errors. You won't be able to publish such a changes. If ignore-warnings flag was omitted - warnings will also be ignored.
type: bool
extends_documentation_fragment: checkpoint_objects
"""
EXAMPLES = """
- name: add-application-site-category
cp_mgmt_application_site_category:
description: My Application Site category
name: New Application Site Category 1
state: present
- name: set-application-site-category
cp_mgmt_application_site_category:
description: My new Application Site category
name: New Application Site Category 1
state: present
- name: delete-application-site-category
cp_mgmt_application_site_category:
name: New Application Site Category 2
state: absent
"""
RETURN = """
cp_mgmt_application_site_category:
description: The checkpoint object created or updated.
returned: always, except when deleting the object.
type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.checkpoint.checkpoint import checkpoint_argument_spec_for_objects, api_call
def main():
argument_spec = dict(
name=dict(type='str', required=True),
description=dict(type='str'),
tags=dict(type='list'),
color=dict(type='str', choices=['aquamarine', 'black', 'blue', 'crete blue', 'burlywood', 'cyan', 'dark green',
'khaki', 'orchid', 'dark orange', 'dark sea green', 'pink', 'turquoise', 'dark blue', 'firebrick', 'brown',
'forest green', 'gold', 'dark gold', 'gray', 'dark gray', 'light green', 'lemon chiffon', 'coral', 'sea green',
'sky blue', 'magenta', 'purple', 'slate blue', 'violet red', 'navy blue', 'olive', 'orange', 'red', 'sienna',
'yellow']),
comments=dict(type='str'),
details_level=dict(type='str', choices=['uid', 'standard', 'full']),
groups=dict(type='list'),
ignore_warnings=dict(type='bool'),
ignore_errors=dict(type='bool')
)
argument_spec.update(checkpoint_argument_spec_for_objects)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
api_call_object = 'application-site-category'
result = api_call(module, api_call_object)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
DinoCow/airflow | airflow/migrations/versions/fe461863935f_increase_length_for_connection_password.py | 10 | 1787 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""increase_length_for_connection_password
Revision ID: fe461863935f
Revises: 08364691d074
Create Date: 2019-12-08 09:47:09.033009
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'fe461863935f'
down_revision = '08364691d074'
branch_labels = None
depends_on = None
def upgrade():
"""Apply increase_length_for_connection_password"""
with op.batch_alter_table('connection', schema=None) as batch_op:
batch_op.alter_column(
'password',
existing_type=sa.VARCHAR(length=500),
type_=sa.String(length=5000),
existing_nullable=True,
)
def downgrade():
"""Unapply increase_length_for_connection_password"""
with op.batch_alter_table('connection', schema=None) as batch_op:
batch_op.alter_column(
'password',
existing_type=sa.String(length=5000),
type_=sa.VARCHAR(length=500),
existing_nullable=True,
)
| apache-2.0 |
guijomatos/SickRage | sickbeard/autoPostProcesser.py | 18 | 2060 | # Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import os.path
import threading
import sickbeard
from sickbeard import logger
from sickbeard import processTV
from sickrage.helper.encoding import ek
class PostProcesser():
def __init__(self):
self.lock = threading.Lock()
self.amActive = False
def run(self, force=False):
"""
TODO: Rename class to PostProcessor (classname contains a typo)
Runs the postprocessor
:param force: Forces postprocessing run (reserved for future use)
:return: Returns when done without a return state/code
"""
self.amActive = True
if not ek(os.path.isdir, sickbeard.TV_DOWNLOAD_DIR):
logger.log(u"Automatic post-processing attempted but dir " + sickbeard.TV_DOWNLOAD_DIR + " doesn't exist",
logger.ERROR)
self.amActive = False
return
if not ek(os.path.isabs, sickbeard.TV_DOWNLOAD_DIR):
logger.log(
u"Automatic post-processing attempted but dir " + sickbeard.TV_DOWNLOAD_DIR + " is relative (and probably not what you really want to process)",
logger.ERROR)
self.amActive = False
return
processTV.processDir(sickbeard.TV_DOWNLOAD_DIR)
self.amActive = False
def __del__(self):
pass
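# Illustrative usage (assumed wiring; in SickRage this class is normally
# driven by the internal scheduler rather than called directly):
#   processor = PostProcesser()
#   processor.run()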
| gpl-3.0 |
KaranToor/MA450 | google-cloud-sdk/lib/googlecloudsdk/api_lib/app/runtimes/python.py | 3 | 1232 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fingerprinting code for the Python runtime."""
import textwrap
DOCKERFILE_PREAMBLE = 'FROM gcr.io/google_appengine/python\n'
DOCKERFILE_VIRTUALENV_TEMPLATE = textwrap.dedent("""\
LABEL python_version=python{python_version}
RUN virtualenv --no-download /env -p python{python_version}
# Set virtualenv environment variables. This is equivalent to running
# source /env/bin/activate
ENV VIRTUAL_ENV /env
ENV PATH /env/bin:$PATH
""")
DOCKERFILE_REQUIREMENTS_TXT = textwrap.dedent("""\
ADD requirements.txt /app/
RUN pip install -r requirements.txt
""")
DOCKERFILE_INSTALL_APP = 'ADD . /app/\n'
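# Illustrative composition of the snippets above (assumed usage; the real
# fingerprinting code assembles these fragments elsewhere):
#   dockerfile = (DOCKERFILE_PREAMBLE
#                 + DOCKERFILE_VIRTUALENV_TEMPLATE.format(python_version='3.4')
#                 + DOCKERFILE_REQUIREMENTS_TXT
#                 + DOCKERFILE_INSTALL_APP)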
| apache-2.0 |
rosslagerwall/xen | tools/libxl/idl.py | 32 | 12682 | import sys
PASS_BY_VALUE = 1
PASS_BY_REFERENCE = 2
DIR_NONE = 0
DIR_IN = 1
DIR_OUT = 2
DIR_BOTH = 3
_default_namespace = ""
def namespace(s):
if type(s) != str:
raise TypeError, "Require a string for the default namespace."
global _default_namespace
_default_namespace = s
def _get_default_namespace():
global _default_namespace
return _default_namespace
_default_hidden = False
def hidden(b):
global _default_hidden
_default_hidden = b
def _get_default_hidden():
global _default_hidden
return _default_hidden
class Type(object):
def __init__(self, typename, **kwargs):
self.namespace = kwargs.setdefault('namespace',
_get_default_namespace())
self._hidden = kwargs.setdefault('hidden', _get_default_hidden())
self.dir = kwargs.setdefault('dir', DIR_BOTH)
if self.dir not in [DIR_NONE, DIR_IN, DIR_OUT, DIR_BOTH]:
raise ValueError
self.passby = kwargs.setdefault('passby', PASS_BY_VALUE)
if self.passby not in [PASS_BY_VALUE, PASS_BY_REFERENCE]:
raise ValueError
self.private = kwargs.setdefault('private', False)
if typename is None: # Anonymous type
self.typename = None
self.rawname = None
elif self.namespace is None: # e.g. system provided types
self.typename = typename
self.rawname = typename
else:
self.typename = self.namespace + typename
self.rawname = typename
if self.typename is not None:
self.dispose_fn = kwargs.setdefault('dispose_fn', self.typename + "_dispose")
else:
self.dispose_fn = kwargs.setdefault('dispose_fn', None)
self.autogenerate_dispose_fn = kwargs.setdefault('autogenerate_dispose_fn', True)
if self.typename is not None:
self.copy_fn = kwargs.setdefault('copy_fn', self.typename + "_copy")
else:
self.copy_fn = kwargs.setdefault('copy_fn', None)
self.autogenerate_copy_fn = kwargs.setdefault('autogenerate_copy_fn', True)
self.init_fn = kwargs.setdefault('init_fn', None)
self.init_val = kwargs.setdefault('init_val', None)
self.autogenerate_init_fn = kwargs.setdefault('autogenerate_init_fn', False)
self.check_default_fn = kwargs.setdefault('check_default_fn', None)
if self.typename is not None and not self.private:
self.json_gen_fn = kwargs.setdefault('json_gen_fn', self.typename + "_gen_json")
self.json_parse_type = kwargs.setdefault('json_parse_type', "JSON_ANY")
if self.namespace is not None:
self.json_parse_fn = kwargs.setdefault('json_parse_fn',
self.namespace + "_" + self.rawname + "_parse_json")
else:
self.json_parse_fn = kwargs.setdefault('json_parse_fn',
self.typename + "_parse_json")
else:
self.json_gen_fn = kwargs.setdefault('json_gen_fn', None)
self.json_parse_type = kwargs.setdefault('json_parse_type', None)
self.json_parse_fn = kwargs.setdefault('json_parse_fn', None)
self.autogenerate_json = kwargs.setdefault('autogenerate_json', True)
def marshal_in(self):
return self.dir in [DIR_IN, DIR_BOTH]
def marshal_out(self):
return self.dir in [DIR_OUT, DIR_BOTH]
def hidden(self):
if self._hidden:
return "_hidden "
else:
return ""
def make_arg(self, n, passby=None):
if passby is None: passby = self.passby
if passby == PASS_BY_REFERENCE:
return "%s *%s" % (self.typename, n)
else:
return "%s %s" % (self.typename, n)
def pass_arg(self, n, isref=None, passby=None):
if passby is None: passby = self.passby
if isref is None: isref = self.passby == PASS_BY_REFERENCE
if passby == PASS_BY_REFERENCE:
if isref:
return "%s" % (n)
else:
return "&%s" % (n)
else:
if isref:
return "*%s" % (n)
else:
return "%s" % (n)
class Builtin(Type):
"""Builtin type"""
def __init__(self, typename, **kwargs):
kwargs.setdefault('dispose_fn', None)
kwargs.setdefault('autogenerate_dispose_fn', False)
kwargs.setdefault('autogenerate_json', False)
Type.__init__(self, typename, **kwargs)
class Number(Builtin):
def __init__(self, ctype, **kwargs):
kwargs.setdefault('namespace', None)
kwargs.setdefault('dispose_fn', None)
kwargs.setdefault('copy_fn', None)
kwargs.setdefault('signed', False)
kwargs.setdefault('json_gen_fn', "yajl_gen_integer")
kwargs.setdefault('json_parse_type', "JSON_INTEGER")
# json_parse_fn might be overridden on a specific type
kwargs.setdefault('json_parse_fn', "libxl__int_parse_json")
self.signed = kwargs['signed']
Builtin.__init__(self, ctype, **kwargs)
class UInt(Number):
def __init__(self, w, **kwargs):
kwargs.setdefault('namespace', None)
kwargs.setdefault('dispose_fn', None)
kwargs.setdefault('json_parse_fn', "libxl__uint%d_parse_json" % w)
kwargs.setdefault('copy_fn', None)
Number.__init__(self, "uint%d_t" % w, **kwargs)
self.width = w
class EnumerationValue(object):
def __init__(self, enum, value, name, **kwargs):
self.enum = enum
self.valuename = str.upper(name)
self.rawname = str.upper(enum.rawname) + "_" + self.valuename
self.name = str.upper(enum.value_namespace) + self.rawname
self.value = value
class Enumeration(Type):
def __init__(self, typename, values, **kwargs):
kwargs.setdefault('dispose_fn', None)
kwargs.setdefault('copy_fn', None)
kwargs.setdefault('json_parse_type', "JSON_STRING")
Type.__init__(self, typename, **kwargs)
self.value_namespace = kwargs.setdefault('value_namespace',
self.namespace)
self.values = []
for v in values:
# (value, name)
(num,name) = v
self.values.append(EnumerationValue(self, num, name,
typename=self.rawname))
def lookup(self, name):
for v in self.values:
if v.valuename == str.upper(name):
return v
return ValueError
class Field(object):
"""An element of an Aggregate type"""
def __init__(self, type, name, **kwargs):
self.type = type
self.name = name
self.const = kwargs.setdefault('const', False)
self.enumname = kwargs.setdefault('enumname', None)
self.init_val = kwargs.setdefault('init_val', None)
class Aggregate(Type):
"""A type containing a collection of other types"""
def __init__(self, kind, typename, fields, **kwargs):
kwargs.setdefault('json_parse_type', "JSON_MAP")
Type.__init__(self, typename, **kwargs)
if self.typename is not None:
self.init_fn = kwargs.setdefault('init_fn', self.typename + "_init")
else:
self.init_fn = kwargs.setdefault('init_fn', None)
self.autogenerate_init_fn = kwargs.setdefault('autogenerate_init_fn', True)
self.kind = kind
self.fields = []
for f in fields:
# (name, type[, {kw args}])
if len(f) == 2:
n,t = f
kw = {}
elif len(f) == 3:
n,t,kw = f
else:
raise ValueError
if n is None:
raise ValueError
self.fields.append(Field(t,n,**kw))
# Returns a tuple (stem, field-expr)
#
# field-expr is a C expression for a field "f" within the struct
# "v".
#
# stem is the stem common to both "f" and any other sibling field
# within the "v".
def member(self, v, f, isref):
if isref:
deref = v + "->"
else:
deref = v + "."
if f.name is None: # Anonymous
return (deref, deref)
else:
return (deref, deref + f.name)
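# Illustrative (derived from the code above): for a field named "foo" reached
# through a pointer variable "v", member("v", f, isref=True) returns
# ("v->", "v->foo"); with isref=False it returns ("v.", "v.foo").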
class Struct(Aggregate):
def __init__(self, name, fields, **kwargs):
kwargs.setdefault('passby', PASS_BY_REFERENCE)
Aggregate.__init__(self, "struct", name, fields, **kwargs)
def has_fields(self):
return len(self.fields) != 0
class Union(Aggregate):
def __init__(self, name, fields, **kwargs):
# Generally speaking some intelligence is required to free a
# union therefore any specific instance of this class will
# need to provide an explicit destructor function.
kwargs.setdefault('passby', PASS_BY_REFERENCE)
kwargs.setdefault('dispose_fn', None)
Aggregate.__init__(self, "union", name, fields, **kwargs)
class KeyedUnion(Aggregate):
"""A union which is keyed of another variable in the parent structure"""
def __init__(self, name, keyvar_type, keyvar_name, fields, **kwargs):
Aggregate.__init__(self, "union", name, [], **kwargs)
if not isinstance(keyvar_type, Enumeration):
raise ValueError
kv_kwargs = dict([(x.lstrip('keyvar_'),y) for (x,y) in kwargs.items() if x.startswith('keyvar_')])
self.keyvar = Field(keyvar_type, keyvar_name, **kv_kwargs)
for f in fields:
# (name, enum, type)
e, ty = f
ev = keyvar_type.lookup(e)
en = ev.name
self.fields.append(Field(ty, e, enumname=en))
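# Illustrative declaration (hypothetical names, sketching the expected shape):
#   mode = Enumeration("mode", [(0, "variant_a"), (1, "variant_b")])
#   u = KeyedUnion(None, mode, "type",
#                  [("variant_a", Struct(None, [])),
#                   ("variant_b", Struct(None, []))])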
#
# Standard Types
#
void = Builtin("void *", namespace = None)
bool = Builtin("bool", namespace = None,
copy_fn=None,
json_gen_fn = "yajl_gen_bool",
json_parse_type = "JSON_BOOL",
json_parse_fn = "libxl__bool_parse_json",
autogenerate_json = False)
size_t = Number("size_t", namespace = None)
integer = Number("int", namespace = None, signed = True)
uint8 = UInt(8)
uint16 = UInt(16)
uint32 = UInt(32)
uint64 = UInt(64, json_gen_fn = "libxl__uint64_gen_json")
string = Builtin("char *", namespace = None, copy_fn = "libxl_string_copy", dispose_fn = "free",
json_gen_fn = "libxl__string_gen_json",
json_parse_type = "JSON_STRING | JSON_NULL",
json_parse_fn = "libxl__string_parse_json",
autogenerate_json = False)
class Array(Type):
"""An array of the same type"""
def __init__(self, elem_type, lenvar_name, **kwargs):
kwargs.setdefault('dispose_fn', 'free')
kwargs.setdefault('json_parse_type', 'JSON_ARRAY')
Type.__init__(self, namespace=elem_type.namespace, typename=elem_type.rawname + " *", **kwargs)
lv_kwargs = dict([(x.lstrip('lenvar_'),y) for (x,y) in kwargs.items() if x.startswith('lenvar_')])
self.lenvar = Field(integer, lenvar_name, **lv_kwargs)
self.elem_type = elem_type
class OrderedDict(dict):
"""A dictionary which remembers insertion order.
push to back on duplicate insertion"""
def __init__(self):
dict.__init__(self)
self.__ordered = []
def __setitem__(self, key, value):
try:
self.__ordered.remove(key)
except ValueError:
pass
self.__ordered.append(key)
dict.__setitem__(self, key, value)
def ordered_keys(self):
return self.__ordered
def ordered_values(self):
return [self[x] for x in self.__ordered]
def ordered_items(self):
return [(x,self[x]) for x in self.__ordered]
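# Illustrative behaviour of the helper above: re-inserting an existing key
# pushes it to the back of the ordering.
#   d = OrderedDict(); d['a'] = 1; d['b'] = 2; d['a'] = 3
#   d.ordered_keys()   # -> ['b', 'a']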
def parse(f):
print >>sys.stderr, "Parsing %s" % f
globs = {}
locs = OrderedDict()
for n,t in globals().items():
if isinstance(t, Type):
globs[n] = t
elif isinstance(t,type(object)) and issubclass(t, Type):
globs[n] = t
elif n in ['PASS_BY_REFERENCE', 'PASS_BY_VALUE',
'DIR_NONE', 'DIR_IN', 'DIR_OUT', 'DIR_BOTH',
'namespace', 'hidden']:
globs[n] = t
try:
execfile(f, globs, locs)
except SyntaxError,e:
raise SyntaxError, \
"Errors were found at line %d while processing %s:\n\t%s"\
%(e.lineno,f,e.text)
types = [t for t in locs.ordered_values() if isinstance(t,Type)]
builtins = [t for t in types if isinstance(t,Builtin)]
types = [t for t in types if not isinstance(t,Builtin)]
return (builtins,types)
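# Illustrative call (hypothetical file name):
#   (builtins, types) = parse("libxl_types.idl")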
| gpl-2.0 |
Simage/shinken | test/test_reactionner_tag_get_notif.py | 18 | 6785 | #!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from shinken_test import *
class TestReactionnerTagGetNotifs(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_reactionner_tag_get_notif.cfg')
# For a service, we generate a notification and an event handler.
# Each one gets a specific reactionner_tag that we will look for.
def test_good_checks_get_only_tags_with_specific_tags(self):
now = int(time.time())
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 0, 'BAD | value1=0 value2=0']])
print "Go bad now"
self.scheduler_loop(2, [[svc, 2, 'BAD | value1=0 value2=0']])
to_del = []
for a in self.sched.actions.values():
print "\n\nA?", a, "\nZZZ%sZZZ" % a.command
# Set them go NOW
a.t_to_go = now
# In fact they are already launched, so we-reenabled them :)
print "AHAH?", a.status, a.__class__.my_type
if a.__class__.my_type == 'notification' and (a.status == 'zombie' or a.status == 'scheduled'):
to_del.append(a.id)
a.status = 'scheduled'
# And look for good tagging
if a.command.startswith('plugins/notifier.pl'):
print 'TAG:%s' % a.reactionner_tag
self.assertEqual('runonwindows', a.reactionner_tag)
if a.command.startswith('plugins/sms.pl'):
print 'TAG:%s' % a.reactionner_tag
self.assertEqual('sms', a.reactionner_tag)
if a.command.startswith('plugins/test_eventhandler.pl'):
print 'TAG: %s' % a.reactionner_tag
self.assertEqual('eventtag', a.reactionner_tag)
print "\n\n"
for _i in to_del:
print "DELETING", self.sched.actions[_i]
del self.sched.actions[_i]
print "NOW ACTION!"*20,'\n\n'
# Ok the tags are defined as it should, now try to get them as a reactionner :)
# Now get only tag ones
taggued_runonwindows_checks = self.sched.get_to_run_checks(False, True, reactionner_tags=['runonwindows'])
self.assertGreater(len(taggued_runonwindows_checks), 0)
for c in taggued_runonwindows_checks:
# Should be the host one only
self.assertTrue(c.command.startswith('plugins/notifier.pl'))
# Ok the tags are defined as it should, now try to get them as a reactionner :)
# Now get only tag ones
taggued_sms_checks = self.sched.get_to_run_checks(False, True, reactionner_tags=['sms'])
self.assertGreater(len(taggued_sms_checks), 0)
for c in taggued_sms_checks:
# Should be the host one only
self.assertTrue(c.command.startswith('plugins/sms.pl'))
taggued_eventtag_checks = self.sched.get_to_run_checks(False, True, reactionner_tags=['eventtag'])
self.assertGreater(len(taggued_eventtag_checks), 0)
for c in taggued_eventtag_checks:
# Should be the host one only
self.assertTrue(c.command.startswith('plugins/test_eventhandler.pl'))
# Same that upper, but with modules types
def test_good_checks_get_only_tags_with_specific_tags_andmodule_types(self):
now = int(time.time())
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 0, 'BAD | value1=0 value2=0']])
print "Go bad now"
self.scheduler_loop(2, [[svc, 2, 'BAD | value1=0 value2=0']])
for a in self.sched.actions.values():
# Set them go NOW
a.t_to_go = now
# In fact they are already launched, so we-reenabled them :)
a.status = 'scheduled'
# And look for good tagging
if a.command.startswith('plugins/notifier.pl'):
print a.__dict__
print a.reactionner_tag
self.assertEqual('runonwindows', a.reactionner_tag)
if a.command.startswith('plugins/test_eventhandler.pl'):
print a.__dict__
print a.reactionner_tag
self.assertEqual('eventtag', a.reactionner_tag)
# Ok the tags are defined as it should, now try to get them as a reactionner :)
# Now get only tag ones
taggued_runonwindows_checks = self.sched.get_to_run_checks(False, True, reactionner_tags=['runonwindows'], module_types=['fork'])
self.assertGreater(len(taggued_runonwindows_checks), 0)
for c in taggued_runonwindows_checks:
# Should be the host one only
self.assertTrue(c.command.startswith('plugins/notifier.pl'))
taggued_eventtag_checks = self.sched.get_to_run_checks(False, True, reactionner_tags=['eventtag'], module_types=['myassischicken'])
self.assertEqual(0, len(taggued_eventtag_checks))
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
liangazhou/django-rdp | packages/eclipse/plugins/org.python.pydev.jython_4.4.0.201510052309/Lib/_pyio.py | 28 | 68854 | """
Python implementation of the io module.
"""
from __future__ import (print_function, unicode_literals)
import os
import abc
import codecs
import warnings
import errno
# Import thread instead of threading to reduce startup cost
try:
from thread import allocate_lock as Lock
except ImportError:
from dummy_thread import allocate_lock as Lock
import io
from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END)
from errno import EINTR
__metaclass__ = type
# open() uses st_blksize whenever we can
DEFAULT_BUFFER_SIZE = 8 * 1024 # bytes
# NOTE: Base classes defined here are registered with the "official" ABCs
# defined in io.py. We don't use real inheritance though, because we don't
# want to inherit the C implementations.
class BlockingIOError(IOError):
"""Exception raised when I/O would block on a non-blocking I/O stream."""
def __init__(self, errno, strerror, characters_written=0):
super(IOError, self).__init__(errno, strerror)
if not isinstance(characters_written, (int, long)):
raise TypeError("characters_written must be a integer")
self.characters_written = characters_written
def open(file, mode="r", buffering=-1,
encoding=None, errors=None,
newline=None, closefd=True):
r"""Open file and return a stream. Raise IOError upon failure.
file is either a text or byte string giving the name (and the path
if the file isn't in the current working directory) of the file to
be opened or an integer file descriptor of the file to be
wrapped. (If a file descriptor is given, it is closed when the
returned I/O object is closed, unless closefd is set to False.)
mode is an optional string that specifies the mode in which the file
is opened. It defaults to 'r' which means open for reading in text
mode. Other common values are 'w' for writing (truncating the file if
it already exists), and 'a' for appending (which on some Unix systems,
means that all writes append to the end of the file regardless of the
current seek position). In text mode, if encoding is not specified the
encoding used is platform dependent. (For reading and writing raw
bytes use binary mode and leave encoding unspecified.) The available
modes are:
========= ===============================================================
Character Meaning
--------- ---------------------------------------------------------------
'r' open for reading (default)
'w' open for writing, truncating the file first
'a' open for writing, appending to the end of the file if it exists
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newline mode (for backwards compatibility; unneeded
for new code)
========= ===============================================================
The default mode is 'rt' (open for reading text). For binary random
access, the mode 'w+b' opens and truncates the file to 0 bytes, while
'r+b' opens the file without truncation.
Python distinguishes between files opened in binary and text modes,
even when the underlying operating system doesn't. Files opened in
binary mode (appending 'b' to the mode argument) return contents as
bytes objects without any decoding. In text mode (the default, or when
't' is appended to the mode argument), the contents of the file are
returned as strings, the bytes having been first decoded using a
platform-dependent encoding or using the specified encoding if given.
buffering is an optional integer used to set the buffering policy.
Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
line buffering (only usable in text mode), and an integer > 1 to indicate
the size of a fixed-size chunk buffer. When no buffering argument is
given, the default buffering policy works as follows:
* Binary files are buffered in fixed-size chunks; the size of the buffer
is chosen using a heuristic trying to determine the underlying device's
"block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
On many systems, the buffer will typically be 4096 or 8192 bytes long.
* "Interactive" text files (files for which isatty() returns True)
use line buffering. Other text files use the policy described above
for binary files.
encoding is the name of the encoding used to decode or encode the
file. This should only be used in text mode. The default encoding is
platform dependent, but any encoding supported by Python can be
passed. See the codecs module for the list of supported encodings.
errors is an optional string that specifies how encoding errors are to
be handled---this argument should not be used in binary mode. Pass
'strict' to raise a ValueError exception if there is an encoding error
(the default of None has the same effect), or pass 'ignore' to ignore
errors. (Note that ignoring encoding errors can lead to data loss.)
See the documentation for codecs.register for a list of the permitted
encoding error strings.
newline controls how universal newlines works (it only applies to text
mode). It can be None, '', '\n', '\r', and '\r\n'. It works as
follows:
* On input, if newline is None, universal newlines mode is
enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
these are translated into '\n' before being returned to the
caller. If it is '', universal newline mode is enabled, but line
endings are returned to the caller untranslated. If it has any of
the other legal values, input lines are only terminated by the given
string, and the line ending is returned to the caller untranslated.
* On output, if newline is None, any '\n' characters written are
translated to the system default line separator, os.linesep. If
newline is '', no translation takes place. If newline is any of the
other legal values, any '\n' characters written are translated to
the given string.
If closefd is False, the underlying file descriptor will be kept open
when the file is closed. This does not work when a file name is given
and must be True in that case.
open() returns a file object whose type depends on the mode, and
through which the standard file operations such as reading and writing
are performed. When open() is used to open a file in a text mode ('w',
'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
a file in a binary mode, the returned class varies: in read binary
mode, it returns a BufferedReader; in write binary and append binary
modes, it returns a BufferedWriter, and in read/write mode, it returns
a BufferedRandom.
It is also possible to use a string or bytearray as a file for both
reading and writing. For strings StringIO can be used like a file
opened in a text mode, and for bytes a BytesIO can be used like a file
opened in a binary mode.
"""
if not isinstance(file, (basestring, int, long)):
raise TypeError("invalid file: %r" % file)
if not isinstance(mode, basestring):
raise TypeError("invalid mode: %r" % mode)
if not isinstance(buffering, (int, long)):
raise TypeError("invalid buffering: %r" % buffering)
if encoding is not None and not isinstance(encoding, basestring):
raise TypeError("invalid encoding: %r" % encoding)
if errors is not None and not isinstance(errors, basestring):
raise TypeError("invalid errors: %r" % errors)
modes = set(mode)
if modes - set("arwb+tU") or len(mode) > len(modes):
raise ValueError("invalid mode: %r" % mode)
reading = "r" in modes
writing = "w" in modes
appending = "a" in modes
updating = "+" in modes
text = "t" in modes
binary = "b" in modes
if "U" in modes:
if writing or appending:
raise ValueError("can't use U and writing mode at once")
reading = True
if text and binary:
raise ValueError("can't have text and binary mode at once")
if reading + writing + appending > 1:
raise ValueError("can't have read/write/append mode at once")
if not (reading or writing or appending):
raise ValueError("must have exactly one of read/write/append mode")
if binary and encoding is not None:
raise ValueError("binary mode doesn't take an encoding argument")
if binary and errors is not None:
raise ValueError("binary mode doesn't take an errors argument")
if binary and newline is not None:
raise ValueError("binary mode doesn't take a newline argument")
raw = FileIO(file,
(reading and "r" or "") +
(writing and "w" or "") +
(appending and "a" or "") +
(updating and "+" or ""),
closefd)
line_buffering = False
if buffering == 1 or buffering < 0 and raw.isatty():
buffering = -1
line_buffering = True
if buffering < 0:
buffering = DEFAULT_BUFFER_SIZE
try:
bs = os.fstat(raw.fileno()).st_blksize
except (os.error, AttributeError):
pass
else:
if bs > 1:
buffering = bs
if buffering < 0:
raise ValueError("invalid buffering size")
if buffering == 0:
if binary:
return raw
raise ValueError("can't have unbuffered text I/O")
if updating:
buffer = BufferedRandom(raw, buffering)
elif writing or appending:
buffer = BufferedWriter(raw, buffering)
elif reading:
buffer = BufferedReader(raw, buffering)
else:
raise ValueError("unknown mode: %r" % mode)
if binary:
return buffer
text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
text.mode = mode
return text
class DocDescriptor:
"""Helper for builtins.open.__doc__
"""
def __get__(self, obj, typ):
return (
"open(file, mode='r', buffering=-1, encoding=None, "
"errors=None, newline=None, closefd=True)\n\n" +
open.__doc__)
class OpenWrapper:
"""Wrapper for builtins.open
Trick so that open won't become a bound method when stored
as a class variable (as dbm.dumb does).
See initstdio() in Python/pythonrun.c.
"""
__doc__ = DocDescriptor()
def __new__(cls, *args, **kwargs):
return open(*args, **kwargs)
class UnsupportedOperation(ValueError, IOError):
pass
class IOBase:
__metaclass__ = abc.ABCMeta
"""The abstract base class for all I/O classes, acting on streams of
bytes. There is no public constructor.
This class provides dummy implementations for many methods that
derived classes can override selectively; the default implementations
represent a file that cannot be read, written or seeked.
Even though IOBase does not declare read, readinto, or write because
their signatures will vary, implementations and clients should
consider those methods part of the interface. Also, implementations
may raise a IOError when operations they do not support are called.
The basic type used for binary data read from or written to a file is
bytes. bytearrays are accepted too, and in some cases (such as
readinto) needed. Text I/O classes work with str data.
Note that calling any method (even inquiries) on a closed stream is
undefined. Implementations may raise IOError in this case.
IOBase (and its subclasses) support the iterator protocol, meaning
that an IOBase object can be iterated over yielding the lines in a
stream.
IOBase also supports the :keyword:`with` statement. In this example,
fp is closed after the suite of the with statement is complete:
with open('spam.txt', 'r') as fp:
fp.write('Spam and eggs!')
"""
### Internal ###
def _unsupported(self, name):
"""Internal: raise an exception for unsupported operations."""
raise UnsupportedOperation("%s.%s() not supported" %
(self.__class__.__name__, name))
### Positioning ###
def seek(self, pos, whence=0):
"""Change stream position.
Change the stream position to byte offset offset. offset is
interpreted relative to the position indicated by whence. Values
for whence are:
* 0 -- start of stream (the default); offset should be zero or positive
* 1 -- current stream position; offset may be negative
* 2 -- end of stream; offset is usually negative
Return the new absolute position.
"""
self._unsupported("seek")
def tell(self):
"""Return current stream position."""
return self.seek(0, 1)
def truncate(self, pos=None):
"""Truncate file to size bytes.
Size defaults to the current IO position as reported by tell(). Return
the new size.
"""
self._unsupported("truncate")
### Flush and close ###
def flush(self):
"""Flush write buffers, if applicable.
This is not implemented for read-only and non-blocking streams.
"""
self._checkClosed()
# XXX Should this return the number of bytes written???
__closed = False
def close(self):
"""Flush and close the IO object.
This method has no effect if the file is already closed.
"""
if not self.__closed:
try:
self.flush()
finally:
self.__closed = True
def __del__(self):
"""Destructor. Calls close()."""
# The try/except block is in case this is called at program
# exit time, when it's possible that globals have already been
# deleted, and then the close() call might fail. Since
# there's nothing we can do about such failures and they annoy
# the end users, we suppress the traceback.
try:
self.close()
except:
pass
### Inquiries ###
def seekable(self):
"""Return whether object supports random access.
If False, seek(), tell() and truncate() will raise IOError.
This method may need to do a test seek().
"""
return False
def _checkSeekable(self, msg=None):
"""Internal: raise an IOError if file is not seekable
"""
if not self.seekable():
raise IOError("File or stream is not seekable."
if msg is None else msg)
def readable(self):
"""Return whether object was opened for reading.
If False, read() will raise IOError.
"""
return False
def _checkReadable(self, msg=None):
"""Internal: raise an IOError if file is not readable
"""
if not self.readable():
raise IOError("File or stream is not readable."
if msg is None else msg)
def writable(self):
"""Return whether object was opened for writing.
If False, write() and truncate() will raise IOError.
"""
return False
def _checkWritable(self, msg=None):
"""Internal: raise an IOError if file is not writable
"""
if not self.writable():
raise IOError("File or stream is not writable."
if msg is None else msg)
@property
def closed(self):
"""closed: bool. True iff the file has been closed.
For backwards compatibility, this is a property, not a predicate.
"""
return self.__closed
def _checkClosed(self, msg=None):
"""Internal: raise an ValueError if file is closed
"""
if self.closed:
raise ValueError("I/O operation on closed file."
if msg is None else msg)
### Context manager ###
def __enter__(self):
"""Context management protocol. Returns self."""
self._checkClosed()
return self
def __exit__(self, *args):
"""Context management protocol. Calls close()"""
self.close()
### Lower-level APIs ###
# XXX Should these be present even if unimplemented?
def fileno(self):
"""Returns underlying file descriptor if one exists.
An IOError is raised if the IO object does not use a file descriptor.
"""
self._unsupported("fileno")
def isatty(self):
"""Return whether this is an 'interactive' stream.
Return False if it can't be determined.
"""
self._checkClosed()
return False
### Readline[s] and writelines ###
def readline(self, limit=-1):
r"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
The line terminator is always b'\n' for binary files; for text
files, the newlines argument to open can be used to select the line
terminator(s) recognized.
"""
# For backwards compatibility, a (slowish) readline().
if hasattr(self, "peek"):
def nreadahead():
readahead = self.peek(1)
if not readahead:
return 1
n = (readahead.find(b"\n") + 1) or len(readahead)
if limit >= 0:
n = min(n, limit)
return n
else:
def nreadahead():
return 1
if limit is None:
limit = -1
elif not isinstance(limit, (int, long)):
raise TypeError("limit must be an integer")
res = bytearray()
while limit < 0 or len(res) < limit:
b = self.read(nreadahead())
if not b:
break
res += b
if res.endswith(b"\n"):
break
return bytes(res)
def __iter__(self):
self._checkClosed()
return self
def next(self):
line = self.readline()
if not line:
raise StopIteration
return line
def readlines(self, hint=None):
"""Return a list of lines from the stream.
hint can be specified to control the number of lines read: no more
lines will be read if the total size (in bytes/characters) of all
lines so far exceeds hint.
"""
if hint is not None and not isinstance(hint, (int, long)):
raise TypeError("integer or None expected")
if hint is None or hint <= 0:
return list(self)
n = 0
lines = []
for line in self:
lines.append(line)
n += len(line)
if n >= hint:
break
return lines
def writelines(self, lines):
self._checkClosed()
for line in lines:
self.write(line)
io.IOBase.register(IOBase)
class RawIOBase(IOBase):
"""Base class for raw binary I/O."""
# The read() method is implemented by calling readinto(); derived
# classes that want to support read() only need to implement
# readinto() as a primitive operation. In general, readinto() can be
# more efficient than read().
# (It would be tempting to also provide an implementation of
# readinto() in terms of read(), in case the latter is a more suitable
# primitive operation, but that would lead to nasty recursion in case
# a subclass doesn't implement either.)
def read(self, n=-1):
"""Read and return up to n bytes.
Returns an empty bytes object on EOF, or None if the object is
set not to block and has no data to read.
"""
if n is None:
n = -1
if n < 0:
return self.readall()
b = bytearray(n.__index__())
n = self.readinto(b)
if n is None:
return None
del b[n:]
return bytes(b)
def readall(self):
"""Read until EOF, using multiple read() call."""
res = bytearray()
while True:
data = self.read(DEFAULT_BUFFER_SIZE)
if not data:
break
res += data
if res:
return bytes(res)
else:
# b'' or None
return data
def readinto(self, b):
"""Read up to len(b) bytes into b.
Returns number of bytes read (0 for EOF), or None if the object
is set not to block and has no data to read.
"""
self._unsupported("readinto")
def write(self, b):
"""Write the given buffer to the IO stream.
Returns the number of bytes written, which may be less than len(b).
"""
self._unsupported("write")
io.RawIOBase.register(RawIOBase)
from _io import FileIO
RawIOBase.register(FileIO)
class BufferedIOBase(IOBase):
"""Base class for buffered IO objects.
The main difference with RawIOBase is that the read() method
supports omitting the size argument, and does not have a default
implementation that defers to readinto().
In addition, read(), readinto() and write() may raise
BlockingIOError if the underlying raw stream is in non-blocking
mode and not ready; unlike their raw counterparts, they will never
return None.
A typical implementation should not inherit from a RawIOBase
implementation, but wrap one.
"""
def read(self, n=None):
"""Read and return up to n bytes.
If the argument is omitted, None, or negative, reads and
returns all data until EOF.
If the argument is positive, and the underlying raw stream is
not 'interactive', multiple raw reads may be issued to satisfy
the byte count (unless EOF is reached first). But for
interactive raw streams (XXX and for pipes?), at most one raw
read will be issued, and a short result does not imply that
EOF is imminent.
Returns an empty bytes array on EOF.
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
self._unsupported("read")
def read1(self, n=None):
"""Read up to n bytes with at most one read() system call."""
self._unsupported("read1")
def readinto(self, b):
"""Read up to len(b) bytes into b.
Like read(), this may issue multiple reads to the underlying raw
stream, unless the latter is 'interactive'.
Returns the number of bytes read (0 for EOF).
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
# XXX This ought to work with anything that supports the buffer API
data = self.read(len(b))
n = len(data)
try:
b[:n] = data
except TypeError as err:
import array
if not isinstance(b, array.array):
raise err
b[:n] = array.array(b'b', data)
return n
def write(self, b):
"""Write the given buffer to the IO stream.
Return the number of bytes written, which is never less than
len(b).
Raises BlockingIOError if the buffer is full and the
underlying raw stream cannot accept more data at the moment.
"""
self._unsupported("write")
def detach(self):
"""
Separate the underlying raw stream from the buffer and return it.
After the raw stream has been detached, the buffer is in an unusable
state.
"""
self._unsupported("detach")
io.BufferedIOBase.register(BufferedIOBase)
class _BufferedIOMixin(BufferedIOBase):
"""A mixin implementation of BufferedIOBase with an underlying raw stream.
This passes most requests on to the underlying raw stream. It
does *not* provide implementations of read(), readinto() or
write().
"""
def __init__(self, raw):
self._raw = raw
### Positioning ###
def seek(self, pos, whence=0):
new_position = self.raw.seek(pos, whence)
if new_position < 0:
raise IOError("seek() returned an invalid position")
return new_position
def tell(self):
pos = self.raw.tell()
if pos < 0:
raise IOError("tell() returned an invalid position")
return pos
def truncate(self, pos=None):
# Flush the stream. We're mixing buffered I/O with lower-level I/O,
# and a flush may be necessary to synch both views of the current
# file state.
self.flush()
if pos is None:
pos = self.tell()
# XXX: Should seek() be used, instead of passing the position
# XXX directly to truncate?
return self.raw.truncate(pos)
### Flush and close ###
def flush(self):
if self.closed:
raise ValueError("flush of closed file")
self.raw.flush()
def close(self):
if self.raw is not None and not self.closed:
try:
# may raise BlockingIOError or BrokenPipeError etc
self.flush()
finally:
self.raw.close()
def detach(self):
if self.raw is None:
raise ValueError("raw stream already detached")
self.flush()
raw = self._raw
self._raw = None
return raw
### Inquiries ###
def seekable(self):
return self.raw.seekable()
def readable(self):
return self.raw.readable()
def writable(self):
return self.raw.writable()
@property
def raw(self):
return self._raw
@property
def closed(self):
return self.raw.closed
@property
def name(self):
return self.raw.name
@property
def mode(self):
return self.raw.mode
def __repr__(self):
clsname = self.__class__.__name__
try:
name = self.name
except AttributeError:
return "<_pyio.{0}>".format(clsname)
else:
return "<_pyio.{0} name={1!r}>".format(clsname, name)
### Lower-level APIs ###
def fileno(self):
return self.raw.fileno()
def isatty(self):
return self.raw.isatty()
class BytesIO(BufferedIOBase):
"""Buffered I/O implementation using an in-memory bytes buffer."""
def __init__(self, initial_bytes=None):
buf = bytearray()
if initial_bytes is not None:
buf.extend(initial_bytes)
self._buffer = buf
self._pos = 0
def __getstate__(self):
if self.closed:
raise ValueError("__getstate__ on closed file")
return self.__dict__.copy()
def getvalue(self):
"""Return the bytes value (contents) of the buffer
"""
if self.closed:
raise ValueError("getvalue on closed file")
return bytes(self._buffer)
def read(self, n=None):
if self.closed:
raise ValueError("read from closed file")
if n is None:
n = -1
if not isinstance(n, (int, long)):
raise TypeError("integer argument expected, got {0!r}".format(
type(n)))
if n < 0:
n = len(self._buffer)
if len(self._buffer) <= self._pos:
return b""
newpos = min(len(self._buffer), self._pos + n)
b = self._buffer[self._pos : newpos]
self._pos = newpos
return bytes(b)
def read1(self, n):
"""This is the same as read.
"""
return self.read(n)
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, unicode):
raise TypeError("can't write unicode to binary stream")
n = len(b)
if n == 0:
return 0
pos = self._pos
if pos > len(self._buffer):
# Inserts null bytes between the current end of the file
# and the new write position.
padding = b'\x00' * (pos - len(self._buffer))
self._buffer += padding
self._buffer[pos:pos + n] = b
self._pos += n
return n
def seek(self, pos, whence=0):
if self.closed:
raise ValueError("seek on closed file")
try:
pos.__index__
except AttributeError:
raise TypeError("an integer is required")
if whence == 0:
if pos < 0:
raise ValueError("negative seek position %r" % (pos,))
self._pos = pos
elif whence == 1:
self._pos = max(0, self._pos + pos)
elif whence == 2:
self._pos = max(0, len(self._buffer) + pos)
else:
raise ValueError("invalid whence value")
return self._pos
def tell(self):
if self.closed:
raise ValueError("tell on closed file")
return self._pos
def truncate(self, pos=None):
if self.closed:
raise ValueError("truncate on closed file")
if pos is None:
pos = self._pos
else:
try:
pos.__index__
except AttributeError:
raise TypeError("an integer is required")
if pos < 0:
raise ValueError("negative truncate position %r" % (pos,))
del self._buffer[pos:]
return pos
def readable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
def writable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
def seekable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
class BufferedReader(_BufferedIOMixin):
"""BufferedReader(raw[, buffer_size])
A buffer for a readable, sequential BaseRawIO object.
The constructor creates a BufferedReader for the given readable raw
stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
is used.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
"""Create a new buffered reader using the given readable raw IO object.
"""
if not raw.readable():
raise IOError('"raw" argument must be readable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
self.buffer_size = buffer_size
self._reset_read_buf()
self._read_lock = Lock()
def _reset_read_buf(self):
self._read_buf = b""
self._read_pos = 0
def read(self, n=None):
"""Read n bytes.
Returns exactly n bytes of data unless the underlying raw IO
stream reaches EOF or if the call would block in non-blocking
mode. If n is negative, read until EOF or until read() would
block.
"""
if n is not None and n < -1:
raise ValueError("invalid number of bytes to read")
with self._read_lock:
return self._read_unlocked(n)
def _read_unlocked(self, n=None):
nodata_val = b""
empty_values = (b"", None)
buf = self._read_buf
pos = self._read_pos
# Special case for when the number of bytes to read is unspecified.
if n is None or n == -1:
self._reset_read_buf()
chunks = [buf[pos:]] # Strip the consumed bytes.
current_size = 0
while True:
# Read until EOF or until read() would block.
try:
chunk = self.raw.read()
except IOError as e:
if e.errno != EINTR:
raise
continue
if chunk in empty_values:
nodata_val = chunk
break
current_size += len(chunk)
chunks.append(chunk)
return b"".join(chunks) or nodata_val
# The number of bytes to read is specified, return at most n bytes.
avail = len(buf) - pos # Length of the available buffered data.
if n <= avail:
# Fast path: the data to read is fully buffered.
self._read_pos += n
return buf[pos:pos+n]
# Slow path: read from the stream until enough bytes are read,
# or until an EOF occurs or until read() would block.
chunks = [buf[pos:]]
wanted = max(self.buffer_size, n)
while avail < n:
try:
chunk = self.raw.read(wanted)
except IOError as e:
if e.errno != EINTR:
raise
continue
if chunk in empty_values:
nodata_val = chunk
break
avail += len(chunk)
chunks.append(chunk)
# n is more than avail only when an EOF occurred or when
# read() would have blocked.
n = min(n, avail)
out = b"".join(chunks)
self._read_buf = out[n:] # Save the extra data in the buffer.
self._read_pos = 0
return out[:n] if out else nodata_val
def peek(self, n=0):
"""Returns buffered bytes without advancing the position.
The argument indicates a desired minimal number of bytes; we
do at most one raw read to satisfy it. We never return more
than self.buffer_size.
"""
with self._read_lock:
return self._peek_unlocked(n)
def _peek_unlocked(self, n=0):
want = min(n, self.buffer_size)
have = len(self._read_buf) - self._read_pos
if have < want or have <= 0:
to_read = self.buffer_size - have
while True:
try:
current = self.raw.read(to_read)
except IOError as e:
if e.errno != EINTR:
raise
continue
break
if current:
self._read_buf = self._read_buf[self._read_pos:] + current
self._read_pos = 0
return self._read_buf[self._read_pos:]
def read1(self, n):
"""Reads up to n bytes, with at most one read() system call."""
# Returns up to n bytes. If at least one byte is buffered, we
# only return buffered bytes. Otherwise, we do one raw read.
if n < 0:
raise ValueError("number of bytes to read must be positive")
if n == 0:
return b""
with self._read_lock:
self._peek_unlocked(1)
return self._read_unlocked(
min(n, len(self._read_buf) - self._read_pos))
def tell(self):
return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos
def seek(self, pos, whence=0):
if not (0 <= whence <= 2):
raise ValueError("invalid whence value")
with self._read_lock:
if whence == 1:
pos -= len(self._read_buf) - self._read_pos
pos = _BufferedIOMixin.seek(self, pos, whence)
self._reset_read_buf()
return pos
class BufferedWriter(_BufferedIOMixin):
"""A buffer for a writeable sequential RawIO object.
The constructor creates a BufferedWriter for the given writeable raw
stream. If the buffer_size is not given, it defaults to
DEFAULT_BUFFER_SIZE.
"""
_warning_stack_offset = 2
def __init__(self, raw,
buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
if not raw.writable():
raise IOError('"raw" argument must be writable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
if max_buffer_size is not None:
warnings.warn("max_buffer_size is deprecated", DeprecationWarning,
self._warning_stack_offset)
self.buffer_size = buffer_size
self._write_buf = bytearray()
self._write_lock = Lock()
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, unicode):
raise TypeError("can't write unicode to binary stream")
with self._write_lock:
# XXX we can implement some more tricks to try and avoid
# partial writes
if len(self._write_buf) > self.buffer_size:
# We're full, so let's pre-flush the buffer. (This may
# raise BlockingIOError with characters_written == 0.)
self._flush_unlocked()
before = len(self._write_buf)
self._write_buf.extend(b)
written = len(self._write_buf) - before
if len(self._write_buf) > self.buffer_size:
try:
self._flush_unlocked()
except BlockingIOError as e:
if len(self._write_buf) > self.buffer_size:
# We've hit the buffer_size. We have to accept a partial
# write and cut back our buffer.
overage = len(self._write_buf) - self.buffer_size
written -= overage
self._write_buf = self._write_buf[:self.buffer_size]
raise BlockingIOError(e.errno, e.strerror, written)
return written
def truncate(self, pos=None):
with self._write_lock:
self._flush_unlocked()
if pos is None:
pos = self.raw.tell()
return self.raw.truncate(pos)
def flush(self):
with self._write_lock:
self._flush_unlocked()
def _flush_unlocked(self):
if self.closed:
raise ValueError("flush of closed file")
while self._write_buf:
try:
n = self.raw.write(self._write_buf)
except BlockingIOError:
raise RuntimeError("self.raw should implement RawIOBase: it "
"should not raise BlockingIOError")
except IOError as e:
if e.errno != EINTR:
raise
continue
if n is None:
raise BlockingIOError(
errno.EAGAIN,
"write could not complete without blocking", 0)
if n > len(self._write_buf) or n < 0:
raise IOError("write() returned incorrect number of bytes")
del self._write_buf[:n]
def tell(self):
return _BufferedIOMixin.tell(self) + len(self._write_buf)
def seek(self, pos, whence=0):
if not (0 <= whence <= 2):
raise ValueError("invalid whence")
with self._write_lock:
self._flush_unlocked()
return _BufferedIOMixin.seek(self, pos, whence)
class BufferedRWPair(BufferedIOBase):
"""A buffered reader and writer object together.
A buffered reader object and buffered writer object put together to
form a sequential IO object that can read and write. This is typically
used with a socket or two-way pipe.
reader and writer are RawIOBase objects that are readable and
writeable respectively. If the buffer_size is omitted it defaults to
DEFAULT_BUFFER_SIZE.
"""
# XXX The usefulness of this (compared to having two separate IO
# objects) is questionable.
def __init__(self, reader, writer,
buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
"""Constructor.
The arguments are two RawIO instances.
"""
if max_buffer_size is not None:
warnings.warn("max_buffer_size is deprecated", DeprecationWarning, 2)
if not reader.readable():
raise IOError('"reader" argument must be readable.')
if not writer.writable():
raise IOError('"writer" argument must be writable.')
self.reader = BufferedReader(reader, buffer_size)
self.writer = BufferedWriter(writer, buffer_size)
def read(self, n=None):
if n is None:
n = -1
return self.reader.read(n)
def readinto(self, b):
return self.reader.readinto(b)
def write(self, b):
return self.writer.write(b)
def peek(self, n=0):
return self.reader.peek(n)
def read1(self, n):
return self.reader.read1(n)
def readable(self):
return self.reader.readable()
def writable(self):
return self.writer.writable()
def flush(self):
return self.writer.flush()
def close(self):
self.writer.close()
self.reader.close()
def isatty(self):
return self.reader.isatty() or self.writer.isatty()
@property
def closed(self):
return self.writer.closed
class BufferedRandom(BufferedWriter, BufferedReader):
"""A buffered interface to random access streams.
The constructor creates a reader and writer for a seekable stream,
raw, given in the first argument. If the buffer_size is omitted it
defaults to DEFAULT_BUFFER_SIZE.
"""
_warning_stack_offset = 3
def __init__(self, raw,
buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
raw._checkSeekable()
BufferedReader.__init__(self, raw, buffer_size)
BufferedWriter.__init__(self, raw, buffer_size, max_buffer_size)
def seek(self, pos, whence=0):
if not (0 <= whence <= 2):
raise ValueError("invalid whence")
self.flush()
if self._read_buf:
# Undo read ahead.
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
# First do the raw seek, then empty the read buffer, so that
# if the raw seek fails, we don't lose buffered data forever.
pos = self.raw.seek(pos, whence)
with self._read_lock:
self._reset_read_buf()
if pos < 0:
raise IOError("seek() returned invalid position")
return pos
def tell(self):
if self._write_buf:
return BufferedWriter.tell(self)
else:
return BufferedReader.tell(self)
def truncate(self, pos=None):
if pos is None:
pos = self.tell()
# Use seek to flush the read buffer.
return BufferedWriter.truncate(self, pos)
def read(self, n=None):
if n is None:
n = -1
self.flush()
return BufferedReader.read(self, n)
def readinto(self, b):
self.flush()
return BufferedReader.readinto(self, b)
def peek(self, n=0):
self.flush()
return BufferedReader.peek(self, n)
def read1(self, n):
self.flush()
return BufferedReader.read1(self, n)
def write(self, b):
if self._read_buf:
# Undo readahead
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
self._reset_read_buf()
return BufferedWriter.write(self, b)
class TextIOBase(IOBase):
"""Base class for text I/O.
This class provides a character and line based interface to stream
I/O. There is no readinto method because Python's character strings
are immutable. There is no public constructor.
"""
def read(self, n=-1):
"""Read at most n characters from stream.
Read from underlying buffer until we have n characters or we hit EOF.
If n is negative or omitted, read until EOF.
"""
self._unsupported("read")
def write(self, s):
"""Write string s to stream."""
self._unsupported("write")
def truncate(self, pos=None):
"""Truncate size to pos."""
self._unsupported("truncate")
def readline(self):
"""Read until newline or EOF.
Returns an empty string if EOF is hit immediately.
"""
self._unsupported("readline")
def detach(self):
"""
Separate the underlying buffer from the TextIOBase and return it.
After the underlying buffer has been detached, the TextIO is in an
unusable state.
"""
self._unsupported("detach")
@property
def encoding(self):
"""Subclasses should override."""
return None
@property
def newlines(self):
"""Line endings translated so far.
Only line endings translated during reading are considered.
Subclasses should override.
"""
return None
@property
def errors(self):
"""Error setting of the decoder or encoder.
Subclasses should override."""
return None
io.TextIOBase.register(TextIOBase)
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
r"""Codec used when reading a file in universal newlines mode. It wraps
another incremental decoder, translating \r\n and \r into \n. It also
records the types of newlines encountered. When used with
translate=False, it ensures that the newline sequence is returned in
one piece.
"""
def __init__(self, decoder, translate, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors=errors)
self.translate = translate
self.decoder = decoder
self.seennl = 0
self.pendingcr = False
def decode(self, input, final=False):
# decode input (with any \r carried over from a previous pass)
if self.decoder is None:
output = input
else:
output = self.decoder.decode(input, final=final)
if self.pendingcr and (output or final):
output = "\r" + output
self.pendingcr = False
# retain last \r even when not translating data:
# then readline() is sure to get \r\n in one pass
if output.endswith("\r") and not final:
output = output[:-1]
self.pendingcr = True
# Record which newlines are read
crlf = output.count('\r\n')
cr = output.count('\r') - crlf
lf = output.count('\n') - crlf
self.seennl |= (lf and self._LF) | (cr and self._CR) \
| (crlf and self._CRLF)
if self.translate:
if crlf:
output = output.replace("\r\n", "\n")
if cr:
output = output.replace("\r", "\n")
return output
def getstate(self):
if self.decoder is None:
buf = b""
flag = 0
else:
buf, flag = self.decoder.getstate()
flag <<= 1
if self.pendingcr:
flag |= 1
return buf, flag
def setstate(self, state):
buf, flag = state
self.pendingcr = bool(flag & 1)
if self.decoder is not None:
self.decoder.setstate((buf, flag >> 1))
def reset(self):
self.seennl = 0
self.pendingcr = False
if self.decoder is not None:
self.decoder.reset()
_LF = 1
_CR = 2
_CRLF = 4
@property
def newlines(self):
return (None,
"\n",
"\r",
("\r", "\n"),
"\r\n",
("\n", "\r\n"),
("\r", "\r\n"),
("\r", "\n", "\r\n")
)[self.seennl]
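# A minimal sketch (illustrative only, not part of the original module) of how
# IncrementalNewlineDecoder behaves: with no wrapped decoder it simply
# translates and records newlines in already-decoded text.
def _demo_incremental_newline_decoder():
    dec = IncrementalNewlineDecoder(decoder=None, translate=True)
    out = dec.decode("one\r\ntwo\r")          # trailing \r is held back...
    out += dec.decode("three\n", final=True)  # ...and flushed on the final call
    # out == "one\ntwo\nthree\n"; dec.newlines reports which endings were seen
    return out, dec.newlines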
class TextIOWrapper(TextIOBase):
r"""Character and line based layer over a BufferedIOBase object, buffer.
encoding gives the name of the encoding that the stream will be
decoded or encoded with. It defaults to locale.getpreferredencoding.
errors determines the strictness of encoding and decoding (see the
codecs.register) and defaults to "strict".
newline can be None, '', '\n', '\r', or '\r\n'. It controls the
handling of line endings. If it is None, universal newlines is
    enabled. With this enabled, on input, the line endings '\n', '\r',
or '\r\n' are translated to '\n' before being returned to the
caller. Conversely, on output, '\n' is translated to the system
default line separator, os.linesep. If newline is any other of its
legal values, that newline becomes the newline when the file is read
and it is returned untranslated. On output, '\n' is converted to the
newline.
If line_buffering is True, a call to flush is implied when a call to
write contains a newline character.
"""
_CHUNK_SIZE = 2048
def __init__(self, buffer, encoding=None, errors=None, newline=None,
line_buffering=False):
if newline is not None and not isinstance(newline, basestring):
raise TypeError("illegal newline type: %r" % (type(newline),))
if newline not in (None, "", "\n", "\r", "\r\n"):
raise ValueError("illegal newline value: %r" % (newline,))
if encoding is None:
try:
import locale
except ImportError:
# Importing locale may fail if Python is being built
encoding = "ascii"
else:
encoding = locale.getpreferredencoding()
if not isinstance(encoding, basestring):
raise ValueError("invalid encoding: %r" % encoding)
if errors is None:
errors = "strict"
else:
if not isinstance(errors, basestring):
raise ValueError("invalid errors: %r" % errors)
self._buffer = buffer
self._line_buffering = line_buffering
self._encoding = encoding
self._errors = errors
self._readuniversal = not newline
self._readtranslate = newline is None
self._readnl = newline
self._writetranslate = newline != ''
self._writenl = newline or os.linesep
self._encoder = None
self._decoder = None
self._decoded_chars = '' # buffer for text returned from decoder
self._decoded_chars_used = 0 # offset into _decoded_chars for read()
self._snapshot = None # info for reconstructing decoder state
self._seekable = self._telling = self.buffer.seekable()
if self._seekable and self.writable():
position = self.buffer.tell()
if position != 0:
try:
self._get_encoder().setstate(0)
except LookupError:
# Sometimes the encoder doesn't exist
pass
# self._snapshot is either None, or a tuple (dec_flags, next_input)
# where dec_flags is the second (integer) item of the decoder state
# and next_input is the chunk of input bytes that comes next after the
# snapshot point. We use this to reconstruct decoder states in tell().
# Naming convention:
# - "bytes_..." for integer variables that count input bytes
# - "chars_..." for integer variables that count decoded characters
def __repr__(self):
try:
name = self.name
except AttributeError:
return "<_pyio.TextIOWrapper encoding='{0}'>".format(self.encoding)
else:
return "<_pyio.TextIOWrapper name={0!r} encoding='{1}'>".format(
name, self.encoding)
@property
def encoding(self):
return self._encoding
@property
def errors(self):
return self._errors
@property
def line_buffering(self):
return self._line_buffering
@property
def buffer(self):
return self._buffer
def seekable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return self._seekable
def readable(self):
return self.buffer.readable()
def writable(self):
return self.buffer.writable()
def flush(self):
self.buffer.flush()
self._telling = self._seekable
def close(self):
if self.buffer is not None and not self.closed:
try:
self.flush()
finally:
self.buffer.close()
@property
def closed(self):
return self.buffer.closed
@property
def name(self):
return self.buffer.name
def fileno(self):
return self.buffer.fileno()
def isatty(self):
return self.buffer.isatty()
def write(self, s):
if self.closed:
raise ValueError("write to closed file")
if not isinstance(s, unicode):
raise TypeError("can't write %s to text stream" %
s.__class__.__name__)
length = len(s)
haslf = (self._writetranslate or self._line_buffering) and "\n" in s
if haslf and self._writetranslate and self._writenl != "\n":
s = s.replace("\n", self._writenl)
encoder = self._encoder or self._get_encoder()
# XXX What if we were just reading?
b = encoder.encode(s)
self.buffer.write(b)
if self._line_buffering and (haslf or "\r" in s):
self.flush()
self._snapshot = None
if self._decoder:
self._decoder.reset()
return length
def _get_encoder(self):
make_encoder = codecs.getincrementalencoder(self._encoding)
self._encoder = make_encoder(self._errors)
return self._encoder
def _get_decoder(self):
make_decoder = codecs.getincrementaldecoder(self._encoding)
decoder = make_decoder(self._errors)
if self._readuniversal:
decoder = IncrementalNewlineDecoder(decoder, self._readtranslate)
self._decoder = decoder
return decoder
# The following three methods implement an ADT for _decoded_chars.
# Text returned from the decoder is buffered here until the client
# requests it by calling our read() or readline() method.
def _set_decoded_chars(self, chars):
"""Set the _decoded_chars buffer."""
self._decoded_chars = chars
self._decoded_chars_used = 0
def _get_decoded_chars(self, n=None):
"""Advance into the _decoded_chars buffer."""
offset = self._decoded_chars_used
if n is None:
chars = self._decoded_chars[offset:]
else:
chars = self._decoded_chars[offset:offset + n]
self._decoded_chars_used += len(chars)
return chars
def _rewind_decoded_chars(self, n):
"""Rewind the _decoded_chars buffer."""
if self._decoded_chars_used < n:
raise AssertionError("rewind decoded_chars out of bounds")
self._decoded_chars_used -= n
def _read_chunk(self):
"""
Read and decode the next chunk of data from the BufferedReader.
"""
# The return value is True unless EOF was reached. The decoded
# string is placed in self._decoded_chars (replacing its previous
# value). The entire input chunk is sent to the decoder, though
# some of it may remain buffered in the decoder, yet to be
# converted.
if self._decoder is None:
raise ValueError("no decoder")
if self._telling:
# To prepare for tell(), we need to snapshot a point in the
# file where the decoder's input buffer is empty.
dec_buffer, dec_flags = self._decoder.getstate()
# Given this, we know there was a valid snapshot point
# len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
# Read a chunk, decode it, and put the result in self._decoded_chars.
input_chunk = self.buffer.read1(self._CHUNK_SIZE)
eof = not input_chunk
self._set_decoded_chars(self._decoder.decode(input_chunk, eof))
if self._telling:
# At the snapshot point, len(dec_buffer) bytes before the read,
# the next input to be decoded is dec_buffer + input_chunk.
self._snapshot = (dec_flags, dec_buffer + input_chunk)
return not eof
def _pack_cookie(self, position, dec_flags=0,
bytes_to_feed=0, need_eof=0, chars_to_skip=0):
# The meaning of a tell() cookie is: seek to position, set the
# decoder flags to dec_flags, read bytes_to_feed bytes, feed them
# into the decoder with need_eof as the EOF flag, then skip
# chars_to_skip characters of the decoded result. For most simple
# decoders, tell() will often just give a byte offset in the file.
return (position | (dec_flags<<64) | (bytes_to_feed<<128) |
(chars_to_skip<<192) | bool(need_eof)<<256)
def _unpack_cookie(self, bigint):
rest, position = divmod(bigint, 1<<64)
rest, dec_flags = divmod(rest, 1<<64)
rest, bytes_to_feed = divmod(rest, 1<<64)
need_eof, chars_to_skip = divmod(rest, 1<<64)
return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip
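    # A worked example of the cookie layout above (hypothetical values, for
    # illustration only): _pack_cookie(position=10, dec_flags=1, bytes_to_feed=3,
    # need_eof=0, chars_to_skip=2) yields 10 | (1<<64) | (3<<128) | (2<<192),
    # and _unpack_cookie recovers (10, 1, 3, 0, 2) by repeated divmod(_, 1<<64).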
def tell(self):
if not self._seekable:
raise IOError("underlying stream is not seekable")
if not self._telling:
raise IOError("telling position disabled by next() call")
self.flush()
position = self.buffer.tell()
decoder = self._decoder
if decoder is None or self._snapshot is None:
if self._decoded_chars:
# This should never happen.
raise AssertionError("pending decoded text")
return position
# Skip backward to the snapshot point (see _read_chunk).
dec_flags, next_input = self._snapshot
position -= len(next_input)
# How many decoded characters have been used up since the snapshot?
chars_to_skip = self._decoded_chars_used
if chars_to_skip == 0:
# We haven't moved from the snapshot point.
return self._pack_cookie(position, dec_flags)
# Starting from the snapshot position, we will walk the decoder
# forward until it gives us enough decoded characters.
saved_state = decoder.getstate()
try:
# Note our initial start point.
decoder.setstate((b'', dec_flags))
start_pos = position
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
need_eof = 0
# Feed the decoder one byte at a time. As we go, note the
# nearest "safe start point" before the current location
# (a point where the decoder has nothing buffered, so seek()
# can safely start from there and advance to this location).
for next_byte in next_input:
bytes_fed += 1
chars_decoded += len(decoder.decode(next_byte))
dec_buffer, dec_flags = decoder.getstate()
if not dec_buffer and chars_decoded <= chars_to_skip:
# Decoder buffer is empty, so this is a safe start point.
start_pos += bytes_fed
chars_to_skip -= chars_decoded
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
if chars_decoded >= chars_to_skip:
break
else:
# We didn't get enough decoded data; signal EOF to get more.
chars_decoded += len(decoder.decode(b'', final=True))
need_eof = 1
if chars_decoded < chars_to_skip:
raise IOError("can't reconstruct logical file position")
# The returned cookie corresponds to the last safe start point.
return self._pack_cookie(
start_pos, start_flags, bytes_fed, need_eof, chars_to_skip)
finally:
decoder.setstate(saved_state)
def truncate(self, pos=None):
self.flush()
if pos is None:
pos = self.tell()
return self.buffer.truncate(pos)
def detach(self):
if self.buffer is None:
raise ValueError("buffer is already detached")
self.flush()
buffer = self._buffer
self._buffer = None
return buffer
def seek(self, cookie, whence=0):
if self.closed:
raise ValueError("tell on closed file")
if not self._seekable:
raise IOError("underlying stream is not seekable")
if whence == 1: # seek relative to current position
if cookie != 0:
raise IOError("can't do nonzero cur-relative seeks")
# Seeking to the current position should attempt to
# sync the underlying buffer with the current position.
whence = 0
cookie = self.tell()
if whence == 2: # seek relative to end of file
if cookie != 0:
raise IOError("can't do nonzero end-relative seeks")
self.flush()
position = self.buffer.seek(0, 2)
self._set_decoded_chars('')
self._snapshot = None
if self._decoder:
self._decoder.reset()
return position
if whence != 0:
raise ValueError("invalid whence (%r, should be 0, 1 or 2)" %
(whence,))
if cookie < 0:
raise ValueError("negative seek position %r" % (cookie,))
self.flush()
# The strategy of seek() is to go back to the safe start point
# and replay the effect of read(chars_to_skip) from there.
start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \
self._unpack_cookie(cookie)
# Seek back to the safe start point.
self.buffer.seek(start_pos)
self._set_decoded_chars('')
self._snapshot = None
# Restore the decoder to its state from the safe start point.
if cookie == 0 and self._decoder:
self._decoder.reset()
elif self._decoder or dec_flags or chars_to_skip:
self._decoder = self._decoder or self._get_decoder()
self._decoder.setstate((b'', dec_flags))
self._snapshot = (dec_flags, b'')
if chars_to_skip:
# Just like _read_chunk, feed the decoder and save a snapshot.
input_chunk = self.buffer.read(bytes_to_feed)
self._set_decoded_chars(
self._decoder.decode(input_chunk, need_eof))
self._snapshot = (dec_flags, input_chunk)
# Skip chars_to_skip of the decoded characters.
if len(self._decoded_chars) < chars_to_skip:
raise IOError("can't restore logical file position")
self._decoded_chars_used = chars_to_skip
# Finally, reset the encoder (merely useful for proper BOM handling)
try:
encoder = self._encoder or self._get_encoder()
except LookupError:
# Sometimes the encoder doesn't exist
pass
else:
if cookie != 0:
encoder.setstate(0)
else:
encoder.reset()
return cookie
def read(self, n=None):
self._checkReadable()
if n is None:
n = -1
decoder = self._decoder or self._get_decoder()
try:
n.__index__
except AttributeError:
raise TypeError("an integer is required")
if n < 0:
# Read everything.
result = (self._get_decoded_chars() +
decoder.decode(self.buffer.read(), final=True))
self._set_decoded_chars('')
self._snapshot = None
return result
else:
# Keep reading chunks until we have n characters to return.
eof = False
result = self._get_decoded_chars(n)
while len(result) < n and not eof:
eof = not self._read_chunk()
result += self._get_decoded_chars(n - len(result))
return result
def next(self):
self._telling = False
line = self.readline()
if not line:
self._snapshot = None
self._telling = self._seekable
raise StopIteration
return line
def readline(self, limit=None):
if self.closed:
raise ValueError("read from closed file")
if limit is None:
limit = -1
elif not isinstance(limit, (int, long)):
raise TypeError("limit must be an integer")
# Grab all the decoded text (we will rewind any extra bits later).
line = self._get_decoded_chars()
start = 0
# Make the decoder if it doesn't already exist.
if not self._decoder:
self._get_decoder()
pos = endpos = None
while True:
if self._readtranslate:
# Newlines are already translated, only search for \n
pos = line.find('\n', start)
if pos >= 0:
endpos = pos + 1
break
else:
start = len(line)
elif self._readuniversal:
# Universal newline search. Find any of \r, \r\n, \n
# The decoder ensures that \r\n are not split in two pieces
# In C we'd look for these in parallel of course.
nlpos = line.find("\n", start)
crpos = line.find("\r", start)
if crpos == -1:
if nlpos == -1:
# Nothing found
start = len(line)
else:
# Found \n
endpos = nlpos + 1
break
elif nlpos == -1:
# Found lone \r
endpos = crpos + 1
break
elif nlpos < crpos:
# Found \n
endpos = nlpos + 1
break
elif nlpos == crpos + 1:
# Found \r\n
endpos = crpos + 2
break
else:
# Found \r
endpos = crpos + 1
break
else:
# non-universal
pos = line.find(self._readnl)
if pos >= 0:
endpos = pos + len(self._readnl)
break
if limit >= 0 and len(line) >= limit:
endpos = limit # reached length limit
break
            # No line ending seen yet - get more data.
while self._read_chunk():
if self._decoded_chars:
break
if self._decoded_chars:
line += self._get_decoded_chars()
else:
# end of file
self._set_decoded_chars('')
self._snapshot = None
return line
if limit >= 0 and endpos > limit:
endpos = limit # don't exceed limit
# Rewind _decoded_chars to just after the line ending we found.
self._rewind_decoded_chars(len(line) - endpos)
return line[:endpos]
@property
def newlines(self):
return self._decoder.newlines if self._decoder else None
class StringIO(TextIOWrapper):
"""Text I/O implementation using an in-memory buffer.
    The initial_value argument sets the initial value of the object. The
    newline argument is like that of TextIOWrapper's constructor.
"""
def __init__(self, initial_value="", newline="\n"):
super(StringIO, self).__init__(BytesIO(),
encoding="utf-8",
errors="strict",
newline=newline)
# Issue #5645: make universal newlines semantics the same as in the
# C version, even under Windows.
if newline is None:
self._writetranslate = False
if initial_value:
if not isinstance(initial_value, unicode):
initial_value = unicode(initial_value)
self.write(initial_value)
self.seek(0)
def getvalue(self):
self.flush()
return self.buffer.getvalue().decode(self._encoding, self._errors)
def __repr__(self):
# TextIOWrapper tells the encoding in its repr. In StringIO,
        # that's an implementation detail.
return object.__repr__(self)
@property
def errors(self):
return None
@property
def encoding(self):
return None
def detach(self):
# This doesn't make sense on StringIO.
self._unsupported("detach")
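# A minimal usage sketch (illustrative only, not part of the original module)
# of the StringIO class defined above:
def _demo_stringio():
    sio = StringIO()
    sio.write(u"first\nsecond\n")
    sio.seek(0)
    return sio.readline(), sio.getvalue()   # (u"first\n", u"first\nsecond\n")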
| apache-2.0 |
mxOBS/deb-pkg_trusty_chromium-browser | third_party/scons-2.0.1/engine/SCons/Tool/SCCS.py | 61 | 2381 | """SCons.Tool.SCCS.py
Tool-specific initialization for SCCS.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/SCCS.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Action
import SCons.Builder
import SCons.Util
def generate(env):
"""Add a Builder factory function and construction variables for
SCCS to an Environment."""
def SCCSFactory(env=env):
""" """
import SCons.Warnings as W
W.warn(W.DeprecatedSourceCodeWarning, """The SCCS() factory is deprecated and there is no replacement.""")
act = SCons.Action.Action('$SCCSCOM', '$SCCSCOMSTR')
return SCons.Builder.Builder(action = act, env = env)
#setattr(env, 'SCCS', SCCSFactory)
env.SCCS = SCCSFactory
env['SCCS'] = 'sccs'
env['SCCSFLAGS'] = SCons.Util.CLVar('')
env['SCCSGETFLAGS'] = SCons.Util.CLVar('')
env['SCCSCOM'] = '$SCCS $SCCSFLAGS get $SCCSGETFLAGS $TARGET'
def exists(env):
return env.Detect('sccs')
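# A minimal usage sketch (hypothetical SConstruct fragment, illustrative only;
# note the SCCS() factory is deprecated, as warned above):
#   env = Environment(tools=['SCCS'])
#   bld = env.SCCS()              # Builder wired to $SCCSCOM ('sccs ... get ...')
#   bld(env, target='file.c', source=[])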
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| bsd-3-clause |
mmbtba/odoo | addons/document/wizard/__init__.py | 444 | 1084 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import document_configuration
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
reinout/ansible | lib/ansible/plugins/strategy/free.py | 47 | 9177 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import time
from ansible.errors import *
from ansible.playbook.included_file import IncludedFile
from ansible.plugins.strategy import StrategyBase
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class StrategyModule(StrategyBase):
def run(self, iterator, play_context):
'''
The "free" strategy is a bit more complex, in that it allows tasks to
be sent to hosts as quickly as they can be processed. This means that
some hosts may finish very quickly if run tasks result in little or no
work being done versus other systems.
The algorithm used here also tries to be more "fair" when iterating
through hosts by remembering the last host in the list to be given a task
and starting the search from there as opposed to the top of the hosts
list again, which would end up favoring hosts near the beginning of the
list.
'''
# the last host to be given a task
last_host = 0
result = True
work_to_do = True
while work_to_do and not self._tqm._terminated:
hosts_left = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
if len(hosts_left) == 0:
self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
result = False
break
work_to_do = False # assume we have no more work to do
starting_host = last_host # save current position so we know when we've
# looped back around and need to break
# try and find an unblocked host with a task to run
host_results = []
while True:
host = hosts_left[last_host]
self._display.debug("next free host: %s" % host)
host_name = host.get_name()
# peek at the next task for the host, to see if there's
# anything to do do for this host
(state, task) = iterator.get_next_task_for_host(host, peek=True)
self._display.debug("free host state: %s" % state)
self._display.debug("free host task: %s" % task)
if host_name not in self._tqm._failed_hosts and host_name not in self._tqm._unreachable_hosts and task:
# set the flag so the outer loop knows we've still found
# some work which needs to be done
work_to_do = True
self._display.debug("this host has work to do")
# check to see if this host is blocked (still executing a previous task)
                    if host_name not in self._blocked_hosts or not self._blocked_hosts[host_name]:
# pop the task, mark the host blocked, and queue it
self._blocked_hosts[host_name] = True
(state, task) = iterator.get_next_task_for_host(host)
self._display.debug("getting variables")
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
self._display.debug("done getting variables")
# check to see if this task should be skipped, due to it being a member of a
# role which has already run (and whether that role allows duplicate execution)
if task._role and task._role.has_run(host):
# If there is no metadata, the default behavior is to not allow duplicates,
# if there is metadata, check to see if the allow_duplicates flag was set to true
if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
self._display.debug("'%s' skipped because role has already run" % task)
continue
if not task.evaluate_tags(play_context.only_tags, play_context.skip_tags, task_vars) and task.action != 'setup':
self._display.debug("'%s' failed tag evaluation" % task)
continue
if task.action == 'meta':
# meta tasks store their args in the _raw_params field of args,
# since they do not use k=v pairs, so get that
meta_action = task.args.get('_raw_params')
if meta_action == 'noop':
# FIXME: issue a callback for the noop here?
continue
elif meta_action == 'flush_handlers':
# FIXME: in the 'free' mode, flushing handlers should result in
# only those handlers notified for the host doing the flush
self.run_handlers(iterator, play_context)
else:
raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
self._blocked_hosts[host_name] = False
else:
# handle step if needed, skip meta actions as they are used internally
if not self._step or self._take_step(task, host_name):
self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
self._queue_task(host, task, task_vars, play_context)
# move on to the next host and make sure we
# haven't gone past the end of our hosts list
last_host += 1
if last_host > len(hosts_left) - 1:
last_host = 0
# if we've looped around back to the start, break out
if last_host == starting_host:
break
results = self._process_pending_results(iterator)
host_results.extend(results)
try:
included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, loader=self._loader, variable_manager=self._variable_manager)
except AnsibleError as e:
return False
if len(included_files) > 0:
for included_file in included_files:
# included hosts get the task list while those excluded get an equal-length
# list of noop tasks, to make sure that they continue running in lock-step
try:
new_blocks = self._load_included_file(included_file, iterator=iterator)
except AnsibleError as e:
for host in included_file._hosts:
iterator.mark_host_failed(host)
self._display.warning(str(e))
continue
for host in hosts_left:
if host in included_file._hosts:
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=included_file._task)
final_blocks = []
for new_block in new_blocks:
final_blocks.append(new_block.filter_tagged_tasks(play_context, task_vars))
iterator.add_tasks(host, final_blocks)
# pause briefly so we don't spin lock
time.sleep(0.05)
try:
results = self._wait_on_pending_results(iterator)
host_results.extend(results)
except Exception as e:
# FIXME: ctrl+c can cause some failures here, so catch them
# with the appropriate error type
pass
# run the base class run() method, which executes the cleanup function
# and runs any outstanding handlers which have been triggered
return super(StrategyModule, self).run(iterator, play_context, result)
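# A minimal playbook sketch (YAML shown in comments, illustrative only) that
# selects this strategy so each host advances through its tasks independently:
#
#   - hosts: all
#     strategy: free
#     tasks:
#       - ping: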
| gpl-3.0 |
sandeepgupta2k4/tensorflow | tensorflow/python/kernel_tests/matrix_triangular_solve_op_test.py | 19 | 5617 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_triangular_solve."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.platform import test
class MatrixTriangularSolveOpTest(test.TestCase):
def _verifySolveAllWays(self, x, y, batch_dims=None):
for lower in True, False:
for adjoint in True, False:
for use_placeholder in True, False:
self._verifySolve(
x,
y,
lower=lower,
adjoint=adjoint,
batch_dims=batch_dims,
use_placeholder=use_placeholder)
def _verifySolve(self,
x,
y,
lower=True,
adjoint=False,
batch_dims=None,
use_placeholder=False):
for np_type in [np.float32, np.float64]:
a = x.astype(np_type)
b = y.astype(np_type)
# For numpy.solve we have to explicitly zero out the strictly
# upper or lower triangle.
if lower and a.size > 0:
a_np = np.tril(a)
elif a.size > 0:
a_np = np.triu(a)
else:
a_np = a
if adjoint:
a_np = np.conj(np.transpose(a_np))
if batch_dims is not None:
a = np.tile(a, batch_dims + [1, 1])
a_np = np.tile(a_np, batch_dims + [1, 1])
b = np.tile(b, batch_dims + [1, 1])
with self.test_session(use_gpu=True) as sess:
if use_placeholder:
a_tf = array_ops.placeholder(a.dtype)
b_tf = array_ops.placeholder(b.dtype)
tf_ans = linalg_ops.matrix_triangular_solve(
a_tf, b_tf, lower=lower, adjoint=adjoint)
tf_val = sess.run(tf_ans, feed_dict={a_tf: a, b_tf: b})
np_ans = np.linalg.solve(a_np, b)
else:
a_tf = constant_op.constant(a)
b_tf = constant_op.constant(b)
tf_ans = linalg_ops.matrix_triangular_solve(
a_tf, b_tf, lower=lower, adjoint=adjoint)
tf_val = tf_ans.eval()
np_ans = np.linalg.solve(a_np, b)
self.assertEqual(np_ans.shape, tf_ans.get_shape())
self.assertEqual(np_ans.shape, tf_val.shape)
self.assertAllClose(np_ans, tf_val)
def testSolve(self):
# 1x1 matrix, single rhs.
matrix = np.array([[0.1]])
rhs0 = np.array([[1.]])
self._verifySolveAllWays(matrix, rhs0)
# 2x2 matrices, single right-hand side.
matrix = np.array([[1., 2.], [3., 4.]])
rhs0 = np.array([[1.], [1.]])
self._verifySolveAllWays(matrix, rhs0)
# 2x2 matrices, 3 right-hand sides.
rhs1 = np.array([[1., 0., 1.], [0., 1., 1.]])
self._verifySolveAllWays(matrix, rhs1)
def testSolveBatch(self):
matrix = np.array([[1., 2.], [3., 4.]])
rhs = np.array([[1., 0., 1.], [0., 1., 1.]])
# Batch of 2x3x2x2 matrices, 2x3x2x3 right-hand sides.
self._verifySolveAllWays(matrix, rhs, batch_dims=[2, 3])
# Batch of 3x2x2x2 matrices, 3x2x2x3 right-hand sides.
self._verifySolveAllWays(matrix, rhs, batch_dims=[3, 2])
def testNonSquareMatrix(self):
# A non-square matrix should cause an error.
matrix = np.array([[1., 2., 3.], [3., 4., 5.]])
with self.test_session():
with self.assertRaises(ValueError):
self._verifySolve(matrix, matrix)
with self.assertRaises(ValueError):
self._verifySolve(matrix, matrix, batch_dims=[2, 3])
def testWrongDimensions(self):
# The matrix should have the same number of rows as the
# right-hand sides.
matrix = np.array([[1., 0.], [0., 1.]])
rhs = np.array([[1., 0.]])
with self.test_session():
with self.assertRaises(ValueError):
self._verifySolve(matrix, rhs)
with self.assertRaises(ValueError):
self._verifySolve(matrix, rhs, batch_dims=[2, 3])
def testNotInvertible(self):
# The input should be invertible.
# The matrix is singular because it has a zero on the diagonal.
singular_matrix = np.array([[1., 0., -1.], [-1., 0., 1.], [0., -1., 1.]])
with self.test_session():
with self.assertRaisesOpError("Input matrix is not invertible."):
self._verifySolve(singular_matrix, singular_matrix)
with self.assertRaisesOpError("Input matrix is not invertible."):
self._verifySolve(singular_matrix, singular_matrix, batch_dims=[2, 3])
def testEmpty(self):
self._verifySolve(np.empty([0, 2, 2]), np.empty([0, 2, 2]), lower=True)
self._verifySolve(np.empty([2, 0, 0]), np.empty([2, 0, 0]), lower=True)
self._verifySolve(np.empty([2, 0, 0]), np.empty([2, 0, 0]), lower=False)
self._verifySolve(
np.empty([2, 0, 0]), np.empty([2, 0, 0]), lower=True, batch_dims=[3, 2])
if __name__ == "__main__":
test.main()
| apache-2.0 |
dyn888/youtube-dl | youtube_dl/extractor/europa.py | 48 | 3416 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
int_or_none,
orderedSet,
parse_duration,
qualities,
unified_strdate,
xpath_text
)
class EuropaIE(InfoExtractor):
_VALID_URL = r'https?://ec\.europa\.eu/avservices/(?:video/player|audio/audioDetails)\.cfm\?.*?\bref=(?P<id>[A-Za-z0-9-]+)'
_TESTS = [{
'url': 'http://ec.europa.eu/avservices/video/player.cfm?ref=I107758',
'md5': '574f080699ddd1e19a675b0ddf010371',
'info_dict': {
'id': 'I107758',
'ext': 'mp4',
'title': 'TRADE - Wikileaks on TTIP',
'description': 'NEW LIVE EC Midday press briefing of 11/08/2015',
'thumbnail': 're:^https?://.*\.jpg$',
'upload_date': '20150811',
'duration': 34,
'view_count': int,
'formats': 'mincount:3',
}
}, {
'url': 'http://ec.europa.eu/avservices/video/player.cfm?sitelang=en&ref=I107786',
'only_matching': True,
}, {
'url': 'http://ec.europa.eu/avservices/audio/audioDetails.cfm?ref=I-109295&sitelang=en',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
playlist = self._download_xml(
'http://ec.europa.eu/avservices/video/player/playlist.cfm?ID=%s' % video_id, video_id)
def get_item(type_, preference):
items = {}
for item in playlist.findall('./info/%s/item' % type_):
lang, label = xpath_text(item, 'lg', default=None), xpath_text(item, 'label', default=None)
if lang and label:
items[lang] = label.strip()
for p in preference:
if items.get(p):
return items[p]
query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
preferred_lang = query.get('sitelang', ('en', ))[0]
preferred_langs = orderedSet((preferred_lang, 'en', 'int'))
title = get_item('title', preferred_langs) or video_id
description = get_item('description', preferred_langs)
        thumbnail = xpath_text(playlist, './info/thumburl', 'thumbnail')
upload_date = unified_strdate(xpath_text(playlist, './info/date', 'upload date'))
duration = parse_duration(xpath_text(playlist, './info/duration', 'duration'))
view_count = int_or_none(xpath_text(playlist, './info/views', 'views'))
language_preference = qualities(preferred_langs[::-1])
formats = []
for file_ in playlist.findall('./files/file'):
video_url = xpath_text(file_, './url')
if not video_url:
continue
lang = xpath_text(file_, './lg')
formats.append({
'url': video_url,
'format_id': lang,
'format_note': xpath_text(file_, './lglabel'),
'language_preference': language_preference(lang)
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
            'thumbnail': thumbnail,
'upload_date': upload_date,
'duration': duration,
'view_count': view_count,
'formats': formats
}
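# A minimal usage sketch (illustrative only; assumes youtube-dl's public
# YoutubeDL API) showing how this extractor would typically be driven:
def _demo_europa_info():
    from youtube_dl import YoutubeDL
    with YoutubeDL({'skip_download': True}) as ydl:
        return ydl.extract_info(
            'http://ec.europa.eu/avservices/video/player.cfm?ref=I107758',
            download=False)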
| unlicense |
idncom/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/compile_all.py | 384 | 1193 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer [email protected]
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import compileall
compileall.compile_dir('package')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
BhallaLab/moose-examples | paper-2015/Fig2_elecModels/Fig2A.py | 2 | 23553 |
#/**********************************************************************
#** This program is part of 'MOOSE', the
#** Messaging Object Oriented Simulation Environment.
#** Copyright (C) 2003-2014 Upinder S. Bhalla. and NCBS
#** It is made available under the terms of the
#** GNU Lesser General Public License version 2.1
#** See the file COPYING.LIB for the full notice.
#**********************************************************************/
'''
This LIF network with Ca plasticity is based on:
David Higgins, Michael Graupner, Nicolas Brunel
Memory Maintenance in Synapses with Calcium-Based
Plasticity in the Presence of Background Activity
PLOS Computational Biology, 2014.
Author: Aditya Gilra, NCBS, Bangalore, October, 2014.
'''
## import modules and functions to be used
import numpy as np
import matplotlib.pyplot as plt
import random
import time
import moose
np.random.seed(100) # set seed for reproducibility of simulations
random.seed(100) # set seed for reproducibility of simulations
moose.seed(100) # set seed for reproducibility of simulations
#############################################
# All parameters as per:
# David Higgins, Michael Graupner, Nicolas Brunel
# Memory Maintenance in Synapses with Calcium-Based
# Plasticity in the Presence of Background Activity
# PLOS Computational Biology, 2014.
#############################################
#############################################
# Neuron model
#############################################
# equation: dv/dt = (1/taum)*(-(v-el)) + inp
# with spike when v>vt, reset to vr
el = -70e-3 #V # Resting potential
vt = -50e-3 #V # Spiking threshold
Rm = 20e6 #Ohm # Only taum is needed, but LIF neuron accepts
Cm = 1e-9 #F # Rm and Cm and constructs taum=Rm*Cm
taum = Rm*Cm #s # Membrane time constant is 20 ms
vr = -60e-3 #V # Reset potential
Iinject = 11.5e-3/Rm # constant current injection into LIF neuron
# same as setting el=-70+15=-55 mV and inp=0
noiseInj = True # inject noisy current into each cell: boolean
noiseInjSD = 5e-3/Rm #A # SD of noise added to 'current'
# SD*sqrt(taum) is used as noise current SD
#############################################
# Network parameters: numbers
#############################################
red_fact = 10 # reduction factor for N,C,J
N = 10000//red_fact # Total number of neurons (integer division keeps N an int)
fexc = 0.8 # Fraction of exc neurons
NE = int(fexc*N) # Number of excitatory cells
NI = N-NE # Number of inhibitory cells
#############################################
# Simulation parameters
#############################################
simtime = 1200.0 #s # Simulation time
dt = 1e-3 #s # time step
plotDt = 1.0 #s # Time step for storing output.
#############################################
# Network parameters: synapses (not for ExcInhNetBase)
#############################################
## With each presynaptic spike in exc / inh neuron,
## J / -g*J is added to post-synaptic Vm -- delta-fn synapse
## Since LIF neuron used below is derived from Compartment class,
## conductance-based synapses (SynChan class) can also be used.
C = 500//red_fact # Number of incoming connections on each neuron (exc or inh)
# 5% conn prob between any two neurons
# Since we reduced N from 10000 to 1000, C = 50 instead of 500
# but we need to increase J by 10 to maintain total input per neuron
fC = fexc # fraction fC incoming connections are exc, rest inhibitory
J = 0.2e-3 #V # exc strength is J (in V as we add to voltage)
# Critical J is ~ 0.45e-3 V in paper for N = 10000, C = 1000
# See what happens for J = 0.2e-3 V versus J = 0.8e-3 V
J *= red_fact # Multiply J by red_fact to compensate C/red_fact.
g = 4.0 # -gJ is the inh strength. For exc-inh balance g >~ f(1-f)=4
syndelay = dt # synaptic delay:
refrT = 0.0 # s # absolute refractory time
#############################################
# Ca Plasticity parameters: synapses (not for ExcInhNetBase)
#############################################
CaPlasticity = True # set it True or False to turn on/off plasticity
tauCa = 22.6936e-3 # s # Ca decay time scale
tauSyn = 346.3615 # s # synaptic plasticity time scale
## in vitro values in Higgins et al 2014, faster plasticity
CaPre = 0.56175 # mM
CaPost = 1.2964 # mM
## in vivo values in Higgins et al 2014, slower plasticity
#CaPre = 0.33705 # mM
#CaPost = 0.74378 # mM
delayD = 4.6098e-3 # s # CaPre is added to Ca after this delay
# proxy for rise-time of NMDA
thetaD = 1.0 # mM # depression threshold for Ca
thetaP = 1.3 # mM # potentiation threshold for Ca
gammaD = 331.909 # factor for depression term
gammaP = 725.085 # factor for potentiation term
eqWeight = 0.16 # initial synaptic weight
# gammaP/(gammaP+gammaD) = eq weight w/o noise
# but see eqn (22), noiseSD also appears
bistable = True # if bistable is True, use bistable potential for weights
noisy = True # use noisy weight updates given by noiseSD
noiseSD = 3.3501 # if noisy, use noiseSD (3.3501 from Higgins et al 2014)
#noiseSD = 0.1 # if bistable==False, use a smaller noise than in Higgins et al 2014
#############################################
# Exc-Inh network base class without connections
#############################################
class ExcInhNetBase:
"""Simulates and plots LIF neurons (exc and inh separate).
Author: Aditya Gilra, NCBS, Bangalore, India, October 2014
"""
def __init__(self,N=N,fexc=fexc,el=el,vt=vt,Rm=Rm,Cm=Cm,vr=vr,\
refrT=refrT,Iinject=Iinject):
""" Constructor of the class """
self.N = N # Total number of neurons
self.fexc = fexc # Fraction of exc neurons
self.NmaxExc = int(fexc*N) # max idx of exc neurons, rest inh
self.el = el # Resting potential
self.vt = vt # Spiking threshold
self.taum = taum # Membrane time constant
self.vr = vr # Reset potential
self.refrT = refrT # Absolute refractory period
self.Rm = Rm # Membrane resistance
self.Cm = Cm # Membrane capacitance
self.Iinject = Iinject # constant input current
self.noiseInjSD = noiseInjSD # SD of injected noise
self.simif = False # whether the simulation is complete
self._setup_network()
def __str__(self):
return "LIF network of %d neurons "\
"having %d exc." % (self.N,self.NmaxExc)
def _setup_network(self):
"""Sets up the network (_init_network is enough)"""
self.network = moose.LIF( 'network', self.N );
moose.le( '/network' )
self.network.vec.Em = self.el
self.network.vec.thresh = self.vt
self.network.vec.refractoryPeriod = self.refrT
self.network.vec.Rm = self.Rm
self.network.vec.vReset = self.vr
self.network.vec.Cm = self.Cm
if not noiseInj:
self.network.vec.inject = self.Iinject
else:
## inject a constant + noisy current
## values are set in self.simulate()
self.noiseTables = moose.StimulusTable('noiseTables',self.N)
moose.connect( self.noiseTables, 'output', \
self.network, 'setInject', 'OneToOne')
def _init_network(self,v0=el):
"""Initialises the network variables before simulation"""
self.network.vec.initVm = v0
def simulate(self,simtime=simtime,dt=dt,plotif=False,**kwargs):
self.dt = dt
self.simtime = simtime
self.T = np.ceil(simtime/dt)
self.trange = np.arange(0,self.simtime,dt)
for i in range(self.N):
if noiseInj:
## Gaussian white noise SD added every dt interval should be
## divided by sqrt(dt), as the later numerical integration
## will multiply it by dt.
## See the Euler-Maruyama method, numerical integration in
## http://www.scholarpedia.org/article/Stochastic_dynamical_systems
self.noiseTables.vec[i].vector = self.Iinject + \
np.random.normal( \
scale=self.noiseInjSD*np.sqrt(self.Rm*self.Cm/self.dt), \
size=int(self.T)
) # scale = SD
self.noiseTables.vec[i].stepSize = 0 # use current time
# as x value for interpolation
self.noiseTables.vec[i].stopTime = self.simtime
self._init_network(**kwargs)
if plotif:
self._init_plots()
# moose simulation
#moose.useClock( 1, '/network', 'process' )
#moose.setClock( 0, dt )
#moose.setClock( 1, dt )
#moose.setClock( 2, dt )
#moose.setClock( 3, dt )
#moose.setClock( 9, dt )
## Do need to set the dt for MOOSE clocks
for i in range(10):
moose.setClock( i, dt )
moose.setClock( 18, plotDt )
t1 = time.time()
print('reinit MOOSE -- takes a while ~20s.')
moose.reinit()
print(('reinit time t = ', time.time() - t1))
t1 = time.time()
print('starting')
simadvance = self.simtime / 50.0
for i in range( 50 ):
moose.start( simadvance )
print(('at t = ', i * simadvance, 'realtime = ', time.time() - t1))
#moose.start(self.simtime)
print(('runtime for ', self.simtime, 'sec, is t = ', time.time() - t1))
if plotif:
self._plot()
def _init_plots(self):
## make a few tables to store a few Vm-s
numVms = 10
self.plots = moose.Table2( '/plotVms', numVms )
## draw numVms out of N neurons
nrnIdxs = random.sample(list(range(self.N)),numVms)
for i in range( numVms ):
moose.connect( self.network.vec[nrnIdxs[i]], 'VmOut', \
self.plots.vec[i], 'input')
## make self.N tables to store spikes of all neurons
self.spikes = moose.Table2( '/plotSpikes', self.N )
moose.connect( self.network, 'spikeOut', \
self.spikes, 'input', 'OneToOne' )
## make 2 tables to store spikes of all exc and all inh neurons
self.spikesExc = moose.Table2( '/plotSpikesAllExc' )
for i in range(self.NmaxExc):
moose.connect( self.network.vec[i], 'spikeOut', \
self.spikesExc, 'input' )
self.spikesInh = moose.Table2( '/plotSpikesAllInh' )
for i in range(self.NmaxExc,self.N):
moose.connect( self.network.vec[i], 'spikeOut', \
self.spikesInh, 'input' )
def _plot(self):
""" plots the spike raster for the simulated net"""
#############################################
# Exc-Inh network class with Ca plasticity based connections
# (inherits from ExcInhNetBase)
#############################################
class ExcInhNet(ExcInhNetBase):
""" Recurrent network simulation """
def __init__(self,J=J,incC=C,fC=fC,scaleI=g,syndelay=syndelay,**kwargs):
"""Overloads base (parent) class"""
self.J = J # exc connection weight
self.incC = incC # number of incoming connections per neuron
self.fC = fC # fraction of exc incoming connections
self.excC = int(fC*incC)# number of exc incoming connections
self.scaleI = scaleI # inh weight is scaleI*J
self.syndelay = syndelay# synaptic delay
# call the parent class constructor
ExcInhNetBase.__init__(self,**kwargs)
def __str__(self):
return "LIF network of %d neurons "\
"of which %d are exc." % (self.N,self.NmaxExc)
def _init_network(self,**args):
ExcInhNetBase._init_network(self,**args)
def _init_plots(self):
ExcInhNetBase._init_plots(self)
self.recN = 50 # number of neurons for which to record weights and Ca
if CaPlasticity:
## make tables to store weights of recN exc synapses
## for each post-synaptic exc neuron
self.weights = moose.Table2( '/plotWeights', self.excC*self.recN )
for i in range(self.recN): # range(self.N) is too large
for j in range(self.excC):
moose.connect( self.weights.vec[self.excC*i+j], 'requestOut',
self.synsEE.vec[i*self.excC+j].synapse[0], 'getWeight')
self.CaTables = moose.Table2( '/plotCa', self.recN )
for i in range(self.recN): # range(self.N) is too large
moose.connect( self.CaTables.vec[i], 'requestOut',
self.synsEE.vec[i*self.excC+j], 'getCa')
def _setup_network(self):
## Set up the neurons without connections
ExcInhNetBase._setup_network(self)
## Now, add in the connections...
## Each pre-synaptic spike cause Vm of post-neuron to rise by
## synaptic weight in one time step i.e. delta-fn synapse.
## Since LIF neuron is derived from Compartment class,
## conductance-based synapses (SynChan class) can also be used.
## E to E synapses can be plastic
## Two ways to do this:
## 1) Each LIF neuron has one incoming postsynaptic SynHandler,
## which collects the activation from all presynaptic neurons,
## but then a common Ca pool is used.
## 2) Each LIF neuron has multiple postsyanptic SynHandlers,
## one for each pre-synaptic neuron, i.e. one per synapse,
## then each synapse has a different Ca pool.
## Here we go with option 2) as per Higgins et al 2014 (Brunel private email)
## separate SynHandler per EE synapse, thus NmaxExc*excC
if CaPlasticity:
self.synsEE = moose.GraupnerBrunel2012CaPlasticitySynHandler( \
'/network/synsEE', self.NmaxExc*self.excC )
else:
self.synsEE = moose.SimpleSynHandler( \
'/network/synsEE', self.NmaxExc*self.excC )
moose.useClock( 0, '/network/synsEE', 'process' )
## I to E synapses are not plastic
self.synsIE = moose.SimpleSynHandler( '/network/synsIE', self.NmaxExc )
## all synapses to I neurons are not plastic
self.synsI = moose.SimpleSynHandler( '/network/synsI', self.N-self.NmaxExc )
## connect all SynHandlers to their respective neurons
for i in range(self.NmaxExc):
moose.connect( self.synsIE.vec[i], 'activationOut', \
self.network.vec[i], 'activation' )
for i in range(self.NmaxExc,self.N):
moose.connect( self.synsI.vec[i-self.NmaxExc], 'activationOut', \
self.network.vec[i], 'activation' )
## Connections from some Exc/Inh neurons to each Exc neuron
for i in range(0,self.NmaxExc):
self.synsIE.vec[i].numSynapses = self.incC-self.excC
## Connections from some Exc neurons to each Exc neuron
## draw excC number of neuron indices out of NmaxExc neurons
preIdxs = random.sample(list(range(self.NmaxExc)),self.excC)
## connect these presynaptically to i-th post-synaptic neuron
for synnum,preIdx in enumerate(preIdxs):
synidx = i*self.excC+synnum
synHand = self.synsEE.vec[synidx]
## connect each synhandler to the post-synaptic neuron
moose.connect( synHand, 'activationOut', \
self.network.vec[i], 'activation' )
## important to set numSynapses = 1 for each synHandler,
## doesn't create synapses if you set the full array of SynHandlers
synHand.numSynapses = 1
synij = synHand.synapse[0]
connectExcId = moose.connect( self.network.vec[preIdx], \
'spikeOut', synij, 'addSpike')
synij.delay = syndelay
if CaPlasticity:
## set parameters for the Ca Plasticity SynHandler
## have to be set for each SynHandler
## doesn't set for full array at a time
synHand.CaInit = 0.0
synHand.tauCa = tauCa
synHand.tauSyn = tauSyn
synHand.CaPre = CaPre
synHand.CaPost = CaPost
synHand.delayD = delayD
synHand.thetaD = thetaD
synHand.thetaP = thetaP
synHand.gammaD = gammaD
synHand.gammaP = gammaP
synHand.weightMax = 1.0 # bounds on the weight
synHand.weightMin = 0.0
synHand.weightScale = \
self.J*2.0 # 0.2 mV, weight*weightScale is activation
# typically weight <~ 0.5, so activation <~ J
synHand.noisy = noisy
synHand.noiseSD = noiseSD
synHand.bistable = bistable
moose.connect( self.network.vec[i], \
'spikeOut', synHand, 'addPostSpike')
synij.weight = eqWeight # activation = weight*weightScale
# weightScale = 2*J
# weight <~ 0.5
## Randomly set 5% of them to be 1.0
if np.random.uniform()<0.05:
synij.weight = 1.0
else:
synij.weight = self.J # no weightScale here, activation = weight
## Connections from some Inh neurons to each Exc neuron
## draw inhC=incC-excC number of neuron indices out of inhibitory neurons
preIdxs = random.sample(list(range(self.NmaxExc,self.N)),self.incC-self.excC)
## connect these presynaptically to i-th post-synaptic neuron
for synnum,preIdx in enumerate(preIdxs):
synij = self.synsIE.vec[i].synapse[synnum]
connectInhId = moose.connect( self.network.vec[preIdx], \
'spikeOut', synij, 'addSpike')
synij.delay = syndelay
synij.weight = -self.scaleI*self.J # activation = weight
## Connections from some Exc/Inh neurons to each Inh neuron
for i in range(self.N-self.NmaxExc):
## each neuron has incC number of synapses
self.synsI.vec[i].numSynapses = self.incC
## draw excC number of neuron indices out of NmaxExc neurons
preIdxs = random.sample(list(range(self.NmaxExc)),self.excC)
## connect these presynaptically to i-th post-synaptic neuron
for synnum,preIdx in enumerate(preIdxs):
synij = self.synsI.vec[i].synapse[synnum]
connectExcId = moose.connect( self.network.vec[preIdx], \
'spikeOut', synij, 'addSpike')
synij.delay = syndelay
synij.weight = self.J # activation = weight
## draw inhC=incC-excC number of neuron indices out of inhibitory neurons
preIdxs = random.sample(list(range(self.NmaxExc,self.N)),self.incC-self.excC)
## connect these presynaptically to i-th post-synaptic neuron
for synnum,preIdx in enumerate(preIdxs):
synij = self.synsI.vec[i].synapse[ self.excC + synnum ]
connectInhId = moose.connect( self.network.vec[preIdx], \
'spikeOut', synij, 'addSpike')
synij.delay = syndelay
synij.weight = -self.scaleI*self.J # activation = weight
moose.useClock( 0, '/network/synsIE', 'process' )
moose.useClock( 0, '/network/synsI', 'process' )
#############################################
# Analysis functions
#############################################
def rate_from_spiketrain(spiketimes,fulltime,dt,tau=50e-3):
"""
Returns a rate series of spiketimes convolved with a Gaussian kernel;
all times must be in SI units.
"""
sigma = tau/2.
## normalized Gaussian kernel, integral with dt is normed to 1
## to count as 1 spike smeared over a finite interval
norm_factor = 1./(np.sqrt(2.*np.pi)*sigma)
gauss_kernel = np.array([norm_factor*np.exp(-x**2/(2.*sigma**2))\
for x in np.arange(-5.*sigma,5.*sigma+dt,dt)])
kernel_len = len(gauss_kernel)
## need to accommodate half kernel_len on either side of fulltime
rate_full = np.zeros(int(fulltime/dt)+kernel_len)
for spiketime in spiketimes:
idx = int(spiketime/dt)
rate_full[idx:idx+kernel_len] += gauss_kernel
## only the middle fulltime part of the rate series
## This is already in Hz,
## since should have multiplied by dt for above convolution
## and divided by dt to get a rate, so effectively not doing either.
    return rate_full[kernel_len//2:kernel_len//2+int(fulltime/dt)]
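# A minimal usage sketch (hypothetical spike times; illustrative only):
def _demo_rate_from_spiketrain():
    spiketimes = [0.100, 0.105, 0.300]  # s
    rate = rate_from_spiketrain(spiketimes, fulltime=0.5, dt=1e-3)
    return rate.max()                   # peak smoothed rate in Hz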
#############################################
# Make plots
#############################################
def extra_plots(net):
## extra plots apart from the spike rasters
## individual neuron Vm-s
timeseries = net.trange
## individual neuron firing rates
## population firing rates
## Ca plasticity: weight vs time plots
if CaPlasticity:
## Ca versus time in post-synaptic neurons
for i in range(net.recN): # range(net.N) is too large
net.CaTables.vec[i].xplot( 'ca.xplot', 'Ca_' + str(i) )
for i in range(net.recN): # range(net.N) is too large
for j in range(net.excC):
k = net.excC*i+j
net.weights.vec[k].xplot( 'wt.xplot', 'w_' + str(k) )
## all EE weights are used for a histogram
weights = [ net.synsEE.vec[i*net.excC+j].synapse[0].weight \
for i in range(net.NmaxExc) for j in range(net.excC) ]
histo, edges = np.histogram( weights, bins=100 )
print()
print(histo)
print()
print(edges)
print()
plt.figure()
plt.hist(weights, bins=100)
plt.title("Histogram of efficacies")
plt.xlabel("Efficacy (arb)")
plt.ylabel("# per bin")
if __name__=='__main__':
## ExcInhNetBase has unconnected neurons,
## ExcInhNet connects them
## Instantiate either ExcInhNetBase or ExcInhNet below
#net = ExcInhNetBase(N=N)
net = ExcInhNet(N=N)
print(net)
## Important to distribute the initial Vm-s
## else weak coupling gives periodic synchronous firing
net.simulate(simtime,plotif=True, v0=np.random.uniform(el-20e-3,vt,size=N))
plt.figure()
extra_plots(net)
plt.show()
| gpl-2.0 |
a-veitch/grpc | src/python/grpcio/grpc/framework/foundation/callable_util.py | 21 | 3960 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utilities for working with callables."""
import abc
import collections
import enum
import functools
import logging
import six
class Outcome(six.with_metaclass(abc.ABCMeta)):
"""A sum type describing the outcome of some call.
Attributes:
kind: One of Kind.RETURNED or Kind.RAISED respectively indicating that the
call returned a value or raised an exception.
return_value: The value returned by the call. Must be present if kind is
Kind.RETURNED.
exception: The exception raised by the call. Must be present if kind is
Kind.RAISED.
"""
@enum.unique
class Kind(enum.Enum):
"""Identifies the general kind of the outcome of some call."""
RETURNED = object()
RAISED = object()
class _EasyOutcome(
collections.namedtuple(
'_EasyOutcome', ['kind', 'return_value', 'exception']),
Outcome):
"""A trivial implementation of Outcome."""
def _call_logging_exceptions(behavior, message, *args, **kwargs):
try:
return _EasyOutcome(Outcome.Kind.RETURNED, behavior(*args, **kwargs), None)
except Exception as e: # pylint: disable=broad-except
logging.exception(message)
return _EasyOutcome(Outcome.Kind.RAISED, None, e)
def with_exceptions_logged(behavior, message):
"""Wraps a callable in a try-except that logs any exceptions it raises.
Args:
behavior: Any callable.
message: A string to log if the behavior raises an exception.
Returns:
A callable that when executed invokes the given behavior. The returned
    callable takes the same arguments as the given behavior but returns an
    Outcome describing whether the given behavior returned a value or
raised an exception.
"""
@functools.wraps(behavior)
def wrapped_behavior(*args, **kwargs):
return _call_logging_exceptions(behavior, message, *args, **kwargs)
return wrapped_behavior
def call_logging_exceptions(behavior, message, *args, **kwargs):
"""Calls a behavior in a try-except that logs any exceptions it raises.
Args:
behavior: Any callable.
message: A string to log if the behavior raises an exception.
*args: Positional arguments to pass to the given behavior.
**kwargs: Keyword arguments to pass to the given behavior.
Returns:
An Outcome describing whether the given behavior returned a value or raised
an exception.
"""
return _call_logging_exceptions(behavior, message, *args, **kwargs)
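# Illustrative usage sketch (not part of the upstream grpc module): shows how
# with_exceptions_logged turns an ordinary callable into one that reports
# success or failure as an Outcome instead of raising. The helper parse_int
# and the sample inputs below are hypothetical.
if __name__ == '__main__':
  def parse_int(text):
    return int(text)
  safe_parse = with_exceptions_logged(parse_int, 'parse_int raised:')
  ok = safe_parse('42')
  bad = safe_parse('not a number')  # the ValueError is logged, not raised
  print('%s %r' % (ok.kind, ok.return_value))
  print('%s %r' % (bad.kind, bad.exception))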
| bsd-3-clause |
VRToxin-AOSP/android_external_skia | platform_tools/android/gyp_gen/tool_makefile_writer.py | 28 | 3741 | #!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Code for generating Android.mk for a tool."""
import android_framework_gyp
import gypd_parser
import makefile_writer
import os
import vars_dict_lib
SKIA_RESOURCES = (
"""
# Setup directory to store skia's resources in the directory structure that
# the Android testing infrastructure expects
skia_res_dir := $(call intermediates-dir-for,PACKAGING,skia_resources)/DATA
$(shell mkdir -p $(skia_res_dir))
$(shell cp -r $(LOCAL_PATH)/../resources/. $(skia_res_dir)/skia_resources)
LOCAL_PICKUP_FILES := $(skia_res_dir)
skia_res_dir :=
"""
)
def write_tool_android_mk(target_dir, var_dict):
"""Write Android.mk for a Skia tool.
Args:
target_dir: Destination for the makefile. Must not be None.
var_dict: VarsDict containing variables for the makefile.
"""
target_file = os.path.join(target_dir, 'Android.mk')
with open(target_file, 'w') as f:
f.write(makefile_writer.AUTOGEN_WARNING)
makefile_writer.write_local_path(f)
makefile_writer.write_clear_vars(f)
makefile_writer.write_local_vars(f, var_dict, False, None)
f.write(SKIA_RESOURCES)
f.write('include $(BUILD_NATIVE_TEST)\n')
def generate_tool(gyp_dir, target_file, skia_trunk, dest_dir,
skia_lib_var_dict, local_module_name, local_module_tags,
desired_targets, gyp_source_dir=None):
"""Common steps for building one of the skia tools.
Parse a gyp file and create an Android.mk for this tool.
Args:
gyp_dir: Directory containing gyp files.
target_file: gyp file for the project to be built, contained in gyp_dir.
skia_trunk: Trunk of Skia, used for determining the destination to write
'Android.mk'.
dest_dir: Destination for 'Android.mk', relative to skia_trunk. Used for
both writing relative paths in the makefile and for determining the
        destination to write it.
skia_lib_var_dict: VarsDict representing libskia. Used as a reference to
ensure we do not duplicate anything in this Android.mk.
local_module_name: Name for this tool, to set as LOCAL_MODULE.
    local_module_tags: Tags to pass to LOCAL_MODULE_TAGS.
desired_targets: List of targets to parse.
gyp_source_dir: Source directory for gyp.
"""
result_file = android_framework_gyp.main(target_dir=gyp_dir,
target_file=target_file,
skia_arch_type='other',
have_neon=False,
gyp_source_dir=gyp_source_dir)
var_dict = vars_dict_lib.VarsDict()
# Add known targets from skia_lib, so we do not reparse them.
var_dict.KNOWN_TARGETS.set(skia_lib_var_dict.KNOWN_TARGETS)
gypd_parser.parse_gypd(var_dict, result_file, dest_dir, desired_targets)
android_framework_gyp.clean_gypd_files(gyp_dir)
var_dict.LOCAL_MODULE.add(local_module_name)
for tag in local_module_tags:
var_dict.LOCAL_MODULE_TAGS.add(tag)
# No need for defines that are already in skia_lib.
for define in skia_lib_var_dict.DEFINES:
try:
var_dict.DEFINES.remove(define)
except ValueError:
# Okay if the define was not part of the parse for our tool.
pass
if skia_trunk:
full_dest = os.path.join(skia_trunk, dest_dir)
else:
full_dest = dest_dir
# If the path does not exist, create it. This will happen during testing,
# where there is no subdirectory for each tool (just a temporary folder).
if not os.path.exists(full_dest):
os.mkdir(full_dest)
write_tool_android_mk(target_dir=full_dest, var_dict=var_dict)
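# Illustrative sketch (not part of upstream Skia): a hypothetical invocation of
# generate_tool. The gyp file name, module name and tags below are made up, and
# skia_lib_vars stands for the VarsDict produced when libskia itself is parsed.
#
#   generate_tool(gyp_dir='gyp', target_file='tests.gyp', skia_trunk=None,
#                 dest_dir='tests', skia_lib_var_dict=skia_lib_vars,
#                 local_module_name='skia_test', local_module_tags=['tests'],
#                 desired_targets=['tests'])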
| bsd-3-clause |
82d28a/laikaboss | laikaboss/modules/explode_ole.py | 3 | 3065 | # Copyright 2015 Lockheed Martin Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import olefile
from laikaboss.objectmodel import ModuleObject, ExternalVars, QuitScanException, \
GlobalScanTimeoutError, GlobalModuleTimeoutError
from laikaboss.util import log_module
from laikaboss.si_module import SI_MODULE
class EXPLODE_OLE(SI_MODULE):
def __init__(self,):
self.module_name = "EXPLODE_OLE"
self.global_search = "GLOBAL_SEARCH"
def _run(self, scanObject, result, depth, args):
moduleResult = []
minFileSize = 0 #Explode everything!
useUnvalidatedFilenames = 0
if 'minFileSize' in args:
try:
minFileSize = int(args['minFileSize'])
except (QuitScanException, GlobalScanTimeoutError, GlobalModuleTimeoutError):
raise
except:
pass
if 'useUnvalidatedFilenames' in args:
try:
                useUnvalidatedFilenames = int(args['useUnvalidatedFilenames'])
except (QuitScanException, GlobalScanTimeoutError, GlobalModuleTimeoutError):
raise
except:
pass
ole = olefile.OleFileIO(scanObject.buffer)
lstStreams = ole.listdir()
numStreams = 0
for stream in lstStreams:
try:
if ole.get_size(stream) >= minFileSize:
numStreams += 1
streamF = ole.openstream(stream)
childBuffer = streamF.read()
if childBuffer:
filename = "e_ole_stream_"+str(numStreams)
try:
u = unicode( str(stream), "utf-8" )
filename = u.encode( "utf-8" )
except (QuitScanException, GlobalScanTimeoutError, GlobalModuleTimeoutError):
raise
except:
pass #keep ole_stream_number as filename
moduleResult.append(ModuleObject(buffer=childBuffer,
externalVars=ExternalVars(filename=filename)))
except (QuitScanException, GlobalScanTimeoutError, GlobalModuleTimeoutError):
raise
except:
log_module("MSG", self.module_name, 0, scanObject, result, "ERROR EXTRACTING STREAM: "+str(stream))
ole.close()
return moduleResult
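# Illustrative sketch (not part of upstream Laika BOSS): the same olefile calls
# used in _run() above, applied directly to a document on disk. The path
# 'sample.doc' is hypothetical.
if __name__ == '__main__':
    _ole = olefile.OleFileIO('sample.doc')
    for _stream in _ole.listdir():
        print("%s %d" % ("/".join(_stream), _ole.get_size(_stream)))
    _ole.close()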
| apache-2.0 |
ms-iot/python | cpython/Tools/scripts/findnocoding.py | 48 | 2951 | #!/usr/bin/env python3
"""List all those Python files that require a coding directive
Usage: findnocoding.py dir1 [dir2...]
"""
__author__ = "Oleg Broytmann, Georg Brandl"
import sys, os, re, getopt
# our pysource module finds Python source files
try:
import pysource
except ImportError:
# emulate the module with a simple os.walk
class pysource:
has_python_ext = looks_like_python = can_be_compiled = None
def walk_python_files(self, paths, *args, **kwargs):
for path in paths:
                if os.path.isfile(path) and path.endswith(".py"):
                    yield path
elif os.path.isdir(path):
for root, dirs, files in os.walk(path):
for filename in files:
if filename.endswith(".py"):
yield os.path.join(root, filename)
pysource = pysource()
print("The pysource module is not available; "
"no sophisticated Python source file search will be done.", file=sys.stderr)
decl_re = re.compile(rb'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)')
blank_re = re.compile(rb'^[ \t\f]*(?:[#\r\n]|$)')
def get_declaration(line):
match = decl_re.match(line)
if match:
return match.group(1)
return b''
def has_correct_encoding(text, codec):
try:
str(text, codec)
except UnicodeDecodeError:
return False
else:
return True
def needs_declaration(fullpath):
try:
infile = open(fullpath, 'rb')
except IOError: # Oops, the file was removed - ignore it
return None
with infile:
line1 = infile.readline()
line2 = infile.readline()
if (get_declaration(line1) or
blank_re.match(line1) and get_declaration(line2)):
# the file does have an encoding declaration, so trust it
return False
# check the whole file for non utf-8 characters
rest = infile.read()
if has_correct_encoding(line1+line2+rest, "utf-8"):
return False
return True
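# Illustrative examples (not part of upstream CPython) of how the PEP 263
# regexp above classifies first lines; the literals are hypothetical inputs:
#   get_declaration(b'# -*- coding: latin-1 -*-\n')  returns b'latin-1'
#   get_declaration(b'#!/usr/bin/env python3\n')     returns b''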
usage = """Usage: %s [-cd] paths...
    -c: recognize Python source files by trying to compile them
-d: debug output""" % sys.argv[0]
if __name__ == '__main__':
try:
opts, args = getopt.getopt(sys.argv[1:], 'cd')
except getopt.error as msg:
print(msg, file=sys.stderr)
print(usage, file=sys.stderr)
sys.exit(1)
is_python = pysource.looks_like_python
debug = False
for o, a in opts:
if o == '-c':
is_python = pysource.can_be_compiled
elif o == '-d':
debug = True
if not args:
print(usage, file=sys.stderr)
sys.exit(1)
for fullpath in pysource.walk_python_files(args, is_python):
if debug:
print("Testing for coding: %s" % fullpath)
result = needs_declaration(fullpath)
if result:
print(fullpath)
| bsd-3-clause |
tuxx42/linux-3.1PNX8550 | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
pylixm/sae-django-demo | django1.7-sae/site-packages/django/core/management/commands/createcachetable.py | 55 | 3782 | from optparse import make_option
from django.conf import settings
from django.core.cache import caches
from django.core.cache.backends.db import BaseDatabaseCache
from django.core.management.base import BaseCommand, CommandError
from django.db import connections, router, transaction, models, DEFAULT_DB_ALIAS
from django.db.utils import DatabaseError
from django.utils.encoding import force_text
class Command(BaseCommand):
help = "Creates the tables needed to use the SQL cache backend."
option_list = BaseCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database onto '
'which the cache tables will be installed. '
'Defaults to the "default" database.'),
)
requires_system_checks = False
def handle(self, *tablenames, **options):
db = options.get('database')
self.verbosity = int(options.get('verbosity'))
if len(tablenames):
# Legacy behavior, tablename specified as argument
for tablename in tablenames:
self.create_table(db, tablename)
else:
for cache_alias in settings.CACHES:
cache = caches[cache_alias]
if isinstance(cache, BaseDatabaseCache):
self.create_table(db, cache._table)
def create_table(self, database, tablename):
cache = BaseDatabaseCache(tablename, {})
if not router.allow_migrate(database, cache.cache_model_class):
return
connection = connections[database]
if tablename in connection.introspection.table_names():
if self.verbosity > 0:
self.stdout.write("Cache table '%s' already exists." % tablename)
return
fields = (
# "key" is a reserved word in MySQL, so use "cache_key" instead.
models.CharField(name='cache_key', max_length=255, unique=True, primary_key=True),
models.TextField(name='value'),
models.DateTimeField(name='expires', db_index=True),
)
table_output = []
index_output = []
qn = connection.ops.quote_name
for f in fields:
field_output = [qn(f.name), f.db_type(connection=connection)]
field_output.append("%sNULL" % ("NOT " if not f.null else ""))
if f.primary_key:
field_output.append("PRIMARY KEY")
elif f.unique:
field_output.append("UNIQUE")
if f.db_index:
unique = "UNIQUE " if f.unique else ""
index_output.append("CREATE %sINDEX %s ON %s (%s);" %
(unique, qn('%s_%s' % (tablename, f.name)), qn(tablename),
qn(f.name)))
table_output.append(" ".join(field_output))
full_statement = ["CREATE TABLE %s (" % qn(tablename)]
for i, line in enumerate(table_output):
full_statement.append(' %s%s' % (line, ',' if i < len(table_output) - 1 else ''))
full_statement.append(');')
with transaction.atomic(using=database,
savepoint=connection.features.can_rollback_ddl):
with connection.cursor() as curs:
try:
curs.execute("\n".join(full_statement))
except DatabaseError as e:
raise CommandError(
"Cache table '%s' could not be created.\nThe error was: %s." %
(tablename, force_text(e)))
for statement in index_output:
curs.execute(statement)
if self.verbosity > 1:
self.stdout.write("Cache table '%s' created." % tablename)
| apache-2.0 |
EliasTouil/simpleBlog | simpleBlog/Lib/encodings/base64_codec.py | 58 | 2371 | """ Python 'base64_codec' Codec - base64 content transfer encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg ([email protected]).
"""
import codecs, base64
### Codec APIs
def base64_encode(input,errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = base64.encodestring(input)
return (output, len(input))
def base64_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = base64.decodestring(input)
return (output, len(input))
class Codec(codecs.Codec):
def encode(self, input,errors='strict'):
return base64_encode(input,errors)
def decode(self, input,errors='strict'):
return base64_decode(input,errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
assert self.errors == 'strict'
return base64.encodestring(input)
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
assert self.errors == 'strict'
return base64.decodestring(input)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='base64',
encode=base64_encode,
decode=base64_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
_is_text_encoding=False,
)
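# Minimal usage sketch (not part of the original module): round-trips a byte
# string through the module-level helpers, under the Python 2 str semantics
# this module targets.
if __name__ == '__main__':
    encoded, consumed = base64_encode(b'hello world')
    decoded, _ = base64_decode(encoded)
    assert decoded == b'hello world' and consumed == 11
    print(encoded.strip())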
| gpl-3.0 |
dapuck/pyleus | pyleus/cli/topologies.py | 9 | 1835 | """Logic for topologies management.
"""
from __future__ import absolute_import
import zipfile
from pyleus.cli.storm_cluster import LocalStormCluster
from pyleus.cli.storm_cluster import StormCluster
def add_nimbus_arguments(parser):
"""Add Nimbus host/port arguments to the command parser."""
parser.add_argument(
"-n", "--nimbus-host", dest="nimbus_host", metavar="NIMBUS_HOST",
help="The hostname or IP address of the Storm cluster's Nimbus node")
parser.add_argument(
"-p", "--nimbus-port", dest="nimbus_port", metavar="NIMBUS_PORT",
help="The Thrift port used by the Storm cluster's Nimbus node")
def run_topology_locally(jar_path, configs):
"""Run the pyleus topology jar on the local machine."""
LocalStormCluster().run(
configs.storm_cmd_path,
jar_path,
configs.debug,
configs.jvm_opts)
def submit_topology(jar_path, configs):
"""Submit the topology jar to the Storm cluster specified in configs."""
StormCluster(
configs.storm_cmd_path,
configs.nimbus_host,
configs.nimbus_port,
configs.verbose,
configs.jvm_opts).submit(jar_path)
def list_topologies(configs):
"""List the topologies running on the Storm cluster specified in configs."""
StormCluster(
configs.storm_cmd_path,
configs.nimbus_host,
configs.nimbus_port,
configs.verbose,
configs.jvm_opts).list()
def kill_topology(configs):
"""Kill a topology running on the Storm cluster specified in configs."""
StormCluster(
configs.storm_cmd_path,
configs.nimbus_host,
configs.nimbus_port,
configs.verbose,
configs.jvm_opts).kill(configs.topology_name, configs.wait_time)
def is_jar(jar_path):
return zipfile.is_zipfile(jar_path)
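# Illustrative sketch (not part of upstream pyleus): add_nimbus_arguments
# expects an argparse-style parser; the host and port values below are
# hypothetical.
if __name__ == '__main__':
    import argparse
    _parser = argparse.ArgumentParser()
    add_nimbus_arguments(_parser)
    print(_parser.parse_args(['-n', 'nimbus.example.com', '-p', '6627']))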
| apache-2.0 |
Biktorgj/Gear_1_Kernel | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
proby/ansible-modules-core | cloud/amazon/ec2.py | 10 | 44332 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2
short_description: create, terminate, start or stop an instance in ec2
description:
- Creates or terminates ec2 instances.
version_added: "0.9"
options:
key_name:
description:
- key pair to use on the instance
required: false
default: null
aliases: ['keypair']
group:
description:
- security group (or list of groups) to use with the instance
required: false
default: null
aliases: [ 'groups' ]
group_id:
version_added: "1.1"
description:
- security group id (or list of ids) to use with the instance
required: false
default: null
aliases: []
region:
version_added: "1.2"
description:
- The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
default: null
aliases: [ 'aws_region', 'ec2_region' ]
zone:
version_added: "1.2"
description:
- AWS availability zone in which to launch the instance
required: false
default: null
aliases: [ 'aws_zone', 'ec2_zone' ]
instance_type:
description:
- instance type to use for the instance
required: true
default: null
aliases: []
tenancy:
version_added: "1.9"
description:
- An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Valid values are "default" or "dedicated". Note that to use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances.
required: false
default: default
aliases: []
spot_price:
version_added: "1.5"
description:
- Maximum spot price to bid, If not set a regular on-demand instance is requested. A spot request is made with this maximum bid. When it is filled, the instance is started.
required: false
default: null
aliases: []
image:
description:
- I(ami) ID to use for the instance
required: true
default: null
aliases: []
kernel:
description:
- kernel I(eki) to use for the instance
required: false
default: null
aliases: []
ramdisk:
description:
- ramdisk I(eri) to use for the instance
required: false
default: null
aliases: []
wait:
description:
- wait for the instance to be 'running' before returning. Does not wait for SSH, see 'wait_for' example for details.
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
aliases: []
spot_wait_timeout:
version_added: "1.5"
description:
- how long to wait for the spot instance request to be fulfilled
default: 600
aliases: []
count:
description:
- number of instances to launch
required: False
default: 1
aliases: []
monitoring:
version_added: "1.1"
description:
- enable detailed monitoring (CloudWatch) for instance
required: false
default: null
aliases: []
user_data:
version_added: "0.9"
description:
- opaque blob of data which is made available to the ec2 instance
required: false
default: null
aliases: []
instance_tags:
version_added: "1.0"
description:
- a hash/dictionary of tags to add to the new instance; '{"key":"value"}' and '{"key":"value","key":"value"}'
required: false
default: null
aliases: []
placement_group:
version_added: "1.3"
description:
- placement group for the instance when using EC2 Clustered Compute
required: false
default: null
aliases: []
vpc_subnet_id:
version_added: "1.1"
description:
- the subnet ID in which to launch the instance (VPC)
required: false
default: null
aliases: []
assign_public_ip:
version_added: "1.5"
description:
- when provisioning within vpc, assign a public IP address. Boto library must be 2.13.0+
required: false
default: null
aliases: []
private_ip:
version_added: "1.2"
description:
- the private ip address to assign the instance (from the vpc subnet)
required: false
default: null
aliases: []
instance_profile_name:
version_added: "1.3"
description:
- Name of the IAM instance profile to use. Boto library must be 2.5.0+
required: false
default: null
aliases: []
instance_ids:
version_added: "1.3"
description:
- "list of instance ids, currently used for states: absent, running, stopped"
required: false
default: null
aliases: ['instance_id']
source_dest_check:
version_added: "1.6"
description:
- Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers)
required: false
default: true
state:
version_added: "1.3"
description:
- create or terminate instances
required: false
default: 'present'
aliases: []
choices: ['present', 'absent', 'running', 'stopped']
volumes:
version_added: "1.5"
description:
- a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume.
required: false
default: null
aliases: []
ebs_optimized:
version_added: "1.6"
description:
- whether instance is using optimized EBS volumes, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html)
required: false
default: false
exact_count:
version_added: "1.5"
description:
- An integer value which indicates how many instances that match the 'count_tag' parameter should be running. Instances are either created or terminated based on this value.
required: false
default: null
aliases: []
count_tag:
version_added: "1.5"
description:
- Used with 'exact_count' to determine how many nodes based on a specific tag criteria should be running. This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers that are tagged with "class=webserver".
required: false
default: null
aliases: []
author: Seth Vidal, Tim Gerla, Lester Wade
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Basic provisioning example
- ec2:
key_name: mykey
instance_type: t2.micro
image: ami-123456
wait: yes
group: webserver
count: 3
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Advanced example with tagging and CloudWatch
- ec2:
key_name: mykey
group: databases
instance_type: t2.micro
image: ami-123456
wait: yes
wait_timeout: 500
count: 5
instance_tags:
db: postgres
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Single instance with additional IOPS volume from snapshot and volume delete on termination
- ec2:
key_name: mykey
group: webserver
instance_type: c3.medium
image: ami-123456
wait: yes
wait_timeout: 500
volumes:
- device_name: /dev/sdb
snapshot: snap-abcdef12
device_type: io1
iops: 1000
volume_size: 100
delete_on_termination: true
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Multiple groups example
- ec2:
key_name: mykey
group: ['databases', 'internal-services', 'sshable', 'and-so-forth']
instance_type: m1.large
image: ami-6e649707
wait: yes
wait_timeout: 500
count: 5
instance_tags:
db: postgres
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Multiple instances with additional volume from snapshot
- ec2:
key_name: mykey
group: webserver
instance_type: m1.large
image: ami-6e649707
wait: yes
wait_timeout: 500
count: 5
volumes:
- device_name: /dev/sdb
snapshot: snap-abcdef12
volume_size: 10
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Dedicated tenancy example
- local_action:
module: ec2
assign_public_ip: yes
group_id: sg-1dc53f72
key_name: mykey
image: ami-6e649707
instance_type: m1.small
tenancy: dedicated
vpc_subnet_id: subnet-29e63245
wait: yes
# Spot instance example
- ec2:
spot_price: 0.24
spot_wait_timeout: 600
keypair: mykey
group_id: sg-1dc53f72
instance_type: m1.small
image: ami-6e649707
wait: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Launch instances, runs some tasks
# and then terminate them
- name: Create a sandbox instance
hosts: localhost
gather_facts: False
vars:
key_name: my_keypair
instance_type: m1.small
security_group: my_securitygroup
image: my_ami_id
region: us-east-1
tasks:
- name: Launch instance
ec2:
key_name: "{{ keypair }}"
group: "{{ security_group }}"
instance_type: "{{ instance_type }}"
image: "{{ image }}"
wait: true
region: "{{ region }}"
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
register: ec2
- name: Add new instance to host group
add_host: hostname={{ item.public_ip }} groupname=launched
with_items: ec2.instances
- name: Wait for SSH to come up
wait_for: host={{ item.public_dns_name }} port=22 delay=60 timeout=320 state=started
with_items: ec2.instances
- name: Configure instance(s)
hosts: launched
sudo: True
gather_facts: True
roles:
- my_awesome_role
- my_awesome_test
- name: Terminate instances
hosts: localhost
connection: local
tasks:
- name: Terminate instances that were previously launched
ec2:
state: 'absent'
instance_ids: '{{ ec2.instance_ids }}'
# Start a few existing instances, run some tasks
# and stop the instances
- name: Start sandbox instances
hosts: localhost
gather_facts: false
connection: local
vars:
instance_ids:
- 'i-xxxxxx'
- 'i-xxxxxx'
- 'i-xxxxxx'
region: us-east-1
tasks:
- name: Start the sandbox instances
ec2:
instance_ids: '{{ instance_ids }}'
region: '{{ region }}'
state: running
wait: True
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
role:
- do_neat_stuff
- do_more_neat_stuff
- name: Stop sandbox instances
hosts: localhost
gather_facts: false
connection: local
vars:
instance_ids:
- 'i-xxxxxx'
- 'i-xxxxxx'
- 'i-xxxxxx'
region: us-east-1
tasks:
- name: Stop the sandbox instances
ec2:
instance_ids: '{{ instance_ids }}'
region: '{{ region }}'
state: stopped
wait: True
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
#
# Enforce that 5 instances with a tag "foo" are running
# (Highly recommended!)
#
- ec2:
key_name: mykey
instance_type: c1.medium
image: ami-40603AD1
wait: yes
group: webserver
instance_tags:
foo: bar
exact_count: 5
count_tag: foo
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
#
# Enforce that 5 running instances named "database" with a "dbtype" of "postgres"
#
- ec2:
key_name: mykey
instance_type: c1.medium
image: ami-40603AD1
wait: yes
group: webserver
instance_tags:
Name: database
dbtype: postgres
exact_count: 5
count_tag:
Name: database
dbtype: postgres
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
#
# count_tag complex argument examples
#
# instances with tag foo
count_tag:
foo:
# instances with tag foo=bar
count_tag:
foo: bar
# instances with tags foo=bar & baz
count_tag:
foo: bar
baz:
# instances with tags foo & bar & baz=bang
count_tag:
- foo
- bar
- baz: bang
'''
import time
from ast import literal_eval
try:
import boto.ec2
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
from boto.exception import EC2ResponseError
from boto.vpc import VPCConnection
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def find_running_instances_by_count_tag(module, ec2, count_tag, zone=None):
# get reservations for instances that match tag(s) and are running
reservations = get_reservations(module, ec2, tags=count_tag, state="running", zone=zone)
instances = []
for res in reservations:
if hasattr(res, 'instances'):
for inst in res.instances:
instances.append(inst)
return reservations, instances
def _set_none_to_blank(dictionary):
result = dictionary
for k in result.iterkeys():
if type(result[k]) == dict:
result[k] = _set_none_to_blank(result[k])
elif not result[k]:
result[k] = ""
return result
def get_reservations(module, ec2, tags=None, state=None, zone=None):
# TODO: filters do not work with tags that have underscores
filters = dict()
if tags is not None:
if type(tags) is str:
try:
tags = literal_eval(tags)
except:
pass
# if string, we only care that a tag of that name exists
if type(tags) is str:
filters.update({"tag-key": tags})
# if list, append each item to filters
if type(tags) is list:
for x in tags:
if type(x) is dict:
x = _set_none_to_blank(x)
filters.update(dict(("tag:"+tn, tv) for (tn,tv) in x.iteritems()))
else:
filters.update({"tag-key": x})
# if dict, add the key and value to the filter
if type(tags) is dict:
tags = _set_none_to_blank(tags)
filters.update(dict(("tag:"+tn, tv) for (tn,tv) in tags.iteritems()))
if state:
# http://stackoverflow.com/questions/437511/what-are-the-valid-instancestates-for-the-amazon-ec2-api
filters.update({'instance-state-name': state})
if zone:
filters.update({'availability-zone': zone})
results = ec2.get_all_instances(filters=filters)
return results
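# Illustrative examples (not part of upstream Ansible) of the EC2 filter dict
# that get_reservations() builds from the different count_tag forms described
# in the module documentation:
#   tags="foo"                      -> {'tag-key': 'foo'}
#   tags={"foo": "bar"}             -> {'tag:foo': 'bar'}
#   tags=["foo", {"baz": "bang"}]   -> {'tag-key': 'foo', 'tag:baz': 'bang'}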
def get_instance_info(inst):
"""
Retrieves instance information from an instance
ID and returns it as a dictionary
"""
instance_info = {'id': inst.id,
'ami_launch_index': inst.ami_launch_index,
'private_ip': inst.private_ip_address,
'private_dns_name': inst.private_dns_name,
'public_ip': inst.ip_address,
'dns_name': inst.dns_name,
'public_dns_name': inst.public_dns_name,
'state_code': inst.state_code,
'architecture': inst.architecture,
'image_id': inst.image_id,
'key_name': inst.key_name,
'placement': inst.placement,
'region': inst.placement[:-1],
'kernel': inst.kernel,
'ramdisk': inst.ramdisk,
'launch_time': inst.launch_time,
'instance_type': inst.instance_type,
'root_device_type': inst.root_device_type,
'root_device_name': inst.root_device_name,
'state': inst.state,
'hypervisor': inst.hypervisor,
'tags': inst.tags,
'groups': dict((group.id, group.name) for group in inst.groups),
}
try:
instance_info['virtualization_type'] = getattr(inst,'virtualization_type')
except AttributeError:
instance_info['virtualization_type'] = None
try:
instance_info['ebs_optimized'] = getattr(inst, 'ebs_optimized')
except AttributeError:
instance_info['ebs_optimized'] = False
try:
instance_info['tenancy'] = getattr(inst, 'placement_tenancy')
except AttributeError:
instance_info['tenancy'] = 'default'
return instance_info
def boto_supports_associate_public_ip_address(ec2):
"""
Check if Boto library has associate_public_ip_address in the NetworkInterfaceSpecification
class. Added in Boto 2.13.0
ec2: authenticated ec2 connection object
Returns:
True if Boto library accepts associate_public_ip_address argument, else false
"""
try:
network_interface = boto.ec2.networkinterface.NetworkInterfaceSpecification()
getattr(network_interface, "associate_public_ip_address")
return True
except AttributeError:
return False
def boto_supports_profile_name_arg(ec2):
"""
Check if Boto library has instance_profile_name argument. instance_profile_name has been added in Boto 2.5.0
ec2: authenticated ec2 connection object
Returns:
True if Boto library accept instance_profile_name argument, else false
"""
run_instances_method = getattr(ec2, 'run_instances')
return 'instance_profile_name' in run_instances_method.func_code.co_varnames
def create_block_device(module, ec2, volume):
    # Not aware of a way to determine this programmatically
# http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
MAX_IOPS_TO_SIZE_RATIO = 30
if 'snapshot' not in volume and 'ephemeral' not in volume:
if 'volume_size' not in volume:
module.fail_json(msg = 'Size must be specified when creating a new volume or modifying the root volume')
if 'snapshot' in volume:
if 'device_type' in volume and volume.get('device_type') == 'io1' and 'iops' not in volume:
module.fail_json(msg = 'io1 volumes must have an iops value set')
if 'iops' in volume:
snapshot = ec2.get_all_snapshots(snapshot_ids=[volume['snapshot']])[0]
size = volume.get('volume_size', snapshot.volume_size)
if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size:
module.fail_json(msg = 'IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO)
if 'ephemeral' in volume:
if 'snapshot' in volume:
module.fail_json(msg = 'Cannot set both ephemeral and snapshot')
return BlockDeviceType(snapshot_id=volume.get('snapshot'),
ephemeral_name=volume.get('ephemeral'),
size=volume.get('volume_size'),
volume_type=volume.get('device_type'),
delete_on_termination=volume.get('delete_on_termination', False),
iops=volume.get('iops'))
def boto_supports_param_in_spot_request(ec2, param):
"""
Check if Boto library has a <param> in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0.
ec2: authenticated ec2 connection object
Returns:
True if boto library has the named param as an argument on the request_spot_instances method, else False
"""
method = getattr(ec2, 'request_spot_instances')
return param in method.func_code.co_varnames
def enforce_count(module, ec2, vpc):
exact_count = module.params.get('exact_count')
count_tag = module.params.get('count_tag')
zone = module.params.get('zone')
# fail here if the exact count was specified without filtering
    # on a tag, as this may lead to an undesired removal of instances
if exact_count and count_tag is None:
module.fail_json(msg="you must use the 'count_tag' option with exact_count")
reservations, instances = find_running_instances_by_count_tag(module, ec2, count_tag, zone)
changed = None
checkmode = False
instance_dict_array = []
changed_instance_ids = None
if len(instances) == exact_count:
changed = False
elif len(instances) < exact_count:
changed = True
to_create = exact_count - len(instances)
if not checkmode:
(instance_dict_array, changed_instance_ids, changed) \
= create_instances(module, ec2, vpc, override_count=to_create)
for inst in instance_dict_array:
instances.append(inst)
elif len(instances) > exact_count:
changed = True
to_remove = len(instances) - exact_count
if not checkmode:
all_instance_ids = sorted([ x.id for x in instances ])
remove_ids = all_instance_ids[0:to_remove]
instances = [ x for x in instances if x.id not in remove_ids]
(changed, instance_dict_array, changed_instance_ids) \
= terminate_instances(module, ec2, remove_ids)
terminated_list = []
for inst in instance_dict_array:
inst['state'] = "terminated"
terminated_list.append(inst)
instance_dict_array = terminated_list
# ensure all instances are dictionaries
all_instances = []
for inst in instances:
if type(inst) is not dict:
inst = get_instance_info(inst)
all_instances.append(inst)
return (all_instances, instance_dict_array, changed_instance_ids, changed)
def create_instances(module, ec2, vpc, override_count=None):
"""
Creates new instances
module : AnsibleModule object
ec2: authenticated ec2 connection object
Returns:
A list of dictionaries with instance information
about the instances that were launched
"""
key_name = module.params.get('key_name')
id = module.params.get('id')
group_name = module.params.get('group')
group_id = module.params.get('group_id')
zone = module.params.get('zone')
instance_type = module.params.get('instance_type')
tenancy = module.params.get('tenancy')
spot_price = module.params.get('spot_price')
image = module.params.get('image')
if override_count:
count = override_count
else:
count = module.params.get('count')
monitoring = module.params.get('monitoring')
kernel = module.params.get('kernel')
ramdisk = module.params.get('ramdisk')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
placement_group = module.params.get('placement_group')
user_data = module.params.get('user_data')
instance_tags = module.params.get('instance_tags')
vpc_subnet_id = module.params.get('vpc_subnet_id')
assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
private_ip = module.params.get('private_ip')
instance_profile_name = module.params.get('instance_profile_name')
volumes = module.params.get('volumes')
ebs_optimized = module.params.get('ebs_optimized')
exact_count = module.params.get('exact_count')
count_tag = module.params.get('count_tag')
source_dest_check = module.boolean(module.params.get('source_dest_check'))
# group_id and group_name are exclusive of each other
if group_id and group_name:
module.fail_json(msg = str("Use only one type of parameter (group_name) or (group_id)"))
vpc_id = None
if vpc_subnet_id:
vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
else:
vpc_id = None
try:
# Here we try to lookup the group id from the security group name - if group is set.
if group_name:
if vpc_id:
grp_details = ec2.get_all_security_groups(filters={'vpc_id': vpc_id})
else:
grp_details = ec2.get_all_security_groups()
if isinstance(group_name, basestring):
group_name = [group_name]
group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
# Now we try to lookup the group id testing if group exists.
elif group_id:
#wrap the group_id in a list if it's not one already
if isinstance(group_id, basestring):
group_id = [group_id]
grp_details = ec2.get_all_security_groups(group_ids=group_id)
group_name = [grp_item.name for grp_item in grp_details]
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e))
    # Lookup any instances that match our run id.
running_instances = []
count_remaining = int(count)
if id != None:
filter_dict = {'client-token':id, 'instance-state-name' : 'running'}
previous_reservations = ec2.get_all_instances(None, filter_dict)
for res in previous_reservations:
for prev_instance in res.instances:
running_instances.append(prev_instance)
count_remaining = count_remaining - len(running_instances)
# Both min_count and max_count equal count parameter. This means the launch request is explicit (we want count, or fail) in how many instances we want.
if count_remaining == 0:
changed = False
else:
changed = True
try:
params = {'image_id': image,
'key_name': key_name,
'monitoring_enabled': monitoring,
'placement': zone,
'instance_type': instance_type,
'kernel_id': kernel,
'ramdisk_id': ramdisk,
'user_data': user_data}
if ebs_optimized:
params['ebs_optimized'] = ebs_optimized
            # 'tenancy' always has a default value, but it is not a valid parameter for spot instance request
if not spot_price:
params['tenancy'] = tenancy
if boto_supports_profile_name_arg(ec2):
params['instance_profile_name'] = instance_profile_name
else:
if instance_profile_name is not None:
module.fail_json(
msg="instance_profile_name parameter requires Boto version 2.5.0 or higher")
if assign_public_ip:
if not boto_supports_associate_public_ip_address(ec2):
module.fail_json(
msg="assign_public_ip parameter requires Boto version 2.13.0 or higher.")
elif not vpc_subnet_id:
module.fail_json(
msg="assign_public_ip only available with vpc_subnet_id")
else:
if private_ip:
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
subnet_id=vpc_subnet_id,
private_ip_address=private_ip,
groups=group_id,
associate_public_ip_address=assign_public_ip)
else:
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
subnet_id=vpc_subnet_id,
groups=group_id,
associate_public_ip_address=assign_public_ip)
interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
params['network_interfaces'] = interfaces
else:
params['subnet_id'] = vpc_subnet_id
if vpc_subnet_id:
params['security_group_ids'] = group_id
else:
params['security_groups'] = group_name
if volumes:
bdm = BlockDeviceMapping()
for volume in volumes:
if 'device_name' not in volume:
module.fail_json(msg = 'Device name must be set for volume')
# Minimum volume size is 1GB. We'll use volume size explicitly set to 0
# to be a signal not to create this volume
if 'volume_size' not in volume or int(volume['volume_size']) > 0:
bdm[volume['device_name']] = create_block_device(module, ec2, volume)
params['block_device_map'] = bdm
# check to see if we're using spot pricing first before starting instances
if not spot_price:
if assign_public_ip and private_ip:
params.update(dict(
min_count = count_remaining,
max_count = count_remaining,
client_token = id,
placement_group = placement_group,
))
else:
params.update(dict(
min_count = count_remaining,
max_count = count_remaining,
client_token = id,
placement_group = placement_group,
private_ip_address = private_ip,
))
res = ec2.run_instances(**params)
instids = [ i.id for i in res.instances ]
while True:
try:
ec2.get_all_instances(instids)
break
except boto.exception.EC2ResponseError as e:
if "<Code>InvalidInstanceID.NotFound</Code>" in str(e):
# there's a race between start and get an instance
continue
else:
module.fail_json(msg = str(e))
# The instances returned through ec2.run_instances above can be in
# terminated state due to idempotency. See commit 7f11c3d for a complete
# explanation.
terminated_instances = [
str(instance.id) for instance in res.instances if instance.state == 'terminated'
]
if terminated_instances:
module.fail_json(msg = "Instances with id(s) %s " % terminated_instances +
"were created previously but have since been terminated - " +
"use a (possibly different) 'instanceid' parameter")
else:
if private_ip:
module.fail_json(
msg='private_ip only available with on-demand (non-spot) instances')
if boto_supports_param_in_spot_request(ec2, placement_group):
params['placement_group'] = placement_group
elif placement_group :
module.fail_json(
msg="placement_group parameter requires Boto version 2.3.0 or higher.")
params.update(dict(
count = count_remaining,
))
res = ec2.request_spot_instances(spot_price, **params)
# Now we have to do the intermediate waiting
if wait:
spot_req_inst_ids = dict()
spot_wait_timeout = time.time() + spot_wait_timeout
while spot_wait_timeout > time.time():
reqs = ec2.get_all_spot_instance_requests()
for sirb in res:
if sirb.id in spot_req_inst_ids:
continue
for sir in reqs:
if sir.id == sirb.id and sir.instance_id is not None:
spot_req_inst_ids[sirb.id] = sir.instance_id
if len(spot_req_inst_ids) < count:
time.sleep(5)
else:
break
if spot_wait_timeout <= time.time():
module.fail_json(msg = "wait for spot requests timeout on %s" % time.asctime())
instids = spot_req_inst_ids.values()
except boto.exception.BotoServerError, e:
module.fail_json(msg = "Instance creation failed => %s: %s" % (e.error_code, e.error_message))
# wait here until the instances are up
num_running = 0
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and num_running < len(instids):
try:
res_list = ec2.get_all_instances(instids)
except boto.exception.BotoServerError, e:
if e.error_code == 'InvalidInstanceID.NotFound':
time.sleep(1)
continue
else:
raise
num_running = 0
for res in res_list:
num_running += len([ i for i in res.instances if i.state=='running' ])
if len(res_list) <= 0:
# got a bad response of some sort, possibly due to
# stale/cached data. Wait a second and then try again
time.sleep(1)
continue
if wait and num_running < len(instids):
time.sleep(5)
else:
break
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime())
#We do this after the loop ends so that we end up with one list
for res in res_list:
running_instances.extend(res.instances)
# Enabled by default by Amazon
if not source_dest_check:
for inst in res.instances:
inst.modify_attribute('sourceDestCheck', False)
# Leave this as late as possible to try and avoid InvalidInstanceID.NotFound
if instance_tags:
try:
ec2.create_tags(instids, instance_tags)
except boto.exception.EC2ResponseError, e:
module.fail_json(msg = "Instance tagging failed => %s: %s" % (e.error_code, e.error_message))
instance_dict_array = []
created_instance_ids = []
for inst in running_instances:
d = get_instance_info(inst)
created_instance_ids.append(inst.id)
instance_dict_array.append(d)
return (instance_dict_array, created_instance_ids, changed)
def terminate_instances(module, ec2, instance_ids):
"""
Terminates a list of instances
module: Ansible module object
ec2: authenticated ec2 connection object
    instance_ids: a list of instance ids to terminate, in the form of
      [ 'i-xxxxxx', ..]
    Returns a dictionary of instance information
    about the instances terminated.
    If none of the instances are in a running or stopped state,
    "changed" will be set to False.
"""
# Whether to wait for termination to complete before returning
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False
instance_dict_array = []
if not isinstance(instance_ids, list) or len(instance_ids) < 1:
module.fail_json(msg='instance_ids should be a list of instances, aborting')
terminated_instance_ids = []
for res in ec2.get_all_instances(instance_ids):
for inst in res.instances:
if inst.state == 'running' or inst.state == 'stopped':
terminated_instance_ids.append(inst.id)
instance_dict_array.append(get_instance_info(inst))
try:
ec2.terminate_instances([inst.id])
except EC2ResponseError, e:
module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e))
changed = True
# wait here until the instances are 'terminated'
if wait:
num_terminated = 0
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and num_terminated < len(terminated_instance_ids):
response = ec2.get_all_instances( \
instance_ids=terminated_instance_ids, \
filters={'instance-state-name':'terminated'})
try:
num_terminated = len(response.pop().instances)
except Exception, e:
# got a bad response of some sort, possibly due to
# stale/cached data. Wait a second and then try again
time.sleep(1)
continue
if num_terminated < len(terminated_instance_ids):
time.sleep(5)
# waiting took too long
if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids):
module.fail_json(msg = "wait for instance termination timeout on %s" % time.asctime())
return (changed, instance_dict_array, terminated_instance_ids)
def startstop_instances(module, ec2, instance_ids, state):
"""
Starts or stops a list of existing instances
module: Ansible module object
ec2: authenticated ec2 connection object
instance_ids: The list of instances to start in the form of
[ {id: <inst-id>}, ..]
state: Intended state ("running" or "stopped")
Returns a dictionary of instance information
about the instances started/stopped.
If the instance was not able to change state,
"changed" will be set to False.
"""
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False
instance_dict_array = []
if not isinstance(instance_ids, list) or len(instance_ids) < 1:
module.fail_json(msg='instance_ids should be a list of instances, aborting')
# Check that our instances are not in the state we want to take them to
# and change them to our desired state
running_instances_array = []
for res in ec2.get_all_instances(instance_ids):
for inst in res.instances:
if inst.state != state:
instance_dict_array.append(get_instance_info(inst))
try:
if state == 'running':
inst.start()
else:
inst.stop()
except EC2ResponseError, e:
module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
changed = True
## Wait for all the instances to finish starting or stopping
wait_timeout = time.time() + wait_timeout
while wait and wait_timeout > time.time():
instance_dict_array = []
matched_instances = []
for res in ec2.get_all_instances(instance_ids):
for i in res.instances:
if i.state == state:
instance_dict_array.append(get_instance_info(i))
matched_instances.append(i)
if len(matched_instances) < len(instance_ids):
time.sleep(5)
else:
break
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime())
return (changed, instance_dict_array, instance_ids)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
key_name = dict(aliases = ['keypair']),
id = dict(),
group = dict(type='list'),
group_id = dict(type='list'),
zone = dict(aliases=['aws_zone', 'ec2_zone']),
instance_type = dict(aliases=['type']),
spot_price = dict(),
image = dict(),
kernel = dict(),
count = dict(type='int', default='1'),
monitoring = dict(type='bool', default=False),
ramdisk = dict(),
wait = dict(type='bool', default=False),
wait_timeout = dict(default=300),
spot_wait_timeout = dict(default=600),
placement_group = dict(),
user_data = dict(),
instance_tags = dict(type='dict'),
vpc_subnet_id = dict(),
assign_public_ip = dict(type='bool', default=False),
private_ip = dict(),
instance_profile_name = dict(),
instance_ids = dict(type='list', aliases=['instance_id']),
source_dest_check = dict(type='bool', default=True),
state = dict(default='present'),
exact_count = dict(type='int', default=None),
count_tag = dict(),
volumes = dict(type='list'),
ebs_optimized = dict(type='bool', default=False),
tenancy = dict(default='default'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive = [
['exact_count', 'count'],
['exact_count', 'state'],
['exact_count', 'instance_ids']
],
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
ec2 = ec2_connect(module)
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
if region:
try:
vpc = boto.vpc.connect_to_region(
region,
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key
)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e))
else:
module.fail_json(msg="region must be specified")
tagged_instances = []
state = module.params.get('state')
if state == 'absent':
instance_ids = module.params.get('instance_ids')
if not isinstance(instance_ids, list):
module.fail_json(msg='termination_list needs to be a list of instances to terminate')
(changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids)
elif state in ('running', 'stopped'):
instance_ids = module.params.get('instance_ids')
if not isinstance(instance_ids, list):
module.fail_json(msg='running list needs to be a list of instances to run: %s' % instance_ids)
(changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state)
elif state == 'present':
# Changed is always set to true when provisioning new instances
if not module.params.get('image'):
module.fail_json(msg='image parameter is required for new instance')
if module.params.get('exact_count') is None:
(instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2, vpc)
else:
(tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2, vpc)
module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 |
ApuliaSoftware/l10n-italy | __unported__/account_vat_period_end_statement/__init__.py | 5 | 1206 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011-2012 Domsense s.r.l. (<http://www.domsense.com>).
# Copyright (C) 2012 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2013 Associazione OpenERP Italia
# (<http://www.openerp-italia.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account
import report
import wizard
| agpl-3.0 |
mwangaben/spendkit | node_modules/node-gyp/gyp/pylib/gyp/input.py | 713 | 115880 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import gyp.common
import gyp.simple_copy
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
from gyp.common import OrderedSet
# A list of types that are treated as linkable.
linkable_types = [
'executable',
'shared_library',
'loadable_module',
'mac_kernel_extension',
]
# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']
# base_path_sections is a list of sections defined by GYP that contain
# pathnames. The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
'destination',
'files',
'include_dirs',
'inputs',
'libraries',
'outputs',
'sources',
]
path_sections = set()
# These per-process dictionaries are used to cache build file data when loading
# in parallel mode.
per_process_data = {}
per_process_aux_data = {}
def IsPathSection(section):
# If section ends in one of the '=+?!' characters, it's applied to a section
# without the trailing characters. '/' is notably absent from this list,
# because there's no way for a regular expression to be treated as a path.
while section and section[-1:] in '=+?!':
section = section[:-1]
if section in path_sections:
return True
# Sections matching the regexp '_(dir|file|path)s?$' are also
# considered PathSections. Using manual string matching since that
# is much faster than the regexp and this can be called hundreds of
# thousands of times so micro performance matters.
if "_" in section:
tail = section[-6:]
if tail[-1] == 's':
tail = tail[:-1]
if tail[-5:] in ('_file', '_path'):
return True
return tail[-4:] == '_dir'
return False
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations. It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
# Sections that must exist inside targets and not configurations.
'actions',
'configurations',
'copies',
'default_configuration',
'dependencies',
'dependencies_original',
'libraries',
'postbuilds',
'product_dir',
'product_extension',
'product_name',
'product_prefix',
'rules',
'run_as',
'sources',
'standalone_static_library',
'suppress_wildcard',
'target_name',
'toolset',
'toolsets',
'type',
# Sections that can be found inside targets or configurations, but that
# should not be propagated from targets into their configurations.
'variables',
]
non_configuration_keys = []
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
'actions',
'all_dependent_settings',
'configurations',
'dependencies',
'direct_dependent_settings',
'libraries',
'link_settings',
'sources',
'standalone_static_library',
'target_name',
'type',
]
# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False
# Paths for converting filelist paths to output paths: {
# toplevel,
# qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
"""Return a list of all build files included into build_file_path.
The returned list will contain build_file_path as well as all other files
that it included, either directly or indirectly. Note that the list may
contain files that were included into a conditional section that evaluated
to false and was not merged into build_file_path's dict.
aux_data is a dict containing a key for each build file or included build
file. Those keys provide access to dicts whose "included" keys contain
lists of all other files included by the build file.
included should be left at its default None value by external callers. It
is used for recursion.
The returned list will not contain any duplicate entries. Each build file
in the list will be relative to the current directory.
"""
if included == None:
included = []
if build_file_path in included:
return included
included.append(build_file_path)
for included_build_file in aux_data[build_file_path].get('included', []):
GetIncludedBuildFiles(included_build_file, aux_data, included)
return included
def CheckedEval(file_contents):
"""Return the eval of a gyp file.
The gyp file is restricted to dictionaries and lists only, and
repeated keys are not allowed.
Note that this is slower than eval() is.
"""
ast = compiler.parse(file_contents)
assert isinstance(ast, Module)
c1 = ast.getChildren()
assert c1[0] is None
assert isinstance(c1[1], Stmt)
c2 = c1[1].getChildren()
assert isinstance(c2[0], Discard)
c3 = c2[0].getChildren()
assert len(c3) == 1
return CheckNode(c3[0], [])
def CheckNode(node, keypath):
if isinstance(node, Dict):
c = node.getChildren()
dict = {}
for n in range(0, len(c), 2):
assert isinstance(c[n], Const)
key = c[n].getChildren()[0]
if key in dict:
raise GypError("Key '" + key + "' repeated at level " +
repr(len(keypath) + 1) + " with key path '" +
'.'.join(keypath) + "'")
kp = list(keypath) # Make a copy of the list for descending this node.
kp.append(key)
dict[key] = CheckNode(c[n + 1], kp)
return dict
elif isinstance(node, List):
c = node.getChildren()
children = []
for index, child in enumerate(c):
kp = list(keypath) # Copy list.
kp.append(repr(index))
children.append(CheckNode(child, kp))
return children
elif isinstance(node, Const):
return node.getChildren()[0]
else:
raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
"': " + repr(node))
def LoadOneBuildFile(build_file_path, data, aux_data, includes,
is_target, check):
if build_file_path in data:
return data[build_file_path]
if os.path.exists(build_file_path):
build_file_contents = open(build_file_path).read()
else:
raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
build_file_data = None
try:
if check:
build_file_data = CheckedEval(build_file_contents)
else:
build_file_data = eval(build_file_contents, {'__builtins__': None},
None)
except SyntaxError, e:
e.filename = build_file_path
raise
except Exception, e:
gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
raise
if type(build_file_data) is not dict:
raise GypError("%s does not evaluate to a dictionary." % build_file_path)
data[build_file_path] = build_file_data
aux_data[build_file_path] = {}
# Scan for includes and merge them in.
if ('skip_includes' not in build_file_data or
not build_file_data['skip_includes']):
try:
if is_target:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, includes, check)
else:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, None, check)
except Exception, e:
gyp.common.ExceptionAppend(e,
'while reading includes of ' + build_file_path)
raise
return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
includes, check):
includes_list = []
if includes != None:
includes_list.extend(includes)
if 'includes' in subdict:
for include in subdict['includes']:
# "include" is specified relative to subdict_path, so compute the real
# path to include by appending the provided "include" to the directory
# in which subdict_path resides.
relative_include = \
os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
includes_list.append(relative_include)
# Unhook the includes list, it's no longer needed.
del subdict['includes']
# Merge in the included files.
for include in includes_list:
if not 'included' in aux_data[subdict_path]:
aux_data[subdict_path]['included'] = []
aux_data[subdict_path]['included'].append(include)
gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)
MergeDicts(subdict,
LoadOneBuildFile(include, data, aux_data, None, False, check),
subdict_path, include)
# Recurse into subdictionaries.
for k, v in subdict.iteritems():
if type(v) is dict:
LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data,
None, check)
elif type(v) is list:
LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data,
check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
for item in sublist:
if type(item) is dict:
LoadBuildFileIncludesIntoDict(item, sublist_path, data, aux_data,
None, check)
elif type(item) is list:
LoadBuildFileIncludesIntoList(item, sublist_path, data, aux_data, check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
if 'targets' in data:
target_list = data['targets']
new_target_list = []
for target in target_list:
# If this target already has an explicit 'toolset', and no 'toolsets'
# list, don't modify it further.
if 'toolset' in target and 'toolsets' not in target:
new_target_list.append(target)
continue
if multiple_toolsets:
toolsets = target.get('toolsets', ['target'])
else:
toolsets = ['target']
# Make sure this 'toolsets' definition is only processed once.
if 'toolsets' in target:
del target['toolsets']
if len(toolsets) > 0:
# Optimization: only do copies if more than one toolset is specified.
for build in toolsets[1:]:
new_target = gyp.simple_copy.deepcopy(target)
new_target['toolset'] = build
new_target_list.append(new_target)
target['toolset'] = toolsets[0]
new_target_list.append(target)
data['targets'] = new_target_list
if 'conditions' in data:
for condition in data['conditions']:
if type(condition) is list:
for condition_dict in condition[1:]:
if type(condition_dict) is dict:
ProcessToolsetsInDict(condition_dict)
# TODO(mark): I don't love this name. It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
depth, check, load_dependencies):
# If depth is set, predefine the DEPTH variable to be a relative path from
# this build file's directory to the directory identified by depth.
if depth:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
if d == '':
variables['DEPTH'] = '.'
else:
variables['DEPTH'] = d.replace('\\', '/')
# The 'target_build_files' key is only set when loading target build files in
# the non-parallel code path, where LoadTargetBuildFile is called
# recursively. In the parallel code path, we don't need to check whether the
# |build_file_path| has already been loaded, because the 'scheduled' set in
# ParallelState guarantees that we never load the same |build_file_path|
# twice.
if 'target_build_files' in data:
if build_file_path in data['target_build_files']:
# Already loaded.
return False
data['target_build_files'].add(build_file_path)
gyp.DebugOutput(gyp.DEBUG_INCLUDES,
"Loading Target Build File '%s'", build_file_path)
build_file_data = LoadOneBuildFile(build_file_path, data, aux_data,
includes, True, check)
# Store DEPTH for later use in generators.
build_file_data['_DEPTH'] = depth
# Set up the included_files key indicating which .gyp files contributed to
# this target dict.
if 'included_files' in build_file_data:
raise GypError(build_file_path + ' must not contain included_files key')
included = GetIncludedBuildFiles(build_file_path, aux_data)
build_file_data['included_files'] = []
for included_file in included:
# included_file is relative to the current directory, but it needs to
# be made relative to build_file_path's directory.
included_relative = \
gyp.common.RelativePath(included_file,
os.path.dirname(build_file_path))
build_file_data['included_files'].append(included_relative)
# Do a first round of toolsets expansion so that conditions can be defined
# per toolset.
ProcessToolsetsInDict(build_file_data)
# Apply "pre"/"early" variable expansions and condition evaluations.
ProcessVariablesAndConditionsInDict(
build_file_data, PHASE_EARLY, variables, build_file_path)
# Since some toolsets might have been defined conditionally, perform
# a second round of toolsets expansion now.
ProcessToolsetsInDict(build_file_data)
# Look at each project's target_defaults dict, and merge settings into
# targets.
if 'target_defaults' in build_file_data:
if 'targets' not in build_file_data:
raise GypError("Unable to find targets in build file %s" %
build_file_path)
index = 0
while index < len(build_file_data['targets']):
# This procedure needs to give the impression that target_defaults is
# used as defaults, and the individual targets inherit from that.
# The individual targets need to be merged into the defaults. Make
# a deep copy of the defaults for each target, merge the target dict
# as found in the input file into that copy, and then hook up the
# copy with the target-specific data merged into it as the replacement
# target dict.
old_target_dict = build_file_data['targets'][index]
new_target_dict = gyp.simple_copy.deepcopy(
build_file_data['target_defaults'])
MergeDicts(new_target_dict, old_target_dict,
build_file_path, build_file_path)
build_file_data['targets'][index] = new_target_dict
index += 1
# No longer needed.
del build_file_data['target_defaults']
# Look for dependencies. This means that dependency resolution occurs
# after "pre" conditionals and variable expansion, but before "post" -
# in other words, you can't put a "dependencies" section inside a "post"
# conditional within a target.
dependencies = []
if 'targets' in build_file_data:
for target_dict in build_file_data['targets']:
if 'dependencies' not in target_dict:
continue
for dependency in target_dict['dependencies']:
dependencies.append(
gyp.common.ResolveTarget(build_file_path, dependency, None)[0])
if load_dependencies:
for dependency in dependencies:
try:
LoadTargetBuildFile(dependency, data, aux_data, variables,
includes, depth, check, load_dependencies)
except Exception, e:
gyp.common.ExceptionAppend(
e, 'while loading dependencies of %s' % build_file_path)
raise
else:
return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
build_file_path, variables,
includes, depth, check,
generator_input_info):
"""Wrapper around LoadTargetBuildFile for parallel processing.
This wrapper is used when LoadTargetBuildFile is executed in
a worker process.
"""
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Apply globals so that the worker process behaves the same.
for key, value in global_flags.iteritems():
globals()[key] = value
SetGeneratorGlobals(generator_input_info)
result = LoadTargetBuildFile(build_file_path, per_process_data,
per_process_aux_data, variables,
includes, depth, check, False)
if not result:
return result
(build_file_path, dependencies) = result
# We can safely pop the build_file_data from per_process_data because it
# will never be referenced by this process again, so we don't need to keep
# it in the cache.
build_file_data = per_process_data.pop(build_file_path)
# This gets serialized and sent back to the main process via a pipe.
# It's handled in LoadTargetBuildFileCallback.
return (build_file_path,
build_file_data,
dependencies)
except GypError, e:
sys.stderr.write("gyp: %s\n" % e)
return None
except Exception, e:
print >>sys.stderr, 'Exception:', e
print >>sys.stderr, traceback.format_exc()
return None
class ParallelProcessingError(Exception):
pass
class ParallelState(object):
"""Class to keep track of state when processing input files in parallel.
If build files are loaded in parallel, use this to keep track of
state during farming out and processing parallel jobs. It's stored
in a global so that the callback function can have access to it.
"""
def __init__(self):
# The multiprocessing pool.
self.pool = None
# The condition variable used to protect this object and notify
# the main loop when there might be more data to process.
self.condition = None
# The "data" dict that was passed to LoadTargetBuildFileParallel
self.data = None
# The number of parallel calls outstanding; decremented when a response
# was received.
self.pending = 0
# The set of all build files that have been scheduled, so we don't
# schedule the same one twice.
self.scheduled = set()
# A list of dependency build file paths that haven't been scheduled yet.
self.dependencies = []
# Flag to indicate if there was an error in a child process.
self.error = False
def LoadTargetBuildFileCallback(self, result):
"""Handle the results of running LoadTargetBuildFile in another process.
"""
self.condition.acquire()
if not result:
self.error = True
self.condition.notify()
self.condition.release()
return
(build_file_path0, build_file_data0, dependencies0) = result
self.data[build_file_path0] = build_file_data0
self.data['target_build_files'].add(build_file_path0)
for new_dependency in dependencies0:
if new_dependency not in self.scheduled:
self.scheduled.add(new_dependency)
self.dependencies.append(new_dependency)
self.pending -= 1
self.condition.notify()
self.condition.release()
def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
check, generator_input_info):
parallel_state = ParallelState()
parallel_state.condition = threading.Condition()
# Make copies of the build_files argument that we can modify while working.
parallel_state.dependencies = list(build_files)
parallel_state.scheduled = set(build_files)
parallel_state.pending = 0
parallel_state.data = data
try:
parallel_state.condition.acquire()
while parallel_state.dependencies or parallel_state.pending:
if parallel_state.error:
break
if not parallel_state.dependencies:
parallel_state.condition.wait()
continue
dependency = parallel_state.dependencies.pop()
parallel_state.pending += 1
global_flags = {
'path_sections': globals()['path_sections'],
'non_configuration_keys': globals()['non_configuration_keys'],
'multiple_toolsets': globals()['multiple_toolsets']}
if not parallel_state.pool:
parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
parallel_state.pool.apply_async(
CallLoadTargetBuildFile,
args = (global_flags, dependency,
variables, includes, depth, check, generator_input_info),
callback = parallel_state.LoadTargetBuildFileCallback)
except KeyboardInterrupt, e:
parallel_state.pool.terminate()
raise e
parallel_state.condition.release()
parallel_state.pool.close()
parallel_state.pool.join()
parallel_state.pool = None
if parallel_state.error:
sys.exit(1)
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple. For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
LBRACKETS= set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}
def FindEnclosingBracketGroup(input_str):
stack = []
start = -1
for index, char in enumerate(input_str):
if char in LBRACKETS:
stack.append(char)
if start == -1:
start = index
elif char in BRACKETS:
if not stack:
return (-1, -1)
if stack.pop() != BRACKETS[char]:
return (-1, -1)
if not stack:
return (start, index + 1)
return (-1, -1)
def IsStrCanonicalInt(string):
"""Returns True if |string| is in its canonical integer form.
The canonical form is such that str(int(string)) == string.
"""
if type(string) is str:
# This function is called a lot so for maximum performance, avoid
# involving regexps which would otherwise make the code much
# shorter. Regexps would need twice the time of this function.
if string:
if string == "0":
return True
if string[0] == "-":
string = string[1:]
if not string:
return False
if '1' <= string[0] <= '9':
return string.isdigit()
return False
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
early_variable_re = re.compile(
r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# Global cache of results from running commands so they don't have to be run
# more than once.
cached_command_results = {}
def FixupPlatformCommand(cmd):
if sys.platform == 'win32':
if type(cmd) is list:
cmd = [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
else:
cmd = re.sub('^cat ', 'type ', cmd)
return cmd
PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2
def ExpandVariables(input, phase, variables, build_file):
# Look for the pattern that gets expanded into variables
if phase == PHASE_EARLY:
variable_re = early_variable_re
expansion_symbol = '<'
elif phase == PHASE_LATE:
variable_re = late_variable_re
expansion_symbol = '>'
elif phase == PHASE_LATELATE:
variable_re = latelate_variable_re
expansion_symbol = '^'
else:
assert False
input_str = str(input)
if IsStrCanonicalInt(input_str):
return int(input_str)
# Do a quick scan to determine if an expensive regex search is warranted.
if expansion_symbol not in input_str:
return input_str
# Get the entire list of matches as a list of MatchObject instances.
# (using findall here would return strings instead of MatchObjects).
matches = list(variable_re.finditer(input_str))
if not matches:
return input_str
output = input_str
# Reverse the list of matches so that replacements are done right-to-left.
# That ensures that earlier replacements won't mess up the string in a
# way that causes later calls to find the earlier substituted text instead
# of what's intended for replacement.
matches.reverse()
for match_group in matches:
match = match_group.groupdict()
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
# match['replace'] is the substring to look for, match['type']
# is the character code for the replacement type (< > <! >! <| >| <@
# >@ <!@ >!@), match['is_array'] contains a '[' for command
# arrays, and match['content'] is the name of the variable (< >)
# or command to run (<! >!). match['command_string'] is an optional
# command string. Currently, only 'pymod_do_main' is supported.
# run_command is true if a ! variant is used.
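# For example, in "<!@(python script.py)" (an illustrative command), the
# type is '<!@' -- run the command and expand the result in list context --
# and the content is 'python script.py'.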
run_command = '!' in match['type']
command_string = match['command_string']
# file_list is true if a | variant is used.
file_list = '|' in match['type']
# Capture these now so we can adjust them later.
replace_start = match_group.start('replace')
replace_end = match_group.end('replace')
# Find the ending paren, and re-evaluate the contained string.
(c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])
# Adjust the replacement range to match the entire command
# found by FindEnclosingBracketGroup (since the variable_re
# probably doesn't match the entire command if it contained
# nested variables).
replace_end = replace_start + c_end
# Find the "real" replacement, matching the appropriate closing
# paren, and adjust the replacement start and end.
replacement = input_str[replace_start:replace_end]
# Figure out what the contents of the variable parens are.
contents_start = replace_start + c_start + 1
contents_end = replace_end - 1
contents = input_str[contents_start:contents_end]
# Do filter substitution now for <|().
# Admittedly, this is different than the evaluation order in other
# contexts. However, since filtration has no chance to run on <|(),
# this seems like the only obvious way to give them access to filters.
if file_list:
processed_variables = gyp.simple_copy.deepcopy(variables)
ProcessListFiltersInDict(contents, processed_variables)
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase,
processed_variables, build_file)
else:
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase, variables, build_file)
# Strip off leading/trailing whitespace so that variable matches are
# simpler below (and because they are rarely needed).
contents = contents.strip()
# expand_to_list is true if an @ variant is used. In that case,
# the expansion should result in a list. Note that the caller
# must be expecting a list in return, though not all callers are,
# because not all are working in list context. Also, for list
# expansions, there can be no other text besides the variable
# expansion in the input string.
expand_to_list = '@' in match['type'] and input_str == replacement
if run_command or file_list:
# Find the build file's directory, so commands can be run or file lists
# generated relative to it.
build_file_dir = os.path.dirname(build_file)
if build_file_dir == '' and not file_list:
# If build_file is just a leaf filename indicating a file in the
# current directory, build_file_dir might be an empty string. Set
# it to None to signal to subprocess.Popen that it should run the
# command in the current directory.
build_file_dir = None
# Support <|(listfile.txt ...) which generates a file
# containing items from a gyp list, generated at gyp time.
# This works around actions/rules which have more inputs than will
# fit on the command line.
if file_list:
if type(contents) is list:
contents_list = contents
else:
contents_list = contents.split(' ')
replacement = contents_list[0]
if os.path.isabs(replacement):
raise GypError('| cannot handle absolute paths, got "%s"' % replacement)
if not generator_filelist_paths:
path = os.path.join(build_file_dir, replacement)
else:
if os.path.isabs(build_file_dir):
toplevel = generator_filelist_paths['toplevel']
rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
else:
rel_build_file_dir = build_file_dir
qualified_out_dir = generator_filelist_paths['qualified_out_dir']
path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
gyp.common.EnsureDirExists(path)
replacement = gyp.common.RelativePath(path, build_file_dir)
f = gyp.common.WriteOnDiff(path)
for i in contents_list[1:]:
f.write('%s\n' % i)
f.close()
elif run_command:
use_shell = True
if match['is_array']:
contents = eval(contents)
use_shell = False
# Check for a cached value to avoid executing commands, or generating
# file lists more than once. The cache key contains the command to be
# run as well as the directory to run it from, to account for commands
# that depend on their current directory.
# TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
# someone could author a set of GYP files where each time the command
# is invoked it produces different output by design. When the need
# arises, the syntax should be extended to support no caching off a
# command's output so it is run every time.
cache_key = (str(contents), build_file_dir)
cached_value = cached_command_results.get(cache_key, None)
if cached_value is None:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Executing command '%s' in directory '%s'",
contents, build_file_dir)
replacement = ''
if command_string == 'pymod_do_main':
# <!pymod_do_main(modulename param eters) loads |modulename| as a
# python module and then calls that module's DoMain() function,
# passing ["param", "eters"] as a single list argument. For modules
# that don't load quickly, this can be faster than
# <!(python modulename param eters). Do this in |build_file_dir|.
oldwd = os.getcwd() # Python doesn't like os.open('.'): no fchdir.
if build_file_dir: # build_file_dir may be None (see above).
os.chdir(build_file_dir)
try:
parsed_contents = shlex.split(contents)
try:
py_module = __import__(parsed_contents[0])
except ImportError as e:
raise GypError("Error importing pymod_do_main"
"module (%s): %s" % (parsed_contents[0], e))
replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
finally:
os.chdir(oldwd)
assert replacement != None
elif command_string:
raise GypError("Unknown command string '%s' in '%s'." %
(command_string, contents))
else:
# Fix up command with platform specific workarounds.
contents = FixupPlatformCommand(contents)
try:
p = subprocess.Popen(contents, shell=use_shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
cwd=build_file_dir)
except Exception, e:
raise GypError("%s while executing command '%s' in %s" %
(e, contents, build_file))
p_stdout, p_stderr = p.communicate('')
if p.wait() != 0 or p_stderr:
sys.stderr.write(p_stderr)
# Simulate check_call behavior, since check_call only exists
# in python 2.5 and later.
raise GypError("Call to '%s' returned exit status %d while in %s." %
(contents, p.returncode, build_file))
replacement = p_stdout.rstrip()
cached_command_results[cache_key] = replacement
else:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Had cache value for command '%s' in directory '%s'",
contents,build_file_dir)
replacement = cached_value
else:
if not contents in variables:
if contents[-1] in ['!', '/']:
# In order to allow cross-compiles (nacl) to happen more naturally,
# we will allow references to >(sources/) etc. to resolve to
# an empty list if undefined. This allows actions to:
# 'action!': [
# '>@(_sources!)',
# ],
# 'action/': [
# '>@(_sources/)',
# ],
replacement = []
else:
raise GypError('Undefined variable ' + contents +
' in ' + build_file)
else:
replacement = variables[contents]
if type(replacement) is list:
for item in replacement:
if not contents[-1] == '/' and type(item) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'list contains a ' +
item.__class__.__name__)
# Run through the list and handle variable expansions in it. Since
# the list is guaranteed not to contain dicts, this won't do anything
# with conditions sections.
ProcessVariablesAndConditionsInList(replacement, phase, variables,
build_file)
elif type(replacement) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'found a ' + replacement.__class__.__name__)
if expand_to_list:
# Expanding in list context. It's guaranteed that there's only one
# replacement to do in |input_str| and that it's this replacement. See
# above.
if type(replacement) is list:
# If it's already a list, make a copy.
output = replacement[:]
else:
# Split it the same way sh would split arguments.
output = shlex.split(str(replacement))
else:
# Expanding in string context.
encoded_replacement = ''
if type(replacement) is list:
# When expanding a list into string context, turn the list items
# into a string in a way that will work with a subprocess call.
#
# TODO(mark): This isn't completely correct. This should
# call a generator-provided function that observes the
# proper list-to-argument quoting rules on a specific
# platform instead of just calling the POSIX encoding
# routine.
encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
else:
encoded_replacement = replacement
output = output[:replace_start] + str(encoded_replacement) + \
output[replace_end:]
# Prepare for the next match iteration.
input_str = output
if output == input:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Found only identity matches on %r, avoiding infinite "
"recursion.",
output)
else:
# Look for more matches now that we've replaced some, to deal with
# expanding local variables (variables defined in the same
# variables block as this one).
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
if type(output) is list:
if output and type(output[0]) is list:
# Leave output alone if it's a list of lists.
# We don't want such lists to be stringified.
pass
else:
new_output = []
for item in output:
new_output.append(
ExpandVariables(item, phase, variables, build_file))
output = new_output
else:
output = ExpandVariables(output, phase, variables, build_file)
# Convert all strings that are canonically-represented integers into integers.
if type(output) is list:
for index in xrange(0, len(output)):
if IsStrCanonicalInt(output[index]):
output[index] = int(output[index])
elif IsStrCanonicalInt(output):
output = int(output)
return output
# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
cached_conditions_asts = {}
def EvalCondition(condition, conditions_key, phase, variables, build_file):
"""Returns the dict that should be used or None if the result was
that nothing should be used."""
if type(condition) is not list:
raise GypError(conditions_key + ' must be a list')
if len(condition) < 2:
# It's possible that condition[0] won't work in which case this
# attempt will raise its own IndexError. That's probably fine.
raise GypError(conditions_key + ' ' + condition[0] +
' must be at least length 2, not ' + str(len(condition)))
i = 0
result = None
while i < len(condition):
cond_expr = condition[i]
true_dict = condition[i + 1]
if type(true_dict) is not dict:
raise GypError('{} {} must be followed by a dictionary, not {}'.format(
conditions_key, cond_expr, type(true_dict)))
if len(condition) > i + 2 and type(condition[i + 2]) is dict:
false_dict = condition[i + 2]
i = i + 3
if i != len(condition):
raise GypError('{} {} has {} unexpected trailing items'.format(
conditions_key, cond_expr, len(condition) - i))
else:
false_dict = None
i = i + 2
if result == None:
result = EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file)
return result
def EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file):
"""Returns true_dict if cond_expr evaluates to true, and false_dict
otherwise."""
# Do expansions on the condition itself. Since the condition can naturally
# contain variable references without needing to resort to GYP expansion
# syntax, this is of dubious value for variables, but someone might want to
# use a command expansion directly inside a condition.
cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
build_file)
if type(cond_expr_expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + cond_expr_expanded.__class__.__name__)
try:
if cond_expr_expanded in cached_conditions_asts:
ast_code = cached_conditions_asts[cond_expr_expanded]
else:
ast_code = compile(cond_expr_expanded, '<string>', 'eval')
cached_conditions_asts[cond_expr_expanded] = ast_code
if eval(ast_code, {'__builtins__': None}, variables):
return true_dict
return false_dict
except SyntaxError, e:
syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
'at character %d.' %
(str(e.args[0]), e.text, build_file, e.offset),
e.filename, e.lineno, e.offset, e.text)
raise syntax_error
except NameError, e:
gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
(cond_expr_expanded, build_file))
raise GypError(e)
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
# Process a 'conditions' or 'target_conditions' section in the_dict,
# depending on phase.
# early -> conditions
# late -> target_conditions
# latelate -> no conditions
#
# Each item in a conditions list consists of cond_expr, a string expression
# evaluated as the condition, and true_dict, a dict that will be merged into
# the_dict if cond_expr evaluates to true. Optionally, a third item,
# false_dict, may be present. false_dict is merged into the_dict if
# cond_expr evaluates to false.
#
# Any dict merged into the_dict will be recursively processed for nested
# conditionals and other expansions, also according to phase, immediately
# prior to being merged.
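# For example (illustrative values):
#   'conditions': [
#     ['OS=="mac"', {'defines': ['MAC']}, {'defines': ['NOT_MAC']}],
#   ],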
if phase == PHASE_EARLY:
conditions_key = 'conditions'
elif phase == PHASE_LATE:
conditions_key = 'target_conditions'
elif phase == PHASE_LATELATE:
return
else:
assert False
if not conditions_key in the_dict:
return
conditions_list = the_dict[conditions_key]
# Unhook the conditions list, it's no longer needed.
del the_dict[conditions_key]
for condition in conditions_list:
merge_dict = EvalCondition(condition, conditions_key, phase, variables,
build_file)
if merge_dict != None:
# Expand variables and nested conditionals in the merge_dict before
# merging it.
ProcessVariablesAndConditionsInDict(merge_dict, phase,
variables, build_file)
MergeDicts(the_dict, merge_dict, build_file, build_file)
def LoadAutomaticVariablesFromDict(variables, the_dict):
# Any keys with plain string values in the_dict become automatic variables.
# The variable name is the key name with a "_" character prepended.
for key, value in the_dict.iteritems():
if type(value) in (str, int, list):
variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
# Any keys in the_dict's "variables" dict, if it has one, becomes a
# variable. The variable name is the key name in the "variables" dict.
# Variables that end with the % character are set only if they are unset in
# the variables dict. the_dict_key is the name of the key that accesses
# the_dict in the_dict's parent dict. If the_dict's parent is not a dict
# (it could be a list or it could be parentless because it is a root dict),
# the_dict_key will be None.
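# For example, an entry of 'use_foo%': 0 (illustrative name) sets the
# variable 'use_foo' only if it is not already defined, providing a default
# that callers can override.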
for key, value in the_dict.get('variables', {}).iteritems():
if type(value) not in (str, int, list):
continue
if key.endswith('%'):
variable_name = key[:-1]
if variable_name in variables:
# If the variable is already set, don't set it.
continue
if the_dict_key == 'variables' and variable_name in the_dict:
# If the variable is set without a % in the_dict, and the_dict is a
# variables dict (making |variables| a variables sub-dict of a
# variables dict), use the_dict's definition.
value = the_dict[variable_name]
else:
variable_name = key
variables[variable_name] = value
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
build_file, the_dict_key=None):
"""Handle all variable and command expansion and conditional evaluation.
This function is the public entry point for all variable expansions and
conditional evaluations. The variables_in dictionary will not be modified
by this function.
"""
# Make a copy of the variables_in dict that can be modified during the
# loading of automatics and the loading of the variables dict.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
if 'variables' in the_dict:
# Make sure all the local variables are added to the variables
# list before we process them so that you can reference one
# variable from another. They will be fully expanded by recursion
# in ExpandVariables.
for key, value in the_dict['variables'].iteritems():
variables[key] = value
# Handle the associated variables dict first, so that any variable
# references within can be resolved prior to using them as variables.
# Pass a copy of the variables dict to avoid having it be tainted.
# Otherwise, it would have extra automatics added for everything that
# should just be an ordinary variable in this scope.
ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
variables, build_file, 'variables')
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
for key, value in the_dict.iteritems():
# Skip "variables", which was already processed if present.
if key != 'variables' and type(value) is str:
expanded = ExpandVariables(value, phase, variables, build_file)
if type(expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + expanded.__class__.__name__ + ' for ' + key)
the_dict[key] = expanded
# Variable expansion may have resulted in changes to automatics. Reload.
# TODO(mark): Optimization: only reload if no changes were made.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Process conditions in this dict. This is done after variable expansion
# so that conditions may take advantage of expanded variables. For example,
# if the_dict contains:
# {'type': '<(library_type)',
# 'conditions': [['_type=="static_library"', { ... }]]},
# _type, as used in the condition, will only be set to the value of
# library_type if variable expansion is performed before condition
# processing. However, condition processing should occur prior to recursion
# so that variables (both automatic and "variables" dict type) may be
# adjusted by conditions sections, merged into the_dict, and have the
# intended impact on contained dicts.
#
# This arrangement means that a "conditions" section containing a "variables"
# section will only have those variables effective in subdicts, not in
# the_dict. The workaround is to put a "conditions" section within a
# "variables" section. For example:
# {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will not result in "IS_MAC" being appended to the "defines" list in the
# current scope but would result in it being appended to the "defines" list
# within "my_subdict". By comparison:
# {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will append "IS_MAC" to both "defines" lists.
# Evaluate conditions sections, allowing variable expansions within them
# as well as nested conditionals. This will process a 'conditions' or
# 'target_conditions' section, perform appropriate merging and recursive
# conditional and variable processing, and then remove the conditions section
# from the_dict if it is present.
ProcessConditionsInDict(the_dict, phase, variables, build_file)
# Conditional processing may have resulted in changes to automatics or the
# variables dict. Reload.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Recurse into child dicts, or process child lists which may result in
# further recursion into descendant dicts.
for key, value in the_dict.iteritems():
# Skip "variables" and string values, which were already processed if
# present.
if key == 'variables' or type(value) is str:
continue
if type(value) is dict:
# Pass a copy of the variables dict so that subdicts can't influence
# parents.
ProcessVariablesAndConditionsInDict(value, phase, variables,
build_file, key)
elif type(value) is list:
# The list itself can't influence the variables dict, and
# ProcessVariablesAndConditionsInList will make copies of the variables
# dict if it needs to pass it to something that can influence it. No
# copy is necessary here.
ProcessVariablesAndConditionsInList(value, phase, variables,
build_file)
elif type(value) is not int:
raise TypeError('Unknown type ' + value.__class__.__name__ + \
' for ' + key)
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
build_file):
# Iterate using an index so that new values can be assigned into the_list.
index = 0
while index < len(the_list):
item = the_list[index]
if type(item) is dict:
# Make a copy of the variables dict so that it won't influence anything
# outside of its own scope.
ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
elif type(item) is list:
ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
elif type(item) is str:
expanded = ExpandVariables(item, phase, variables, build_file)
if type(expanded) in (str, int):
the_list[index] = expanded
elif type(expanded) is list:
the_list[index:index+1] = expanded
index += len(expanded)
# index now identifies the next item to examine. Continue right now
# without falling into the index increment below.
continue
else:
raise ValueError(
'Variable expansion in this context permits strings and ' + \
'lists only, found ' + expanded.__class__.__name__ + ' at ' + \
str(index))
elif type(item) is not int:
raise TypeError('Unknown type ' + item.__class__.__name__ + \
' at index ' + str(index))
index = index + 1
def BuildTargetsDict(data):
"""Builds a dict mapping fully-qualified target names to their target dicts.
|data| is a dict mapping loaded build files by pathname relative to the
current directory. Values in |data| are build file contents. For each
|data| value with a "targets" key, the value of the "targets" key is taken
as a list containing target dicts. Each target's fully-qualified name is
constructed from the pathname of the build file (|data| key) and its
"target_name" property. These fully-qualified names are used as the keys
in the returned dict. These keys provide access to the target dicts,
the dicts in the "targets" lists.
"""
targets = {}
for build_file in data['target_build_files']:
for target in data[build_file].get('targets', []):
target_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if target_name in targets:
raise GypError('Duplicate target definitions for ' + target_name)
targets[target_name] = target
return targets
def QualifyDependencies(targets):
"""Make dependency links fully-qualified relative to the current directory.
|targets| is a dict mapping fully-qualified target names to their target
dicts. For each target in this dict, keys known to contain dependency
links are examined, and any dependencies referenced will be rewritten
so that they are fully-qualified and relative to the current directory.
All rewritten dependencies are suitable for use as keys to |targets| or a
similar dict.
"""
all_dependency_sections = [dep + op
for dep in dependency_sections
for op in ('', '!', '/')]
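# With dependency_sections as defined above, this yields keys such as
# 'dependencies', 'dependencies!', 'dependencies/', and the corresponding
# 'export_dependent_settings' variants.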
for target, target_dict in targets.iteritems():
target_build_file = gyp.common.BuildFile(target)
toolset = target_dict['toolset']
for dependency_key in all_dependency_sections:
dependencies = target_dict.get(dependency_key, [])
for index in xrange(0, len(dependencies)):
dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
target_build_file, dependencies[index], toolset)
if not multiple_toolsets:
# Ignore toolset specification in the dependency if it is specified.
dep_toolset = toolset
dependency = gyp.common.QualifiedTarget(dep_file,
dep_target,
dep_toolset)
dependencies[index] = dependency
# Make sure anything appearing in a list other than "dependencies" also
# appears in the "dependencies" list.
if dependency_key != 'dependencies' and \
dependency not in target_dict['dependencies']:
raise GypError('Found ' + dependency + ' in ' + dependency_key +
' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
"""Expands dependencies specified as build_file:*.
For each target in |targets|, examines sections containing links to other
targets. If any such section contains a link of the form build_file:*, it
is taken as a wildcard link, and is expanded to list each target in
build_file. The |data| dict provides access to build file dicts.
Any target that does not wish to be included by wildcard can provide an
optional "suppress_wildcard" key in its target dict. When present and
true, a wildcard dependency link will not include such targets.
All dependency names, including the keys to |targets| and the values in each
dependency list, must be qualified when this function is called.
"""
for target, target_dict in targets.iteritems():
toolset = target_dict['toolset']
target_build_file = gyp.common.BuildFile(target)
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
# Loop this way instead of "for dependency in" or "for index in xrange"
# because the dependencies list will be modified within the loop body.
index = 0
while index < len(dependencies):
(dependency_build_file, dependency_target, dependency_toolset) = \
gyp.common.ParseQualifiedTarget(dependencies[index])
if dependency_target != '*' and dependency_toolset != '*':
# Not a wildcard. Keep it moving.
index = index + 1
continue
if dependency_build_file == target_build_file:
# It's an error for a target to depend on all other targets in
# the same file, because a target cannot depend on itself.
raise GypError('Found wildcard in ' + dependency_key + ' of ' +
target + ' referring to same build file')
# Take the wildcard out and adjust the index so that the next
# dependency in the list will be processed the next time through the
# loop.
del dependencies[index]
index = index - 1
# Loop through the targets in the other build file, adding them to
# this target's list of dependencies in place of the removed
# wildcard.
dependency_target_dicts = data[dependency_build_file]['targets']
for dependency_target_dict in dependency_target_dicts:
if int(dependency_target_dict.get('suppress_wildcard', False)):
continue
dependency_target_name = dependency_target_dict['target_name']
if (dependency_target != '*' and
dependency_target != dependency_target_name):
continue
dependency_target_toolset = dependency_target_dict['toolset']
if (dependency_toolset != '*' and
dependency_toolset != dependency_target_toolset):
continue
dependency = gyp.common.QualifiedTarget(dependency_build_file,
dependency_target_name,
dependency_target_toolset)
index = index + 1
dependencies.insert(index, dependency)
index = index + 1
def Unify(l):
"""Removes duplicate elements from l, keeping the first element."""
seen = {}
return [seen.setdefault(e, e) for e in l if e not in seen]
def RemoveDuplicateDependencies(targets):
"""Makes sure every dependency appears only once in all targets's dependency
lists."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
target_dict[dependency_key] = Unify(dependencies)
def Filter(l, item):
"""Removes item from l."""
res = {}
return [res.setdefault(e, e) for e in l if e != item]
def RemoveSelfDependencies(targets):
"""Remove self dependencies from targets that have the prune_self_dependency
variable set."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if t == target_name:
if targets[t].get('variables', {}).get('prune_self_dependency', 0):
target_dict[dependency_key] = Filter(dependencies, target_name)
def RemoveLinkDependenciesFromNoneTargets(targets):
"""Remove dependencies having the 'link_dependency' attribute from the 'none'
targets."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if target_dict.get('type', None) == 'none':
if targets[t].get('variables', {}).get('link_dependency', 0):
target_dict[dependency_key] = \
Filter(target_dict[dependency_key], t)
class DependencyGraphNode(object):
"""
Attributes:
ref: A reference to an object that this DependencyGraphNode represents.
dependencies: List of DependencyGraphNodes on which this one depends.
dependents: List of DependencyGraphNodes that depend on this one.
"""
class CircularException(GypError):
pass
def __init__(self, ref):
self.ref = ref
self.dependencies = []
self.dependents = []
def __repr__(self):
return '<DependencyGraphNode: %r>' % self.ref
def FlattenToList(self):
# flat_list is the sorted list of dependencies - actually, the list items
# are the "ref" attributes of DependencyGraphNodes. Every target will
# appear in flat_list after all of its dependencies, and before all of its
# dependents.
flat_list = OrderedSet()
# in_degree_zeros is the list of DependencyGraphNodes that have no
# dependencies not in flat_list. Initially, it is a copy of the children
# of this node, because when the graph was built, nodes with no
# dependencies were made implicit dependents of the root node.
in_degree_zeros = set(self.dependents[:])
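# The loop below is effectively Kahn's algorithm for topological sorting:
# repeatedly emit a node all of whose dependencies have already been
# emitted, then re-examine its dependents.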
while in_degree_zeros:
# Nodes in in_degree_zeros have no dependencies not in flat_list, so they
# can be appended to flat_list. Take these nodes out of in_degree_zeros
# as work progresses, so that the next node to process from the list can
# always be accessed at a consistent position.
node = in_degree_zeros.pop()
flat_list.add(node.ref)
# Look at dependents of the node just added to flat_list. Some of them
# may now belong in in_degree_zeros.
for node_dependent in node.dependents:
is_in_degree_zero = True
# TODO: We want to check through the
# node_dependent.dependencies list but if it's long and we
# always start at the beginning, then we get O(n^2) behaviour.
for node_dependent_dependency in node_dependent.dependencies:
if not node_dependent_dependency.ref in flat_list:
# The dependent has one or more dependencies not in flat_list. There
# will be more chances to add it to flat_list when examining
# it again as a dependent of those other dependencies, provided
# that there are no cycles.
is_in_degree_zero = False
break
if is_in_degree_zero:
# All of the dependent's dependencies are already in flat_list. Add
# it to in_degree_zeros where it will be processed in a future
# iteration of the outer loop.
in_degree_zeros.add(node_dependent)
return list(flat_list)
def FindCycles(self):
"""
Returns a list of cycles in the graph, where each cycle is its own list.
"""
results = []
visited = set()
def Visit(node, path):
for child in node.dependents:
if child in path:
results.append([child] + path[:path.index(child) + 1])
elif not child in visited:
visited.add(child)
Visit(child, [child] + path)
visited.add(self)
Visit(self, [self])
return results
def DirectDependencies(self, dependencies=None):
"""Returns a list of just direct dependencies."""
if dependencies == None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref != None and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
return dependencies
def _AddImportedDependencies(self, targets, dependencies=None):
"""Given a list of direct dependencies, adds indirect dependencies that
other dependencies have declared to export their settings.
This method does not operate on self. Rather, it operates on the list
of dependencies in the |dependencies| argument. For each dependency in
that list, if any declares that it exports the settings of one of its
own dependencies, those dependencies whose settings are "passed through"
are added to the list. As new items are added to the list, they too will
be processed, so it is possible to import settings through multiple levels
of dependencies.
This method is not terribly useful on its own, it depends on being
"primed" with a list of direct dependencies such as one provided by
DirectDependencies. DirectAndImportedDependencies is intended to be the
public entry point.
"""
if dependencies == None:
dependencies = []
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Add any dependencies whose settings should be imported to the list
# if not already present. Newly-added items will be checked for
# their own imports when the list iteration reaches them.
# Rather than simply appending new items, insert them after the
# dependency that exported them. This is done to more closely match
# the depth-first method used by DeepDependencies.
add_index = 1
for imported_dependency in \
dependency_dict.get('export_dependent_settings', []):
if imported_dependency not in dependencies:
dependencies.insert(index + add_index, imported_dependency)
add_index = add_index + 1
index = index + 1
return dependencies
def DirectAndImportedDependencies(self, targets, dependencies=None):
"""Returns a list of a target's direct dependencies and all indirect
dependencies that a dependency has advertised settings should be exported
through the dependency for.
"""
dependencies = self.DirectDependencies(dependencies)
return self._AddImportedDependencies(targets, dependencies)
def DeepDependencies(self, dependencies=None):
"""Returns an OrderedSet of all of a target's dependencies, recursively."""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref is None:
continue
if dependency.ref not in dependencies:
dependency.DeepDependencies(dependencies)
dependencies.add(dependency.ref)
return dependencies
def _LinkDependenciesInternal(self, targets, include_shared_libraries,
dependencies=None, initial=True):
"""Returns an OrderedSet of dependency targets that are linked
into this target.
This function has a split personality, depending on the setting of
|initial|. Outside callers should always leave |initial| at its default
setting.
When adding a target to the list of dependencies, this function will
recurse into itself with |initial| set to False, to collect dependencies
that are linked into the linkable target for which the list is being built.
If |include_shared_libraries| is False, the resulting dependencies will not
include shared_library targets that are linked into this target.
"""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
# Check for None, corresponding to the root node.
if self.ref is None:
return dependencies
# It's kind of sucky that |targets| has to be passed into this function,
# but that's presently the easiest way to access the target dicts so that
# this function can find target types.
if 'target_name' not in targets[self.ref]:
raise GypError("Missing 'target_name' field in target.")
if 'type' not in targets[self.ref]:
raise GypError("Missing 'type' field in target %s" %
targets[self.ref]['target_name'])
target_type = targets[self.ref]['type']
is_linkable = target_type in linkable_types
if initial and not is_linkable:
# If this is the first target being examined and it's not linkable,
# return an empty list of link dependencies, because the link
# dependencies are intended to apply to the target itself (initial is
# True) and this target won't be linked.
return dependencies
# Don't traverse 'none' targets if explicitly excluded.
if (target_type == 'none' and
not targets[self.ref].get('dependencies_traverse', True)):
dependencies.add(self.ref)
return dependencies
# Executables, mac kernel extensions and loadable modules are already fully
# and finally linked. Nothing else can be a link dependency of them, there
# can only be dependencies in the sense that a dependent target might run
# an executable or load the loadable_module.
if not initial and target_type in ('executable', 'loadable_module',
'mac_kernel_extension'):
return dependencies
# Shared libraries are already fully linked. They should only be included
# in |dependencies| when adjusting static library dependencies (in order to
# link against the shared_library's import lib), but should not be included
# in |dependencies| when propagating link_settings.
# The |include_shared_libraries| flag controls which of these two cases we
# are handling.
if (not initial and target_type == 'shared_library' and
not include_shared_libraries):
return dependencies
# The target is linkable, add it to the list of link dependencies.
if self.ref not in dependencies:
dependencies.add(self.ref)
if initial or not is_linkable:
# If this is a subsequent target and it's linkable, don't look any
# further for linkable dependencies, as they'll already be linked into
# this target linkable. Always look at dependencies of the initial
# target, and always look at dependencies of non-linkables.
for dependency in self.dependencies:
dependency._LinkDependenciesInternal(targets,
include_shared_libraries,
dependencies, False)
return dependencies
def DependenciesForLinkSettings(self, targets):
"""
Returns a list of dependency targets whose link_settings should be merged
into this target.
"""
# TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
# link_settings are propagated. So for now, we will allow it, unless the
# 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
# False. Once chrome is fixed, we can remove this flag.
include_shared_libraries = \
targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
return self._LinkDependenciesInternal(targets, include_shared_libraries)
def DependenciesToLinkAgainst(self, targets):
"""
Returns a list of dependency targets that are linked into this target.
"""
return self._LinkDependenciesInternal(targets, True)
def BuildDependencyList(targets):
# Create a DependencyGraphNode for each target. Put it into a dict for easy
# access.
dependency_nodes = {}
for target, spec in targets.iteritems():
if target not in dependency_nodes:
dependency_nodes[target] = DependencyGraphNode(target)
# Set up the dependency links. Targets that have no dependencies are treated
# as dependent on root_node.
root_node = DependencyGraphNode(None)
for target, spec in targets.iteritems():
target_node = dependency_nodes[target]
target_build_file = gyp.common.BuildFile(target)
dependencies = spec.get('dependencies')
if not dependencies:
target_node.dependencies = [root_node]
root_node.dependents.append(target_node)
else:
for dependency in dependencies:
dependency_node = dependency_nodes.get(dependency)
if not dependency_node:
raise GypError("Dependency '%s' not found while "
"trying to load target %s" % (dependency, target))
target_node.dependencies.append(dependency_node)
dependency_node.dependents.append(target_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(targets):
if not root_node.dependents:
# If all targets have dependencies, add the first target as a dependent
# of root_node so that the cycle can be discovered from root_node.
target = targets.keys()[0]
target_node = dependency_nodes[target]
target_node.dependencies.append(root_node)
root_node.dependents.append(target_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in dependency graph detected:\n' + '\n'.join(cycles))
return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
# Create a DependencyGraphNode for each gyp file containing a target. Put
# it into a dict for easy access.
dependency_nodes = {}
for target in targets.iterkeys():
build_file = gyp.common.BuildFile(target)
if not build_file in dependency_nodes:
dependency_nodes[build_file] = DependencyGraphNode(build_file)
# Set up the dependency links.
for target, spec in targets.iteritems():
build_file = gyp.common.BuildFile(target)
build_file_node = dependency_nodes[build_file]
target_dependencies = spec.get('dependencies', [])
for dependency in target_dependencies:
try:
dependency_build_file = gyp.common.BuildFile(dependency)
except GypError, e:
gyp.common.ExceptionAppend(
e, 'while computing dependencies of .gyp file %s' % build_file)
raise
if dependency_build_file == build_file:
# A .gyp file is allowed to refer back to itself.
continue
dependency_node = dependency_nodes.get(dependency_build_file)
if not dependency_node:
raise GypError("Dependancy '%s' not found" % dependency_build_file)
if dependency_node not in build_file_node.dependencies:
build_file_node.dependencies.append(dependency_node)
dependency_node.dependents.append(build_file_node)
# Files that have no dependencies are treated as dependent on root_node.
root_node = DependencyGraphNode(None)
for build_file_node in dependency_nodes.itervalues():
if len(build_file_node.dependencies) == 0:
build_file_node.dependencies.append(root_node)
root_node.dependents.append(build_file_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(dependency_nodes):
if not root_node.dependents:
# If all files have dependencies, add the first file as a dependent
# of root_node so that the cycle can be discovered from root_node.
file_node = dependency_nodes.values()[0]
file_node.dependencies.append(root_node)
root_node.dependents.append(file_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles))
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
# key should be one of all_dependent_settings, direct_dependent_settings,
# or link_settings.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
if key == 'all_dependent_settings':
dependencies = dependency_nodes[target].DeepDependencies()
elif key == 'direct_dependent_settings':
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
elif key == 'link_settings':
dependencies = \
dependency_nodes[target].DependenciesForLinkSettings(targets)
else:
raise GypError("DoDependentSettings doesn't know how to determine "
'dependencies for ' + key)
for dependency in dependencies:
dependency_dict = targets[dependency]
if not key in dependency_dict:
continue
dependency_build_file = gyp.common.BuildFile(dependency)
MergeDicts(target_dict, dependency_dict[key],
build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
sort_dependencies):
# Recompute target "dependencies" properties. For each static library
# target, remove "dependencies" entries referring to other static libraries,
# unless the dependency has the "hard_dependency" attribute set. For each
# linkable target, add a "dependencies" entry referring to all of the
# target's computed list of link dependencies (including static libraries)
# if no such entry is already present.
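# Illustrative sketch (hypothetical targets, not from the original source):
# a static library that lists another non-hard static library in
# 'dependencies' loses that entry (the original list is kept under
# 'dependencies_original'), while an executable gains 'dependencies'
# entries for every static library it must link against.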
for target in flat_list:
target_dict = targets[target]
target_type = target_dict['type']
if target_type == 'static_library':
if not 'dependencies' in target_dict:
continue
target_dict['dependencies_original'] = target_dict.get(
'dependencies', [])[:]
# A static library should not depend on another static library unless
# the dependency relationship is "hard," which should only be done when
# a dependent relies on some side effect other than just the build
# product, like a rule or action output. Further, if a target has a
# non-hard dependency, but that dependency exports a hard dependency,
# the non-hard dependency can safely be removed, but the exported hard
# dependency must be added to the target to keep the same dependency
# ordering.
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Remove every non-hard static library dependency and remove every
# non-static library dependency that isn't a direct dependency.
if (dependency_dict['type'] == 'static_library' and \
not dependency_dict.get('hard_dependency', False)) or \
(dependency_dict['type'] != 'static_library' and \
not dependency in target_dict['dependencies']):
# Take the dependency out of the list, and don't increment index
# because the next dependency to analyze will shift into the index
# formerly occupied by the one being removed.
del dependencies[index]
else:
index = index + 1
# Update the dependencies. If the dependencies list is empty, it's not
# needed, so unhook it.
if len(dependencies) > 0:
target_dict['dependencies'] = dependencies
else:
del target_dict['dependencies']
elif target_type in linkable_types:
# Get a list of dependency targets that should be linked into this
# target. Add them to the dependencies list if they're not already
# present.
link_dependencies = \
dependency_nodes[target].DependenciesToLinkAgainst(targets)
for dependency in link_dependencies:
if dependency == target:
continue
if not 'dependencies' in target_dict:
target_dict['dependencies'] = []
if not dependency in target_dict['dependencies']:
target_dict['dependencies'].append(dependency)
# Sort the dependencies list in the order from dependents to dependencies.
# e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D.
# Note: flat_list is already sorted in the order from dependencies to
# dependents.
if sort_dependencies and 'dependencies' in target_dict:
target_dict['dependencies'] = [dep for dep in reversed(flat_list)
if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
# If item is a relative path, it's relative to the build file dict that it's
# coming from. Fix it up to make it relative to the build file dict that
# it's going into.
# Exception: any |item| that begins with these special characters is
# returned without modification.
# / Used when a path is already absolute (shortcut optimization;
# such paths would be returned as absolute anyway)
# $ Used for build environment variables
# - Used for some build environment flags (such as -lapr-1 in a
# "libraries" section)
# < Used for our own variable and command expansions (see ExpandVariables)
# > Used for our own variable and command expansions (see ExpandVariables)
# ^ Used for our own variable and command expansions (see ExpandVariables)
#
# "/' Used when a value is quoted. If these are present, then we
# check the second character instead.
#
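# Illustrative sketch (hypothetical paths, not from the original source):
# MakePathRelative('a/a.gyp', 'b/b.gyp', 'foo.c') -> '../b/foo.c'
# MakePathRelative('a/a.gyp', 'b/b.gyp', '-lm') -> '-lm' (exception char)
# MakePathRelative('a/a.gyp', 'b/b.gyp', '$(OUT)/x') -> '$(OUT)/x' (exception)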
if to_file == fro_file or exception_re.match(item):
return item
else:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
ret = os.path.normpath(os.path.join(
gyp.common.RelativePath(os.path.dirname(fro_file),
os.path.dirname(to_file)),
item)).replace('\\', '/')
if item[-1] == '/':
ret += '/'
return ret
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
# Python documentation recommends that objects which do not support hashing
# set __hash__ to None. Python library objects follow this rule.
is_hashable = lambda val: val.__hash__
# If x is hashable, returns whether x is in s. Else returns whether x is in l.
def is_in_set_or_list(x, s, l):
if is_hashable(x):
return x in s
return x in l
prepend_index = 0
# Make membership testing of hashables in |to| (in particular, strings)
# faster.
hashable_to_set = set(x for x in to if is_hashable(x))
for item in fro:
singleton = False
if type(item) in (str, int):
# The cheap and easy case.
if is_paths:
to_item = MakePathRelative(to_file, fro_file, item)
else:
to_item = item
if not (type(item) is str and item.startswith('-')):
# Any string that doesn't begin with a "-" is a singleton - it can
# only appear once in a list, to be enforced by the list merge append
# or prepend.
singleton = True
elif type(item) is dict:
# Make a copy of the dictionary, continuing to look for paths to fix.
# The other intelligent aspects of merge processing won't apply because
# item is being merged into an empty dict.
to_item = {}
MergeDicts(to_item, item, to_file, fro_file)
elif type(item) is list:
# Recurse, making a copy of the list. If the list contains any
# descendant dicts, path fixing will occur. Note that here, custom
# values for is_paths and append are dropped; those are only to be
# applied to |to| and |fro|, not sublists of |fro|. append shouldn't
# matter anyway because the new |to_item| list is empty.
to_item = []
MergeLists(to_item, item, to_file, fro_file)
else:
raise TypeError(
'Attempt to merge list item of unsupported type ' + \
item.__class__.__name__)
if append:
# If appending a singleton that's already in the list, don't append.
# This ensures that the earliest occurrence of the item will stay put.
if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
to.append(to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
else:
# If prepending a singleton that's already in the list, remove the
# existing instance and proceed with the prepend. This ensures that the
# item appears at the earliest possible position in the list.
while singleton and to_item in to:
to.remove(to_item)
# Don't just insert everything at index 0. That would prepend the new
# items to the list in reverse order, which would be an unwelcome
# surprise.
to.insert(prepend_index, to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
prepend_index = prepend_index + 1
def MergeDicts(to, fro, to_file, fro_file):
# I wanted to name the parameter "from" but it's a Python keyword...
for k, v in fro.iteritems():
# It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
# copy semantics. Something else may want to merge from the |fro| dict
# later, and having the same dict ref pointed to twice in the tree isn't
# what anyone wants considering that the dicts may subsequently be
# modified.
if k in to:
bad_merge = False
if type(v) in (str, int):
if type(to[k]) not in (str, int):
bad_merge = True
elif type(v) is not type(to[k]):
bad_merge = True
if bad_merge:
raise TypeError(
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[k].__class__.__name__ + \
' for key ' + k)
if type(v) in (str, int):
# Overwrite the existing value, if any. Cheap and easy.
is_path = IsPathSection(k)
if is_path:
to[k] = MakePathRelative(to_file, fro_file, v)
else:
to[k] = v
elif type(v) is dict:
# Recurse, guaranteeing copies will be made of objects that require it.
if not k in to:
to[k] = {}
MergeDicts(to[k], v, to_file, fro_file)
elif type(v) is list:
# Lists in dicts can be merged with different policies, depending on
# how the key in the "from" dict (k, the from-key) is written.
#
# If the from-key has ...the to-list will have this action
# this character appended:... applied when receiving the from-list:
# = replace
# + prepend
# ? set, only if to-list does not yet exist
# (none) append
#
# This logic is list-specific, but since it relies on the associated
# dict key, it's checked in this dict-oriented function.
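# Illustrative sketch (hypothetical values, not from the original source):
# to = {'sources': ['a.c']}
# MergeDicts(to, {'sources+': ['b.c']}, 'x.gyp', 'x.gyp')
# # to['sources'] is now ['b.c', 'a.c'] (prepend).
# # 'sources=' would have replaced the list; 'sources?' would have left the
# # existing list untouched.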
ext = k[-1]
append = True
if ext == '=':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '?']
to[list_base] = []
elif ext == '+':
list_base = k[:-1]
lists_incompatible = [list_base + '=', list_base + '?']
append = False
elif ext == '?':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '=', list_base + '+']
else:
list_base = k
lists_incompatible = [list_base + '=', list_base + '?']
# Some combinations of merge policies appearing together are meaningless.
# It's stupid to replace and append simultaneously, for example. Append
# and prepend are the only policies that can coexist.
for list_incompatible in lists_incompatible:
if list_incompatible in fro:
raise GypError('Incompatible list policies ' + k + ' and ' +
list_incompatible)
if list_base in to:
if ext == '?':
# If the key ends in "?", the list will only be merged if it doesn't
# already exist.
continue
elif type(to[list_base]) is not list:
# This may not have been checked above if merging in a list with an
# extension character.
raise TypeError(
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[list_base].__class__.__name__ + \
' for key ' + list_base + '(' + k + ')')
else:
to[list_base] = []
# Call MergeLists, which will make copies of objects that require it.
# MergeLists can recurse back into MergeDicts, although this will be
# to make copies of dicts (with paths fixed), there will be no
# subsequent dict "merging" once entering a list because lists are
# always replaced, appended to, or prepended to.
is_paths = IsPathSection(list_base)
MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
else:
raise TypeError(
'Attempt to merge dict value of unsupported type ' + \
v.__class__.__name__ + ' for key ' + k)
def MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, visited):
# Skip if previously visited.
if configuration in visited:
return
# Look at this configuration.
configuration_dict = target_dict['configurations'][configuration]
# Merge in parents.
for parent in configuration_dict.get('inherit_from', []):
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, parent, visited + [configuration])
# Merge it into the new config.
MergeDicts(new_configuration_dict, configuration_dict,
build_file, build_file)
# Drop abstract.
if 'abstract' in new_configuration_dict:
del new_configuration_dict['abstract']
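# Illustrative sketch (hypothetical configurations, not from the original
# source): with {'Common': {'abstract': 1, 'defines': ['COMMON']},
# 'Debug': {'inherit_from': ['Common'], 'defines': ['DEBUG']}}, merging
# 'Debug' pulls in 'Common' first, so the merged 'defines' list is
# ['COMMON', 'DEBUG'] and the 'abstract' key is dropped at the end.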
def SetUpConfigurations(target, target_dict):
# key_suffixes is a list of key suffixes that might appear on key names.
# These suffixes are handled in conditional evaluations (for =, +, and ?)
# and rules/exclude processing (for ! and /). Keys with these suffixes
# should be treated the same as keys without.
key_suffixes = ['=', '+', '?', '!', '/']
build_file = gyp.common.BuildFile(target)
# Provide a single configuration by default if none exists.
# TODO(mark): Signal an error if default_configurations exists but
# configurations does not.
if not 'configurations' in target_dict:
target_dict['configurations'] = {'Default': {}}
if not 'default_configuration' in target_dict:
concrete = [i for (i, config) in target_dict['configurations'].iteritems()
if not config.get('abstract')]
target_dict['default_configuration'] = sorted(concrete)[0]
merged_configurations = {}
configs = target_dict['configurations']
for (configuration, old_configuration_dict) in configs.iteritems():
# Skip abstract configurations (saves work only).
if old_configuration_dict.get('abstract'):
continue
# Configurations inherit (most) settings from the enclosing target scope.
# Get the inheritance relationship right by making a copy of the target
# dict.
new_configuration_dict = {}
for (key, target_val) in target_dict.iteritems():
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)
# Merge in configuration (with all its parents first).
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, [])
merged_configurations[configuration] = new_configuration_dict
# Put the new configurations back into the target dict as a configuration.
for configuration in merged_configurations.keys():
target_dict['configurations'][configuration] = (
merged_configurations[configuration])
# Now drop all the abstract ones.
for configuration in target_dict['configurations'].keys():
old_configuration_dict = target_dict['configurations'][configuration]
if old_configuration_dict.get('abstract'):
del target_dict['configurations'][configuration]
# Now that all of the target's configurations have been built, go through
# the target dict's keys and remove everything that's been moved into a
# "configurations" section.
delete_keys = []
for key in target_dict:
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
delete_keys.append(key)
for key in delete_keys:
del target_dict[key]
# Check the configurations to see if they contain invalid keys.
for configuration in target_dict['configurations'].keys():
configuration_dict = target_dict['configurations'][configuration]
for key in configuration_dict.keys():
if key in invalid_configuration_keys:
raise GypError('%s not allowed in the %s configuration, found in '
'target %s' % (key, configuration, target))
def ProcessListFiltersInDict(name, the_dict):
"""Process regular expression and exclusion-based filters on lists.
An exclusion list is in a dict key named with a trailing "!", like
"sources!". Every item in such a list is removed from the associated
main list, which in this example, would be "sources". Removed items are
placed into a "sources_excluded" list in the dict.
Regular expression (regex) filters are contained in dict keys named with a
trailing "/", such as "sources/" to operate on the "sources" list. Regex
filters in a dict take the form:
'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
['include', '_mac\\.cc$'] ],
The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
_win.cc. The second filter then includes all files ending in _mac.cc that
are now or were once in the "sources" list. Items matching an "exclude"
filter are subject to the same processing as would occur if they were listed
by name in an exclusion list (ending in "!"). Items matching an "include"
filter are brought back into the main list if previously excluded by an
exclusion list or exclusion regex filter. Subsequent matching "exclude"
patterns can still cause items to be excluded after matching an "include".
"""
# Look through the dictionary for any lists whose keys end in "!" or "/".
# These are lists that will be treated as exclude lists and regular
# expression-based exclude/include lists. Collect the lists that are
# needed first, looking for the lists that they operate on, and assemble
# then into |lists|. This is done in a separate loop up front, because
# the _included and _excluded keys need to be added to the_dict, and that
# can't be done while iterating through it.
lists = []
del_lists = []
for key, value in the_dict.iteritems():
operation = key[-1]
if operation != '!' and operation != '/':
continue
if type(value) is not list:
raise ValueError(name + ' key ' + key + ' must be list, not ' + \
value.__class__.__name__)
list_key = key[:-1]
if list_key not in the_dict:
# This happens when there's a list like "sources!" but no corresponding
# "sources" list. Since there's nothing for it to operate on, queue up
# the "sources!" list for deletion now.
del_lists.append(key)
continue
if type(the_dict[list_key]) is not list:
value = the_dict[list_key]
raise ValueError(name + ' key ' + list_key + \
' must be list, not ' + \
value.__class__.__name__ + ' when applying ' + \
{'!': 'exclusion', '/': 'regex'}[operation])
if not list_key in lists:
lists.append(list_key)
# Delete the lists that are known to be unneeded at this point.
for del_list in del_lists:
del the_dict[del_list]
for list_key in lists:
the_list = the_dict[list_key]
# Initialize the list_actions list, which is parallel to the_list. Each
# item in list_actions identifies whether the corresponding item in
# the_list should be excluded, unconditionally preserved (included), or
# whether no exclusion or inclusion has been applied. Items for which
# no exclusion or inclusion has been applied (yet) have value -1, items
# excluded have value 0, and items included have value 1. Includes and
# excludes override previous actions. All items in list_actions are
# initialized to -1 because no excludes or includes have been processed
# yet.
list_actions = list((-1,) * len(the_list))
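# Illustrative sketch (hypothetical values, not from the original source):
# with the_list == ['a.c', 'b.c', 'c.c'] and a "sources!" exclusion of
# 'b.c', list_actions moves from [-1, -1, -1] to [-1, 0, -1]; only 'b.c'
# is later moved to the "_excluded" list.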
exclude_key = list_key + '!'
if exclude_key in the_dict:
for exclude_item in the_dict[exclude_key]:
for index in xrange(0, len(the_list)):
if exclude_item == the_list[index]:
# This item matches the exclude_item, so set its action to 0
# (exclude).
list_actions[index] = 0
# The "whatever!" list is no longer needed, dump it.
del the_dict[exclude_key]
regex_key = list_key + '/'
if regex_key in the_dict:
for regex_item in the_dict[regex_key]:
[action, pattern] = regex_item
pattern_re = re.compile(pattern)
if action == 'exclude':
# This item matches an exclude regex, so set its value to 0 (exclude).
action_value = 0
elif action == 'include':
# This item matches an include regex, so set its value to 1 (include).
action_value = 1
else:
# This is an action that doesn't make any sense.
raise ValueError('Unrecognized action ' + action + ' in ' + name + \
' key ' + regex_key)
for index in xrange(0, len(the_list)):
list_item = the_list[index]
if list_actions[index] == action_value:
# Even if the regex matches, nothing will change so continue (regex
# searches are expensive).
continue
if pattern_re.search(list_item):
# Regular expression match.
list_actions[index] = action_value
# The "whatever/" list is no longer needed, dump it.
del the_dict[regex_key]
# Add excluded items to the excluded list.
#
# Note that exclude_key ("sources!") is different from excluded_key
# ("sources_excluded"). The exclude_key list is input and it was already
# processed and deleted; the excluded_key list is output and it's about
# to be created.
excluded_key = list_key + '_excluded'
if excluded_key in the_dict:
raise GypError(name + ' key ' + excluded_key +
' must not be present prior '
'to applying exclusion/regex filters for ' + list_key)
excluded_list = []
# Go backwards through the list_actions list so that as items are deleted,
# the indices of items that haven't been seen yet don't shift. That means
# that things need to be prepended to excluded_list to maintain them in the
# same order that they existed in the_list.
for index in xrange(len(list_actions) - 1, -1, -1):
if list_actions[index] == 0:
# Dump anything with action 0 (exclude). Keep anything with action 1
# (include) or -1 (no include or exclude seen for the item).
excluded_list.insert(0, the_list[index])
del the_list[index]
# If anything was excluded, put the excluded list into the_dict at
# excluded_key.
if len(excluded_list) > 0:
the_dict[excluded_key] = excluded_list
# Now recurse into subdicts and lists that may contain dicts.
for key, value in the_dict.iteritems():
if type(value) is dict:
ProcessListFiltersInDict(key, value)
elif type(value) is list:
ProcessListFiltersInList(key, value)
def ProcessListFiltersInList(name, the_list):
for item in the_list:
if type(item) is dict:
ProcessListFiltersInDict(name, item)
elif type(item) is list:
ProcessListFiltersInList(name, item)
def ValidateTargetType(target, target_dict):
"""Ensures the 'type' field on the target is one of the known types.
Arguments:
target: string, name of target.
target_dict: dict, target spec.
Raises an exception on error.
"""
VALID_TARGET_TYPES = ('executable', 'loadable_module',
'static_library', 'shared_library',
'mac_kernel_extension', 'none')
target_type = target_dict.get('type', None)
if target_type not in VALID_TARGET_TYPES:
raise GypError("Target %s has an invalid target type '%s'. "
"Must be one of %s." %
(target, target_type, '/'.join(VALID_TARGET_TYPES)))
if (target_dict.get('standalone_static_library', 0) and
not target_type == 'static_library'):
raise GypError('Target %s has type %s but standalone_static_library flag is'
' only valid for static_library type.' % (target,
target_type))
def ValidateSourcesInTarget(target, target_dict, build_file,
duplicate_basename_check):
if not duplicate_basename_check:
return
if target_dict.get('type', None) != 'static_library':
return
sources = target_dict.get('sources', [])
basenames = {}
for source in sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
target + error + 'libtool on Mac cannot handle that. Use '
'--no-duplicate-basename-check to disable this validation.')
raise GypError('Duplicate basenames in sources section, see list above')
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
"""Ensures that the rules sections in target_dict are valid and consistent,
and determines which sources they apply to.
Arguments:
target: string, name of target.
target_dict: dict, target spec containing "rules" and "sources" lists.
extra_sources_for_rules: a list of keys to scan for rule matches in
addition to 'sources'.
"""
# Dicts to map between values found in rules' 'rule_name' and 'extension'
# keys and the rule dicts themselves.
rule_names = {}
rule_extensions = {}
rules = target_dict.get('rules', [])
for rule in rules:
# Make sure that there's no conflict among rule names and extensions.
rule_name = rule['rule_name']
if rule_name in rule_names:
raise GypError('rule %s exists in duplicate, target %s' %
(rule_name, target))
rule_names[rule_name] = rule
rule_extension = rule['extension']
if rule_extension.startswith('.'):
rule_extension = rule_extension[1:]
if rule_extension in rule_extensions:
raise GypError(('extension %s associated with multiple rules, ' +
'target %s rules %s and %s') %
(rule_extension, target,
rule_extensions[rule_extension]['rule_name'],
rule_name))
rule_extensions[rule_extension] = rule
# Make sure rule_sources isn't already there. It's going to be
# created below if needed.
if 'rule_sources' in rule:
raise GypError(
'rule_sources must not exist in input, target %s rule %s' %
(target, rule_name))
rule_sources = []
source_keys = ['sources']
source_keys.extend(extra_sources_for_rules)
for source_key in source_keys:
for source in target_dict.get(source_key, []):
(source_root, source_extension) = os.path.splitext(source)
if source_extension.startswith('.'):
source_extension = source_extension[1:]
if source_extension == rule_extension:
rule_sources.append(source)
if len(rule_sources) > 0:
rule['rule_sources'] = rule_sources
def ValidateRunAsInTarget(target, target_dict, build_file):
target_name = target_dict.get('target_name')
run_as = target_dict.get('run_as')
if not run_as:
return
if type(run_as) is not dict:
raise GypError("The 'run_as' in target %s from file %s should be a "
"dictionary." %
(target_name, build_file))
action = run_as.get('action')
if not action:
raise GypError("The 'run_as' in target %s from file %s must have an "
"'action' section." %
(target_name, build_file))
if type(action) is not list:
raise GypError("The 'action' for 'run_as' in target %s from file %s "
"must be a list." %
(target_name, build_file))
working_directory = run_as.get('working_directory')
if working_directory and type(working_directory) is not str:
raise GypError("The 'working_directory' for 'run_as' in target %s "
"in file %s should be a string." %
(target_name, build_file))
environment = run_as.get('environment')
if environment and type(environment) is not dict:
raise GypError("The 'environment' for 'run_as' in target %s "
"in file %s should be a dictionary." %
(target_name, build_file))
def ValidateActionsInTarget(target, target_dict, build_file):
'''Validates the inputs to the actions in a target.'''
target_name = target_dict.get('target_name')
actions = target_dict.get('actions', [])
for action in actions:
action_name = action.get('action_name')
if not action_name:
raise GypError("Anonymous action in target %s. "
"An action must have an 'action_name' field." %
target_name)
inputs = action.get('inputs', None)
if inputs is None:
raise GypError('Action in target %s has no inputs.' % target_name)
action_command = action.get('action')
if action_command and not action_command[0]:
raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
"""Given dict the_dict, recursively converts all integers into strings.
"""
# Use items instead of iteritems because there's no need to try to look at
# reinserted keys and their associated values.
for k, v in the_dict.items():
if type(v) is int:
v = str(v)
the_dict[k] = v
elif type(v) is dict:
TurnIntIntoStrInDict(v)
elif type(v) is list:
TurnIntIntoStrInList(v)
if type(k) is int:
del the_dict[k]
the_dict[str(k)] = v
def TurnIntIntoStrInList(the_list):
"""Given list the_list, recursively converts all integers into strings.
"""
for index in xrange(0, len(the_list)):
item = the_list[index]
if type(item) is int:
the_list[index] = str(item)
elif type(item) is dict:
TurnIntIntoStrInDict(item)
elif type(item) is list:
TurnIntIntoStrInList(item)
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
data):
"""Return only the targets that are deep dependencies of |root_targets|."""
qualified_root_targets = []
for target in root_targets:
target = target.strip()
qualified_targets = gyp.common.FindQualifiedTargets(target, flat_list)
if not qualified_targets:
raise GypError("Could not find target %s" % target)
qualified_root_targets.extend(qualified_targets)
wanted_targets = {}
for target in qualified_root_targets:
wanted_targets[target] = targets[target]
for dependency in dependency_nodes[target].DeepDependencies():
wanted_targets[dependency] = targets[dependency]
wanted_flat_list = [t for t in flat_list if t in wanted_targets]
# Prune unwanted targets from each build_file's data dict.
for build_file in data['target_build_files']:
if not 'targets' in data[build_file]:
continue
new_targets = []
for target in data[build_file]['targets']:
qualified_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if qualified_name in wanted_targets:
new_targets.append(target)
data[build_file]['targets'] = new_targets
return wanted_targets, wanted_flat_list
def VerifyNoCollidingTargets(targets):
"""Verify that no two targets in the same directory share the same name.
Arguments:
targets: A list of targets in the form 'path/to/file.gyp:target_name'.
"""
# Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'.
used = {}
for target in targets:
# Separate out 'path/to/file.gyp', 'target_name' from
# 'path/to/file.gyp:target_name'.
path, name = target.rsplit(':', 1)
# Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'.
subdir, gyp = os.path.split(path)
# Use '.' for the current directory '', so that the error messages make
# more sense.
if not subdir:
subdir = '.'
# Prepare a key like 'path/to:target_name'.
key = subdir + ':' + name
if key in used:
# Complain if this target is already used.
raise GypError('Duplicate target name "%s" in directory "%s" used both '
'in "%s" and "%s".' % (name, subdir, gyp, used[key]))
used[key] = gyp
def SetGeneratorGlobals(generator_input_info):
# Set up path_sections and non_configuration_keys with the default data plus
# the generator-specific data.
global path_sections
path_sections = set(base_path_sections)
path_sections.update(generator_input_info['path_sections'])
global non_configuration_keys
non_configuration_keys = base_non_configuration_keys[:]
non_configuration_keys.extend(generator_input_info['non_configuration_keys'])
global multiple_toolsets
multiple_toolsets = generator_input_info[
'generator_supports_multiple_toolsets']
global generator_filelist_paths
generator_filelist_paths = generator_input_info['generator_filelist_paths']
def Load(build_files, variables, includes, depth, generator_input_info, check,
circular_check, duplicate_basename_check, parallel, root_targets):
SetGeneratorGlobals(generator_input_info)
# A generator can have other lists (in addition to sources) be processed
# for rules.
extra_sources_for_rules = generator_input_info['extra_sources_for_rules']
# Load build files. This loads every target-containing build file into
# the |data| dictionary such that the keys to |data| are build file names,
# and the values are the entire build file contents after "early" or "pre"
# processing has been done and includes have been resolved.
# NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
# well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
# track of the keys corresponding to "target" files.
data = {'target_build_files': set()}
# Normalize paths everywhere. This is important because paths will be
# used as keys to the data dict and for references between input files.
build_files = set(map(os.path.normpath, build_files))
if parallel:
LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
check, generator_input_info)
else:
aux_data = {}
for build_file in build_files:
try:
LoadTargetBuildFile(build_file, data, aux_data,
variables, includes, depth, check, True)
except Exception, e:
gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
raise
# Build a dict to access each target's subdict by qualified name.
targets = BuildTargetsDict(data)
# Fully qualify all dependency links.
QualifyDependencies(targets)
# Remove self-dependencies from targets that have 'prune_self_dependencies'
# set to 1.
RemoveSelfDependencies(targets)
# Expand dependencies specified as build_file:*.
ExpandWildcardDependencies(targets, data)
# Remove all dependencies marked as 'link_dependency' from the targets of
# type 'none'.
RemoveLinkDependenciesFromNoneTargets(targets)
# Apply exclude (!) and regex (/) list filters only for dependency_sections.
for target_name, target_dict in targets.iteritems():
tmp_dict = {}
for key_base in dependency_sections:
for op in ('', '!', '/'):
key = key_base + op
if key in target_dict:
tmp_dict[key] = target_dict[key]
del target_dict[key]
ProcessListFiltersInDict(target_name, tmp_dict)
# Write the results back to |target_dict|.
for key in tmp_dict:
target_dict[key] = tmp_dict[key]
# Make sure every dependency appears at most once.
RemoveDuplicateDependencies(targets)
if circular_check:
# Make sure that any targets in a.gyp don't contain dependencies in other
# .gyp files that further depend on a.gyp.
VerifyNoGYPFileCircularDependencies(targets)
[dependency_nodes, flat_list] = BuildDependencyList(targets)
if root_targets:
# Remove, from |targets| and |flat_list|, the targets that are not deep
# dependencies of the targets specified in |root_targets|.
targets, flat_list = PruneUnwantedTargets(
targets, flat_list, dependency_nodes, root_targets, data)
# Check that no two targets in the same directory have the same name.
VerifyNoCollidingTargets(flat_list)
# Handle dependent settings of various types.
for settings_type in ['all_dependent_settings',
'direct_dependent_settings',
'link_settings']:
DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)
# Take out the dependent settings now that they've been published to all
# of the targets that require them.
for target in flat_list:
if settings_type in targets[target]:
del targets[target][settings_type]
# Make sure static libraries don't declare dependencies on other static
# libraries, but that linkables depend on all unlinked static libraries
# that they need so that their link steps will be correct.
gii = generator_input_info
if gii['generator_wants_static_library_dependencies_adjusted']:
AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
gii['generator_wants_sorted_dependencies'])
# Apply "post"/"late"/"target" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATE, variables, build_file)
# Move everything that can go into a "configurations" section into one.
for target in flat_list:
target_dict = targets[target]
SetUpConfigurations(target, target_dict)
# Apply exclude (!) and regex (/) list filters.
for target in flat_list:
target_dict = targets[target]
ProcessListFiltersInDict(target, target_dict)
# Apply "latelate" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATELATE, variables, build_file)
# Make sure that the rules make sense, and build up rule_sources lists as
# needed. Not all generators will need to use the rule_sources lists, but
# some may, and it seems best to build the list in a common spot.
# Also validate actions and run_as elements in targets.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ValidateTargetType(target, target_dict)
ValidateSourcesInTarget(target, target_dict, build_file,
duplicate_basename_check)
ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
ValidateRunAsInTarget(target, target_dict, build_file)
ValidateActionsInTarget(target, target_dict, build_file)
# Generators might not expect ints. Turn them into strs.
TurnIntIntoStrInDict(data)
# TODO(mark): Return |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
return [flat_list, targets, data]
| mit |
pculture/unisubs | apps/videos/decorators.py | 6 | 3137 | from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.db import models
from django.http import Http404
from django.utils.functional import wraps
from videos.models import Video
from subtitles.models import SubtitleVersion
def get_object_or_403(request, qs, **params):
"""Get an object or throw a PermissionDenied error
This function works similarly to Django's get_object_or_404. However, if
the object is not found it will:
* Return a 403 error for non-staff users
* Return a 404 error for staff users
This prevents telling non-staff users if a video id exists or not, which
is somewhat of a security concern (see pculture/amara-enteprise#97)
"""
if isinstance(qs, type) and issubclass(qs, models.Model):
qs = qs.objects.all()
try:
return qs.get(**params)
except ObjectDoesNotExist:
if request.user.is_staff:
raise Http404()
else:
raise PermissionDenied()
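# Illustrative usage sketch (hypothetical call, not part of this module):
# video = get_object_or_403(request, Video, video_id=video_id)
# A missing id raises PermissionDenied for regular users and Http404 for
# staff, so anonymous callers cannot probe which video ids exist.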
def get_video_from_code(func):
"""
Wraps a view with a signature such as view(request, video_id, ...)
to -> view(request, video, ...), where video is a Video instance
and authorization credentials for viewing have been checked
for the user on that request.
"""
def wrapper(request, video_id, *args, **kwargs):
qs = Video.objects.select_related('teamvideo')
video = get_object_or_403(request, qs, video_id=video_id)
if not video.can_user_see(request.user):
raise PermissionDenied()
return func(request, video, *args, **kwargs)
return wraps(func)(wrapper)
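# Illustrative usage sketch (hypothetical view, not part of this module):
# @get_video_from_code
# def video_page(request, video):
#     ...  # |video| is a Video instance; visibility has already been checked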
def get_cached_video_from_code(cache_pattern):
"""
Like get_video_from_code(), but uses Video.cache.get_instance() to get a
cached version of the video.
"""
def decorator(func):
def wrapper(request, video_id, *args, **kwargs):
try:
video = Video.cache.get_instance_by_video_id(video_id,
cache_pattern)
except Video.DoesNotExist:
raise Http404
request.use_cached_user()
if not video.can_user_see(request.user):
raise PermissionDenied()
return func(request, video, *args, **kwargs)
return wraps(func)(wrapper)
return decorator
def get_video_revision(func):
"""
Wraps a view with a signature such as view(request, pk, ...)
to -> view(request, version, ...), where version is a SubtitleVersion instance
and authorization credentials for viewing have been checked
for the user on that request.
"""
def wrapper(request, video_id=None, pk=None, *args, **kwargs):
version = get_object_or_403(request, SubtitleVersion.objects.extant(),
pk=pk)
id = video_id if video_id else version.video.video_id
video = get_object_or_403(request, Video, video_id=id)
if not video.can_user_see(request.user):
raise Http404
return func(request, version, *args, **kwargs)
return wraps(func)(wrapper)
| agpl-3.0 |
Jarob22/selenium | py/test/selenium/webdriver/safari/conftest.py | 41 | 1087 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.webdriver import Safari
@pytest.fixture
def driver_class():
return Safari
@pytest.fixture
def driver_kwargs():
return {}
@pytest.fixture
def driver(driver_class, driver_kwargs):
driver = driver_class(**driver_kwargs)
yield driver
driver.quit()
| apache-2.0 |
AlbertoPeon/invenio | modules/webmessage/lib/webmessage.py | 20 | 19732 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" WebMessage module, messaging system"""
__revision__ = "$Id$"
import invenio.webmessage_dblayer as db
from invenio.webmessage_config import CFG_WEBMESSAGE_STATUS_CODE, \
CFG_WEBMESSAGE_RESULTS_FIELD, \
CFG_WEBMESSAGE_SEPARATOR, \
CFG_WEBMESSAGE_ROLES_WITHOUT_QUOTA, \
InvenioWebMessageError
from invenio.config import CFG_SITE_LANG, \
CFG_WEBMESSAGE_MAX_SIZE_OF_MESSAGE
from invenio.messages import gettext_set_language
from invenio.dateutils import datetext_default, get_datetext
from invenio.htmlutils import escape_html
from invenio.webuser import list_users_in_roles
try:
import invenio.template
webmessage_templates = invenio.template.load('webmessage')
except:
pass
from invenio.errorlib import register_exception
def perform_request_display_msg(uid, msgid, ln=CFG_SITE_LANG):
"""
Displays a specific message
@param uid: user id
@param msgid: message id
@return: body
"""
_ = gettext_set_language(ln)
body = ""
if (db.check_user_owns_message(uid, msgid) == 0):
# The user doesn't own this message
try:
raise InvenioWebMessageError(_('Sorry, this message is not in your mailbox.'))
except InvenioWebMessageError, exc:
register_exception()
body = webmessage_templates.tmpl_error(exc.message, ln)
return body
else:
(msg_id,
msg_from_id, msg_from_nickname,
msg_sent_to, msg_sent_to_group,
msg_subject, msg_body,
msg_sent_date, msg_received_date,
msg_status) = db.get_message(uid, msgid)
if (msg_id == ""):
# The message exists in table user_msgMESSAGE
# but not in table msgMESSAGE => table inconsistency
try:
raise InvenioWebMessageError(_('This message does not exist.'))
except InvenioWebMessageError, exc:
register_exception()
body = webmessage_templates.tmpl_error(exc.message, ln)
return body
else:
if (msg_status == CFG_WEBMESSAGE_STATUS_CODE['NEW']):
db.set_message_status(uid, msgid,
CFG_WEBMESSAGE_STATUS_CODE['READ'])
body = webmessage_templates.tmpl_display_msg(
msg_id,
msg_from_id,
msg_from_nickname,
msg_sent_to,
msg_sent_to_group,
msg_subject,
msg_body,
msg_sent_date,
msg_received_date,
ln)
return body
def perform_request_display(uid, warnings=[], infos=[], ln=CFG_SITE_LANG):
"""
Displays the user's Inbox
@param uid: user id
@return: body with warnings
"""
body = ""
rows = []
rows = db.get_all_messages_for_user(uid)
nb_messages = db.count_nb_messages(uid)
no_quota_users = list_users_in_roles(CFG_WEBMESSAGE_ROLES_WITHOUT_QUOTA)
no_quota = False
if uid in no_quota_users:
no_quota = True
body = webmessage_templates.tmpl_display_inbox(messages=rows,
infos=infos,
warnings=warnings,
nb_messages=nb_messages,
no_quota=no_quota,
ln=ln)
return body
def perform_request_delete_msg(uid, msgid, ln=CFG_SITE_LANG):
"""
Delete a given message from user inbox
@param uid: user id (int)
@param msgid: message id (int)
@param ln: language
@return: body with warnings
"""
_ = gettext_set_language(ln)
warnings = []
infos = []
body = ""
if (db.check_user_owns_message(uid, msgid) == 0):
# The user doesn't own this message
try:
raise InvenioWebMessageError(_('Sorry, this message is not in your mailbox.'))
except InvenioWebMessageError, exc:
register_exception()
body = webmessage_templates.tmpl_error(exc.message, ln)
return body
else:
if (db.delete_message_from_user_inbox(uid, msgid) == 0):
warnings.append(_("The message could not be deleted."))
else:
infos.append(_("The message was successfully deleted."))
return perform_request_display(uid, warnings, infos, ln)
def perform_request_delete_all(uid, confirmed=False, ln=CFG_SITE_LANG):
"""
Delete every message for a given user
@param uid: user id (int)
@param confirmed: 0 will produce a confirmation message
@param ln: language
@return: body with warnings
"""
infos = []
warnings = []
_ = gettext_set_language(ln)
if confirmed:
db.delete_all_messages(uid)
infos = [_("Your mailbox has been emptied.")]
return perform_request_display(uid, warnings, infos, ln)
else:
body = webmessage_templates.tmpl_confirm_delete(ln)
return body
def perform_request_write(uid,
msg_reply_id="",
msg_to="",
msg_to_group="",
msg_subject="",
msg_body="",
ln=CFG_SITE_LANG):
"""
Display a 'write a message' page.
@param uid: user id.
@type uid: int
@param msg_reply_id: if this message is a reply to another, other's ID.
@type msg_reply_id: int
@param msg_to: comma separated usernames.
@type msg_to: string
@param msg_to_group: comma separated groupnames.
@type msg_to_group: string
@param msg_subject: message subject.
@type msg_subject: string
@param msg_body: message body.
@type msg_body: string
@param ln: language.
@type ln: string
@return: body with warnings.
"""
warnings = []
body = ""
_ = gettext_set_language(ln)
msg_from_nickname = ""
msg_id = 0
if (msg_reply_id):
if (db.check_user_owns_message(uid, msg_reply_id) == 0):
# The user doesn't own this message
try:
raise InvenioWebMessageError(_('Sorry, this message is not in your mailbox.'))
except InvenioWebMessageError, exc:
register_exception()
body = webmessage_templates.tmpl_error(exc.message, ln)
return body
else:
# dummy == variable name to make pylint and pychecker happy!
(msg_id,
msg_from_id, msg_from_nickname,
dummy, dummy,
msg_subject, msg_body,
dummy, dummy, dummy) = db.get_message(uid, msg_reply_id)
if (msg_id == ""):
# The message exists in table user_msgMESSAGE
# but not in table msgMESSAGE => table inconsistency
try:
raise InvenioWebMessageError(_('This message does not exist.'))
except InvenioWebMessageError, exc:
register_exception()
body = webmessage_templates.tmpl_error(exc.message, ln)
return body
else:
msg_to = msg_from_nickname or str(msg_from_id)
body = webmessage_templates.tmpl_write(msg_to=msg_to,
msg_to_group=msg_to_group,
msg_id=msg_id,
msg_subject=msg_subject,
msg_body=msg_body,
warnings=[],
ln=ln)
return body
def perform_request_write_with_search(
uid,
msg_to_user="",
msg_to_group="",
msg_subject="",
msg_body="",
msg_send_year=0,
msg_send_month=0,
msg_send_day=0,
names_selected=[],
search_pattern="",
results_field=CFG_WEBMESSAGE_RESULTS_FIELD['NONE'],
add_values=0,
ln=CFG_SITE_LANG):
"""
Display a write message page, with prefilled values
@param msg_to_user: comma separated usernames (str)
@param msg_to_group: comma separated groupnames (str)
@param msg_subject: message subject (str)
@param msg_body: message body (string)
@param msg_send_year: year to send this message on (int)
@param msg_send_month: month to send this message on (int)
@param msg_send_day: day to send this message on (int)
@param users_to_add: list of usernames ['str'] to add to msg_to_user
@param groups_to_add: list of groupnames ['str'] to add to msg_to_group
@param user_search_pattern: will search users with this pattern (str)
@param group_search_pattern: will search groups with this pattern (str)
@param mode_user: if 1 display user search box, else group search box
@param add_values: if 1, users_to_add will be added to the msg_to_user field.
@param ln: language
@return: body with warnings
"""
warnings = []
search_results_list = []
def cat_names(name1, name2):
""" name1, name2 => 'name1, name2' """
return name1 + CFG_WEBMESSAGE_SEPARATOR + " " + name2
if results_field == CFG_WEBMESSAGE_RESULTS_FIELD['USER']:
if add_values and len(names_selected):
usernames_to_add = reduce(cat_names, names_selected)
if msg_to_user:
msg_to_user = cat_names(msg_to_user, usernames_to_add)
else:
msg_to_user = usernames_to_add
users_found = db.get_nicknames_like(search_pattern)
if users_found:
for user_name in users_found:
search_results_list.append((user_name[0],
user_name[0] in names_selected))
elif results_field == CFG_WEBMESSAGE_RESULTS_FIELD['GROUP']:
if add_values and len(names_selected):
groupnames_to_add = reduce(cat_names, names_selected)
if msg_to_group:
msg_to_group = cat_names(msg_to_group, groupnames_to_add)
else:
msg_to_group = groupnames_to_add
groups_dict = db.get_groupnames_like(uid, search_pattern)
groups_found = groups_dict.values()
if groups_found:
for group_name in groups_found:
search_results_list.append((group_name,
group_name in names_selected))
body = webmessage_templates.tmpl_write(
msg_to=msg_to_user,
msg_to_group=msg_to_group,
msg_subject=msg_subject,
msg_body=msg_body,
msg_send_year=msg_send_year,
msg_send_month=msg_send_month,
msg_send_day=msg_send_day,
warnings=warnings,
search_results_list=search_results_list,
search_pattern=search_pattern,
results_field=results_field,
ln=ln)
return body
def perform_request_send(uid,
msg_to_user="",
msg_to_group="",
msg_subject="",
msg_body="",
msg_send_year=0,
msg_send_month=0,
msg_send_day=0,
ln=CFG_SITE_LANG,
use_email_address = 0):
"""
Send a message. If unable, return warnings to the write page.
@param uid: id of user from (int)
@param msg_to_user: comma separated usernames (recipients) (str)
@param msg_to_group: comma separated groupnames (recipients) (str)
@param msg_subject: subject of message (str)
@param msg_body: body of message (str)
@param msg_send_year: send this message on year x (int)
@param msg_send_month: send this message on month y (int)
@param msg_send_day: send this message on day z (int)
@param ln: language
@return: (body with warnings, title, navtrail)
"""
_ = gettext_set_language(ln)
def strip_spaces(text):
"""suppress spaces before and after x (str)"""
return text.strip()
# wash user input
users_to = map(strip_spaces, msg_to_user.split(CFG_WEBMESSAGE_SEPARATOR))
groups_to = map(strip_spaces, msg_to_group.split(CFG_WEBMESSAGE_SEPARATOR))
if users_to == ['']:
users_to = []
if groups_to == ['']:
groups_to = []
warnings = []
infos = []
problem = None
users_to_str = CFG_WEBMESSAGE_SEPARATOR.join(users_to)
groups_to_str = CFG_WEBMESSAGE_SEPARATOR.join(groups_to)
send_on_date = get_datetext(msg_send_year, msg_send_month, msg_send_day)
if (msg_send_year == msg_send_month == msg_send_day == 0):
status = CFG_WEBMESSAGE_STATUS_CODE['NEW']
else:
status = CFG_WEBMESSAGE_STATUS_CODE['REMINDER']
if send_on_date == datetext_default:
warning = \
_("The chosen date (%(x_year)i/%(x_month)i/%(x_day)i) is invalid.")
warning = warning % {'x_year': msg_send_year,
'x_month': msg_send_month,
'x_day': msg_send_day}
warnings.append(warning)
problem = True
if not(users_to_str or groups_to_str):
# <=> not(users_to_str) AND not(groups_to_str)
warnings.append(_("Please enter a user name or a group name."))
problem = True
if len(msg_body) > CFG_WEBMESSAGE_MAX_SIZE_OF_MESSAGE:
warnings.append(_("Your message is too long, please edit it. Maximum size allowed is %i characters.") % \
(CFG_WEBMESSAGE_MAX_SIZE_OF_MESSAGE,))
problem = True
if use_email_address == 0:
users_dict = db.get_uids_from_nicks(users_to)
users_to = users_dict.items() # users_to=[(nick, uid),(nick2, uid2)]
elif use_email_address == 1:
users_dict = db.get_uids_from_emails(users_to)
users_to = users_dict.items() # users_to=[(email, uid),(email2, uid2)]
groups_dict = db.get_gids_from_groupnames(groups_to)
groups_to = groups_dict.items()
gids_to = []
for (group_name, group_id) in groups_to:
if not(group_id):
warnings.append(_("Group %s does not exist.") % \
(escape_html(group_name)))
problem = 1
else:
gids_to.append(group_id)
# Get uids from gids
uids_from_group = db.get_uids_members_of_groups(gids_to)
# Add the original uids, and make sure there is no double values.
tmp_dict = {}
for uid_receiver in uids_from_group:
tmp_dict[uid_receiver] = None
for (user_nick, user_id) in users_to:
if user_id:
if user_id not in tmp_dict:
uids_from_group.append(user_id)
tmp_dict[user_id] = None
else:
if type(user_nick) == int or \
type(user_nick) == str and user_nick.isdigit():
user_nick = int(user_nick)
if db.user_exists(user_nick) and user_nick not in tmp_dict:
uids_from_group.append(user_nick)
tmp_dict[user_nick] = None
else:
warnings.append(_("User %s does not exist.")% \
(escape_html(user_nick)))
problem = True
if problem:
body = webmessage_templates.tmpl_write(msg_to=users_to_str,
msg_to_group=groups_to_str,
msg_subject=msg_subject,
msg_body=msg_body,
msg_send_year=msg_send_year,
msg_send_month=msg_send_month,
msg_send_day=msg_send_day,
warnings=warnings,
ln=ln)
title = _("Write a message")
navtrail = get_navtrail(ln, title)
return (body, title, navtrail)
else:
msg_id = db.create_message(uid,
users_to_str, groups_to_str,
msg_subject, msg_body,
send_on_date)
uid_problem = db.send_message(uids_from_group, msg_id, status)
if len(uid_problem) > 0:
usernames_problem_dict = db.get_nicks_from_uids(uid_problem)
usernames_problem = usernames_problem_dict.values()
def listing(name1, name2):
""" name1, name2 => 'name1, name2' """
return str(name1) + ", " + str(name2)
warning = _("Your message could not be sent to the following recipients due to their quota:") + " "
warnings.append(warning + reduce(listing, usernames_problem))
if len(uids_from_group) != len(uid_problem):
infos.append(_("Your message has been sent."))
else:
db.check_if_need_to_delete_message_permanently([msg_id])
body = perform_request_display(uid, warnings,
infos, ln)
title = _("Your Messages")
return (body, title, get_navtrail(ln))
def account_new_mail(uid, ln=CFG_SITE_LANG):
"""
display new mail info for myaccount.py page.
@param uid: user id (int)
@param ln: language
@return: html body
"""
nb_new_mail = db.get_nb_new_messages_for_user(uid)
total_mail = db.get_nb_readable_messages_for_user(uid)
return webmessage_templates.tmpl_account_new_mail(nb_new_mail,
total_mail, ln)
def get_navtrail(ln=CFG_SITE_LANG, title=""):
"""
gets the navtrail for title...
@param title: title of the page
@param ln: language
@return: HTML output
"""
navtrail = webmessage_templates.tmpl_navtrail(ln, title)
return navtrail
| gpl-2.0 |
rafaelmartins/rst2pdf | rst2pdf/tests/test.py | 10 | 5615 | # -*- coding: utf-8 -*-
from autotest import MD5Info, PathInfo, globjoin
from autotest import run_single, dirname, checkmd5
import sys, os
import nose.plugins.skip
class RunTest:
def __init__(self,f):
basename = os.path.basename(f)
self.description = basename
mprefix = os.path.join(PathInfo.md5dir, basename)[:-4]
md5file = mprefix + '.json'
ignfile = os.path.join(PathInfo.inpdir , basename[:-4])+'.ignore'
info=MD5Info()
self.skip=False
self.openIssue=False
if os.path.exists(ignfile):
self.skip=True
if os.path.exists(md5file):
f = open(md5file, 'rb')
exec f in info
f.close()
if info.good_md5 in [[],['sentinel']]:
# This is an open issue or something that can't be checked automatically
self.openIssue=True
def __call__(self,f):
if self.skip:
raise nose.plugins.skip.SkipTest
elif self.openIssue:
assert False, 'Test has no known good output (Open Issue)'
else:
key, errcode = run_single(f)
if key in ['incomplete']:
raise nose.plugins.skip.SkipTest
assert key == 'good', '%s is not good: %s'%(f,key)
from execmgr import textexec, default_logger as log
import shlex
def run_installed_single(inpfname):
"""Like run_single, but runs the test using the installed version
of rst2pdf"""
iprefix = os.path.splitext(inpfname)[0]
basename = os.path.basename(iprefix)
if os.path.exists(iprefix + '.ignore'):
return 'ignored', 0
oprefix = os.path.join(PathInfo.outdir, basename)
mprefix = os.path.join(PathInfo.md5dir, basename)
outpdf = oprefix + '.pdf'
outtext = oprefix + '.log'
md5file = mprefix + '.json'
inpfname = iprefix + '.txt'
style = iprefix + '.style'
cli = iprefix + '.cli'
if os.path.isfile(cli):
f = open(cli)
extraargs=shlex.split(f.read())
f.close()
else:
extraargs=[]
args = ['rst2pdf'] + ['--date-invariant', '-v', os.path.basename(inpfname)]+extraargs
if os.path.exists(style):
args.extend(('-s', os.path.basename(style)))
args.extend(('-o', outpdf))
errcode, result = textexec(args, cwd=dirname(inpfname), python_proc=None)
checkinfo = checkmd5(outpdf, md5file, result, None, errcode, iprefix)
log(result, '')
outf = open(outtext, 'wb')
outf.write('\n'.join(result))
outf.close()
return checkinfo, errcode
class RunInstalledTest:
def __init__(self,f):
basename = os.path.basename(f)
self.description = basename
mprefix = os.path.join(PathInfo.md5dir, basename)[:-4]
md5file = mprefix + '.json'
ignfile = os.path.join(PathInfo.inpdir , basename[:-4])+'.ignore'
info=MD5Info()
self.skip=False
self.openIssue=False
if os.path.exists(ignfile):
self.skip=True
if os.path.exists(md5file):
f = open(md5file, 'rb')
exec f in info
f.close()
if info.good_md5 in [[],['sentinel']]:
# This is an open issue or something that can't be checked automatically
self.openIssue=True
def __call__(self,f):
if self.skip:
raise nose.plugins.skip.SkipTest
elif self.openIssue:
assert False, 'Test has no known good output (Open Issue)'
else:
key, errcode = run_installed_single(f)
if key in ['incomplete']:
raise nose.plugins.skip.SkipTest
assert key == 'good', '%s is not good: %s'%(f,key)
class RunSphinxTest:
def __init__(self,f):
basename = os.path.basename(f[:-1])
self.description = basename
mprefix = os.path.join(PathInfo.md5dir, basename)
md5file = mprefix + '.json'
ignfile = os.path.join(PathInfo.inpdir , basename)+'.ignore'
info=MD5Info()
self.skip=False
self.openIssue=False
if os.path.exists(ignfile):
self.skip=True
if os.path.exists(md5file):
f = open(md5file, 'rb')
exec f in info
f.close()
if info.good_md5 in [[],['sentinel']]:
# This is an open issue or something that can't be checked automatically
self.openIssue=True
def __call__(self,f):
if self.skip:
raise nose.plugins.skip.SkipTest
elif self.openIssue:
assert False, 'Test has no known good output (Open Issue)'
else:
key, errcode = run_single(f)
if key in ['incomplete']:
raise nose.plugins.skip.SkipTest
assert key == 'good', '%s is not good: %s'%(f,key)
def regulartest():
'''To run these tests (similar to autotest), run
nosetests -i regulartest'''
testfiles = globjoin(PathInfo.inpdir, '*.txt')
results = {}
for fname in testfiles:
yield RunTest(fname), fname
def releasetest():
'''To run these tests (after you run setup.py install), run
nosetests -i releasetest'''
testfiles = globjoin(PathInfo.inpdir, '*.txt')
results = {}
for fname in testfiles:
yield RunInstalledTest(fname), fname
def sphinxtest():
'''To run these tests , run nosetests -i sphinxtest'''
testfiles = globjoin(PathInfo.inpdir, 'sphinx*/')
results = {}
for fname in testfiles:
yield RunSphinxTest(fname), fname
def setup():
PathInfo.add_coverage()
| mit |
charlesmastin/django-simple-cms | simple_cms/contrib/translated_model/haystack/solr.py | 2 | 1654 | from pysolr import SolrError
from haystack.backends.solr_backend import SolrSearchBackend, SolrSearchQuery
from haystack.backends import BaseEngine
from haystack.utils import get_identifier
from simple_cms.contrib.translated_model.models import Language
class MultiLanguageSolrBackend(SolrSearchBackend):
def update(self, index, iterable, commit=True):
docs = []
for language in Language.objects.get_active():
for obj in iterable:
try:
docs.append(index.full_prepare(obj, language))
except UnicodeDecodeError:
if not self.silently_fail:
raise
# We'll log the object identifier but won't include the actual object
# to avoid the possibility of that generating encoding errors while
# processing the log message:
self.log.error(u"UnicodeDecodeError while preparing object for update", exc_info=True, extra={
"data": {
"index": index,
"object": '%s.%s' % (get_identifier(obj), language.code)
}
})
if len(docs) > 0:
try:
self.conn.add(docs, commit=commit, boost=index.get_field_weights())
except (IOError, SolrError), e:
if not self.silently_fail:
raise
self.log.error("Failed to add documents to Solr: %s", e)
class MultiLanguageSolrEngine(BaseEngine):
backend = MultiLanguageSolrBackend
query = SolrSearchQuery | mit |
stenskjaer/scrapy | scrapy/utils/misc.py | 37 | 3350 | """Helper functions which doesn't fit anywhere else"""
import re
import hashlib
from importlib import import_module
from pkgutil import iter_modules
import six
from w3lib.html import replace_entities
from scrapy.utils.python import flatten, to_unicode
from scrapy.item import BaseItem
_ITERABLE_SINGLE_VALUES = dict, BaseItem, six.text_type, bytes
def arg_to_iter(arg):
"""Convert an argument to an iterable. The argument can be a None, single
value, or an iterable.
Exception: if arg is a dict, [arg] will be returned
"""
if arg is None:
return []
elif not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):
return arg
else:
return [arg]
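# Editor's illustration (not part of the original module):
#   arg_to_iter(None)        -> []
#   arg_to_iter('scrapy')    -> ['scrapy']    # strings count as single values
#   arg_to_iter({'a': 1})    -> [{'a': 1}]    # dicts count as single values
#   arg_to_iter([1, 2, 3])   -> [1, 2, 3]     # other iterables pass through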
def load_object(path):
"""Load an object given its absolute object path, and return it.
object can be a class, function, variable or instance.
path e.g.: 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'
"""
try:
dot = path.rindex('.')
except ValueError:
raise ValueError("Error loading object '%s': not a full path" % path)
module, name = path[:dot], path[dot+1:]
mod = import_module(module)
try:
obj = getattr(mod, name)
except AttributeError:
raise NameError("Module '%s' doesn't define any object named '%s'" % (module, name))
return obj
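# Usage sketch (editor's addition; the dotted path is the docstring's own example):
#   cls = load_object('scrapy.downloadermiddlewares.redirect.RedirectMiddleware')
# A bare name without any dot raises ValueError, and a path whose module lacks
# the named attribute raises NameError, as implemented above.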
def walk_modules(path):
"""Loads a module and all its submodules from a the given module path and
returns them. If *any* module throws an exception while importing, that
exception is thrown back.
For example: walk_modules('scrapy.utils')
"""
mods = []
mod = import_module(path)
mods.append(mod)
if hasattr(mod, '__path__'):
for _, subpath, ispkg in iter_modules(mod.__path__):
fullpath = path + '.' + subpath
if ispkg:
mods += walk_modules(fullpath)
else:
submod = import_module(fullpath)
mods.append(submod)
return mods
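# Usage sketch (editor's addition): walk_modules('scrapy.utils') returns the
# 'scrapy.utils' package itself followed by one module object per submodule
# (e.g. scrapy.utils.misc, scrapy.utils.python, ...); import errors propagate.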
def extract_regex(regex, text, encoding='utf-8'):
"""Extract a list of unicode strings from the given text/encoding using the following policies:
* if the regex contains a named group called "extract" that will be returned
* if the regex contains multiple numbered groups, all those will be returned (flattened)
* if the regex doesn't contain any group, the entire regex match is returned
"""
if isinstance(regex, six.string_types):
regex = re.compile(regex, re.UNICODE)
try:
strings = [regex.search(text).group('extract')] # named group
except:
strings = regex.findall(text) # full regex or numbered groups
strings = flatten(strings)
if isinstance(text, six.text_type):
return [replace_entities(s, keep=['lt', 'amp']) for s in strings]
else:
return [replace_entities(to_unicode(s, encoding), keep=['lt', 'amp'])
for s in strings]
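# Editor's illustration (hypothetical pattern and text):
#   extract_regex(r'Price: (?P<extract>[\d.]+)', 'Price: 19.99') -> [u'19.99']
# With unnamed groups all group matches are returned (flattened); with no groups
# the whole match is returned.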
def md5sum(file):
"""Calculate the md5 checksum of a file-like object without reading its
whole content in memory.
>>> from io import BytesIO
>>> md5sum(BytesIO(b'file content to hash'))
'784406af91dd5a54fbb9c84c2236595a'
"""
m = hashlib.md5()
while True:
d = file.read(8096)
if not d:
break
m.update(d)
return m.hexdigest()
| bsd-3-clause |
vhazali/cs5331 | assignment3/verifier/scapy-2.3.1/scapy/contrib/ikev2.py | 13 | 14616 | #!/usr/bin/env python
# http://trac.secdev.org/scapy/ticket/353
# scapy.contrib.description = IKEv2
# scapy.contrib.status = loads
from scapy.all import *
import logging
## Modified from the original ISAKMP code by Yaron Sheffer <[email protected]>, June 2010.
import struct
from scapy.packet import *
from scapy.fields import *
from scapy.ansmachine import *
from scapy.layers.inet import IP,UDP
from scapy.sendrecv import sr
# see http://www.iana.org/assignments/ikev2-parameters for details
IKEv2AttributeTypes= { "Encryption": (1, { "DES-IV64" : 1,
"DES" : 2,
"3DES" : 3,
"RC5" : 4,
"IDEA" : 5,
"CAST" : 6,
"Blowfish" : 7,
"3IDEA" : 8,
"DES-IV32" : 9,
"AES-CBC" : 12,
"AES-CTR" : 13,
"AES-CCM-8" : 14,
"AES-CCM-12" : 15,
"AES-CCM-16" : 16,
"AES-GCM-8ICV" : 18,
"AES-GCM-12ICV" : 19,
"AES-GCM-16ICV" : 20,
"Camellia-CBC" : 23,
"Camellia-CTR" : 24,
"Camellia-CCM-8ICV" : 25,
"Camellia-CCM-12ICV" : 26,
"Camellia-CCM-16ICV" : 27,
}, 0),
"PRF": (2, {"PRF_HMAC_MD5":1,
"PRF_HMAC_SHA1":2,
"PRF_HMAC_TIGER":3,
"PRF_AES128_XCBC":4,
"PRF_HMAC_SHA2_256":5,
"PRF_HMAC_SHA2_384":6,
"PRF_HMAC_SHA2_512":7,
"PRF_AES128_CMAC":8,
}, 0),
"Integrity": (3, { "HMAC-MD5-96": 1,
"HMAC-SHA1-96": 2,
"DES-MAC": 3,
"KPDK-MD5": 4,
"AES-XCBC-96": 5,
"HMAC-MD5-128": 6,
"HMAC-SHA1-160": 7,
"AES-CMAC-96": 8,
"AES-128-GMAC": 9,
"AES-192-GMAC": 10,
"AES-256-GMAC": 11,
"SHA2-256-128": 12,
"SHA2-384-192": 13,
"SHA2-512-256": 14,
}, 0),
"GroupDesc": (4, { "768MODPgr" : 1,
"1024MODPgr" : 2,
"1536MODPgr" : 5,
"2048MODPgr" : 14,
"3072MODPgr" : 15,
"4096MODPgr" : 16,
"6144MODPgr" : 17,
"8192MODPgr" : 18,
"256randECPgr" : 19,
"384randECPgr" : 20,
"521randECPgr" : 21,
"1024MODP160POSgr" : 22,
"2048MODP224POSgr" : 23,
"2048MODP256POSgr" : 24,
"192randECPgr" : 25,
"224randECPgr" : 26,
}, 0),
"Extended Sequence Number": (5, {"No ESN": 0,
"ESN": 1, }, 0),
}
# the name 'IKEv2TransformTypes' is actually a misnomer (since the table
# holds info for all IKEv2 Attribute types, not just transforms, but we'll
# keep it for backwards compatibility... for now at least
IKEv2TransformTypes = IKEv2AttributeTypes
IKEv2TransformNum = {}
for n in IKEv2TransformTypes:
val = IKEv2TransformTypes[n]
tmp = {}
for e in val[1]:
tmp[val[1][e]] = e
IKEv2TransformNum[val[0]] = (n,tmp, val[2])
IKEv2Transforms = {}
for n in IKEv2TransformTypes:
IKEv2Transforms[IKEv2TransformTypes[n][0]]=n
del(n)
del(e)
del(tmp)
del(val)
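# Editor's note (illustration, not in the original source): IKEv2TransformNum is
# the reverse lookup built above -- keyed by attribute-type number, each entry is
# (name, {id: name} map, flags) -- so e.g. IKEv2TransformNum[1][1][12] == "AES-CBC"
# and IKEv2Transforms[3] == "Integrity".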
# Note: Transform and Proposal can only be used inside the SA payload
IKEv2_payload_type = ["None", "", "Proposal", "Transform"]
IKEv2_payload_type.extend([""] * 29)
IKEv2_payload_type.extend(["SA","KE","IDi","IDr", "CERT","CERTREQ","AUTH","Nonce","Notify","Delete",
"VendorID","TSi","TSr","Encrypted","CP","EAP"])
IKEv2_exchange_type = [""] * 34
IKEv2_exchange_type.extend(["IKE_SA_INIT","IKE_AUTH","CREATE_CHILD_SA",
"INFORMATIONAL", "IKE_SESSION_RESUME"])
class IKEv2_class(Packet):
def guess_payload_class(self, payload):
np = self.next_payload
logging.debug("For IKEv2_class np=%d" % np)
if np == 0:
return conf.raw_layer
elif np < len(IKEv2_payload_type):
pt = IKEv2_payload_type[np]
logging.debug(globals().get("IKEv2_payload_%s" % pt, IKEv2_payload))
return globals().get("IKEv2_payload_%s" % pt, IKEv2_payload)
else:
return IKEv2_payload
class IKEv2(IKEv2_class): # rfc4306
name = "IKEv2"
fields_desc = [
StrFixedLenField("init_SPI","",8),
StrFixedLenField("resp_SPI","",8),
ByteEnumField("next_payload",0,IKEv2_payload_type),
XByteField("version",0x20), # IKEv2, right?
ByteEnumField("exch_type",0,IKEv2_exchange_type),
FlagsField("flags",0, 8, ["res0","res1","res2","Initiator","Version","Response","res6","res7"]),
IntField("id",0),
IntField("length",None)
]
def guess_payload_class(self, payload):
if self.flags & 1:
return conf.raw_layer
return IKEv2_class.guess_payload_class(self, payload)
def answers(self, other):
if isinstance(other, IKEv2):
if other.init_SPI == self.init_SPI:
return 1
return 0
def post_build(self, p, pay):
p += pay
if self.length is None:
p = p[:24]+struct.pack("!I",len(p))+p[28:]
return p
class IKEv2_Key_Length_Attribute(IntField):
# We only support the fixed-length Key Length attribute (the only one currently defined)
name="key length"
def __init__(self, name):
IntField.__init__(self, name, "0x800E0000")
def i2h(self, pkt, x):
return IntField.i2h(self, pkt, x & 0xFFFF)
def h2i(self, pkt, x):
return IntField.h2i(self, pkt, struct.pack("!I", 0x800E0000 | int(x, 0)))
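# Editor's note (sketch, not in the original source): 0x800E is the TV-format
# Key Length attribute (AF bit set, attribute type 14) with the key size in bits
# in the low 16 bits, so a 128-bit key travels as 0x800E0080 and i2h() above
# masks it back down to 128.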
class IKEv2_Transform_ID(ShortField):
def i2h(self, pkt, x):
if pkt == None:
return None
else:
map = IKEv2TransformNum[pkt.transform_type][1]
return map[x]
def h2i(self, pkt, x):
if pkt == None:
return None
else:
map = IKEv2TransformNum[pkt.transform_type][1]
for k in map.keys():
if map[k] == x:
return k
return None
class IKEv2_payload_Transform(IKEv2_class):
name = "IKE Transform"
fields_desc = [
ByteEnumField("next_payload",None,{0:"last", 3:"Transform"}),
ByteField("res",0),
ShortField("length",8),
ByteEnumField("transform_type",None,IKEv2Transforms),
ByteField("res2",0),
IKEv2_Transform_ID("transform_id", 0),
ConditionalField(IKEv2_Key_Length_Attribute("key_length"), lambda pkt: pkt.length > 8),
]
class IKEv2_payload_Proposal(IKEv2_class):
name = "IKEv2 Proposal"
fields_desc = [
ByteEnumField("next_payload",None,{0:"last", 2:"Proposal"}),
ByteField("res",0),
FieldLenField("length",None,"trans","H", adjust=lambda pkt,x:x+8),
ByteField("proposal",1),
ByteEnumField("proto",1,{1:"IKEv2"}),
FieldLenField("SPIsize",None,"SPI","B"),
ByteField("trans_nb",None),
StrLenField("SPI","",length_from=lambda x:x.SPIsize),
PacketLenField("trans",conf.raw_layer(),IKEv2_payload_Transform,length_from=lambda x:x.length-8),
]
class IKEv2_payload(IKEv2_class):
name = "IKEv2 Payload"
fields_desc = [
ByteEnumField("next_payload",None,IKEv2_payload_type),
FlagsField("flags",0, 8, ["critical","res1","res2","res3","res4","res5","res6","res7"]),
FieldLenField("length",None,"load","H", adjust=lambda pkt,x:x+4),
StrLenField("load","",length_from=lambda x:x.length-4),
]
class IKEv2_payload_VendorID(IKEv2_class):
name = "IKEv2 Vendor ID"
overload_fields = { IKEv2: { "next_payload":43 }}
fields_desc = [
ByteEnumField("next_payload",None,IKEv2_payload_type),
ByteField("res",0),
FieldLenField("length",None,"vendorID","H", adjust=lambda pkt,x:x+4),
StrLenField("vendorID","",length_from=lambda x:x.length-4),
]
class IKEv2_payload_Delete(IKEv2_class):
name = "IKEv2 Vendor ID"
overload_fields = { IKEv2: { "next_payload":42 }}
fields_desc = [
ByteEnumField("next_payload",None,IKEv2_payload_type),
ByteField("res",0),
FieldLenField("length",None,"vendorID","H", adjust=lambda pkt,x:x+4),
StrLenField("vendorID","",length_from=lambda x:x.length-4),
]
class IKEv2_payload_SA(IKEv2_class):
name = "IKEv2 SA"
overload_fields = { IKEv2: { "next_payload":33 }}
fields_desc = [
ByteEnumField("next_payload",None,IKEv2_payload_type),
ByteField("res",0),
FieldLenField("length",None,"prop","H", adjust=lambda pkt,x:x+4),
PacketLenField("prop",conf.raw_layer(),IKEv2_payload_Proposal,length_from=lambda x:x.length-4),
]
class IKEv2_payload_Nonce(IKEv2_class):
name = "IKEv2 Nonce"
overload_fields = { IKEv2: { "next_payload":40 }}
fields_desc = [
ByteEnumField("next_payload",None,IKEv2_payload_type),
ByteField("res",0),
FieldLenField("length",None,"load","H", adjust=lambda pkt,x:x+4),
StrLenField("load","",length_from=lambda x:x.length-4),
]
class IKEv2_payload_Notify(IKEv2_class):
name = "IKEv2 Notify"
overload_fields = { IKEv2: { "next_payload":41 }}
fields_desc = [
ByteEnumField("next_payload",None,IKEv2_payload_type),
ByteField("res",0),
FieldLenField("length",None,"load","H", adjust=lambda pkt,x:x+4),
StrLenField("load","",length_from=lambda x:x.length-4),
]
class IKEv2_payload_KE(IKEv2_class):
name = "IKEv2 Key Exchange"
overload_fields = { IKEv2: { "next_payload":34 }}
fields_desc = [
ByteEnumField("next_payload",None,IKEv2_payload_type),
ByteField("res",0),
FieldLenField("length",None,"load","H", adjust=lambda pkt,x:x+6),
ShortEnumField("group", 0, IKEv2TransformTypes['GroupDesc'][1]),
StrLenField("load","",length_from=lambda x:x.length-6),
]
class IKEv2_payload_IDi(IKEv2_class):
name = "IKEv2 Identification - Initiator"
overload_fields = { IKEv2: { "next_payload":35 }}
fields_desc = [
ByteEnumField("next_payload",None,IKEv2_payload_type),
ByteField("res",0),
FieldLenField("length",None,"load","H",adjust=lambda pkt,x:x+8),
ByteEnumField("IDtype",1,{1:"IPv4_addr", 11:"Key"}),
ByteEnumField("ProtoID",0,{0:"Unused"}),
ShortEnumField("Port",0,{0:"Unused"}),
# IPField("IdentData","127.0.0.1"),
StrLenField("load","",length_from=lambda x:x.length-8),
]
class IKEv2_payload_IDr(IKEv2_class):
name = "IKEv2 Identification - Responder"
overload_fields = { IKEv2: { "next_payload":36 }}
fields_desc = [
ByteEnumField("next_payload",None,IKEv2_payload_type),
ByteField("res",0),
FieldLenField("length",None,"load","H",adjust=lambda pkt,x:x+8),
ByteEnumField("IDtype",1,{1:"IPv4_addr", 11:"Key"}),
ByteEnumField("ProtoID",0,{0:"Unused"}),
ShortEnumField("Port",0,{0:"Unused"}),
# IPField("IdentData","127.0.0.1"),
StrLenField("load","",length_from=lambda x:x.length-8),
]
class IKEv2_payload_Encrypted(IKEv2_class):
name = "IKEv2 Encrypted and Authenticated"
overload_fields = { IKEv2: { "next_payload":46 }}
fields_desc = [
ByteEnumField("next_payload",None,IKEv2_payload_type),
ByteField("res",0),
FieldLenField("length",None,"load","H",adjust=lambda pkt,x:x+4),
StrLenField("load","",length_from=lambda x:x.length-4),
]
IKEv2_payload_type_overload = {}
for i in range(len(IKEv2_payload_type)):
name = "IKEv2_payload_%s" % IKEv2_payload_type[i]
if name in globals():
IKEv2_payload_type_overload[globals()[name]] = {"next_payload":i}
del(i)
del(name)
IKEv2_class.overload_fields = IKEv2_payload_type_overload.copy()
split_layers(UDP, ISAKMP, sport=500)
split_layers(UDP, ISAKMP, dport=500)
bind_layers( UDP, IKEv2, dport=500, sport=500) # TODO: distinguish IKEv1/IKEv2
bind_layers( UDP, IKEv2, dport=4500, sport=4500)
def ikev2scan(ip):
return sr(IP(dst=ip)/UDP()/IKEv2(init_SPI=RandString(8),
exch_type=34)/IKEv2_payload_SA(prop=IKEv2_payload_Proposal()))
# conf.debug_dissector = 1
if __name__ == "__main__":
interact(mydict=globals(), mybanner="IKEv2 alpha-level protocol implementation")
| mit |
ClaudeZoo/iDashBoardWeb | Web/views.py | 2 | 4833 | from django.shortcuts import render
from django.shortcuts import HttpResponse
from django.shortcuts import HttpResponseRedirect
from django.contrib.auth.models import User, Group
from django.shortcuts import render_to_response
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth import authenticate
from django.contrib.auth import login
from django.contrib.auth import logout
from Web.models import DUser
# Create your views here.
def home(request):
if request.user.is_authenticated():
is_administer = is_admin(request.user)
return render_to_response('home.html', locals())
else:
return render_to_response('index.html', locals())
def sign_up(request):
if request.method == 'POST':
if request.POST.get('username', '') and request.POST.get('password', '') and request.POST.get('email', ''):
username = request.POST.get('username', '')
password = request.POST.get('password', '')
email = request.POST.get('email', '')
if User.objects.filter(username=username).count() == 0:
new_user = User.objects.create_user(username=username, password=password, email=email)
duser = DUser(user=new_user)
if request.POST.get('phone', '') and request.POST.get('department', ''):
phone = request.POST.get('phone', '')
department = request.POST.get('department', '')
duser.phone = phone
duser.department = department
duser.save()
else:
form = UserCreationForm()
return render_to_response('signup.html', {'form': form})
return HttpResponseRedirect("/")
else:
form = UserCreationForm()
return render_to_response('signup.html', {'form': form})
@login_required
def settings(request):
is_administer = is_admin(request.user)
duser = request.user.duser
username = request.user.username
email = request.user.email
if duser.phone:
phone = duser.phone
if duser.department:
department = duser.department.encode('utf-8')
return render_to_response('settings.html', locals())
def is_admin(user):
try:
user.groups.get(name='administers')
except ObjectDoesNotExist:
return False
else:
return True
def user_login(request):
errors = []
if request.method == 'POST':
if not request.POST.get('username', ''):
errors.append('Enter a subject.')
if not request.POST.get('password', ''):
errors.append('Enter a message.')
if not errors:
username = request.POST.get('username','')
password = request.POST.get('password','')
user = authenticate(username=username, password=password)
if user is not None and user.is_active:
login(request, user)
redirect_url = request.GET.get('next', '')
print(redirect_url)
if redirect_url:
return HttpResponseRedirect(redirect_url)
else:
return HttpResponseRedirect('/')
else:
return HttpResponseRedirect('/')
return HttpResponseRedirect('/')
def user_logout(request):
logout(request)
return HttpResponseRedirect("/")
@login_required
def save_user_info(request):
duser = request.user.duser
if User.objects.filter(username=request.POST.get('username', '')).count() == 0:
if request.POST.get('username', '') and request.POST.get('email', ''):
request.user.username = request.POST.get('username', '')
request.user.email = request.POST.get('email', '')
request.user.save()
if request.POST.get('phone', '') and request.POST.get('department', ''):
duser.phone = request.POST.get('phone', '')
duser.department = request.POST.get('department', '').encode('utf-8')
duser.save()
return HttpResponseRedirect('/settings/')
@login_required
def change_password(request):
if request.POST.get('password', '') and request.POST.get('newpassword', ''):
password = request.POST.get('password', '')
new_password = request.POST.get('newpassword', '')
if request.user.check_password(password):
request.user.set_password(new_password)
request.user.save()
return HttpResponseRedirect('/settings/')
def validate_username(request):
user = User.objects.filter(username=request.GET.get('userName',''))
if len(user) == 0:
message = 'yes'
else:
message = 'no'
return HttpResponse(message)
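# Editor's illustration (URL pattern is hypothetical): this view backs an AJAX
# availability check, e.g.
#   GET /validate_username/?userName=alice  ->  "yes"  (name is free)
#                                               "no"   (name already taken)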
| unlicense |
ChameleonCloud/blazar | blazar/api/v1/request_log.py | 3 | 2908 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple middleware for request logging."""
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class RequestLog(object):
"""Middleware to write a simple request log to.
Borrowed from Paste Translogger
"""
format = ('%(REMOTE_ADDR)s "%(REQUEST_METHOD)s %(REQUEST_URI)s" '
'status: %(status)s len: %(bytes)s ')
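# Example of a produced log line (editor's illustration; address and URI made up):
#   10.0.0.5 "GET /v1/leases" status: 200 len: 1532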
def __init__(self, application):
self.application = application
def __call__(self, environ, start_response):
LOG.debug('Starting request: %s "%s %s"',
environ['REMOTE_ADDR'], environ['REQUEST_METHOD'],
self._get_uri(environ))
# Set the accept header if it is not otherwise set or is '*/*'. This
# ensures that error responses will be in JSON.
accept = environ.get('HTTP_ACCEPT')
if not accept or accept == '*/*':
environ['HTTP_ACCEPT'] = 'application/json'
if LOG.isEnabledFor(logging.INFO):
return self._log_app(environ, start_response)
else:
return self.application(environ, start_response)
@staticmethod
def _get_uri(environ):
req_uri = (environ.get('SCRIPT_NAME', '')
+ environ.get('PATH_INFO', ''))
if environ.get('QUERY_STRING'):
req_uri += '?' + environ['QUERY_STRING']
return req_uri
def _log_app(self, environ, start_response):
req_uri = self._get_uri(environ)
def replacement_start_response(status, headers, exc_info=None):
"""We need to gaze at the content-length, if set to write log info.
"""
size = None
for name, value in headers:
if name.lower() == 'content-length':
size = value
self.write_log(environ, req_uri, status, size)
return start_response(status, headers, exc_info)
return self.application(environ, replacement_start_response)
def write_log(self, environ, req_uri, status, size):
"""Write the log info out in a formatted form to ``LOG.info``.
"""
if size is None:
size = '-'
log_format = {
'REMOTE_ADDR': environ.get('REMOTE_ADDR', '-'),
'REQUEST_METHOD': environ['REQUEST_METHOD'],
'REQUEST_URI': req_uri,
'status': status.split(None, 1)[0],
'bytes': size
}
LOG.info(self.format, log_format)
| apache-2.0 |
knehez/edx-platform | common/djangoapps/terrain/stubs/video_source.py | 181 | 1368 | """
Serve HTML5 video sources for acceptance tests
"""
from SimpleHTTPServer import SimpleHTTPRequestHandler
from .http import StubHttpService
from contextlib import contextmanager
import os
from logging import getLogger
LOGGER = getLogger(__name__)
class VideoSourceRequestHandler(SimpleHTTPRequestHandler):
"""
Request handler for serving video sources locally.
"""
def translate_path(self, path):
"""
Remove any extra parameters from the path.
For example /gizmo.mp4?1397160769634
becomes /gizmo.mp4
"""
root_dir = self.server.config.get('root_dir')
path = '{}{}'.format(root_dir, path)
return path.split('?')[0]
class VideoSourceHttpService(StubHttpService):
"""
Simple HTTP server for serving HTML5 Video sources locally for tests
"""
HANDLER_CLASS = VideoSourceRequestHandler
def __init__(self, port_num=0):
@contextmanager
def _remember_cwd():
"""
Files are automatically served from the current directory
so we need to change it, start the server, then set it back.
"""
curdir = os.getcwd()
try:
yield
finally:
os.chdir(curdir)
with _remember_cwd():
StubHttpService.__init__(self, port_num=port_num)
| agpl-3.0 |
jam891/omim | 3party/protobuf/python/google/protobuf/pyext/descriptor_cpp2_test.py | 73 | 2506 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.pyext behavior."""
__author__ = '[email protected] (Anuraag Agrawal)'
import os
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'cpp'
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION'] = '2'
# We must set the implementation version above before the google3 imports.
# pylint: disable=g-import-not-at-top
from google.apputils import basetest
from google.protobuf.internal import api_implementation
# Run all tests from the original module by putting them in our namespace.
# pylint: disable=wildcard-import
from google.protobuf.internal.descriptor_test import *
class ConfirmCppApi2Test(basetest.TestCase):
def testImplementationSetting(self):
self.assertEqual('cpp', api_implementation.Type())
self.assertEqual(2, api_implementation.Version())
if __name__ == '__main__':
basetest.main()
| apache-2.0 |
waseem18/oh-mainline | vendor/packages/Django/django/contrib/gis/db/models/manager.py | 505 | 3578 | from django.db.models.manager import Manager
from django.contrib.gis.db.models.query import GeoQuerySet
class GeoManager(Manager):
"Overrides Manager to return Geographic QuerySets."
# This manager should be used for queries on related fields
# so that geometry columns on Oracle and MySQL are selected
# properly.
use_for_related_fields = True
def get_query_set(self):
return GeoQuerySet(self.model, using=self._db)
def area(self, *args, **kwargs):
return self.get_query_set().area(*args, **kwargs)
def centroid(self, *args, **kwargs):
return self.get_query_set().centroid(*args, **kwargs)
def collect(self, *args, **kwargs):
return self.get_query_set().collect(*args, **kwargs)
def difference(self, *args, **kwargs):
return self.get_query_set().difference(*args, **kwargs)
def distance(self, *args, **kwargs):
return self.get_query_set().distance(*args, **kwargs)
def envelope(self, *args, **kwargs):
return self.get_query_set().envelope(*args, **kwargs)
def extent(self, *args, **kwargs):
return self.get_query_set().extent(*args, **kwargs)
def extent3d(self, *args, **kwargs):
return self.get_query_set().extent3d(*args, **kwargs)
def force_rhr(self, *args, **kwargs):
return self.get_query_set().force_rhr(*args, **kwargs)
def geohash(self, *args, **kwargs):
return self.get_query_set().geohash(*args, **kwargs)
def geojson(self, *args, **kwargs):
return self.get_query_set().geojson(*args, **kwargs)
def gml(self, *args, **kwargs):
return self.get_query_set().gml(*args, **kwargs)
def intersection(self, *args, **kwargs):
return self.get_query_set().intersection(*args, **kwargs)
def kml(self, *args, **kwargs):
return self.get_query_set().kml(*args, **kwargs)
def length(self, *args, **kwargs):
return self.get_query_set().length(*args, **kwargs)
def make_line(self, *args, **kwargs):
return self.get_query_set().make_line(*args, **kwargs)
def mem_size(self, *args, **kwargs):
return self.get_query_set().mem_size(*args, **kwargs)
def num_geom(self, *args, **kwargs):
return self.get_query_set().num_geom(*args, **kwargs)
def num_points(self, *args, **kwargs):
return self.get_query_set().num_points(*args, **kwargs)
def perimeter(self, *args, **kwargs):
return self.get_query_set().perimeter(*args, **kwargs)
def point_on_surface(self, *args, **kwargs):
return self.get_query_set().point_on_surface(*args, **kwargs)
def reverse_geom(self, *args, **kwargs):
return self.get_query_set().reverse_geom(*args, **kwargs)
def scale(self, *args, **kwargs):
return self.get_query_set().scale(*args, **kwargs)
def snap_to_grid(self, *args, **kwargs):
return self.get_query_set().snap_to_grid(*args, **kwargs)
def svg(self, *args, **kwargs):
return self.get_query_set().svg(*args, **kwargs)
def sym_difference(self, *args, **kwargs):
return self.get_query_set().sym_difference(*args, **kwargs)
def transform(self, *args, **kwargs):
return self.get_query_set().transform(*args, **kwargs)
def translate(self, *args, **kwargs):
return self.get_query_set().translate(*args, **kwargs)
def union(self, *args, **kwargs):
return self.get_query_set().union(*args, **kwargs)
def unionagg(self, *args, **kwargs):
return self.get_query_set().unionagg(*args, **kwargs)
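# Usage sketch (editor's addition; the model and field names are hypothetical):
#
#   class City(models.Model):
#       name = models.CharField(max_length=50)
#       point = models.PointField()
#       objects = GeoManager()
#
# City.objects then proxies the geographic QuerySet methods above, e.g.
# City.objects.distance(some_point) or City.objects.kml().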
| agpl-3.0 |
anandology/pyjamas | pygtkweb/demos/022-spinbutton.py | 7 | 5150 | #!/usr/bin/env python
# example spinbutton.py
import pygtk
pygtk.require('2.0')
import gtk
class SpinButtonExample:
def toggle_snap(self, widget, spin):
spin.set_snap_to_ticks(widget.get_active())
def toggle_numeric(self, widget, spin):
spin.set_numeric(widget.get_active())
def change_digits(self, widget, spin, spin1):
spin1.set_digits(spin.get_value_as_int())
def get_value(self, widget, data, spin, spin2, label):
if data == 1:
buf = "%d" % spin.get_value_as_int()
else:
buf = "%0.*f" % (spin2.get_value_as_int(),
spin.get_value())
label.set_text(buf)
def __init__(self):
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
window.connect("destroy", lambda w: gtk.main_quit())
window.set_title("Spin Button")
main_vbox = gtk.VBox(False, 5)
main_vbox.set_border_width(10)
window.add(main_vbox)
frame = gtk.Frame("Not accelerated")
main_vbox.pack_start(frame, True, True, 0)
vbox = gtk.VBox(False, 0)
vbox.set_border_width(5)
frame.add(vbox)
# Day, month, year spinners
hbox = gtk.HBox(False, 0)
vbox.pack_start(hbox, True, True, 5)
vbox2 = gtk.VBox(False, 0)
hbox.pack_start(vbox2, True, True, 5)
label = gtk.Label("Day :")
label.set_alignment(0, 0.5)
vbox2.pack_start(label, False, True, 0)
adj = gtk.Adjustment(1.0, 1.0, 31.0, 1.0, 5.0, 0.0)
spinner = gtk.SpinButton(adj, 0, 0)
spinner.set_wrap(True)
vbox2.pack_start(spinner, False, True, 0)
vbox2 = gtk.VBox(False, 0)
hbox.pack_start(vbox2, True, True, 5)
label = gtk.Label("Month :")
label.set_alignment(0, 0.5)
vbox2.pack_start(label, False, True, 0)
adj = gtk.Adjustment(1.0, 1.0, 12.0, 1.0, 5.0, 0.0)
spinner = gtk.SpinButton(adj, 0, 0)
spinner.set_wrap(True)
vbox2.pack_start(spinner, False, True, 0)
vbox2 = gtk.VBox(False, 0)
hbox.pack_start(vbox2, True, True, 5)
label = gtk.Label("Year :")
label.set_alignment(0, 0.5)
vbox2.pack_start(label, False, True, 0)
adj = gtk.Adjustment(1998.0, 0.0, 2100.0, 1.0, 100.0, 0.0)
spinner = gtk.SpinButton(adj, 0, 0)
spinner.set_wrap(False)
spinner.set_size_request(55, -1)
vbox2.pack_start(spinner, False, True, 0)
frame = gtk.Frame("Accelerated")
main_vbox.pack_start(frame, True, True, 0)
vbox = gtk.VBox(False, 0)
vbox.set_border_width(5)
frame.add(vbox)
hbox = gtk.HBox(False, 0)
vbox.pack_start(hbox, False, True, 5)
vbox2 = gtk.VBox(False, 0)
hbox.pack_start(vbox2, True, True, 5)
label = gtk.Label("Value :")
label.set_alignment(0, 0.5)
vbox2.pack_start(label, False, True, 0)
adj = gtk.Adjustment(0.0, -10000.0, 10000.0, 0.5, 100.0, 0.0)
spinner1 = gtk.SpinButton(adj, 1.0, 2)
spinner1.set_wrap(True)
spinner1.set_size_request(100, -1)
vbox2.pack_start(spinner1, False, True, 0)
vbox2 = gtk.VBox(False, 0)
hbox.pack_start(vbox2, True, True, 5)
label = gtk.Label("Digits :")
label.set_alignment(0, 0.5)
vbox2.pack_start(label, False, True, 0)
adj = gtk.Adjustment(2, 1, 5, 1, 1, 0)
spinner2 = gtk.SpinButton(adj, 0.0, 0)
spinner2.set_wrap(True)
adj.connect("value_changed", self.change_digits, spinner2, spinner1)
vbox2.pack_start(spinner2, False, True, 0)
hbox = gtk.HBox(False, 0)
vbox.pack_start(hbox, False, True, 5)
button = gtk.CheckButton("Snap to 0.5-ticks")
button.connect("clicked", self.toggle_snap, spinner1)
vbox.pack_start(button, True, True, 0)
button.set_active(True)
button = gtk.CheckButton("Numeric only input mode")
button.connect("clicked", self.toggle_numeric, spinner1)
vbox.pack_start(button, True, True, 0)
button.set_active(True)
val_label = gtk.Label("")
hbox = gtk.HBox(False, 0)
vbox.pack_start(hbox, False, True, 5)
button = gtk.Button("Value as Int")
button.connect("clicked", self.get_value, 1, spinner1, spinner2,
val_label)
hbox.pack_start(button, True, True, 5)
button = gtk.Button("Value as Float")
button.connect("clicked", self.get_value, 2, spinner1, spinner2,
val_label)
hbox.pack_start(button, True, True, 5)
vbox.pack_start(val_label, True, True, 0)
val_label.set_text("0")
hbox = gtk.HBox(False, 0)
main_vbox.pack_start(hbox, False, True, 0)
button = gtk.Button("Close")
button.connect("clicked", lambda w: gtk.main_quit())
hbox.pack_start(button, True, True, 5)
window.show_all()
def main():
gtk.main()
return 0
if __name__ == "__main__":
SpinButtonExample()
main()
| apache-2.0 |
thodoris/djangoPharma | djangoPharma/env/Lib/encodings/gb2312.py | 816 | 1027 | #
# gb2312.py: Python Unicode Codec for GB2312
#
# Written by Hye-Shik Chang <[email protected]>
#
import _codecs_cn, codecs
import _multibytecodec as mbc
codec = _codecs_cn.getcodec('gb2312')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='gb2312',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
| apache-2.0 |
alextreme/Django-Bingo | contrib/xlwt/xlwt/Formatting.py | 87 | 8347 | #!/usr/bin/env python
'''
The XF record is able to store explicit cell formatting attributes or the
attributes of a cell style. Explicit formatting includes the reference to
a cell style XF record. This allows extending a defined cell style with
some explicit attributes. The formatting attributes are divided into
6 groups:
Group Attributes
-------------------------------------
Number format Number format index (index to FORMAT record)
Font Font index (index to FONT record)
Alignment Horizontal and vertical alignment, text wrap, indentation,
orientation/rotation, text direction
Border Border line styles and colours
Background Background area style and colours
Protection Cell locked, formula hidden
For each group a flag in the cell XF record specifies whether to use the
attributes contained in that XF record or in the referenced style
XF record. In style XF records, these flags specify whether the attributes
will overwrite explicit cell formatting when the style is applied to
a cell. Changing a cell style (without applying this style to a cell) will
change all cells which already use that style and do not contain explicit
cell attributes for the changed style attributes. If a cell XF record does
not contain explicit attributes in a group (if the attribute group flag
is not set), it repeats the attributes of its style XF record.
'''
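# Editor's sketch (not part of the original module): the attribute groups listed
# above map onto the classes defined below -- Font, Alignment, Borders, Pattern
# (background) and Protection. A style is typically built by configuring
# instances of these, e.g. (hypothetical variable names):
#
#   fnt = Font()
#   fnt.bold = True
#   fnt.height = 240              # 12pt, in 1/20-point units
#   brd = Borders()
#   brd.bottom = Borders.THIN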
import BIFFRecords
class Font(object):
ESCAPEMENT_NONE = 0x00
ESCAPEMENT_SUPERSCRIPT = 0x01
ESCAPEMENT_SUBSCRIPT = 0x02
UNDERLINE_NONE = 0x00
UNDERLINE_SINGLE = 0x01
UNDERLINE_SINGLE_ACC = 0x21
UNDERLINE_DOUBLE = 0x02
UNDERLINE_DOUBLE_ACC = 0x22
FAMILY_NONE = 0x00
FAMILY_ROMAN = 0x01
FAMILY_SWISS = 0x02
FAMILY_MODERN = 0x03
FAMILY_SCRIPT = 0x04
FAMILY_DECORATIVE = 0x05
CHARSET_ANSI_LATIN = 0x00
CHARSET_SYS_DEFAULT = 0x01
CHARSET_SYMBOL = 0x02
CHARSET_APPLE_ROMAN = 0x4D
CHARSET_ANSI_JAP_SHIFT_JIS = 0x80
CHARSET_ANSI_KOR_HANGUL = 0x81
CHARSET_ANSI_KOR_JOHAB = 0x82
CHARSET_ANSI_CHINESE_GBK = 0x86
CHARSET_ANSI_CHINESE_BIG5 = 0x88
CHARSET_ANSI_GREEK = 0xA1
CHARSET_ANSI_TURKISH = 0xA2
CHARSET_ANSI_VIETNAMESE = 0xA3
CHARSET_ANSI_HEBREW = 0xB1
CHARSET_ANSI_ARABIC = 0xB2
CHARSET_ANSI_BALTIC = 0xBA
CHARSET_ANSI_CYRILLIC = 0xCC
CHARSET_ANSI_THAI = 0xDE
CHARSET_ANSI_LATIN_II = 0xEE
CHARSET_OEM_LATIN_I = 0xFF
def __init__(self):
# twip = 1/20 of a point = 1/1440 of a inch
# usually resolution == 96 pixels per 1 inch
# (rarely 120 pixels per 1 inch or another one)
self.height = 0x00C8 # 200: this is font with height 10 points
self.italic = False
self.struck_out = False
self.outline = False
self.shadow = False
self.colour_index = 0x7FFF
self.bold = False
self._weight = 0x0190 # 0x02BC gives bold font
self.escapement = self.ESCAPEMENT_NONE
self.underline = self.UNDERLINE_NONE
self.family = self.FAMILY_NONE
self.charset = self.CHARSET_SYS_DEFAULT
self.name = 'Arial'
def get_biff_record(self):
height = self.height
options = 0x00
if self.bold:
options |= 0x01
self._weight = 0x02BC
if self.italic:
options |= 0x02
if self.underline != self.UNDERLINE_NONE:
options |= 0x04
if self.struck_out:
options |= 0x08
if self.outline:
options |= 0x010
if self.shadow:
options |= 0x020
colour_index = self.colour_index
weight = self._weight
escapement = self.escapement
underline = self.underline
family = self.family
charset = self.charset
name = self.name
return BIFFRecords.FontRecord(height, options, colour_index, weight, escapement,
underline, family, charset,
name)
def _search_key(self):
return (
self.height,
self.italic,
self.struck_out,
self.outline,
self.shadow,
self.colour_index,
self.bold,
self._weight,
self.escapement,
self.underline,
self.family,
self.charset,
self.name,
)
class Alignment(object):
HORZ_GENERAL = 0x00
HORZ_LEFT = 0x01
HORZ_CENTER = 0x02
HORZ_RIGHT = 0x03
HORZ_FILLED = 0x04
HORZ_JUSTIFIED = 0x05 # BIFF4-BIFF8X
HORZ_CENTER_ACROSS_SEL = 0x06 # Centred across selection (BIFF4-BIFF8X)
HORZ_DISTRIBUTED = 0x07 # Distributed (BIFF8X)
VERT_TOP = 0x00
VERT_CENTER = 0x01
VERT_BOTTOM = 0x02
VERT_JUSTIFIED = 0x03 # Justified (BIFF5-BIFF8X)
VERT_DISTRIBUTED = 0x04 # Distributed (BIFF8X)
DIRECTION_GENERAL = 0x00 # BIFF8X
DIRECTION_LR = 0x01
DIRECTION_RL = 0x02
ORIENTATION_NOT_ROTATED = 0x00
ORIENTATION_STACKED = 0x01
ORIENTATION_90_CC = 0x02
ORIENTATION_90_CW = 0x03
ROTATION_0_ANGLE = 0x00
ROTATION_STACKED = 0xFF
WRAP_AT_RIGHT = 0x01
NOT_WRAP_AT_RIGHT = 0x00
SHRINK_TO_FIT = 0x01
NOT_SHRINK_TO_FIT = 0x00
def __init__(self):
self.horz = self.HORZ_GENERAL
self.vert = self.VERT_BOTTOM
self.dire = self.DIRECTION_GENERAL
self.orie = self.ORIENTATION_NOT_ROTATED
self.rota = self.ROTATION_0_ANGLE
self.wrap = self.NOT_WRAP_AT_RIGHT
self.shri = self.NOT_SHRINK_TO_FIT
self.inde = 0
self.merg = 0
def _search_key(self):
return (
self.horz, self.vert, self.dire, self.orie, self.rota,
self.wrap, self.shri, self.inde, self.merg,
)
class Borders(object):
NO_LINE = 0x00
THIN = 0x01
MEDIUM = 0x02
DASHED = 0x03
DOTTED = 0x04
THICK = 0x05
DOUBLE = 0x06
HAIR = 0x07
#The following for BIFF8
MEDIUM_DASHED = 0x08
THIN_DASH_DOTTED = 0x09
MEDIUM_DASH_DOTTED = 0x0A
THIN_DASH_DOT_DOTTED = 0x0B
MEDIUM_DASH_DOT_DOTTED = 0x0C
SLANTED_MEDIUM_DASH_DOTTED = 0x0D
NEED_DIAG1 = 0x01
NEED_DIAG2 = 0x01
NO_NEED_DIAG1 = 0x00
NO_NEED_DIAG2 = 0x00
def __init__(self):
self.left = self.NO_LINE
self.right = self.NO_LINE
self.top = self.NO_LINE
self.bottom = self.NO_LINE
self.diag = self.NO_LINE
self.left_colour = 0x40
self.right_colour = 0x40
self.top_colour = 0x40
self.bottom_colour = 0x40
self.diag_colour = 0x40
self.need_diag1 = self.NO_NEED_DIAG1
self.need_diag2 = self.NO_NEED_DIAG2
def _search_key(self):
return (
self.left, self.right, self.top, self.bottom, self.diag,
self.left_colour, self.right_colour, self.top_colour,
self.bottom_colour, self.diag_colour,
self.need_diag1, self.need_diag2,
)
class Pattern(object):
# patterns 0x00 - 0x12
NO_PATTERN = 0x00
SOLID_PATTERN = 0x01
def __init__(self):
self.pattern = self.NO_PATTERN
self.pattern_fore_colour = 0x40
self.pattern_back_colour = 0x41
def _search_key(self):
return (
self.pattern,
self.pattern_fore_colour,
self.pattern_back_colour,
)
class Protection(object):
def __init__(self):
self.cell_locked = 1
self.formula_hidden = 0
def _search_key(self):
return (
self.cell_locked,
self.formula_hidden,
)
| bsd-3-clause |
chialiang-8/cloudbase-init | cloudbaseinit/tests/plugins/common/test_createuser.py | 2 | 4376 | # Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from oslo_config import cfg
from cloudbaseinit.plugins.common import base
from cloudbaseinit.plugins.common import createuser
from cloudbaseinit.tests import testutils
CONF = cfg.CONF
class CreateUserPlugin(createuser.BaseCreateUserPlugin):
def create_user(self, username, password, osutils):
pass
def post_create_user(self, username, password, osutils):
pass
class CreateUserPluginTests(unittest.TestCase):
def setUp(self):
self._create_user = CreateUserPlugin()
def test_get_password(self):
mock_osutils = mock.MagicMock()
mock_osutils.generate_random_password.return_value = 'fake password'
response = self._create_user._get_password(mock_osutils)
mock_osutils.get_maximum_password_length.assert_called_once_with()
length = mock_osutils.get_maximum_password_length()
mock_osutils.generate_random_password.assert_called_once_with(length)
self.assertEqual('fake password', response)
@testutils.ConfPatcher('groups', ['Admins'])
@mock.patch('cloudbaseinit.osutils.factory.get_os_utils')
@mock.patch('cloudbaseinit.plugins.common.createuser.'
'BaseCreateUserPlugin._get_password')
@mock.patch.object(CreateUserPlugin, 'create_user')
@mock.patch.object(CreateUserPlugin, 'post_create_user')
def _test_execute(self, mock_post_create_user, mock_create_user,
mock_get_password, mock_get_os_utils,
user_exists=True,
group_adding_works=True):
shared_data = {}
mock_osutils = mock.MagicMock()
mock_service = mock.MagicMock()
mock_get_password.return_value = 'password'
mock_get_os_utils.return_value = mock_osutils
mock_osutils.user_exists.return_value = user_exists
if not group_adding_works:
mock_osutils.add_user_to_local_group.side_effect = Exception
with testutils.LogSnatcher("cloudbaseinit.plugins.common."
"createuser") as snatcher:
response = self._create_user.execute(mock_service, shared_data)
mock_get_os_utils.assert_called_once_with()
mock_get_password.assert_called_once_with(mock_osutils)
mock_osutils.user_exists.assert_called_once_with(CONF.username)
if user_exists:
mock_osutils.set_user_password.assert_called_once_with(
CONF.username, 'password')
expected_logging = ["Setting password for existing user \"%s\""
% CONF.username]
else:
mock_create_user.assert_called_once_with(
CONF.username, 'password',
mock_osutils)
expected_logging = ["Creating user \"%s\" and setting password"
% CONF.username]
mock_post_create_user.assert_called_once_with(
CONF.username, 'password',
mock_osutils)
self.assertEqual(expected_logging, snatcher.output[:1])
if not group_adding_works:
failed = snatcher.output[1].startswith(
"Cannot add user to group \"Admins\"")
self.assertTrue(failed)
mock_osutils.add_user_to_local_group.assert_called_once_with(
CONF.username, CONF.groups[0])
self.assertEqual((base.PLUGIN_EXECUTION_DONE, False), response)
def test_execute_user_exists(self):
self._test_execute(user_exists=True)
def test_execute_no_user(self):
self._test_execute(user_exists=False)
def test_execute_add_to_group_fails(self):
self._test_execute(group_adding_works=False)
| apache-2.0 |
ios-xr/iosxr-ansible | local/library/xr32_install_package.py | 1 | 9451 | #!/usr/bin/python
#------------------------------------------------------------------------------
#
# Copyright (C) 2016 Cisco Systems, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#------------------------------------------------------------------------------
from ansible.module_utils.basic import *
from ansible.module_utils.shell import *
from ansible.module_utils.netcfg import *
from iosxr_common import *
from iosxr import *
DOCUMENTATION = """
---
module: xr32_install_package
author: Adisorn Ermongkonchai
short_description: Run install commands on IOS-XR devices.
description:
- Install IOS-XR package or SMU (Software Maintenance Updates)
on the IOS-XR node.
options:
host:
description:
- IP address or hostname (resolvable by Ansible control host) of
the target IOS-XR node.
required: true
username:
description:
- username used to login to IOS-XR
required: true
default: none
password:
description:
- password used to login to IOS-XR
required: true
default: none
pkgpath:
description:
- path to where the package file is stored
e.g. tftp://192.168.1.1
ftp://192.168.1.1
/disk0:
required: Only when state is 'present'
pkgname:
description:
- IOS-XR software package without file extension
e.g. The package name for 'xrv9k-ospf-1.0.0.0-r61102I.x86_64.rpm'
is 'xrv9k-ospf-1.0.0.0-r61102I'
required: true
state:
description:
- represent state of the package being installed
required: false
default: 'present'
choices: ['present', 'absent', 'activated', 'deactivated', 'committed']
"""
EXAMPLES = """
- xr32_install_package:
host: '{{ ansible_ssh_host }}'
username: cisco
password: cisco
pkgpath: "tftp://192.168.1.1"
pkgname: "xrv9k-ospf-1.0.0.0-r61102I"
state: present
- xr32_install_package:
host: '{{ ansible_ssh_host }}'
username: cisco
password: cisco
pkgname: "xrv9k-ospf-1.0.0.0-r61102I"
state: activated
"""
RETURN = """
stdout:
description: raw response
returned: always
stdout_lines:
description: list of response lines
returned: always
"""
# check if the target is running classic (32-bit) IOS-XR
def is_legacy_iosxr(module):
command = "show version"
response = execute_command(module, command)
return "Build Information:" not in response[0]
# check if another install command in progress
def is_install_in_progress(module):
command = "show install request"
response = execute_command(module, command)
return "no install requests" not in response[0]
# check if the package is already added
def is_package_already_added(module, pkg_name):
command = "show install inactive"
response = execute_command(module, command)
return pkg_name in response[0]
# check if the package is already active
def is_package_already_active(module, pkg_name):
command = "show install active"
response = execute_command(module, command)
return pkg_name in response[0]
# wait for install command to complete
def wait_install_response(module, oper_id):
retries = 100
while retries > 0:
if is_install_in_progress(module):
retries -= 1
time.sleep(3)
else:
command = "show install log " + oper_id.group(1) + " detail"
response = execute_command(module, command)
if 'Error: ' in response[0]:
module.fail_json(msg=response)
return response
else:
module.fail_json(msg="timeout waiting for install to complete")
# get install operation id from log
def get_operation_id(response):
pattern = re.compile(r"Install operation (\d+)")
return pattern.search(response[0])
# add package only when it is not already added or activated
def install_add(module, pkg_path, pkg_name):
result = dict(changed=False)
if is_package_already_active(module, pkg_name):
response = [pkg_name + " package is already active\n"]
elif is_package_already_added(module, pkg_name):
response = [pkg_name + " package is already added\n"]
elif pkg_path == None:
module.fail_json(msg="package path required")
else:
command = ("install add source " +
pkg_path + " " + pkg_name)
response = execute_command(module, command)
oper_id = get_operation_id(response)
response = wait_install_response(module, oper_id)
result['changed'] = True
result['stdout'] = response
result['stdout_lines'] = str(result['stdout']).split(r'\n')
return result
# remove package only when it is in inactive state
def install_remove(module, pkg_path, pkg_name):
result = dict(changed=False)
if is_package_already_active(module, pkg_name):
error = pkg_name + " is active, please deactivate first"
module.fail_json(msg=error)
elif is_package_already_added(module, pkg_name):
command = "install remove " + pkg_name + "prompt-level none"
response = execute_command(module, command)
oper_id = get_operation_id(response)
response = wait_install_response(module, oper_id)
result['changed'] = True
else:
response = [pkg_name + " package has already been removed\n"]
result['stdout'] = response
result['stdout_lines'] = str(result['stdout']).split(r'\n')
return result
# activate package only when it has been added
def install_activate(module, pkg_path, pkg_name):
result = dict(changed=False)
if is_package_already_active(module, pkg_name):
response = [pkg_name + " package is already active\n"]
elif is_package_already_added(module, pkg_name):
command = "install activate " + pkg_name
response = execute_command(module, command)
oper_id = get_operation_id(response)
response = wait_install_response(module, oper_id)
result['changed'] = True
else:
error = pkg_name + " must be present before activate"
module.fail_json(msg=error)
result['stdout'] = response
result['stdout_lines'] = str(result['stdout']).split(r'\n')
return result
# deactivate package only when it is in active state
def install_deactivate(module, pkg_path, pkg_name):
result = dict(changed=False)
if is_package_already_active(module, pkg_name):
command = "install deactivate " + pkg_name
response = execute_command(module, command)
oper_id = get_operation_id(response)
response = wait_install_response(module, oper_id)
result['changed'] = True
elif is_package_already_added(module, pkg_name):
response = [pkg_name + " package is already deactivated\n"]
else:
response = [pkg_name + " package has already been removed\n"]
result['stdout'] = response
result['stdout_lines'] = str(result['stdout']).split(r'\n')
return result
# commit active packages
def install_commit(module, pkg_path, pkg_name):
command = "install commit"
response = execute_command(module, command)
oper_id = get_operation_id(response)
response = wait_install_response(module, oper_id)
result = dict(changed=True)
result['stdout'] = response
result['stdout_lines'] = str(result['stdout']).split(r'\n')
return result
def main():
module = get_module(
argument_spec = dict(
username = dict(required=False, default=None),
password = dict(required=False, default=None),
pkgpath = dict(required=False, default=None),
pkgname = dict(required=True, default=None),
state = dict(required=False, default='present',
choices = ['present',
'absent',
'activated',
'deactivated',
'committed'])
),
supports_check_mode = False
)
args = module.params
state = args['state']
legacy = is_legacy_iosxr(module)
# this module cannot run on 64-bit IOS-XR
if not legacy:
module.fail_json(msg="cannot run on 64-bit IOS-XR")
# make sure no other install in progress
if is_install_in_progress(module):
module.fail_json(msg="other install op in progress")
install = {
'present': install_add,
'absent': install_remove,
'activated': install_activate,
'deactivated': install_deactivate,
'committed': install_commit
}
# need to be in "admin" mode for classic XR
command = "admin"
response = execute_command(module, command)
result = install[state](module, args['pkgpath'], args['pkgname'])
module.exit_json(**result)
if __name__ == "__main__":
main()
| gpl-3.0 |
mr-daydream/boss | collector/serial/rs485.py | 37 | 3265 | #!/usr/bin/env python
# RS485 support
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C) 2015 Chris Liechti <[email protected]>
#
# SPDX-License-Identifier: BSD-3-Clause
"""\
The settings for RS485 are stored in a dedicated object that can be applied to
serial ports (where supported).
NOTE: Some implementations may only support a subset of the settings.
"""
import time
import serial
class RS485Settings(object):
def __init__(
self,
rts_level_for_tx=True,
rts_level_for_rx=False,
loopback=False,
delay_before_tx=None,
delay_before_rx=None):
self.rts_level_for_tx = rts_level_for_tx
self.rts_level_for_rx = rts_level_for_rx
self.loopback = loopback
self.delay_before_tx = delay_before_tx
self.delay_before_rx = delay_before_rx
class RS485(serial.Serial):
"""\
A subclass that replaces the write method with one that toggles RTS
according to the RS485 settings.
NOTE: This may work unreliably on some serial ports (control signals not
synchronized or delayed compared to data). Using delays may be
unreliable (varying times, larger than expected) as the OS may not
support very fine grained delays (no smaller than in the order of
tens of milliseconds).
NOTE: Some implementations support this natively. Better performance
can be expected when the native version is used.
NOTE: The loopback property is ignored by this implementation. The actual
behavior depends on the used hardware.
Usage:
ser = RS485(...)
ser.rs485_mode = RS485Settings(...)
ser.write(b'hello')
"""
def __init__(self, *args, **kwargs):
super(RS485, self).__init__(*args, **kwargs)
self._alternate_rs485_settings = None
def write(self, b):
"""Write to port, controlling RTS before and after transmitting."""
if self._alternate_rs485_settings is not None:
# apply level for TX and optional delay
self.setRTS(self._alternate_rs485_settings.rts_level_for_tx)
if self._alternate_rs485_settings.delay_before_tx is not None:
time.sleep(self._alternate_rs485_settings.delay_before_tx)
# write and wait for data to be written
super(RS485, self).write(b)
super(RS485, self).flush()
# optional delay and apply level for RX
if self._alternate_rs485_settings.delay_before_rx is not None:
time.sleep(self._alternate_rs485_settings.delay_before_rx)
self.setRTS(self._alternate_rs485_settings.rts_level_for_rx)
else:
super(RS485, self).write(b)
# redirect where the property stores the settings so that underlying Serial
# instance does not see them
@property
def rs485_mode(self):
"""\
Enable RS485 mode and apply new settings, set to None to disable.
See serial.rs485.RS485Settings for more info about the value.
"""
return self._alternate_rs485_settings
@rs485_mode.setter
def rs485_mode(self, rs485_settings):
self._alternate_rs485_settings = rs485_settings
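# Hedged usage sketch (not part of pySerial): the port name, baud rate and delay
# values below are illustrative only; adjust them for the actual RS485 adapter.
def _example_rs485_usage():
    ser = RS485('/dev/ttyUSB0', baudrate=9600)  # hypothetical port
    ser.rs485_mode = RS485Settings(
        rts_level_for_tx=True,   # drive RTS high while transmitting
        rts_level_for_rx=False,  # drop RTS when returning to receive
        delay_before_tx=0.005,
        delay_before_rx=0.005)
    ser.write(b'hello')          # RTS is toggled around the transmission
    ser.close()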
| gpl-3.0 |
TridevGuha/django | tests/gis_tests/distapp/models.py | 259 | 1365 | from django.utils.encoding import python_2_unicode_compatible
from ..models import models
from ..utils import gisfield_may_be_null
@python_2_unicode_compatible
class NamedModel(models.Model):
name = models.CharField(max_length=30)
objects = models.GeoManager()
class Meta:
abstract = True
required_db_features = ['gis_enabled']
def __str__(self):
return self.name
class SouthTexasCity(NamedModel):
"City model on projected coordinate system for South Texas."
point = models.PointField(srid=32140)
class SouthTexasCityFt(NamedModel):
"Same City model as above, but U.S. survey feet are the units."
point = models.PointField(srid=2278)
class AustraliaCity(NamedModel):
"City model for Australia, using WGS84."
point = models.PointField()
class CensusZipcode(NamedModel):
"Model for a few South Texas ZIP codes (in original Census NAD83)."
poly = models.PolygonField(srid=4269)
class SouthTexasZipcode(NamedModel):
"Model for a few South Texas ZIP codes."
poly = models.PolygonField(srid=32140, null=gisfield_may_be_null)
class Interstate(NamedModel):
"Geodetic model for U.S. Interstates."
path = models.LineStringField()
class SouthTexasInterstate(NamedModel):
"Projected model for South Texas Interstates."
path = models.LineStringField(srid=32140)
| bsd-3-clause |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/social/tests/backends/test_dummy.py | 80 | 3932 | import json
import datetime
import time
from httpretty import HTTPretty
from social.actions import do_disconnect
from social.backends.oauth import BaseOAuth2
from social.exceptions import AuthForbidden
from social.tests.models import User
from social.tests.backends.oauth import OAuth2Test
class DummyOAuth2(BaseOAuth2):
name = 'dummy'
AUTHORIZATION_URL = 'http://dummy.com/oauth/authorize'
ACCESS_TOKEN_URL = 'http://dummy.com/oauth/access_token'
REVOKE_TOKEN_URL = 'https://dummy.com/oauth/revoke'
REVOKE_TOKEN_METHOD = 'GET'
EXTRA_DATA = [
('id', 'id'),
('expires', 'expires'),
('empty', 'empty', True),
'url'
]
def get_user_details(self, response):
"""Return user details from Github account"""
return {'username': response.get('username'),
'email': response.get('email', ''),
'first_name': response.get('first_name', ''),
'last_name': response.get('last_name', '')}
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
return self.get_json('http://dummy.com/user', params={
'access_token': access_token
})
class DummyOAuth2Test(OAuth2Test):
backend_path = 'social.tests.backends.test_dummy.DummyOAuth2'
user_data_url = 'http://dummy.com/user'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer'
})
user_data_body = json.dumps({
'id': 1,
'username': 'foobar',
'url': 'http://dummy.com/user/foobar',
'first_name': 'Foo',
'last_name': 'Bar',
'email': '[email protected]'
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
def test_tokens(self):
user = self.do_login()
self.assertEqual(user.social[0].access_token, 'foobar')
def test_revoke_token(self):
self.strategy.set_settings({
'SOCIAL_AUTH_REVOKE_TOKENS_ON_DISCONNECT': True
})
self.do_login()
user = User.get(self.expected_username)
user.password = 'password'
HTTPretty.register_uri(self._method(self.backend.REVOKE_TOKEN_METHOD),
self.backend.REVOKE_TOKEN_URL,
status=200)
do_disconnect(self.backend, user)
class WhitelistEmailsTest(DummyOAuth2Test):
def test_valid_login(self):
self.strategy.set_settings({
'SOCIAL_AUTH_WHITELISTED_EMAILS': ['[email protected]']
})
self.do_login()
def test_invalid_login(self):
self.strategy.set_settings({
'SOCIAL_AUTH_WHITELISTED_EMAILS': ['[email protected]']
})
with self.assertRaises(AuthForbidden):
self.do_login()
class WhitelistDomainsTest(DummyOAuth2Test):
def test_valid_login(self):
self.strategy.set_settings({
'SOCIAL_AUTH_WHITELISTED_DOMAINS': ['bar.com']
})
self.do_login()
def test_invalid_login(self):
self.strategy.set_settings({
'SOCIAL_AUTH_WHITELISTED_EMAILS': ['bar2.com']
})
with self.assertRaises(AuthForbidden):
self.do_login()
DELTA = datetime.timedelta(days=1)
class ExpirationTimeTest(DummyOAuth2Test):
user_data_body = json.dumps({
'id': 1,
'username': 'foobar',
'url': 'http://dummy.com/user/foobar',
'first_name': 'Foo',
'last_name': 'Bar',
'email': '[email protected]',
'expires': time.mktime((datetime.datetime.utcnow() +
DELTA).timetuple())
})
def test_expires_time(self):
user = self.do_login()
social = user.social[0]
expiration = social.expiration_datetime()
self.assertEqual(expiration <= DELTA, True)
| agpl-3.0 |
yinquan529/platform-external-chromium_org | tools/nocompile_driver.py | 105 | 16982 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implements a simple "negative compile" test for C++ on linux.
Sometimes a C++ API needs to ensure that various usages cannot compile. To
enable unittesting of these assertions, we use this python script to
invoke gcc on a source file and assert that compilation fails.
For more info, see:
http://dev.chromium.org/developers/testing/no-compile-tests
"""
import ast
import locale
import os
import re
import select
import shlex
import subprocess
import sys
import time
# Matches lines that start with #if and have the substring TEST in the
# conditional. Also extracts the comment. This allows us to search for
# lines like the following:
#
# #ifdef NCTEST_NAME_OF_TEST // [r'expected output']
# #if defined(NCTEST_NAME_OF_TEST) // [r'expected output']
# #if NCTEST_NAME_OF_TEST // [r'expected output']
# #elif NCTEST_NAME_OF_TEST // [r'expected output']
# #elif DISABLED_NCTEST_NAME_OF_TEST // [r'expected output']
#
# inside the unittest file.
NCTEST_CONFIG_RE = re.compile(r'^#(?:el)?if.*\s+(\S*NCTEST\S*)\s*(//.*)?')
# Matches and removes the defined() preprocessor predicate. This is useful
# for test cases that use the preprocessor if-statement form:
#
# #if defined(NCTEST_NAME_OF_TEST)
#
# Should be used to post-process the results found by NCTEST_CONFIG_RE.
STRIP_DEFINED_RE = re.compile(r'defined\((.*)\)')
# Used to grab the expectation from comment at the end of an #ifdef. See
# NCTEST_CONFIG_RE's comment for examples of what the format should look like.
#
# The extracted substring should be a python array of regular expressions.
EXTRACT_EXPECTATION_RE = re.compile(r'//\s*(\[.*\])')
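# Hedged illustration (not part of the original script): a small, uncalled helper
# showing how the three regexes above pick apart a hypothetical test line.
def _example_parse_nctest_line():
  line = "#if defined(NCTEST_FOO)  // [r'invalid conversion']"
  name, comment = NCTEST_CONFIG_RE.match(line).groups()
  strip_result = STRIP_DEFINED_RE.match(name)
  if strip_result:
    name = strip_result.group(1)  # 'NCTEST_FOO'
  expectation = EXTRACT_EXPECTATION_RE.match(comment).group(1)
  return name, expectation  # ('NCTEST_FOO', "[r'invalid conversion']")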
# The header for the result file so that it can be compiled.
RESULT_FILE_HEADER = """
// This file is generated by the no compile test from:
// %s
#include "base/logging.h"
#include "testing/gtest/include/gtest/gtest.h"
"""
# The GUnit test function to output on a successful test completion.
SUCCESS_GUNIT_TEMPLATE = """
TEST(%s, %s) {
LOG(INFO) << "Took %f secs. Started at %f, ended at %f";
}
"""
# The GUnit test function to output for a disabled test.
DISABLED_GUNIT_TEMPLATE = """
TEST(%s, %s) { }
"""
# Timeout constants.
NCTEST_TERMINATE_TIMEOUT_SEC = 60
NCTEST_KILL_TIMEOUT_SEC = NCTEST_TERMINATE_TIMEOUT_SEC + 2
BUSY_LOOP_MAX_TIME_SEC = NCTEST_KILL_TIMEOUT_SEC * 2
def ValidateInput(parallelism, sourcefile_path, cflags, resultfile_path):
"""Make sure the arguments being passed in are sane."""
assert parallelism >= 1
assert type(sourcefile_path) is str
assert type(cflags) is str
assert type(resultfile_path) is str
def ParseExpectation(expectation_string):
"""Extracts expectation definition from the trailing comment on the ifdef.
See the comment on NCTEST_CONFIG_RE for examples of the format we are parsing.
Args:
expectation_string: A string like "// [r'some_regex']"
Returns:
A list of compiled regular expressions indicating all possible valid
compiler outputs. If the list is empty, all outputs are considered valid.
"""
assert expectation_string is not None
match = EXTRACT_EXPECTATION_RE.match(expectation_string)
assert match
raw_expectation = ast.literal_eval(match.group(1))
assert type(raw_expectation) is list
expectation = []
for regex_str in raw_expectation:
assert type(regex_str) is str
expectation.append(re.compile(regex_str))
return expectation
def ExtractTestConfigs(sourcefile_path):
"""Parses the soruce file for test configurations.
Each no-compile test in the file is separated by an ifdef macro. We scan
the source file with the NCTEST_CONFIG_RE to find all ifdefs that look like
they demark one no-compile test and try to extract the test configuration
from that.
Args:
sourcefile_path: The path to the source file.
Returns:
A list of test configurations. Each test configuration is a dictionary of
the form:
{ name: 'NCTEST_NAME'
suite_name: 'SOURCE_FILE_NAME'
expectations: [re.Pattern, re.Pattern] }
The |suite_name| is used to generate a pretty gtest output on successful
completion of the no compile test.
The compiled regexps in |expectations| define the valid outputs of the
compiler. If any one of the listed patterns matches either the stderr or
stdout from the compilation, and the compilation failed, then the test is
considered to have succeeded. If the list is empty, then we ignore the
compiler output and just check for failed compilation. If |expectations|
is actually None, then this specifies a compiler sanity check test, which
should expect a SUCCESSFUL compilation.
"""
sourcefile = open(sourcefile_path, 'r')
# Convert filename from underscores to CamelCase.
words = os.path.splitext(os.path.basename(sourcefile_path))[0].split('_')
words = [w.capitalize() for w in words]
suite_name = 'NoCompile' + ''.join(words)
# Start with at least the compiler sanity test. You need to always have one
# sanity test to show that compiler flags and configuration are not just
# wrong. Otherwise, having a misconfigured compiler, or an error in the
# shared portions of the .nc file would cause all tests to erroneously pass.
test_configs = [{'name': 'NCTEST_SANITY',
'suite_name': suite_name,
'expectations': None}]
for line in sourcefile:
match_result = NCTEST_CONFIG_RE.match(line)
if not match_result:
continue
groups = match_result.groups()
# Grab the name and remove the defined() predicate if there is one.
name = groups[0]
strip_result = STRIP_DEFINED_RE.match(name)
if strip_result:
name = strip_result.group(1)
# Read expectations if there are any.
test_configs.append({'name': name,
'suite_name': suite_name,
'expectations': ParseExpectation(groups[1])})
sourcefile.close()
return test_configs
def StartTest(sourcefile_path, cflags, config):
"""Start one negative compile test.
Args:
sourcefile_path: The path to the source file.
cflags: A string with all the CFLAGS to give to gcc. This string will be
split by shlex, so be careful with escaping.
config: A dictionary describing the test. See ExtractTestConfigs
for a description of the config format.
Returns:
A dictionary containing all the information about the started test. The
fields in the dictionary are as follows:
{ 'proc': A subprocess object representing the compiler run.
'cmdline': The executed command line.
'name': The name of the test.
'suite_name': The suite name to use when generating the gunit test
result.
'terminate_timeout': The timestamp in seconds since the epoch after
which the test should be terminated.
'kill_timeout': The timestamp in seconds since the epoch after which
the test should be given a hard kill signal.
'started_at': A timestamp in seconds since the epoch for when this test
was started.
'aborted_at': A timestamp in seconds since the epoch for when this test
was aborted. If the test completed successfully,
this value is 0.
'finished_at': A timestamp in seconds since the epoch for when this
test was successfully complete. If the test is aborted,
or running, this value is 0.
'expectations': A dictionary with the test expectations. See
ParseExpectation() for the structure.
}
"""
# TODO(ajwong): Get the compiler from gyp.
cmdline = ['g++']
cmdline.extend(shlex.split(cflags))
name = config['name']
expectations = config['expectations']
if expectations is not None:
cmdline.append('-D%s' % name)
cmdline.extend(['-o', '/dev/null', '-c', '-x', 'c++', sourcefile_path])
process = subprocess.Popen(cmdline, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
now = time.time()
return {'proc': process,
'cmdline': ' '.join(cmdline),
'name': name,
'suite_name': config['suite_name'],
'terminate_timeout': now + NCTEST_TERMINATE_TIMEOUT_SEC,
'kill_timeout': now + NCTEST_KILL_TIMEOUT_SEC,
'started_at': now,
'aborted_at': 0,
'finished_at': 0,
'expectations': expectations}
def PassTest(resultfile, test):
"""Logs the result of a test started by StartTest(), or a disabled test
configuration.
Args:
resultfile: File object for .cc file that results are written to.
test: An instance of the dictionary returned by StartTest(), a
configuration from ExtractTestConfigs().
"""
# The 'started_at' key is only added if a test has been started.
if 'started_at' in test:
resultfile.write(SUCCESS_GUNIT_TEMPLATE % (
test['suite_name'], test['name'],
test['finished_at'] - test['started_at'],
test['started_at'], test['finished_at']))
else:
resultfile.write(DISABLED_GUNIT_TEMPLATE % (
test['suite_name'], test['name']))
def FailTest(resultfile, test, error, stdout=None, stderr=None):
"""Logs the result of a test started by StartTest()
Args:
resultfile: File object for .cc file that results are written to.
test: An instance of the dictionary returned by StartTest()
error: The printable reason for the failure.
stdout: The test's output to stdout.
stderr: The test's output to stderr.
"""
resultfile.write('#error "%s Failed: %s"\n' % (test['name'], error))
resultfile.write('#error "compile line: %s"\n' % test['cmdline'])
if stdout and len(stdout) != 0:
resultfile.write('#error "%s stdout:"\n' % test['name'])
for line in stdout.split('\n'):
resultfile.write('#error " %s:"\n' % line)
if stderr and len(stderr) != 0:
resultfile.write('#error "%s stderr:"\n' % test['name'])
for line in stderr.split('\n'):
resultfile.write('#error " %s"\n' % line)
resultfile.write('\n')
def WriteStats(resultfile, suite_name, timings):
"""Logs the peformance timings for each stage of the script into a fake test.
Args:
resultfile: File object for .cc file that results are written to.
suite_name: The name of the GUnit suite this test belongs to.
timings: Dictionary with timestamps for each stage of the script run.
"""
stats_template = ("Started %f, Ended %f, Total %fs, Extract %fs, "
"Compile %fs, Process %fs")
total_secs = timings['results_processed'] - timings['started']
extract_secs = timings['extract_done'] - timings['started']
compile_secs = timings['compile_done'] - timings['extract_done']
process_secs = timings['results_processed'] - timings['compile_done']
resultfile.write('TEST(%s, Stats) { LOG(INFO) << "%s"; }\n' % (
suite_name, stats_template % (
timings['started'], timings['results_processed'], total_secs,
extract_secs, compile_secs, process_secs)))
def ProcessTestResult(resultfile, test):
"""Interprets and logs the result of a test started by StartTest()
Args:
resultfile: File object for .cc file that results are written to.
test: The dictionary from StartTest() to process.
"""
# Snap a copy of stdout and stderr into the test dictionary immediately
# because we can only call this once on the Popen object, and lots of stuff
# below will want access to it.
proc = test['proc']
(stdout, stderr) = proc.communicate()
if test['aborted_at'] != 0:
FailTest(resultfile, test, "Compile timed out. Started %f ended %f." %
(test['started_at'], test['aborted_at']))
return
if test['expectations'] is None:
# This signals a compiler sanity check test. Fail iff compilation failed.
if proc.poll() == 0:
PassTest(resultfile, test)
return
else:
FailTest(resultfile, test, 'Sanity compile failed. Is compiler borked?',
stdout, stderr)
return
elif proc.poll() == 0:
# Handle failure due to successful compile.
FailTest(resultfile, test,
'Unexpected successful compilation.',
stdout, stderr)
return
else:
# Check the output has the right expectations. If there are no
# expectations, then we just consider the output "matched" by default.
if len(test['expectations']) == 0:
PassTest(resultfile, test)
return
# Otherwise test against all expectations.
for regexp in test['expectations']:
if (regexp.search(stdout) is not None or
regexp.search(stderr) is not None):
PassTest(resultfile, test)
return
expectation_str = ', '.join(
["r'%s'" % regexp.pattern for regexp in test['expectations']])
FailTest(resultfile, test,
'Expectations [%s] did not match output.' % expectation_str,
stdout, stderr)
return
def CompleteAtLeastOneTest(resultfile, executing_tests):
"""Blocks until at least one task is removed from executing_tests.
This function removes completed tests from executing_tests, logging failures
and output. If no tests can be removed, it will enter a poll-loop until one
test finishes or times out. On a timeout, this function is responsible for
terminating the process in the appropriate fashion.
Args:
executing_tests: A dict mapping a string containing the test name to the
test dict return from StartTest().
Returns:
A list of tests that have finished.
"""
finished_tests = []
busy_loop_timeout = time.time() + BUSY_LOOP_MAX_TIME_SEC
while len(finished_tests) == 0:
# If we don't make progress for too long, assume the code is just dead.
assert busy_loop_timeout > time.time()
# Select on the output pipes.
read_set = []
for test in executing_tests.values():
read_set.extend([test['proc'].stderr, test['proc'].stdout])
result = select.select(read_set, [], read_set, NCTEST_TERMINATE_TIMEOUT_SEC)
# Now attempt to process results.
now = time.time()
for test in executing_tests.values():
proc = test['proc']
if proc.poll() is not None:
test['finished_at'] = now
finished_tests.append(test)
elif test['terminate_timeout'] < now:
proc.terminate()
test['aborted_at'] = now
elif test['kill_timeout'] < now:
proc.kill()
test['aborted_at'] = now
for test in finished_tests:
del executing_tests[test['name']]
return finished_tests
def main():
if len(sys.argv) != 5:
print ('Usage: %s <parallelism> <sourcefile> <cflags> <resultfile>' %
sys.argv[0])
sys.exit(1)
# Force us into the "C" locale so the compiler doesn't localize its output.
# In particular, this stops gcc from using smart quotes when in english UTF-8
# locales. This makes the expectation writing much easier.
os.environ['LC_ALL'] = 'C'
parallelism = int(sys.argv[1])
sourcefile_path = sys.argv[2]
cflags = sys.argv[3]
resultfile_path = sys.argv[4]
timings = {'started': time.time()}
ValidateInput(parallelism, sourcefile_path, cflags, resultfile_path)
test_configs = ExtractTestConfigs(sourcefile_path)
timings['extract_done'] = time.time()
resultfile = open(resultfile_path, 'w')
resultfile.write(RESULT_FILE_HEADER % sourcefile_path)
# Run the no-compile tests, but ensure we do not run more than |parallelism|
# tests at once.
timings['header_written'] = time.time()
executing_tests = {}
finished_tests = []
for config in test_configs:
# CompleteAtLeastOneTest blocks until at least one test finishes. Thus, this
# acts as a semaphore. We cannot use threads + a real semaphore because
# subprocess forks, which can cause all sorts of hilarity with threads.
if len(executing_tests) >= parallelism:
finished_tests.extend(CompleteAtLeastOneTest(resultfile, executing_tests))
if config['name'].startswith('DISABLED_'):
PassTest(resultfile, config)
else:
test = StartTest(sourcefile_path, cflags, config)
assert test['name'] not in executing_tests
executing_tests[test['name']] = test
# If there are no more test to start, we still need to drain the running
# ones.
while len(executing_tests) > 0:
finished_tests.extend(CompleteAtLeastOneTest(resultfile, executing_tests))
timings['compile_done'] = time.time()
for test in finished_tests:
ProcessTestResult(resultfile, test)
timings['results_processed'] = time.time()
# We always know at least a sanity test was run.
WriteStats(resultfile, finished_tests[0]['suite_name'], timings)
resultfile.close()
if __name__ == '__main__':
main()
| bsd-3-clause |
Vudentz/zephyr | boards/xtensa/intel_adsp_cavs15/tools/lib/platforms.py | 6 | 1736 | #!/usr/bin/env python3
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
from enum import Enum
# BXT
# CORE ID MASK
CORE_0 = 0x1
CORE_1 = 0x2
CORE_MASK = 0x3
# Number of Input Streams Supported (GCAP[11:8])
NUM_ISS = 6
# Number of Output Streams Supported (GCAP[15:12])
NUM_OSS = 7
# Total Number of Streams supported
NUM_STREAMS = NUM_ISS + NUM_OSS
# DMA Index for FW download
DMA_ID = 7
# DMA Page Size
DMA_PAGE_SIZE = 0x1000
# FW Registers in SRAM
FW_SRAM = 0x80000
FW_REGS = FW_SRAM + 0x00
FW_MBOX_UPLINK = FW_SRAM + 0x1000
FW_MBOX_DWLINK = FW_SRAM + 0x20000
FW_MBOX_SIZE = 0x1000
# FW Status Register
FW_STATUS = FW_REGS + 0x0000
FW_STATUS_BOOT_STATE = 0x00FFFFFF
FW_STATUS_BOOT_STATE_OFFSET = 0
FW_STATUS_WAIT_STATE = 0x0F000000
FW_STATUS_WAIT_STATE_OFFSET = 24
FW_STATUS_MODULE = 0x70000000
FW_STATUS_MODULE_OFFSET = 28
FW_STATUS_ERROR = 0x80000000
FW_STATUS_ERROR_OFFSET = 31
class BOOT_STATUS(Enum):
INIT = 0
INIT_DONE = 1
FW_ENTERED = 5
def BOOT_STATUS_STR(status):
try:
e = BOOT_STATUS(status)
except Exception:
return "UNKNOWN"
return e.name
# Boot Status
BOOT_STATUS_INIT = 0x00
BOOT_STATUS_INIT_DONE = 0x01
BOOT_STATUS_FW_ENTERED = 0x05
class WAIT_STATUS(Enum):
DMA_BUFFER_FULL = 5
def WAIT_STATUS_STR(status):
try:
e = WAIT_STATUS(status)
except Exception:
return "UNKNOWN"
return e.name
# Wait Status
WAIT_STATUS_DMA_BUFFER_FULL = 0x05
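# Hedged helper sketch (not part of the original file): decodes a raw FW_STATUS
# register value into its fields using the masks/offsets defined above; the
# function name and dict layout are choices made only for this illustration.
def decode_fw_status(status):
    return {
        'boot_state': (status & FW_STATUS_BOOT_STATE) >> FW_STATUS_BOOT_STATE_OFFSET,
        'wait_state': (status & FW_STATUS_WAIT_STATE) >> FW_STATUS_WAIT_STATE_OFFSET,
        'module': (status & FW_STATUS_MODULE) >> FW_STATUS_MODULE_OFFSET,
        'error': (status & FW_STATUS_ERROR) >> FW_STATUS_ERROR_OFFSET,
    }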
# FW Error Status
FW_ERR_CODE = FW_SRAM + 0x0004
# IPC Purge FW message
FW_IPC_PURGE = 0x01004000
# IPC GLOBAL LENGTH register
IPC_GLOBAL_LEN = 0x00
IPC_GLOBAL_CMD = 0x04
| apache-2.0 |
davisein/jitsudone | django/django/db/models/__init__.py | 248 | 1500 | from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, ImproperlyConfigured
from django.db import connection
from django.db.models.loading import get_apps, get_app, get_models, get_model, register_models
from django.db.models.query import Q
from django.db.models.expressions import F
from django.db.models.manager import Manager
from django.db.models.base import Model
from django.db.models.aggregates import *
from django.db.models.fields import *
from django.db.models.fields.subclassing import SubfieldBase
from django.db.models.fields.files import FileField, ImageField
from django.db.models.fields.related import ForeignKey, OneToOneField, ManyToManyField, ManyToOneRel, ManyToManyRel, OneToOneRel
from django.db.models.deletion import CASCADE, PROTECT, SET, SET_NULL, SET_DEFAULT, DO_NOTHING, ProtectedError
from django.db.models import signals
from django.utils.decorators import wraps
# Admin stages.
ADD, CHANGE, BOTH = 1, 2, 3
def permalink(func):
"""
Decorator that calls urlresolvers.reverse() to return a URL using
parameters returned by the decorated function "func".
"func" should be a function that returns a tuple in one of the
following formats:
(viewname, viewargs)
(viewname, viewargs, viewkwargs)
"""
from django.core.urlresolvers import reverse
@wraps(func)
def inner(*args, **kwargs):
bits = func(*args, **kwargs)
return reverse(bits[0], None, *bits[1:3])
return inner
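# Hedged illustration (not part of the original module): a model method decorated
# with @permalink might look like the sketch below; the view name 'news_detail',
# the Article model and its field are hypothetical.
#
#     class Article(Model):
#         slug = CharField(max_length=50)
#
#         @permalink
#         def get_absolute_url(self):
#             return ('news_detail', (), {'slug': self.slug})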
| bsd-3-clause |
caseydavenport/calico-docker | tests/st/bgp/peer.py | 3 | 1090 | # Copyright (c) 2015-2016 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def create_bgp_peer(host, scope, ip, asNum):
assert scope in ('node', 'global')
node = host.get_hostname() if scope == 'node' else ""
testdata = {
'apiVersion': 'v1',
'kind': 'bgpPeer',
'metadata': {
'scope': scope,
'node': node,
'peerIP': ip,
},
'spec': {
'asNumber': asNum
}
}
host.writefile("testfile.yaml", testdata)
host.calicoctl("create -f testfile.yaml")
| apache-2.0 |
pelya/commandergenius | project/jni/python/src/Lib/email/parser.py | 392 | 3300 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw, Thomas Wouters, Anthony Baxter
# Contact: [email protected]
"""A parser of RFC 2822 and MIME email messages."""
__all__ = ['Parser', 'HeaderParser']
import warnings
from cStringIO import StringIO
from email.feedparser import FeedParser
from email.message import Message
class Parser:
def __init__(self, *args, **kws):
"""Parser of RFC 2822 and MIME email messages.
Creates an in-memory object tree representing the email message, which
can then be manipulated and turned over to a Generator to return the
textual representation of the message.
The string must be formatted as a block of RFC 2822 headers and header
continuation lines, optionally preceded by a `Unix-from' header. The
header block is terminated either by the end of the string or by a
blank line.
_class is the class to instantiate for new message objects when they
must be created. This class must have a constructor that can take
zero arguments. Default is Message.Message.
"""
if len(args) >= 1:
if '_class' in kws:
raise TypeError("Multiple values for keyword arg '_class'")
kws['_class'] = args[0]
if len(args) == 2:
if 'strict' in kws:
raise TypeError("Multiple values for keyword arg 'strict'")
kws['strict'] = args[1]
if len(args) > 2:
raise TypeError('Too many arguments')
if '_class' in kws:
self._class = kws['_class']
del kws['_class']
else:
self._class = Message
if 'strict' in kws:
warnings.warn("'strict' argument is deprecated (and ignored)",
DeprecationWarning, 2)
del kws['strict']
if kws:
raise TypeError('Unexpected keyword arguments')
def parse(self, fp, headersonly=False):
"""Create a message structure from the data in a file.
Reads all the data from the file and returns the root of the message
structure. Optional headersonly is a flag specifying whether to stop
parsing after reading the headers or not. The default is False,
meaning it parses the entire contents of the file.
"""
feedparser = FeedParser(self._class)
if headersonly:
feedparser._set_headersonly()
while True:
data = fp.read(8192)
if not data:
break
feedparser.feed(data)
return feedparser.close()
def parsestr(self, text, headersonly=False):
"""Create a message structure from a string.
Returns the root of the message structure. Optional headersonly is a
flag specifying whether to stop parsing after reading the headers or
not. The default is False, meaning it parses the entire contents of
the file.
"""
return self.parse(StringIO(text), headersonly=headersonly)
class HeaderParser(Parser):
def parse(self, fp, headersonly=True):
return Parser.parse(self, fp, True)
def parsestr(self, text, headersonly=True):
return Parser.parsestr(self, text, True)
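# Hedged usage sketch (not part of the standard library module): the message text
# and addresses below are illustrative only.
def _example_parse_message():
    text = ("From: [email protected]\n"
            "To: [email protected]\n"
            "Subject: hello\n"
            "\n"
            "body text\n")
    msg = Parser().parsestr(text)
    return msg['Subject'], msg.get_payload()  # ('hello', 'body text\n')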
| lgpl-2.1 |
gengwg/leetcode | 474_ones_and_zeros.py | 1 | 3159 | # 474. Ones and Zeroes
# In the computer world, use restricted resource you have to generate maximum benefit is what we always want to pursue.
#
# For now, suppose you are a dominator of m 0s and n 1s respectively.
# On the other hand, there is an array with strings consisting of only 0s and 1s.
#
# Now your task is to find the maximum number of strings that you can form with given m 0s and n 1s.
# Each 0 and 1 can be used at most once.
#
# Note:
#
# The given numbers of 0s and 1s will both not exceed 100
# The size of given string array won't exceed 600.
#
# Example 1:
#
# Input: Array = {"10", "0001", "111001", "1", "0"}, m = 5, n = 3
# Output: 4
#
# Explanation: In total, 4 strings can be formed using 5 0s and 3 1s, which are "10", "0001", "1", "0".
#
# Example 2:
#
# Input: Array = {"10", "0", "1"}, m = 1, n = 1
# Output: 2
#
# Explanation: You could form "10", but then you'd have nothing left. Better form "0" and "1".
# https://leetcode.com/problems/ones-and-zeroes/discuss/95814/c++-DP-solution-with-comments
class Solution(object):
# https://blog.csdn.net/fuxuemingzhu/article/details/82825032
# My first instinct for this problem was a greedy approach, but whether you
# greedily take the shorter or the longer strings, the choice changes what can
# be picked later, so greedy does not work here.
# For problems that ask for a maximum or minimum count, without needing the
# actual selection, dynamic programming is usually the right tool.
# The DP is straightforward: define an array dp[m+1][n+1], where dp[i][j] is the
# maximum number of strings that can be formed with i 0s and j 1s.
# For each string, count its 0s and 1s (zeros and ones); then dp[i][j] becomes
# the maximum of dp[i][j] and dp[i - zeros][j - ones] + 1.
# Here dp[i - zeros][j - ones] is the best count still achievable with the
# capacity left over after taking the current string.
# The time complexity is hard to pin down exactly, roughly O(M*N*L) where L is
# the length of the array; the space complexity is O(M*N).
# TLE (Time Limit Exceeded)
def findMaxForm(self, strs, m, n):
"""
:type strs: List[str]
:type m: int
:type n: int
:rtype: int
"""
dp = [[0 for _ in range(n+1)] for _ in range(m+1)]
for s in strs:
zeros, ones = 0, 0
# count zeros and ones
for c in s:
if c == '0':
zeros += 1
elif c == '1':
ones += 1
# dp[i][j] = the max number of strings that can be formed with i 0's and j 1's
# from the first few strings up to the current string s
# Catch: have to go from bottom right to top left
# Why? If a cell in dp is updated (because s is selected),
# we should be adding 1 to dp[i][j] from the previous iteration (when we were not considering s)
# If we go from top left to bottom right, we would be using results from this iteration => overcounting
for i in range(m, zeros-1, -1):
for j in range(n, ones-1, -1):
dp[i][j] = max(dp[i][j], dp[i-zeros][j-ones] + 1)
return dp[m][n]
print(Solution().findMaxForm(["10", "0001", "111001", "1", "0"], 5, 3))
print(Solution().findMaxForm(["10", "0", "1"], 1, 1))
| apache-2.0 |
rvalyi/geraldo | site/newsite/django_1_0/django/contrib/databrowse/sites.py | 11 | 5648 | from django import http
from django.db import models
from django.contrib.databrowse.datastructures import EasyModel
from django.shortcuts import render_to_response
from django.utils.safestring import mark_safe
class AlreadyRegistered(Exception):
pass
class NotRegistered(Exception):
pass
class DatabrowsePlugin(object):
def urls(self, plugin_name, easy_instance_field):
"""
Given an EasyInstanceField object, returns a list of URLs for this
plugin's views of this object. These URLs should be absolute.
Returns None if the EasyInstanceField object doesn't get a
list of plugin-specific URLs.
"""
return None
def model_index_html(self, request, model, site):
"""
Returns a snippet of HTML to include on the model index page.
"""
return ''
def model_view(self, request, model_databrowse, url):
"""
Handles main URL routing for a plugin's model-specific pages.
"""
raise NotImplementedError
class ModelDatabrowse(object):
plugins = {}
def __init__(self, model, site):
self.model = model
self.site = site
def root(self, request, url):
"""
Handles main URL routing for the databrowse app.
`url` is the remainder of the URL -- e.g. 'objects/3'.
"""
# Delegate to the appropriate method, based on the URL.
if url is None:
return self.main_view(request)
try:
plugin_name, rest_of_url = url.split('/', 1)
except ValueError: # need more than 1 value to unpack
plugin_name, rest_of_url = url, None
try:
plugin = self.plugins[plugin_name]
except KeyError:
raise http.Http404('A plugin with the requested name does not exist.')
return plugin.model_view(request, self, rest_of_url)
def main_view(self, request):
easy_model = EasyModel(self.site, self.model)
html_snippets = mark_safe(u'\n'.join([p.model_index_html(request, self.model, self.site) for p in self.plugins.values()]))
return render_to_response('databrowse/model_detail.html', {
'model': easy_model,
'root_url': self.site.root_url,
'plugin_html': html_snippets,
})
class DatabrowseSite(object):
def __init__(self):
self.registry = {} # model_class -> databrowse_class
self.root_url = None
def register(self, model_or_iterable, databrowse_class=None, **options):
"""
Registers the given model(s) with the given databrowse site.
The model(s) should be Model classes, not instances.
If a databrowse class isn't given, it will use DefaultModelDatabrowse
(the default databrowse options).
If a model is already registered, this will raise AlreadyRegistered.
"""
databrowse_class = databrowse_class or DefaultModelDatabrowse
if issubclass(model_or_iterable, models.Model):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model in self.registry:
raise AlreadyRegistered('The model %s is already registered' % model.__class__.__name__)
self.registry[model] = databrowse_class
def unregister(self, model_or_iterable):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
if issubclass(model_or_iterable, models.Model):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self.registry:
raise NotRegistered('The model %s is not registered' % model.__class__.__name__)
del self.registry[model]
def root(self, request, url):
"""
Handles main URL routing for the databrowse app.
`url` is the remainder of the URL -- e.g. 'comments/comment/'.
"""
self.root_url = request.path[:len(request.path) - len(url)]
url = url.rstrip('/') # Trim trailing slash, if it exists.
if url == '':
return self.index(request)
elif '/' in url:
return self.model_page(request, *url.split('/', 2))
raise http.Http404('The requested databrowse page does not exist.')
def index(self, request):
m_list = [EasyModel(self, m) for m in self.registry.keys()]
return render_to_response('databrowse/homepage.html', {'model_list': m_list, 'root_url': self.root_url})
def model_page(self, request, app_label, model_name, rest_of_url=None):
"""
Handles the model-specific functionality of the databrowse site, delegating
to the appropriate ModelDatabrowse class.
"""
model = models.get_model(app_label, model_name)
if model is None:
raise http.Http404("App %r, model %r, not found." % (app_label, model_name))
try:
databrowse_class = self.registry[model]
except KeyError:
raise http.Http404("This model exists but has not been registered with databrowse.")
return databrowse_class(model, self).root(request, rest_of_url)
site = DatabrowseSite()
from django.contrib.databrowse.plugins.calendars import CalendarPlugin
from django.contrib.databrowse.plugins.objects import ObjectDetailPlugin
from django.contrib.databrowse.plugins.fieldchoices import FieldChoicePlugin
class DefaultModelDatabrowse(ModelDatabrowse):
plugins = {'objects': ObjectDetailPlugin(), 'calendars': CalendarPlugin(), 'fields': FieldChoicePlugin()}
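# Hedged usage sketch (not part of the original module): a project would normally
# register its models with the default `site` instance and route URLs to
# `site.root`; `SomeModel` and the URL pattern below are hypothetical.
#
#     from django.contrib import databrowse
#     databrowse.site.register(SomeModel)
#     # urls.py:  (r'^databrowse/(.*)', databrowse.site.root),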
| lgpl-3.0 |
daniel-leschkowski/generateDSv2 | tests/people_procincl1_sub.py | 2 | 11411 | #!/usr/bin/env python
#
# Generated by generateDS.py.
#
import sys
import people_procincl2_sup as supermod
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError(
"Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser()
doc = etree_.parse(*args, **kwargs)
return doc
#
# Globals
#
ExternalEncoding = 'ascii'
#
# Data representation classes
#
class peopleSub(supermod.people):
def __init__(self, comments=None, person=None, specialperson=None, programmer=None, python_programmer=None, java_programmer=None):
super(peopleSub, self).__init__(comments, person, specialperson, programmer, python_programmer, java_programmer, )
supermod.people.subclass = peopleSub
# end class peopleSub
class commentsSub(supermod.comments):
def __init__(self, emp=None, bold=None, valueOf_=None, mixedclass_=None, content_=None):
super(commentsSub, self).__init__(emp, bold, valueOf_, mixedclass_, content_, )
supermod.comments.subclass = commentsSub
# end class commentsSub
class personSub(supermod.person):
def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None, extensiontype_=None):
super(personSub, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, extensiontype_, )
supermod.person.subclass = personSub
# end class personSub
class specialpersonSub(supermod.specialperson):
def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None):
super(specialpersonSub, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, )
supermod.specialperson.subclass = specialpersonSub
# end class specialpersonSub
class paramSub(supermod.param):
def __init__(self, semantic=None, name=None, flow=None, sid=None, type_=None, id=None, valueOf_=None):
super(paramSub, self).__init__(semantic, name, flow, sid, type_, id, valueOf_, )
supermod.param.subclass = paramSub
# end class paramSub
class agentSub(supermod.agent):
def __init__(self, firstname=None, lastname=None, priority=None, info=None, vehicle=None):
super(agentSub, self).__init__(firstname, lastname, priority, info, vehicle, )
supermod.agent.subclass = agentSub
# end class agentSub
class special_agentSub(supermod.special_agent):
def __init__(self, firstname=None, lastname=None, priority=None, info=None):
super(special_agentSub, self).__init__(firstname, lastname, priority, info, )
supermod.special_agent.subclass = special_agentSub
# end class special_agentSub
class boosterSub(supermod.booster):
def __init__(self, member_id=None, firstname=None, lastname=None, other_name=None, classxx=None, other_value=None, type_=None, client_handler=None):
super(boosterSub, self).__init__(member_id, firstname, lastname, other_name, classxx, other_value, type_, client_handler, )
supermod.booster.subclass = boosterSub
# end class boosterSub
class infoSub(supermod.info):
def __init__(self, rating=None, type_=None, name=None):
super(infoSub, self).__init__(rating, type_, name, )
supermod.info.subclass = infoSub
# end class infoSub
class vehicleSub(supermod.vehicle):
def __init__(self, wheelcount=None, extensiontype_=None):
super(vehicleSub, self).__init__(wheelcount, extensiontype_, )
supermod.vehicle.subclass = vehicleSub
# end class vehicleSub
class automobileSub(supermod.automobile):
def __init__(self, wheelcount=None, drivername=None):
super(automobileSub, self).__init__(wheelcount, drivername, )
supermod.automobile.subclass = automobileSub
# end class automobileSub
class airplaneSub(supermod.airplane):
def __init__(self, wheelcount=None, pilotname=None):
super(airplaneSub, self).__init__(wheelcount, pilotname, )
supermod.airplane.subclass = airplaneSub
# end class airplaneSub
class programmerSub(supermod.programmer):
def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None, language=None, area=None, attrnegint=None, attrposint=None, attrnonnegint=None, attrnonposint=None, email=None, elposint=None, elnonposint=None, elnegint=None, elnonnegint=None, eldate=None, eltoken=None, elshort=None, ellong=None, elparam=None, elarraytypes=None, extensiontype_=None):
super(programmerSub, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, language, area, attrnegint, attrposint, attrnonnegint, attrnonposint, email, elposint, elnonposint, elnegint, elnonnegint, eldate, eltoken, elshort, ellong, elparam, elarraytypes, extensiontype_, )
supermod.programmer.subclass = programmerSub
# end class programmerSub
class client_handlerTypeSub(supermod.client_handlerType):
def __init__(self, fullname=None, refid=None):
super(client_handlerTypeSub, self).__init__(fullname, refid, )
supermod.client_handlerType.subclass = client_handlerTypeSub
# end class client_handlerTypeSub
class java_programmerSub(supermod.java_programmer):
def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None, language=None, area=None, attrnegint=None, attrposint=None, attrnonnegint=None, attrnonposint=None, email=None, elposint=None, elnonposint=None, elnegint=None, elnonnegint=None, eldate=None, eltoken=None, elshort=None, ellong=None, elparam=None, elarraytypes=None, status=None, nick_name=None, favorite_editor=None):
super(java_programmerSub, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, language, area, attrnegint, attrposint, attrnonnegint, attrnonposint, email, elposint, elnonposint, elnegint, elnonnegint, eldate, eltoken, elshort, ellong, elparam, elarraytypes, status, nick_name, favorite_editor, )
supermod.java_programmer.subclass = java_programmerSub
# end class java_programmerSub
class python_programmerSub(supermod.python_programmer):
def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None, language=None, area=None, attrnegint=None, attrposint=None, attrnonnegint=None, attrnonposint=None, email=None, elposint=None, elnonposint=None, elnegint=None, elnonnegint=None, eldate=None, eltoken=None, elshort=None, ellong=None, elparam=None, elarraytypes=None, nick_name=None, favorite_editor=None):
super(python_programmerSub, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, language, area, attrnegint, attrposint, attrnonnegint, attrnonposint, email, elposint, elnonposint, elnegint, elnonnegint, eldate, eltoken, elshort, ellong, elparam, elarraytypes, nick_name, favorite_editor, )
supermod.python_programmer.subclass = python_programmerSub
# end class python_programmerSub
def get_root_tag(node):
tag = supermod.Tag_pattern_.match(node.tag).groups()[-1]
rootClass = None
rootClass = supermod.GDSClassesMapping.get(tag)
if rootClass is None and hasattr(supermod, tag):
rootClass = getattr(supermod, tag)
return tag, rootClass
def parse(inFilename):
doc = parsexml_(inFilename)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'people'
rootClass = supermod.people
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
## sys.stdout.write('<?xml version="1.0" ?>\n')
## rootObj.export(sys.stdout, 0, name_=rootTag,
## namespacedef_='',
## pretty_print=True)
doc = None
return rootObj
def parseString(inString):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'people'
rootClass = supermod.people
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
## sys.stdout.write('<?xml version="1.0" ?>\n')
## rootObj.export(sys.stdout, 0, name_=rootTag,
## namespacedef_='')
return rootObj
def parseLiteral(inFilename):
doc = parsexml_(inFilename)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'people'
rootClass = supermod.people
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
## sys.stdout.write('#from people_procincl2_sup import *\n\n')
## sys.stdout.write('import people_procincl2_sup as model_\n\n')
## sys.stdout.write('rootObj = model_.people(\n')
## rootObj.exportLiteral(sys.stdout, 0, name_="people")
## sys.stdout.write(')\n')
return rootObj
USAGE_TEXT = """
Usage: python ???.py <infilename>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def main():
args = sys.argv[1:]
if len(args) != 1:
usage()
infilename = args[0]
root = parse(infilename)
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
| mit |
PsychoTV/PsychoTeam.repository | plugin.video.specto/resources/lib/resolvers/v_vids.py | 23 | 1385 | # -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib
from resources.lib.libraries import client
def resolve(url):
try:
result = client.request(url)
post = {}
f = client.parseDOM(result, 'Form', attrs = {'name': 'F1'})[0]
k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
post.update({'method_free': '', 'method_premium': ''})
result = client.request(url, post=post)
url = client.parseDOM(result, 'a', ret='href', attrs = {'id': 'downloadbutton'})[0]
return url
except:
return
| gpl-2.0 |
joe-parente/CheckRecon | vendor/doctrine/orm/docs/en/_exts/configurationblock.py | 2577 | 3506 | #Copyright (c) 2010 Fabien Potencier
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
from docutils.parsers.rst import Directive, directives
from docutils import nodes
from string import upper
class configurationblock(nodes.General, nodes.Element):
pass
class ConfigurationBlock(Directive):
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
option_spec = {}
formats = {
'html': 'HTML',
'xml': 'XML',
'php': 'PHP',
'yaml': 'YAML',
'jinja': 'Twig',
'html+jinja': 'Twig',
'jinja+html': 'Twig',
'php+html': 'PHP',
'html+php': 'PHP',
'ini': 'INI',
'php-annotations': 'Annotations',
}
def run(self):
env = self.state.document.settings.env
node = nodes.Element()
node.document = self.state.document
self.state.nested_parse(self.content, self.content_offset, node)
entries = []
for i, child in enumerate(node):
if isinstance(child, nodes.literal_block):
# add a title (the language name) before each block
#targetid = "configuration-block-%d" % env.new_serialno('configuration-block')
#targetnode = nodes.target('', '', ids=[targetid])
#targetnode.append(child)
innernode = nodes.emphasis(self.formats[child['language']], self.formats[child['language']])
para = nodes.paragraph()
para += [innernode, child]
entry = nodes.list_item('')
entry.append(para)
entries.append(entry)
resultnode = configurationblock()
resultnode.append(nodes.bullet_list('', *entries))
return [resultnode]
def visit_configurationblock_html(self, node):
self.body.append(self.starttag(node, 'div', CLASS='configuration-block'))
def depart_configurationblock_html(self, node):
self.body.append('</div>\n')
def visit_configurationblock_latex(self, node):
pass
def depart_configurationblock_latex(self, node):
pass
def setup(app):
app.add_node(configurationblock,
html=(visit_configurationblock_html, depart_configurationblock_html),
latex=(visit_configurationblock_latex, depart_configurationblock_latex))
app.add_directive('configuration-block', ConfigurationBlock)
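# Hedged usage sketch (not part of the original extension): in a reST source file
# the directive wraps one literal block per configuration format; the YAML/XML
# content below is illustrative only.
#
#     .. configuration-block::
#
#         .. code-block:: yaml
#
#             orm:
#                 auto_mapping: true
#
#         .. code-block:: xml
#
#             <doctrine:config auto-mapping="true" />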
| bsd-3-clause |
hackerbot/DjangoDev | django/contrib/gis/db/models/query.py | 4 | 36670 | import warnings
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.db.models.fields import (
GeometryField, LineStringField, PointField, get_srid_info,
)
from django.contrib.gis.db.models.lookups import GISLookup
from django.contrib.gis.db.models.sql import (
AreaField, DistanceField, GeomField, GMLField,
)
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Area, Distance
from django.db import connections
from django.db.models.expressions import RawSQL
from django.db.models.fields import Field
from django.db.models.query import QuerySet
from django.utils import six
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango21Warning,
)
class GeoQuerySet(QuerySet):
"The Geographic QuerySet."
# ### GeoQuerySet Methods ###
def area(self, tolerance=0.05, **kwargs):
"""
Returns the area of the geographic field in an `area` attribute on
each element of this GeoQuerySet.
"""
# Performing setup here rather than in `_spatial_attribute` so that
# we can get the units for `AreaField`.
procedure_args, geo_field = self._spatial_setup(
'area', field_name=kwargs.get('field_name', None))
s = {'procedure_args': procedure_args,
'geo_field': geo_field,
'setup': False,
}
connection = connections[self.db]
backend = connection.ops
if backend.oracle:
s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s'
s['procedure_args']['tolerance'] = tolerance
s['select_field'] = AreaField('sq_m') # Oracle returns area in units of meters.
elif backend.postgis or backend.spatialite:
if backend.geography:
# Geography fields support area calculation, returns square meters.
s['select_field'] = AreaField('sq_m')
elif not geo_field.geodetic(connection):
# Getting the area units of the geographic field.
s['select_field'] = AreaField(Area.unit_attname(geo_field.units_name(connection)))
else:
# TODO: Do we want to support raw number areas for geodetic fields?
raise Exception('Area on geodetic coordinate systems not supported.')
return self._spatial_attribute('area', s, **kwargs)
def centroid(self, **kwargs):
"""
Returns the centroid of the geographic field in a `centroid`
attribute on each element of this GeoQuerySet.
"""
return self._geom_attribute('centroid', **kwargs)
def collect(self, **kwargs):
"""
Performs an aggregate collect operation on the given geometry field.
This is analogous to a union operation, but much faster because
boundaries are not dissolved.
"""
warnings.warn(
"The collect GeoQuerySet method is deprecated. Use the Collect() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango20Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.Collect, **kwargs)
def difference(self, geom, **kwargs):
"""
Returns the spatial difference of the geographic field in a `difference`
attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('difference', geom, **kwargs)
def distance(self, geom, **kwargs):
"""
Returns the distance from the given geographic field name to the
given geometry in a `distance` attribute on each element of the
GeoQuerySet.
Keyword Arguments:
`spheroid` => If the geometry field is geodetic and PostGIS is
the spatial database, then the more accurate
spheroid calculation will be used instead of the
quicker sphere calculation.
`tolerance` => Used only for Oracle. The tolerance is
in meters -- a default of 5 centimeters (0.05)
is used.
"""
return self._distance_attribute('distance', geom, **kwargs)
def envelope(self, **kwargs):
"""
Returns a Geometry representing the bounding box of the
Geometry field in an `envelope` attribute on each element of
the GeoQuerySet.
"""
return self._geom_attribute('envelope', **kwargs)
def extent(self, **kwargs):
"""
Returns the extent (aggregate) of the features in the GeoQuerySet. The
extent will be returned as a 4-tuple, consisting of (xmin, ymin, xmax, ymax).
"""
warnings.warn(
"The extent GeoQuerySet method is deprecated. Use the Extent() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango20Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.Extent, **kwargs)
def extent3d(self, **kwargs):
"""
Returns the aggregate extent, in 3D, of the features in the
GeoQuerySet. It is returned as a 6-tuple, comprising:
(xmin, ymin, zmin, xmax, ymax, zmax).
"""
warnings.warn(
"The extent3d GeoQuerySet method is deprecated. Use the Extent3D() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango20Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.Extent3D, **kwargs)
def force_rhr(self, **kwargs):
"""
Returns a modified version of the Polygon/MultiPolygon in which
all of the vertices follow the Right-Hand-Rule. By default,
this is attached as the `force_rhr` attribute on each element
of the GeoQuerySet.
"""
return self._geom_attribute('force_rhr', **kwargs)
def geojson(self, precision=8, crs=False, bbox=False, **kwargs):
"""
Returns a GeoJSON representation of the geometry field in a `geojson`
attribute on each element of the GeoQuerySet.
The `crs` and `bbox` keywords may be set to True if the user wants
the coordinate reference system and the bounding box to be included
in the GeoJSON representation of the geometry.
"""
backend = connections[self.db].ops
if not backend.geojson:
raise NotImplementedError('Only PostGIS 1.3.4+ and SpatiaLite 3.0+ '
'support GeoJSON serialization.')
if not isinstance(precision, six.integer_types):
raise TypeError('Precision keyword must be set with an integer.')
options = 0
if crs and bbox:
options = 3
elif bbox:
options = 1
elif crs:
options = 2
s = {'desc': 'GeoJSON',
'procedure_args': {'precision': precision, 'options': options},
'procedure_fmt': '%(geo_col)s,%(precision)s,%(options)s',
}
return self._spatial_attribute('geojson', s, **kwargs)
def geohash(self, precision=20, **kwargs):
"""
Returns a GeoHash representation of the given field in a `geohash`
attribute on each element of the GeoQuerySet.
        The `precision` keyword may be used to customize the number of
        _characters_ used in the output GeoHash; the default is 20.
"""
s = {'desc': 'GeoHash',
'procedure_args': {'precision': precision},
'procedure_fmt': '%(geo_col)s,%(precision)s',
}
return self._spatial_attribute('geohash', s, **kwargs)
def gml(self, precision=8, version=2, **kwargs):
"""
Returns GML representation of the given field in a `gml` attribute
on each element of the GeoQuerySet.
"""
backend = connections[self.db].ops
s = {'desc': 'GML', 'procedure_args': {'precision': precision}}
if backend.postgis:
s['procedure_fmt'] = '%(version)s,%(geo_col)s,%(precision)s'
s['procedure_args'] = {'precision': precision, 'version': version}
if backend.oracle:
s['select_field'] = GMLField()
return self._spatial_attribute('gml', s, **kwargs)
def intersection(self, geom, **kwargs):
"""
Returns the spatial intersection of the Geometry field in
an `intersection` attribute on each element of this
GeoQuerySet.
"""
return self._geomset_attribute('intersection', geom, **kwargs)
def kml(self, **kwargs):
"""
Returns KML representation of the geometry field in a `kml`
attribute on each element of this GeoQuerySet.
"""
s = {'desc': 'KML',
'procedure_fmt': '%(geo_col)s,%(precision)s',
'procedure_args': {'precision': kwargs.pop('precision', 8)},
}
return self._spatial_attribute('kml', s, **kwargs)
def length(self, **kwargs):
"""
Returns the length of the geometry field as a `Distance` object
stored in a `length` attribute on each element of this GeoQuerySet.
"""
return self._distance_attribute('length', None, **kwargs)
def make_line(self, **kwargs):
"""
        Creates a linestring from all of the PointField geometries in
        this GeoQuerySet and returns it. This is a spatial aggregate
method, and thus returns a geometry rather than a GeoQuerySet.
"""
warnings.warn(
"The make_line GeoQuerySet method is deprecated. Use the MakeLine() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango20Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.MakeLine, geo_field_type=PointField, **kwargs)
def mem_size(self, **kwargs):
"""
Returns the memory size (number of bytes) that the geometry field takes
in a `mem_size` attribute on each element of this GeoQuerySet.
"""
return self._spatial_attribute('mem_size', {}, **kwargs)
def num_geom(self, **kwargs):
"""
Returns the number of geometries if the field is a
GeometryCollection or Multi* Field in a `num_geom`
attribute on each element of this GeoQuerySet; otherwise
        sets with None.
"""
return self._spatial_attribute('num_geom', {}, **kwargs)
def num_points(self, **kwargs):
"""
Returns the number of points in the first linestring in the
Geometry field in a `num_points` attribute on each element of
this GeoQuerySet; otherwise sets with None.
"""
return self._spatial_attribute('num_points', {}, **kwargs)
def perimeter(self, **kwargs):
"""
Returns the perimeter of the geometry field as a `Distance` object
stored in a `perimeter` attribute on each element of this GeoQuerySet.
"""
return self._distance_attribute('perimeter', None, **kwargs)
def point_on_surface(self, **kwargs):
"""
Returns a Point geometry guaranteed to lie on the surface of the
Geometry field in a `point_on_surface` attribute on each element
of this GeoQuerySet; otherwise sets with None.
"""
return self._geom_attribute('point_on_surface', **kwargs)
def reverse_geom(self, **kwargs):
"""
Reverses the coordinate order of the geometry, and attaches as a
`reverse` attribute on each element of this GeoQuerySet.
"""
s = {'select_field': GeomField()}
kwargs.setdefault('model_att', 'reverse_geom')
if connections[self.db].ops.oracle:
s['geo_field_type'] = LineStringField
return self._spatial_attribute('reverse', s, **kwargs)
def scale(self, x, y, z=0.0, **kwargs):
"""
Scales the geometry to a new size by multiplying the ordinates
with the given x,y,z scale factors.
"""
if connections[self.db].ops.spatialite:
if z != 0.0:
raise NotImplementedError('SpatiaLite does not support 3D scaling.')
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s',
'procedure_args': {'x': x, 'y': y},
'select_field': GeomField(),
}
else:
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s,%(z)s',
'procedure_args': {'x': x, 'y': y, 'z': z},
'select_field': GeomField(),
}
return self._spatial_attribute('scale', s, **kwargs)
def snap_to_grid(self, *args, **kwargs):
"""
Snap all points of the input geometry to the grid. How the
geometry is snapped to the grid depends on how many arguments
were given:
- 1 argument : A single size to snap both the X and Y grids to.
- 2 arguments: X and Y sizes to snap the grid to.
- 4 arguments: X, Y sizes and the X, Y origins.
"""
if False in [isinstance(arg, (float,) + six.integer_types) for arg in args]:
            raise TypeError('Size argument(s) for the grid must be float or integer values.')
nargs = len(args)
if nargs == 1:
size = args[0]
procedure_fmt = '%(geo_col)s,%(size)s'
procedure_args = {'size': size}
elif nargs == 2:
xsize, ysize = args
procedure_fmt = '%(geo_col)s,%(xsize)s,%(ysize)s'
procedure_args = {'xsize': xsize, 'ysize': ysize}
elif nargs == 4:
xsize, ysize, xorigin, yorigin = args
procedure_fmt = '%(geo_col)s,%(xorigin)s,%(yorigin)s,%(xsize)s,%(ysize)s'
procedure_args = {'xsize': xsize, 'ysize': ysize,
'xorigin': xorigin, 'yorigin': yorigin}
else:
raise ValueError('Must provide 1, 2, or 4 arguments to `snap_to_grid`.')
s = {'procedure_fmt': procedure_fmt,
'procedure_args': procedure_args,
'select_field': GeomField(),
}
return self._spatial_attribute('snap_to_grid', s, **kwargs)
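    # Hedged usage sketch (not part of the original module): assuming a
    # hypothetical model `Zipcode` whose manager returns this GeoQuerySet, the
    # three documented call forms would look roughly like this:
    #
    #   Zipcode.objects.snap_to_grid(0.001)                    # one size for X and Y
    #   Zipcode.objects.snap_to_grid(0.001, 0.002)             # separate X / Y sizes
    #   Zipcode.objects.snap_to_grid(0.001, 0.002, 0.5, 0.5)   # sizes plus X / Y origins
    #
    # The snapped geometry is attached as a `snap_to_grid` attribute on each
    # returned object.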
def svg(self, relative=False, precision=8, **kwargs):
"""
Returns SVG representation of the geographic field in a `svg`
attribute on each element of this GeoQuerySet.
Keyword Arguments:
`relative` => If set to True, this will evaluate the path in
terms of relative moves (rather than absolute).
`precision` => May be used to set the maximum number of decimal
digits used in output (defaults to 8).
"""
relative = int(bool(relative))
if not isinstance(precision, six.integer_types):
raise TypeError('SVG precision keyword argument must be an integer.')
s = {
'desc': 'SVG',
'procedure_fmt': '%(geo_col)s,%(rel)s,%(precision)s',
'procedure_args': {
'rel': relative,
'precision': precision,
}
}
return self._spatial_attribute('svg', s, **kwargs)
def sym_difference(self, geom, **kwargs):
"""
Returns the symmetric difference of the geographic field in a
`sym_difference` attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('sym_difference', geom, **kwargs)
def translate(self, x, y, z=0.0, **kwargs):
"""
Translates the geometry to a new location using the given numeric
parameters as offsets.
"""
if connections[self.db].ops.spatialite:
if z != 0.0:
raise NotImplementedError('SpatiaLite does not support 3D translation.')
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s',
'procedure_args': {'x': x, 'y': y},
'select_field': GeomField(),
}
else:
s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s,%(z)s',
'procedure_args': {'x': x, 'y': y, 'z': z},
'select_field': GeomField(),
}
return self._spatial_attribute('translate', s, **kwargs)
def transform(self, srid=4326, **kwargs):
"""
Transforms the given geometry field to the given SRID. If no SRID is
provided, the transformation will default to using 4326 (WGS84).
"""
if not isinstance(srid, six.integer_types):
raise TypeError('An integer SRID must be provided.')
field_name = kwargs.get('field_name', None)
self._spatial_setup('transform', field_name=field_name)
self.query.add_context('transformed_srid', srid)
return self._clone()
def union(self, geom, **kwargs):
"""
Returns the union of the geographic field with the given
Geometry in a `union` attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('union', geom, **kwargs)
def unionagg(self, **kwargs):
"""
Performs an aggregate union on the given geometry field. Returns
None if the GeoQuerySet is empty. The `tolerance` keyword is for
Oracle backends only.
"""
warnings.warn(
"The unionagg GeoQuerySet method is deprecated. Use the Union() "
"aggregate in an aggregate() or annotate() method.",
RemovedInDjango20Warning, stacklevel=2
)
return self._spatial_aggregate(aggregates.Union, **kwargs)
# ### Private API -- Abstracted DRY routines. ###
def _spatial_setup(self, att, desc=None, field_name=None, geo_field_type=None):
"""
Performs set up for executing the spatial function.
"""
# Does the spatial backend support this?
connection = connections[self.db]
func = getattr(connection.ops, att, False)
if desc is None:
desc = att
if not func:
raise NotImplementedError('%s stored procedure not available on '
'the %s backend.' %
(desc, connection.ops.name))
# Initializing the procedure arguments.
procedure_args = {'function': func}
# Is there a geographic field in the model to perform this
# operation on?
geo_field = self._geo_field(field_name)
if not geo_field:
raise TypeError('%s output only available on GeometryFields.' % func)
# If the `geo_field_type` keyword was used, then enforce that
# type limitation.
if geo_field_type is not None and not isinstance(geo_field, geo_field_type):
raise TypeError('"%s" stored procedures may only be called on %ss.' % (func, geo_field_type.__name__))
# Setting the procedure args.
procedure_args['geo_col'] = self._geocol_select(geo_field, field_name)
return procedure_args, geo_field
def _spatial_aggregate(self, aggregate, field_name=None,
geo_field_type=None, tolerance=0.05):
"""
DRY routine for calling aggregate spatial stored procedures and
returning their result to the caller of the function.
"""
# Getting the field the geographic aggregate will be called on.
geo_field = self._geo_field(field_name)
if not geo_field:
raise TypeError('%s aggregate only available on GeometryFields.' % aggregate.name)
# Checking if there are any geo field type limitations on this
# aggregate (e.g. ST_Makeline only operates on PointFields).
if geo_field_type is not None and not isinstance(geo_field, geo_field_type):
raise TypeError('%s aggregate may only be called on %ss.' % (aggregate.name, geo_field_type.__name__))
# Getting the string expression of the field name, as this is the
# argument taken by `Aggregate` objects.
agg_col = field_name or geo_field.name
# Adding any keyword parameters for the Aggregate object. Oracle backends
# in particular need an additional `tolerance` parameter.
agg_kwargs = {}
if connections[self.db].ops.oracle:
agg_kwargs['tolerance'] = tolerance
# Calling the QuerySet.aggregate, and returning only the value of the aggregate.
return self.aggregate(geoagg=aggregate(agg_col, **agg_kwargs))['geoagg']
def _spatial_attribute(self, att, settings, field_name=None, model_att=None):
"""
DRY routine for calling a spatial stored procedure on a geometry column
and attaching its output as an attribute of the model.
Arguments:
att:
The name of the spatial attribute that holds the spatial
SQL function to call.
settings:
            Dictionary of internal settings to customize for the spatial procedure.
Public Keyword Arguments:
field_name:
The name of the geographic field to call the spatial
function on. May also be a lookup to a geometry field
as part of a foreign key relation.
model_att:
The name of the model attribute to attach the output of
the spatial function to.
"""
warnings.warn(
"The %s GeoQuerySet method is deprecated. See GeoDjango Functions "
"documentation to find the expression-based replacement." % att,
RemovedInDjango21Warning, stacklevel=2
)
# Default settings.
settings.setdefault('desc', None)
settings.setdefault('geom_args', ())
settings.setdefault('geom_field', None)
settings.setdefault('procedure_args', {})
settings.setdefault('procedure_fmt', '%(geo_col)s')
settings.setdefault('select_params', [])
connection = connections[self.db]
# Performing setup for the spatial column, unless told not to.
if settings.get('setup', True):
default_args, geo_field = self._spatial_setup(
att, desc=settings['desc'], field_name=field_name,
geo_field_type=settings.get('geo_field_type', None))
for k, v in six.iteritems(default_args):
settings['procedure_args'].setdefault(k, v)
else:
geo_field = settings['geo_field']
# The attribute to attach to the model.
if not isinstance(model_att, six.string_types):
model_att = att
# Special handling for any argument that is a geometry.
for name in settings['geom_args']:
# Using the field's get_placeholder() routine to get any needed
# transformation SQL.
geom = geo_field.get_prep_value(settings['procedure_args'][name])
params = geo_field.get_db_prep_lookup('contains', geom, connection=connection)
geom_placeholder = geo_field.get_placeholder(geom, None, connection)
# Replacing the procedure format with that of any needed
# transformation SQL.
old_fmt = '%%(%s)s' % name
new_fmt = geom_placeholder % '%%s'
settings['procedure_fmt'] = settings['procedure_fmt'].replace(old_fmt, new_fmt)
settings['select_params'].extend(params)
# Getting the format for the stored procedure.
fmt = '%%(function)s(%s)' % settings['procedure_fmt']
# If the result of this function needs to be converted.
if settings.get('select_field', False):
select_field = settings['select_field']
if connection.ops.oracle:
select_field.empty_strings_allowed = False
else:
select_field = Field()
# Finally, setting the extra selection attribute with
# the format string expanded with the stored procedure
# arguments.
self.query.add_annotation(
RawSQL(fmt % settings['procedure_args'], settings['select_params'], select_field),
model_att)
return self
def _distance_attribute(self, func, geom=None, tolerance=0.05, spheroid=False, **kwargs):
"""
DRY routine for GeoQuerySet distance attribute routines.
"""
# Setting up the distance procedure arguments.
procedure_args, geo_field = self._spatial_setup(func, field_name=kwargs.get('field_name', None))
        # If the geometry field is geodetic, default the distance attribute to
        # meters (Oracle and PostGIS spherical distances return meters).
        # Otherwise, use the units of the geometry field.
connection = connections[self.db]
geodetic = geo_field.geodetic(connection)
geography = geo_field.geography
if geodetic:
dist_att = 'm'
else:
dist_att = Distance.unit_attname(geo_field.units_name(connection))
# Shortcut booleans for what distance function we're using and
# whether the geometry field is 3D.
distance = func == 'distance'
length = func == 'length'
perimeter = func == 'perimeter'
if not (distance or length or perimeter):
raise ValueError('Unknown distance function: %s' % func)
geom_3d = geo_field.dim == 3
# The field's get_db_prep_lookup() is used to get any
# extra distance parameters. Here we set up the
# parameters that will be passed in to field's function.
lookup_params = [geom or 'POINT (0 0)', 0]
# Getting the spatial backend operations.
backend = connection.ops
# If the spheroid calculation is desired, either by the `spheroid`
# keyword or when calculating the length of geodetic field, make
# sure the 'spheroid' distance setting string is passed in so we
# get the correct spatial stored procedure.
if spheroid or (backend.postgis and geodetic and
(not geography) and length):
lookup_params.append('spheroid')
lookup_params = geo_field.get_prep_value(lookup_params)
params = geo_field.get_db_prep_lookup('distance_lte', lookup_params, connection=connection)
# The `geom_args` flag is set to true if a geometry parameter was
# passed in.
geom_args = bool(geom)
if backend.oracle:
if distance:
procedure_fmt = '%(geo_col)s,%(geom)s,%(tolerance)s'
elif length or perimeter:
procedure_fmt = '%(geo_col)s,%(tolerance)s'
procedure_args['tolerance'] = tolerance
else:
# Getting whether this field is in units of degrees since the field may have
# been transformed via the `transform` GeoQuerySet method.
srid = self.query.get_context('transformed_srid')
if srid:
u, unit_name, s = get_srid_info(srid, connection)
geodetic = unit_name.lower() in geo_field.geodetic_units
if geodetic and not connection.features.supports_distance_geodetic:
raise ValueError(
'This database does not support linear distance '
'calculations on geodetic coordinate systems.'
)
if distance:
if srid:
# Setting the `geom_args` flag to false because we want to handle
# transformation SQL here, rather than the way done by default
# (which will transform to the original SRID of the field rather
# than to what was transformed to).
geom_args = False
procedure_fmt = '%s(%%(geo_col)s, %s)' % (backend.transform, srid)
if geom.srid is None or geom.srid == srid:
# If the geom parameter srid is None, it is assumed the coordinates
# are in the transformed units. A placeholder is used for the
# geometry parameter. `GeomFromText` constructor is also needed
# to wrap geom placeholder for SpatiaLite.
if backend.spatialite:
procedure_fmt += ', %s(%%%%s, %s)' % (backend.from_text, srid)
else:
procedure_fmt += ', %%s'
else:
# We need to transform the geom to the srid specified in `transform()`,
# so wrapping the geometry placeholder in transformation SQL.
# SpatiaLite also needs geometry placeholder wrapped in `GeomFromText`
# constructor.
if backend.spatialite:
procedure_fmt += (', %s(%s(%%%%s, %s), %s)' % (
backend.transform, backend.from_text,
geom.srid, srid))
else:
procedure_fmt += ', %s(%%%%s, %s)' % (backend.transform, srid)
else:
# `transform()` was not used on this GeoQuerySet.
procedure_fmt = '%(geo_col)s,%(geom)s'
if not geography and geodetic:
# Spherical distance calculation is needed (because the geographic
# field is geodetic). However, the PostGIS ST_distance_sphere/spheroid()
                # procedures may only do queries from point columns to point
                # geometries, so some error checking is required.
if not backend.geography:
if not isinstance(geo_field, PointField):
raise ValueError('Spherical distance calculation only supported on PointFields.')
if not str(Geometry(six.memoryview(params[0].ewkb)).geom_type) == 'Point':
raise ValueError(
'Spherical distance calculation only supported with '
'Point Geometry parameters'
)
# The `function` procedure argument needs to be set differently for
# geodetic distance calculations.
if spheroid:
# Call to distance_spheroid() requires spheroid param as well.
procedure_fmt += ",'%(spheroid)s'"
procedure_args.update({'function': backend.distance_spheroid, 'spheroid': params[1]})
else:
procedure_args.update({'function': backend.distance_sphere})
elif length or perimeter:
procedure_fmt = '%(geo_col)s'
if not geography and geodetic and length:
# There's no `length_sphere`, and `length_spheroid` also
# works on 3D geometries.
procedure_fmt += ",'%(spheroid)s'"
procedure_args.update({'function': backend.length_spheroid, 'spheroid': params[1]})
elif geom_3d and connection.features.supports_3d_functions:
# Use 3D variants of perimeter and length routines on supported backends.
if perimeter:
procedure_args.update({'function': backend.perimeter3d})
elif length:
procedure_args.update({'function': backend.length3d})
# Setting up the settings for `_spatial_attribute`.
s = {'select_field': DistanceField(dist_att),
'setup': False,
'geo_field': geo_field,
'procedure_args': procedure_args,
'procedure_fmt': procedure_fmt,
}
if geom_args:
s['geom_args'] = ('geom',)
s['procedure_args']['geom'] = geom
elif geom:
# The geometry is passed in as a parameter because we handled
# transformation conditions in this routine.
s['select_params'] = [backend.Adapter(geom)]
return self._spatial_attribute(func, s, **kwargs)
def _geom_attribute(self, func, tolerance=0.05, **kwargs):
"""
DRY routine for setting up a GeoQuerySet method that attaches a
Geometry attribute (e.g., `centroid`, `point_on_surface`).
"""
s = {'select_field': GeomField()}
if connections[self.db].ops.oracle:
s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s'
s['procedure_args'] = {'tolerance': tolerance}
return self._spatial_attribute(func, s, **kwargs)
def _geomset_attribute(self, func, geom, tolerance=0.05, **kwargs):
"""
DRY routine for setting up a GeoQuerySet method that attaches a
        Geometry attribute and takes a Geometry parameter. This is used
for geometry set-like operations (e.g., intersection, difference,
union, sym_difference).
"""
s = {
'geom_args': ('geom',),
'select_field': GeomField(),
'procedure_fmt': '%(geo_col)s,%(geom)s',
'procedure_args': {'geom': geom},
}
if connections[self.db].ops.oracle:
s['procedure_fmt'] += ',%(tolerance)s'
s['procedure_args']['tolerance'] = tolerance
return self._spatial_attribute(func, s, **kwargs)
def _geocol_select(self, geo_field, field_name):
"""
Helper routine for constructing the SQL to select the geographic
column. Takes into account if the geographic field is in a
ForeignKey relation to the current model.
"""
compiler = self.query.get_compiler(self.db)
opts = self.model._meta
if geo_field not in opts.fields:
# Is this operation going to be on a related geographic field?
# If so, it'll have to be added to the select related information
# (e.g., if 'location__point' was given as the field name).
# Note: the operation really is defined as "must add select related!"
self.query.add_select_related([field_name])
# Call pre_sql_setup() so that compiler.select gets populated.
compiler.pre_sql_setup()
for col, _, _ in compiler.select:
if col.output_field == geo_field:
return col.as_sql(compiler, compiler.connection)[0]
raise ValueError("%r not in compiler's related_select_cols" % geo_field)
elif geo_field not in opts.local_fields:
# This geographic field is inherited from another model, so we have to
# use the db table for the _parent_ model instead.
parent_model = geo_field.model._meta.concrete_model
return self._field_column(compiler, geo_field, parent_model._meta.db_table)
else:
return self._field_column(compiler, geo_field)
# Private API utilities, subject to change.
def _geo_field(self, field_name=None):
"""
Returns the first Geometry field encountered or the one specified via
the `field_name` keyword. The `field_name` may be a string specifying
the geometry field on this GeoQuerySet's model, or a lookup string
to a geometry field via a ForeignKey relation.
"""
if field_name is None:
# Incrementing until the first geographic field is found.
for field in self.model._meta.fields:
if isinstance(field, GeometryField):
return field
return False
else:
# Otherwise, check by the given field name -- which may be
# a lookup to a _related_ geographic field.
return GISLookup._check_geo_field(self.model._meta, field_name)
def _field_column(self, compiler, field, table_alias=None, column=None):
"""
Helper function that returns the database column for the given field.
The table and column are returned (quoted) in the proper format, e.g.,
`"geoapp_city"."point"`. If `table_alias` is not specified, the
database table associated with the model of this `GeoQuerySet` will be
used. If `column` is specified, it will be used instead of the value
in `field.column`.
"""
if table_alias is None:
table_alias = compiler.query.get_meta().db_table
return "%s.%s" % (compiler.quote_name_unless_alias(table_alias),
compiler.connection.ops.quote_name(column or field.column))
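# Hedged usage sketch (assumptions, not part of the original file): with a
# hypothetical model `City` whose `point` field is a PointField, the deprecated
# GeoQuerySet methods above would be called roughly like this:
#
#   from django.contrib.gis.geos import Point
#   pnt = Point(-95.36, 29.76, srid=4326)
#   qs = City.objects.distance(pnt)     # each city gains a `distance` attribute
#   qs = City.objects.transform(3857)   # reproject the geometry column
#   ext = City.objects.extent()         # (xmin, ymin, xmax, ymax) aggregate
#
# All of these emit the deprecation warnings defined above and are replaced by
# expression-based functions and aggregates in later Django versions.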
| bsd-3-clause |
yorvic/.vim | bundle/python-mode/pylibs/ropemode/environment.py | 9 | 1925 | class Environment(object):
def ask(self, prompt, default=None, starting=None):
pass
def ask_values(self, prompt, values, default=None, starting=None):
pass
def ask_directory(self, prompt, default=None, starting=None):
pass
def ask_completion(self, prompt, values, starting=None):
pass
def message(self, message):
pass
def yes_or_no(self, prompt):
pass
def y_or_n(self, prompt):
pass
def get(self, name, default=None):
pass
def get_offset(self):
pass
def get_text(self):
pass
def get_region(self):
pass
def filename(self):
pass
def is_modified(self):
pass
def goto_line(self, lineno):
pass
def insert_line(self, line, lineno):
pass
def insert(self, text):
pass
def delete(self, start, end):
pass
def filenames(self):
pass
def save_files(self, filenames):
pass
def reload_files(self, filenames, moves={}):
pass
def find_file(self, filename, readonly=False, other=False):
pass
def create_progress(self, name):
pass
def current_word(self):
pass
def push_mark(self):
pass
def pop_mark(self):
pass
def prefix_value(self, prefix):
pass
def show_occurrences(self, locations):
pass
def show_doc(self, docs, altview=False):
pass
def preview_changes(self, diffs):
pass
def local_command(self, name, callback, key=None, prefix=False):
pass
def global_command(self, name, callback, key=None, prefix=False):
pass
def add_hook(self, name, callback, hook):
pass
def _completion_text(self, proposal):
return proposal.name
def _completion_data(self, proposal):
return self._completion_text(proposal)
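# Hedged sketch (an assumption, not part of the original file): editor
# integrations adopt ropemode by subclassing Environment and filling in the UI
# primitives, for example:
#
#   class MinimalEnvironment(Environment):
#       def ask(self, prompt, default=None, starting=None):
#           return input(prompt) or default
#
#       def message(self, message):
#           print(message)
#
# The Vim and Emacs front ends provide the real implementations of these hooks.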
| gpl-3.0 |
nightjean/Deep-Learning | tensorflow/python/estimator/canned/dnn.py | 3 | 15021 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deep Neural Network estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import optimizers
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
from tensorflow.python.training import training_util
# The default learning rate of 0.05 is a historical artifact of the initial
# implementation, but seems a reasonable choice.
_LEARNING_RATE = 0.05
def _add_hidden_layer_summary(value, tag):
summary.scalar('%s_fraction_of_zero_values' % tag, nn.zero_fraction(value))
summary.histogram('%s_activation' % tag, value)
def _dnn_model_fn(
features, labels, mode, head, hidden_units, feature_columns,
optimizer='Adagrad', activation_fn=nn.relu, dropout=None,
input_layer_partitioner=None, config=None):
"""Deep Neural Net model_fn.
Args:
features: Dict of `Tensor` (depends on data passed to `train`).
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
head: A `head_lib._Head` instance.
hidden_units: Iterable of integer number of hidden units per layer.
feature_columns: Iterable of `feature_column._FeatureColumn` model inputs.
optimizer: String, `tf.Optimizer` object, or callable that creates the
optimizer to use for training. If not specified, will use the Adagrad
optimizer with a default learning rate of 0.05.
activation_fn: Activation function applied to each layer.
dropout: When not `None`, the probability we will drop out a given
coordinate.
input_layer_partitioner: Partitioner for input layer. Defaults
to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
config: `RunConfig` object to configure the runtime settings.
Returns:
predictions: A dict of `Tensor` objects.
loss: A scalar containing the loss of the step.
train_op: The op for training.
"""
optimizer = optimizers.get_optimizer_instance(
optimizer, learning_rate=_LEARNING_RATE)
num_ps_replicas = config.num_ps_replicas if config else 0
partitioner = partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas)
with variable_scope.variable_scope(
'dnn',
values=tuple(six.itervalues(features)),
partitioner=partitioner):
input_layer_partitioner = input_layer_partitioner or (
partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=64 << 20))
with variable_scope.variable_scope(
'input_from_feature_columns',
values=tuple(six.itervalues(features)),
partitioner=input_layer_partitioner):
net = feature_column_lib.input_layer(
features=features,
feature_columns=feature_columns)
for layer_id, num_hidden_units in enumerate(hidden_units):
with variable_scope.variable_scope(
'hiddenlayer_%d' % layer_id,
values=(net,)) as hidden_layer_scope:
net = core_layers.dense(
net,
units=num_hidden_units,
activation=activation_fn,
kernel_initializer=init_ops.glorot_uniform_initializer(),
name=hidden_layer_scope)
if dropout is not None and mode == model_fn.ModeKeys.TRAIN:
net = core_layers.dropout(net, rate=dropout, training=True)
_add_hidden_layer_summary(net, hidden_layer_scope.name)
with variable_scope.variable_scope(
'logits',
values=(net,)) as logits_scope:
logits = core_layers.dense(
net,
units=head.logits_dimension,
activation=None,
kernel_initializer=init_ops.glorot_uniform_initializer(),
name=logits_scope)
_add_hidden_layer_summary(logits, logits_scope.name)
def _train_op_fn(loss):
"""Returns the op to optimize the loss."""
return optimizer.minimize(
loss,
global_step=training_util.get_global_step())
return head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
class DNNClassifier(estimator.Estimator):
"""A classifier for TensorFlow DNN models.
Example:
```python
sparse_feature_a = sparse_column_with_hash_bucket(...)
sparse_feature_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
...)
sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
...)
estimator = DNNClassifier(
feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNClassifier(
feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
# Input builders
  def input_fn_train(): # returns x, y
pass
estimator.train(input_fn=input_fn_train, steps=100)
  def input_fn_eval(): # returns x, y
pass
metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
  def input_fn_predict(): # returns x, None
pass
predictions = estimator.predict(input_fn=input_fn_predict)
```
Input of `train` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* if `weight_feature_key` is not `None`, a feature with
`key=weight_feature_key` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `_CategoricalColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `_WeightedCategoricalColumn`, two features: the first
with `key` the id column name, the second with `key` the weight column
name. Both features' `value` must be a `SparseTensor`.
- if `column` is a `_DenseColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self,
hidden_units,
feature_columns,
model_dir=None,
n_classes=2,
weight_feature_key=None,
optimizer='Adagrad',
activation_fn=nn.relu,
dropout=None,
input_layer_partitioner=None,
config=None):
"""Initializes a `DNNClassifier` instance.
Args:
      hidden_units: Iterable of number of hidden units per layer. All layers are
fully connected. Ex. `[64, 32]` means first layer has 64 nodes and
second one has 32.
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `_FeatureColumn`.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
n_classes: Number of label classes. Defaults to 2, namely binary
classification. Must be > 1.
weight_feature_key: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Adagrad optimizer.
activation_fn: Activation function applied to each layer. If `None`, will
use `tf.nn.relu`.
dropout: When not `None`, the probability we will drop out a given
coordinate.
input_layer_partitioner: Optional. Partitioner for input layer. Defaults
to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
config: `RunConfig` object to configure the runtime settings.
"""
if n_classes == 2:
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss( # pylint: disable=protected-access
weight_feature_key=weight_feature_key)
else:
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss( # pylint: disable=protected-access
n_classes, weight_feature_key=weight_feature_key)
def _model_fn(features, labels, mode, config):
return _dnn_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
hidden_units=hidden_units,
feature_columns=tuple(feature_columns or []),
optimizer=optimizer,
activation_fn=activation_fn,
dropout=dropout,
input_layer_partitioner=input_layer_partitioner,
config=config)
super(DNNClassifier, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
class DNNRegressor(estimator.Estimator):
"""A regressor for TensorFlow DNN models.
Example:
```python
sparse_feature_a = sparse_column_with_hash_bucket(...)
sparse_feature_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
...)
sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
...)
estimator = DNNRegressor(
feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
# Input builders
  def input_fn_train(): # returns x, y
pass
estimator.train(input_fn=input_fn_train, steps=100)
  def input_fn_eval(): # returns x, y
pass
metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
  def input_fn_predict(): # returns x, None
pass
predictions = estimator.predict(input_fn=input_fn_predict)
```
Input of `train` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* if `weight_feature_key` is not `None`, a feature with
`key=weight_feature_key` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `_CategoricalColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `_WeightedCategoricalColumn`, two features: the first
with `key` the id column name, the second with `key` the weight column
name. Both features' `value` must be a `SparseTensor`.
- if `column` is a `_DenseColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self,
hidden_units,
feature_columns,
model_dir=None,
label_dimension=1,
weight_feature_key=None,
optimizer='Adagrad',
activation_fn=nn.relu,
dropout=None,
input_layer_partitioner=None,
config=None):
"""Initializes a `DNNRegressor` instance.
Args:
      hidden_units: Iterable of number of hidden units per layer. All layers are
fully connected. Ex. `[64, 32]` means first layer has 64 nodes and
second one has 32.
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `_FeatureColumn`.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
label_dimension: Number of regression targets per example. This is the
size of the last dimension of the labels and logits `Tensor` objects
(typically, these have shape `[batch_size, label_dimension]`).
weight_feature_key: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Adagrad optimizer.
activation_fn: Activation function applied to each layer. If `None`, will
use `tf.nn.relu`.
dropout: When not `None`, the probability we will drop out a given
coordinate.
input_layer_partitioner: Optional. Partitioner for input layer. Defaults
to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
config: `RunConfig` object to configure the runtime settings.
"""
def _model_fn(features, labels, mode, config):
return _dnn_model_fn(
features=features,
labels=labels,
mode=mode,
head=head_lib._regression_head_with_mean_squared_error_loss( # pylint: disable=protected-access
label_dimension=label_dimension,
weight_feature_key=weight_feature_key),
hidden_units=hidden_units,
feature_columns=tuple(feature_columns or []),
optimizer=optimizer,
activation_fn=activation_fn,
dropout=dropout,
input_layer_partitioner=input_layer_partitioner,
config=config)
super(DNNRegressor, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
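# Hedged usage sketch (assumptions, not part of the original file): the module
# path and feature-column helper below are assumed from the same TensorFlow
# release; constructing one of the canned estimators above would look roughly
# like this:
#
#   from tensorflow.python.feature_column import feature_column as fc
#
#   age = fc.numeric_column('age')
#   classifier = DNNClassifier(hidden_units=[64, 32],
#                              feature_columns=[age],
#                              n_classes=3)
#   # classifier.train(input_fn=..., steps=...) with an input_fn yielding a
#   # features dict containing an 'age' Tensor and integer labels in [0, 3).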
| apache-2.0 |
harisbal/pandas | pandas/tests/series/test_asof.py | 4 | 5228 | # coding=utf-8
import numpy as np
import pytest
from pandas import Series, Timestamp, date_range, isna, notna, offsets
import pandas.util.testing as tm
class TestSeriesAsof():
def test_basic(self):
# array or list or dates
N = 50
rng = date_range('1/1/1990', periods=N, freq='53s')
ts = Series(np.random.randn(N), index=rng)
ts[15:30] = np.nan
dates = date_range('1/1/1990', periods=N * 3, freq='25s')
result = ts.asof(dates)
assert notna(result).all()
lb = ts.index[14]
ub = ts.index[30]
result = ts.asof(list(dates))
assert notna(result).all()
lb = ts.index[14]
ub = ts.index[30]
mask = (result.index >= lb) & (result.index < ub)
rs = result[mask]
assert (rs == ts[lb]).all()
val = result[result.index[result.index >= ub][0]]
assert ts[ub] == val
def test_scalar(self):
N = 30
rng = date_range('1/1/1990', periods=N, freq='53s')
ts = Series(np.arange(N), index=rng)
ts[5:10] = np.NaN
ts[15:20] = np.NaN
val1 = ts.asof(ts.index[7])
val2 = ts.asof(ts.index[19])
assert val1 == ts[4]
assert val2 == ts[14]
# accepts strings
val1 = ts.asof(str(ts.index[7]))
assert val1 == ts[4]
# in there
result = ts.asof(ts.index[3])
assert result == ts[3]
# no as of value
d = ts.index[0] - offsets.BDay()
assert np.isnan(ts.asof(d))
def test_with_nan(self):
# basic asof test
rng = date_range('1/1/2000', '1/2/2000', freq='4h')
s = Series(np.arange(len(rng)), index=rng)
r = s.resample('2h').mean()
result = r.asof(r.index)
expected = Series([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6.],
index=date_range('1/1/2000', '1/2/2000', freq='2h'))
tm.assert_series_equal(result, expected)
r.iloc[3:5] = np.nan
result = r.asof(r.index)
expected = Series([0, 0, 1, 1, 1, 1, 3, 3, 4, 4, 5, 5, 6.],
index=date_range('1/1/2000', '1/2/2000', freq='2h'))
tm.assert_series_equal(result, expected)
r.iloc[-3:] = np.nan
result = r.asof(r.index)
expected = Series([0, 0, 1, 1, 1, 1, 3, 3, 4, 4, 4, 4, 4.],
index=date_range('1/1/2000', '1/2/2000', freq='2h'))
tm.assert_series_equal(result, expected)
def test_periodindex(self):
from pandas import period_range, PeriodIndex
# array or list or dates
N = 50
rng = period_range('1/1/1990', periods=N, freq='H')
ts = Series(np.random.randn(N), index=rng)
ts[15:30] = np.nan
dates = date_range('1/1/1990', periods=N * 3, freq='37min')
result = ts.asof(dates)
assert notna(result).all()
lb = ts.index[14]
ub = ts.index[30]
result = ts.asof(list(dates))
assert notna(result).all()
lb = ts.index[14]
ub = ts.index[30]
pix = PeriodIndex(result.index.values, freq='H')
mask = (pix >= lb) & (pix < ub)
rs = result[mask]
assert (rs == ts[lb]).all()
ts[5:10] = np.nan
ts[15:20] = np.nan
val1 = ts.asof(ts.index[7])
val2 = ts.asof(ts.index[19])
assert val1 == ts[4]
assert val2 == ts[14]
# accepts strings
val1 = ts.asof(str(ts.index[7]))
assert val1 == ts[4]
# in there
assert ts.asof(ts.index[3]) == ts[3]
# no as of value
d = ts.index[0].to_timestamp() - offsets.BDay()
assert isna(ts.asof(d))
def test_errors(self):
s = Series([1, 2, 3],
index=[Timestamp('20130101'),
Timestamp('20130103'),
Timestamp('20130102')])
# non-monotonic
assert not s.index.is_monotonic
with pytest.raises(ValueError):
s.asof(s.index[0])
# subset with Series
N = 10
rng = date_range('1/1/1990', periods=N, freq='53s')
s = Series(np.random.randn(N), index=rng)
with pytest.raises(ValueError):
s.asof(s.index[0], subset='foo')
def test_all_nans(self):
# GH 15713
# series is all nans
result = Series([np.nan]).asof([0])
expected = Series([np.nan])
tm.assert_series_equal(result, expected)
# testing non-default indexes
N = 50
rng = date_range('1/1/1990', periods=N, freq='53s')
dates = date_range('1/1/1990', periods=N * 3, freq='25s')
result = Series(np.nan, index=rng).asof(dates)
expected = Series(np.nan, index=dates)
tm.assert_series_equal(result, expected)
# testing scalar input
date = date_range('1/1/1990', periods=N * 3, freq='25s')[0]
result = Series(np.nan, index=rng).asof(date)
assert isna(result)
# test name is propagated
result = Series(np.nan, index=[1, 2, 3, 4], name='test').asof([4, 5])
expected = Series(np.nan, index=[4, 5], name='test')
tm.assert_series_equal(result, expected)
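    # Quick illustration (a sketch, not part of the test suite) of the semantics
    # the tests above exercise: Series.asof(where) returns, for each requested
    # label, the last non-NaN value whose index label is <= that label.
    #
    #   s = Series([1.0, np.nan, 3.0], index=[10, 20, 30])
    #   s.asof(25)  # -> 1.0 (the NaN at label 20 is skipped, falling back to 10)
    #   s.asof(35)  # -> 3.0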
| bsd-3-clause |
gsb-eng/asyncio | asyncio/selectors.py | 16 | 18697 | """Selectors module.
This module allows high-level and efficient I/O multiplexing, built upon the
`select` module primitives.
"""
from abc import ABCMeta, abstractmethod
from collections import namedtuple, Mapping
import math
import select
import sys
# generic events, that must be mapped to implementation-specific ones
EVENT_READ = (1 << 0)
EVENT_WRITE = (1 << 1)
def _fileobj_to_fd(fileobj):
"""Return a file descriptor from a file object.
Parameters:
fileobj -- file object or file descriptor
Returns:
corresponding file descriptor
Raises:
ValueError if the object is invalid
"""
if isinstance(fileobj, int):
fd = fileobj
else:
try:
fd = int(fileobj.fileno())
except (AttributeError, TypeError, ValueError):
raise ValueError("Invalid file object: "
"{!r}".format(fileobj)) from None
if fd < 0:
raise ValueError("Invalid file descriptor: {}".format(fd))
return fd
SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
"""Object used to associate a file object to its backing file descriptor,
selected event mask and attached data."""
class _SelectorMapping(Mapping):
"""Mapping of file objects to selector keys."""
def __init__(self, selector):
self._selector = selector
def __len__(self):
return len(self._selector._fd_to_key)
def __getitem__(self, fileobj):
try:
fd = self._selector._fileobj_lookup(fileobj)
return self._selector._fd_to_key[fd]
except KeyError:
raise KeyError("{!r} is not registered".format(fileobj)) from None
def __iter__(self):
return iter(self._selector._fd_to_key)
class BaseSelector(metaclass=ABCMeta):
"""Selector abstract base class.
A selector supports registering file objects to be monitored for specific
I/O events.
A file object is a file descriptor or any object with a `fileno()` method.
An arbitrary object can be attached to the file object, which can be used
for example to store context information, a callback, etc.
A selector can use various implementations (select(), poll(), epoll()...)
depending on the platform. The default `Selector` class uses the most
efficient implementation on the current platform.
"""
@abstractmethod
def register(self, fileobj, events, data=None):
"""Register a file object.
Parameters:
fileobj -- file object or file descriptor
events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE)
data -- attached data
Returns:
SelectorKey instance
Raises:
ValueError if events is invalid
KeyError if fileobj is already registered
OSError if fileobj is closed or otherwise is unacceptable to
the underlying system call (if a system call is made)
Note:
OSError may or may not be raised
"""
raise NotImplementedError
@abstractmethod
def unregister(self, fileobj):
"""Unregister a file object.
Parameters:
fileobj -- file object or file descriptor
Returns:
SelectorKey instance
Raises:
KeyError if fileobj is not registered
Note:
If fileobj is registered but has since been closed this does
*not* raise OSError (even if the wrapped syscall does)
"""
raise NotImplementedError
def modify(self, fileobj, events, data=None):
"""Change a registered file object monitored events or attached data.
Parameters:
fileobj -- file object or file descriptor
events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE)
data -- attached data
Returns:
SelectorKey instance
Raises:
Anything that unregister() or register() raises
"""
self.unregister(fileobj)
return self.register(fileobj, events, data)
@abstractmethod
def select(self, timeout=None):
"""Perform the actual selection, until some monitored file objects are
ready or a timeout expires.
Parameters:
timeout -- if timeout > 0, this specifies the maximum wait time, in
seconds
if timeout <= 0, the select() call won't block, and will
report the currently ready file objects
if timeout is None, select() will block until a monitored
file object becomes ready
Returns:
list of (key, events) for ready file objects
`events` is a bitwise mask of EVENT_READ|EVENT_WRITE
"""
raise NotImplementedError
def close(self):
"""Close the selector.
This must be called to make sure that any underlying resource is freed.
"""
pass
def get_key(self, fileobj):
"""Return the key associated to a registered file object.
Returns:
SelectorKey for this file object
"""
mapping = self.get_map()
if mapping is None:
raise RuntimeError('Selector is closed')
try:
return mapping[fileobj]
except KeyError:
raise KeyError("{!r} is not registered".format(fileobj)) from None
@abstractmethod
def get_map(self):
"""Return a mapping of file objects to selector keys."""
raise NotImplementedError
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
class _BaseSelectorImpl(BaseSelector):
"""Base selector implementation."""
def __init__(self):
# this maps file descriptors to keys
self._fd_to_key = {}
# read-only mapping returned by get_map()
self._map = _SelectorMapping(self)
def _fileobj_lookup(self, fileobj):
"""Return a file descriptor from a file object.
This wraps _fileobj_to_fd() to do an exhaustive search in case
the object is invalid but we still have it in our map. This
is used by unregister() so we can unregister an object that
was previously registered even if it is closed. It is also
used by _SelectorMapping.
"""
try:
return _fileobj_to_fd(fileobj)
except ValueError:
# Do an exhaustive search.
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
return key.fd
# Raise ValueError after all.
raise
def register(self, fileobj, events, data=None):
if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
raise ValueError("Invalid events: {!r}".format(events))
key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
if key.fd in self._fd_to_key:
raise KeyError("{!r} (FD {}) is already registered"
.format(fileobj, key.fd))
self._fd_to_key[key.fd] = key
return key
def unregister(self, fileobj):
try:
key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
except KeyError:
raise KeyError("{!r} is not registered".format(fileobj)) from None
return key
def modify(self, fileobj, events, data=None):
# TODO: Subclasses can probably optimize this even further.
try:
key = self._fd_to_key[self._fileobj_lookup(fileobj)]
except KeyError:
raise KeyError("{!r} is not registered".format(fileobj)) from None
if events != key.events:
self.unregister(fileobj)
key = self.register(fileobj, events, data)
elif data != key.data:
# Use a shortcut to update the data.
key = key._replace(data=data)
self._fd_to_key[key.fd] = key
return key
def close(self):
self._fd_to_key.clear()
self._map = None
def get_map(self):
return self._map
def _key_from_fd(self, fd):
"""Return the key associated to a given file descriptor.
Parameters:
fd -- file descriptor
Returns:
corresponding key, or None if not found
"""
try:
return self._fd_to_key[fd]
except KeyError:
return None
class SelectSelector(_BaseSelectorImpl):
"""Select-based selector."""
def __init__(self):
super().__init__()
self._readers = set()
self._writers = set()
def register(self, fileobj, events, data=None):
key = super().register(fileobj, events, data)
if events & EVENT_READ:
self._readers.add(key.fd)
if events & EVENT_WRITE:
self._writers.add(key.fd)
return key
def unregister(self, fileobj):
key = super().unregister(fileobj)
self._readers.discard(key.fd)
self._writers.discard(key.fd)
return key
if sys.platform == 'win32':
def _select(self, r, w, _, timeout=None):
r, w, x = select.select(r, w, w, timeout)
return r, w + x, []
else:
_select = select.select
def select(self, timeout=None):
timeout = None if timeout is None else max(timeout, 0)
ready = []
try:
r, w, _ = self._select(self._readers, self._writers, [], timeout)
except InterruptedError:
return ready
r = set(r)
w = set(w)
for fd in r | w:
events = 0
if fd in r:
events |= EVENT_READ
if fd in w:
events |= EVENT_WRITE
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
if hasattr(select, 'poll'):
class PollSelector(_BaseSelectorImpl):
"""Poll-based selector."""
def __init__(self):
super().__init__()
self._poll = select.poll()
def register(self, fileobj, events, data=None):
key = super().register(fileobj, events, data)
poll_events = 0
if events & EVENT_READ:
poll_events |= select.POLLIN
if events & EVENT_WRITE:
poll_events |= select.POLLOUT
self._poll.register(key.fd, poll_events)
return key
def unregister(self, fileobj):
key = super().unregister(fileobj)
self._poll.unregister(key.fd)
return key
def select(self, timeout=None):
if timeout is None:
timeout = None
elif timeout <= 0:
timeout = 0
else:
# poll() has a resolution of 1 millisecond, round away from
# zero to wait *at least* timeout seconds.
timeout = math.ceil(timeout * 1e3)
ready = []
try:
fd_event_list = self._poll.poll(timeout)
except InterruptedError:
return ready
for fd, event in fd_event_list:
events = 0
if event & ~select.POLLIN:
events |= EVENT_WRITE
if event & ~select.POLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
if hasattr(select, 'epoll'):
class EpollSelector(_BaseSelectorImpl):
"""Epoll-based selector."""
def __init__(self):
super().__init__()
self._epoll = select.epoll()
def fileno(self):
return self._epoll.fileno()
def register(self, fileobj, events, data=None):
key = super().register(fileobj, events, data)
epoll_events = 0
if events & EVENT_READ:
epoll_events |= select.EPOLLIN
if events & EVENT_WRITE:
epoll_events |= select.EPOLLOUT
self._epoll.register(key.fd, epoll_events)
return key
def unregister(self, fileobj):
key = super().unregister(fileobj)
try:
self._epoll.unregister(key.fd)
except OSError:
# This can happen if the FD was closed since it
# was registered.
pass
return key
def select(self, timeout=None):
if timeout is None:
timeout = -1
elif timeout <= 0:
timeout = 0
else:
# epoll_wait() has a resolution of 1 millisecond, round away
# from zero to wait *at least* timeout seconds.
timeout = math.ceil(timeout * 1e3) * 1e-3
# epoll_wait() expects `maxevents` to be greater than zero;
# we want to make sure that `select()` can be called when no
# FD is registered.
max_ev = max(len(self._fd_to_key), 1)
ready = []
try:
fd_event_list = self._epoll.poll(timeout, max_ev)
except InterruptedError:
return ready
for fd, event in fd_event_list:
events = 0
if event & ~select.EPOLLIN:
events |= EVENT_WRITE
if event & ~select.EPOLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._epoll.close()
super().close()
if hasattr(select, 'devpoll'):
class DevpollSelector(_BaseSelectorImpl):
"""Solaris /dev/poll selector."""
def __init__(self):
super().__init__()
self._devpoll = select.devpoll()
def fileno(self):
return self._devpoll.fileno()
def register(self, fileobj, events, data=None):
key = super().register(fileobj, events, data)
poll_events = 0
if events & EVENT_READ:
poll_events |= select.POLLIN
if events & EVENT_WRITE:
poll_events |= select.POLLOUT
self._devpoll.register(key.fd, poll_events)
return key
def unregister(self, fileobj):
key = super().unregister(fileobj)
self._devpoll.unregister(key.fd)
return key
def select(self, timeout=None):
if timeout is None:
timeout = None
elif timeout <= 0:
timeout = 0
else:
# devpoll() has a resolution of 1 millisecond, round away from
# zero to wait *at least* timeout seconds.
timeout = math.ceil(timeout * 1e3)
ready = []
try:
fd_event_list = self._devpoll.poll(timeout)
except InterruptedError:
return ready
for fd, event in fd_event_list:
events = 0
if event & ~select.POLLIN:
events |= EVENT_WRITE
if event & ~select.POLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._devpoll.close()
super().close()
if hasattr(select, 'kqueue'):
class KqueueSelector(_BaseSelectorImpl):
"""Kqueue-based selector."""
def __init__(self):
super().__init__()
self._kqueue = select.kqueue()
def fileno(self):
return self._kqueue.fileno()
def register(self, fileobj, events, data=None):
key = super().register(fileobj, events, data)
if events & EVENT_READ:
kev = select.kevent(key.fd, select.KQ_FILTER_READ,
select.KQ_EV_ADD)
self._kqueue.control([kev], 0, 0)
if events & EVENT_WRITE:
kev = select.kevent(key.fd, select.KQ_FILTER_WRITE,
select.KQ_EV_ADD)
self._kqueue.control([kev], 0, 0)
return key
def unregister(self, fileobj):
key = super().unregister(fileobj)
if key.events & EVENT_READ:
kev = select.kevent(key.fd, select.KQ_FILTER_READ,
select.KQ_EV_DELETE)
try:
self._kqueue.control([kev], 0, 0)
except OSError:
# This can happen if the FD was closed since it
# was registered.
pass
if key.events & EVENT_WRITE:
kev = select.kevent(key.fd, select.KQ_FILTER_WRITE,
select.KQ_EV_DELETE)
try:
self._kqueue.control([kev], 0, 0)
except OSError:
# See comment above.
pass
return key
def select(self, timeout=None):
timeout = None if timeout is None else max(timeout, 0)
max_ev = len(self._fd_to_key)
ready = []
try:
kev_list = self._kqueue.control(None, max_ev, timeout)
except InterruptedError:
return ready
for kev in kev_list:
fd = kev.ident
flag = kev.filter
events = 0
if flag == select.KQ_FILTER_READ:
events |= EVENT_READ
if flag == select.KQ_FILTER_WRITE:
events |= EVENT_WRITE
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._kqueue.close()
super().close()
# Choose the best implementation, roughly:
# epoll|kqueue|devpoll > poll > select.
# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
if 'KqueueSelector' in globals():
DefaultSelector = KqueueSelector
elif 'EpollSelector' in globals():
DefaultSelector = EpollSelector
elif 'DevpollSelector' in globals():
DefaultSelector = DevpollSelector
elif 'PollSelector' in globals():
DefaultSelector = PollSelector
else:
DefaultSelector = SelectSelector
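# Illustrative usage sketch (added for clarity; not part of the original
# module): a minimal accept/read loop driven by whichever DefaultSelector was
# chosen above.  ``listening_sock`` is a hypothetical already-bound,
# non-blocking listening socket supplied by the caller.
def _example_accept_loop(listening_sock):  # pragma: no cover - documentation only
    sel = DefaultSelector()
    # data=None marks the listening socket; accepted connections get a tag.
    sel.register(listening_sock, EVENT_READ, data=None)
    while True:
        # select() yields (SelectorKey, events) pairs that are ready.
        for key, events in sel.select(timeout=1.0):
            if key.data is None:
                conn, _addr = key.fileobj.accept()
                conn.setblocking(False)
                sel.register(conn, EVENT_READ, data='client')
            elif events & EVENT_READ:
                chunk = key.fileobj.recv(4096)
                if not chunk:
                    # Peer closed the connection: stop watching it.
                    sel.unregister(key.fileobj)
                    key.fileobj.close()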
| apache-2.0 |
supracd/pygal | pygal/svg.py | 3 | 13554 | # -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2015 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
"""Svg helper"""
from __future__ import division
from pygal._compat import to_str, u, quote_plus
from pygal.etree import etree
import io
import os
import json
from datetime import date, datetime
from numbers import Number
from math import cos, sin, pi
from pygal.util import template, minify_css
from pygal import __version__
class Svg(object):
"""Svg related methods"""
ns = 'http://www.w3.org/2000/svg'
xlink_ns = 'http://www.w3.org/1999/xlink'
def __init__(self, graph):
"""Create the svg helper with the chart instance"""
self.graph = graph
if not graph.no_prefix:
self.id = '#chart-%s ' % graph.uuid
else:
self.id = ''
self.processing_instructions = [
etree.ProcessingInstruction(
u('xml'), u("version='1.0' encoding='utf-8'"))]
if etree.lxml:
attrs = {
'nsmap': {
None: self.ns,
'xlink': self.xlink_ns
}
}
else:
attrs = {
'xmlns': self.ns
}
if hasattr(etree, 'register_namespace'):
etree.register_namespace('xlink', self.xlink_ns)
else:
etree._namespace_map[self.xlink_ns] = 'xlink'
self.root = etree.Element('svg', **attrs)
self.root.attrib['id'] = self.id.lstrip('#').rstrip()
self.root.attrib['class'] = 'pygal-chart'
self.root.append(
etree.Comment(u(
'Generated with pygal %s (%s) ©Kozea 2011-2015 on %s' % (
__version__,
'lxml' if etree.lxml else 'etree',
date.today().isoformat()))))
self.root.append(etree.Comment(u('http://pygal.org')))
self.root.append(etree.Comment(u('http://github.com/Kozea/pygal')))
self.defs = self.node(tag='defs')
self.title = self.node(tag='title')
self.title.text = graph.title or 'Pygal'
def add_styles(self):
"""Add the css to the svg"""
colors = self.graph.style.get_colors(self.id, self.graph._order)
strokes = self.get_strokes()
all_css = []
auto_css = ['file://base.css']
if self.graph.style._google_fonts:
auto_css.append(
'//fonts.googleapis.com/css?family=%s' % quote_plus(
'|'.join(self.graph.style._google_fonts))
)
for css in auto_css + list(self.graph.css):
css_text = None
if css.startswith('inline:'):
css_text = css[len('inline:'):]
elif css.startswith('file://'):
if not os.path.exists(css):
css = os.path.join(
os.path.dirname(__file__), 'css', css[len('file://'):])
with io.open(css, encoding='utf-8') as f:
css_text = template(
f.read(),
style=self.graph.style,
colors=colors,
strokes=strokes,
id=self.id)
if css_text is not None:
if not self.graph.pretty_print:
css_text = minify_css(css_text)
all_css.append(css_text)
else:
if css.startswith('//') and self.graph.force_uri_protocol:
css = '%s:%s' % (self.graph.force_uri_protocol, css)
self.processing_instructions.append(
etree.PI(
u('xml-stylesheet'), u('href="%s"' % css)))
self.node(
self.defs, 'style', type='text/css').text = '\n'.join(all_css)
def add_scripts(self):
"""Add the js to the svg"""
common_script = self.node(self.defs, 'script', type='text/javascript')
def get_js_dict():
return dict(
(k, getattr(self.graph.state, k))
for k in dir(self.graph.config)
if not k.startswith('_') and hasattr(self.graph.state, k) and
not hasattr(getattr(self.graph.state, k), '__call__'))
def json_default(o):
if isinstance(o, (datetime, date)):
return o.isoformat()
if hasattr(o, 'to_dict'):
return o.to_dict()
return json.JSONEncoder().default(o)
dct = get_js_dict()
# Config adds
dct['legends'] = [
l.get('title') if isinstance(l, dict) else l
for l in self.graph._legends + self.graph._secondary_legends]
common_js = 'window.pygal = window.pygal || {};'
common_js += 'window.pygal.config = window.pygal.config || {};'
if self.graph.no_prefix:
common_js += 'window.pygal.config = '
else:
common_js += 'window.pygal.config[%r] = ' % self.graph.uuid
common_script.text = common_js + json.dumps(dct, default=json_default)
for js in self.graph.js:
if js.startswith('file://'):
script = self.node(self.defs, 'script', type='text/javascript')
with io.open(js[len('file://'):], encoding='utf-8') as f:
script.text = f.read()
else:
if js.startswith('//') and self.graph.force_uri_protocol:
js = '%s:%s' % (self.graph.force_uri_protocol, js)
self.node(self.defs, 'script', type='text/javascript', href=js)
def node(self, parent=None, tag='g', attrib=None, **extras):
"""Make a new svg node"""
if parent is None:
parent = self.root
attrib = attrib or {}
attrib.update(extras)
def in_attrib_and_number(key):
return key in attrib and isinstance(attrib[key], Number)
for pos, dim in (('x', 'width'), ('y', 'height')):
if in_attrib_and_number(dim) and attrib[dim] < 0:
attrib[dim] = - attrib[dim]
if in_attrib_and_number(pos):
attrib[pos] = attrib[pos] - attrib[dim]
for key, value in dict(attrib).items():
if value is None:
del attrib[key]
attrib[key] = to_str(value)
if key.endswith('_'):
attrib[key.rstrip('_')] = attrib[key]
del attrib[key]
elif key == 'href':
attrib[etree.QName(
'http://www.w3.org/1999/xlink', key)] = attrib[key]
del attrib[key]
return etree.SubElement(parent, tag, attrib)
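    # Illustrative note (not in the original source): given the attribute
    # munging above, a hypothetical call such as
    #     svg.node(parent, 'rect', class_='bar', width=-5, x=20)
    # flips the negative width to 5, shifts x to 15 so the rect covers the
    # same span, writes the trailing-underscore key as 'class', and converts
    # every value to a string before the SubElement is created.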
def transposable_node(self, parent=None, tag='g', attrib=None, **extras):
"""Make a new svg node which can be transposed if horizontal"""
if self.graph.horizontal:
for key1, key2 in (('x', 'y'), ('width', 'height'), ('cx', 'cy')):
attr1 = extras.get(key1, None)
attr2 = extras.get(key2, None)
extras[key1], extras[key2] = attr2, attr1
return self.node(parent, tag, attrib, **extras)
def serie(self, serie):
"""Make serie node"""
return dict(
plot=self.node(
self.graph.nodes['plot'],
class_='series serie-%d color-%d' % (
serie.index, serie.index)),
overlay=self.node(
self.graph.nodes['overlay'],
class_='series serie-%d color-%d' % (
serie.index, serie.index)),
text_overlay=self.node(
self.graph.nodes['text_overlay'],
class_='series serie-%d color-%d' % (
serie.index, serie.index)))
def line(self, node, coords, close=False, **kwargs):
"""Draw a svg line"""
line_len = len(coords)
if line_len < 2:
return
root = 'M%s L%s Z' if close else 'M%s L%s'
origin_index = 0
while origin_index < line_len and None in coords[origin_index]:
origin_index += 1
if origin_index == line_len:
return
if self.graph.horizontal:
coord_format = lambda xy: '%f %f' % (xy[1], xy[0])
else:
coord_format = lambda xy: '%f %f' % xy
origin = coord_format(coords[origin_index])
line = ' '.join([coord_format(c)
for c in coords[origin_index + 1:]
if None not in c])
return self.node(
node, 'path', d=root % (origin, line), **kwargs)
def slice(
self, serie_node, node, radius, small_radius,
angle, start_angle, center, val, i, metadata):
"""Draw a pie slice"""
project = lambda rho, alpha: (
rho * sin(-alpha), rho * cos(-alpha))
diff = lambda x, y: (x[0] - y[0], x[1] - y[1])
fmt = lambda x: '%f %f' % x
get_radius = lambda r: fmt(tuple([r] * 2))
absolute_project = lambda rho, theta: fmt(
diff(center, project(rho, theta)))
if angle == 2 * pi:
rv = self.node(
node, 'circle',
cx=center[0],
cy=center[1],
r=radius,
class_='slice reactive tooltip-trigger')
elif angle > 0:
to = [absolute_project(radius, start_angle),
absolute_project(radius, start_angle + angle),
absolute_project(small_radius, start_angle + angle),
absolute_project(small_radius, start_angle)]
rv = self.node(
node, 'path',
d='M%s A%s 0 %d 1 %s L%s A%s 0 %d 0 %s z' % (
to[0],
get_radius(radius), int(angle > pi), to[1],
to[2],
get_radius(small_radius), int(angle > pi), to[3]),
class_='slice reactive tooltip-trigger')
else:
rv = None
x, y = diff(center, project(
(radius + small_radius) / 2, start_angle + angle / 2))
self.graph._tooltip_data(
node, val, x, y, "centered",
self.graph._x_labels and self.graph._x_labels[i][0])
if angle >= 0.3: # 0.3 radians is about 17 degrees
self.graph._static_value(serie_node, val, x, y, metadata)
return rv
def pre_render(self):
"""Last things to do before rendering"""
self.add_styles()
self.add_scripts()
self.root.set(
'viewBox', '0 0 %d %d' % (self.graph.width, self.graph.height))
if self.graph.explicit_size:
self.root.set('width', str(self.graph.width))
self.root.set('height', str(self.graph.height))
def draw_no_data(self):
"""Write the no data text to the svg"""
no_data = self.node(self.graph.nodes['text_overlay'], 'text',
x=self.graph.view.width / 2,
y=self.graph.view.height / 2,
class_='no_data')
no_data.text = self.graph.no_data_text
def render(self, is_unicode=False, pretty_print=False):
"""Last thing to do before rendering"""
for f in self.graph.xml_filters:
self.root = f(self.root)
args = {
'encoding': 'utf-8'
}
if etree.lxml:
args['pretty_print'] = pretty_print
svg = etree.tostring(
self.root, **args)
if not self.graph.disable_xml_declaration:
svg = b'\n'.join(
[etree.tostring(
pi, **args)
for pi in self.processing_instructions]
) + b'\n' + svg
if self.graph.disable_xml_declaration or is_unicode:
svg = svg.decode('utf-8')
return svg
def get_strokes(self):
"""Return a css snippet containing all stroke style options"""
def stroke_dict_to_css(stroke, i=None):
"""Return a css style for the given option"""
css = ['%s.series%s {\n' % (
self.id, '.serie-%d' % i if i is not None else '')]
for key in (
'width', 'linejoin', 'linecap',
'dasharray', 'dashoffset'):
if stroke.get(key):
css.append(' stroke-%s: %s;\n' % (
key, stroke[key]))
css.append('}')
return '\n'.join(css)
css = []
if self.graph.stroke_style is not None:
css.append(stroke_dict_to_css(self.graph.stroke_style))
for serie in self.graph.series:
if serie.stroke_style is not None:
css.append(stroke_dict_to_css(serie.stroke_style, serie.index))
return '\n'.join(css)
| lgpl-3.0 |
glennw/servo | tests/wpt/web-platform-tests/webvtt/webvtt-file-format-parsing/webvtt-cue-text-parsing-rules/buildtests.py | 75 | 1952 | #!/usr/bin/python
import os
import urllib
import hashlib
doctmpl = """<!doctype html>
<title>WebVTT cue data parser test %s</title>
<style>video { display:none }</style>
<script src=/resources/testharness.js></script>
<script src=/resources/testharnessreport.js></script>
<script src=/html/syntax/parsing/template.js></script>
<script src=/html/syntax/parsing/common.js></script>
<script src=../common.js></script>
<div id=log></div>
<script>
runTests([
%s
]);
</script>"""
testobj = "{name:'%s', input:'%s', expected:'%s'}"
def appendtest(tests, input, expected):
tests.append(testobj % (hashlib.sha1(input).hexdigest(), urllib.quote(input[:-1]), urllib.quote(expected[:-1])))
files = os.listdir('dat/')
for file in files:
if os.path.isdir('dat/'+file) or file[0] == ".":
continue
tests = []
input = ""
expected = ""
state = ""
f = open('dat/'+file, "r")
while 1:
line = f.readline()
if not line:
if state != "":
appendtest(tests, input, expected)
input = ""
expected = ""
state = ""
break
if line[0] == "#":
state = line
if line == "#document-fragment\n":
expected = expected + line
elif state == "#data\n":
input = input + line
elif state == "#errors\n":
pass
elif state == "#document-fragment\n":
if line == "\n":
appendtest(tests, input, expected)
input = ""
expected = ""
state = ""
else:
expected = expected + line
else:
raise Exception("failed to parse file "+file+" line:"+line+" (state: "+state+")")
f.close()
barename = file.replace(".dat", "")
out = open('tests/'+barename+".html", "w")
out.write(doctmpl % (barename, ",\n".join(tests)))
out.close()
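# Illustrative note (not part of the original script): judging from the parser
# above, each ``dat/`` input is expected to hold blank-line-separated cases in
# the html5lib-style section format, roughly:
#
#   #data
#   <i>italic</i> cue text
#   #errors
#   #document-fragment
#   <i>
#     "italic"
#   " cue text"
#
# Every such case becomes one entry passed to runTests() in the generated page.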
| mpl-2.0 |
manelore/django-haystack | tests/whoosh_tests/tests/forms.py | 10 | 1285 | # To ensure spelling suggestions work...
from django.conf import settings
from django.http import HttpRequest
from haystack.forms import SearchForm
from haystack.views import SearchView
from whoosh_tests.tests.whoosh_backend import LiveWhooshRoundTripTestCase
# Whoosh appears to flail on providing a useful suggestion, but since it's
# not ``None``, we know the backend is doing something. Whee.
class SpellingSuggestionTestCase(LiveWhooshRoundTripTestCase):
def setUp(self):
self.old_spelling_setting = settings.HAYSTACK_CONNECTIONS['default']['INCLUDE_SPELLING']
settings.HAYSTACK_CONNECTIONS['default']['INCLUDE_SPELLING'] = True
super(SpellingSuggestionTestCase, self).setUp()
def tearDown(self):
settings.HAYSTACK_CONNECTIONS['default']['INCLUDE_SPELLING'] = self.old_spelling_setting
super(SpellingSuggestionTestCase, self).tearDown()
def test_form_suggestion(self):
form = SearchForm({'q': 'exampl'})
self.assertEqual(form.get_suggestion(), '')
def test_view_suggestion(self):
view = SearchView(template='test_suggestion.html')
mock = HttpRequest()
mock.GET['q'] = 'exampl'
resp = view(mock)
self.assertEqual(resp.content, 'Suggestion: ')
| bsd-3-clause |
sunqb/oa_qian | flask/Lib/site-packages/openid/extensions/draft/pape5.py | 156 | 16391 | """An implementation of the OpenID Provider Authentication Policy
Extension 1.0, Draft 5
@see: http://openid.net/developers/specs/
@since: 2.1.0
"""
__all__ = [
'Request',
'Response',
'ns_uri',
'AUTH_PHISHING_RESISTANT',
'AUTH_MULTI_FACTOR',
'AUTH_MULTI_FACTOR_PHYSICAL',
'LEVELS_NIST',
'LEVELS_JISA',
]
from openid.extension import Extension
import warnings
import re
ns_uri = "http://specs.openid.net/extensions/pape/1.0"
AUTH_MULTI_FACTOR_PHYSICAL = \
'http://schemas.openid.net/pape/policies/2007/06/multi-factor-physical'
AUTH_MULTI_FACTOR = \
'http://schemas.openid.net/pape/policies/2007/06/multi-factor'
AUTH_PHISHING_RESISTANT = \
'http://schemas.openid.net/pape/policies/2007/06/phishing-resistant'
AUTH_NONE = \
'http://schemas.openid.net/pape/policies/2007/06/none'
TIME_VALIDATOR = re.compile('^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ$')
LEVELS_NIST = 'http://csrc.nist.gov/publications/nistpubs/800-63/SP800-63V1_0_2.pdf'
LEVELS_JISA = 'http://www.jisa.or.jp/spec/auth_level.html'
class PAPEExtension(Extension):
_default_auth_level_aliases = {
'nist': LEVELS_NIST,
'jisa': LEVELS_JISA,
}
def __init__(self):
self.auth_level_aliases = self._default_auth_level_aliases.copy()
def _addAuthLevelAlias(self, auth_level_uri, alias=None):
"""Add an auth level URI alias to this request.
@param auth_level_uri: The auth level URI to send in the
request.
@param alias: The namespace alias to use for this auth level
in this message. May be None if the alias is not
important.
"""
if alias is None:
try:
alias = self._getAlias(auth_level_uri)
except KeyError:
alias = self._generateAlias()
else:
existing_uri = self.auth_level_aliases.get(alias)
if existing_uri is not None and existing_uri != auth_level_uri:
raise KeyError('Attempting to redefine alias %r from %r to %r',
alias, existing_uri, auth_level_uri)
self.auth_level_aliases[alias] = auth_level_uri
def _generateAlias(self):
"""Return an unused auth level alias"""
for i in xrange(1000):
alias = 'cust%d' % (i,)
if alias not in self.auth_level_aliases:
return alias
raise RuntimeError('Could not find an unused alias (tried 1000!)')
def _getAlias(self, auth_level_uri):
"""Return the alias for the specified auth level URI.
@raises KeyError: if no alias is defined
"""
for (alias, existing_uri) in self.auth_level_aliases.iteritems():
if auth_level_uri == existing_uri:
return alias
raise KeyError(auth_level_uri)
class Request(PAPEExtension):
"""A Provider Authentication Policy request, sent from a relying
party to a provider
@ivar preferred_auth_policies: The authentication policies that
the relying party prefers
@type preferred_auth_policies: [str]
@ivar max_auth_age: The maximum time, in seconds, that the relying
party wants to allow to have elapsed before the user must
re-authenticate
@type max_auth_age: int or NoneType
@ivar preferred_auth_level_types: Ordered list of authentication
level namespace URIs
@type preferred_auth_level_types: [str]
"""
ns_alias = 'pape'
def __init__(self, preferred_auth_policies=None, max_auth_age=None,
preferred_auth_level_types=None):
super(Request, self).__init__()
if preferred_auth_policies is None:
preferred_auth_policies = []
self.preferred_auth_policies = preferred_auth_policies
self.max_auth_age = max_auth_age
self.preferred_auth_level_types = []
if preferred_auth_level_types is not None:
for auth_level in preferred_auth_level_types:
self.addAuthLevel(auth_level)
def __nonzero__(self):
return bool(self.preferred_auth_policies or
self.max_auth_age is not None or
self.preferred_auth_level_types)
def addPolicyURI(self, policy_uri):
"""Add an acceptable authentication policy URI to this request
This method is intended to be used by the relying party to add
acceptable authentication types to the request.
@param policy_uri: The identifier for the preferred type of
authentication.
@see: http://openid.net/specs/openid-provider-authentication-policy-extension-1_0-05.html#auth_policies
"""
if policy_uri not in self.preferred_auth_policies:
self.preferred_auth_policies.append(policy_uri)
def addAuthLevel(self, auth_level_uri, alias=None):
self._addAuthLevelAlias(auth_level_uri, alias)
if auth_level_uri not in self.preferred_auth_level_types:
self.preferred_auth_level_types.append(auth_level_uri)
def getExtensionArgs(self):
"""@see: C{L{Extension.getExtensionArgs}}
"""
ns_args = {
'preferred_auth_policies':' '.join(self.preferred_auth_policies),
}
if self.max_auth_age is not None:
ns_args['max_auth_age'] = str(self.max_auth_age)
if self.preferred_auth_level_types:
preferred_types = []
for auth_level_uri in self.preferred_auth_level_types:
alias = self._getAlias(auth_level_uri)
ns_args['auth_level.ns.%s' % (alias,)] = auth_level_uri
preferred_types.append(alias)
ns_args['preferred_auth_level_types'] = ' '.join(preferred_types)
return ns_args
def fromOpenIDRequest(cls, request):
"""Instantiate a Request object from the arguments in a
C{checkid_*} OpenID message
"""
self = cls()
args = request.message.getArgs(self.ns_uri)
is_openid1 = request.message.isOpenID1()
if args == {}:
return None
self.parseExtensionArgs(args, is_openid1)
return self
fromOpenIDRequest = classmethod(fromOpenIDRequest)
def parseExtensionArgs(self, args, is_openid1, strict=False):
"""Set the state of this request to be that expressed in these
PAPE arguments
@param args: The PAPE arguments without a namespace
@param strict: Whether to raise an exception if the input is
out of spec or otherwise malformed. If strict is false,
malformed input will be ignored.
@param is_openid1: Whether the input should be treated as part
of an OpenID1 request
@rtype: None
@raises ValueError: When the max_auth_age is not parseable as
an integer
"""
# preferred_auth_policies is a space-separated list of policy URIs
self.preferred_auth_policies = []
policies_str = args.get('preferred_auth_policies')
if policies_str:
for uri in policies_str.split(' '):
if uri not in self.preferred_auth_policies:
self.preferred_auth_policies.append(uri)
# max_auth_age is base-10 integer number of seconds
max_auth_age_str = args.get('max_auth_age')
self.max_auth_age = None
if max_auth_age_str:
try:
self.max_auth_age = int(max_auth_age_str)
except ValueError:
if strict:
raise
# Parse auth level information
preferred_auth_level_types = args.get('preferred_auth_level_types')
if preferred_auth_level_types:
aliases = preferred_auth_level_types.strip().split()
for alias in aliases:
key = 'auth_level.ns.%s' % (alias,)
try:
uri = args[key]
except KeyError:
if is_openid1:
uri = self._default_auth_level_aliases.get(alias)
else:
uri = None
if uri is None:
if strict:
raise ValueError('preferred auth level %r is not '
'defined in this message' % (alias,))
else:
self.addAuthLevel(uri, alias)
def preferredTypes(self, supported_types):
"""Given a list of authentication policy URIs that a provider
supports, this method returns the subsequence of those types
that are preferred by the relying party.
@param supported_types: A sequence of authentication policy
type URIs that are supported by a provider
@returns: The sub-sequence of the supported types that are
preferred by the relying party. This list will be ordered
in the order that the types appear in the supported_types
sequence, and may be empty if the provider does not prefer
any of the supported authentication types.
@returntype: [str]
"""
return filter(self.preferred_auth_policies.__contains__,
supported_types)
Request.ns_uri = ns_uri
class Response(PAPEExtension):
"""A Provider Authentication Policy response, sent from a provider
to a relying party
@ivar auth_policies: List of authentication policies conformed to
by this OpenID assertion, represented as policy URIs
"""
ns_alias = 'pape'
def __init__(self, auth_policies=None, auth_time=None,
auth_levels=None):
super(Response, self).__init__()
if auth_policies:
self.auth_policies = auth_policies
else:
self.auth_policies = []
self.auth_time = auth_time
self.auth_levels = {}
if auth_levels is None:
auth_levels = {}
for uri, level in auth_levels.iteritems():
self.setAuthLevel(uri, level)
def setAuthLevel(self, level_uri, level, alias=None):
"""Set the value for the given auth level type.
@param level: string representation of an authentication level
valid for level_uri
@param alias: An optional namespace alias for the given auth
level URI. May be omitted if the alias is not
significant. The library will use a reasonable default for
widely-used auth level types.
"""
self._addAuthLevelAlias(level_uri, alias)
self.auth_levels[level_uri] = level
def getAuthLevel(self, level_uri):
"""Return the auth level for the specified auth level
identifier
@returns: A string that should map to the auth levels defined
for the auth level type
@raises KeyError: If the auth level type is not present in
this message
"""
return self.auth_levels[level_uri]
def _getNISTAuthLevel(self):
try:
return int(self.getAuthLevel(LEVELS_NIST))
except KeyError:
return None
nist_auth_level = property(
_getNISTAuthLevel,
doc="Backward-compatibility accessor for the NIST auth level")
def addPolicyURI(self, policy_uri):
"""Add a authentication policy to this response
This method is intended to be used by the provider to add a
policy that the provider conformed to when authenticating the user.
@param policy_uri: The identifier for the preferred type of
authentication.
@see: http://openid.net/specs/openid-provider-authentication-policy-extension-1_0-01.html#auth_policies
"""
if policy_uri == AUTH_NONE:
raise RuntimeError(
'To send no policies, do not set any on the response.')
if policy_uri not in self.auth_policies:
self.auth_policies.append(policy_uri)
def fromSuccessResponse(cls, success_response):
"""Create a C{L{Response}} object from a successful OpenID
library response
(C{L{openid.consumer.consumer.SuccessResponse}}) response
message
@param success_response: A SuccessResponse from consumer.complete()
@type success_response: C{L{openid.consumer.consumer.SuccessResponse}}
@rtype: Response or None
@returns: A provider authentication policy response from the
data that was supplied with the C{id_res} response or None
if the provider sent no signed PAPE response arguments.
"""
self = cls()
# PAPE requires that the args be signed.
args = success_response.getSignedNS(self.ns_uri)
is_openid1 = success_response.isOpenID1()
# Only try to construct a PAPE response if the arguments were
# signed in the OpenID response. If not, return None.
if args is not None:
self.parseExtensionArgs(args, is_openid1)
return self
else:
return None
def parseExtensionArgs(self, args, is_openid1, strict=False):
"""Parse the provider authentication policy arguments into the
internal state of this object
@param args: unqualified provider authentication policy
arguments
@param strict: Whether to raise an exception when bad data is
encountered
@returns: None. The data is parsed into the internal fields of
this object.
"""
policies_str = args.get('auth_policies')
if policies_str:
auth_policies = policies_str.split(' ')
elif strict:
raise ValueError('Missing auth_policies')
else:
auth_policies = []
if (len(auth_policies) > 1 and strict and AUTH_NONE in auth_policies):
raise ValueError('Got some auth policies, as well as the special '
'"none" URI: %r' % (auth_policies,))
if 'none' in auth_policies:
msg = '"none" used as a policy URI (see PAPE draft < 5)'
if strict:
raise ValueError(msg)
else:
warnings.warn(msg, stacklevel=2)
auth_policies = [u for u in auth_policies
if u not in ['none', AUTH_NONE]]
self.auth_policies = auth_policies
for (key, val) in args.iteritems():
if key.startswith('auth_level.'):
alias = key[11:]
# skip the already-processed namespace declarations
if alias.startswith('ns.'):
continue
try:
uri = args['auth_level.ns.%s' % (alias,)]
except KeyError:
if is_openid1:
uri = self._default_auth_level_aliases.get(alias)
else:
uri = None
if uri is None:
if strict:
raise ValueError(
'Undefined auth level alias: %r' % (alias,))
else:
self.setAuthLevel(uri, val, alias)
auth_time = args.get('auth_time')
if auth_time:
if TIME_VALIDATOR.match(auth_time):
self.auth_time = auth_time
elif strict:
raise ValueError("auth_time must be in RFC3339 format")
fromSuccessResponse = classmethod(fromSuccessResponse)
def getExtensionArgs(self):
"""@see: C{L{Extension.getExtensionArgs}}
"""
if len(self.auth_policies) == 0:
ns_args = {
'auth_policies': AUTH_NONE,
}
else:
ns_args = {
'auth_policies':' '.join(self.auth_policies),
}
for level_type, level in self.auth_levels.iteritems():
alias = self._getAlias(level_type)
ns_args['auth_level.ns.%s' % (alias,)] = level_type
ns_args['auth_level.%s' % (alias,)] = str(level)
if self.auth_time is not None:
if not TIME_VALIDATOR.match(self.auth_time):
raise ValueError('auth_time must be in RFC3339 format')
ns_args['auth_time'] = self.auth_time
return ns_args
Response.ns_uri = ns_uri
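# Illustrative sketch (added for clarity; not part of the original module):
# a typical round trip.  ``auth_request`` and ``oid_response`` are hypothetical
# objects obtained from the usual python-openid consumer/server flow.
def _example_pape_round_trip(auth_request, oid_response):  # pragma: no cover
    # Relying party side: ask for phishing-resistant auth within 5 minutes.
    pape_request = Request()
    pape_request.addPolicyURI(AUTH_PHISHING_RESISTANT)
    pape_request.max_auth_age = 300
    pape_request.addAuthLevel(LEVELS_NIST)
    auth_request.addExtension(pape_request)
    # Provider side: report the policies and level that were actually applied.
    pape_response = Response()
    pape_response.addPolicyURI(AUTH_PHISHING_RESISTANT)
    pape_response.setAuthLevel(LEVELS_NIST, '2')
    pape_response.auth_time = '2008-01-01T12:00:00Z'
    oid_response.addExtension(pape_response)
    return pape_request, pape_response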
| apache-2.0 |
fernandezcuesta/ansible | lib/ansible/modules/system/debconf.py | 9 | 5418 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Brian Coca <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: debconf
short_description: Configure a .deb package
description:
- Configure a .deb package using debconf-set-selections. Or just query
existing selections.
version_added: "1.6"
notes:
- This module requires the command line debconf tools.
- A number of questions have to be answered (depending on the package).
Use 'debconf-show <package>' on any Debian or derivative with the package
installed to see questions/settings available.
- Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords.
requirements: [ debconf, debconf-utils ]
options:
name:
description:
- Name of package to configure.
required: true
default: null
aliases: ['pkg']
question:
description:
- A debconf configuration setting
required: false
default: null
aliases: ['setting', 'selection']
vtype:
description:
- The type of the value supplied.
- C(seen) was added in 2.2.
required: false
default: null
choices: [string, password, boolean, select, multiselect, note, error, title, text, seen]
value:
description:
- Value to set the configuration to
required: false
default: null
aliases: ['answer']
unseen:
description:
- Do not set 'seen' flag when pre-seeding
required: false
default: False
author: "Brian Coca (@bcoca)"
'''
EXAMPLES = '''
# Set default locale to fr_FR.UTF-8
- debconf:
name: locales
question: locales/default_environment_locale
value: fr_FR.UTF-8
vtype: select
# set to generate locales:
- debconf:
name: locales
question: locales/locales_to_be_generated
value: en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8
vtype: multiselect
# Accept oracle license
- debconf:
name: oracle-java7-installer
question: shared/accepted-oracle-license-v1-1
value: true
vtype: select
# Specifying package you can register/return the list of questions and current values
- debconf:
name: tzdata
'''
from ansible.module_utils.basic import AnsibleModule
def get_selections(module, pkg):
cmd = [module.get_bin_path('debconf-show', True), pkg]
rc, out, err = module.run_command(' '.join(cmd))
if rc != 0:
module.fail_json(msg=err)
selections = {}
for line in out.splitlines():
(key, value) = line.split(':', 1)
selections[ key.strip('*').strip() ] = value.strip()
return selections
def set_selection(module, pkg, question, vtype, value, unseen):
setsel = module.get_bin_path('debconf-set-selections', True)
cmd = [setsel]
if unseen:
cmd.append('-u')
if vtype == 'boolean':
if value == 'True':
value = 'true'
elif value == 'False':
value = 'false'
data = ' '.join([pkg, question, vtype, value])
return module.run_command(cmd, data=data)
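# Illustrative note (not part of the original module): the ``data`` string built
# above is the usual one-line debconf-set-selections preseed format, e.g.
#   locales locales/default_environment_locale select fr_FR.UTF-8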
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, aliases=['pkg'], type='str'),
question=dict(required=False, aliases=['setting', 'selection'], type='str'),
vtype=dict(required=False, type='str', choices=['string', 'password', 'boolean', 'select', 'multiselect', 'note', 'error', 'title',
'text', 'seen']),
value=dict(required=False, type='str', aliases=['answer']),
unseen=dict(required=False, type='bool'),
),
required_together=(['question','vtype', 'value'],),
supports_check_mode=True,
)
#TODO: enable passing array of options and/or debconf file from get-selections dump
pkg = module.params["name"]
question = module.params["question"]
vtype = module.params["vtype"]
value = module.params["value"]
unseen = module.params["unseen"]
prev = get_selections(module, pkg)
changed = False
msg = ""
if question is not None:
if vtype is None or value is None:
module.fail_json(msg="when supplying a question you must supply a valid vtype and value")
if not question in prev or prev[question] != value:
changed = True
if changed:
if not module.check_mode:
rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen)
if rc:
module.fail_json(msg=e)
curr = { question: value }
if question in prev:
prev = {question: prev[question]}
else:
prev[question] = ''
if module._diff:
after = prev.copy()
after.update(curr)
diff_dict = {'before': prev, 'after': after}
else:
diff_dict = {}
module.exit_json(changed=changed, msg=msg, current=curr, previous=prev, diff=diff_dict)
module.exit_json(changed=changed, msg=msg, current=prev)
if __name__ == '__main__':
main()
| gpl-3.0 |
aalmah/pylearn2 | pylearn2/config/old_config.py | 45 | 1417 | import yaml
import inspect
import types
from pylearn2.utils.exc import reraise_as
global resolvers
def load(json_file_path):
""" given a file path to a json file, returns the dictionary definde by the json file """
f = open(json_file_path)
lines = f.readlines()
f.close()
content = ''.join(lines)
return yaml.load(content)
def get_field(d, key):
try:
rval = d[key]
except KeyError:
reraise_as(ValueError('Could not access "'+key+'" in \n'+str(d)))
return rval
def get_str(d, key):
rval = get_field(d, key)
if not isinstance(rval,str):
raise TypeError('"'+key+'" entry is not a string in the following: \n'+str(d))
return rval
def get_tag(d):
return get_str(d, 'tag')
def resolve(d):
""" given a dictionary d, returns the object described by the dictionary """
tag = get_tag(d)
try:
resolver = resolvers[tag]
except KeyError:
reraise_as(TypeError('config does not know of any object type "'+tag+'"'))
return resolver(d)
def resolve_model(d):
assert False
def resolve_dataset(d):
import pylearn2.datasets.config
return pylearn2.datasets.config.resolve(d)
def resolve_train_algorithm(d):
assert False
resolvers = {
'model' : resolve_model,
'dataset' : resolve_dataset,
'train_algorithm' : resolve_train_algorithm
}
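# Illustrative note (not part of the original module): a config dictionary is
# dispatched on its "tag" field, so a hypothetical ``example.json`` containing
#   {"tag": "dataset", ...}
# could be turned into an object with
#   obj = resolve(load('example.json'))
# Only the "dataset" tag is actually implemented above; "model" and
# "train_algorithm" still assert False.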
| bsd-3-clause |
trosa/forca | gluon/contrib/pyrtf/Renderer.py | 2 | 26037 | from types import StringType, ListType, TupleType
from copy import deepcopy
from Elements import *
DEFAULT_TAB_WIDTH = 720
ParagraphAlignmentMap = { ParagraphPropertySet.LEFT : 'ql',
ParagraphPropertySet.RIGHT : 'qr',
ParagraphPropertySet.CENTER : 'qc',
ParagraphPropertySet.JUSTIFY : 'qj',
ParagraphPropertySet.DISTRIBUTE : 'qd' }
TabAlignmentMap = { TabPropertySet.LEFT : '',
TabPropertySet.RIGHT : 'tqr',
TabPropertySet.CENTER : 'tqc',
TabPropertySet.DECIMAL : 'tqdec' }
TableAlignmentMap = { Table.LEFT : 'trql',
Table.RIGHT : 'trqr',
Table.CENTER : 'trqc' }
CellAlignmentMap = { Cell.ALIGN_TOP : '', # clvertalt
Cell.ALIGN_CENTER : 'clvertalc',
Cell.ALIGN_BOTTOM : 'clvertalb' }
CellFlowMap = { Cell.FLOW_LR_TB : '', # cltxlrtb, Text in a cell flows from left to right and top to bottom (default)
Cell.FLOW_RL_TB : 'cltxtbrl', # Text in a cell flows right to left and top to bottom
Cell.FLOW_LR_BT : 'cltxbtlr', # Text in a cell flows left to right and bottom to top
Cell.FLOW_VERTICAL_LR_TB : 'cltxlrtbv', # Text in a cell flows left to right and top to bottom, vertical
Cell.FLOW_VERTICAL_TB_RL : 'cltxtbrlv' } # Text in a cell flows top to bottom and right to left, vertical
ShadingPatternMap = { ShadingPropertySet.HORIZONTAL : 'bghoriz',
ShadingPropertySet.VERTICAL : 'bgvert',
ShadingPropertySet.FORWARD_DIAGONAL : 'bgfdiag',
ShadingPropertySet.BACKWARD_DIAGONAL : 'bgbdiag',
ShadingPropertySet.VERTICAL_CROSS : 'bgcross',
ShadingPropertySet.DIAGONAL_CROSS : 'bgdcross',
ShadingPropertySet.DARK_HORIZONTAL : 'bgdkhoriz',
ShadingPropertySet.DARK_VERTICAL : 'bgdkvert',
ShadingPropertySet.DARK_FORWARD_DIAGONAL : 'bgdkfdiag',
ShadingPropertySet.DARK_BACKWARD_DIAGONAL : 'bgdkbdiag',
ShadingPropertySet.DARK_VERTICAL_CROSS : 'bgdkcross',
ShadingPropertySet.DARK_DIAGONAL_CROSS : 'bgdkdcross' }
TabLeaderMap = { TabPropertySet.DOTS : 'tldot',
TabPropertySet.HYPHENS : 'tlhyph',
TabPropertySet.UNDERLINE : 'tlul',
TabPropertySet.THICK_LINE : 'tlth',
TabPropertySet.EQUAL_SIGN : 'tleq' }
BorderStyleMap = { BorderPropertySet.SINGLE : 'brdrs',
BorderPropertySet.DOUBLE : 'brdrth',
BorderPropertySet.SHADOWED : 'brdrsh',
BorderPropertySet.DOUBLED : 'brdrdb',
BorderPropertySet.DOTTED : 'brdrdot',
BorderPropertySet.DASHED : 'brdrdash',
BorderPropertySet.HAIRLINE : 'brdrhair' }
SectionBreakTypeMap = { Section.NONE : 'sbknone',
Section.COLUMN : 'sbkcol',
Section.PAGE : 'sbkpage',
Section.EVEN : 'sbkeven',
Section.ODD : 'sbkodd' }
class Settings( list ) :
def __init__( self ) :
super( Settings, self ).__init__()
self._append = super( Settings, self ).append
def append( self, value, mask=None, fallback=None ) :
if (value is not 0) and value in [ False, None, '' ] :
            if fallback : self._append( fallback )
else :
if mask :
if value is True :
value = mask
else :
value = mask % value
self._append( value )
def Join( self ) :
if self : return r'\%s' % '\\'.join( self )
return ''
def __repr__( self ) :
return self.Join()
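# Illustrative note (not in the original source): Settings accumulates RTF
# control words and Join()/__repr__ render them with leading backslashes, e.g.
#   s = Settings(); s.append(True, 'b'); s.append(24, 'fs%s')
#   repr(s)  ->  r'\b\fs24'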
class Renderer :
def __init__( self, write_custom_element_callback=None ) :
self.character_style_map = {}
self.paragraph_style_map = {}
self.WriteCustomElement = write_custom_element_callback
#
# All of the Rend* Functions populate a Settings object with values
#
def _RendPageProperties( self, section, settings, in_section ) :
# this one is different from the others as it takes the settings from a
if in_section :
#paper_size_code = 'psz%s'
paper_width_code = 'pgwsxn%s'
paper_height_code = 'pghsxn%s'
landscape = 'lndscpsxn'
margin_suffix = 'sxn'
else :
#paper_size_code = 'psz%s'
paper_width_code = 'paperw%s'
paper_height_code = 'paperh%s'
landscape = 'landscape'
margin_suffix = ''
#settings.append( section.Paper.Code, paper_size_code )
settings.append( section.Paper.Width, paper_width_code )
settings.append( section.Paper.Height, paper_height_code )
if section.Landscape :
settings.append( landscape )
if section.FirstPageNumber :
settings.append( section.FirstPageNumber, 'pgnstarts%s' )
settings.append( 'pgnrestart' )
self._RendMarginsPropertySet( section.Margins, settings, margin_suffix )
def _RendShadingPropertySet( self, shading_props, settings, prefix='' ) :
if not shading_props : return
settings.append( shading_props.Shading, prefix + 'shading%s' )
settings.append( ShadingPatternMap.get( shading_props.Pattern, False ) )
settings.append( self._colour_map.get( shading_props.Foreground, False ), prefix + 'cfpat%s' )
settings.append( self._colour_map.get( shading_props.Background, False ), prefix + 'cbpat%s' )
def _RendBorderPropertySet( self, edge_props, settings ) :
settings.append( BorderStyleMap[ edge_props.Style ] )
settings.append( edge_props.Width , 'brdrw%s' )
settings.append( self._colour_map.get( edge_props.Colour, False ), 'brdrcf%s' )
settings.append( edge_props.Spacing or False , 'brsp%s' )
def _RendFramePropertySet( self, frame_props, settings, tag_prefix='' ) :
if not frame_props : return
if frame_props.Top :
settings.append( tag_prefix + 'brdrt' )
self._RendBorderPropertySet( frame_props.Top, settings )
if frame_props.Left :
settings.append( tag_prefix + 'brdrl' )
self._RendBorderPropertySet( frame_props.Left, settings )
if frame_props.Bottom :
settings.append( tag_prefix + 'brdrb' )
self._RendBorderPropertySet( frame_props.Bottom, settings )
if frame_props.Right :
settings.append( tag_prefix + 'brdrr' )
self._RendBorderPropertySet( frame_props.Right, settings )
def _RendMarginsPropertySet( self, margin_props, settings, suffix='' ) :
if not margin_props : return
settings.append( margin_props.Top, 'margt' + suffix + '%s' )
settings.append( margin_props.Left, 'margl' + suffix + '%s' )
settings.append( margin_props.Bottom, 'margb' + suffix + '%s' )
settings.append( margin_props.Right, 'margr' + suffix + '%s' )
def _RendParagraphPropertySet( self, paragraph_props, settings ) :
if not paragraph_props : return
settings.append( ParagraphAlignmentMap[ paragraph_props.Alignment ] )
settings.append( paragraph_props.SpaceBefore, 'sb%s' )
settings.append( paragraph_props.SpaceAfter, 'sa%s' )
# then we have to find out all of the tabs
width = 0
for tab in paragraph_props.Tabs :
settings.append( TabAlignmentMap[ tab.Alignment ] )
settings.append( TabLeaderMap.get( tab.Leader, '' ) )
width += tab.Width or DEFAULT_TAB_WIDTH
settings.append( 'tx%s' % width )
settings.append( paragraph_props.PageBreakBefore, 'pagebb' )
settings.append( paragraph_props.FirstLineIndent, 'fi%s' )
settings.append( paragraph_props.LeftIndent, 'li%s' )
settings.append( paragraph_props.RightIndent, 'ri%s' )
if paragraph_props.SpaceBetweenLines :
if paragraph_props.SpaceBetweenLines < 0 :
settings.append( paragraph_props.SpaceBetweenLines, r'sl%s\slmult0' )
else :
settings.append( paragraph_props.SpaceBetweenLines, r'sl%s\slmult1' )
def _RendTextPropertySet( self, text_props, settings ) :
if not text_props : return
if text_props.Expansion :
settings.append( text_props.Expansion, 'expndtw%s' )
settings.append( text_props.Bold, 'b' )
settings.append( text_props.Italic, 'i' )
settings.append( text_props.Underline, 'ul' )
settings.append( text_props.DottedUnderline, 'uld' )
settings.append( text_props.DoubleUnderline, 'uldb' )
settings.append( text_props.WordUnderline, 'ulw' )
settings.append( self._font_map.get( text_props.Font, False ), 'f%s' )
settings.append( text_props.Size, 'fs%s' )
settings.append( self._colour_map.get( text_props.Colour, False ), 'cf%s' )
if text_props.Frame :
frame = text_props.Frame
settings.append( 'chbrdr' )
settings.append( BorderStyleMap[ frame.Style ] )
settings.append( frame.Width , 'brdrw%s' )
settings.append( self._colour_map.get( frame.Colour, False ), 'brdrcf%s' )
#
# All of the Write* functions will write to the internal file object
#
# the _ ones probably don't need to be used by anybody outside
# but the other ones like WriteTextElement could be used in the Custom
# callback.
def Write( self, document, fout ) :
# write all of the standard stuff based upon the first document
self._doc = document
self._fout = fout
self._WriteDocument ()
self._WriteColours ()
self._WriteFonts ()
self._WriteStyleSheet()
settings = Settings()
self._RendPageProperties( self._doc.Sections[ 0 ], settings, in_section=False )
self._write( repr( settings ) )
        # handle the simplest case first, we don't need to do any more mucking around
# with section headers, etc we can just rip the document out
if len( document.Sections ) == 1 :
self._WriteSection( document.Sections[ 0 ],
is_first = True,
add_header = False )
else :
for section_idx, section in enumerate( document.Sections ) :
is_first = section_idx == 0
add_header = True
self._WriteSection( section, is_first, add_header )
self._write( '}' )
del self._fout, self._doc, self._CurrentStyle
def _write( self, data, *params ) :
#----------------------------------
# begin modification
# by Herbert Weinhandl
# to convert accented characters
# to their rtf-compatible form
#for c in range( 128, 256 ) :
# data = data.replace( chr(c), "\'%x" % c)
# end modification
#
# This isn't the right place for this as it is going to do
# this loop for all sorts of writes, including settings, control codes, etc.
#
# I will create a def _WriteText (or something) method that is used when the
# actual string that is to be viewed in the document is written, this can then
# do the final accented character check.
#
# I left it here so that I remember to do the right thing when I have time
#----------------------------------
if params : data = data % params
self._fout.write( data )
def _WriteDocument( self ) :
settings = Settings()
assert Languages.IsValid ( self._doc.DefaultLanguage )
assert ViewKind.IsValid ( self._doc.ViewKind )
assert ViewZoomKind.IsValid( self._doc.ViewZoomKind )
assert ViewScale.IsValid ( self._doc.ViewScale )
settings.append( self._doc.DefaultLanguage, 'deflang%s' )
settings.append( self._doc.ViewKind , 'viewkind%s' )
settings.append( self._doc.ViewZoomKind , 'viewzk%s' )
settings.append( self._doc.ViewScale , 'viewscale%s' )
self._write( "{\\rtf1\\ansi\\ansicpg1252\\deff0%s\n" % settings )
def _WriteColours( self ) :
self._write( r"{\colortbl ;" )
self._colour_map = {}
offset = 0
for colour in self._doc.StyleSheet.Colours :
self._write( r'\red%s\green%s\blue%s;', colour.Red, colour.Green, colour.Blue )
self._colour_map[ colour ] = offset + 1
offset += 1
self._write( "}\n" )
def _WriteFonts( self ) :
self._write( r'{\fonttbl' )
self._font_map = {}
offset = 0
for font in self._doc.StyleSheet.Fonts :
pitch = ''
panose = ''
alternate = ''
if font.Pitch : pitch = r'\fprq%s' % font.Pitch
if font.Panose : panose = r'{\*\panose %s}' % font.Panose
if font.Alternate : alternate = r'{\*\falt %s}' % font.Alternate.Name
self._write( r'{\f%s\f%s%s\fcharset%s%s %s%s;}',
offset,
font.Family,
pitch,
font.CharacterSet,
panose,
font.Name,
alternate )
self._font_map[ font ] = offset
offset += 1
self._write( "}\n" )
def _WriteStyleSheet( self ) :
self._write( r"{\stylesheet" )
# TO DO: character styles, does anybody actually use them?
offset_map = {}
for idx, style in enumerate( self._doc.StyleSheet.ParagraphStyles ) :
offset_map[ style ] = idx
# paragraph styles
self.paragraph_style_map = {}
for idx, style in enumerate( self._doc.StyleSheet.ParagraphStyles ) :
if idx == 0 :
default = style
else :
self._write( '\n' )
settings = Settings()
# paragraph properties
self._RendParagraphPropertySet( style.ParagraphPropertySet, settings )
self._RendFramePropertySet ( style.FramePropertySet, settings )
self._RendShadingPropertySet ( style.ShadingPropertySet, settings )
# text properties
self._RendTextPropertySet ( style.TextStyle.TextPropertySet, settings )
self._RendShadingPropertySet( style.TextStyle.ShadingPropertySet, settings )
# have to take
based_on = '\\sbasedon%s' % offset_map.get( style.BasedOn, 0 )
next = '\\snext%s' % offset_map.get( style.Next, 0 )
inln = '\\s%s%s' % ( idx, settings )
self._write( "{%s%s%s %s;}", inln, based_on, next, style.Name )
self.paragraph_style_map[ style ] = inln
        # if no style is specified for the first paragraph to be written, this one
# will be used
self._CurrentStyle = self.paragraph_style_map[ default ]
self._write( "}\n" )
def _WriteSection( self, section, is_first, add_header ) :
def WriteHF( hf, rtfword ) :
#if not hf : return
# if we don't have anything in the header/footer then include
# a blank paragraph, this stops it from picking up the header/footer
# from the previous section
# if not hf : hf = [ Paragraph( '' ) ]
if not hf : hf = []
self._write( '{\\%s' % rtfword )
self._WriteElements( hf )
self._write( '}\n' )
settings = Settings()
if not is_first :
# we need to finish off the preceding section
# and reset all of our defaults back to standard
settings.append( 'sect' )
# reset to our defaults
settings.append( 'sectd' )
if add_header :
settings.append( SectionBreakTypeMap[ section.BreakType ] )
self._RendPageProperties( section, settings, in_section=True )
settings.append( section.HeaderY, 'headery%s' )
settings.append( section.FooterY, 'footery%s' )
# write all of these out now as we need to do a write elements in the
# next section
self._write( repr( settings ) )
# finally after all that has settled down we can do the
# headers and footers
if section.FirstHeader or section.FirstFooter :
# include the titlepg flag if the first page has a special format
self._write( r'\titlepg' )
WriteHF( section.FirstHeader, 'headerf' )
WriteHF( section.FirstFooter, 'footerf' )
WriteHF( section.Header, 'header' )
WriteHF( section.Footer, 'footer' )
# and at last the contents of the section that actually appear on the page
self._WriteElements( section )
def _WriteElements( self, elements ) :
new_line = ''
for element in elements :
self._write( new_line )
new_line = '\n'
clss = element.__class__
if clss == Paragraph :
self.WriteParagraphElement( element )
elif clss == Table :
self.WriteTableElement( element )
elif clss == StringType :
self.WriteParagraphElement( Paragraph( element ) )
elif clss in [ RawCode, Image ] :
self.WriteRawCode( element )
#elif clss == List :
# self._HandleListElement( element )
elif self.WriteCustomElement :
self.WriteCustomElement( self, element )
else :
raise Exception( "Don't know how to handle elements of type %s" % clss )
def WriteParagraphElement( self, paragraph_elem, tag_prefix='', tag_suffix=r'\par', opening='{', closing='}' ) :
# the tag_prefix and the tag_suffix take care of paragraphs in tables. A
        # paragraph in a table requires an extra tag at the front (intbl) and we
        # don't want the ending tag every time. We want it for all paragraphs but
# the last.
overrides = Settings()
self._RendParagraphPropertySet( paragraph_elem.Properties, overrides )
self._RendFramePropertySet ( paragraph_elem.Frame, overrides )
self._RendShadingPropertySet ( paragraph_elem.Shading, overrides )
# when writing the RTF the style is carried from the previous paragraph to the next,
# so if the currently written paragraph has a style then make it the current one,
# otherwise leave it as it was
self._CurrentStyle = self.paragraph_style_map.get( paragraph_elem.Style, self._CurrentStyle )
self._write( r'%s\pard\plain%s %s%s ' % ( opening, tag_prefix, self._CurrentStyle, overrides ) )
for element in paragraph_elem :
if isinstance( element, StringType ) :
self._write( element )
elif isinstance( element, RawCode ) :
self._write( element.Data )
elif isinstance( element, Text ) :
self.WriteTextElement( element )
elif isinstance( element, Inline ) :
self.WriteInlineElement( element )
elif element == TAB :
self._write( r'\tab ' )
elif element == LINE :
self._write( r'\line ' )
elif self.WriteCustomElement :
self.WriteCustomElement( self, element )
else :
raise Exception( 'Don\'t know how to handle %s' % element )
self._write( tag_suffix + closing )
def WriteRawCode( self, raw_elem ) :
self._write( raw_elem.Data )
def WriteTextElement( self, text_elem ) :
overrides = Settings()
self._RendTextPropertySet ( text_elem.Properties, overrides )
self._RendShadingPropertySet( text_elem.Shading, overrides, 'ch' )
# write the wrapper and then let the custom handler have a go
if overrides : self._write( '{%s ' % repr( overrides ) )
# if the data is just a string then we can now write it
if isinstance( text_elem.Data, StringType ) :
self._write( text_elem.Data or '' )
elif text_elem.Data == TAB :
self._write( r'\tab ' )
else :
self.WriteCustomElement( self, text_elem.Data )
if overrides : self._write( '}' )
def WriteInlineElement( self, inline_elem ) :
overrides = Settings()
self._RendTextPropertySet ( inline_elem.Properties, overrides )
self._RendShadingPropertySet( inline_elem.Shading, overrides, 'ch' )
# write the wrapper and then let the custom handler have a go
if overrides : self._write( '{%s ' % repr( overrides ) )
for element in inline_elem :
# if the data is just a string then we can now write it
if isinstance( element, StringType ) :
self._write( element )
elif isinstance( element, RawCode ) :
self._write( element.Data )
elif element == TAB :
self._write( r'\tab ' )
elif element == LINE :
self._write( r'\line ' )
else :
self.WriteCustomElement( self, element )
if overrides : self._write( '}' )
def WriteText( self, text ) :
self._write( text or '' )
def WriteTableElement( self, table_elem ) :
vmerge = [ False ] * table_elem.ColumnCount
for height, cells in table_elem.Rows :
# calculate the right hand edge of the cells taking into account the spans
offset = table_elem.LeftOffset or 0
cellx = []
cell_idx = 0
for cell in cells :
cellx.append( offset + sum( table_elem.ColumnWidths[ : cell_idx + cell.Span ] ) )
cell_idx += cell.Span
self._write( r'{\trowd' )
settings = Settings()
# the spec says that this value is mandatory and I think that 108 is the default value
# so I'll take care of it here
settings.append( table_elem.GapBetweenCells or 108, 'trgaph%s' )
settings.append( TableAlignmentMap[ table_elem.Alignment ] )
settings.append( height, 'trrh%s' )
settings.append( table_elem.LeftOffset, 'trleft%s' )
width = table_elem.LeftOffset or 0
for idx, cell in enumerate( cells ) :
self._RendFramePropertySet ( cell.Frame, settings, 'cl' )
# cells don't have margins so I don't know why I was doing this
                # I think it might have an effect in some versions of some WPs.
#self._RendMarginsPropertySet( cell.Margins, settings, 'cl' )
# if we are starting to merge or if this one is the first in what is
# probably a series of merges then start the vertical merging
if cell.StartVerticalMerge or (cell.VerticalMerge and not vmerge[ idx ]) :
settings.append( 'clvmgf' )
vmerge[ idx ] = True
elif cell.VerticalMerge :
#..continuing a merge
settings.append( 'clvmrg' )
else :
#..no merging going on so make sure that it is off
vmerge[ idx ] = False
# for any cell in the next row that is covered by this span we
                # need to turn off the vertical merging as we don't want them
# merging up into this spanned cell
for vmerge_idx in range( idx + 1, idx + cell.Span - 1 ) :
vmerge[ vmerge_idx ] = False
settings.append( CellAlignmentMap[ cell.Alignment ] )
settings.append( CellFlowMap[ cell.Flow ] )
# this terminates the definition of a cell and represents the right most edge of the cell from the left margin
settings.append( cellx[ idx ], 'cellx%s' )
self._write( repr( settings ) )
for cell in cells :
if len( cell ) :
last_idx = len( cell ) - 1
for element_idx, element in enumerate( cell ) :
# wrap plain strings in paragraph tags
if isinstance( element, StringType ) :
element = Paragraph( element )
# don't forget the prefix or else word crashes and does all sorts of strange things
if element_idx == last_idx :
self.WriteParagraphElement( element, tag_prefix=r'\intbl', tag_suffix='', opening='', closing='' )
else :
self.WriteParagraphElement( element, tag_prefix=r'\intbl', opening='', closing='' )
self._write( r'\cell' )
else :
self._write( r'\pard\intbl\cell' )
self._write( '\\row}\n' )
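# Illustrative sketch (added for clarity; not part of the original module):
# minimal end-to-end use of Renderer with the Elements module imported above,
# assuming the standard PyRTF document model (Document/Section/Paragraph).
def _example_render(path='example.rtf'):  # pragma: no cover - documentation only
    doc = Document()
    section = Section()
    doc.Sections.append(section)
    section.append(Paragraph('Hello from PyRTF.'))
    fout = open(path, 'w')
    try:
        Renderer().Write(doc, fout)
    finally:
        fout.close()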
| gpl-2.0 |
MoamerEncsConcordiaCa/tensorflow | tensorflow/python/util/future_api_test.py | 173 | 1177 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for future_api."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# pylint: disable=unused-import
from tensorflow.python.util import future_api
# pylint: enable=unused-import
class ExampleParserConfigurationTest(tf.test.TestCase):
def testBasic(self):
self.assertFalse(hasattr(tf, 'arg_max'))
self.assertTrue(hasattr(tf, 'argmax'))
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
nerdvegas/rez | src/rez/cli/help.py | 4 | 2625 | """
Utility for displaying help for the given package.
"""
from __future__ import print_function
def setup_parser(parser, completions=False):
parser.add_argument("-m", "--manual", dest="manual", action="store_true",
default=False,
help="Load the rez technical user manual")
parser.add_argument("-e", "--entries", dest="entries", action="store_true",
default=False,
help="Just print each help entry")
PKG_action = parser.add_argument(
"PKG", metavar='PACKAGE', nargs='?',
help="package name")
parser.add_argument("SECTION", type=int, default=1, nargs='?',
help="Help section to view (1..N)")
if completions:
from rez.cli._complete_util import PackageCompleter
PKG_action.completer = PackageCompleter
def command(opts, parser=None, extra_arg_groups=None):
from rez.utils.formatting import PackageRequest
from rez.package_help import PackageHelp
import sys
if opts.manual or not opts.PKG:
PackageHelp.open_rez_manual()
sys.exit(0)
request = PackageRequest(opts.PKG)
if request.conflict:
raise ValueError("Expected a non-conflicting package")
help_ = PackageHelp(request.name, request.range, verbose=opts.verbose)
if not help_.success:
msg = "Could not find a package with help for %r." % request
print(msg, file=sys.stderr)
sys.exit(1)
package = help_.package
print("Help found for:")
print(package.uri)
if package.description:
print()
print("Description:")
print(package.description.strip())
print()
if opts.entries:
help_.print_info()
else:
try:
help_.open(opts.SECTION - 1)
except IndexError:
print("No such help section.", file=sys.stderr)
sys.exit(2)
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
| lgpl-3.0 |
ttindell2/openshift-ansible | roles/lib_openshift/src/class/oc_adm_registry.py | 45 | 17506 | # pylint: skip-file
# flake8: noqa
class RegistryException(Exception):
''' Registry Exception Class '''
pass
class RegistryConfig(OpenShiftCLIConfig):
''' RegistryConfig is a DTO for the registry. '''
def __init__(self, rname, namespace, kubeconfig, registry_options):
super(RegistryConfig, self).__init__(rname, namespace, kubeconfig, registry_options)
class Registry(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
volume_mount_path = 'spec.template.spec.containers[0].volumeMounts'
volume_path = 'spec.template.spec.volumes'
env_path = 'spec.template.spec.containers[0].env'
def __init__(self,
registry_config,
verbose=False):
''' Constructor for Registry
a registry consists of 3 or more parts
- dc/docker-registry
- svc/docker-registry
Parameters:
:registry_config:
:verbose:
'''
super(Registry, self).__init__(registry_config.namespace, registry_config.kubeconfig, verbose)
self.version = OCVersion(registry_config.kubeconfig, verbose)
self.svc_ip = None
self.portal_ip = None
self.config = registry_config
self.verbose = verbose
self.registry_parts = [{'kind': 'dc', 'name': self.config.name},
{'kind': 'svc', 'name': self.config.name},
]
self.__prepared_registry = None
self.volume_mounts = []
self.volumes = []
if self.config.config_options['volume_mounts']['value']:
for volume in self.config.config_options['volume_mounts']['value']:
volume_info = {'secret_name': volume.get('secret_name', None),
'name': volume.get('name', None),
'type': volume.get('type', None),
'path': volume.get('path', None),
'claimName': volume.get('claim_name', None),
'claimSize': volume.get('claim_size', None),
}
vol, vol_mount = Volume.create_volume_structure(volume_info)
self.volumes.append(vol)
self.volume_mounts.append(vol_mount)
self.dconfig = None
self.svc = None
@property
def deploymentconfig(self):
''' deploymentconfig property '''
return self.dconfig
@deploymentconfig.setter
def deploymentconfig(self, config):
''' setter for deploymentconfig property '''
self.dconfig = config
@property
def service(self):
''' service property '''
return self.svc
@service.setter
def service(self, config):
''' setter for service property '''
self.svc = config
@property
def prepared_registry(self):
''' prepared_registry property '''
if not self.__prepared_registry:
results = self.prepare_registry()
if not results or ('returncode' in results and results['returncode'] != 0):
raise RegistryException('Could not perform registry preparation. {}'.format(results))
self.__prepared_registry = results
return self.__prepared_registry
@prepared_registry.setter
def prepared_registry(self, data):
''' setter method for prepared_registry attribute '''
self.__prepared_registry = data
def get(self):
''' return the self.registry_parts '''
self.deploymentconfig = None
self.service = None
rval = 0
for part in self.registry_parts:
result = self._get(part['kind'], name=part['name'])
if result['returncode'] == 0 and part['kind'] == 'dc':
self.deploymentconfig = DeploymentConfig(result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'svc':
self.service = Service(result['results'][0])
if result['returncode'] != 0:
rval = result['returncode']
return {'returncode': rval, 'deploymentconfig': self.deploymentconfig, 'service': self.service}
def exists(self):
'''does the object exist?'''
if self.deploymentconfig and self.service:
return True
return False
def delete(self, complete=True):
'''return all pods '''
parts = []
for part in self.registry_parts:
if not complete and part['kind'] == 'svc':
continue
parts.append(self._delete(part['kind'], part['name']))
# Clean up returned results
rval = 0
for part in parts:
# pylint: disable=invalid-sequence-index
if 'returncode' in part and part['returncode'] != 0:
rval = part['returncode']
return {'returncode': rval, 'results': parts}
def prepare_registry(self):
''' prepare a registry for instantiation '''
options = self.config.to_option_list(ascommalist='labels')
cmd = ['registry']
cmd.extend(options)
cmd.extend(['--dry-run=True', '-o', 'json'])
results = self.openshift_cmd(cmd, oadm=True, output=True, output_type='json')
# probably need to parse this
# pylint thinks results is a string
# pylint: disable=no-member
if results['returncode'] != 0 and 'items' not in results['results']:
raise RegistryException('Could not perform registry preparation. {}'.format(results))
service = None
deploymentconfig = None
# pylint: disable=invalid-sequence-index
for res in results['results']['items']:
if res['kind'] == 'DeploymentConfig':
deploymentconfig = DeploymentConfig(res)
elif res['kind'] == 'Service':
service = Service(res)
# Verify we got a service and a deploymentconfig
if not service or not deploymentconfig:
return results
# results will need to get parsed here and modifications added
deploymentconfig = DeploymentConfig(self.add_modifications(deploymentconfig))
# modify service ip
if self.svc_ip:
service.put('spec.clusterIP', self.svc_ip)
if self.portal_ip:
service.put('spec.portalIP', self.portal_ip)
# the dry-run doesn't apply the selector correctly
if self.service:
service.put('spec.selector', self.service.get_selector())
# need to create the service and the deploymentconfig
service_file = Utils.create_tmp_file_from_contents('service', service.yaml_dict)
deployment_file = Utils.create_tmp_file_from_contents('deploymentconfig', deploymentconfig.yaml_dict)
return {"service": service,
"service_file": service_file,
"service_update": False,
"deployment": deploymentconfig,
"deployment_file": deployment_file,
"deployment_update": False}
def create(self):
'''Create a registry'''
results = []
self.needs_update()
# if the object is none, then we need to create it
# if the object needs an update, then we should call replace
# Handle the deploymentconfig
if self.deploymentconfig is None:
results.append(self._create(self.prepared_registry['deployment_file']))
elif self.prepared_registry['deployment_update']:
results.append(self._replace(self.prepared_registry['deployment_file']))
# Handle the service
if self.service is None:
results.append(self._create(self.prepared_registry['service_file']))
elif self.prepared_registry['service_update']:
results.append(self._replace(self.prepared_registry['service_file']))
# Clean up returned results
rval = 0
for result in results:
# pylint: disable=invalid-sequence-index
if 'returncode' in result and result['returncode'] != 0:
rval = result['returncode']
return {'returncode': rval, 'results': results}
def update(self):
'''run update for the registry. This performs a replace if required'''
# Store the current service IP
if self.service:
svcip = self.service.get('spec.clusterIP')
if svcip:
self.svc_ip = svcip
portip = self.service.get('spec.portalIP')
if portip:
self.portal_ip = portip
results = []
if self.prepared_registry['deployment_update']:
results.append(self._replace(self.prepared_registry['deployment_file']))
if self.prepared_registry['service_update']:
results.append(self._replace(self.prepared_registry['service_file']))
# Clean up returned results
rval = 0
for result in results:
if result['returncode'] != 0:
rval = result['returncode']
return {'returncode': rval, 'results': results}
def add_modifications(self, deploymentconfig):
''' update a deployment config with changes '''
# The environment variable for REGISTRY_HTTP_SECRET is autogenerated
# We should set the generated deploymentconfig to the in memory version
# the following modifications will overwrite if needed
if self.deploymentconfig:
result = self.deploymentconfig.get_env_var('REGISTRY_HTTP_SECRET')
if result:
deploymentconfig.update_env_var('REGISTRY_HTTP_SECRET', result['value'])
# Currently we know that our deployment of a registry requires a few extra modifications
# Modification 1
# we need specific environment variables to be set
for key, value in self.config.config_options['env_vars'].get('value', {}).items():
if not deploymentconfig.exists_env_key(key):
deploymentconfig.add_env_value(key, value)
else:
deploymentconfig.update_env_var(key, value)
# Modification 2
# we need specific volume variables to be set
for volume in self.volumes:
deploymentconfig.update_volume(volume)
for vol_mount in self.volume_mounts:
deploymentconfig.update_volume_mount(vol_mount)
# Modification 3
# Edits
edit_results = []
for edit in self.config.config_options['edits'].get('value', []):
if edit['action'] == 'put':
edit_results.append(deploymentconfig.put(edit['key'],
edit['value']))
if edit['action'] == 'update':
edit_results.append(deploymentconfig.update(edit['key'],
edit['value'],
edit.get('index', None),
edit.get('curr_value', None)))
if edit['action'] == 'append':
edit_results.append(deploymentconfig.append(edit['key'],
edit['value']))
if edit_results and not any([res[0] for res in edit_results]):
return None
return deploymentconfig.yaml_dict
def needs_update(self):
''' check to see if we need to update '''
exclude_list = ['clusterIP', 'portalIP', 'type', 'protocol']
if self.service is None or \
not Utils.check_def_equal(self.prepared_registry['service'].yaml_dict,
self.service.yaml_dict,
exclude_list,
debug=self.verbose):
self.prepared_registry['service_update'] = True
exclude_list = ['dnsPolicy',
'terminationGracePeriodSeconds',
'restartPolicy', 'timeoutSeconds',
'livenessProbe', 'readinessProbe',
'terminationMessagePath',
'securityContext',
'imagePullPolicy',
'protocol', # ports.protocol: TCP
'type', # strategy: {'type': 'rolling'}
'defaultMode', # added on secrets
'activeDeadlineSeconds', # added in 1.5 for timeouts
]
if self.deploymentconfig is None or \
not Utils.check_def_equal(self.prepared_registry['deployment'].yaml_dict,
self.deploymentconfig.yaml_dict,
exclude_list,
debug=self.verbose):
self.prepared_registry['deployment_update'] = True
return self.prepared_registry['deployment_update'] or self.prepared_registry['service_update'] or False
# In the future, we would like to break out each ansible state into a function.
# pylint: disable=too-many-branches,too-many-return-statements
@staticmethod
def run_ansible(params, check_mode):
'''run idempotent ansible code'''
registry_options = {'images': {'value': params['images'], 'include': True},
'latest_images': {'value': params['latest_images'], 'include': True},
'labels': {'value': params['labels'], 'include': True},
'ports': {'value': ','.join(params['ports']), 'include': True},
'replicas': {'value': params['replicas'], 'include': True},
'selector': {'value': params['selector'], 'include': True},
'service_account': {'value': params['service_account'], 'include': True},
'mount_host': {'value': params['mount_host'], 'include': True},
'env_vars': {'value': params['env_vars'], 'include': False},
'volume_mounts': {'value': params['volume_mounts'], 'include': False},
'edits': {'value': params['edits'], 'include': False},
'tls_key': {'value': params['tls_key'], 'include': True},
'tls_certificate': {'value': params['tls_certificate'], 'include': True},
}
# Do not always pass the daemonset and enforce-quota parameters because they are not understood
# by old versions of oc.
# Default value is false. So, it's safe to not pass an explicit false value to oc versions which
# understand these parameters.
if params['daemonset']:
registry_options['daemonset'] = {'value': params['daemonset'], 'include': True}
if params['enforce_quota']:
registry_options['enforce_quota'] = {'value': params['enforce_quota'], 'include': True}
rconfig = RegistryConfig(params['name'],
params['namespace'],
params['kubeconfig'],
registry_options)
ocregistry = Registry(rconfig, params['debug'])
api_rval = ocregistry.get()
state = params['state']
########
# get
########
if state == 'list':
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': False, 'results': api_rval, 'state': state}
########
# Delete
########
if state == 'absent':
if not ocregistry.exists():
return {'changed': False, 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
# Unsure as to why this is angry with the return type.
# pylint: disable=redefined-variable-type
api_rval = ocregistry.delete()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
if state == 'present':
########
# Create
########
if not ocregistry.exists():
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
api_rval = ocregistry.create()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
########
# Update
########
if not params['force'] and not ocregistry.needs_update():
return {'changed': False, 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
api_rval = ocregistry.update()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
return {'failed': True, 'msg': 'Unknown state passed. %s' % state}
| apache-2.0 |
appleseedhq/gaffer | startup/GafferScene/parentCompatibility.py | 8 | 2548 | ##########################################################################
#
# Copyright (c) 2019 Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import types
import Gaffer
import GafferScene
def __parentChildrenSetInput( self, input ) :
if isinstance( input, GafferScene.ScenePlug ) :
# Old connection into "child" plug. Reroute
# it into "children[0]".
self[0].setInput( input )
else :
Gaffer.ArrayPlug.setInput( self, input )
def __parentGetItem( originalGetItem ) :
def getItem( self, key ) :
# Redirect old "child" ScenePlug plug to new
# "children" ArrayPlug.
key = "children" if key == "child" else key
result = originalGetItem( self, key )
if key == "children" :
result.setInput = types.MethodType( __parentChildrenSetInput, result )
return result
return getItem
GafferScene.Parent.__getitem__ = __parentGetItem( GafferScene.Parent.__getitem__ )
| bsd-3-clause |
jaggu303619/asylum | openerp/addons/account/report/account_aged_partner_balance.py | 13 | 21106 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
from common_report_header import common_report_header
class aged_trial_report(report_sxw.rml_parse, common_report_header):
def __init__(self, cr, uid, name, context):
super(aged_trial_report, self).__init__(cr, uid, name, context=context)
self.total_account = []
self.localcontext.update({
'time': time,
'get_lines_with_out_partner': self._get_lines_with_out_partner,
'get_lines': self._get_lines,
'get_total': self._get_total,
'get_direction': self._get_direction,
'get_for_period': self._get_for_period,
'get_company': self._get_company,
'get_currency': self._get_currency,
'get_partners':self._get_partners,
'get_account': self._get_account,
'get_fiscalyear': self._get_fiscalyear,
'get_target_move': self._get_target_move,
})
def set_context(self, objects, data, ids, report_type=None):
obj_move = self.pool.get('account.move.line')
ctx = data['form'].get('used_context', {})
ctx.update({'fiscalyear': False, 'all_fiscalyear': True})
self.query = obj_move._query_get(self.cr, self.uid, obj='l', context=ctx)
self.direction_selection = data['form'].get('direction_selection', 'past')
self.target_move = data['form'].get('target_move', 'all')
self.date_from = data['form'].get('date_from', time.strftime('%Y-%m-%d'))
if (data['form']['result_selection'] == 'customer' ):
self.ACCOUNT_TYPE = ['receivable']
elif (data['form']['result_selection'] == 'supplier'):
self.ACCOUNT_TYPE = ['payable']
else:
self.ACCOUNT_TYPE = ['payable','receivable']
return super(aged_trial_report, self).set_context(objects, data, ids, report_type=report_type)
def _get_lines(self, form):
res = []
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
self.cr.execute('SELECT DISTINCT res_partner.id AS id,\
res_partner.name AS name \
FROM res_partner,account_move_line AS l, account_account, account_move am\
WHERE (l.account_id=account_account.id) \
AND (l.move_id=am.id) \
AND (am.state IN %s)\
AND (account_account.type IN %s)\
AND account_account.active\
AND ((reconcile_id IS NULL)\
OR (reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
AND (l.partner_id=res_partner.id)\
AND (l.date <= %s)\
AND ' + self.query + ' \
ORDER BY res_partner.name', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from,))
partners = self.cr.dictfetchall()
## reset the total to 0
for i in range(7):
self.total_account.append(0)
#
# Build a string like (1,2,3) for easy use in SQL query
partner_ids = [x['id'] for x in partners]
if not partner_ids:
return []
# This dictionary will store the debit-credit for all partners, using partner_id as key.
totals = {}
self.cr.execute('SELECT l.partner_id, SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id = account_account.id) AND (l.move_id=am.id) \
AND (am.state IN %s)\
AND (account_account.type IN %s)\
AND (l.partner_id IN %s)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
AND ' + self.query + '\
AND account_account.active\
AND (l.date <= %s)\
GROUP BY l.partner_id ', (tuple(move_state), tuple(self.ACCOUNT_TYPE), tuple(partner_ids), self.date_from, self.date_from,))
t = self.cr.fetchall()
for i in t:
totals[i[0]] = i[1]
# This dictionary will store the future or past of all partners
future_past = {}
if self.direction_selection == 'future':
self.cr.execute('SELECT l.partner_id, SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id=account_account.id) AND (l.move_id=am.id) \
AND (am.state IN %s)\
AND (account_account.type IN %s)\
AND (COALESCE(l.date_maturity, l.date) < %s)\
AND (l.partner_id IN %s)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
AND '+ self.query + '\
AND account_account.active\
AND (l.date <= %s)\
GROUP BY l.partner_id', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, tuple(partner_ids),self.date_from, self.date_from,))
t = self.cr.fetchall()
for i in t:
future_past[i[0]] = i[1]
elif self.direction_selection == 'past': # Using elif so people could extend without this breaking
self.cr.execute('SELECT l.partner_id, SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id=account_account.id) AND (l.move_id=am.id)\
AND (am.state IN %s)\
AND (account_account.type IN %s)\
AND (COALESCE(l.date_maturity,l.date) > %s)\
AND (l.partner_id IN %s)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
AND '+ self.query + '\
AND account_account.active\
AND (l.date <= %s)\
GROUP BY l.partner_id', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, tuple(partner_ids), self.date_from, self.date_from,))
t = self.cr.fetchall()
for i in t:
future_past[i[0]] = i[1]
# Use one query per period and store results in history (a list variable)
# Each history will contain: history[1] = {'<partner_id>': <partner_debit-credit>}
history = []
for i in range(5):
args_list = (tuple(move_state), tuple(self.ACCOUNT_TYPE), tuple(partner_ids),self.date_from,)
dates_query = '(COALESCE(l.date_maturity,l.date)'
if form[str(i)]['start'] and form[str(i)]['stop']:
dates_query += ' BETWEEN %s AND %s)'
args_list += (form[str(i)]['start'], form[str(i)]['stop'])
elif form[str(i)]['start']:
dates_query += ' >= %s)'
args_list += (form[str(i)]['start'],)
else:
dates_query += ' <= %s)'
args_list += (form[str(i)]['stop'],)
args_list += (self.date_from,)
self.cr.execute('''SELECT l.partner_id, SUM(l.debit-l.credit), l.reconcile_partial_id
FROM account_move_line AS l, account_account, account_move am
WHERE (l.account_id = account_account.id) AND (l.move_id=am.id)
AND (am.state IN %s)
AND (account_account.type IN %s)
AND (l.partner_id IN %s)
AND ((l.reconcile_id IS NULL)
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))
AND ''' + self.query + '''
AND account_account.active
AND ''' + dates_query + '''
AND (l.date <= %s)
GROUP BY l.partner_id, l.reconcile_partial_id''', args_list)
partners_partial = self.cr.fetchall()
partners_amount = dict((i[0],0) for i in partners_partial)
for partner_info in partners_partial:
if partner_info[2]:
# in case of partial reconciliation, we want to keep the remaining amount in the oldest period
self.cr.execute('''SELECT MIN(COALESCE(date_maturity,date)) FROM account_move_line WHERE reconcile_partial_id = %s''', (partner_info[2],))
date = self.cr.fetchall()
partial = False
if 'BETWEEN' in dates_query:
partial = date and args_list[-3] <= date[0][0] <= args_list[-2]
elif '>=' in dates_query:
partial = date and date[0][0] >= form[str(i)]['start']
else:
partial = date and date[0][0] <= form[str(i)]['stop']
if partial:
# partial reconciliation
limit_date = 'COALESCE(l.date_maturity,l.date) %s %%s' % '<=' if self.direction_selection == 'past' else '>='
self.cr.execute('''SELECT SUM(l.debit-l.credit)
FROM account_move_line AS l, account_move AS am
WHERE l.move_id = am.id AND am.state in %s
AND l.reconcile_partial_id = %s
AND ''' + limit_date, (tuple(move_state), partner_info[2], self.date_from))
unreconciled_amount = self.cr.fetchall()
partners_amount[partner_info[0]] += unreconciled_amount[0][0]
else:
partners_amount[partner_info[0]] += partner_info[1]
history.append(partners_amount)
for partner in partners:
values = {}
## If choice selection is in the future
if self.direction_selection == 'future':
# Query here is replaced by one query which gets the 'before' value for all the partners
before = False
if future_past.has_key(partner['id']):
before = [ future_past[partner['id']] ]
self.total_account[6] = self.total_account[6] + (before and before[0] or 0.0)
values['direction'] = before and before[0] or 0.0
elif self.direction_selection == 'past': # Changed this so people could in the future create new direction_selections
# Query here is replaced by one query which gets the 'after' value for all the partners
after = False
if future_past.has_key(partner['id']): # Making sure this partner actually was found by the query
after = [ future_past[partner['id']] ]
self.total_account[6] = self.total_account[6] + (after and after[0] or 0.0)
values['direction'] = after and after[0] or 0.0
for i in range(5):
during = False
if history[i].has_key(partner['id']):
during = [ history[i][partner['id']] ]
# Add to the counter
self.total_account[(i)] = self.total_account[(i)] + (during and during[0] or 0)
values[str(i)] = during and during[0] or 0.0
total = False
if totals.has_key( partner['id'] ):
total = [ totals[partner['id']] ]
values['total'] = total and total[0] or 0.0
## Add for total
self.total_account[(i+1)] = self.total_account[(i+1)] + (total and total[0] or 0.0)
values['name'] = partner['name']
res.append(values)
total = 0.0
totals = {}
for r in res:
total += float(r['total'] or 0.0)
for i in range(5)+['direction']:
totals.setdefault(str(i), 0.0)
totals[str(i)] += float(r[str(i)] or 0.0)
return res
def _get_lines_with_out_partner(self, form):
res = []
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
## reset the total to 0
for i in range(7):
self.total_account.append(0)
totals = {}
self.cr.execute('SELECT SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id = account_account.id) AND (l.move_id=am.id)\
AND (am.state IN %s)\
AND (l.partner_id IS NULL)\
AND (account_account.type IN %s)\
AND ((l.reconcile_id IS NULL) \
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
AND ' + self.query + '\
AND (l.date <= %s)\
AND account_account.active ',(tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from,))
t = self.cr.fetchall()
for i in t:
totals['Unknown Partner'] = i[0]
future_past = {}
if self.direction_selection == 'future':
self.cr.execute('SELECT SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am\
WHERE (l.account_id=account_account.id) AND (l.move_id=am.id)\
AND (am.state IN %s)\
AND (l.partner_id IS NULL)\
AND (account_account.type IN %s)\
AND (COALESCE(l.date_maturity, l.date) < %s)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
AND '+ self.query + '\
AND account_account.active ', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from))
t = self.cr.fetchall()
for i in t:
future_past['Unknown Partner'] = i[0]
elif self.direction_selection == 'past': # Using elif so people could extend without this breaking
self.cr.execute('SELECT SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id=account_account.id) AND (l.move_id=am.id)\
AND (am.state IN %s)\
AND (l.partner_id IS NULL)\
AND (account_account.type IN %s)\
AND (COALESCE(l.date_maturity,l.date) > %s)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
AND '+ self.query + '\
AND account_account.active ', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from))
t = self.cr.fetchall()
for i in t:
future_past['Unknown Partner'] = i[0]
history = []
for i in range(5):
args_list = (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from,)
dates_query = '(COALESCE(l.date_maturity,l.date)'
if form[str(i)]['start'] and form[str(i)]['stop']:
dates_query += ' BETWEEN %s AND %s)'
args_list += (form[str(i)]['start'], form[str(i)]['stop'])
elif form[str(i)]['start']:
dates_query += ' > %s)'
args_list += (form[str(i)]['start'],)
else:
dates_query += ' < %s)'
args_list += (form[str(i)]['stop'],)
args_list += (self.date_from,)
self.cr.execute('SELECT SUM(l.debit-l.credit)\
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id = account_account.id) AND (l.move_id=am.id)\
AND (am.state IN %s)\
AND (account_account.type IN %s)\
AND (l.partner_id IS NULL)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
AND '+ self.query + '\
AND account_account.active\
AND ' + dates_query + '\
AND (l.date <= %s)\
GROUP BY l.partner_id', args_list)
t = self.cr.fetchall()
d = {}
for i in t:
d['Unknown Partner'] = i[0]
history.append(d)
values = {}
if self.direction_selection == 'future':
before = False
if future_past.has_key('Unknown Partner'):
before = [ future_past['Unknown Partner'] ]
self.total_account[6] = self.total_account[6] + (before and before[0] or 0.0)
values['direction'] = before and before[0] or 0.0
elif self.direction_selection == 'past':
after = False
if future_past.has_key('Unknown Partner'):
after = [ future_past['Unknown Partner'] ]
self.total_account[6] = self.total_account[6] + (after and after[0] or 0.0)
values['direction'] = after and after[0] or 0.0
for i in range(5):
during = False
if history[i].has_key('Unknown Partner'):
during = [ history[i]['Unknown Partner'] ]
self.total_account[(i)] = self.total_account[(i)] + (during and during[0] or 0)
values[str(i)] = during and during[0] or 0.0
total = False
if totals.has_key( 'Unknown Partner' ):
total = [ totals['Unknown Partner'] ]
values['total'] = total and total[0] or 0.0
## Add for total
self.total_account[(i+1)] = self.total_account[(i+1)] + (total and total[0] or 0.0)
values['name'] = 'Unknown Partner'
if values['total']:
res.append(values)
total = 0.0
totals = {}
for r in res:
total += float(r['total'] or 0.0)
for i in range(5)+['direction']:
totals.setdefault(str(i), 0.0)
totals[str(i)] += float(r[str(i)] or 0.0)
return res
def _get_total(self,pos):
period = self.total_account[int(pos)]
return period or 0.0
def _get_direction(self,pos):
period = self.total_account[int(pos)]
return period or 0.0
def _get_for_period(self,pos):
period = self.total_account[int(pos)]
return period or 0.0
def _get_partners(self,data):
# TODO: deprecated, to remove in trunk
if data['form']['result_selection'] == 'customer':
return self._translate('Receivable Accounts')
elif data['form']['result_selection'] == 'supplier':
return self._translate('Payable Accounts')
elif data['form']['result_selection'] == 'customer_supplier':
return self._translate('Receivable and Payable Accounts')
return ''
report_sxw.report_sxw('report.account.aged_trial_balance', 'res.partner',
'addons/account/report/account_aged_partner_balance.rml',parser=aged_trial_report, header="internal landscape")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
nicfit/nicfit.py | tests/test_console.py | 1 | 4219 | import os
import types
import logging
from unittest.mock import patch
import pytest
from nicfit.console import pout, perr
from nicfit.console.ansi import Fg, Bg, Style, init as ansi_init
import contextlib
@contextlib.contextmanager
def envvars(*vars):
saved = {}
for v in vars:
saved[v] = os.environ[v] if v in os.environ else None
try:
yield
finally:
for v in saved:
if saved[v]:
os.environ[v] = saved[v]
elif v in os.environ:
del os.environ[v]
def test_pout(capfd):
msg = "There's a war outside!"
pout(msg)
out, _ = capfd.readouterr()
assert out == msg + "\n"
def test_perr(capfd):
msg = "There's a war outside!"
perr(msg)
_, err = capfd.readouterr()
assert err == msg + "\n"
def test_plog(capfd):
msg = "Xymox - Phoenix"
log = logging.getLogger("test_plog")
with patch.object(log, "info") as mock:
pout(msg, log=log)
out, _ = capfd.readouterr()
assert out == msg + "\n"
mock.assert_called_once_with(msg)
def test_ansi_unsupported():
with envvars("TERM", "OS"):
for var, invalids in [("TERM", ["dumb"]), ("OS", ["Windows_NT"])]:
for val in invalids:
os.environ[var] = val
with pytest.raises(ValueError):
ansi_init(True)
def test_ansi():
CSI = "\033["
class KnownFg:
(GREY,
RED,
GREEN,
YELLOW,
BLUE,
MAGENTA,
CYAN,
WHITE) = [*range(30, 38)]
RESET = 39
class KnownBg:
(GREY,
RED,
GREEN,
YELLOW,
BLUE,
MAGENTA,
CYAN,
WHITE) = [*range(40, 48)]
RESET = 49
class KnownStyle:
(RESET_ALL,
BRIGHT,
DIM,
ITALICS,
UNDERLINE,
BLINK_SLOW,
BLINK_FAST,
INVERSE) = [*range(0, 8)]
STRIKE_THRU = 9
(RESET_BRIGHT,
RESET_ITALICS,
RESET_UNDERLINE,
RESET_BLINK_SLOW,
RESET_BLINK_FAST,
RESET_INVERSE) = [*range(22, 28)]
RESET_STRIKE_THRU = 29
RESET_DIM = RESET_BRIGHT
def mkcode(c):
return "{}{}m".format(CSI, c)
ansi_init(True)
for known_palette, palette in ((KnownFg, Fg),
(KnownBg, Bg),
(KnownStyle, Style),
):
code_list = [c for c in dir(known_palette) if c == c.upper()]
# Test values and members
if known_palette in (KnownFg, KnownBg):
assert len(code_list) == 9
else:
assert len(code_list) == 17
for c in code_list:
assert type(getattr(palette, c)) is str
assert mkcode(getattr(known_palette, c)) == \
getattr(palette, c)
if palette is Style and c.lower().startswith("reset_"):
# Style.reset_*() functions don't exist
continue
assert isinstance(getattr(palette, c.lower()), types.FunctionType)
# Test palette functions vs codes
assert Fg.BLUE + "SNFU" + Fg.RESET == Fg.blue("SNFU")
code = getattr(palette, c)
if palette is Style:
reset = getattr(palette, "RESET_{}".format(c))
else:
reset = getattr(palette, "RESET")
func = getattr(palette, c.lower())
assert code + "SNFU" + reset == func("SNFU")
def test_ansi_palette_attr_disabled():
ansi_init(False)
for palette in (Fg, Bg, Style):
code_list = [c for c in dir(palette) if c == c.upper()]
for c in code_list:
assert getattr(palette, c) == ""
def test_ansi_formats():
ansi_init(True)
s = "Heavy Cream - Run Free"
assert Fg.green(s, Style.BRIGHT,
Style.UNDERLINE,
Style.ITALICS) == \
Style.BRIGHT + Style.UNDERLINE + Style.ITALICS + Fg.GREEN + s + \
Fg.RESET + Style.RESET_ALL
print("%(BLUE)sNice%(RESET)s" % Fg)
# TODO: More complex examples and future format schemes
| mit |
svenkreiss/decouple | Decouple/fullModel_utils.py | 1 | 4848 | #!/usr/bin/env python
# Created on: October 8, 2013
__author__ = "Sven Kreiss, Kyle Cranmer"
__version__ = "0.1"
import ROOT
def getNll( pdf, data, minStrategy=0, enableOffset=True, globObs=None ):
""" Generic functions to use with minimize() from BatchProfileLikelihood. """
# config minimizer
ROOT.RooAbsReal.defaultIntegratorConfig().method1D().setLabel("RooAdaptiveGaussKronrodIntegrator1D")
ROOT.Math.MinimizerOptions.SetDefaultMinimizer("Minuit2","Minimize")
ROOT.Math.MinimizerOptions.SetDefaultStrategy(minStrategy)
ROOT.Math.MinimizerOptions.SetDefaultPrintLevel(-1)
# minimizer initialize
params = pdf.getParameters(data)
ROOT.RooStats.RemoveConstantParameters(params)
if globObs:
nll = pdf.createNLL(
data,
ROOT.RooFit.CloneData(ROOT.kFALSE),
ROOT.RooFit.Constrain(params),
ROOT.RooFit.GlobalObservables(globObs),
ROOT.RooFit.Offset(enableOffset),
)
else:
nll = pdf.createNLL(
data,
ROOT.RooFit.CloneData(ROOT.kFALSE),
ROOT.RooFit.Constrain(params),
ROOT.RooFit.Offset(enableOffset),
)
# nllNoOffset = pdf.createNLL(
# data,
# ROOT.RooFit.CloneData(ROOT.kFALSE),
# ROOT.RooFit.Constrain(params),
# ROOT.RooFit.Offset(False),
# )
nll.setEvalErrorLoggingMode(ROOT.RooAbsReal.CountErrors)
# nllNoOffset.setEvalErrorLoggingMode(ROOT.RooAbsReal.CountErrors)
# if options.enableOffset:
print( "Get NLL once. This first call sets the offset, so it is important that this happens when the parameters are at their initial values." )
print( "nll = "+str( nll.getVal() ) )
return nll
def minimize( nll ):
strat = ROOT.Math.MinimizerOptions.DefaultStrategy()
msglevel = ROOT.RooMsgService.instance().globalKillBelow()
ROOT.RooMsgService.instance().setGlobalKillBelow(ROOT.RooFit.FATAL)
minim = ROOT.RooMinimizer( nll )
minim.setPrintLevel( ROOT.Math.MinimizerOptions.DefaultPrintLevel() )
minim.setStrategy(strat)
minim.optimizeConst(0)
#minim.optimizeConst(options.minOptimizeConst)
# Got to be very careful with SCAN. We have to allow for negative mu,
# so a large part of the space that is scanned produces log-eval errors.
# Therefore, this is usually not feasible.
#minim.minimize(ROOT.Math.MinimizerOptions.DefaultMinimizerType(), "Scan")
status = -1
for i in range( 3 ):
status = minim.minimize(ROOT.Math.MinimizerOptions.DefaultMinimizerType(),
ROOT.Math.MinimizerOptions.DefaultMinimizerAlgo())
if status == 0: break
if status != 0 and status != 1 and strat <= 1:
strat += 1
print( "Retrying with strat "+str(strat) )
minim.setStrategy(strat)
status = minim.minimize(ROOT.Math.MinimizerOptions.DefaultMinimizerType(),
ROOT.Math.MinimizerOptions.DefaultMinimizerAlgo())
if status != 0 and status != 1 and strat <= 1:
strat += 1
print( "Retrying with strat "+str(strat) )
minim.setStrategy(strat)
status = minim.minimize(ROOT.Math.MinimizerOptions.DefaultMinimizerType(),
ROOT.Math.MinimizerOptions.DefaultMinimizerAlgo())
if status != 0 and status != 1:
print( "ERROR::Minimization failed!" )
ROOT.RooMsgService.instance().setGlobalKillBelow(msglevel)
return nll.getVal()
def minimize_fitResult( nll, hesse=True ):
strat = ROOT.Math.MinimizerOptions.DefaultStrategy()
msglevel = ROOT.RooMsgService.instance().globalKillBelow()
ROOT.RooMsgService.instance().setGlobalKillBelow(ROOT.RooFit.FATAL)
minim = ROOT.RooMinimizer( nll )
minim.setPrintLevel( ROOT.Math.MinimizerOptions.DefaultPrintLevel() )
minim.setStrategy(strat)
minim.optimizeConst(0)
#minim.optimizeConst(options.minOptimizeConst)
# Got to be very careful with SCAN. We have to allow for negative mu,
# so large part of the space that is scanned produces log-eval errors.
# Therefore, this is usually not feasible.
#minim.minimize(ROOT.Math.MinimizerOptions.DefaultMinimizerType(), "Scan")
status = -1
for i in range( 3 ):
status = minim.minimize(ROOT.Math.MinimizerOptions.DefaultMinimizerType(),
ROOT.Math.MinimizerOptions.DefaultMinimizerAlgo())
if status == 0: break
if status != 0 and status != 1 and strat <= 1:
strat += 1
print( "Retrying with strat "+str(strat) )
minim.setStrategy(strat)
status = minim.minimize(ROOT.Math.MinimizerOptions.DefaultMinimizerType(),
ROOT.Math.MinimizerOptions.DefaultMinimizerAlgo())
if status != 0 and status != 1 and strat <= 1:
strat += 1
print( "Retrying with strat "+str(strat) )
minim.setStrategy(strat)
status = minim.minimize(ROOT.Math.MinimizerOptions.DefaultMinimizerType(),
ROOT.Math.MinimizerOptions.DefaultMinimizerAlgo())
if status != 0 and status != 1:
print( "ERROR::Minimization failed!" )
# call Hesse
if hesse:
minim.hesse()
ROOT.RooMsgService.instance().setGlobalKillBelow(msglevel)
return minim.save()
| mit |
seem-sky/kbengine | kbe/src/lib/python/Lib/test/test_sort.py | 169 | 9164 | from test import support
import random
import sys
import unittest
from functools import cmp_to_key
verbose = support.verbose
nerrors = 0
def check(tag, expected, raw, compare=None):
global nerrors
if verbose:
print(" checking", tag)
orig = raw[:] # save input in case of error
if compare:
raw.sort(key=cmp_to_key(compare))
else:
raw.sort()
if len(expected) != len(raw):
print("error in", tag)
print("length mismatch;", len(expected), len(raw))
print(expected)
print(orig)
print(raw)
nerrors += 1
return
for i, good in enumerate(expected):
maybe = raw[i]
if good is not maybe:
print("error in", tag)
print("out of order at index", i, good, maybe)
print(expected)
print(orig)
print(raw)
nerrors += 1
return
class TestBase(unittest.TestCase):
def testStressfully(self):
# Try a variety of sizes at and around powers of 2, and at powers of 10.
sizes = [0]
for power in range(1, 10):
n = 2 ** power
sizes.extend(range(n-1, n+2))
sizes.extend([10, 100, 1000])
class Complains(object):
maybe_complain = True
def __init__(self, i):
self.i = i
def __lt__(self, other):
if Complains.maybe_complain and random.random() < 0.001:
if verbose:
print(" complaining at", self, other)
raise RuntimeError
return self.i < other.i
def __repr__(self):
return "Complains(%d)" % self.i
class Stable(object):
def __init__(self, key, i):
self.key = key
self.index = i
def __lt__(self, other):
return self.key < other.key
def __repr__(self):
return "Stable(%d, %d)" % (self.key, self.index)
for n in sizes:
x = list(range(n))
if verbose:
print("Testing size", n)
s = x[:]
check("identity", x, s)
s = x[:]
s.reverse()
check("reversed", x, s)
s = x[:]
random.shuffle(s)
check("random permutation", x, s)
y = x[:]
y.reverse()
s = x[:]
check("reversed via function", y, s, lambda a, b: (b>a)-(b<a))
if verbose:
print(" Checking against an insane comparison function.")
print(" If the implementation isn't careful, this may segfault.")
s = x[:]
s.sort(key=cmp_to_key(lambda a, b: int(random.random() * 3) - 1))
check("an insane function left some permutation", x, s)
if len(x) >= 2:
def bad_key(x):
raise RuntimeError
s = x[:]
self.assertRaises(RuntimeError, s.sort, key=bad_key)
x = [Complains(i) for i in x]
s = x[:]
random.shuffle(s)
Complains.maybe_complain = True
it_complained = False
try:
s.sort()
except RuntimeError:
it_complained = True
if it_complained:
Complains.maybe_complain = False
check("exception during sort left some permutation", x, s)
s = [Stable(random.randrange(10), i) for i in range(n)]
augmented = [(e, e.index) for e in s]
augmented.sort() # forced stable because ties broken by index
x = [e for e, i in augmented] # a stable sort of s
check("stability", x, s)
#==============================================================================
class TestBugs(unittest.TestCase):
def test_bug453523(self):
# bug 453523 -- list.sort() crasher.
# If this fails, the most likely outcome is a core dump.
# Mutations during a list sort should raise a ValueError.
class C:
def __lt__(self, other):
if L and random.random() < 0.75:
L.pop()
else:
L.append(3)
return random.random() < 0.5
L = [C() for i in range(50)]
self.assertRaises(ValueError, L.sort)
def test_undetected_mutation(self):
# Python 2.4a1 did not always detect mutation
memorywaster = []
for i in range(20):
def mutating_cmp(x, y):
L.append(3)
L.pop()
return (x > y) - (x < y)
L = [1,2]
self.assertRaises(ValueError, L.sort, key=cmp_to_key(mutating_cmp))
def mutating_cmp(x, y):
L.append(3)
del L[:]
return (x > y) - (x < y)
self.assertRaises(ValueError, L.sort, key=cmp_to_key(mutating_cmp))
memorywaster = [memorywaster]
#==============================================================================
class TestDecorateSortUndecorate(unittest.TestCase):
def test_decorated(self):
data = 'The quick Brown fox Jumped over The lazy Dog'.split()
copy = data[:]
random.shuffle(data)
data.sort(key=str.lower)
def my_cmp(x, y):
xlower, ylower = x.lower(), y.lower()
return (xlower > ylower) - (xlower < ylower)
copy.sort(key=cmp_to_key(my_cmp))
def test_baddecorator(self):
data = 'The quick Brown fox Jumped over The lazy Dog'.split()
self.assertRaises(TypeError, data.sort, key=lambda x,y: 0)
def test_stability(self):
data = [(random.randrange(100), i) for i in range(200)]
copy = data[:]
data.sort(key=lambda t: t[0]) # sort on the random first field
copy.sort() # sort using both fields
self.assertEqual(data, copy) # should get the same result
def test_key_with_exception(self):
# Verify that the wrapper has been removed
data = list(range(-2, 2))
dup = data[:]
self.assertRaises(ZeroDivisionError, data.sort, key=lambda x: 1/x)
self.assertEqual(data, dup)
def test_key_with_mutation(self):
data = list(range(10))
def k(x):
del data[:]
data[:] = range(20)
return x
self.assertRaises(ValueError, data.sort, key=k)
def test_key_with_mutating_del(self):
data = list(range(10))
class SortKiller(object):
def __init__(self, x):
pass
def __del__(self):
del data[:]
data[:] = range(20)
def __lt__(self, other):
return id(self) < id(other)
self.assertRaises(ValueError, data.sort, key=SortKiller)
def test_key_with_mutating_del_and_exception(self):
data = list(range(10))
## dup = data[:]
class SortKiller(object):
def __init__(self, x):
if x > 2:
raise RuntimeError
def __del__(self):
del data[:]
data[:] = list(range(20))
self.assertRaises(RuntimeError, data.sort, key=SortKiller)
## major honking subtlety: we *can't* do:
##
## self.assertEqual(data, dup)
##
## because there is a reference to a SortKiller in the
## traceback and by the time it dies we're outside the call to
## .sort() and so the list protection gimmicks are out of
## date (this cost some brain cells to figure out...).
def test_reverse(self):
data = list(range(100))
random.shuffle(data)
data.sort(reverse=True)
self.assertEqual(data, list(range(99,-1,-1)))
def test_reverse_stability(self):
data = [(random.randrange(100), i) for i in range(200)]
copy1 = data[:]
copy2 = data[:]
def my_cmp(x, y):
x0, y0 = x[0], y[0]
return (x0 > y0) - (x0 < y0)
def my_cmp_reversed(x, y):
x0, y0 = x[0], y[0]
return (y0 > x0) - (y0 < x0)
data.sort(key=cmp_to_key(my_cmp), reverse=True)
copy1.sort(key=cmp_to_key(my_cmp_reversed))
self.assertEqual(data, copy1)
copy2.sort(key=lambda x: x[0], reverse=True)
self.assertEqual(data, copy2)
#==============================================================================
def test_main(verbose=None):
test_classes = (
TestBase,
TestDecorateSortUndecorate,
TestBugs,
)
support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
if __name__ == "__main__":
test_main(verbose=True)
| lgpl-3.0 |
tperrier/mwachx | contacts/urls.py | 1 | 1223 | from django.conf.urls import patterns, include, url
from rest_framework import routers
import views
from serializers import router
# from views import angular_views
urlpatterns = patterns('',
# DRF API viewer
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^api/v0.1/', include(router.urls)),
# Angular app
url(r'^$', 'contacts.views.angular_view'),
# Misc Actions
url(r'^staff/facility_change/(?P<facility_name>.*)/$','contacts.views.staff_facility_change'), #If we have more than 9 facilities we'd need to change this
url(r'^staff/date/(?P<direction>back|forward)/(?P<delta>\d{1,365})/$','contacts.views.change_current_date'),
url(r'^staff/change_password/','contacts.views.change_password',name='mx-change-password'),
# crispy-form partial
url(r'^crispy-forms/participant/new/?$','contacts.views.crispy.participant_add'),
url(r'^crispy-forms/participant/update/?$','contacts.views.crispy.participant_update'),
# static archive site
url(r'^static_archive/?$', 'contacts.views.static_archive_index'),
url(r'^static_archive/participants/(?P<study_id>\d{4})(.html)?$', 'contacts.views.static_archive_participant'),
)
| apache-2.0 |
apyrgio/snf-ganeti | test/py/cmdlib/testsupport/lock_manager_mock.py | 1 | 2465 | #
#
# Copyright (C) 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Support for mocking the lock manager"""
from ganeti import locking
class LockManagerMock(locking.GanetiLockManager):
"""Mocked lock manager for tests.
"""
def __init__(self):
# reset singleton instance, there is a separate lock manager for every test
# pylint: disable=W0212
self.__class__._instance = None
super(LockManagerMock, self).__init__([], [], [], [])
def AddLocksFromConfig(self, cfg):
"""Create locks for all entities in the given configuration.
@type cfg: ganeti.config.ConfigWriter
"""
try:
self.acquire(locking.LEVEL_CLUSTER, locking.BGL)
for node_uuid in cfg.GetNodeList():
self.add(locking.LEVEL_NODE, node_uuid)
self.add(locking.LEVEL_NODE_RES, node_uuid)
for group_uuid in cfg.GetNodeGroupList():
self.add(locking.LEVEL_NODEGROUP, group_uuid)
for inst in cfg.GetAllInstancesInfo().values():
self.add(locking.LEVEL_INSTANCE, inst.name)
for net_uuid in cfg.GetNetworkList():
self.add(locking.LEVEL_NETWORK, net_uuid)
finally:
self.release(locking.LEVEL_CLUSTER, locking.BGL)
| bsd-2-clause |
maxdeliso/elevatorSim | Lib/idlelib/ClassBrowser.py | 67 | 6371 | """Class browser.
XXX TO DO:
- reparse when source changed (maybe just a button would be OK?)
(or recheck on window popup)
- add popup menu with more options (e.g. doc strings, base classes, imports)
- show function argument list? (have to do pattern matching on source)
- should the classes and methods lists also be in the module's menu bar?
- add base classes to class browser tree
"""
import os
import sys
import pyclbr
from idlelib import PyShell
from idlelib.WindowList import ListedToplevel
from idlelib.TreeWidget import TreeNode, TreeItem, ScrolledCanvas
from idlelib.configHandler import idleConf
class ClassBrowser:
def __init__(self, flist, name, path):
# XXX This API should change, if the file doesn't end in ".py"
# XXX the code here is bogus!
self.name = name
self.file = os.path.join(path[0], self.name + ".py")
self.init(flist)
def close(self, event=None):
self.top.destroy()
self.node.destroy()
def init(self, flist):
self.flist = flist
# reset pyclbr
pyclbr._modules.clear()
# create top
self.top = top = ListedToplevel(flist.root)
top.protocol("WM_DELETE_WINDOW", self.close)
top.bind("<Escape>", self.close)
self.settitle()
top.focus_set()
# create scrolled canvas
theme = idleConf.GetOption('main','Theme','name')
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0, takefocus=1)
sc.frame.pack(expand=1, fill="both")
item = self.rootnode()
self.node = node = TreeNode(sc.canvas, None, item)
node.update()
node.expand()
def settitle(self):
self.top.wm_title("Class Browser - " + self.name)
self.top.wm_iconname("Class Browser")
def rootnode(self):
return ModuleBrowserTreeItem(self.file)
class ModuleBrowserTreeItem(TreeItem):
def __init__(self, file):
self.file = file
def GetText(self):
return os.path.basename(self.file)
def GetIconName(self):
return "python"
def GetSubList(self):
sublist = []
for name in self.listclasses():
item = ClassBrowserTreeItem(name, self.classes, self.file)
sublist.append(item)
return sublist
def OnDoubleClick(self):
if os.path.normcase(self.file[-3:]) != ".py":
return
if not os.path.exists(self.file):
return
PyShell.flist.open(self.file)
def IsExpandable(self):
return os.path.normcase(self.file[-3:]) == ".py"
def listclasses(self):
dir, file = os.path.split(self.file)
name, ext = os.path.splitext(file)
if os.path.normcase(ext) != ".py":
return []
try:
dict = pyclbr.readmodule_ex(name, [dir] + sys.path)
except ImportError as msg:
return []
items = []
self.classes = {}
for key, cl in dict.items():
if cl.module == name:
s = key
if hasattr(cl, 'super') and cl.super:
supers = []
for sup in cl.super:
if type(sup) is type(''):
sname = sup
else:
sname = sup.name
if sup.module != cl.module:
sname = "%s.%s" % (sup.module, sname)
supers.append(sname)
s = s + "(%s)" % ", ".join(supers)
items.append((cl.lineno, s))
self.classes[s] = cl
items.sort()
list = []
for item, s in items:
list.append(s)
return list
class ClassBrowserTreeItem(TreeItem):
def __init__(self, name, classes, file):
self.name = name
self.classes = classes
self.file = file
try:
self.cl = self.classes[self.name]
except (IndexError, KeyError):
self.cl = None
self.isfunction = isinstance(self.cl, pyclbr.Function)
def GetText(self):
if self.isfunction:
return "def " + self.name + "(...)"
else:
return "class " + self.name
def GetIconName(self):
if self.isfunction:
return "python"
else:
return "folder"
def IsExpandable(self):
if self.cl:
try:
return not not self.cl.methods
except AttributeError:
return False
def GetSubList(self):
if not self.cl:
return []
sublist = []
for name in self.listmethods():
item = MethodBrowserTreeItem(name, self.cl, self.file)
sublist.append(item)
return sublist
def OnDoubleClick(self):
if not os.path.exists(self.file):
return
edit = PyShell.flist.open(self.file)
if hasattr(self.cl, 'lineno'):
lineno = self.cl.lineno
edit.gotoline(lineno)
def listmethods(self):
if not self.cl:
return []
items = []
for name, lineno in self.cl.methods.items():
items.append((lineno, name))
items.sort()
list = []
for item, name in items:
list.append(name)
return list
class MethodBrowserTreeItem(TreeItem):
def __init__(self, name, cl, file):
self.name = name
self.cl = cl
self.file = file
def GetText(self):
return "def " + self.name + "(...)"
def GetIconName(self):
return "python" # XXX
def IsExpandable(self):
return 0
def OnDoubleClick(self):
if not os.path.exists(self.file):
return
edit = PyShell.flist.open(self.file)
edit.gotoline(self.cl.methods[self.name])
def main():
try:
file = __file__
except NameError:
file = sys.argv[0]
if sys.argv[1:]:
file = sys.argv[1]
else:
file = sys.argv[0]
dir, file = os.path.split(file)
name = os.path.splitext(file)[0]
ClassBrowser(PyShell.flist, name, [dir])
if sys.stdin is sys.__stdin__:
mainloop()
if __name__ == "__main__":
main()
| bsd-2-clause |
katyushacccp/ISN_projet_final | alpha 1.3 (graphique lag, abandon)/test.py | 1 | 1507 | from time import *
from tkinter import *
from random import *
from modules.affichage import *
from modules.mouvements import *
def melange(root,cube,can,nombre):
global timeur
root.config(cursor="wait")
melangeur(cube,can,nombre)
root.after(nombre*timeur,lambda:root.config(cursor="gumby"))
def test():
"""this is a test, lol"""
start()
rotative(cube,can,1,4,"u")
rotative(cube,can,1,4,"u")
rotative(cube,can,1,4,"u")
rotative(cube,can,1,4,"u'")
rotative(cube,can,1,4,"l")
cube=[["green"]*9,["red"]*9,["blue"]*9,["orange"]*9,["white"]*9,["yellow"]*9]
lowCube=[['green', 'green', 'white', 'green', 'green', 'white', 'green', 'green', 'white'], ['red', 'red', 'red', 'red', 'red', 'red', 'red', 'red', 'red'], ['yellow', 'blue', 'blue', 'yellow', 'blue', 'blue', 'yellow', 'blue', 'blue'], ['orange', 'orange', 'orange', 'orange', 'orange', 'orange', 'orange', 'orange', 'orange'], ['white', 'white', 'white', 'white', 'white', 'white', 'blue', 'blue', 'blue'], ['green', 'green', 'green', 'yellow', 'yellow', 'yellow', 'yellow', 'yellow', 'yellow']]
root=Tk()
root.config(cursor="gumby")
root.title("Rubik's Solveur")
root.iconbitmap("Rubik-Cube.ico")
can=Canvas(root,bg="#F0F0F0",height=550,width=710)
can.pack()
actualise(cube,can)
bou1=Button(root, text="mélanger", command=lambda:melange(root,cube,can,5), fg='blue')
bou1.pack()
bou2=Button(root, text="test", command=lambda:rotationAnim(lowCube,cube,can,0,"droite"), fg='blue')
bou2.pack()
| cc0-1.0 |
revmischa/boto | boto/cognito/identity/exceptions.py | 135 | 1125 | # The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import BotoServerError
class LimitExceededException(BotoServerError):
pass
class ResourceConflictException(BotoServerError):
pass
class DeveloperUserAlreadyRegisteredException(BotoServerError):
pass
class TooManyRequestsException(BotoServerError):
pass
class InvalidParameterException(BotoServerError):
pass
class ResourceNotFoundException(BotoServerError):
pass
class InternalErrorException(BotoServerError):
pass
class NotAuthorizedException(BotoServerError):
pass
| mit |