repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (string, 1-5 chars) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (string, 15 classes)
---|---|---|---|---|---|
bowenliu16/deepchem | examples/tox21/tox21_tf_progressive.py | 3 | 1049 | """
Script that trains progressive multitask models on Tox21 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import shutil
import numpy as np
import deepchem as dc
from tox21_datasets import load_tox21
# Only for debug!
np.random.seed(123)
# Load Tox21 dataset
n_features = 1024
tox21_tasks, tox21_datasets, transformers = load_tox21()
train_dataset, valid_dataset, test_dataset = tox21_datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)
model = dc.models.ProgressiveMultitaskClassifier(
len(tox21_tasks), n_features, layer_sizes=[1000], dropouts=[.25],
learning_rate=0.001, batch_size=50)
# Train the model
model.fit(train_dataset, nb_epoch=10)
model.save()
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
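# A possible extension (not in the original script): score the held-out test
# split that load_tox21() returned above, using the same metric and transformers.
test_scores = model.evaluate(test_dataset, [metric], transformers)
print("Test scores")
print(test_scores)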
| gpl-3.0 |
acmaheri/sms-tools | software/transformations/stftTransformations.py | 4 | 5645 | # functions that implement transformations using the stft
import numpy as np
import sys, os, math
from scipy.signal import resample
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import dftModel as DFT
def stftFiltering(x, fs, w, N, H, filter):
"""
Apply a filter to a sound by using the STFT
x: input sound, w: analysis window, N: FFT size, H: hop size
filter: filter magnitude response in dB, one value per positive DFT bin (length N/2+1)
returns y: output sound
"""
M = w.size # size of analysis window
hM1 = int(math.floor((M+1)/2)) # half analysis window size by rounding
hM2 = int(math.floor(M/2)) # half analysis window size by floor
x = np.append(np.zeros(hM2),x) # add zeros at beginning to center first window at sample 0
x = np.append(x,np.zeros(hM1)) # add zeros at the end to analyze last sample
pin = hM1 # initialize sound pointer in middle of analysis window
pend = x.size-hM1 # last sample to start a frame
w = w / sum(w) # normalize analysis window
y = np.zeros(x.size) # initialize output array
while pin<=pend: # while sound pointer is smaller than last sample
#-----analysis-----
x1 = x[pin-hM1:pin+hM2] # select one frame of input sound
mX, pX = DFT.dftAnal(x1, w, N) # compute dft
#------transformation-----
mY = mX + filter # filter input magnitude spectrum
#-----synthesis-----
y1 = DFT.dftSynth(mY, pX, M) # compute idft
y[pin-hM1:pin+hM2] += H*y1 # overlap-add to generate output sound
pin += H # advance sound pointer
y = np.delete(y, range(hM2)) # delete half of first window which was added in stftAnal
y = np.delete(y, range(y.size-hM1, y.size)) # delete the zeros added at the end to analyze the last sample
return y
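# Minimal usage sketch for stftFiltering (assumed parameter values, not part of
# the original module): attenuate a synthetic 440 Hz tone above ~1 kHz by 60 dB.
#   import numpy as np
#   from scipy.signal import get_window
#   fs = 44100
#   x = 0.5 * np.sin(2 * np.pi * 440 * np.arange(fs) / float(fs))  # 1 s test tone
#   N, M, H = 2048, 1025, 256
#   w = get_window('hamming', M)
#   filt = np.zeros(N // 2 + 1)                # one gain in dB per positive DFT bin
#   filt[int(1000.0 * N / fs):] = -60.0        # cut every bin above ~1 kHz
#   y = stftFiltering(x, fs, w, N, H, filt)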
def stftMorph(x1, x2, fs, w1, N1, w2, N2, H1, smoothf, balancef):
"""
Morph of two sounds using the STFT
x1, x2: input sounds, fs: sampling rate
w1, w2: analysis windows, N1, N2: FFT sizes, H1: hop size
smoothf: smoothing factor applied to sound 2, greater than 0 and at most 1, where 1 means no smoothing,
balancef: balance between the 2 sounds, from 0 to 1, where 0 is sound 1 and 1 is sound 2
returns y: output sound
"""
if (N2/2*smoothf < 3): # raise exception if decimation factor too small
raise ValueError("Smooth factor too small")
if (smoothf > 1): # raise exception if decimation factor too big
raise ValueError("Smooth factor above 1")
if (balancef > 1 or balancef < 0): # raise exception if balancef outside 0-1
raise ValueError("Balance factor outside range")
if (H1 <= 0): # raise error if hop size 0 or negative
raise ValueError("Hop size (H1) smaller or equal to 0")
M1 = w1.size # size of analysis window
hM1_1 = int(math.floor((M1+1)/2)) # half analysis window size by rounding
hM1_2 = int(math.floor(M1/2)) # half analysis window size by floor
L = int(x1.size/H1) # number of frames for x1
x1 = np.append(np.zeros(hM1_2),x1) # add zeros at beginning to center first window at sample 0
x1 = np.append(x1,np.zeros(hM1_1)) # add zeros at the end to analyze last sample
pin1 = hM1_1 # initialize sound pointer in middle of analysis window
w1 = w1 / sum(w1) # normalize analysis window
M2 = w2.size # size of analysis window
hM2_1 = int(math.floor((M2+1)/2)) # half analysis window size by rounding
hM2_2 = int(math.floor(M2/2)) # half analysis window size by floor
H2 = int(x2.size/L) # hop size for second sound
x2 = np.append(np.zeros(hM2_2),x2) # add zeros at beginning to center first window at sample 0
x2 = np.append(x2,np.zeros(hM2_1)) # add zeros at the end to analyze last sample
pin2 = hM2_1 # initialize sound pointer in middle of analysis window
y = np.zeros(x1.size) # initialize output array
for l in range(L):
#-----analysis-----
mX1, pX1 = DFT.dftAnal(x1[pin1-hM1_1:pin1+hM1_2], w1, N1) # compute dft
mX2, pX2 = DFT.dftAnal(x2[pin2-hM2_1:pin2+hM2_2], w2, N2) # compute dft
#-----transformation-----
mX2smooth = resample(np.maximum(-200, mX2), int(mX2.size*smoothf)) # smooth spectrum of second sound
mX2 = resample(mX2smooth, mX1.size) # generate back the same size spectrum
mY = balancef * mX2 + (1-balancef) * mX1 # generate output spectrum
#-----synthesis-----
y[pin1-hM1_1:pin1+hM1_2] += H1*DFT.dftSynth(mY, pX1, M1) # overlap-add to generate output sound
pin1 += H1 # advance sound pointer
pin2 += H2 # advance sound pointer
y = np.delete(y, range(hM1_2)) # delete half of first window which was added in stftAnal
y = np.delete(y, range(y.size-hM1_1, y.size)) # delete the zeros added at the end to analyze the last sample
return y
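# Minimal usage sketch for stftMorph (assumed parameters, not part of the
# original module): morph sound x1 towards x2 while keeping the phases of x1.
# x1, x2 and fs are assumed to be two loaded sounds and their sampling rate.
#   from scipy.signal import get_window
#   w1 = get_window('hamming', 1025); w2 = get_window('hamming', 1025)
#   N1 = N2 = 2048; H1 = 256
#   smoothf = 0.2      # strong smoothing of the second spectrum
#   balancef = 0.7     # 70% of the output magnitude comes from (smoothed) x2
#   y = stftMorph(x1, x2, fs, w1, N1, w2, N2, H1, smoothf, balancef)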
| agpl-3.0 |
igemsoftware/SYSU-Software2013 | project/Python27/Tools/scripts/texi2html.py | 46 | 69824 | #! /usr/bin/env python
# Convert GNU texinfo files into HTML, one file per node.
# Based on Texinfo 2.14.
# Usage: texi2html [-d] [-d] [-c] inputfile outputdirectory
# The input file must be a complete texinfo file, e.g. emacs.texi.
# This creates many files (one per info node) in the output directory,
# overwriting existing files of the same name. All files created have
# ".html" as their extension.
# XXX To do:
# - handle @comment*** correctly
# - handle @xref {some words} correctly
# - handle @ftable correctly (items aren't indexed?)
# - handle @itemx properly
# - handle @exdent properly
# - add links directly to the proper line from indices
# - check against the definitive list of @-cmds; we still miss (among others):
# - @defindex (hard)
# - @c(omment) in the middle of a line (rarely used)
# - @this* (not really needed, only used in headers anyway)
# - @today{} (ever used outside title page?)
# More consistent handling of chapters/sections/etc.
# Lots of documentation
# Many more options:
# -top designate top node
# -links customize which types of links are included
# -split split at chapters or sections instead of nodes
# -name Allow different types of filename handling. Non unix systems
# will have problems with long node names
# ...
# Support the most recent texinfo version and take a good look at HTML 3.0
# More debugging output (customizable) and more flexible error handling
# How about icons ?
# rpyron 2002-05-07
# Robert Pyron <[email protected]>
# 1. BUGFIX: In function makefile(), strip blanks from the nodename.
# This is necessary to match the behavior of parser.makeref() and
# parser.do_node().
# 2. BUGFIX fixed KeyError in end_ifset (well, I may have just made
# it go away, rather than fix it)
# 3. BUGFIX allow @menu and menu items inside @ifset or @ifclear
# 4. Support added for:
# @uref URL reference
# @image image file reference (see note below)
# @multitable output an HTML table
# @vtable
# 5. Partial support for accents, to match MAKEINFO output
# 6. I added a new command-line option, '-H basename', to specify
# HTML Help output. This will cause three files to be created
# in the current directory:
# `basename`.hhp HTML Help Workshop project file
# `basename`.hhc Contents file for the project
# `basename`.hhk Index file for the project
# When fed into HTML Help Workshop, the resulting file will be
# named `basename`.chm.
# 7. A new class, HTMLHelp, to accomplish item 6.
# 8. Various calls to HTMLHelp functions.
# A NOTE ON IMAGES: Just as 'outputdirectory' must exist before
# running this program, all referenced images must already exist
# in outputdirectory.
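# Example invocations (hypothetical file and directory names; the output
# directory must already exist, see the note above):
#   python texi2html.py -d emacs.texi ./emacs-html
#   python texi2html.py -H pydoc python.texi ./python-html   # also emit HTML Help files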
import os
import sys
import string
import re
MAGIC = '\\input texinfo'
cmprog = re.compile('^@([a-z]+)([ \t]|$)') # Command (line-oriented)
blprog = re.compile('^[ \t]*$') # Blank line
kwprog = re.compile('@[a-z]+') # Keyword (embedded, usually
# with {} args)
spprog = re.compile('[\n@{}&<>]') # Special characters in
# running text
#
# menu item (Yuck!)
miprog = re.compile('^\* ([^:]*):(:|[ \t]*([^\t,\n.]+)([^ \t\n]*))[ \t\n]*')
# group 1: menu entry label
# group 2: ':' (short form '* node::') or the spec that follows the first ':'
# group 3: target node name, group 4: trailing punctuation
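# For illustration (hypothetical menu line, not from the original script):
#   miprog.match('* Overview: Intro.  The first chapter.\n') captures
#   group(1) == 'Overview' (label), group(3) == 'Intro' (node), group(4) == '.'
#   while the short form '* Intro::' matches with group(2) == ':', so the label
#   doubles as the node name (see TexinfoParser.process below).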
class HTMLNode:
"""Some of the parser's functionality is separated into this class.
A Node accumulates its contents, takes care of links to other Nodes
and saves itself when it is finished and all links are resolved.
"""
DOCTYPE = '<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">'
type = 0
cont = ''
epilogue = '</BODY></HTML>\n'
def __init__(self, dir, name, topname, title, next, prev, up):
self.dirname = dir
self.name = name
if topname:
self.topname = topname
else:
self.topname = name
self.title = title
self.next = next
self.prev = prev
self.up = up
self.lines = []
def write(self, *lines):
map(self.lines.append, lines)
def flush(self):
fp = open(self.dirname + '/' + makefile(self.name), 'w')
fp.write(self.prologue)
fp.write(self.text)
fp.write(self.epilogue)
fp.close()
def link(self, label, nodename, rel=None, rev=None):
if nodename:
if nodename.lower() == '(dir)':
addr = '../dir.html'
title = ''
else:
addr = makefile(nodename)
title = ' TITLE="%s"' % nodename
self.write(label, ': <A HREF="', addr, '"', \
rel and (' REL=' + rel) or "", \
rev and (' REV=' + rev) or "", \
title, '>', nodename, '</A> \n')
def finalize(self):
length = len(self.lines)
self.text = ''.join(self.lines)
self.lines = []
self.open_links()
self.output_links()
self.close_links()
links = ''.join(self.lines)
self.lines = []
self.prologue = (
self.DOCTYPE +
'\n<HTML><HEAD>\n'
' <!-- Converted with texi2html and Python -->\n'
' <TITLE>' + self.title + '</TITLE>\n'
' <LINK REL=Next HREF="'
+ makefile(self.next) + '" TITLE="' + self.next + '">\n'
' <LINK REL=Previous HREF="'
+ makefile(self.prev) + '" TITLE="' + self.prev + '">\n'
' <LINK REL=Up HREF="'
+ makefile(self.up) + '" TITLE="' + self.up + '">\n'
'</HEAD><BODY>\n' +
links)
if length > 20:
self.epilogue = '<P>\n%s</BODY></HTML>\n' % links
def open_links(self):
self.write('<HR>\n')
def close_links(self):
self.write('<HR>\n')
def output_links(self):
if self.cont != self.next:
self.link(' Cont', self.cont)
self.link(' Next', self.next, rel='Next')
self.link(' Prev', self.prev, rel='Previous')
self.link(' Up', self.up, rel='Up')
if self.name <> self.topname:
self.link(' Top', self.topname)
class HTML3Node(HTMLNode):
DOCTYPE = '<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML Level 3//EN//3.0">'
def open_links(self):
self.write('<DIV CLASS=Navigation>\n <HR>\n')
def close_links(self):
self.write(' <HR>\n</DIV>\n')
class TexinfoParser:
COPYRIGHT_SYMBOL = "&copy;"
FN_ID_PATTERN = "(%(id)s)"
FN_SOURCE_PATTERN = '<A NAME=footnoteref%(id)s' \
' HREF="#footnotetext%(id)s">' \
+ FN_ID_PATTERN + '</A>'
FN_TARGET_PATTERN = '<A NAME=footnotetext%(id)s' \
' HREF="#footnoteref%(id)s">' \
+ FN_ID_PATTERN + '</A>\n%(text)s<P>\n'
FN_HEADER = '\n<P>\n<HR NOSHADE SIZE=1 WIDTH=200>\n' \
'<STRONG><EM>Footnotes</EM></STRONG>\n<P>'
Node = HTMLNode
# Initialize an instance
def __init__(self):
self.unknown = {} # statistics about unknown @-commands
self.filenames = {} # Check for identical filenames
self.debugging = 0 # larger values produce more output
self.print_headers = 0 # always print headers?
self.nodefp = None # open file we're writing to
self.nodelineno = 0 # Linenumber relative to node
self.links = None # Links from current node
self.savetext = None # If not None, save text head instead
self.savestack = [] # If not None, save text head instead
self.htmlhelp = None # html help data
self.dirname = 'tmp' # directory where files are created
self.includedir = '.' # directory to search @include files
self.nodename = '' # name of current node
self.topname = '' # name of top node (first node seen)
self.title = '' # title of this whole Texinfo tree
self.resetindex() # Reset all indices
self.contents = [] # Reset table of contents
self.numbering = [] # Reset section numbering counters
self.nofill = 0 # Normal operation: fill paragraphs
self.values={'html': 1} # Names that should be parsed in ifset
self.stackinfo={} # Keep track of state in the stack
# XXX The following should be reset per node?!
self.footnotes = [] # Reset list of footnotes
self.itemarg = None # Reset command used by @item
self.itemnumber = None # Reset number for @item in @enumerate
self.itemindex = None # Reset item index name
self.node = None
self.nodestack = []
self.cont = 0
self.includedepth = 0
# Set htmlhelp helper class
def sethtmlhelp(self, htmlhelp):
self.htmlhelp = htmlhelp
# Set (output) directory name
def setdirname(self, dirname):
self.dirname = dirname
# Set include directory name
def setincludedir(self, includedir):
self.includedir = includedir
# Parse the contents of an entire file
def parse(self, fp):
line = fp.readline()
lineno = 1
while line and (line[0] == '%' or blprog.match(line)):
line = fp.readline()
lineno = lineno + 1
if line[:len(MAGIC)] <> MAGIC:
raise SyntaxError, 'file does not begin with %r' % (MAGIC,)
self.parserest(fp, lineno)
# Parse the contents of a file, not expecting a MAGIC header
def parserest(self, fp, initial_lineno):
lineno = initial_lineno
self.done = 0
self.skip = 0
self.stack = []
accu = []
while not self.done:
line = fp.readline()
self.nodelineno = self.nodelineno + 1
if not line:
if accu:
if not self.skip: self.process(accu)
accu = []
if initial_lineno > 0:
print '*** EOF before @bye'
break
lineno = lineno + 1
mo = cmprog.match(line)
if mo:
a, b = mo.span(1)
cmd = line[a:b]
if cmd in ('noindent', 'refill'):
accu.append(line)
else:
if accu:
if not self.skip:
self.process(accu)
accu = []
self.command(line, mo)
elif blprog.match(line) and \
'format' not in self.stack and \
'example' not in self.stack:
if accu:
if not self.skip:
self.process(accu)
if self.nofill:
self.write('\n')
else:
self.write('<P>\n')
accu = []
else:
# Append the line including trailing \n!
accu.append(line)
#
if self.skip:
print '*** Still skipping at the end'
if self.stack:
print '*** Stack not empty at the end'
print '***', self.stack
if self.includedepth == 0:
while self.nodestack:
self.nodestack[-1].finalize()
self.nodestack[-1].flush()
del self.nodestack[-1]
# Start saving text in a buffer instead of writing it to a file
def startsaving(self):
if self.savetext <> None:
self.savestack.append(self.savetext)
# print '*** Recursively saving text, expect trouble'
self.savetext = ''
# Return the text saved so far and start writing to file again
def collectsavings(self):
savetext = self.savetext
if len(self.savestack) > 0:
self.savetext = self.savestack[-1]
del self.savestack[-1]
else:
self.savetext = None
return savetext or ''
# Write text to file, or save it in a buffer, or ignore it
def write(self, *args):
try:
text = ''.join(args)
except:
print args
raise TypeError
if self.savetext <> None:
self.savetext = self.savetext + text
elif self.nodefp:
self.nodefp.write(text)
elif self.node:
self.node.write(text)
# Complete the current node -- write footnotes and close file
def endnode(self):
if self.savetext <> None:
print '*** Still saving text at end of node'
dummy = self.collectsavings()
if self.footnotes:
self.writefootnotes()
if self.nodefp:
if self.nodelineno > 20:
self.write('<HR>\n')
[name, next, prev, up] = self.nodelinks[:4]
self.link('Next', next)
self.link('Prev', prev)
self.link('Up', up)
if self.nodename <> self.topname:
self.link('Top', self.topname)
self.write('<HR>\n')
self.write('</BODY>\n')
self.nodefp.close()
self.nodefp = None
elif self.node:
if not self.cont and \
(not self.node.type or \
(self.node.next and self.node.prev and self.node.up)):
self.node.finalize()
self.node.flush()
else:
self.nodestack.append(self.node)
self.node = None
self.nodename = ''
# Process a list of lines, expanding embedded @-commands
# This mostly distinguishes between menus and normal text
def process(self, accu):
if self.debugging > 1:
print '!'*self.debugging, 'process:', self.skip, self.stack,
if accu: print accu[0][:30],
if accu[0][30:] or accu[1:]: print '...',
print
if self.inmenu():
# XXX should be done differently
for line in accu:
mo = miprog.match(line)
if not mo:
line = line.strip() + '\n'
self.expand(line)
continue
bgn, end = mo.span(0)
a, b = mo.span(1)
c, d = mo.span(2)
e, f = mo.span(3)
g, h = mo.span(4)
label = line[a:b]
nodename = line[c:d]
if nodename[0] == ':': nodename = label
else: nodename = line[e:f]
punct = line[g:h]
self.write(' <LI><A HREF="',
makefile(nodename),
'">', nodename,
'</A>', punct, '\n')
self.htmlhelp.menuitem(nodename)
self.expand(line[end:])
else:
text = ''.join(accu)
self.expand(text)
# find 'menu' (we might be inside 'ifset' or 'ifclear')
def inmenu(self):
#if 'menu' in self.stack:
# print 'inmenu :', self.skip, self.stack, self.stackinfo
stack = self.stack
while stack and stack[-1] in ('ifset','ifclear'):
try:
if self.stackinfo[len(stack)]:
return 0
except KeyError:
pass
stack = stack[:-1]
return (stack and stack[-1] == 'menu')
# Write a string, expanding embedded @-commands
def expand(self, text):
stack = []
i = 0
n = len(text)
while i < n:
start = i
mo = spprog.search(text, i)
if mo:
i = mo.start()
else:
self.write(text[start:])
break
self.write(text[start:i])
c = text[i]
i = i+1
if c == '\n':
self.write('\n')
continue
if c == '<':
self.write('&lt;') # escape HTML special character
continue
if c == '>':
self.write('&gt;')
continue
if c == '&':
self.write('&amp;')
continue
if c == '{':
stack.append('')
continue
if c == '}':
if not stack:
print '*** Unmatched }'
self.write('}')
continue
cmd = stack[-1]
del stack[-1]
try:
method = getattr(self, 'close_' + cmd)
except AttributeError:
self.unknown_close(cmd)
continue
method()
continue
if c <> '@':
# Cannot happen unless spprog is changed
raise RuntimeError, 'unexpected funny %r' % c
start = i
while i < n and text[i] in string.ascii_letters: i = i+1
if i == start:
# @ plus non-letter: literal next character
i = i+1
c = text[start:i]
if c == ':':
# `@:' means no extra space after
# preceding `.', `?', `!' or `:'
pass
else:
# `@.' means a sentence-ending period;
# `@@', `@{', `@}' quote `@', `{', `}'
self.write(c)
continue
cmd = text[start:i]
if i < n and text[i] == '{':
i = i+1
stack.append(cmd)
try:
method = getattr(self, 'open_' + cmd)
except AttributeError:
self.unknown_open(cmd)
continue
method()
continue
try:
method = getattr(self, 'handle_' + cmd)
except AttributeError:
self.unknown_handle(cmd)
continue
method()
if stack:
print '*** Stack not empty at para:', stack
# --- Handle unknown embedded @-commands ---
def unknown_open(self, cmd):
print '*** No open func for @' + cmd + '{...}'
cmd = cmd + '{'
self.write('@', cmd)
if not self.unknown.has_key(cmd):
self.unknown[cmd] = 1
else:
self.unknown[cmd] = self.unknown[cmd] + 1
def unknown_close(self, cmd):
print '*** No close func for @' + cmd + '{...}'
cmd = '}' + cmd
self.write('}')
if not self.unknown.has_key(cmd):
self.unknown[cmd] = 1
else:
self.unknown[cmd] = self.unknown[cmd] + 1
def unknown_handle(self, cmd):
print '*** No handler for @' + cmd
self.write('@', cmd)
if not self.unknown.has_key(cmd):
self.unknown[cmd] = 1
else:
self.unknown[cmd] = self.unknown[cmd] + 1
# XXX The following sections should be ordered as the texinfo docs
# --- Embedded @-commands without {} argument list --
def handle_noindent(self): pass
def handle_refill(self): pass
# --- Include file handling ---
def do_include(self, args):
file = args
file = os.path.join(self.includedir, file)
try:
fp = open(file, 'r')
except IOError, msg:
print '*** Can\'t open include file', repr(file)
return
print '!'*self.debugging, '--> file', repr(file)
save_done = self.done
save_skip = self.skip
save_stack = self.stack
self.includedepth = self.includedepth + 1
self.parserest(fp, 0)
self.includedepth = self.includedepth - 1
fp.close()
self.done = save_done
self.skip = save_skip
self.stack = save_stack
print '!'*self.debugging, '<-- file', repr(file)
# --- Special Insertions ---
def open_dmn(self): pass
def close_dmn(self): pass
def open_dots(self): self.write('...')
def close_dots(self): pass
def open_bullet(self): pass
def close_bullet(self): pass
def open_TeX(self): self.write('TeX')
def close_TeX(self): pass
def handle_copyright(self): self.write(self.COPYRIGHT_SYMBOL)
def open_copyright(self): self.write(self.COPYRIGHT_SYMBOL)
def close_copyright(self): pass
def open_minus(self): self.write('-')
def close_minus(self): pass
# --- Accents ---
# rpyron 2002-05-07
# I would like to do at least as well as makeinfo when
# it is producing HTML output:
#
# input output
# @"o @"o umlaut accent
# @'o 'o acute accent
# @,{c} @,{c} cedilla accent
# @=o @=o macron/overbar accent
# @^o @^o circumflex accent
# @`o `o grave accent
# @~o @~o tilde accent
# @dotaccent{o} @dotaccent{o} overdot accent
# @H{o} @H{o} long Hungarian umlaut
# @ringaccent{o} @ringaccent{o} ring accent
# @tieaccent{oo} @tieaccent{oo} tie-after accent
# @u{o} @u{o} breve accent
# @ubaraccent{o} @ubaraccent{o} underbar accent
# @udotaccent{o} @udotaccent{o} underdot accent
# @v{o} @v{o} hacek or check accent
# @exclamdown{} ¡ upside-down !
# @questiondown{} ¿ upside-down ?
# @aa{},@AA{} å,Å a,A with circle
# @ae{},@AE{} æ,Æ ae,AE ligatures
# @dotless{i} @dotless{i} dotless i
# @dotless{j} @dotless{j} dotless j
# @l{},@L{} l/,L/ suppressed-L,l
# @o{},@O{} ø,Ø O,o with slash
# @oe{},@OE{} oe,OE oe,OE ligatures
# @ss{} ß es-zet or sharp S
#
# The following character codes and approximations have been
# copied from makeinfo's HTML output.
def open_exclamdown(self): self.write('&#161;') # upside-down !
def close_exclamdown(self): pass
def open_questiondown(self): self.write('&#191;') # upside-down ?
def close_questiondown(self): pass
def open_aa(self): self.write('&#229;') # a with circle
def close_aa(self): pass
def open_AA(self): self.write('&#197;') # A with circle
def close_AA(self): pass
def open_ae(self): self.write('&#230;') # ae ligatures
def close_ae(self): pass
def open_AE(self): self.write('&#198;') # AE ligatures
def close_AE(self): pass
def open_o(self): self.write('&#248;') # o with slash
def close_o(self): pass
def open_O(self): self.write('&#216;') # O with slash
def close_O(self): pass
def open_ss(self): self.write('&#223;') # es-zet or sharp S
def close_ss(self): pass
def open_oe(self): self.write('oe') # oe ligatures
def close_oe(self): pass
def open_OE(self): self.write('OE') # OE ligatures
def close_OE(self): pass
def open_l(self): self.write('l/') # suppressed-l
def close_l(self): pass
def open_L(self): self.write('L/') # suppressed-L
def close_L(self): pass
# --- Special Glyphs for Examples ---
def open_result(self): self.write('=>')
def close_result(self): pass
def open_expansion(self): self.write('==>')
def close_expansion(self): pass
def open_print(self): self.write('-|')
def close_print(self): pass
def open_error(self): self.write('error-->')
def close_error(self): pass
def open_equiv(self): self.write('==')
def close_equiv(self): pass
def open_point(self): self.write('-!-')
def close_point(self): pass
# --- Cross References ---
def open_pxref(self):
self.write('see ')
self.startsaving()
def close_pxref(self):
self.makeref()
def open_xref(self):
self.write('See ')
self.startsaving()
def close_xref(self):
self.makeref()
def open_ref(self):
self.startsaving()
def close_ref(self):
self.makeref()
def open_inforef(self):
self.write('See info file ')
self.startsaving()
def close_inforef(self):
text = self.collectsavings()
args = [s.strip() for s in text.split(',')]
while len(args) < 3: args.append('')
node = args[0]
file = args[2]
self.write('`', file, '\', node `', node, '\'')
def makeref(self):
text = self.collectsavings()
args = [s.strip() for s in text.split(',')]
while len(args) < 5: args.append('')
nodename = label = args[0]
if args[2]: label = args[2]
file = args[3]
title = args[4]
href = makefile(nodename)
if file:
href = '../' + file + '/' + href
self.write('<A HREF="', href, '">', label, '</A>')
# rpyron 2002-05-07 uref support
def open_uref(self):
self.startsaving()
def close_uref(self):
text = self.collectsavings()
args = [s.strip() for s in text.split(',')]
while len(args) < 2: args.append('')
href = args[0]
label = args[1]
if not label: label = href
self.write('<A HREF="', href, '">', label, '</A>')
# rpyron 2002-05-07 image support
# GNU makeinfo producing HTML output tries `filename.png'; if
# that does not exist, it tries `filename.jpg'. If that does
# not exist either, it complains. GNU makeinfo does not handle
# GIF files; however, I include GIF support here because
# MySQL documentation uses GIF files.
def open_image(self):
self.startsaving()
def close_image(self):
self.makeimage()
def makeimage(self):
text = self.collectsavings()
args = [s.strip() for s in text.split(',')]
while len(args) < 5: args.append('')
filename = args[0]
width = args[1]
height = args[2]
alt = args[3]
ext = args[4]
# The HTML output will have a reference to the image
# that is relative to the HTML output directory,
# which is what 'filename' gives us. However, we need
# to find it relative to our own current directory,
# so we construct 'imagename'.
imagelocation = self.dirname + '/' + filename
if os.path.exists(imagelocation+'.png'):
filename += '.png'
elif os.path.exists(imagelocation+'.jpg'):
filename += '.jpg'
elif os.path.exists(imagelocation+'.gif'): # MySQL uses GIF files
filename += '.gif'
else:
print "*** Cannot find image " + imagelocation
#TODO: what is 'ext'?
self.write('<IMG SRC="', filename, '"', \
width and (' WIDTH="' + width + '"') or "", \
height and (' HEIGHT="' + height + '"') or "", \
alt and (' ALT="' + alt + '"') or "", \
'/>' )
self.htmlhelp.addimage(imagelocation)
# --- Marking Words and Phrases ---
# --- Other @xxx{...} commands ---
def open_(self): pass # Used by {text enclosed in braces}
def close_(self): pass
open_asis = open_
close_asis = close_
def open_cite(self): self.write('<CITE>')
def close_cite(self): self.write('</CITE>')
def open_code(self): self.write('<CODE>')
def close_code(self): self.write('</CODE>')
def open_t(self): self.write('<TT>')
def close_t(self): self.write('</TT>')
def open_dfn(self): self.write('<DFN>')
def close_dfn(self): self.write('</DFN>')
def open_emph(self): self.write('<EM>')
def close_emph(self): self.write('</EM>')
def open_i(self): self.write('<I>')
def close_i(self): self.write('</I>')
def open_footnote(self):
# if self.savetext <> None:
# print '*** Recursive footnote -- expect weirdness'
id = len(self.footnotes) + 1
self.write(self.FN_SOURCE_PATTERN % {'id': repr(id)})
self.startsaving()
def close_footnote(self):
id = len(self.footnotes) + 1
self.footnotes.append((id, self.collectsavings()))
def writefootnotes(self):
self.write(self.FN_HEADER)
for id, text in self.footnotes:
self.write(self.FN_TARGET_PATTERN
% {'id': repr(id), 'text': text})
self.footnotes = []
def open_file(self): self.write('<CODE>')
def close_file(self): self.write('</CODE>')
def open_kbd(self): self.write('<KBD>')
def close_kbd(self): self.write('</KBD>')
def open_key(self): self.write('<KEY>')
def close_key(self): self.write('</KEY>')
def open_r(self): self.write('<R>')
def close_r(self): self.write('</R>')
def open_samp(self): self.write('`<SAMP>')
def close_samp(self): self.write('</SAMP>\'')
def open_sc(self): self.write('<SMALLCAPS>')
def close_sc(self): self.write('</SMALLCAPS>')
def open_strong(self): self.write('<STRONG>')
def close_strong(self): self.write('</STRONG>')
def open_b(self): self.write('<B>')
def close_b(self): self.write('</B>')
def open_var(self): self.write('<VAR>')
def close_var(self): self.write('</VAR>')
def open_w(self): self.write('<NOBREAK>')
def close_w(self): self.write('</NOBREAK>')
def open_url(self): self.startsaving()
def close_url(self):
text = self.collectsavings()
self.write('<A HREF="', text, '">', text, '</A>')
def open_email(self): self.startsaving()
def close_email(self):
text = self.collectsavings()
self.write('<A HREF="mailto:', text, '">', text, '</A>')
open_titlefont = open_
close_titlefont = close_
def open_small(self): pass
def close_small(self): pass
def command(self, line, mo):
a, b = mo.span(1)
cmd = line[a:b]
args = line[b:].strip()
if self.debugging > 1:
print '!'*self.debugging, 'command:', self.skip, self.stack, \
'@' + cmd, args
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
try:
func = getattr(self, 'bgn_' + cmd)
except AttributeError:
# don't complain if we are skipping anyway
if not self.skip:
self.unknown_cmd(cmd, args)
return
self.stack.append(cmd)
func(args)
return
if not self.skip or cmd == 'end':
func(args)
def unknown_cmd(self, cmd, args):
print '*** unknown', '@' + cmd, args
if not self.unknown.has_key(cmd):
self.unknown[cmd] = 1
else:
self.unknown[cmd] = self.unknown[cmd] + 1
def do_end(self, args):
words = args.split()
if not words:
print '*** @end w/o args'
else:
cmd = words[0]
if not self.stack or self.stack[-1] <> cmd:
print '*** @end', cmd, 'unexpected'
else:
del self.stack[-1]
try:
func = getattr(self, 'end_' + cmd)
except AttributeError:
self.unknown_end(cmd)
return
func()
def unknown_end(self, cmd):
cmd = 'end ' + cmd
print '*** unknown', '@' + cmd
if not self.unknown.has_key(cmd):
self.unknown[cmd] = 1
else:
self.unknown[cmd] = self.unknown[cmd] + 1
# --- Comments ---
def do_comment(self, args): pass
do_c = do_comment
# --- Conditional processing ---
def bgn_ifinfo(self, args): pass
def end_ifinfo(self): pass
def bgn_iftex(self, args): self.skip = self.skip + 1
def end_iftex(self): self.skip = self.skip - 1
def bgn_ignore(self, args): self.skip = self.skip + 1
def end_ignore(self): self.skip = self.skip - 1
def bgn_tex(self, args): self.skip = self.skip + 1
def end_tex(self): self.skip = self.skip - 1
def do_set(self, args):
fields = args.split(' ')
key = fields[0]
if len(fields) == 1:
value = 1
else:
value = ' '.join(fields[1:])
self.values[key] = value
def do_clear(self, args):
self.values[args] = None
def bgn_ifset(self, args):
if args not in self.values.keys() \
or self.values[args] is None:
self.skip = self.skip + 1
self.stackinfo[len(self.stack)] = 1
else:
self.stackinfo[len(self.stack)] = 0
def end_ifset(self):
try:
if self.stackinfo[len(self.stack) + 1]:
self.skip = self.skip - 1
del self.stackinfo[len(self.stack) + 1]
except KeyError:
print '*** end_ifset: KeyError :', len(self.stack) + 1
def bgn_ifclear(self, args):
if args in self.values.keys() \
and self.values[args] is not None:
self.skip = self.skip + 1
self.stackinfo[len(self.stack)] = 1
else:
self.stackinfo[len(self.stack)] = 0
def end_ifclear(self):
try:
if self.stackinfo[len(self.stack) + 1]:
self.skip = self.skip - 1
del self.stackinfo[len(self.stack) + 1]
except KeyError:
print '*** end_ifclear: KeyError :', len(self.stack) + 1
def open_value(self):
self.startsaving()
def close_value(self):
key = self.collectsavings()
if key in self.values.keys():
self.write(self.values[key])
else:
print '*** Undefined value: ', key
# --- Beginning a file ---
do_finalout = do_comment
do_setchapternewpage = do_comment
do_setfilename = do_comment
def do_settitle(self, args):
self.startsaving()
self.expand(args)
self.title = self.collectsavings()
def do_parskip(self, args): pass
# --- Ending a file ---
def do_bye(self, args):
self.endnode()
self.done = 1
# --- Title page ---
def bgn_titlepage(self, args): self.skip = self.skip + 1
def end_titlepage(self): self.skip = self.skip - 1
def do_shorttitlepage(self, args): pass
def do_center(self, args):
# Actually not used outside title page...
self.write('<H1>')
self.expand(args)
self.write('</H1>\n')
do_title = do_center
do_subtitle = do_center
do_author = do_center
do_vskip = do_comment
do_vfill = do_comment
do_smallbook = do_comment
do_paragraphindent = do_comment
do_setchapternewpage = do_comment
do_headings = do_comment
do_footnotestyle = do_comment
do_evenheading = do_comment
do_evenfooting = do_comment
do_oddheading = do_comment
do_oddfooting = do_comment
do_everyheading = do_comment
do_everyfooting = do_comment
# --- Nodes ---
def do_node(self, args):
self.endnode()
self.nodelineno = 0
parts = [s.strip() for s in args.split(',')]
while len(parts) < 4: parts.append('')
self.nodelinks = parts
[name, next, prev, up] = parts[:4]
file = self.dirname + '/' + makefile(name)
if self.filenames.has_key(file):
print '*** Filename already in use: ', file
else:
if self.debugging: print '!'*self.debugging, '--- writing', file
self.filenames[file] = 1
# self.nodefp = open(file, 'w')
self.nodename = name
if self.cont and self.nodestack:
self.nodestack[-1].cont = self.nodename
if not self.topname: self.topname = name
title = name
if self.title: title = title + ' -- ' + self.title
self.node = self.Node(self.dirname, self.nodename, self.topname,
title, next, prev, up)
self.htmlhelp.addnode(self.nodename,next,prev,up,file)
def link(self, label, nodename):
if nodename:
if nodename.lower() == '(dir)':
addr = '../dir.html'
else:
addr = makefile(nodename)
self.write(label, ': <A HREF="', addr, '" TYPE="',
label, '">', nodename, '</A> \n')
# --- Sectioning commands ---
def popstack(self, type):
if (self.node):
self.node.type = type
while self.nodestack:
if self.nodestack[-1].type > type:
self.nodestack[-1].finalize()
self.nodestack[-1].flush()
del self.nodestack[-1]
elif self.nodestack[-1].type == type:
if not self.nodestack[-1].next:
self.nodestack[-1].next = self.node.name
if not self.node.prev:
self.node.prev = self.nodestack[-1].name
self.nodestack[-1].finalize()
self.nodestack[-1].flush()
del self.nodestack[-1]
else:
if type > 1 and not self.node.up:
self.node.up = self.nodestack[-1].name
break
def do_chapter(self, args):
self.heading('H1', args, 0)
self.popstack(1)
def do_unnumbered(self, args):
self.heading('H1', args, -1)
self.popstack(1)
def do_appendix(self, args):
self.heading('H1', args, -1)
self.popstack(1)
def do_top(self, args):
self.heading('H1', args, -1)
def do_chapheading(self, args):
self.heading('H1', args, -1)
def do_majorheading(self, args):
self.heading('H1', args, -1)
def do_section(self, args):
self.heading('H1', args, 1)
self.popstack(2)
def do_unnumberedsec(self, args):
self.heading('H1', args, -1)
self.popstack(2)
def do_appendixsec(self, args):
self.heading('H1', args, -1)
self.popstack(2)
do_appendixsection = do_appendixsec
def do_heading(self, args):
self.heading('H1', args, -1)
def do_subsection(self, args):
self.heading('H2', args, 2)
self.popstack(3)
def do_unnumberedsubsec(self, args):
self.heading('H2', args, -1)
self.popstack(3)
def do_appendixsubsec(self, args):
self.heading('H2', args, -1)
self.popstack(3)
def do_subheading(self, args):
self.heading('H2', args, -1)
def do_subsubsection(self, args):
self.heading('H3', args, 3)
self.popstack(4)
def do_unnumberedsubsubsec(self, args):
self.heading('H3', args, -1)
self.popstack(4)
def do_appendixsubsubsec(self, args):
self.heading('H3', args, -1)
self.popstack(4)
def do_subsubheading(self, args):
self.heading('H3', args, -1)
def heading(self, type, args, level):
if level >= 0:
while len(self.numbering) <= level:
self.numbering.append(0)
del self.numbering[level+1:]
self.numbering[level] = self.numbering[level] + 1
x = ''
for i in self.numbering:
x = x + repr(i) + '.'
args = x + ' ' + args
self.contents.append((level, args, self.nodename))
self.write('<', type, '>')
self.expand(args)
self.write('</', type, '>\n')
if self.debugging or self.print_headers:
print '---', args
def do_contents(self, args):
# pass
self.listcontents('Table of Contents', 999)
def do_shortcontents(self, args):
pass
# self.listcontents('Short Contents', 0)
do_summarycontents = do_shortcontents
def listcontents(self, title, maxlevel):
self.write('<H1>', title, '</H1>\n<UL COMPACT PLAIN>\n')
prevlevels = [0]
for level, title, node in self.contents:
if level > maxlevel:
continue
if level > prevlevels[-1]:
# can only advance one level at a time
self.write(' '*prevlevels[-1], '<UL PLAIN>\n')
prevlevels.append(level)
elif level < prevlevels[-1]:
# might drop back multiple levels
while level < prevlevels[-1]:
del prevlevels[-1]
self.write(' '*prevlevels[-1],
'</UL>\n')
self.write(' '*level, '<LI> <A HREF="',
makefile(node), '">')
self.expand(title)
self.write('</A>\n')
self.write('</UL>\n' * len(prevlevels))
# --- Page lay-out ---
# These commands are only meaningful in printed text
def do_page(self, args): pass
def do_need(self, args): pass
def bgn_group(self, args): pass
def end_group(self): pass
# --- Line lay-out ---
def do_sp(self, args):
if self.nofill:
self.write('\n')
else:
self.write('<P>\n')
def do_hline(self, args):
self.write('<HR>')
# --- Function and variable definitions ---
def bgn_deffn(self, args):
self.write('<DL>')
self.do_deffnx(args)
def end_deffn(self):
self.write('</DL>\n')
def do_deffnx(self, args):
self.write('<DT>')
words = splitwords(args, 2)
[category, name], rest = words[:2], words[2:]
self.expand('@b{%s}' % name)
for word in rest: self.expand(' ' + makevar(word))
#self.expand(' -- ' + category)
self.write('\n<DD>')
self.index('fn', name)
def bgn_defun(self, args): self.bgn_deffn('Function ' + args)
end_defun = end_deffn
def do_defunx(self, args): self.do_deffnx('Function ' + args)
def bgn_defmac(self, args): self.bgn_deffn('Macro ' + args)
end_defmac = end_deffn
def do_defmacx(self, args): self.do_deffnx('Macro ' + args)
def bgn_defspec(self, args): self.bgn_deffn('{Special Form} ' + args)
end_defspec = end_deffn
def do_defspecx(self, args): self.do_deffnx('{Special Form} ' + args)
def bgn_defvr(self, args):
self.write('<DL>')
self.do_defvrx(args)
end_defvr = end_deffn
def do_defvrx(self, args):
self.write('<DT>')
words = splitwords(args, 2)
[category, name], rest = words[:2], words[2:]
self.expand('@code{%s}' % name)
# If there are too many arguments, show them
for word in rest: self.expand(' ' + word)
#self.expand(' -- ' + category)
self.write('\n<DD>')
self.index('vr', name)
def bgn_defvar(self, args): self.bgn_defvr('Variable ' + args)
end_defvar = end_defvr
def do_defvarx(self, args): self.do_defvrx('Variable ' + args)
def bgn_defopt(self, args): self.bgn_defvr('{User Option} ' + args)
end_defopt = end_defvr
def do_defoptx(self, args): self.do_defvrx('{User Option} ' + args)
# --- Ditto for typed languages ---
def bgn_deftypefn(self, args):
self.write('<DL>')
self.do_deftypefnx(args)
end_deftypefn = end_deffn
def do_deftypefnx(self, args):
self.write('<DT>')
words = splitwords(args, 3)
[category, datatype, name], rest = words[:3], words[3:]
self.expand('@code{%s} @b{%s}' % (datatype, name))
for word in rest: self.expand(' ' + makevar(word))
#self.expand(' -- ' + category)
self.write('\n<DD>')
self.index('fn', name)
def bgn_deftypefun(self, args): self.bgn_deftypefn('Function ' + args)
end_deftypefun = end_deftypefn
def do_deftypefunx(self, args): self.do_deftypefnx('Function ' + args)
def bgn_deftypevr(self, args):
self.write('<DL>')
self.do_deftypevrx(args)
end_deftypevr = end_deftypefn
def do_deftypevrx(self, args):
self.write('<DT>')
words = splitwords(args, 3)
[category, datatype, name], rest = words[:3], words[3:]
self.expand('@code{%s} @b{%s}' % (datatype, name))
# If there are too many arguments, show them
for word in rest: self.expand(' ' + word)
#self.expand(' -- ' + category)
self.write('\n<DD>')
self.index('fn', name)
def bgn_deftypevar(self, args):
self.bgn_deftypevr('Variable ' + args)
end_deftypevar = end_deftypevr
def do_deftypevarx(self, args):
self.do_deftypevrx('Variable ' + args)
# --- Ditto for object-oriented languages ---
def bgn_defcv(self, args):
self.write('<DL>')
self.do_defcvx(args)
end_defcv = end_deftypevr
def do_defcvx(self, args):
self.write('<DT>')
words = splitwords(args, 3)
[category, classname, name], rest = words[:3], words[3:]
self.expand('@b{%s}' % name)
# If there are too many arguments, show them
for word in rest: self.expand(' ' + word)
#self.expand(' -- %s of @code{%s}' % (category, classname))
self.write('\n<DD>')
self.index('vr', '%s @r{on %s}' % (name, classname))
def bgn_defivar(self, args):
self.bgn_defcv('{Instance Variable} ' + args)
end_defivar = end_defcv
def do_defivarx(self, args):
self.do_defcvx('{Instance Variable} ' + args)
def bgn_defop(self, args):
self.write('<DL>')
self.do_defopx(args)
end_defop = end_defcv
def do_defopx(self, args):
self.write('<DT>')
words = splitwords(args, 3)
[category, classname, name], rest = words[:3], words[3:]
self.expand('@b{%s}' % name)
for word in rest: self.expand(' ' + makevar(word))
#self.expand(' -- %s of @code{%s}' % (category, classname))
self.write('\n<DD>')
self.index('fn', '%s @r{on %s}' % (name, classname))
def bgn_defmethod(self, args):
self.bgn_defop('Method ' + args)
end_defmethod = end_defop
def do_defmethodx(self, args):
self.do_defopx('Method ' + args)
# --- Ditto for data types ---
def bgn_deftp(self, args):
self.write('<DL>')
self.do_deftpx(args)
end_deftp = end_defcv
def do_deftpx(self, args):
self.write('<DT>')
words = splitwords(args, 2)
[category, name], rest = words[:2], words[2:]
self.expand('@b{%s}' % name)
for word in rest: self.expand(' ' + word)
#self.expand(' -- ' + category)
self.write('\n<DD>')
self.index('tp', name)
# --- Making Lists and Tables
def bgn_enumerate(self, args):
if not args:
self.write('<OL>\n')
self.stackinfo[len(self.stack)] = '</OL>\n'
else:
self.itemnumber = args
self.write('<UL>\n')
self.stackinfo[len(self.stack)] = '</UL>\n'
def end_enumerate(self):
self.itemnumber = None
self.write(self.stackinfo[len(self.stack) + 1])
del self.stackinfo[len(self.stack) + 1]
def bgn_itemize(self, args):
self.itemarg = args
self.write('<UL>\n')
def end_itemize(self):
self.itemarg = None
self.write('</UL>\n')
def bgn_table(self, args):
self.itemarg = args
self.write('<DL>\n')
def end_table(self):
self.itemarg = None
self.write('</DL>\n')
def bgn_ftable(self, args):
self.itemindex = 'fn'
self.bgn_table(args)
def end_ftable(self):
self.itemindex = None
self.end_table()
def bgn_vtable(self, args):
self.itemindex = 'vr'
self.bgn_table(args)
def end_vtable(self):
self.itemindex = None
self.end_table()
def do_item(self, args):
if self.itemindex: self.index(self.itemindex, args)
if self.itemarg:
if self.itemarg[0] == '@' and self.itemarg[1] and \
self.itemarg[1] in string.ascii_letters:
args = self.itemarg + '{' + args + '}'
else:
# some other character, e.g. '-'
args = self.itemarg + ' ' + args
if self.itemnumber <> None:
args = self.itemnumber + '. ' + args
self.itemnumber = increment(self.itemnumber)
if self.stack and self.stack[-1] == 'table':
self.write('<DT>')
self.expand(args)
self.write('\n<DD>')
elif self.stack and self.stack[-1] == 'multitable':
self.write('<TR><TD>')
self.expand(args)
self.write('</TD>\n</TR>\n')
else:
self.write('<LI>')
self.expand(args)
self.write(' ')
do_itemx = do_item # XXX Should suppress leading blank line
# rpyron 2002-05-07 multitable support
def bgn_multitable(self, args):
self.itemarg = None # should be handled by columnfractions
self.write('<TABLE BORDER="">\n')
def end_multitable(self):
self.itemarg = None
self.write('</TABLE>\n<BR>\n')
def handle_columnfractions(self):
# It would be better to handle this, but for now it's in the way...
self.itemarg = None
def handle_tab(self):
self.write('</TD>\n <TD>')
# --- Enumerations, displays, quotations ---
# XXX Most of these should increase the indentation somehow
def bgn_quotation(self, args): self.write('<BLOCKQUOTE>')
def end_quotation(self): self.write('</BLOCKQUOTE>\n')
def bgn_example(self, args):
self.nofill = self.nofill + 1
self.write('<PRE>')
def end_example(self):
self.write('</PRE>\n')
self.nofill = self.nofill - 1
bgn_lisp = bgn_example # Synonym when contents are executable lisp code
end_lisp = end_example
bgn_smallexample = bgn_example # XXX Should use smaller font
end_smallexample = end_example
bgn_smalllisp = bgn_lisp # Ditto
end_smalllisp = end_lisp
bgn_display = bgn_example
end_display = end_example
bgn_format = bgn_display
end_format = end_display
def do_exdent(self, args): self.expand(args + '\n')
# XXX Should really mess with indentation
def bgn_flushleft(self, args):
self.nofill = self.nofill + 1
self.write('<PRE>\n')
def end_flushleft(self):
self.write('</PRE>\n')
self.nofill = self.nofill - 1
def bgn_flushright(self, args):
self.nofill = self.nofill + 1
self.write('<ADDRESS COMPACT>\n')
def end_flushright(self):
self.write('</ADDRESS>\n')
self.nofill = self.nofill - 1
def bgn_menu(self, args):
self.write('<DIR>\n')
self.write(' <STRONG><EM>Menu</EM></STRONG><P>\n')
self.htmlhelp.beginmenu()
def end_menu(self):
self.write('</DIR>\n')
self.htmlhelp.endmenu()
def bgn_cartouche(self, args): pass
def end_cartouche(self): pass
# --- Indices ---
def resetindex(self):
self.noncodeindices = ['cp']
self.indextitle = {}
self.indextitle['cp'] = 'Concept'
self.indextitle['fn'] = 'Function'
self.indextitle['ky'] = 'Keyword'
self.indextitle['pg'] = 'Program'
self.indextitle['tp'] = 'Type'
self.indextitle['vr'] = 'Variable'
#
self.whichindex = {}
for name in self.indextitle.keys():
self.whichindex[name] = []
def user_index(self, name, args):
if self.whichindex.has_key(name):
self.index(name, args)
else:
print '*** No index named', repr(name)
def do_cindex(self, args): self.index('cp', args)
def do_findex(self, args): self.index('fn', args)
def do_kindex(self, args): self.index('ky', args)
def do_pindex(self, args): self.index('pg', args)
def do_tindex(self, args): self.index('tp', args)
def do_vindex(self, args): self.index('vr', args)
def index(self, name, args):
self.whichindex[name].append((args, self.nodename))
self.htmlhelp.index(args, self.nodename)
def do_synindex(self, args):
words = args.split()
if len(words) <> 2:
print '*** bad @synindex', args
return
[old, new] = words
if not self.whichindex.has_key(old) or \
not self.whichindex.has_key(new):
print '*** bad key(s) in @synindex', args
return
if old <> new and \
self.whichindex[old] is not self.whichindex[new]:
inew = self.whichindex[new]
inew[len(inew):] = self.whichindex[old]
self.whichindex[old] = inew
do_syncodeindex = do_synindex # XXX Should use code font
def do_printindex(self, args):
words = args.split()
for name in words:
if self.whichindex.has_key(name):
self.prindex(name)
else:
print '*** No index named', repr(name)
def prindex(self, name):
iscodeindex = (name not in self.noncodeindices)
index = self.whichindex[name]
if not index: return
if self.debugging:
print '!'*self.debugging, '--- Generating', \
self.indextitle[name], 'index'
# The node already provides a title
index1 = []
junkprog = re.compile('^(@[a-z]+)?{')
for key, node in index:
sortkey = key.lower()
# Remove leading `@cmd{' from sort key
# -- don't bother about the matching `}'
oldsortkey = sortkey
while 1:
mo = junkprog.match(sortkey)
if not mo:
break
i = mo.end()
sortkey = sortkey[i:]
index1.append((sortkey, key, node))
del index[:]
index1.sort()
self.write('<DL COMPACT>\n')
prevkey = prevnode = None
for sortkey, key, node in index1:
if (key, node) == (prevkey, prevnode):
continue
if self.debugging > 1: print '!'*self.debugging, key, ':', node
self.write('<DT>')
if iscodeindex: key = '@code{' + key + '}'
if key != prevkey:
self.expand(key)
self.write('\n<DD><A HREF="%s">%s</A>\n' % (makefile(node), node))
prevkey, prevnode = key, node
self.write('</DL>\n')
# --- Final error reports ---
def report(self):
if self.unknown:
print '--- Unrecognized commands ---'
cmds = self.unknown.keys()
cmds.sort()
for cmd in cmds:
print cmd.ljust(20), self.unknown[cmd]
class TexinfoParserHTML3(TexinfoParser):
COPYRIGHT_SYMBOL = "&copy;"
FN_ID_PATTERN = "[%(id)s]"
FN_SOURCE_PATTERN = '<A ID=footnoteref%(id)s ' \
'HREF="#footnotetext%(id)s">' + FN_ID_PATTERN + '</A>'
FN_TARGET_PATTERN = '<FN ID=footnotetext%(id)s>\n' \
'<P><A HREF="#footnoteref%(id)s">' + FN_ID_PATTERN \
+ '</A>\n%(text)s</P></FN>\n'
FN_HEADER = '<DIV CLASS=footnotes>\n <HR NOSHADE WIDTH=200>\n' \
' <STRONG><EM>Footnotes</EM></STRONG>\n <P>\n'
Node = HTML3Node
def bgn_quotation(self, args): self.write('<BQ>')
def end_quotation(self): self.write('</BQ>\n')
def bgn_example(self, args):
# this use of <CODE> would not be legal in HTML 2.0,
# but is in more recent DTDs.
self.nofill = self.nofill + 1
self.write('<PRE CLASS=example><CODE>')
def end_example(self):
self.write("</CODE></PRE>\n")
self.nofill = self.nofill - 1
def bgn_flushleft(self, args):
self.nofill = self.nofill + 1
self.write('<PRE CLASS=flushleft>\n')
def bgn_flushright(self, args):
self.nofill = self.nofill + 1
self.write('<DIV ALIGN=right CLASS=flushright><ADDRESS COMPACT>\n')
def end_flushright(self):
self.write('</ADDRESS></DIV>\n')
self.nofill = self.nofill - 1
def bgn_menu(self, args):
self.write('<UL PLAIN CLASS=menu>\n')
self.write(' <LH>Menu</LH>\n')
def end_menu(self):
self.write('</UL>\n')
# rpyron 2002-05-07
class HTMLHelp:
"""
This class encapsulates support for HTML Help. Node names,
file names, menu items, index items, and image file names are
accumulated until a call to finalize(). At that time, three
output files are created in the current directory:
`helpbase`.hhp is a HTML Help Workshop project file.
It contains various information, some of
which I do not understand; I just copied
the default project info from a fresh
installation.
`helpbase`.hhc is the Contents file for the project.
`helpbase`.hhk is the Index file for the project.
When these files are used as input to HTML Help Workshop,
the resulting file will be named:
`helpbase`.chm
If none of the defaults in `helpbase`.hhp are changed,
the .CHM file will have Contents, Index, Search, and
Favorites tabs.
"""
codeprog = re.compile('@code{(.*?)}')
def __init__(self,helpbase,dirname):
self.helpbase = helpbase
self.dirname = dirname
self.projectfile = None
self.contentfile = None
self.indexfile = None
self.nodelist = []
self.nodenames = {} # nodename : index
self.nodeindex = {}
self.filenames = {} # filename : filename
self.indexlist = [] # (args,nodename) == (key,location)
self.current = ''
self.menudict = {}
self.dumped = {}
def addnode(self,name,next,prev,up,filename):
node = (name,next,prev,up,filename)
# add this file to dict
# retrieve list with self.filenames.values()
self.filenames[filename] = filename
# add this node to nodelist
self.nodeindex[name] = len(self.nodelist)
self.nodelist.append(node)
# set 'current' for menu items
self.current = name
self.menudict[self.current] = []
def menuitem(self,nodename):
menu = self.menudict[self.current]
menu.append(nodename)
def addimage(self,imagename):
self.filenames[imagename] = imagename
def index(self, args, nodename):
self.indexlist.append((args,nodename))
def beginmenu(self):
pass
def endmenu(self):
pass
def finalize(self):
if not self.helpbase:
return
# generate interesting filenames
resultfile = self.helpbase + '.chm'
projectfile = self.helpbase + '.hhp'
contentfile = self.helpbase + '.hhc'
indexfile = self.helpbase + '.hhk'
# generate a reasonable title
title = self.helpbase
# get the default topic file
(topname,topnext,topprev,topup,topfile) = self.nodelist[0]
defaulttopic = topfile
# PROJECT FILE
try:
fp = open(projectfile,'w')
print>>fp, '[OPTIONS]'
print>>fp, 'Auto Index=Yes'
print>>fp, 'Binary TOC=No'
print>>fp, 'Binary Index=Yes'
print>>fp, 'Compatibility=1.1'
print>>fp, 'Compiled file=' + resultfile + ''
print>>fp, 'Contents file=' + contentfile + ''
print>>fp, 'Default topic=' + defaulttopic + ''
print>>fp, 'Error log file=ErrorLog.log'
print>>fp, 'Index file=' + indexfile + ''
print>>fp, 'Title=' + title + ''
print>>fp, 'Display compile progress=Yes'
print>>fp, 'Full-text search=Yes'
print>>fp, 'Default window=main'
print>>fp, ''
print>>fp, '[WINDOWS]'
print>>fp, ('main=,"' + contentfile + '","' + indexfile
+ '","","",,,,,0x23520,222,0x1046,[10,10,780,560],'
'0xB0000,,,,,,0')
print>>fp, ''
print>>fp, '[FILES]'
print>>fp, ''
self.dumpfiles(fp)
fp.close()
except IOError, msg:
print projectfile, ':', msg
sys.exit(1)
# CONTENT FILE
try:
fp = open(contentfile,'w')
print>>fp, '<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">'
print>>fp, '<!-- This file defines the table of contents -->'
print>>fp, '<HTML>'
print>>fp, '<HEAD>'
print>>fp, ('<meta name="GENERATOR"'
'content="Microsoft® HTML Help Workshop 4.1">')
print>>fp, '<!-- Sitemap 1.0 -->'
print>>fp, '</HEAD>'
print>>fp, '<BODY>'
print>>fp, ' <OBJECT type="text/site properties">'
print>>fp, ' <param name="Window Styles" value="0x800025">'
print>>fp, ' <param name="comment" value="title:">'
print>>fp, ' <param name="comment" value="base:">'
print>>fp, ' </OBJECT>'
self.dumpnodes(fp)
print>>fp, '</BODY>'
print>>fp, '</HTML>'
fp.close()
except IOError, msg:
print contentfile, ':', msg
sys.exit(1)
# INDEX FILE
try:
fp = open(indexfile ,'w')
print>>fp, '<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">'
print>>fp, '<!-- This file defines the index -->'
print>>fp, '<HTML>'
print>>fp, '<HEAD>'
print>>fp, ('<meta name="GENERATOR"'
'content="Microsoft® HTML Help Workshop 4.1">')
print>>fp, '<!-- Sitemap 1.0 -->'
print>>fp, '</HEAD>'
print>>fp, '<BODY>'
print>>fp, '<OBJECT type="text/site properties">'
print>>fp, '</OBJECT>'
self.dumpindex(fp)
print>>fp, '</BODY>'
print>>fp, '</HTML>'
fp.close()
except IOError, msg:
print indexfile , ':', msg
sys.exit(1)
def dumpfiles(self, outfile=sys.stdout):
filelist = self.filenames.values()
filelist.sort()
for filename in filelist:
print>>outfile, filename
def dumpnodes(self, outfile=sys.stdout):
self.dumped = {}
if self.nodelist:
nodename, dummy, dummy, dummy, dummy = self.nodelist[0]
self.topnode = nodename
print>>outfile, '<UL>'
for node in self.nodelist:
self.dumpnode(node,0,outfile)
print>>outfile, '</UL>'
def dumpnode(self, node, indent=0, outfile=sys.stdout):
if node:
# Retrieve info for this node
(nodename,next,prev,up,filename) = node
self.current = nodename
# Have we been dumped already?
if self.dumped.has_key(nodename):
return
self.dumped[nodename] = 1
# Print info for this node
print>>outfile, ' '*indent,
print>>outfile, '<LI><OBJECT type="text/sitemap">',
print>>outfile, '<param name="Name" value="' + nodename +'">',
print>>outfile, '<param name="Local" value="'+ filename +'">',
print>>outfile, '</OBJECT>'
# Does this node have menu items?
try:
menu = self.menudict[nodename]
self.dumpmenu(menu,indent+2,outfile)
except KeyError:
pass
def dumpmenu(self, menu, indent=0, outfile=sys.stdout):
if menu:
currentnode = self.current
if currentnode != self.topnode: # XXX this is a hack
print>>outfile, ' '*indent + '<UL>'
indent += 2
for item in menu:
menunode = self.getnode(item)
self.dumpnode(menunode,indent,outfile)
if currentnode != self.topnode: # XXX this is a hack
print>>outfile, ' '*indent + '</UL>'
indent -= 2
def getnode(self, nodename):
try:
index = self.nodeindex[nodename]
return self.nodelist[index]
except KeyError:
return None
except IndexError:
return None
# (args,nodename) == (key,location)
def dumpindex(self, outfile=sys.stdout):
print>>outfile, '<UL>'
for (key,location) in self.indexlist:
key = self.codeexpand(key)
location = makefile(location)
location = self.dirname + '/' + location
print>>outfile, '<LI><OBJECT type="text/sitemap">',
print>>outfile, '<param name="Name" value="' + key + '">',
print>>outfile, '<param name="Local" value="' + location + '">',
print>>outfile, '</OBJECT>'
print>>outfile, '</UL>'
def codeexpand(self, line):
co = self.codeprog.match(line)
if not co:
return line
bgn, end = co.span(0)
a, b = co.span(1)
line = line[:bgn] + line[a:b] + line[end:]
return line
# Put @var{} around alphabetic substrings
def makevar(str):
return '@var{'+str+'}'
# Split a string in "words" according to findwordend
def splitwords(str, minlength):
words = []
i = 0
n = len(str)
while i < n:
while i < n and str[i] in ' \t\n': i = i+1
if i >= n: break
start = i
i = findwordend(str, i, n)
words.append(str[start:i])
while len(words) < minlength: words.append('')
return words
# Find the end of a "word", matching braces and interpreting @@ @{ @}
fwprog = re.compile('[@{} ]')
def findwordend(str, i, n):
level = 0
while i < n:
mo = fwprog.search(str, i)
if not mo:
break
i = mo.start()
c = str[i]; i = i+1
if c == '@': i = i+1 # Next character is not special
elif c == '{': level = level+1
elif c == '}': level = level-1
elif c == ' ' and level <= 0: return i-1
return n
# Convert a node name into a file name
def makefile(nodename):
nodename = nodename.strip()
return fixfunnychars(nodename) + '.html'
# Characters that are perfectly safe in filenames and hyperlinks
goodchars = string.ascii_letters + string.digits + '!@-=+.'
# Replace characters that aren't perfectly safe by dashes
# Underscores are bad since Cern HTTPD treats them as delimiters for
# encoding times, so you get mismatches if you compress your files:
# a.html.gz will map to a_b.html.gz
def fixfunnychars(addr):
i = 0
while i < len(addr):
c = addr[i]
if c not in goodchars:
c = '-'
addr = addr[:i] + c + addr[i+1:]
i = i + len(c)
return addr
# Increment a string used as an enumeration
def increment(s):
if not s:
return '1'
for sequence in string.digits, string.lowercase, string.uppercase:
lastc = s[-1]
if lastc in sequence:
i = sequence.index(lastc) + 1
if i >= len(sequence):
if len(s) == 1:
s = sequence[0]*2
if s == '00':
s = '10'
else:
s = increment(s[:-1]) + sequence[0]
else:
s = s[:-1] + sequence[i]
return s
return s # Don't increment
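# Illustrative sketch, not part of the original script: a throwaway check of the
# helper functions above. It is never called automatically; run it by hand to
# sanity-check the filename/enumeration helpers.
def _demo_helpers():
    # Unsafe filename characters (spaces, slashes) become dashes.
    assert fixfunnychars('Node name/with spaces') == 'Node-name-with-spaces'
    # makefile() strips surrounding whitespace and appends '.html'.
    assert makefile('  Top  ') == 'Top.html'
    # increment() steps an enumeration string: '' -> '1', '1' -> '2', '9' -> '10'.
    assert increment('') == '1'
    assert increment('1') == '2'
    assert increment('9') == '10'
    # splitwords() pads the word list up to the requested minimum length.
    assert splitwords('one two', 3) == ['one', 'two', '']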
def test():
import sys
debugging = 0
print_headers = 0
cont = 0
html3 = 0
    helpbase = ''  # default so HTMLHelp() below gets a value when -H is not given
    htmlhelp = ''
    while sys.argv[1] == '-d':
debugging = debugging + 1
del sys.argv[1]
if sys.argv[1] == '-p':
print_headers = 1
del sys.argv[1]
if sys.argv[1] == '-c':
cont = 1
del sys.argv[1]
if sys.argv[1] == '-3':
html3 = 1
del sys.argv[1]
if sys.argv[1] == '-H':
helpbase = sys.argv[2]
del sys.argv[1:3]
    if len(sys.argv) != 3:
print 'usage: texi2hh [-d [-d]] [-p] [-c] [-3] [-H htmlhelp]', \
'inputfile outputdirectory'
sys.exit(2)
if html3:
parser = TexinfoParserHTML3()
else:
parser = TexinfoParser()
parser.cont = cont
parser.debugging = debugging
parser.print_headers = print_headers
file = sys.argv[1]
dirname = sys.argv[2]
parser.setdirname(dirname)
parser.setincludedir(os.path.dirname(file))
htmlhelp = HTMLHelp(helpbase, dirname)
parser.sethtmlhelp(htmlhelp)
try:
fp = open(file, 'r')
except IOError, msg:
print file, ':', msg
sys.exit(1)
parser.parse(fp)
fp.close()
parser.report()
htmlhelp.finalize()
if __name__ == "__main__":
test()
| mit |
nox/servo | tests/wpt/harness/wptrunner/browsers/webdriver.py | 194 | 4219 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import errno
import socket
import time
import traceback
import urlparse
import mozprocess
from .base import get_free_port, cmd_arg
__all__ = ["SeleniumLocalServer", "ChromedriverLocalServer"]
class LocalServer(object):
used_ports = set()
default_endpoint = "/"
def __init__(self, logger, binary, port=None, endpoint=None):
self.logger = logger
self.binary = binary
self.port = port
self.endpoint = endpoint or self.default_endpoint
if self.port is None:
self.port = get_free_port(4444, exclude=self.used_ports)
self.used_ports.add(self.port)
self.url = "http://127.0.0.1:%i%s" % (self.port, self.endpoint)
self.proc, self.cmd = None, None
def start(self):
self.proc = mozprocess.ProcessHandler(
self.cmd, processOutputLine=self.on_output)
try:
self.proc.run()
except OSError as e:
if e.errno == errno.ENOENT:
                raise IOError(
                    "server executable not found: %s" % self.binary)
raise
self.logger.debug(
"Waiting for server to become accessible: %s" % self.url)
surl = urlparse.urlparse(self.url)
addr = (surl.hostname, surl.port)
try:
wait_service(addr)
except:
self.logger.error(
"Server was not accessible within the timeout:\n%s" % traceback.format_exc())
raise
else:
self.logger.info("Server listening on port %i" % self.port)
def stop(self):
if hasattr(self.proc, "proc"):
self.proc.kill()
def is_alive(self):
if hasattr(self.proc, "proc"):
exitcode = self.proc.poll()
return exitcode is None
return False
def on_output(self, line):
self.logger.process_output(self.pid,
line.decode("utf8", "replace"),
command=" ".join(self.cmd))
@property
def pid(self):
if hasattr(self.proc, "proc"):
return self.proc.pid
class SeleniumLocalServer(LocalServer):
default_endpoint = "/wd/hub"
def __init__(self, logger, binary, port=None):
LocalServer.__init__(self, logger, binary, port=port)
self.cmd = ["java",
"-jar", self.binary,
"-port", str(self.port)]
def start(self):
self.logger.debug("Starting local Selenium server")
LocalServer.start(self)
def stop(self):
LocalServer.stop(self)
self.logger.info("Selenium server stopped listening")
class ChromedriverLocalServer(LocalServer):
default_endpoint = "/wd/hub"
def __init__(self, logger, binary="chromedriver", port=None, endpoint=None):
LocalServer.__init__(self, logger, binary, port=port, endpoint=endpoint)
# TODO: verbose logging
self.cmd = [self.binary,
cmd_arg("port", str(self.port)) if self.port else "",
cmd_arg("url-base", self.endpoint) if self.endpoint else ""]
def start(self):
self.logger.debug("Starting local chromedriver server")
LocalServer.start(self)
def stop(self):
LocalServer.stop(self)
self.logger.info("chromedriver server stopped listening")
def wait_service(addr, timeout=15):
"""Waits until network service given as a tuple of (host, port) becomes
available or the `timeout` duration is reached, at which point
``socket.error`` is raised."""
end = time.time() + timeout
while end > time.time():
so = socket.socket()
try:
so.connect(addr)
except socket.timeout:
pass
except socket.error as e:
if e[0] != errno.ECONNREFUSED:
raise
else:
return True
finally:
so.close()
time.sleep(0.5)
raise socket.error("Service is unavailable: %s:%i" % addr)
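# Illustrative sketch, not part of the upstream module: exercises wait_service()
# against a throwaway listener in this process. The loopback address and the
# background accept thread are demo-only assumptions; nothing here runs unless
# the function is called explicitly.
def _demo_wait_service():
    import threading
    listener = socket.socket()
    listener.bind(("127.0.0.1", 0))  # let the OS pick a free port
    listener.listen(1)
    addr = listener.getsockname()
    # Accept one connection in the background so the demo socket is serviced.
    acceptor = threading.Thread(target=listener.accept)
    acceptor.daemon = True
    acceptor.start()
    try:
        return wait_service(addr, timeout=5)  # True once the port is reachable
    finally:
        listener.close()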
| mpl-2.0 |
Vingaard/conpot | conpot/protocols/kamstrup/usage_simulator.py | 3 | 4908 | # Copyright (C) 2014 Johnny Vestergaard <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
import gevent
import conpot.core as conpot_core
logger = logging.getLogger(__name__)
# Simulates power usage for a Kamstrup 382 meter
class UsageSimulator(object):
def __init__(self, *args):
self._enabled = True
self.stopped = gevent.event.Event()
# both highres, lowres will be calculated on the fly
self.energy_in = 0
self.energy_out = 0
# p1, p2, p3
self.voltage = [0, 0, 0]
self.current = [0, 0, 0]
self.power = [0, 0, 0]
gevent.spawn(self.initialize)
def usage_counter(self):
while self._enabled:
# since this is gevent, this actually sleep for _at least_ 1 second
# TODO: measure last entry and figure it out < jkv: Figure what out?!?
gevent.sleep(1)
for x in [0, 1, 2]:
self.energy_in += int(self.power[x] * 0.0036)
# ready for shutdown!
self.stopped.set()
def stop(self):
self._enabled = False
self.stopped.wait()
def initialize(self):
# we need the databus initialized before we can probe values
databus = conpot_core.get_databus()
databus.initialized.wait()
# accumulated counter
energy_in_register = 'register_13'
self.energy_in = databus.get_value(energy_in_register)
databus.set_value(energy_in_register, self._get_energy_in)
databus.set_value('register_1', self._get_energy_in_lowres)
energy_out_register = 'register_14'
self.energy_out = databus.get_value(energy_out_register)
databus.set_value(energy_out_register, self._get_energy_out)
databus.set_value('register_2', self._get_energy_out_lowres)
volt_1_register = 'register_1054'
self.voltage[0] = databus.get_value(volt_1_register)
databus.set_value(volt_1_register, self._get_voltage_1)
volt_2_register = 'register_1055'
self.voltage[1] = databus.get_value(volt_2_register)
databus.set_value(volt_2_register, self._get_voltage_2)
volt_3_register = 'register_1056'
self.voltage[2] = databus.get_value(volt_3_register)
databus.set_value(volt_3_register, self._get_voltage_3)
current_1_register = 'register_1076'
self.current[0] = databus.get_value(current_1_register)
databus.set_value(current_1_register, self._get_current_1)
current_2_register = 'register_1077'
self.current[1] = databus.get_value(current_2_register)
databus.set_value(current_2_register, self._get_current_2)
current_3_register = 'register_1078'
self.current[2] = databus.get_value(current_3_register)
databus.set_value(current_3_register, self._get_current_3)
power_1_register = 'register_1080'
self.power[0] = databus.get_value(power_1_register)
databus.set_value(power_1_register, self._get_power_1)
power_2_register = 'register_1081'
self.power[1] = databus.get_value(power_2_register)
databus.set_value(power_2_register, self._get_power_2)
power_3_register = 'register_1082'
self.power[2] = databus.get_value(power_3_register)
databus.set_value(power_3_register, self._get_power_3)
gevent.spawn(self.usage_counter)
def _get_energy_in(self):
return self.energy_in
def _get_energy_out(self):
return self.energy_out
def _get_energy_in_lowres(self):
return self.energy_in / 1000
def _get_energy_out_lowres(self):
return self.energy_out / 1000
def _get_voltage_1(self):
return self.voltage[0]
def _get_voltage_2(self):
return self.voltage[1]
def _get_voltage_3(self):
return self.voltage[2]
def _get_current_1(self):
return self.current[0]
def _get_current_2(self):
return self.current[1]
def _get_current_3(self):
return self.current[2]
def _get_power_1(self):
return self.power[0]
def _get_power_2(self):
return self.power[1]
def _get_power_3(self):
return self.power[2]
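# Illustrative sketch, not part of the original simulator: replays the
# accumulation rule from usage_counter() without gevent or the conpot databus.
# The per-phase wattages below are invented sample values.
def _demo_accumulation(seconds=10, phase_power=(1500, 1200, 900)):
    energy_in = 0
    for _ in range(seconds):
        # One tick per (at least one) second in the real loop (gevent.sleep(1)).
        for power in phase_power:
            energy_in += int(power * 0.0036)
    return energy_in  # 120 for the defaults above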
| gpl-2.0 |
SpectraLogic/samba | third_party/dnspython/tests/resolver.py | 56 | 4279 | # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import select
import sys
import time
import unittest
import dns.name
import dns.message
import dns.name
import dns.rdataclass
import dns.rdatatype
import dns.resolver
resolv_conf = """
/t/t
# comment 1
; comment 2
domain foo
nameserver 10.0.0.1
nameserver 10.0.0.2
"""
message_text = """id 1234
opcode QUERY
rcode NOERROR
flags QR AA RD
;QUESTION
example. IN A
;ANSWER
example. 1 IN A 10.0.0.1
;AUTHORITY
;ADDITIONAL
"""
class BaseResolverTests(object):
if sys.platform != 'win32':
def testRead(self):
f = cStringIO.StringIO(resolv_conf)
r = dns.resolver.Resolver(f)
self.failUnless(r.nameservers == ['10.0.0.1', '10.0.0.2'] and
r.domain == dns.name.from_text('foo'))
def testCacheExpiration(self):
message = dns.message.from_text(message_text)
name = dns.name.from_text('example.')
answer = dns.resolver.Answer(name, dns.rdatatype.A, dns.rdataclass.IN,
message)
cache = dns.resolver.Cache()
cache.put((name, dns.rdatatype.A, dns.rdataclass.IN), answer)
time.sleep(2)
self.failUnless(cache.get((name, dns.rdatatype.A, dns.rdataclass.IN))
is None)
def testCacheCleaning(self):
message = dns.message.from_text(message_text)
name = dns.name.from_text('example.')
answer = dns.resolver.Answer(name, dns.rdatatype.A, dns.rdataclass.IN,
message)
cache = dns.resolver.Cache(cleaning_interval=1.0)
cache.put((name, dns.rdatatype.A, dns.rdataclass.IN), answer)
time.sleep(2)
self.failUnless(cache.get((name, dns.rdatatype.A, dns.rdataclass.IN))
is None)
def testZoneForName1(self):
name = dns.name.from_text('www.dnspython.org.')
ezname = dns.name.from_text('dnspython.org.')
zname = dns.resolver.zone_for_name(name)
self.failUnless(zname == ezname)
def testZoneForName2(self):
name = dns.name.from_text('a.b.www.dnspython.org.')
ezname = dns.name.from_text('dnspython.org.')
zname = dns.resolver.zone_for_name(name)
self.failUnless(zname == ezname)
def testZoneForName3(self):
name = dns.name.from_text('dnspython.org.')
ezname = dns.name.from_text('dnspython.org.')
zname = dns.resolver.zone_for_name(name)
self.failUnless(zname == ezname)
def testZoneForName4(self):
def bad():
name = dns.name.from_text('dnspython.org', None)
zname = dns.resolver.zone_for_name(name)
self.failUnlessRaises(dns.resolver.NotAbsolute, bad)
class PollingMonkeyPatchMixin(object):
def setUp(self):
self.__native_polling_backend = dns.query._polling_backend
dns.query._set_polling_backend(self.polling_backend())
unittest.TestCase.setUp(self)
def tearDown(self):
dns.query._set_polling_backend(self.__native_polling_backend)
unittest.TestCase.tearDown(self)
class SelectResolverTestCase(PollingMonkeyPatchMixin, BaseResolverTests, unittest.TestCase):
def polling_backend(self):
return dns.query._select_for
if hasattr(select, 'poll'):
class PollResolverTestCase(PollingMonkeyPatchMixin, BaseResolverTests, unittest.TestCase):
def polling_backend(self):
return dns.query._poll_for
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
mibanescu/pulp | server/pulp/server/managers/repo/unit_association.py | 4 | 18861 | """
Contains the manager class and exceptions for handling the mappings between
repositories and content units.
"""
from gettext import gettext as _
import logging
import sys
from celery import task
import mongoengine
import pymongo
from pulp.common import error_codes
from pulp.plugins.conduits.unit_import import ImportUnitConduit
from pulp.plugins.config import PluginCallConfiguration
from pulp.plugins.loader import api as plugin_api
from pulp.server.async.tasks import Task
from pulp.server.controllers import repository as repo_controller
from pulp.server.controllers import units as units_controller
from pulp.server.db import model
from pulp.server.db.model.criteria import UnitAssociationCriteria
from pulp.server.db.model.repository import RepoContentUnit
import pulp.plugins.conduits._common as conduit_common_utils
import pulp.server.exceptions as exceptions
import pulp.server.managers.factory as manager_factory
# Valid sort strings
SORT_TYPE_ID = 'type_id'
SORT_CREATED = 'created'
SORT_UPDATED = 'updated'
_VALID_SORTS = (SORT_TYPE_ID, SORT_CREATED, SORT_UPDATED)
SORT_ASCENDING = pymongo.ASCENDING
SORT_DESCENDING = pymongo.DESCENDING
_VALID_DIRECTIONS = (SORT_ASCENDING, SORT_DESCENDING)
logger = logging.getLogger(__name__)
class RepoUnitAssociationManager(object):
"""
Manager used to handle the associations between repositories and content
units. The functionality provided within assumes the repo and units have
been created outside of this manager.
"""
def associate_unit_by_id(self, repo_id, unit_type_id, unit_id, update_repo_metadata=True):
"""
Creates an association between the given repository and content unit.
If there is already an association between the given repo and content
unit where all other metadata matches the input to this method,
this call has no effect.
Both repo and unit must exist in the database prior to this call,
however this call will not verify that for performance reasons. Care
should be taken by the caller to preserve the data integrity.
@param repo_id: identifies the repo
@type repo_id: str
@param unit_type_id: identifies the type of unit being added
@type unit_type_id: str
@param unit_id: uniquely identifies the unit within the given type
@type unit_id: str
@param update_repo_metadata: if True, updates the unit association count
after the new association is made. The last
unit added field will also be updated. Set this
to False when doing bulk associations, and
make one call to update the count at the end.
defaults to True
@type update_repo_metadata: bool
@raise InvalidType: if the given owner type is not of the valid enumeration
"""
# If the association already exists, no need to do anything else
spec = {'repo_id': repo_id,
'unit_id': unit_id,
'unit_type_id': unit_type_id}
existing_association = RepoContentUnit.get_collection().find_one(spec)
if existing_association is not None:
return
similar_exists = False
if update_repo_metadata:
similar_exists = RepoUnitAssociationManager.association_exists(repo_id, unit_id,
unit_type_id)
# Create the database entry
association = RepoContentUnit(repo_id, unit_id, unit_type_id)
RepoContentUnit.get_collection().save(association)
# update the count and times of associated units on the repo object
if update_repo_metadata and not similar_exists:
repo_controller.update_unit_count(repo_id, unit_type_id, 1)
repo_controller.update_last_unit_added(repo_id)
def associate_all_by_ids(self, repo_id, unit_type_id, unit_id_list):
"""
Creates multiple associations between the given repo and content units.
See associate_unit_by_id for semantics.
@param repo_id: identifies the repo
@type repo_id: str
@param unit_type_id: identifies the type of unit being added
@type unit_type_id: str
@param unit_id_list: list or generator of unique identifiers for units within the given type
@type unit_id_list: list or generator of str
:return: number of new units added to the repo
:rtype: int
@raise InvalidType: if the given owner type is not of the valid enumeration
"""
# There may be a way to batch this in mongo which would be ideal for a
# bulk operation like this. But for deadline purposes, this call will
# simply loop and call the single method.
unique_count = 0
for unit_id in unit_id_list:
if not RepoUnitAssociationManager.association_exists(repo_id, unit_id, unit_type_id):
unique_count += 1
self.associate_unit_by_id(repo_id, unit_type_id, unit_id, False)
# update the count of associated units on the repo object
if unique_count:
repo_controller.update_unit_count(repo_id, unit_type_id, unique_count)
repo_controller.update_last_unit_added(repo_id)
return unique_count
@staticmethod
def associate_from_repo(source_repo_id, dest_repo_id, criteria=None,
import_config_override=None):
"""
Creates associations in a repository based on the contents of a source
repository. Units from the source repository can be filtered by
specifying a criteria object.
The destination repository must have an importer that can support
the types of units being associated. This is done by analyzing the
unit list and the importer metadata and takes place before the
destination repository is called.
Pulp does not actually perform the associations as part of this call.
The unit list is determined and passed to the destination repository's
importer. It is the job of the importer to make the associate calls
back into Pulp where applicable.
If criteria is None, the effect of this call is to copy the source
repository's associations into the destination repository.
:param source_repo_id: identifies the source repository
:type source_repo_id: str
:param dest_repo_id: identifies the destination repository
:type dest_repo_id: str
:param criteria: optional; if specified, will filter the units retrieved from
the source repository
:type criteria: pulp.server.db.model.criteria.UnitAssociationCriteria
:param import_config_override: optional config containing values to use for this import only
:type import_config_override: dict
:return: dict with key 'units_successful' whose
value is a list of unit keys that were copied.
units that were associated by this operation
:rtype: dict
:raise MissingResource: if either of the specified repositories don't exist
"""
importer_manager = manager_factory.repo_importer_manager()
source_repo = model.Repository.objects.get_repo_or_missing_resource(source_repo_id)
dest_repo = model.Repository.objects.get_repo_or_missing_resource(dest_repo_id)
# This will raise MissingResource if there isn't one, which is the
# behavior we want this method to exhibit, so just let it bubble up.
dest_repo_importer = importer_manager.get_importer(dest_repo_id)
source_repo_importer = importer_manager.get_importer(source_repo_id)
# The docs are incorrect on the list_importer_types call; it actually
# returns a dict with the types under key "types" for some reason.
supported_type_ids = set(plugin_api.list_importer_types(
dest_repo_importer['importer_type_id'])['types'])
# Get the unit types from the repo source repo
source_repo_unit_types = set(source_repo.content_unit_counts.keys())
# Now we can make sure the destination repository's importer is capable
# of importing either the selected units or all of the units
if not source_repo_unit_types.issubset(supported_type_ids):
raise exceptions.PulpCodedException(
error_code=error_codes.PLP0000,
                message='The target importer does not support the types from the source')
transfer_units = None
# If criteria is specified, retrieve the list of units now
if criteria is not None:
# if all source types have been converted to mongo - search via new style
if source_repo_unit_types.issubset(set(plugin_api.list_unit_models())):
association_q = mongoengine.Q(__raw__=criteria.association_spec)
unit_q = mongoengine.Q(__raw__=criteria.unit_spec)
transfer_units = repo_controller.find_repo_content_units(
repository=source_repo,
repo_content_unit_q=association_q,
units_q=unit_q,
yield_content_unit=True)
else:
# else, search via old style
associate_us = load_associated_units(source_repo_id, criteria)
# If units were supposed to be filtered but none matched, we're done
if len(associate_us) == 0:
# Return an empty list to indicate nothing was copied
return {'units_successful': []}
# Convert all of the units into the plugin standard representation if
# a filter was specified
transfer_units = None
if associate_us is not None:
transfer_units = create_transfer_units(associate_us)
# Convert the two repos into the plugin API model
transfer_dest_repo = dest_repo.to_transfer_repo()
transfer_source_repo = source_repo.to_transfer_repo()
# Invoke the importer
importer_instance, plugin_config = plugin_api.get_importer_by_id(
dest_repo_importer['importer_type_id'])
call_config = PluginCallConfiguration(plugin_config, dest_repo_importer['config'],
import_config_override)
conduit = ImportUnitConduit(
source_repo_id, dest_repo_id, source_repo_importer['id'], dest_repo_importer['id'])
try:
copied_units = importer_instance.import_units(
transfer_source_repo, transfer_dest_repo, conduit, call_config,
units=transfer_units)
unit_ids = [u.to_id_dict() for u in copied_units]
return {'units_successful': unit_ids}
except Exception:
msg = _('Exception from importer [%(i)s] while importing units into repository [%(r)s]')
msg_dict = {'i': dest_repo_importer['importer_type_id'], 'r': dest_repo_id}
logger.exception(msg % msg_dict)
raise exceptions.PulpExecutionException(), None, sys.exc_info()[2]
def unassociate_unit_by_id(self, repo_id, unit_type_id, unit_id, notify_plugins=True):
"""
Removes the association between a repo and the given unit. Only the
association made by the given owner will be removed. It is possible the
        repo will still have a manually created association for the unit.
If no association exists between the repo and unit, this call has no
effect.
@param repo_id: identifies the repo
@type repo_id: str
@param unit_type_id: identifies the type of unit being removed
@type unit_type_id: str
@param unit_id: uniquely identifies the unit within the given type
@type unit_id: str
@param notify_plugins: if true, relevant plugins will be informed of the
removal
@type notify_plugins: bool
"""
return self.unassociate_all_by_ids(repo_id, unit_type_id, [unit_id],
notify_plugins=notify_plugins)
def unassociate_all_by_ids(self, repo_id, unit_type_id, unit_id_list,
notify_plugins=True):
"""
Removes the association between a repo and a number of units. Only the
association made by the given owner will be removed. It is possible the
        repo will still have a manually created association for the unit.
@param repo_id: identifies the repo
@type repo_id: str
@param unit_type_id: identifies the type of units being removed
@type unit_type_id: str
@param unit_id_list: list of unique identifiers for units within the given type
@type unit_id_list: list of str
@param notify_plugins: if true, relevant plugins will be informed of the
removal
@type notify_plugins: bool
"""
association_filters = {'unit_id': {'$in': unit_id_list}}
criteria = UnitAssociationCriteria(type_ids=[unit_type_id],
association_filters=association_filters)
return self.unassociate_by_criteria(repo_id, criteria,
notify_plugins=notify_plugins)
@staticmethod
def unassociate_by_criteria(repo_id, criteria, notify_plugins=True):
"""
Unassociate units that are matched by the given criteria.
:param repo_id: identifies the repo
:type repo_id: str
:param criteria:
:param notify_plugins: if true, relevant plugins will be informed of the removal
:type notify_plugins: bool
"""
association_query_manager = manager_factory.repo_unit_association_query_manager()
unassociate_units = association_query_manager.get_units(repo_id, criteria=criteria)
if len(unassociate_units) == 0:
return {}
unit_map = {} # maps unit_type_id to a list of unit_ids
for unit in unassociate_units:
id_list = unit_map.setdefault(unit['unit_type_id'], [])
id_list.append(unit['unit_id'])
collection = RepoContentUnit.get_collection()
for unit_type_id, unit_ids in unit_map.items():
spec = {'repo_id': repo_id,
'unit_type_id': unit_type_id,
'unit_id': {'$in': unit_ids}
}
collection.remove(spec)
unique_count = sum(
1 for unit_id in unit_ids if not RepoUnitAssociationManager.association_exists(
repo_id, unit_id, unit_type_id))
if not unique_count:
continue
repo_controller.update_unit_count(repo_id, unit_type_id, -unique_count)
repo_controller.update_last_unit_removed(repo_id)
# Convert the units into transfer units. This happens regardless of whether or not
        # the plugin will be notified as it's used to generate the return result.
transfer_units = create_transfer_units(unassociate_units)
if notify_plugins:
remove_from_importer(repo_id, transfer_units)
# Match the return type/format as copy
serializable_units = [u.to_id_dict() for u in transfer_units]
return {'units_successful': serializable_units}
@staticmethod
def association_exists(repo_id, unit_id, unit_type_id):
"""
Determines if an identical association already exists.
I know the order of arguments does not match other methods in this
module, but it does match the constructor for the RepoContentUnit
object, which I think is the higher authority.
@param repo_id: identifies the repo
@type repo_id: str
@param unit_type_id: identifies the type of unit being removed
@type unit_type_id: str
@param unit_id: uniquely identifies the unit within the given type
@type unit_id: str
@return: True if unique else False
@rtype: bool
"""
spec = {
'repo_id': repo_id,
'unit_id': unit_id,
'unit_type_id': unit_type_id,
}
unit_coll = RepoContentUnit.get_collection()
existing_count = unit_coll.find(spec).count()
return bool(existing_count)
associate_from_repo = task(RepoUnitAssociationManager.associate_from_repo, base=Task)
unassociate_by_criteria = task(RepoUnitAssociationManager.unassociate_by_criteria, base=Task)
def load_associated_units(source_repo_id, criteria):
criteria.association_fields = None
# Retrieve the units to be associated
association_query_manager = manager_factory.repo_unit_association_query_manager()
associate_us = association_query_manager.get_units(source_repo_id, criteria=criteria)
return associate_us
def create_transfer_units(associate_units):
unit_key_fields = {}
transfer_units = []
for unit in associate_units:
type_id = unit['unit_type_id']
if type_id not in unit_key_fields:
unit_key_fields[type_id] = units_controller.get_unit_key_fields_for_type(type_id)
u = conduit_common_utils.to_plugin_associated_unit(unit, type_id, unit_key_fields[type_id])
transfer_units.append(u)
return transfer_units
def remove_from_importer(repo_id, transfer_units):
repo_obj = model.Repository.objects.get_repo_or_missing_resource(repo_id)
transfer_repo = repo_obj.to_transfer_repo()
importer_manager = manager_factory.repo_importer_manager()
repo_importer = importer_manager.get_importer(repo_id)
# Retrieve the plugin instance to invoke
importer_instance, plugin_config = plugin_api.get_importer_by_id(
repo_importer['importer_type_id'])
call_config = PluginCallConfiguration(plugin_config, repo_importer['config'])
# Invoke the importer's remove method
try:
importer_instance.remove_units(transfer_repo, transfer_units, call_config)
except Exception:
msg = _('Exception from importer [%(i)s] while removing units from repo [%(r)s]')
msg = msg % {'i': repo_importer['id'], 'r': repo_id}
logger.exception(msg)
# Do not raise the exception; this should not block the removal and is
# intended to be more informational to the plugin rather than a requirement
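# Illustrative sketch, not part of the Pulp module: mirrors the bookkeeping in
# associate_all_by_ids() using an in-memory set instead of the MongoDB-backed
# RepoContentUnit collection, to show how the count fed to update_unit_count()
# is derived. All names below are demo-only.
def _demo_unique_count(existing_associations, repo_id, unit_type_id, unit_id_list):
    """existing_associations: set of (repo_id, unit_type_id, unit_id) tuples."""
    unique_count = 0
    for unit_id in unit_id_list:
        key = (repo_id, unit_type_id, unit_id)
        if key not in existing_associations:  # association_exists() analogue
            unique_count += 1
            existing_associations.add(key)  # associate_unit_by_id() analogue
    return unique_count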
| gpl-2.0 |
ratoaq2/deluge | deluge/ui/gtkui/status_tab.py | 1 | 5369 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2008 Andrew Resch <[email protected]>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
from __future__ import division, unicode_literals
import logging
import deluge.component as component
from deluge.common import fpeer
from deluge.configmanager import ConfigManager
from deluge.ui.gtkui.piecesbar import PiecesBar
from deluge.ui.gtkui.tab_data_funcs import (fdate_or_never, fpcnt, fratio, fseed_rank_or_dash, fspeed_max,
ftime_or_dash, ftotal_sized)
from deluge.ui.gtkui.torrentdetails import Tab, TabWidget
log = logging.getLogger(__name__)
class StatusTab(Tab):
def __init__(self):
super(StatusTab, self).__init__('Status', 'status_tab', 'status_tab_label')
self.config = ConfigManager('gtkui.conf')
self.progressbar = self.main_builder.get_object('progressbar')
self.piecesbar = None
self.add_tab_widget('summary_availability', fratio, ('distributed_copies',))
self.add_tab_widget('summary_total_downloaded', ftotal_sized,
('all_time_download', 'total_payload_download'))
self.add_tab_widget('summary_total_uploaded', ftotal_sized,
('total_uploaded', 'total_payload_upload'))
self.add_tab_widget('summary_download_speed', fspeed_max,
('download_payload_rate', 'max_download_speed'))
self.add_tab_widget('summary_upload_speed', fspeed_max,
('upload_payload_rate', 'max_upload_speed'))
self.add_tab_widget('summary_seeds', fpeer, ('num_seeds', 'total_seeds'))
self.add_tab_widget('summary_peers', fpeer, ('num_peers', 'total_peers'))
self.add_tab_widget('summary_eta', ftime_or_dash, ('eta',))
self.add_tab_widget('summary_share_ratio', fratio, ('ratio',))
self.add_tab_widget('summary_active_time', ftime_or_dash, ('active_time',))
self.add_tab_widget('summary_seed_time', ftime_or_dash, ('seeding_time',))
self.add_tab_widget('summary_seed_rank', fseed_rank_or_dash, ('seed_rank', 'seeding_time'))
self.add_tab_widget('progressbar', fpcnt, ('progress', 'state', 'message'))
self.add_tab_widget('summary_last_seen_complete', fdate_or_never, ('last_seen_complete',))
self.add_tab_widget('summary_last_transfer', ftime_or_dash, ('time_since_transfer',))
self.config.register_set_function('show_piecesbar', self.on_show_piecesbar_config_changed, apply_now=True)
def update(self):
# Get the first selected torrent
selected = component.get('TorrentView').get_selected_torrent()
if not selected:
# No torrent is selected in the torrentview
self.clear()
return
# Get the torrent status
status_keys = self.status_keys
if self.config['show_piecesbar']:
status_keys.extend(['pieces', 'num_pieces'])
component.get('SessionProxy').get_torrent_status(
selected, status_keys).addCallback(self._on_get_torrent_status)
def _on_get_torrent_status(self, status):
# Check to see if we got valid data from the core
if not status:
return
# Update all the label widgets
for widget in self.tab_widgets.values():
txt = self.widget_status_as_fstr(widget, status)
if widget[0].get_text() != txt:
widget[0].set_text(txt)
        # Update progress bar separately as it's a special case (not a label).
fraction = status['progress'] / 100
if self.config['show_piecesbar']:
if self.piecesbar.get_fraction() != fraction:
self.piecesbar.set_fraction(fraction)
if status['state'] != 'Checking' and self.piecesbar.get_pieces() != status['pieces']:
# Skip pieces assignment if checking torrent.
self.piecesbar.set_pieces(status['pieces'], status['num_pieces'])
self.piecesbar.update()
else:
if self.progressbar.get_fraction() != fraction:
self.progressbar.set_fraction(fraction)
def on_show_piecesbar_config_changed(self, key, show):
if show:
self.show_piecesbar()
else:
self.hide_piecesbar()
def show_piecesbar(self):
if self.piecesbar is None:
self.piecesbar = PiecesBar()
self.main_builder.get_object(
'status_progress_vbox').pack_start(self.piecesbar, False, False, 0)
self.tab_widgets['piecesbar'] = TabWidget(self.piecesbar, fpcnt, ('progress', 'state', 'message'))
self.piecesbar.show()
self.progressbar.hide()
def hide_piecesbar(self):
self.progressbar.show()
if self.piecesbar:
self.piecesbar.hide()
self.tab_widgets.pop('piecesbar', None)
self.piecesbar = None
def clear(self):
for widget in self.tab_widgets.values():
widget[0].set_text('')
if self.config['show_piecesbar']:
self.piecesbar.clear()
else:
self.progressbar.set_fraction(0)
| gpl-3.0 |
trilomix/kmappy | blam.py | 1 | 1403 | #!/usr/bin/env python
import wx
from wx import Config
import blam
from blamframe import *
from multiprocessing import freeze_support
class blamapp(wx.App):
def OnInit(self):
## wxConfig *config = new wxConfig(wxT("Karnaugh Map Minimizer"));
##
## wxString lang;
## if ( config->Read(wxT("Language"), &lang) )
## {
## if(lang==wxT("hr")) m_locale.Init(wxLANGUAGE_CROATIAN);
## }
## else
## {
## if(wxLocale::GetSystemLanguage()==wxLANGUAGE_CROATIAN)
## {
## m_locale.Init(wxLANGUAGE_CROATIAN);
## config->Write(wxT("Language"), wxT("hr"));
## }
## }
##
## delete config;
##
## m_locale.AddCatalog(wxT("blam"));
        self.main = blamFrame("Karnaugh Map Minimizer", wx.DefaultPosition, wx.Size(450, 700))
self.main.Show()
self.SetTopWindow(self.main)
return True
def main():
application = blamapp(0)
if __debug__:
from wx.lib.inspection import InspectionTool
if not InspectionTool().initialized:
InspectionTool().Init()
wnd = wx.FindWindowAtPointer()
if not wnd:
wnd = application
InspectionTool().Show(wnd, True)
application.MainLoop()
## if __debug__:
## blamapp.OnOpenWidgetInspector(application)
if __name__ == '__main__':
## import wx.lib.inspection
## wx.lib.inspection.InspectionTool().Show()
freeze_support()
main() | mit |
PlayUAV/MissionPlanner | Lib/encodings/cp857.py | 93 | 34858 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP857.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp857',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
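# Illustrative sketch, not part of the generated codec: round-trips a short
# Turkish sample through the Codec class above. The sample text is arbitrary;
# the encoding/decoding tables it relies on are defined later in this module.
def _demo_roundtrip():
    sample = u'\u015ei\u015f k\xf6fte'  # 'Sis kofte' with S-cedilla and o-umlaut
    encoded, _ = Codec().encode(sample)  # bytes in code page 857
    decoded, _ = Codec().decode(encoded)
    assert decoded == sample
    return encoded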
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x0131, # LATIN SMALL LETTER DOTLESS I
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x0130, # LATIN CAPITAL LETTER I WITH DOT ABOVE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x009f: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x011e, # LATIN CAPITAL LETTER G WITH BREVE
0x00a7: 0x011f, # LATIN SMALL LETTER G WITH BREVE
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x00ae, # REGISTERED SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00b7: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x00b8: 0x00a9, # COPYRIGHT SIGN
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x00a2, # CENT SIGN
0x00be: 0x00a5, # YEN SIGN
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x00c7: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00d1: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00d2: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00d4: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x00d5: None, # UNDEFINED
0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d8: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x00a6, # BROKEN BAR
0x00de: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e3: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: None, # UNDEFINED
0x00e8: 0x00d7, # MULTIPLICATION SIGN
0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ea: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00eb: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x00ed: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x00ee: 0x00af, # MACRON
0x00ef: 0x00b4, # ACUTE ACCENT
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: None, # UNDEFINED
0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00f4: 0x00b6, # PILCROW SIGN
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x00b8, # CEDILLA
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x00b9, # SUPERSCRIPT ONE
0x00fc: 0x00b3, # SUPERSCRIPT THREE
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\u0131' # 0x008d -> LATIN SMALL LETTER DOTLESS I
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
u'\u0130' # 0x0098 -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
u'\xa3' # 0x009c -> POUND SIGN
u'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
u'\u015e' # 0x009e -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\u015f' # 0x009f -> LATIN SMALL LETTER S WITH CEDILLA
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
u'\u011e' # 0x00a6 -> LATIN CAPITAL LETTER G WITH BREVE
u'\u011f' # 0x00a7 -> LATIN SMALL LETTER G WITH BREVE
u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
u'\xae' # 0x00a9 -> REGISTERED SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc0' # 0x00b7 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xa9' # 0x00b8 -> COPYRIGHT SIGN
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\xa2' # 0x00bd -> CENT SIGN
u'\xa5' # 0x00be -> YEN SIGN
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\xe3' # 0x00c6 -> LATIN SMALL LETTER A WITH TILDE
u'\xc3' # 0x00c7 -> LATIN CAPITAL LETTER A WITH TILDE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa4' # 0x00cf -> CURRENCY SIGN
u'\xba' # 0x00d0 -> MASCULINE ORDINAL INDICATOR
u'\xaa' # 0x00d1 -> FEMININE ORDINAL INDICATOR
u'\xca' # 0x00d2 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x00d4 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\ufffe' # 0x00d5 -> UNDEFINED
u'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x00d8 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\xa6' # 0x00dd -> BROKEN BAR
u'\xcc' # 0x00de -> LATIN CAPITAL LETTER I WITH GRAVE
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd2' # 0x00e3 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
u'\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\ufffe' # 0x00e7 -> UNDEFINED
u'\xd7' # 0x00e8 -> MULTIPLICATION SIGN
u'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0x00ea -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0x00eb -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xec' # 0x00ec -> LATIN SMALL LETTER I WITH GRAVE
u'\xff' # 0x00ed -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\xaf' # 0x00ee -> MACRON
u'\xb4' # 0x00ef -> ACUTE ACCENT
u'\xad' # 0x00f0 -> SOFT HYPHEN
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\ufffe' # 0x00f2 -> UNDEFINED
u'\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
u'\xb6' # 0x00f4 -> PILCROW SIGN
u'\xa7' # 0x00f5 -> SECTION SIGN
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\xb8' # 0x00f7 -> CEDILLA
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\xa8' # 0x00f9 -> DIAERESIS
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\xb9' # 0x00fb -> SUPERSCRIPT ONE
u'\xb3' # 0x00fc -> SUPERSCRIPT THREE
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x00bd, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a5: 0x00be, # YEN SIGN
0x00a6: 0x00dd, # BROKEN BAR
0x00a7: 0x00f5, # SECTION SIGN
0x00a8: 0x00f9, # DIAERESIS
0x00a9: 0x00b8, # COPYRIGHT SIGN
0x00aa: 0x00d1, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00ae: 0x00a9, # REGISTERED SIGN
0x00af: 0x00ee, # MACRON
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b3: 0x00fc, # SUPERSCRIPT THREE
0x00b4: 0x00ef, # ACUTE ACCENT
0x00b5: 0x00e6, # MICRO SIGN
0x00b6: 0x00f4, # PILCROW SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00b8: 0x00f7, # CEDILLA
0x00b9: 0x00fb, # SUPERSCRIPT ONE
0x00ba: 0x00d0, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c0: 0x00b7, # LATIN CAPITAL LETTER A WITH GRAVE
0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c3: 0x00c7, # LATIN CAPITAL LETTER A WITH TILDE
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c8: 0x00d4, # LATIN CAPITAL LETTER E WITH GRAVE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00ca: 0x00d2, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00cc: 0x00de, # LATIN CAPITAL LETTER I WITH GRAVE
0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00cf: 0x00d8, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d2: 0x00e3, # LATIN CAPITAL LETTER O WITH GRAVE
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x00e8, # MULTIPLICATION SIGN
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00d9: 0x00eb, # LATIN CAPITAL LETTER U WITH GRAVE
0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
0x00db: 0x00ea, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e3: 0x00c6, # LATIN SMALL LETTER A WITH TILDE
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ec: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00ff: 0x00ed, # LATIN SMALL LETTER Y WITH DIAERESIS
0x011e: 0x00a6, # LATIN CAPITAL LETTER G WITH BREVE
0x011f: 0x00a7, # LATIN SMALL LETTER G WITH BREVE
0x0130: 0x0098, # LATIN CAPITAL LETTER I WITH DOT ABOVE
0x0131: 0x008d, # LATIN SMALL LETTER DOTLESS I
0x015e: 0x009e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x015f: 0x009f, # LATIN SMALL LETTER S WITH CEDILLA
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| gpl-3.0 |
saurabh6790/medapp | accounts/doctype/sales_invoice/pos.py | 29 | 1618 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
@webnotes.whitelist()
def get_items(price_list, sales_or_purchase, item=None, item_group=None):
condition = ""
args = {"price_list": price_list}
if sales_or_purchase == "Sales":
condition = "i.is_sales_item='Yes'"
else:
condition = "i.is_purchase_item='Yes'"
if item_group and item_group != "All Item Groups":
condition += " and i.item_group='%s'" % item_group
if item:
condition += " and CONCAT(i.name, i.item_name) like %(name)s"
args["name"] = "%%%s%%" % item
return webnotes.conn.sql("""select i.name, i.item_name, i.image,
item_det.ref_rate, item_det.currency
from `tabItem` i LEFT JOIN
(select item_code, ref_rate, currency from
`tabItem Price` where price_list=%s) item_det
ON
item_det.item_code=i.name
where
%s""" % ('%(price_list)s', condition), args, as_dict=1)
@webnotes.whitelist()
def get_item_code(barcode_serial_no):
input_via = "serial_no"
item_code = webnotes.conn.sql("""select name, item_code from `tabSerial No` where
name=%s""", (barcode_serial_no), as_dict=1)
if not item_code:
input_via = "barcode"
item_code = webnotes.conn.sql("""select name from `tabItem` where barcode=%s""",
(barcode_serial_no), as_dict=1)
if item_code:
return item_code, input_via
else:
webnotes.throw("Invalid Barcode / Serial No")
@webnotes.whitelist()
def get_mode_of_payment():
return webnotes.conn.sql("""select name from `tabMode of Payment`""", as_dict=1) | agpl-3.0 |
civato/Note8.0-StormBorn | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
| gpl-2.0 |
jianwei1216/my-scripts | mytest/argparse/misc.py | 1 | 13565 | #!/usr/bin/python
# -*- conding:UTF-8 -*-
import sys
import os
import ssh
import thread
import time
import argparse
import textwrap
# create an SSH client for executing commands on a remote host; callers close it after use
def get_ssh_client(host):
global args
client = ssh.SSHClient()
client.set_missing_host_key_policy(ssh.AutoAddPolicy())
client.connect(host, port=args.port, username=args.user, password=args.password)
return client
# run func(host, lock, extra_args) for every node in its own thread and wait for all threads to finish
def __multi_thread(nodes, func, *args):
locks = []
for host in nodes:
lock = thread.allocate_lock()
lock.acquire()
locks.append(lock)
thread.start_new_thread(func, (host, lock, args))
for lock in locks:
while lock.locked():
pass
def __client_exec_commands (host, cmd_list, sleep=0):
if host == '' or len(cmd_list) == 0:
print 'Error: args are NULL'
exit(-1)
client = get_ssh_client(host)
for cmd in cmd_list:
print host, cmd
stdin, stdout, stderr = client.exec_command(cmd)
err = stderr.read()
if len(err) > 0:
print host, err,
if sleep != 0:
time.sleep(sleep)
client.close()
# clean up the whole cluster environment on a single node
def __clean_all_cluster_env(host, lock, args1):
global args
cmd_list = []
clusterfs = args.cluster_keyword + 'fs'
clusterd = args.cluster_keyword + 'd'
clusterfsd = args.cluster_keyword + 'fsd'
cluster = args.cluster_keyword
cluster_gui = args.cluster_keyword + '_gui'
cluster_backup = args.cluster_keyword + '_backup'
cluster_manager = args.cluster_keyword + '_manager'
cluster_client = args.cluster_keyword + '_client'
print '__clean_all_cluster_env()', host
if args1[0]:
light_cleanup_cmd = 'rm -rf /var/log/' + clusterfs
cmd_list.append(light_cleanup_cmd)
else:
clean_hosts = 'node-manager stop; node-manager clear;'
cmd_list.append(clean_hosts)
clean_process = 'killall ' + clusterfs + '; killall ' + clusterd + '; killall ' + clusterfsd + '; killall mongod;'\
'killall mdadm; ps -ef | grep node_manager | grep -v grep | awk \'{print $2}\' | xargs kill -9;'\
'ps -ef | grep ' + cluster_gui + '| grep -v grep | awk \'{print $2}\' | xargs kill -9;'
cmd_list.append(clean_process)
clean_rpm = 'for i in `rpm -qa | grep ' + cluster + '`; do rpm -e $i --nodeps; done'
cmd_list.append(clean_rpm)
clean_lib = 'cd /usr/lib64/; rm -rf ' + clusterfs + 'libgfapi.* libgfdb.* libgfxdr.* libgfrpc.* libgfchangelog.*'
cmd_list.append(clean_lib)
clean_mgmt = 'cd /usr/local/; rm -rf ' + cluster_gui + ' ' + cluster_backup + ' ' + cluster_manager + ' '\
+ cluster_client + ' digiserver'
cmd_list.append(clean_mgmt)
clean_log_config = 'rm -rf /var/lib/' + clusterd + ' /var/log/' + clusterfs + \
' /etc/' + clusterfs + ' /etc/' + cluster_manager + \
' /etc/systemd/system/multi-user.target.wants/' + clusterfs + '-client.service '\
' /etc/nginx/' + clusterfs + '.* /etc/sudoers.d/' + clusterfs + \
' /var/log/' + cluster_manager + ' /usr/lib/ocf/resource.d/' + clusterfs + \
' /usr/share/doc/'+ clusterfs + ' /usr/share/' + clusterfs + \
' /usr/include/' + clusterfs + ' /usr/libexec/' + clusterfs + ' /var/run/' + cluster + \
' /data /var/log/' + cluster_gui + ' /usr/sbin/' + cluster + '* /usr/bin/' + clusterfs + '-client '\
' /usr/bin/' + cluster_gui + ' /usr/bin/' + clusterfs + '-reporter /usr/bin/' + cluster + 'find '\
' /usr/bin/digi_partition /usr/bin/' + clusterfs + '-volgen /usr/bin/' + clusterfs + '-afrgen '\
' /usr/bin/node-manager'
cmd_list.append(clean_log_config)
client = get_ssh_client(host)
for cmd in cmd_list:
print host, cmd
stdin, stdout, stderr = client.exec_command(cmd)
err = stderr.read()
out = stdout.read()
if len(err) > 0:
print host, err
if len(out) > 0:
print host, out
client.close()
lock.release()
def clean_all_cluster_env():
global args, command_remainder
if (args.nodes == None) or (args.password == None):
print 'Error: invalid arguments!!!\nExample:' + \
command_remainder['clean_all_cluster_env']
exit(-1)
    __multi_thread(args.nodes, __clean_all_cluster_env, args.light_cleanup)
def __add_trace_module (host, lock, args):
cmd_list = []
if len(args) != 4:
print 'Error: args are zero'
exit (-1)
path_config = args[0]
start_line = args[1]
need_trace_vol_name = args[2]
volname = args[3]
subvolume_trace_name = volname + '-trace'
need_subvolumes_name = 'subvolumes ' + need_trace_vol_name
need_replace_trace_name = 'subvolumes ' + subvolume_trace_name
cmd1 = 'sed -i \'' + start_line + 'a\\volume ' + subvolume_trace_name + '\' ' + path_config
cmd_list.append(cmd1)
cmd2 = 'sed -i \'' + str(int(start_line)+1) + 'a\\ type debug/trace\' ' + path_config
cmd_list.append(cmd2)
cmd3 = 'sed -i \'' + str(int(start_line)+2) + 'a\\ option log-file yes\' ' + path_config
cmd_list.append(cmd3)
cmd4 = 'sed -i \'' + str(int(start_line)+3) + 'a\\ option log-history yes\' ' + path_config
cmd_list.append(cmd4)
cmd9 = 'sed -i \'s/' + need_subvolumes_name + '/' + need_replace_trace_name + '/g\' ' + path_config
cmd_list.append(cmd9)
cmd5 = 'sed -i \'' + str(int(start_line)+4) + 'a\\ ' + need_subvolumes_name + '\' ' + path_config
cmd_list.append(cmd5)
cmd6 = 'sed -i \'' + str(int(start_line)+5) + 'a\\end-volume\' ' + path_config
cmd_list.append(cmd6)
cmd7 = 'sed -i \'' + str(int(start_line)+6) + 'a\\zhangjianwei\' ' + path_config
cmd_list.append(cmd7)
cmd8 = 'sed -i \'s/zhangjianwei//g\' ' + path_config
cmd_list.append(cmd8)
__client_exec_commands(host, cmd_list)
lock.release()
def add_trace_module():
global args, command_remainder
if (args.nodes == None) or (args.start_line == None) \
or (args.need_trace_volume_name == None) \
            or (args.volname == None) or (args.password == None):
print 'Error: invalid arguments!!!\nExample: ' + \
command_remainder['add_trace_module']
exit(-1)
    __multi_thread(args.nodes, __add_trace_module, args.configure_path, \
            args.start_line, args.need_trace_volume_name, args.volname)
# ssh non-password login
def not_use_ssh_passwd():
global args
global command_remainder
id_rsa_pub = ''
if (args.nodes==None) or (args.password==None):
print 'Error: invalid arguments!!!\nExample: ' +\
command_remainder['not_use_ssh_passwd']
exit(-1)
for host in args.nodes:
client = get_ssh_client(host)
#stdin, stdout, stderr = client.exec_command('rm -rf /root/.ssh/authorized_keys')
#if len(stderr.read()) > 0:
#print stderr.read()
stdin, stdout, stderr = client.exec_command('cat /root/.ssh/id_rsa.pub')
if len(stderr.read()) > 0:
print stderr.read()
id_rsa_pub += stdout.read()
client.close()
print id_rsa_pub
for host in args.nodes:
client = get_ssh_client(host)
echocmd = 'echo \'' + id_rsa_pub + '\' >> /root/.ssh/authorized_keys'
print echocmd
stdin, stdout, stderr = client.exec_command(echocmd)
if len(stderr.read()) > 0:
print stderr.read()
client.close()
print 'success!'
def build_mongodb():
global args
global command_remainder
master_node = ''
cmd = '/usr/bin/python'
prefix_str = 'test'
if (args.digi_mongodbdeploy_path == None) or \
(args.master_node == None) or (args.slave_nodes == None):
print 'Error: invalid arguments!\nExample: ' + \
command_remainder['build_mongodb']
exit(-1)
# python /usr/local/digioceanfs_manager/utils/digi-mongodbdeploy.pyc domain create
# test#10.10.21.115 test2#10.10.21.116 test3#10.10.21.91 10.10.21.115
exec_cmd = cmd + ' ' + args.digi_mongodbdeploy_path + ' domain create ' + prefix_str + '#' + args.master_node + ' '
for i in range(0, len(args.slave_nodes)):
exec_cmd += prefix_str + str(i) + '#' + args.slave_nodes[i] + ' '
exec_cmd += args.master_node
print exec_cmd
os.system (exec_cmd)
# python /usr/local/digioceanfs_manager/utils/digi-mongodbdeploy.pyc create rs0
# replset#10.10.21.115#10.10.21.116#10.10.21.91 10.10.21.115
exec_cmd = cmd + ' ' + args.digi_mongodbdeploy_path + ' create rs0 replset#' + args.master_node
for slave_node in args.slave_nodes:
exec_cmd += '#' + slave_node
exec_cmd += ' ' + args.master_node
print exec_cmd
os.system (exec_cmd)
# main
if __name__ == '__main__':
global args
global command_remainder
sys_argv_0 = sys.argv[0]
command_remainder = {}
command_remainder = {'build_mongodb':sys_argv_0 + ' --build-mongodb --master-node 10.10.21.115 --slave-nodes 10.10.21.116 10.10.21.91',\
'add_trace_module':sys_argv_0 + '--add-trace-module --nodes 10.10.21.111 --configure-path /var/lib/digioceand/vols/test'\
'/trusted-test.tcp-fuse.vol --start-line 150 --need-trace-volume-name test-dht --volname test --password 123456',\
'clean_all_cluster_env':sys_argv_0 + '--clean-all-cluster-env --nodes 10.10.21.9{1,2,3} --password 123456',\
'not_use_ssh_passwd':sys_argv_0 + '--not-use-ssh-passwd --nodes 10.10.21.11{1,2,3,4,5} 10.10.12.16 --password 123456'}
#print 'debug command_remainder', command_remainder
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,\
epilog=textwrap.dedent('' + command_remainder['build_mongodb'] + '\n' \
'' + command_remainder['not_use_ssh_passwd'] +'\n' \
'' + command_remainder['add_trace_module'] + '\n' \
'' + command_remainder['clean_all_cluster_env']))
parser.add_argument ('--clean-all-cluster-env', action='store_true', help='cleanup all the cluster env on the specified nodes')
parser.add_argument ('--cluster-keyword', nargs=1, default='digiocean', help='gluster or digiocean (default is digiocean)')
parser.add_argument ('--light-cleanup', action='store_true', help='only delete the /var/log/digioceanfs')
parser.add_argument ('--not-use-ssh-passwd', action='store_true', help='ssh free password login on specified nodes')
parser.add_argument ('--add-trace-module', action='store_true', help='add trace module for cluster')
parser.add_argument ('--configure-path', nargs=1, help='the cluster configure absolute path')
parser.add_argument ('--start-line', nargs=1, type=int, help='the start line to add trace module')
parser.add_argument ('--need-trace-volume-name', nargs=1, type=str, help='the module\'s name that you want to debug')
parser.add_argument ('--volname', nargs=1, type=str, help='the cluster service name')
parser.add_argument ('--build-mongodb', action='store_true', help='quick create a mongodb cluster')
parser.add_argument ('--master-node', type=str, help='the master node of mongodb cluster')
parser.add_argument ('--slave-nodes', nargs='+', help='the slave nodes of mongodb cluster')
parser.add_argument ('--digi-mongodbdeploy-path', nargs=1, type=str, default='/usr/local/digioceanfs_manager/utils/digi-mongodbdeploy.pyc')
parser.add_argument ('--nodes', nargs='+', help='need to operate nodes IP')
parser.add_argument ('-p', '--port', nargs='?', type=int, default=22, help='ssh port')
parser.add_argument ('--user', default='root', help='ssh user name')
parser.add_argument ('--password', type=str, help='ssh password of the specified nodes')
args = parser.parse_args ()
#print 'debug-args:', args
#Namespace(add_trace_module=False,
# build_mongodb=False,
# clean_all_cluster_env=False,
# cluster_keyword='digiocean',
# configure_path=None,
# master_node=None,
# need_trace_volume_name=None,
# nodes=None,
# not_use_ssh_passwd=False,
# password=None,
# port=22,
# slave_nodes=None,
# start_line=None,
# volname=None)
# digi_mongodbdeploy_path=
# light_cleanup=None
if args.add_trace_module:
add_trace_module()
elif args.build_mongodb:
build_mongodb()
elif args.clean_all_cluster_env:
clean_all_cluster_env()
elif args.not_use_ssh_passwd:
not_use_ssh_passwd()
else:
print 'Error: unkown keyword!!!'
#parser.print_help()
| gpl-2.0 |
Unity-Technologies/ml-agents | ml-agents-envs/mlagents_envs/communicator_objects/agent_info_pb2.py | 1 | 5851 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mlagents_envs/communicator_objects/agent_info.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from mlagents_envs.communicator_objects import observation_pb2 as mlagents__envs_dot_communicator__objects_dot_observation__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mlagents_envs/communicator_objects/agent_info.proto',
package='communicator_objects',
syntax='proto3',
serialized_pb=_b('\n3mlagents_envs/communicator_objects/agent_info.proto\x12\x14\x63ommunicator_objects\x1a\x34mlagents_envs/communicator_objects/observation.proto\"\xf9\x01\n\x0e\x41gentInfoProto\x12\x0e\n\x06reward\x18\x07 \x01(\x02\x12\x0c\n\x04\x64one\x18\x08 \x01(\x08\x12\x18\n\x10max_step_reached\x18\t \x01(\x08\x12\n\n\x02id\x18\n \x01(\x05\x12\x13\n\x0b\x61\x63tion_mask\x18\x0b \x03(\x08\x12<\n\x0cobservations\x18\r \x03(\x0b\x32&.communicator_objects.ObservationProto\x12\x10\n\x08group_id\x18\x0e \x01(\x05\x12\x14\n\x0cgroup_reward\x18\x0f \x01(\x02J\x04\x08\x01\x10\x02J\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07J\x04\x08\x0c\x10\rB%\xaa\x02\"Unity.MLAgents.CommunicatorObjectsb\x06proto3')
,
dependencies=[mlagents__envs_dot_communicator__objects_dot_observation__pb2.DESCRIPTOR,])
_AGENTINFOPROTO = _descriptor.Descriptor(
name='AgentInfoProto',
full_name='communicator_objects.AgentInfoProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='reward', full_name='communicator_objects.AgentInfoProto.reward', index=0,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='done', full_name='communicator_objects.AgentInfoProto.done', index=1,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_step_reached', full_name='communicator_objects.AgentInfoProto.max_step_reached', index=2,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='communicator_objects.AgentInfoProto.id', index=3,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='action_mask', full_name='communicator_objects.AgentInfoProto.action_mask', index=4,
number=11, type=8, cpp_type=7, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='observations', full_name='communicator_objects.AgentInfoProto.observations', index=5,
number=13, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='group_id', full_name='communicator_objects.AgentInfoProto.group_id', index=6,
number=14, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='group_reward', full_name='communicator_objects.AgentInfoProto.group_reward', index=7,
number=15, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=132,
serialized_end=381,
)
_AGENTINFOPROTO.fields_by_name['observations'].message_type = mlagents__envs_dot_communicator__objects_dot_observation__pb2._OBSERVATIONPROTO
DESCRIPTOR.message_types_by_name['AgentInfoProto'] = _AGENTINFOPROTO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AgentInfoProto = _reflection.GeneratedProtocolMessageType('AgentInfoProto', (_message.Message,), dict(
DESCRIPTOR = _AGENTINFOPROTO,
__module__ = 'mlagents_envs.communicator_objects.agent_info_pb2'
# @@protoc_insertion_point(class_scope:communicator_objects.AgentInfoProto)
))
_sym_db.RegisterMessage(AgentInfoProto)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\252\002\"Unity.MLAgents.CommunicatorObjects'))
# @@protoc_insertion_point(module_scope)
| apache-2.0 |
totallybradical/temp_servo2 | tests/wpt/web-platform-tests/tools/py/py/_code/_assertionnew.py | 217 | 12384 | """
Find intermediate evaluation results in assert statements through the builtin AST.
This should replace _assertionold.py eventually.
"""
import sys
import ast
import py
from py._code.assertion import _format_explanation, BuiltinAssertionError
if sys.platform.startswith("java") and sys.version_info < (2, 5, 2):
# See http://bugs.jython.org/issue1497
_exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
"ListComp", "GeneratorExp", "Yield", "Compare", "Call",
"Repr", "Num", "Str", "Attribute", "Subscript", "Name",
"List", "Tuple")
_stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign",
"AugAssign", "Print", "For", "While", "If", "With", "Raise",
"TryExcept", "TryFinally", "Assert", "Import", "ImportFrom",
"Exec", "Global", "Expr", "Pass", "Break", "Continue")
_expr_nodes = set(getattr(ast, name) for name in _exprs)
_stmt_nodes = set(getattr(ast, name) for name in _stmts)
def _is_ast_expr(node):
return node.__class__ in _expr_nodes
def _is_ast_stmt(node):
return node.__class__ in _stmt_nodes
else:
def _is_ast_expr(node):
return isinstance(node, ast.expr)
def _is_ast_stmt(node):
return isinstance(node, ast.stmt)
class Failure(Exception):
"""Error found while interpreting AST."""
def __init__(self, explanation=""):
self.cause = sys.exc_info()
self.explanation = explanation
def interpret(source, frame, should_fail=False):
mod = ast.parse(source)
visitor = DebugInterpreter(frame)
try:
visitor.visit(mod)
except Failure:
failure = sys.exc_info()[1]
return getfailure(failure)
if should_fail:
return ("(assertion failed, but when it was re-run for "
"printing intermediate values, it did not fail. Suggestions: "
"compute assert expression before the assert or use --no-assert)")
def run(offending_line, frame=None):
if frame is None:
frame = py.code.Frame(sys._getframe(1))
return interpret(offending_line, frame)
def getfailure(failure):
explanation = _format_explanation(failure.explanation)
value = failure.cause[1]
if str(value):
lines = explanation.splitlines()
if not lines:
lines.append("")
lines[0] += " << %s" % (value,)
explanation = "\n".join(lines)
text = "%s: %s" % (failure.cause[0].__name__, explanation)
if text.startswith("AssertionError: assert "):
text = text[16:]
return text
operator_map = {
ast.BitOr : "|",
ast.BitXor : "^",
ast.BitAnd : "&",
ast.LShift : "<<",
ast.RShift : ">>",
ast.Add : "+",
ast.Sub : "-",
ast.Mult : "*",
ast.Div : "/",
ast.FloorDiv : "//",
ast.Mod : "%",
ast.Eq : "==",
ast.NotEq : "!=",
ast.Lt : "<",
ast.LtE : "<=",
ast.Gt : ">",
ast.GtE : ">=",
ast.Pow : "**",
ast.Is : "is",
ast.IsNot : "is not",
ast.In : "in",
ast.NotIn : "not in"
}
unary_map = {
ast.Not : "not %s",
ast.Invert : "~%s",
ast.USub : "-%s",
ast.UAdd : "+%s"
}
class DebugInterpreter(ast.NodeVisitor):
"""Interpret AST nodes to gleam useful debugging information. """
def __init__(self, frame):
self.frame = frame
def generic_visit(self, node):
# Fallback when we don't have a special implementation.
if _is_ast_expr(node):
mod = ast.Expression(node)
co = self._compile(mod)
try:
result = self.frame.eval(co)
except Exception:
raise Failure()
explanation = self.frame.repr(result)
return explanation, result
elif _is_ast_stmt(node):
mod = ast.Module([node])
co = self._compile(mod, "exec")
try:
self.frame.exec_(co)
except Exception:
raise Failure()
return None, None
else:
raise AssertionError("can't handle %s" %(node,))
def _compile(self, source, mode="eval"):
return compile(source, "<assertion interpretation>", mode)
def visit_Expr(self, expr):
return self.visit(expr.value)
def visit_Module(self, mod):
for stmt in mod.body:
self.visit(stmt)
def visit_Name(self, name):
explanation, result = self.generic_visit(name)
# See if the name is local.
source = "%r in locals() is not globals()" % (name.id,)
co = self._compile(source)
try:
local = self.frame.eval(co)
except Exception:
# have to assume it isn't
local = False
if not local:
return name.id, result
return explanation, result
def visit_Compare(self, comp):
left = comp.left
left_explanation, left_result = self.visit(left)
for op, next_op in zip(comp.ops, comp.comparators):
next_explanation, next_result = self.visit(next_op)
op_symbol = operator_map[op.__class__]
explanation = "%s %s %s" % (left_explanation, op_symbol,
next_explanation)
source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
co = self._compile(source)
try:
result = self.frame.eval(co, __exprinfo_left=left_result,
__exprinfo_right=next_result)
except Exception:
raise Failure(explanation)
try:
if not result:
break
except KeyboardInterrupt:
raise
except:
break
left_explanation, left_result = next_explanation, next_result
rcomp = py.code._reprcompare
if rcomp:
res = rcomp(op_symbol, left_result, next_result)
if res:
explanation = res
return explanation, result
def visit_BoolOp(self, boolop):
is_or = isinstance(boolop.op, ast.Or)
explanations = []
for operand in boolop.values:
explanation, result = self.visit(operand)
explanations.append(explanation)
if result == is_or:
break
name = is_or and " or " or " and "
explanation = "(" + name.join(explanations) + ")"
return explanation, result
def visit_UnaryOp(self, unary):
pattern = unary_map[unary.op.__class__]
operand_explanation, operand_result = self.visit(unary.operand)
explanation = pattern % (operand_explanation,)
co = self._compile(pattern % ("__exprinfo_expr",))
try:
result = self.frame.eval(co, __exprinfo_expr=operand_result)
except Exception:
raise Failure(explanation)
return explanation, result
def visit_BinOp(self, binop):
left_explanation, left_result = self.visit(binop.left)
right_explanation, right_result = self.visit(binop.right)
symbol = operator_map[binop.op.__class__]
explanation = "(%s %s %s)" % (left_explanation, symbol,
right_explanation)
source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
co = self._compile(source)
try:
result = self.frame.eval(co, __exprinfo_left=left_result,
__exprinfo_right=right_result)
except Exception:
raise Failure(explanation)
return explanation, result
def visit_Call(self, call):
func_explanation, func = self.visit(call.func)
arg_explanations = []
ns = {"__exprinfo_func" : func}
arguments = []
for arg in call.args:
arg_explanation, arg_result = self.visit(arg)
arg_name = "__exprinfo_%s" % (len(ns),)
ns[arg_name] = arg_result
arguments.append(arg_name)
arg_explanations.append(arg_explanation)
for keyword in call.keywords:
arg_explanation, arg_result = self.visit(keyword.value)
arg_name = "__exprinfo_%s" % (len(ns),)
ns[arg_name] = arg_result
keyword_source = "%s=%%s" % (keyword.arg)
arguments.append(keyword_source % (arg_name,))
arg_explanations.append(keyword_source % (arg_explanation,))
if call.starargs:
arg_explanation, arg_result = self.visit(call.starargs)
arg_name = "__exprinfo_star"
ns[arg_name] = arg_result
arguments.append("*%s" % (arg_name,))
arg_explanations.append("*%s" % (arg_explanation,))
if call.kwargs:
arg_explanation, arg_result = self.visit(call.kwargs)
arg_name = "__exprinfo_kwds"
ns[arg_name] = arg_result
arguments.append("**%s" % (arg_name,))
arg_explanations.append("**%s" % (arg_explanation,))
args_explained = ", ".join(arg_explanations)
explanation = "%s(%s)" % (func_explanation, args_explained)
args = ", ".join(arguments)
source = "__exprinfo_func(%s)" % (args,)
co = self._compile(source)
try:
result = self.frame.eval(co, **ns)
except Exception:
raise Failure(explanation)
pattern = "%s\n{%s = %s\n}"
rep = self.frame.repr(result)
explanation = pattern % (rep, rep, explanation)
return explanation, result
def _is_builtin_name(self, name):
pattern = "%r not in globals() and %r not in locals()"
source = pattern % (name.id, name.id)
co = self._compile(source)
try:
return self.frame.eval(co)
except Exception:
return False
def visit_Attribute(self, attr):
if not isinstance(attr.ctx, ast.Load):
return self.generic_visit(attr)
source_explanation, source_result = self.visit(attr.value)
explanation = "%s.%s" % (source_explanation, attr.attr)
source = "__exprinfo_expr.%s" % (attr.attr,)
co = self._compile(source)
try:
result = self.frame.eval(co, __exprinfo_expr=source_result)
except Exception:
raise Failure(explanation)
explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result),
self.frame.repr(result),
source_explanation, attr.attr)
# Check if the attr is from an instance.
source = "%r in getattr(__exprinfo_expr, '__dict__', {})"
source = source % (attr.attr,)
co = self._compile(source)
try:
from_instance = self.frame.eval(co, __exprinfo_expr=source_result)
except Exception:
from_instance = True
if from_instance:
rep = self.frame.repr(result)
pattern = "%s\n{%s = %s\n}"
explanation = pattern % (rep, rep, explanation)
return explanation, result
def visit_Assert(self, assrt):
test_explanation, test_result = self.visit(assrt.test)
if test_explanation.startswith("False\n{False =") and \
test_explanation.endswith("\n"):
test_explanation = test_explanation[15:-2]
explanation = "assert %s" % (test_explanation,)
if not test_result:
try:
raise BuiltinAssertionError
except Exception:
raise Failure(explanation)
return explanation, test_result
def visit_Assign(self, assign):
value_explanation, value_result = self.visit(assign.value)
explanation = "... = %s" % (value_explanation,)
name = ast.Name("__exprinfo_expr", ast.Load(),
lineno=assign.value.lineno,
col_offset=assign.value.col_offset)
new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno,
col_offset=assign.col_offset)
mod = ast.Module([new_assign])
co = self._compile(mod, "exec")
try:
self.frame.exec_(co, __exprinfo_expr=value_result)
except Exception:
raise Failure(explanation)
return explanation, value_result
| mpl-2.0 |
marcusramberg/dotfiles | bin/.venv-ansible-venv/lib/python2.6/site-packages/paramiko/agent.py | 5 | 11531 | # Copyright (C) 2003-2007 John Rochester <[email protected]>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
SSH Agent interface
"""
import os
import socket
import struct
import sys
import threading
import time
import tempfile
import stat
from select import select
from paramiko.common import asbytes, io_sleep
from paramiko.py3compat import byte_chr
from paramiko.ssh_exception import SSHException
from paramiko.message import Message
from paramiko.pkey import PKey
from paramiko.util import retry_on_signal
cSSH2_AGENTC_REQUEST_IDENTITIES = byte_chr(11)
SSH2_AGENT_IDENTITIES_ANSWER = 12
cSSH2_AGENTC_SIGN_REQUEST = byte_chr(13)
SSH2_AGENT_SIGN_RESPONSE = 14
class AgentSSH(object):
def __init__(self):
self._conn = None
self._keys = ()
def get_keys(self):
"""
Return the list of keys available through the SSH agent, if any. If
no SSH agent was running (or it couldn't be contacted), an empty list
will be returned.
:return:
a tuple of `.AgentKey` objects representing keys available on the
SSH agent
"""
return self._keys
def _connect(self, conn):
self._conn = conn
ptype, result = self._send_message(cSSH2_AGENTC_REQUEST_IDENTITIES)
if ptype != SSH2_AGENT_IDENTITIES_ANSWER:
raise SSHException('could not get keys from ssh-agent')
keys = []
for i in range(result.get_int()):
keys.append(AgentKey(self, result.get_binary()))
result.get_string()
self._keys = tuple(keys)
def _close(self):
if self._conn is not None:
self._conn.close()
self._conn = None
self._keys = ()
def _send_message(self, msg):
msg = asbytes(msg)
self._conn.send(struct.pack('>I', len(msg)) + msg)
l = self._read_all(4)
msg = Message(self._read_all(struct.unpack('>I', l)[0]))
return ord(msg.get_byte()), msg
def _read_all(self, wanted):
result = self._conn.recv(wanted)
while len(result) < wanted:
if len(result) == 0:
raise SSHException('lost ssh-agent')
extra = self._conn.recv(wanted - len(result))
if len(extra) == 0:
raise SSHException('lost ssh-agent')
result += extra
return result
class AgentProxyThread(threading.Thread):
"""
Class in charge of communication between two channels.
"""
def __init__(self, agent):
threading.Thread.__init__(self, target=self.run)
self._agent = agent
self._exit = False
def run(self):
try:
(r, addr) = self.get_connection()
self.__inr = r
self.__addr = addr
self._agent.connect()
self._communicate()
except:
#XXX Not sure what to do here ... raise or pass ?
raise
def _communicate(self):
import fcntl
oldflags = fcntl.fcntl(self.__inr, fcntl.F_GETFL)
fcntl.fcntl(self.__inr, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
while not self._exit:
events = select([self._agent._conn, self.__inr], [], [], 0.5)
for fd in events[0]:
if self._agent._conn == fd:
data = self._agent._conn.recv(512)
if len(data) != 0:
self.__inr.send(data)
else:
self._close()
break
elif self.__inr == fd:
data = self.__inr.recv(512)
if len(data) != 0:
self._agent._conn.send(data)
else:
self._close()
break
time.sleep(io_sleep)
def _close(self):
self._exit = True
self.__inr.close()
self._agent._conn.close()
class AgentLocalProxy(AgentProxyThread):
"""
    Class used to talk to the local SSH agent on behalf of a remote
    fake agent (communication goes over a Unix socket, for example).
"""
def __init__(self, agent):
AgentProxyThread.__init__(self, agent)
def get_connection(self):
"""
Return a pair of socket object and string address.
May block!
"""
conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
conn.bind(self._agent._get_filename())
conn.listen(1)
(r, addr) = conn.accept()
return r, addr
except:
raise
class AgentRemoteProxy(AgentProxyThread):
"""
    Class used to talk to a remote SSH agent over an SSH channel.
"""
def __init__(self, agent, chan):
AgentProxyThread.__init__(self, agent)
self.__chan = chan
def get_connection(self):
return self.__chan, None
class AgentClientProxy(object):
"""
    Class proxying requests as a client:

    #. the client asks for a request_forward_agent()
    #. the server creates a proxy and a fake SSH agent
    #. the server asks to establish a connection when needed,
       calling the forward_agent_handler on the client side
    #. the forward_agent_handler launches a thread that connects
       the remote fake agent and the local agent
    #. communication occurs ...
"""
def __init__(self, chanRemote):
self._conn = None
self.__chanR = chanRemote
self.thread = AgentRemoteProxy(self, chanRemote)
self.thread.start()
def __del__(self):
self.close()
def connect(self):
"""
Method automatically called by ``AgentProxyThread.run``.
"""
if ('SSH_AUTH_SOCK' in os.environ) and (sys.platform != 'win32'):
conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
retry_on_signal(lambda: conn.connect(os.environ['SSH_AUTH_SOCK']))
except:
# probably a dangling env var: the ssh agent is gone
return
elif sys.platform == 'win32':
import paramiko.win_pageant as win_pageant
if win_pageant.can_talk_to_agent():
conn = win_pageant.PageantConnection()
else:
return
else:
# no agent support
return
self._conn = conn
def close(self):
"""
        Close the current connection and terminate the agent.
        Should be called manually.
"""
if hasattr(self, "thread"):
self.thread._exit = True
self.thread.join(1000)
if self._conn is not None:
self._conn.close()
class AgentServerProxy(AgentSSH):
"""
:param .Transport t: Transport used for SSH Agent communication forwarding
:raises SSHException: mostly if we lost the agent
"""
def __init__(self, t):
AgentSSH.__init__(self)
self.__t = t
self._dir = tempfile.mkdtemp('sshproxy')
os.chmod(self._dir, stat.S_IRWXU)
self._file = self._dir + '/sshproxy.ssh'
self.thread = AgentLocalProxy(self)
self.thread.start()
def __del__(self):
self.close()
def connect(self):
conn_sock = self.__t.open_forward_agent_channel()
if conn_sock is None:
raise SSHException('lost ssh-agent')
conn_sock.set_name('auth-agent')
self._connect(conn_sock)
def close(self):
"""
        Terminate the agent, clean up its files, and close connections.
        Should be called manually.
"""
os.remove(self._file)
os.rmdir(self._dir)
self.thread._exit = True
self.thread.join(1000)
self._close()
def get_env(self):
"""
        Helper for the environment under Unix.
        :return:
            a dict containing the ``SSH_AUTH_SOCK`` environment variable
"""
return {'SSH_AUTH_SOCK': self._get_filename()}
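    # Hedged example of consuming this helper (assumes `proxy` is an
    # AgentServerProxy instance and that a child process should talk to the
    # forwarded agent; `subprocess` is not imported by this module):
    #
    #     env = dict(os.environ)
    #     env.update(proxy.get_env())
    #     subprocess.call(['ssh-add', '-l'], env=env)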
def _get_filename(self):
return self._file
class AgentRequestHandler(object):
def __init__(self, chanClient):
self._conn = None
self.__chanC = chanClient
chanClient.request_forward_agent(self._forward_agent_handler)
self.__clientProxys = []
def _forward_agent_handler(self, chanRemote):
self.__clientProxys.append(AgentClientProxy(chanRemote))
def __del__(self):
self.close()
def close(self):
for p in self.__clientProxys:
p.close()
class Agent(AgentSSH):
"""
Client interface for using private keys from an SSH agent running on the
local machine. If an SSH agent is running, this class can be used to
    connect to it and retrieve `.PKey` objects which can be used when
attempting to authenticate to remote SSH servers.
Upon initialization, a session with the local machine's SSH agent is
opened, if one is running. If no agent is running, initialization will
succeed, but `get_keys` will return an empty tuple.
:raises SSHException:
if an SSH agent is found, but speaks an incompatible protocol
"""
def __init__(self):
AgentSSH.__init__(self)
if ('SSH_AUTH_SOCK' in os.environ) and (sys.platform != 'win32'):
conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
conn.connect(os.environ['SSH_AUTH_SOCK'])
except:
# probably a dangling env var: the ssh agent is gone
return
elif sys.platform == 'win32':
from . import win_pageant
if win_pageant.can_talk_to_agent():
conn = win_pageant.PageantConnection()
else:
return
else:
# no agent support
return
self._connect(conn)
def close(self):
"""
Close the SSH agent connection.
"""
self._close()
class AgentKey(PKey):
"""
Private key held in a local SSH agent. This type of key can be used for
authenticating to a remote server (signing). Most other key operations
work as expected.
"""
def __init__(self, agent, blob):
self.agent = agent
self.blob = blob
self.name = Message(blob).get_text()
def asbytes(self):
return self.blob
def __str__(self):
return self.asbytes()
def get_name(self):
return self.name
def sign_ssh_data(self, data):
msg = Message()
msg.add_byte(cSSH2_AGENTC_SIGN_REQUEST)
msg.add_string(self.blob)
msg.add_string(data)
msg.add_int(0)
ptype, result = self.agent._send_message(msg)
if ptype != SSH2_AGENT_SIGN_RESPONSE:
raise SSHException('key cannot be used for signing')
return result.get_binary()
| mit |
davenovak/mtasa-blue | vendor/google-breakpad/src/third_party/protobuf/protobuf/gtest/run_tests.py | 199 | 2336 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Runs the specified tests for Google Test.
This script requires Python 2.3 or higher. To learn the usage, run it
with -h.
"""
import os
import sys
SCRIPT_DIR = os.path.dirname(__file__) or '.'
sys.path.append(os.path.join(SCRIPT_DIR, 'test'))
import run_tests_util
def _Main():
"""Runs all tests for Google Test."""
options, args = run_tests_util.ParseArgs('gtest')
test_runner = run_tests_util.TestRunner(script_dir=SCRIPT_DIR)
tests = test_runner.GetTestsToRun(args,
options.configurations,
options.built_configurations)
if not tests:
sys.exit(1) # Incorrect parameters given, abort execution.
sys.exit(test_runner.RunTests(tests[0], tests[1]))
if __name__ == '__main__':
_Main()
| gpl-3.0 |
lancezlin/pyjs | examples/showcase/src/demos_widgets/textArea.py | 6 | 2160 | """
The ``ui.TextArea`` class implements a standard multi-line input field.
The ``setCharacterWidth()`` method sets the width of the input field, in
characters, while ``setVisibleLines()`` sets the height of the field, in lines.
Use the ``getText()`` method to retrieve the field's current text, and
``setText()`` to set it. There are many other useful methods defined by
``ui.TextArea`` and its parent classes; see the module documentation for more
details.
If you want a TextArea that adjusts its size to fit its content, have a look
at the AutoTextArea below. It also shows how to catch certain keystrokes, such as ctrl-enter.
"""
from pyjamas import Window  # used by TextAreaDemo.done() below
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.TextArea import TextArea
import math
class TextAreaDemo(VerticalPanel):
def __init__(self):
VerticalPanel.__init__(self)
self.setSpacing("10px")
field = TextArea()
field.setCharacterWidth(20)
field.setVisibleLines(4)
self.add(field)
self.add(AutoTextArea(self))
def done(self):
Window.alert("Ctrl-enter!")
class AutoTextArea(TextArea):
def __init__(self, doneHandler):
TextArea.__init__(self)
        self.doneHandler = doneHandler #this handler will be called when the user presses ctrl-enter
def onBrowserEvent(self, event):
if event.type == "keydown":
if event.keyCode == 13 and event.ctrlKey: #ctrl-enter
self.doneHandler.done()
else:
self.autoHeight() #if not ctrl-enter, adjust height
def autoHeight(self):
#here is some really weak code to calculate the height of the textarea.
        #it's not very accurate; I'm sure you can do better! Remember to "import math".
lines = self.getText().split("\n")
c = 0
for i in lines:
fsize = 9
a = float(len(i)*fsize) / float((self.getOffsetWidth()/fsize)*fsize)
b = int(math.ceil(a))
c += max(1,b)
newHeight = str(16*(2+c)) + "px"
self.setHeight(newHeight)
def setText(self, text):
TextArea.setText(self, text)
self.autoHeight()
| apache-2.0 |
thundernet8/WRGameVideos-API | venv/lib/python2.7/site-packages/jinja2/utils.py | 323 | 16560 | # -*- coding: utf-8 -*-
"""
jinja2.utils
~~~~~~~~~~~~
Utility functions.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
import errno
from collections import deque
from threading import Lock
from jinja2._compat import text_type, string_types, implements_iterator, \
url_quote
_word_split_re = re.compile(r'(\s+)')
_punctuation_re = re.compile(
'^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % (
        '|'.join(map(re.escape, ('(', '<', '&lt;'))),
        '|'.join(map(re.escape, ('.', ',', ')', '>', '\n', '&gt;')))
)
)
_simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
_entity_re = re.compile(r'&([^;]+);')
_letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
_digits = '0123456789'
# special singleton representing missing values for the runtime
missing = type('MissingType', (), {'__repr__': lambda x: 'missing'})()
# internal code
internal_code = set()
concat = u''.join
def contextfunction(f):
"""This decorator can be used to mark a function or method context callable.
A context callable is passed the active :class:`Context` as first argument when
called from the template. This is useful if a function wants to get access
to the context or functions provided on the context object. For example
a function that returns a sorted list of template variables the current
template exports could look like this::
@contextfunction
def get_exported_names(context):
return sorted(context.exported_vars)
"""
f.contextfunction = True
return f
def evalcontextfunction(f):
"""This decorator can be used to mark a function or method as an eval
context callable. This is similar to the :func:`contextfunction`
but instead of passing the context, an evaluation context object is
passed. For more information about the eval context, see
:ref:`eval-context`.
.. versionadded:: 2.4
"""
f.evalcontextfunction = True
return f
def environmentfunction(f):
"""This decorator can be used to mark a function or method as environment
callable. This decorator works exactly like the :func:`contextfunction`
decorator just that the first argument is the active :class:`Environment`
and not context.
"""
f.environmentfunction = True
return f
def internalcode(f):
"""Marks the function as internally used"""
internal_code.add(f.__code__)
return f
def is_undefined(obj):
"""Check if the object passed is undefined. This does nothing more than
performing an instance check against :class:`Undefined` but looks nicer.
This can be used for custom filters or tests that want to react to
undefined variables. For example a custom default filter can look like
this::
def default(var, default=''):
if is_undefined(var):
return default
return var
"""
from jinja2.runtime import Undefined
return isinstance(obj, Undefined)
def consume(iterable):
"""Consumes an iterable without doing anything with it."""
for event in iterable:
pass
def clear_caches():
"""Jinja2 keeps internal caches for environments and lexers. These are
used so that Jinja2 doesn't have to recreate environments and lexers all
the time. Normally you don't have to care about that but if you are
    measuring memory consumption you may want to clean the caches.
"""
from jinja2.environment import _spontaneous_environments
from jinja2.lexer import _lexer_cache
_spontaneous_environments.clear()
_lexer_cache.clear()
def import_string(import_name, silent=False):
"""Imports an object based on a string. This is useful if you want to
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
If the `silent` is True the return value will be `None` if the import
fails.
:return: imported object
"""
try:
if ':' in import_name:
module, obj = import_name.split(':', 1)
elif '.' in import_name:
items = import_name.split('.')
module = '.'.join(items[:-1])
obj = items[-1]
else:
return __import__(import_name)
return getattr(__import__(module, None, None, [obj]), obj)
except (ImportError, AttributeError):
if not silent:
raise
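# Illustrative calls (added note, not in the original source); both notations
# described in the docstring resolve to the same object:
#
#     escape_fn = import_string('xml.sax.saxutils:escape')
#     escape_fn = import_string('xml.sax.saxutils.escape')
#     maybe_obj = import_string('no.such.module', silent=True)   # -> None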
def open_if_exists(filename, mode='rb'):
"""Returns a file descriptor for the filename if that file exists,
otherwise `None`.
"""
try:
return open(filename, mode)
except IOError as e:
if e.errno not in (errno.ENOENT, errno.EISDIR, errno.EINVAL):
raise
def object_type_repr(obj):
"""Returns the name of the object's type. For some recognized
singletons the name of the object is returned instead. (For
example for `None` and `Ellipsis`).
"""
if obj is None:
return 'None'
elif obj is Ellipsis:
return 'Ellipsis'
# __builtin__ in 2.x, builtins in 3.x
if obj.__class__.__module__ in ('__builtin__', 'builtins'):
name = obj.__class__.__name__
else:
name = obj.__class__.__module__ + '.' + obj.__class__.__name__
return '%s object' % name
def pformat(obj, verbose=False):
"""Prettyprint an object. Either use the `pretty` library or the
builtin `pprint`.
"""
try:
from pretty import pretty
return pretty(obj, verbose=verbose)
except ImportError:
from pprint import pformat
return pformat(obj)
def urlize(text, trim_url_limit=None, nofollow=False, target=None):
"""Converts any URLs in text into clickable links. Works on http://,
https:// and www. links. Links can have trailing punctuation (periods,
commas, close-parens) and leading punctuation (opening parens) and
it'll still do the right thing.
If trim_url_limit is not None, the URLs in link text will be limited
to trim_url_limit characters.
If nofollow is True, the URLs in link text will get a rel="nofollow"
attribute.
If target is not None, a target attribute will be added to the link.
"""
trim_url = lambda x, limit=trim_url_limit: limit is not None \
and (x[:limit] + (len(x) >=limit and '...'
or '')) or x
words = _word_split_re.split(text_type(escape(text)))
nofollow_attr = nofollow and ' rel="nofollow"' or ''
if target is not None and isinstance(target, string_types):
target_attr = ' target="%s"' % target
else:
target_attr = ''
for i, word in enumerate(words):
match = _punctuation_re.match(word)
if match:
lead, middle, trail = match.groups()
if middle.startswith('www.') or (
'@' not in middle and
not middle.startswith('http://') and
not middle.startswith('https://') and
len(middle) > 0 and
middle[0] in _letters + _digits and (
middle.endswith('.org') or
middle.endswith('.net') or
middle.endswith('.com')
)):
middle = '<a href="http://%s"%s%s>%s</a>' % (middle,
nofollow_attr, target_attr, trim_url(middle))
if middle.startswith('http://') or \
middle.startswith('https://'):
middle = '<a href="%s"%s%s>%s</a>' % (middle,
nofollow_attr, target_attr, trim_url(middle))
if '@' in middle and not middle.startswith('www.') and \
not ':' in middle and _simple_email_re.match(middle):
middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
if lead + middle + trail != word:
words[i] = lead + middle + trail
return u''.join(words)
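# Rough usage sketch (added for illustration; output is indicative):
#
#     urlize('see www.example.com for details', nofollow=True)
#     # -> u'see <a href="http://www.example.com" rel="nofollow">www.example.com</a> for details'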
def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
"""Generate some lorem ipsum for the template."""
from jinja2.constants import LOREM_IPSUM_WORDS
from random import choice, randrange
words = LOREM_IPSUM_WORDS.split()
result = []
for _ in range(n):
next_capitalized = True
last_comma = last_fullstop = 0
word = None
last = None
p = []
# each paragraph contains out of 20 to 100 words.
for idx, _ in enumerate(range(randrange(min, max))):
while True:
word = choice(words)
if word != last:
last = word
break
if next_capitalized:
word = word.capitalize()
next_capitalized = False
# add commas
if idx - randrange(3, 8) > last_comma:
last_comma = idx
last_fullstop += 2
word += ','
# add end of sentences
if idx - randrange(10, 20) > last_fullstop:
last_comma = last_fullstop = idx
word += '.'
next_capitalized = True
p.append(word)
# ensure that the paragraph ends with a dot.
p = u' '.join(p)
if p.endswith(','):
p = p[:-1] + '.'
elif not p.endswith('.'):
p += '.'
result.append(p)
if not html:
return u'\n\n'.join(result)
return Markup(u'\n'.join(u'<p>%s</p>' % escape(x) for x in result))
def unicode_urlencode(obj, charset='utf-8', for_qs=False):
"""URL escapes a single bytestring or unicode string with the
given charset if applicable to URL safe quoting under all rules
that need to be considered under all supported Python versions.
If non strings are provided they are converted to their unicode
representation first.
"""
if not isinstance(obj, string_types):
obj = text_type(obj)
if isinstance(obj, text_type):
obj = obj.encode(charset)
safe = for_qs and b'' or b'/'
rv = text_type(url_quote(obj, safe))
if for_qs:
rv = rv.replace('%20', '+')
return rv
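# Illustrative behaviour (added note, not part of the original source):
#
#     unicode_urlencode(u'a b/c')               # -> u'a%20b/c' ('/' is kept safe)
#     unicode_urlencode(u'a b', for_qs=True)    # -> u'a+b'     (query-string form)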
class LRUCache(object):
"""A simple LRU Cache implementation."""
# this is fast for small capacities (something below 1000) but doesn't
# scale. But as long as it's only used as storage for templates this
# won't do any harm.
def __init__(self, capacity):
self.capacity = capacity
self._mapping = {}
self._queue = deque()
self._postinit()
def _postinit(self):
# alias all queue methods for faster lookup
self._popleft = self._queue.popleft
self._pop = self._queue.pop
self._remove = self._queue.remove
self._wlock = Lock()
self._append = self._queue.append
def __getstate__(self):
return {
'capacity': self.capacity,
'_mapping': self._mapping,
'_queue': self._queue
}
def __setstate__(self, d):
self.__dict__.update(d)
self._postinit()
def __getnewargs__(self):
return (self.capacity,)
def copy(self):
"""Return a shallow copy of the instance."""
rv = self.__class__(self.capacity)
rv._mapping.update(self._mapping)
rv._queue = deque(self._queue)
return rv
def get(self, key, default=None):
"""Return an item from the cache dict or `default`"""
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default=None):
"""Set `default` if the key is not in the cache otherwise
leave unchanged. Return the value of this key.
"""
self._wlock.acquire()
try:
try:
return self[key]
except KeyError:
self[key] = default
return default
finally:
self._wlock.release()
def clear(self):
"""Clear the cache."""
self._wlock.acquire()
try:
self._mapping.clear()
self._queue.clear()
finally:
self._wlock.release()
def __contains__(self, key):
"""Check if a key exists in this cache."""
return key in self._mapping
def __len__(self):
"""Return the current size of the cache."""
return len(self._mapping)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self._mapping
)
def __getitem__(self, key):
"""Get an item from the cache. Moves the item up so that it has the
highest priority then.
Raise a `KeyError` if it does not exist.
"""
self._wlock.acquire()
try:
rv = self._mapping[key]
if self._queue[-1] != key:
try:
self._remove(key)
except ValueError:
# if something removed the key from the container
# when we read, ignore the ValueError that we would
# get otherwise.
pass
self._append(key)
return rv
finally:
self._wlock.release()
def __setitem__(self, key, value):
"""Sets the value for an item. Moves the item up so that it
has the highest priority then.
"""
self._wlock.acquire()
try:
if key in self._mapping:
self._remove(key)
elif len(self._mapping) == self.capacity:
del self._mapping[self._popleft()]
self._append(key)
self._mapping[key] = value
finally:
self._wlock.release()
def __delitem__(self, key):
"""Remove an item from the cache dict.
Raise a `KeyError` if it does not exist.
"""
self._wlock.acquire()
try:
del self._mapping[key]
try:
self._remove(key)
except ValueError:
# __getitem__ is not locked, it might happen
pass
finally:
self._wlock.release()
def items(self):
"""Return a list of items."""
result = [(key, self._mapping[key]) for key in list(self._queue)]
result.reverse()
return result
def iteritems(self):
"""Iterate over all items."""
return iter(self.items())
def values(self):
"""Return a list of all values."""
return [x[1] for x in self.items()]
def itervalue(self):
"""Iterate over all values."""
return iter(self.values())
def keys(self):
"""Return a list of all keys ordered by most recent usage."""
return list(self)
def iterkeys(self):
"""Iterate over all keys in the cache dict, ordered by
the most recent usage.
"""
return reversed(tuple(self._queue))
__iter__ = iterkeys
def __reversed__(self):
"""Iterate over the values in the cache dict, oldest items
coming first.
"""
return iter(tuple(self._queue))
__copy__ = copy
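# Minimal illustration of the eviction behaviour (added usage note, not part of
# the library code):
#
#     cache = LRUCache(2)
#     cache['a'] = 1
#     cache['b'] = 2
#     cache['a']          # touching 'a' makes it the most recently used entry
#     cache['c'] = 3      # evicts 'b', the least recently used entry
#     'b' in cache        # -> False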
# register the LRU cache as mutable mapping if possible
try:
from collections import MutableMapping
MutableMapping.register(LRUCache)
except ImportError:
pass
@implements_iterator
class Cycler(object):
"""A cycle helper for templates."""
def __init__(self, *items):
if not items:
raise RuntimeError('at least one item has to be provided')
self.items = items
self.reset()
def reset(self):
"""Resets the cycle."""
self.pos = 0
@property
def current(self):
"""Returns the current item."""
return self.items[self.pos]
def __next__(self):
"""Goes one item ahead and returns it."""
rv = self.current
self.pos = (self.pos + 1) % len(self.items)
return rv
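# Usage sketch (added for illustration): a Cycler walks through its items in
# order and wraps around, e.g. for alternating row classes in templates:
#
#     row = Cycler('odd', 'even')
#     next(row)       # -> 'odd'
#     next(row)       # -> 'even'
#     next(row)       # -> 'odd' again; row.current peeks without advancing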
class Joiner(object):
"""A joining helper for templates."""
def __init__(self, sep=u', '):
self.sep = sep
self.used = False
def __call__(self):
if not self.used:
self.used = True
return u''
return self.sep
# Imported here because that's where it was in the past
from markupsafe import Markup, escape, soft_unicode
| gpl-2.0 |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/numpy/ma/tests/test_mrecords.py | 35 | 20707 | # pylint: disable-msg=W0611, W0612, W0511,R0201
"""Tests suite for mrecords.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
"""
from __future__ import division, absolute_import, print_function
import warnings
import pickle
import numpy as np
import numpy.ma as ma
from numpy import recarray
from numpy.compat import asbytes, asbytes_nested
from numpy.ma import masked, nomask
from numpy.testing import TestCase, run_module_suite, temppath
from numpy.core.records import (
fromrecords as recfromrecords, fromarrays as recfromarrays
)
from numpy.ma.mrecords import (
MaskedRecords, mrecarray, fromarrays, fromtextfile, fromrecords,
addfield
)
from numpy.ma.testutils import (
assert_, assert_equal,
assert_equal_records,
)
class TestMRecords(TestCase):
# Base test class for MaskedArrays.
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
self.setup()
def setup(self):
# Generic setup
ilist = [1, 2, 3, 4, 5]
flist = [1.1, 2.2, 3.3, 4.4, 5.5]
slist = asbytes_nested(['one', 'two', 'three', 'four', 'five'])
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mask = [0, 1, 0, 0, 1]
self.base = ma.array(list(zip(ilist, flist, slist)),
mask=mask, dtype=ddtype)
def test_byview(self):
# Test creation by view
base = self.base
mbase = base.view(mrecarray)
assert_equal(mbase.recordmask, base.recordmask)
assert_equal_records(mbase._mask, base._mask)
assert_(isinstance(mbase._data, recarray))
assert_equal_records(mbase._data, base._data.view(recarray))
for field in ('a', 'b', 'c'):
assert_equal(base[field], mbase[field])
assert_equal_records(mbase.view(mrecarray), mbase)
def test_get(self):
# Tests fields retrieval
base = self.base.copy()
mbase = base.view(mrecarray)
# As fields..........
for field in ('a', 'b', 'c'):
assert_equal(getattr(mbase, field), mbase[field])
assert_equal(base[field], mbase[field])
# as elements .......
mbase_first = mbase[0]
assert_(isinstance(mbase_first, mrecarray))
assert_equal(mbase_first.dtype, mbase.dtype)
assert_equal(mbase_first.tolist(), (1, 1.1, asbytes('one')))
# Used to be mask, now it's recordmask
assert_equal(mbase_first.recordmask, nomask)
assert_equal(mbase_first._mask.item(), (False, False, False))
assert_equal(mbase_first['a'], mbase['a'][0])
mbase_last = mbase[-1]
assert_(isinstance(mbase_last, mrecarray))
assert_equal(mbase_last.dtype, mbase.dtype)
assert_equal(mbase_last.tolist(), (None, None, None))
# Used to be mask, now it's recordmask
assert_equal(mbase_last.recordmask, True)
assert_equal(mbase_last._mask.item(), (True, True, True))
assert_equal(mbase_last['a'], mbase['a'][-1])
assert_((mbase_last['a'] is masked))
# as slice ..........
mbase_sl = mbase[:2]
assert_(isinstance(mbase_sl, mrecarray))
assert_equal(mbase_sl.dtype, mbase.dtype)
# Used to be mask, now it's recordmask
assert_equal(mbase_sl.recordmask, [0, 1])
assert_equal_records(mbase_sl.mask,
np.array([(False, False, False),
(True, True, True)],
dtype=mbase._mask.dtype))
assert_equal_records(mbase_sl, base[:2].view(mrecarray))
for field in ('a', 'b', 'c'):
assert_equal(getattr(mbase_sl, field), base[:2][field])
def test_set_fields(self):
# Tests setting fields.
base = self.base.copy()
mbase = base.view(mrecarray)
mbase = mbase.copy()
mbase.fill_value = (999999, 1e20, 'N/A')
# Change the data, the mask should be conserved
mbase.a._data[:] = 5
assert_equal(mbase['a']._data, [5, 5, 5, 5, 5])
assert_equal(mbase['a']._mask, [0, 1, 0, 0, 1])
# Change the elements, and the mask will follow
mbase.a = 1
assert_equal(mbase['a']._data, [1]*5)
assert_equal(ma.getmaskarray(mbase['a']), [0]*5)
        # Used to be _mask, now it's recordmask
assert_equal(mbase.recordmask, [False]*5)
assert_equal(mbase._mask.tolist(),
np.array([(0, 0, 0),
(0, 1, 1),
(0, 0, 0),
(0, 0, 0),
(0, 1, 1)],
dtype=bool))
# Set a field to mask ........................
mbase.c = masked
        # Used to be mask, and now it's still mask!
assert_equal(mbase.c.mask, [1]*5)
assert_equal(mbase.c.recordmask, [1]*5)
assert_equal(ma.getmaskarray(mbase['c']), [1]*5)
assert_equal(ma.getdata(mbase['c']), [asbytes('N/A')]*5)
assert_equal(mbase._mask.tolist(),
np.array([(0, 0, 1),
(0, 1, 1),
(0, 0, 1),
(0, 0, 1),
(0, 1, 1)],
dtype=bool))
# Set fields by slices .......................
mbase = base.view(mrecarray).copy()
mbase.a[3:] = 5
assert_equal(mbase.a, [1, 2, 3, 5, 5])
assert_equal(mbase.a._mask, [0, 1, 0, 0, 0])
mbase.b[3:] = masked
assert_equal(mbase.b, base['b'])
assert_equal(mbase.b._mask, [0, 1, 0, 1, 1])
# Set fields globally..........................
ndtype = [('alpha', '|S1'), ('num', int)]
data = ma.array([('a', 1), ('b', 2), ('c', 3)], dtype=ndtype)
rdata = data.view(MaskedRecords)
val = ma.array([10, 20, 30], mask=[1, 0, 0])
rdata['num'] = val
assert_equal(rdata.num, val)
assert_equal(rdata.num.mask, [1, 0, 0])
def test_set_fields_mask(self):
# Tests setting the mask of a field.
base = self.base.copy()
# This one has already a mask....
mbase = base.view(mrecarray)
mbase['a'][-2] = masked
assert_equal(mbase.a, [1, 2, 3, 4, 5])
assert_equal(mbase.a._mask, [0, 1, 0, 1, 1])
# This one has not yet
mbase = fromarrays([np.arange(5), np.random.rand(5)],
dtype=[('a', int), ('b', float)])
mbase['a'][-2] = masked
assert_equal(mbase.a, [0, 1, 2, 3, 4])
assert_equal(mbase.a._mask, [0, 0, 0, 1, 0])
def test_set_mask(self):
base = self.base.copy()
mbase = base.view(mrecarray)
# Set the mask to True .......................
mbase.mask = masked
assert_equal(ma.getmaskarray(mbase['b']), [1]*5)
assert_equal(mbase['a']._mask, mbase['b']._mask)
assert_equal(mbase['a']._mask, mbase['c']._mask)
assert_equal(mbase._mask.tolist(),
np.array([(1, 1, 1)]*5, dtype=bool))
# Delete the mask ............................
mbase.mask = nomask
assert_equal(ma.getmaskarray(mbase['c']), [0]*5)
assert_equal(mbase._mask.tolist(),
np.array([(0, 0, 0)]*5, dtype=bool))
def test_set_mask_fromarray(self):
base = self.base.copy()
mbase = base.view(mrecarray)
# Sets the mask w/ an array
mbase.mask = [1, 0, 0, 0, 1]
assert_equal(mbase.a.mask, [1, 0, 0, 0, 1])
assert_equal(mbase.b.mask, [1, 0, 0, 0, 1])
assert_equal(mbase.c.mask, [1, 0, 0, 0, 1])
# Yay, once more !
mbase.mask = [0, 0, 0, 0, 1]
assert_equal(mbase.a.mask, [0, 0, 0, 0, 1])
assert_equal(mbase.b.mask, [0, 0, 0, 0, 1])
assert_equal(mbase.c.mask, [0, 0, 0, 0, 1])
def test_set_mask_fromfields(self):
mbase = self.base.copy().view(mrecarray)
nmask = np.array(
[(0, 1, 0), (0, 1, 0), (1, 0, 1), (1, 0, 1), (0, 0, 0)],
dtype=[('a', bool), ('b', bool), ('c', bool)])
mbase.mask = nmask
assert_equal(mbase.a.mask, [0, 0, 1, 1, 0])
assert_equal(mbase.b.mask, [1, 1, 0, 0, 0])
assert_equal(mbase.c.mask, [0, 0, 1, 1, 0])
# Reinitialize and redo
mbase.mask = False
mbase.fieldmask = nmask
assert_equal(mbase.a.mask, [0, 0, 1, 1, 0])
assert_equal(mbase.b.mask, [1, 1, 0, 0, 0])
assert_equal(mbase.c.mask, [0, 0, 1, 1, 0])
def test_set_elements(self):
base = self.base.copy()
# Set an element to mask .....................
mbase = base.view(mrecarray).copy()
mbase[-2] = masked
assert_equal(
mbase._mask.tolist(),
np.array([(0, 0, 0), (1, 1, 1), (0, 0, 0), (1, 1, 1), (1, 1, 1)],
dtype=bool))
# Used to be mask, now it's recordmask!
assert_equal(mbase.recordmask, [0, 1, 0, 1, 1])
# Set slices .................................
mbase = base.view(mrecarray).copy()
mbase[:2] = (5, 5, 5)
assert_equal(mbase.a._data, [5, 5, 3, 4, 5])
assert_equal(mbase.a._mask, [0, 0, 0, 0, 1])
assert_equal(mbase.b._data, [5., 5., 3.3, 4.4, 5.5])
assert_equal(mbase.b._mask, [0, 0, 0, 0, 1])
assert_equal(mbase.c._data,
asbytes_nested(['5', '5', 'three', 'four', 'five']))
assert_equal(mbase.b._mask, [0, 0, 0, 0, 1])
mbase = base.view(mrecarray).copy()
mbase[:2] = masked
assert_equal(mbase.a._data, [1, 2, 3, 4, 5])
assert_equal(mbase.a._mask, [1, 1, 0, 0, 1])
assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 4.4, 5.5])
assert_equal(mbase.b._mask, [1, 1, 0, 0, 1])
assert_equal(mbase.c._data,
asbytes_nested(['one', 'two', 'three', 'four', 'five']))
assert_equal(mbase.b._mask, [1, 1, 0, 0, 1])
def test_setslices_hardmask(self):
# Tests setting slices w/ hardmask.
base = self.base.copy()
mbase = base.view(mrecarray)
mbase.harden_mask()
try:
mbase[-2:] = (5, 5, 5)
assert_equal(mbase.a._data, [1, 2, 3, 5, 5])
assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 5, 5.5])
assert_equal(mbase.c._data,
asbytes_nested(['one', 'two', 'three', '5', 'five']))
assert_equal(mbase.a._mask, [0, 1, 0, 0, 1])
assert_equal(mbase.b._mask, mbase.a._mask)
assert_equal(mbase.b._mask, mbase.c._mask)
except NotImplementedError:
# OK, not implemented yet...
pass
except AssertionError:
raise
else:
raise Exception("Flexible hard masks should be supported !")
# Not using a tuple should crash
try:
mbase[-2:] = 3
except (NotImplementedError, TypeError):
pass
else:
raise TypeError("Should have expected a readable buffer object!")
def test_hardmask(self):
# Test hardmask
base = self.base.copy()
mbase = base.view(mrecarray)
mbase.harden_mask()
self.assertTrue(mbase._hardmask)
mbase.mask = nomask
assert_equal_records(mbase._mask, base._mask)
mbase.soften_mask()
self.assertTrue(not mbase._hardmask)
mbase.mask = nomask
# So, the mask of a field is no longer set to nomask...
assert_equal_records(mbase._mask,
ma.make_mask_none(base.shape, base.dtype))
self.assertTrue(ma.make_mask(mbase['b']._mask) is nomask)
assert_equal(mbase['a']._mask, mbase['b']._mask)
def test_pickling(self):
# Test pickling
base = self.base.copy()
mrec = base.view(mrecarray)
_ = pickle.dumps(mrec)
mrec_ = pickle.loads(_)
assert_equal(mrec_.dtype, mrec.dtype)
assert_equal_records(mrec_._data, mrec._data)
assert_equal(mrec_._mask, mrec._mask)
assert_equal_records(mrec_._mask, mrec._mask)
def test_filled(self):
# Test filling the array
_a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
_b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
_c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8')
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mrec = fromarrays([_a, _b, _c], dtype=ddtype,
fill_value=(99999, 99999., 'N/A'))
mrecfilled = mrec.filled()
assert_equal(mrecfilled['a'], np.array((1, 2, 99999), dtype=int))
assert_equal(mrecfilled['b'], np.array((1.1, 2.2, 99999.),
dtype=float))
assert_equal(mrecfilled['c'], np.array(('one', 'two', 'N/A'),
dtype='|S8'))
def test_tolist(self):
# Test tolist.
_a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
_b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
_c = ma.array(['one', 'two', 'three'], mask=[1, 0, 0], dtype='|S8')
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mrec = fromarrays([_a, _b, _c], dtype=ddtype,
fill_value=(99999, 99999., 'N/A'))
assert_equal(mrec.tolist(),
[(1, 1.1, None), (2, 2.2, asbytes('two')),
(None, None, asbytes('three'))])
def test_withnames(self):
# Test the creation w/ format and names
x = mrecarray(1, formats=float, names='base')
x[0]['base'] = 10
assert_equal(x['base'][0], 10)
def test_exotic_formats(self):
# Test that 'exotic' formats are processed properly
easy = mrecarray(1, dtype=[('i', int), ('s', '|S8'), ('f', float)])
easy[0] = masked
assert_equal(easy.filled(1).item(), (1, asbytes('1'), 1.))
solo = mrecarray(1, dtype=[('f0', '<f8', (2, 2))])
solo[0] = masked
assert_equal(solo.filled(1).item(),
np.array((1,), dtype=solo.dtype).item())
mult = mrecarray(2, dtype="i4, (2,3)float, float")
mult[0] = masked
mult[1] = (1, 1, 1)
mult.filled(0)
assert_equal_records(mult.filled(0),
np.array([(0, 0, 0), (1, 1, 1)],
dtype=mult.dtype))
class TestView(TestCase):
def setUp(self):
(a, b) = (np.arange(10), np.random.rand(10))
ndtype = [('a', np.float), ('b', np.float)]
arr = np.array(list(zip(a, b)), dtype=ndtype)
mrec = fromarrays([a, b], dtype=ndtype, fill_value=(-9., -99.))
mrec.mask[3] = (False, True)
self.data = (mrec, a, b, arr)
def test_view_by_itself(self):
(mrec, a, b, arr) = self.data
test = mrec.view()
self.assertTrue(isinstance(test, MaskedRecords))
assert_equal_records(test, mrec)
assert_equal_records(test._mask, mrec._mask)
def test_view_simple_dtype(self):
(mrec, a, b, arr) = self.data
ntype = (np.float, 2)
test = mrec.view(ntype)
self.assertTrue(isinstance(test, ma.MaskedArray))
assert_equal(test, np.array(list(zip(a, b)), dtype=np.float))
self.assertTrue(test[3, 1] is ma.masked)
def test_view_flexible_type(self):
(mrec, a, b, arr) = self.data
alttype = [('A', np.float), ('B', np.float)]
test = mrec.view(alttype)
self.assertTrue(isinstance(test, MaskedRecords))
assert_equal_records(test, arr.view(alttype))
self.assertTrue(test['B'][3] is masked)
assert_equal(test.dtype, np.dtype(alttype))
self.assertTrue(test._fill_value is None)
##############################################################################
class TestMRecordsImport(TestCase):
# Base test class for MaskedArrays.
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
self.setup()
def setup(self):
# Generic setup
_a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
_b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
_c = ma.array(list(map(asbytes, ['one', 'two', 'three'])),
mask=[0, 0, 1], dtype='|S8')
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mrec = fromarrays([_a, _b, _c], dtype=ddtype,
fill_value=(asbytes('99999'), asbytes('99999.'),
asbytes('N/A')))
nrec = recfromarrays((_a._data, _b._data, _c._data), dtype=ddtype)
self.data = (mrec, nrec, ddtype)
def test_fromarrays(self):
_a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
_b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
_c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8')
(mrec, nrec, _) = self.data
for (f, l) in zip(('a', 'b', 'c'), (_a, _b, _c)):
assert_equal(getattr(mrec, f)._mask, l._mask)
# One record only
_x = ma.array([1, 1.1, 'one'], mask=[1, 0, 0],)
assert_equal_records(fromarrays(_x, dtype=mrec.dtype), mrec[0])
def test_fromrecords(self):
# Test construction from records.
(mrec, nrec, ddtype) = self.data
#......
palist = [(1, 'abc', 3.7000002861022949, 0),
(2, 'xy', 6.6999998092651367, 1),
(0, ' ', 0.40000000596046448, 0)]
pa = recfromrecords(palist, names='c1, c2, c3, c4')
mpa = fromrecords(palist, names='c1, c2, c3, c4')
assert_equal_records(pa, mpa)
#.....
_mrec = fromrecords(nrec)
assert_equal(_mrec.dtype, mrec.dtype)
for field in _mrec.dtype.names:
assert_equal(getattr(_mrec, field), getattr(mrec._data, field))
_mrec = fromrecords(nrec.tolist(), names='c1,c2,c3')
assert_equal(_mrec.dtype, [('c1', int), ('c2', float), ('c3', '|S5')])
for (f, n) in zip(('c1', 'c2', 'c3'), ('a', 'b', 'c')):
assert_equal(getattr(_mrec, f), getattr(mrec._data, n))
_mrec = fromrecords(mrec)
assert_equal(_mrec.dtype, mrec.dtype)
assert_equal_records(_mrec._data, mrec.filled())
assert_equal_records(_mrec._mask, mrec._mask)
def test_fromrecords_wmask(self):
# Tests construction from records w/ mask.
(mrec, nrec, ddtype) = self.data
_mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=[0, 1, 0,])
assert_equal_records(_mrec._data, mrec._data)
assert_equal(_mrec._mask.tolist(), [(0, 0, 0), (1, 1, 1), (0, 0, 0)])
_mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=True)
assert_equal_records(_mrec._data, mrec._data)
assert_equal(_mrec._mask.tolist(), [(1, 1, 1), (1, 1, 1), (1, 1, 1)])
_mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=mrec._mask)
assert_equal_records(_mrec._data, mrec._data)
assert_equal(_mrec._mask.tolist(), mrec._mask.tolist())
_mrec = fromrecords(nrec.tolist(), dtype=ddtype,
mask=mrec._mask.tolist())
assert_equal_records(_mrec._data, mrec._data)
assert_equal(_mrec._mask.tolist(), mrec._mask.tolist())
def test_fromtextfile(self):
# Tests reading from a text file.
fcontent = (
"""#
'One (S)','Two (I)','Three (F)','Four (M)','Five (-)','Six (C)'
'strings',1,1.0,'mixed column',,1
'with embedded "double quotes"',2,2.0,1.0,,1
'strings',3,3.0E5,3,,1
'strings',4,-1e-10,,,1
""")
with temppath() as path:
with open(path, 'w') as f:
f.write(fcontent)
mrectxt = fromtextfile(path, delimitor=',', varnames='ABCDEFG')
self.assertTrue(isinstance(mrectxt, MaskedRecords))
assert_equal(mrectxt.F, [1, 1, 1, 1])
assert_equal(mrectxt.E._mask, [1, 1, 1, 1])
assert_equal(mrectxt.C, [1, 2, 3.e+5, -1e-10])
def test_addfield(self):
# Tests addfield
(mrec, nrec, ddtype) = self.data
(d, m) = ([100, 200, 300], [1, 0, 0])
mrec = addfield(mrec, ma.array(d, mask=m))
assert_equal(mrec.f3, d)
assert_equal(mrec.f3._mask, m)
def test_record_array_with_object_field():
# Trac #1839
y = ma.masked_array(
[(1, '2'), (3, '4')],
mask=[(0, 0), (0, 1)],
dtype=[('a', int), ('b', np.object)])
# getting an item used to fail
y[1]
if __name__ == "__main__":
run_module_suite()
| apache-2.0 |
BiaDarkia/scikit-learn | examples/tree/plot_iris.py | 30 | 2062 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "ryb"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdYlBu)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.RdYlBu, edgecolor='black', s=15)
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend(loc='lower right', borderpad=0, handletextpad=0)
plt.axis("tight")
plt.show()
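# Side note (added, not part of the original example): the thresholding rules
# mentioned in the docstring can be inspected on the fitted estimator, e.g.
# clf.tree_.feature and clf.tree_.threshold hold the split feature index and
# cut-off value for every node of the last tree trained in the loop above.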
| bsd-3-clause |
mupi/tecsaladeaula | accounts/forms.py | 1 | 8712 | # -*- coding: utf-8 -*-
import datetime
import requests
from django.contrib.auth import get_user_model
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.core.mail import send_mail
from django.template.loader import get_template
from django.template import Context
from django.contrib.auth.forms import (UserCreationForm, UserChangeForm,)
from allauth.account.forms import SignupForm
User = get_user_model()
from .models import School, City, Occupation, Discipline, EducationLevel
class MultipleChoiceFieldNoValidation(forms.MultipleChoiceField):
def validate(self, value):
pass
class ProfileEditForm(forms.ModelForm):
disciplines = MultipleChoiceFieldNoValidation()
education_levels = MultipleChoiceFieldNoValidation()
class Meta:
model = User
fields = ('picture', 'first_name', 'occupations','city', 'site', 'biography', 'cpf', 'rg', 'phone')
def clean_cpf(self):
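        # CPF is the Brazilian individual taxpayer number, written as
        # XXX.XXX.XXX-YY where YY are two check digits. The code below strips
        # the punctuation, rejects sequences of one repeated digit, and
        # recomputes both check digits as weighted sums taken modulo 11.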
cpf = self.cleaned_data['cpf']
cpf = cpf.replace('.', '')
digits = cpf.split('-')
soma = 0
all_eq = True
if len(cpf) == 0:
return None
for d in cpf.replace('-', ''):
if d != cpf[0]:
all_eq = False
break
if all_eq:
raise forms.ValidationError(_("CPF invalido!"))
for i in range(len(digits[0])):
soma = soma + (10-i) * int(digits[0][i])
res = (soma*10)%11
if res %10 != int(digits[1][0]):
raise forms.ValidationError(_("CPF invalido!"))
soma = 0
for i in range(len(digits[0])):
soma = soma + (11-i) * int(digits[0][i])
soma = soma + 2 * int(digits[1][0])
res = (soma*10)%11
if res %10 != int(digits[1][1]):
raise forms.ValidationError(_("CPF invalido!"))
return self.cleaned_data['cpf']
def clean_rg(self):
rg = self.cleaned_data['rg']
if len(rg) > 0 and len(rg) < 4:
raise forms.ValidationError(_("Insira o RG corretamente!"))
return self.cleaned_data['rg']
def clean_phone(self):
phone = self.cleaned_data['phone']
if len(phone) > 0 and len(phone) < 14:
raise forms.ValidationError(_("Insira o telefone corretamente!"))
return self.cleaned_data['phone']
def save(self, commit=True):
disciplines = self.cleaned_data.get('disciplines')
education_levels = self.cleaned_data.get('education_levels')
profile = super(ProfileEditForm, self).save(commit=False)
saved_disciplines = profile.disciplines.all()
saving_disciplines = []
for d in disciplines:
if (not Discipline.objects.filter(name=d).exists()):
new_d = Discipline.objects.create(name=d)
new_d.save()
saving_disciplines.append(new_d)
else:
saving_disciplines.append(Discipline.objects.get(name=d))
to_save = [d for d in saving_disciplines if d not in saved_disciplines]
for d in to_save:
profile.disciplines.add(d)
to_remove = [d for d in saved_disciplines if d not in saving_disciplines]
for d in to_remove:
profile.disciplines.remove(d)
saved_education_levels = profile.education_levels.all()
saving_education_levels = []
for el in education_levels:
saving_education_levels.append(EducationLevel.objects.get(slug=el))
to_save = [el for el in saving_education_levels if el not in saved_education_levels]
for el in to_save:
profile.education_levels.add(el)
to_remove = [el for el in saved_education_levels if el not in saving_education_levels]
for el in to_remove:
profile.education_levels.remove(el)
self.save_m2m()
profile.save()
class ProfilePasswordForm(forms.ModelForm):
# email = forms.RegexField(max_length=75, regex=r"^[\w.@+-]+$")
business_email = forms.RegexField(max_length=75, regex=r"^[\w.@+-]+$", required=False)
password1 = forms.CharField(widget=forms.PasswordInput, required=False)
password2 = forms.CharField(widget=forms.PasswordInput, required=False)
class Meta:
model = User
fields = ('business_email',)
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 and password2:
if password1 != password2:
raise forms.ValidationError(_("The two password fields didn't match."))
return password2
def save(self, commit=True):
if self.cleaned_data['password1']:
self.instance.set_password(self.cleaned_data['password1'])
return super(ProfilePasswordForm, self).save(commit=commit)
class SchoolAddForm(forms.ModelForm):
class Meta:
model = School
class AcceptTermsForm(forms.Form):
accept_terms = forms.BooleanField(label=_('Eu aceito os termos de uso'), initial=False, required=False)
def __init__(self, *args, **kwargs):
self.captcha = kwargs
super(AcceptTermsForm, self).__init__(*args, **kwargs)
def clean_accept_terms(self):
captcha = self.captcha['data']
data = self.cleaned_data['accept_terms']
if settings.TERMS_ACCEPTANCE_REQUIRED and not data:
raise forms.ValidationError(_('You must agree to the Terms of Use to use %(site_name)s.'),
params={'site_name': settings.SITE_NAME},)
''' Begin reCAPTCHA validation '''
if not "g-recaptcha-response" in captcha:
raise forms.ValidationError(_('Invalid Data'))
if "g-recaptcha-response" in captcha:
data = {
'secret': settings.GOOGLE_RECAPTCHA_SECRET_KEY,
'response': captcha["g-recaptcha-response"]
}
r = requests.post('https://www.google.com/recaptcha/api/siteverify', data=data)
result = r.json()
''' End reCAPTCHA validation '''
if result['success'] == False:
raise forms.ValidationError(_('Invalid reCAPTCHA. Please try again'))
return self.cleaned_data['accept_terms']
class SignupForm(SignupForm, AcceptTermsForm):
username = forms.CharField(initial=False,required=False)
fullname = forms.CharField(label=_("Nome Completo"),initial=False,required=True)
def extract_first_name(self,name):
names = name.split(" ")
return names[0]
def extract_last_name(self, name):
names = name.split(" ")
if len(names) > 1:
return " ".join(names[1:])
else:
return ""
def clean_username(self):
return self.data['email']
def clean_fullname(self):
data = self.cleaned_data['fullname']
if not data.strip():
raise forms.ValidationError(_('You Must fill your complete name'))
return data
def save(self, request):
user = super(SignupForm, self).save(request)
name = self.cleaned_data['fullname']
user.accepted_terms = self.cleaned_data['accept_terms']
user.first_name = name
# user.first_name = self.extract_first_name(name)
# user.last_name = self.extract_last_name(name)
user.save()
return user
# send_mail('Novo Usuário Cadastrado', get_template('account/email/email_new_user_message.txt').render(Context(
# {'date': now.strftime("%d/%m/%Y"), 'time': now.strftime("%H:%M"), 'username': username, 'email': email})), settings.EMAIL_SUPPORT, [settings.EMAIL_SUPPORT])
class NewUserCreationForm(UserCreationForm):
username = forms.RegexField(label=_("Username"), max_length=75,
regex=r'^[\w.@+-]+$',
help_text=_("Required. 75 characters or fewer. Letters, digits and "
"@/./+/-/_ only."),
error_messages={
'invalid': _("This value may contain only letters, numbers and "
"@/./+/-/_ characters.")})
class NewUserChangeForm(UserChangeForm):
username = forms.RegexField(label=_("Username"), max_length=75,
regex=r'^[\w.@+-]+$',
help_text=_("Required. 75 characters or fewer. Letters, digits and "
"@/./+/-/_ only."),
error_messages={
'invalid': _("This value may contain only letters, numbers and "
"@/./+/-/_ characters.")})
| agpl-3.0 |
elventear/ansible | lib/ansible/modules/network/cumulus/cl_bond.py | 5 | 15915 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <[email protected]>
#
# This file is part of Ansible
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cl_bond
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configures a bond port on Cumulus Linux
description:
    - Configures a bond interface on Cumulus Linux. To configure a bridge port
use the cl_bridge module. To configure any other type of interface use the
cl_interface module. Follow the guidelines for bonding found in the
Cumulus User Guide at U(http://docs.cumulusnetworks.com).
options:
name:
description:
- Name of the interface.
required: true
alias_name:
description:
- Description of the port.
ipv4:
description:
- List of IPv4 addresses to configure on the interface.
In the form I(X.X.X.X/YY).
ipv6:
description:
- List of IPv6 addresses to configure on the interface.
In the form I(X:X:X::X/YYY).
addr_method:
description:
- Configures the port to use DHCP.
To enable this feature use the option I(dhcp).
choices: ['dhcp']
mtu:
description:
- Set MTU. Configure Jumbo Frame by setting MTU to I(9000).
virtual_ip:
description:
- Define IPv4 virtual IP used by the Cumulus Linux VRR feature.
virtual_mac:
description:
- Define Ethernet mac associated with Cumulus Linux VRR feature.
vids:
description:
- In vlan-aware mode, lists VLANs defined under the interface.
mstpctl_bpduguard:
description:
- Enables BPDU Guard on a port in vlan-aware mode.
choices:
- true
- false
mstpctl_portnetwork:
description:
- Enables bridge assurance in vlan-aware mode.
choices:
- true
- false
mstpctl_portadminedge:
description:
- Enables admin edge port.
choices:
- true
- false
clag_id:
description:
- Specify a unique clag_id for every dual connected bond on each
peer switch. The value must be between 1 and 65535 and must be the
same on both peer switches in order for the bond to be considered
dual-connected.
pvid:
description:
- In vlan-aware mode, defines vlan that is the untagged vlan.
miimon:
description:
- The mii link monitoring interval.
default: 100
mode:
description:
- The bond mode, as of Cumulus Linux 2.5 only LACP bond mode is
supported.
default: '802.3ad'
min_links:
description:
- Minimum number of links.
default: 1
lacp_bypass_allow:
description:
- Enable LACP bypass.
lacp_bypass_period:
description:
- Period for enabling LACP bypass. Max value is 900.
lacp_bypass_priority:
description:
- List of ports and priorities. Example I("swp1=10, swp2=20").
lacp_bypass_all_active:
description:
- Activate all interfaces for bypass.
It is recommended to configure all_active instead
of using bypass_priority.
lacp_rate:
description:
- The lacp rate.
default: 1
slaves:
description:
- Bond members.
required: True
xmit_hash_policy:
description:
- Transmit load balancing algorithm. As of Cumulus Linux 2.5 only
I(layer3+4) policy is supported.
default: layer3+4
location:
description:
- Interface directory location.
default:
- '/etc/network/interfaces.d'
requirements: [ Alternate Debian network interface manager - \
ifupdown2 @ github.com/CumulusNetworks/ifupdown2 ]
notes:
- As this module writes the interface directory location, ensure that
``/etc/network/interfaces`` has a 'source /etc/network/interfaces.d/\*' or
whatever path is mentioned in the ``location`` attribute.
- For the config to be activated, i.e installed in the kernel,
"service networking reload" needs be be executed. See EXAMPLES section.
'''
EXAMPLES = '''
# Options ['virtual_mac', 'virtual_ip'] are required together
# configure a bond interface with IP address
- cl_bond:
name: bond0
slaves:
- swp4-5
ipv4: 10.1.1.1/24
# configure bond as a dual-connected clag bond
- cl_bond:
name: bond1
slaves:
- swp1s0
- swp2s0
clag_id: 1
# define cl_bond once in tasks file
# then write interface config in variables file
# with just the options you want.
- cl_bond:
name: "{{ item.key }}"
slaves: "{{ item.value.slaves }}"
clag_id: "{{ item.value.clag_id|default(omit) }}"
ipv4: "{{ item.value.ipv4|default(omit) }}"
ipv6: "{{ item.value.ipv6|default(omit) }}"
alias_name: "{{ item.value.alias_name|default(omit) }}"
addr_method: "{{ item.value.addr_method|default(omit) }}"
mtu: "{{ item.value.mtu|default(omit) }}"
vids: "{{ item.value.vids|default(omit) }}"
virtual_ip: "{{ item.value.virtual_ip|default(omit) }}"
virtual_mac: "{{ item.value.virtual_mac|default(omit) }}"
mstpctl_portnetwork: "{{ item.value.mstpctl_portnetwork|default('no') }}"
mstpctl_portadminedge: "{{ item.value.mstpctl_portadminedge|default('no') }}"
mstpctl_bpduguard: "{{ item.value.mstpctl_bpduguard|default('no') }}"
with_dict: "{{ cl_bonds }}"
# In vars file
# ============
---
cl_bonds:
bond0:
alias_name: uplink to isp
slaves:
- swp1
- swp3
ipv4: 10.1.1.1/24'
bond2:
vids:
- 1
- 50
clag_id: 1
'''
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: string
sample: "interface bond0 config updated"
'''
# handy helper for calling system calls.
# calls AnsibleModule.run_command and prints a more appropriate message
# exec_path - path to file to execute, with all its arguments.
# E.g "/sbin/ip -o link show"
# failure_msg - what message to print on failure
def run_cmd(module, exec_path):
(_rc, out, _err) = module.run_command(exec_path)
if _rc > 0:
if re.search('cannot find interface', _err):
return '[{}]'
failure_msg = "Failed; %s Error: %s" % (exec_path, _err)
module.fail_json(msg=failure_msg)
else:
return out
def current_iface_config(module):
# due to a bug in ifquery, have to check for presence of interface file
# and not rely solely on ifquery. when bug is fixed, this check can be
# removed
_ifacename = module.params.get('name')
_int_dir = module.params.get('location')
module.custom_current_config = {}
if os.path.exists(_int_dir + '/' + _ifacename):
_cmd = "/sbin/ifquery -o json %s" % (module.params.get('name'))
module.custom_current_config = module.from_json(
run_cmd(module, _cmd))[0]
def build_address(module):
# if addr_method == 'dhcp', dont add IP address
if module.params.get('addr_method') == 'dhcp':
return
_ipv4 = module.params.get('ipv4')
_ipv6 = module.params.get('ipv6')
_addresslist = []
if _ipv4 and len(_ipv4) > 0:
_addresslist += _ipv4
if _ipv6 and len(_ipv6) > 0:
_addresslist += _ipv6
if len(_addresslist) > 0:
module.custom_desired_config['config']['address'] = ' '.join(
_addresslist)
def build_vids(module):
_vids = module.params.get('vids')
if _vids and len(_vids) > 0:
module.custom_desired_config['config']['bridge-vids'] = ' '.join(_vids)
def build_pvid(module):
_pvid = module.params.get('pvid')
if _pvid:
module.custom_desired_config['config']['bridge-pvid'] = str(_pvid)
def conv_bool_to_str(_value):
if isinstance(_value, bool):
if _value is True:
return 'yes'
else:
return 'no'
return _value
def conv_array_to_str(_value):
if isinstance(_value, list):
return ' '.join(_value)
return _value
def build_generic_attr(module, _attr):
_value = module.params.get(_attr)
_value = conv_bool_to_str(_value)
_value = conv_array_to_str(_value)
if _value:
module.custom_desired_config['config'][
re.sub('_', '-', _attr)] = str(_value)
def build_alias_name(module):
alias_name = module.params.get('alias_name')
if alias_name:
module.custom_desired_config['config']['alias'] = alias_name
def build_addr_method(module):
_addr_method = module.params.get('addr_method')
if _addr_method:
module.custom_desired_config['addr_family'] = 'inet'
module.custom_desired_config['addr_method'] = _addr_method
def build_vrr(module):
_virtual_ip = module.params.get('virtual_ip')
_virtual_mac = module.params.get('virtual_mac')
vrr_config = []
if _virtual_ip:
vrr_config.append(_virtual_mac)
vrr_config.append(_virtual_ip)
module.custom_desired_config.get('config')['address-virtual'] = \
' '.join(vrr_config)
def add_glob_to_array(_bondmems):
"""
    goes through each bond member; if an entry contains a dash,
    prepend 'glob' to it
"""
result = []
if isinstance(_bondmems, list):
for _entry in _bondmems:
if re.search('-', _entry):
_entry = 'glob ' + _entry
result.append(_entry)
return ' '.join(result)
return _bondmems
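# For example (illustrative): ['swp1-4', 'swp6'] becomes 'glob swp1-4 swp6',
# while a value that is already a plain string is returned unchanged.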
def build_bond_attr(module, _attr):
_value = module.params.get(_attr)
_value = conv_bool_to_str(_value)
_value = add_glob_to_array(_value)
if _value:
module.custom_desired_config['config'][
'bond-' + re.sub('_', '-', _attr)] = str(_value)
def build_desired_iface_config(module):
"""
take parameters defined and build ifupdown2 compatible hash
"""
module.custom_desired_config = {
'addr_family': None,
'auto': True,
'config': {},
'name': module.params.get('name')
}
for _attr in ['slaves', 'mode', 'xmit_hash_policy',
'miimon', 'lacp_rate', 'lacp_bypass_allow',
'lacp_bypass_period', 'lacp_bypass_all_active',
'min_links']:
build_bond_attr(module, _attr)
build_addr_method(module)
build_address(module)
build_vids(module)
build_pvid(module)
build_alias_name(module)
build_vrr(module)
    for _attr in ['mtu', 'mstpctl_portnetwork', 'mstpctl_portadminedge',
'mstpctl_bpduguard', 'clag_id',
'lacp_bypass_priority']:
build_generic_attr(module, _attr)
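# Illustrative shape of the resulting hash for a simple LACP bond (added note;
# keys vary with the parameters actually set, values here are made up):
#
#   {'addr_family': None, 'auto': True, 'name': 'bond0',
#    'config': {'bond-slaves': 'glob swp1-2', 'bond-mode': '802.3ad',
#               'bond-miimon': '100', 'bond-min-links': '1', ...}}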
def config_dict_changed(module):
"""
return true if 'config' dict in hash is different
between desired and current config
"""
current_config = module.custom_current_config.get('config')
desired_config = module.custom_desired_config.get('config')
return current_config != desired_config
def config_changed(module):
"""
returns true if config has changed
"""
if config_dict_changed(module):
return True
# check if addr_method is changed
return module.custom_desired_config.get('addr_method') != \
module.custom_current_config.get('addr_method')
def replace_config(module):
temp = tempfile.NamedTemporaryFile()
desired_config = module.custom_desired_config
# by default it will be something like /etc/network/interfaces.d/swp1
final_location = module.params.get('location') + '/' + \
module.params.get('name')
final_text = ''
_fh = open(final_location, 'w')
# make sure to put hash in array or else ifquery will fail
# write to temp file
try:
temp.write(module.jsonify([desired_config]))
# need to seek to 0 so that data is written to tempfile.
temp.seek(0)
_cmd = "/sbin/ifquery -a -i %s -t json" % (temp.name)
final_text = run_cmd(module, _cmd)
finally:
temp.close()
try:
_fh.write(final_text)
finally:
_fh.close()
def main():
module = AnsibleModule(
argument_spec=dict(
slaves=dict(required=True, type='list'),
name=dict(required=True, type='str'),
ipv4=dict(type='list'),
ipv6=dict(type='list'),
alias_name=dict(type='str'),
addr_method=dict(type='str',
choices=['', 'dhcp']),
mtu=dict(type='str'),
virtual_ip=dict(type='str'),
virtual_mac=dict(type='str'),
vids=dict(type='list'),
pvid=dict(type='str'),
mstpctl_portnetwork=dict(type='bool', choices=BOOLEANS),
mstpctl_portadminedge=dict(type='bool', choices=BOOLEANS),
mstpctl_bpduguard=dict(type='bool', choices=BOOLEANS),
clag_id=dict(type='str'),
min_links=dict(type='int', default=1),
mode=dict(type='str', default='802.3ad'),
miimon=dict(type='int', default=100),
xmit_hash_policy=dict(type='str', default='layer3+4'),
lacp_rate=dict(type='int', default=1),
lacp_bypass_allow=dict(type='int', choices=[0, 1]),
lacp_bypass_all_active=dict(type='int', choices=[0, 1]),
lacp_bypass_priority=dict(type='list'),
lacp_bypass_period=dict(type='int'),
location=dict(type='str',
default='/etc/network/interfaces.d')
),
mutually_exclusive=[['lacp_bypass_priority', 'lacp_bypass_all_active']],
required_together=[['virtual_ip', 'virtual_mac']]
)
# if using the jinja default filter, this resolves to
# create an list with an empty string ['']. The following
# checks all lists and removes it, so that functions expecting
# an empty list, get this result. May upstream this fix into
# the AnsibleModule code to have it check for this.
for k, _param in module.params.items():
if isinstance(_param, list):
module.params[k] = [x for x in _param if x]
_location = module.params.get('location')
if not os.path.exists(_location):
_msg = "%s does not exist." % (_location)
module.fail_json(msg=_msg)
return # for testing purposes only
ifacename = module.params.get('name')
_changed = False
_msg = "interface %s config not changed" % (ifacename)
current_iface_config(module)
build_desired_iface_config(module)
if config_changed(module):
replace_config(module)
_msg = "interface %s config updated" % (ifacename)
_changed = True
module.exit_json(changed=_changed, msg=_msg)
# import module snippets
from ansible.module_utils.basic import *
import tempfile
import os
import re
if __name__ == '__main__':
main()
| gpl-3.0 |
dnozay/testlink-code | third_party/fckeditor/editor/filemanager/connectors/py/fckcommands.py | 48 | 6335 | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
import os
try: # Windows needs stdio set for binary mode for file upload to work.
import msvcrt
msvcrt.setmode (0, os.O_BINARY) # stdin = 0
msvcrt.setmode (1, os.O_BINARY) # stdout = 1
except ImportError:
pass
from fckutil import *
from fckoutput import *
import config as Config
class GetFoldersCommandMixin (object):
def getFolders(self, resourceType, currentFolder):
"""
        Purpose: command to receive a list of folders
"""
# Map the virtual path to our local server
serverPath = mapServerFolder(self.userFilesFolder,currentFolder)
s = """<Folders>""" # Open the folders node
for someObject in os.listdir(serverPath):
someObjectPath = mapServerFolder(serverPath, someObject)
if os.path.isdir(someObjectPath):
s += """<Folder name="%s" />""" % (
convertToXmlAttribute(someObject)
)
s += """</Folders>""" # Close the folders node
return s
class GetFoldersAndFilesCommandMixin (object):
def getFoldersAndFiles(self, resourceType, currentFolder):
"""
        Purpose: command to receive a list of folders and files
"""
# Map the virtual path to our local server
serverPath = mapServerFolder(self.userFilesFolder,currentFolder)
# Open the folders / files node
folders = """<Folders>"""
files = """<Files>"""
for someObject in os.listdir(serverPath):
someObjectPath = mapServerFolder(serverPath, someObject)
if os.path.isdir(someObjectPath):
folders += """<Folder name="%s" />""" % (
convertToXmlAttribute(someObject)
)
elif os.path.isfile(someObjectPath):
size = os.path.getsize(someObjectPath)
if size > 0:
size = round(size/1024)
if size < 1:
size = 1
files += """<File name="%s" size="%d" />""" % (
convertToXmlAttribute(someObject),
size
)
# Close the folders / files node
folders += """</Folders>"""
files += """</Files>"""
return folders + files
class CreateFolderCommandMixin (object):
def createFolder(self, resourceType, currentFolder):
"""
Purpose: command to create a new folder
"""
errorNo = 0; errorMsg ='';
if self.request.has_key("NewFolderName"):
newFolder = self.request.get("NewFolderName", None)
newFolder = sanitizeFolderName (newFolder)
try:
newFolderPath = mapServerFolder(self.userFilesFolder, combinePaths(currentFolder, newFolder))
self.createServerFolder(newFolderPath)
except Exception, e:
				errorMsg = str(e).decode('iso-8859-1').encode('utf-8') # warning with encodings!!!
if hasattr(e,'errno'):
if e.errno==17: #file already exists
errorNo=0
elif e.errno==13: # permission denied
errorNo = 103
elif e.errno==36 or e.errno==2 or e.errno==22: # filename too long / no such file / invalid name
errorNo = 102
else:
errorNo = 110
else:
errorNo = 102
return self.sendErrorNode ( errorNo, errorMsg )
def createServerFolder(self, folderPath):
"Purpose: physically creates a folder on the server"
		# No need to check if the parent exists, just create the whole hierarchy
try:
permissions = Config.ChmodOnFolderCreate
if not permissions:
os.makedirs(folderPath)
except AttributeError: #ChmodOnFolderCreate undefined
permissions = 0755
if permissions:
oldumask = os.umask(0)
			os.makedirs(folderPath, mode=permissions) # honour the configured mode rather than a hard-coded 0755
os.umask( oldumask )
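# Note (assumption, not taken from this connector's config.py): the
# Config.ChmodOnFolderCreate / Config.ChmodOnUpload settings referenced above and
# below are expected to hold an octal mode such as 0755, or a false value /
# be left undefined to fall back to the defaults coded here.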
class UploadFileCommandMixin (object):
def uploadFile(self, resourceType, currentFolder):
"""
Purpose: command to upload files to server (same as FileUpload)
"""
errorNo = 0
if self.request.has_key("NewFile"):
# newFile has all the contents we need
newFile = self.request.get("NewFile", "")
# Get the file name
newFileName = newFile.filename
newFileName = sanitizeFileName( newFileName )
newFileNameOnly = removeExtension(newFileName)
newFileExtension = getExtension(newFileName).lower()
allowedExtensions = Config.AllowedExtensions[resourceType]
deniedExtensions = Config.DeniedExtensions[resourceType]
if (allowedExtensions):
# Check for allowed
isAllowed = False
if (newFileExtension in allowedExtensions):
isAllowed = True
elif (deniedExtensions):
# Check for denied
isAllowed = True
if (newFileExtension in deniedExtensions):
isAllowed = False
else:
# No extension limitations
isAllowed = True
if (isAllowed):
# Upload to operating system
# Map the virtual path to the local server path
currentFolderPath = mapServerFolder(self.userFilesFolder, currentFolder)
i = 0
while (True):
newFilePath = os.path.join (currentFolderPath,newFileName)
if os.path.exists(newFilePath):
i += 1
newFileName = "%s(%d).%s" % (
newFileNameOnly, i, newFileExtension
)
errorNo= 201 # file renamed
else:
# Read file contents and write to the desired path (similar to php's move_uploaded_file)
fout = file(newFilePath, 'wb')
while (True):
chunk = newFile.file.read(100000)
if not chunk: break
fout.write (chunk)
fout.close()
if os.path.exists ( newFilePath ):
doChmod = False
try:
doChmod = Config.ChmodOnUpload
permissions = Config.ChmodOnUpload
except AttributeError: #ChmodOnUpload undefined
doChmod = True
permissions = 0755
if ( doChmod ):
oldumask = os.umask(0)
os.chmod( newFilePath, permissions )
os.umask( oldumask )
newFileUrl = combinePaths(self.webUserFilesFolder, currentFolder) + newFileName
return self.sendUploadResults( errorNo , newFileUrl, newFileName )
else:
return self.sendUploadResults( errorNo = 202, customMsg = "" )
else:
return self.sendUploadResults( errorNo = 202, customMsg = "No File" )
| gpl-2.0 |
JFriel/honours_project | venv/lib/python2.7/site-packages/requests/auth.py | 355 | 8175 | # -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
import threading
from base64 import b64encode
from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header, to_native_string
from .status_codes import codes
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
authstr = 'Basic ' + to_native_string(
b64encode(('%s:%s' % (username, password)).encode('latin1')).strip()
)
return authstr
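# Worked example (illustrative, not part of the original module): HTTP Basic auth
# is just base64 over "username:password", so
#   _basic_auth_str('user', 'pass') == 'Basic dXNlcjpwYXNz'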
class AuthBase(object):
"""Base class that all auth implementations derive from"""
def __call__(self, r):
raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
"""Attaches HTTP Basic Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
def __eq__(self, other):
return all([
self.username == getattr(other, 'username', None),
self.password == getattr(other, 'password', None)
])
def __ne__(self, other):
return not self == other
def __call__(self, r):
r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPProxyAuth(HTTPBasicAuth):
"""Attaches HTTP Proxy Authentication to a given Request object."""
def __call__(self, r):
r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPDigestAuth(AuthBase):
"""Attaches HTTP Digest Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
# Keep state in per-thread local storage
self._thread_local = threading.local()
def init_per_thread_state(self):
# Ensure state is initialized just once per-thread
if not hasattr(self._thread_local, 'init'):
self._thread_local.init = True
self._thread_local.last_nonce = ''
self._thread_local.nonce_count = 0
self._thread_local.chal = {}
self._thread_local.pos = None
self._thread_local.num_401_calls = None
def build_digest_header(self, method, url):
"""
:rtype: str
"""
realm = self._thread_local.chal['realm']
nonce = self._thread_local.chal['nonce']
qop = self._thread_local.chal.get('qop')
algorithm = self._thread_local.chal.get('algorithm')
opaque = self._thread_local.chal.get('opaque')
hash_utf8 = None
if algorithm is None:
_algorithm = 'MD5'
else:
_algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
elif _algorithm == 'SHA':
def sha_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
return None
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
#: path is request-uri defined in RFC 2616 which should not be empty
path = p_parsed.path or "/"
if p_parsed.query:
path += '?' + p_parsed.query
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (method, path)
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == self._thread_local.last_nonce:
self._thread_local.nonce_count += 1
else:
self._thread_local.nonce_count = 1
ncvalue = '%08x' % self._thread_local.nonce_count
s = str(self._thread_local.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if not qop:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
noncebit = "%s:%s:%s:%s:%s" % (
nonce, ncvalue, cnonce, 'auth', HA2
)
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
self._thread_local.last_nonce = nonce
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (self.username, realm, nonce, path, respdig)
if opaque:
base += ', opaque="%s"' % opaque
if algorithm:
base += ', algorithm="%s"' % algorithm
if entdig:
base += ', digest="%s"' % entdig
if qop:
base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return 'Digest %s' % (base)
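        # Shape of the header produced above (all values illustrative):
        #   Digest username="user", realm="example", nonce="...", uri="/path",
        #   response="...", opaque="...", algorithm="MD5", qop="auth",
        #   nc=00000001, cnonce="..."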
def handle_redirect(self, r, **kwargs):
"""Reset num_401_calls counter on redirects."""
if r.is_redirect:
self._thread_local.num_401_calls = 1
def handle_401(self, r, **kwargs):
"""
Takes the given response and tries digest-auth, if needed.
:rtype: requests.Response
"""
if self._thread_local.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self._thread_local.pos)
s_auth = r.headers.get('www-authenticate', '')
if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2:
self._thread_local.num_401_calls += 1
pat = re.compile(r'digest ', flags=re.IGNORECASE)
self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.close()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers['Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
self._thread_local.num_401_calls = 1
return r
def __call__(self, r):
# Initialize per-thread state, if needed
self.init_per_thread_state()
# If we have a saved nonce, skip the 401
if self._thread_local.last_nonce:
r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
try:
self._thread_local.pos = r.body.tell()
except AttributeError:
# In the case of HTTPDigestAuth being reused and the body of
# the previous request was a file-like object, pos has the
# file position of the previous body. Ensure it's set to
# None.
self._thread_local.pos = None
r.register_hook('response', self.handle_401)
r.register_hook('response', self.handle_redirect)
self._thread_local.num_401_calls = 1
return r
def __eq__(self, other):
return all([
self.username == getattr(other, 'username', None),
self.password == getattr(other, 'password', None)
])
def __ne__(self, other):
return not self == other
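# Usage sketch (illustrative, not part of this module): these handlers are passed
# to requests through the ``auth=`` keyword, e.g.
#   requests.get('https://example.com/basic', auth=HTTPBasicAuth('user', 'pass'))
#   requests.get('https://example.com/digest', auth=HTTPDigestAuth('user', 'pass'))
# A plain ('user', 'pass') tuple is shorthand for HTTPBasicAuth.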
| gpl-3.0 |
mropert/conan | conans/server/rest/api_v1.py | 4 | 2145 | from bottle import Bottle
from conans.server.rest.bottle_plugins.http_basic_authentication import HttpBasicAuthentication
from conans.server.rest.bottle_plugins.jwt_authentication import JWTAuthentication
from conans.server.rest.bottle_plugins.return_handler import ReturnHandlerPlugin
from conans.errors import EXCEPTION_CODE_MAPPING
from conans.server.rest.controllers.conan_controller import ConanController
from conans.server.rest.controllers.users_controller import UsersController
from conans.server.rest.controllers.file_upload_download_controller import FileUploadDownloadController
from conans.server.rest.bottle_plugins.version_checker import VersionCheckerPlugin
class ApiV1(Bottle):
def __init__(self, credentials_manager, updown_auth_manager,
server_version, min_client_compatible_version,
server_capabilities, *argc, **argv):
self.credentials_manager = credentials_manager
self.updown_auth_manager = updown_auth_manager
self.server_version = server_version
self.min_client_compatible_version = min_client_compatible_version
self.server_capabilities = server_capabilities
Bottle.__init__(self, *argc, **argv)
def setup(self):
self.install_plugins()
# Install conans controller
ConanController("/conans").attach_to(self)
# Install users controller
UsersController("/users").attach_to(self)
# Install updown controller
if self.updown_auth_manager:
FileUploadDownloadController("/files").attach_to(self)
def install_plugins(self):
# Check client version
self.install(VersionCheckerPlugin(self.server_version,
self.min_client_compatible_version,
self.server_capabilities))
# Second, check Http Basic Auth
self.install(HttpBasicAuthentication())
# Map exceptions to http return codes
self.install(ReturnHandlerPlugin(EXCEPTION_CODE_MAPPING))
# Handle jwt auth
self.install(JWTAuthentication(self.credentials_manager))
| mit |
feroda/django | tests/gis_tests/gdal_tests/test_ds.py | 21 | 11450 | import os
import unittest
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
from ..test_data import TEST_DATA, TestDS, get_ds_file
if HAS_GDAL:
from django.contrib.gis.gdal import DataSource, Envelope, OGRGeometry, GDALException, OGRIndexError, GDAL_VERSION
from django.contrib.gis.gdal.field import OFTReal, OFTInteger, OFTString
# List of acceptable data sources.
ds_list = (
TestDS('test_point', nfeat=5, nfld=3, geom='POINT', gtype=1, driver='ESRI Shapefile',
fields={'dbl': OFTReal, 'int': OFTInteger, 'str': OFTString},
extent=(-1.35011, 0.166623, -0.524093, 0.824508), # Got extent from QGIS
srs_wkt=(
'GEOGCS["GCS_WGS_1984",DATUM["WGS_1984",SPHEROID["WGS_1984",'
'6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",'
'0.017453292519943295]]'
),
field_values={
'dbl': [float(i) for i in range(1, 6)],
'int': list(range(1, 6)),
'str': [str(i) for i in range(1, 6)],
},
fids=range(5)
),
TestDS('test_vrt', ext='vrt', nfeat=3, nfld=3, geom='POINT', gtype='Point25D', driver='VRT',
fields={
'POINT_X': OFTString,
'POINT_Y': OFTString,
'NUM': OFTString,
}, # VRT uses CSV, which all types are OFTString.
extent=(1.0, 2.0, 100.0, 523.5), # Min/Max from CSV
field_values={
'POINT_X': ['1.0', '5.0', '100.0'],
'POINT_Y': ['2.0', '23.0', '523.5'],
'NUM': ['5', '17', '23'],
},
fids=range(1, 4)
),
TestDS('test_poly', nfeat=3, nfld=3, geom='POLYGON', gtype=3,
driver='ESRI Shapefile',
fields={'float': OFTReal, 'int': OFTInteger, 'str': OFTString},
extent=(-1.01513, -0.558245, 0.161876, 0.839637), # Got extent from QGIS
srs_wkt=(
'GEOGCS["GCS_WGS_1984",DATUM["WGS_1984",SPHEROID["WGS_1984",'
'6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",'
'0.017453292519943295]]'
),
)
)
bad_ds = (TestDS('foo'),)
@skipUnless(HAS_GDAL, "GDAL is required")
class DataSourceTest(unittest.TestCase):
def test01_valid_shp(self):
"Testing valid SHP Data Source files."
for source in ds_list:
# Loading up the data source
ds = DataSource(source.ds)
# Making sure the layer count is what's expected (only 1 layer in a SHP file)
self.assertEqual(1, len(ds))
# Making sure GetName works
self.assertEqual(source.ds, ds.name)
# Making sure the driver name matches up
self.assertEqual(source.driver, str(ds.driver))
# Making sure indexing works
try:
ds[len(ds)]
except OGRIndexError:
pass
else:
self.fail('Expected an IndexError!')
def test02_invalid_shp(self):
"Testing invalid SHP files for the Data Source."
for source in bad_ds:
self.assertRaises(GDALException, DataSource, source.ds)
def test03a_layers(self):
"Testing Data Source Layers."
for source in ds_list:
ds = DataSource(source.ds)
# Incrementing through each layer, this tests DataSource.__iter__
for layer in ds:
# Making sure we get the number of features we expect
self.assertEqual(len(layer), source.nfeat)
# Making sure we get the number of fields we expect
self.assertEqual(source.nfld, layer.num_fields)
self.assertEqual(source.nfld, len(layer.fields))
# Testing the layer's extent (an Envelope), and its properties
if source.driver == 'VRT' and (GDAL_VERSION >= (1, 7, 0) and GDAL_VERSION < (1, 7, 3)):
# There's a known GDAL regression with retrieving the extent
# of a VRT layer in versions 1.7.0-1.7.2:
# http://trac.osgeo.org/gdal/ticket/3783
pass
else:
self.assertEqual(True, isinstance(layer.extent, Envelope))
self.assertAlmostEqual(source.extent[0], layer.extent.min_x, 5)
self.assertAlmostEqual(source.extent[1], layer.extent.min_y, 5)
self.assertAlmostEqual(source.extent[2], layer.extent.max_x, 5)
self.assertAlmostEqual(source.extent[3], layer.extent.max_y, 5)
# Now checking the field names.
flds = layer.fields
for f in flds:
self.assertEqual(True, f in source.fields)
# Negative FIDs are not allowed.
self.assertRaises(OGRIndexError, layer.__getitem__, -1)
self.assertRaises(OGRIndexError, layer.__getitem__, 50000)
if hasattr(source, 'field_values'):
fld_names = source.field_values.keys()
# Testing `Layer.get_fields` (which uses Layer.__iter__)
for fld_name in fld_names:
self.assertEqual(source.field_values[fld_name], layer.get_fields(fld_name))
# Testing `Layer.__getitem__`.
for i, fid in enumerate(source.fids):
feat = layer[fid]
self.assertEqual(fid, feat.fid)
# Maybe this should be in the test below, but we might as well test
# the feature values here while in this loop.
for fld_name in fld_names:
self.assertEqual(source.field_values[fld_name][i], feat.get(fld_name))
def test03b_layer_slice(self):
"Test indexing and slicing on Layers."
# Using the first data-source because the same slice
# can be used for both the layer and the control values.
source = ds_list[0]
ds = DataSource(source.ds)
sl = slice(1, 3)
feats = ds[0][sl]
for fld_name in ds[0].fields:
test_vals = [feat.get(fld_name) for feat in feats]
control_vals = source.field_values[fld_name][sl]
self.assertEqual(control_vals, test_vals)
def test03c_layer_references(self):
"""
Ensure OGR objects keep references to the objects they belong to.
"""
source = ds_list[0]
# See ticket #9448.
def get_layer():
# This DataSource object is not accessible outside this
# scope. However, a reference should still be kept alive
# on the `Layer` returned.
ds = DataSource(source.ds)
return ds[0]
# Making sure we can call OGR routines on the Layer returned.
lyr = get_layer()
self.assertEqual(source.nfeat, len(lyr))
self.assertEqual(source.gtype, lyr.geom_type.num)
# Same issue for Feature/Field objects, see #18640
self.assertEqual(str(lyr[0]['str']), "1")
def test04_features(self):
"Testing Data Source Features."
for source in ds_list:
ds = DataSource(source.ds)
# Incrementing through each layer
for layer in ds:
# Incrementing through each feature in the layer
for feat in layer:
# Making sure the number of fields, and the geometry type
# are what's expected.
self.assertEqual(source.nfld, len(list(feat)))
self.assertEqual(source.gtype, feat.geom_type)
# Making sure the fields match to an appropriate OFT type.
for k, v in source.fields.items():
# Making sure we get the proper OGR Field instance, using
# a string value index for the feature.
self.assertEqual(True, isinstance(feat[k], v))
# Testing Feature.__iter__
for fld in feat:
self.assertEqual(True, fld.name in source.fields.keys())
def test05_geometries(self):
"Testing Geometries from Data Source Features."
for source in ds_list:
ds = DataSource(source.ds)
# Incrementing through each layer and feature.
for layer in ds:
for feat in layer:
g = feat.geom
# Making sure we get the right Geometry name & type
self.assertEqual(source.geom, g.geom_name)
self.assertEqual(source.gtype, g.geom_type)
# Making sure the SpatialReference is as expected.
if hasattr(source, 'srs_wkt'):
self.assertEqual(
source.srs_wkt,
# Depending on lib versions, WGS_84 might be WGS_1984
g.srs.wkt.replace('SPHEROID["WGS_84"', 'SPHEROID["WGS_1984"')
)
def test06_spatial_filter(self):
"Testing the Layer.spatial_filter property."
ds = DataSource(get_ds_file('cities', 'shp'))
lyr = ds[0]
# When not set, it should be None.
self.assertEqual(None, lyr.spatial_filter)
# Must be set a/an OGRGeometry or 4-tuple.
self.assertRaises(TypeError, lyr._set_spatial_filter, 'foo')
# Setting the spatial filter with a tuple/list with the extent of
# a buffer centering around Pueblo.
self.assertRaises(ValueError, lyr._set_spatial_filter, list(range(5)))
filter_extent = (-105.609252, 37.255001, -103.609252, 39.255001)
lyr.spatial_filter = (-105.609252, 37.255001, -103.609252, 39.255001)
self.assertEqual(OGRGeometry.from_bbox(filter_extent), lyr.spatial_filter)
feats = [feat for feat in lyr]
self.assertEqual(1, len(feats))
self.assertEqual('Pueblo', feats[0].get('Name'))
# Setting the spatial filter with an OGRGeometry for buffer centering
# around Houston.
filter_geom = OGRGeometry(
'POLYGON((-96.363151 28.763374,-94.363151 28.763374,'
'-94.363151 30.763374,-96.363151 30.763374,-96.363151 28.763374))'
)
lyr.spatial_filter = filter_geom
self.assertEqual(filter_geom, lyr.spatial_filter)
feats = [feat for feat in lyr]
self.assertEqual(1, len(feats))
self.assertEqual('Houston', feats[0].get('Name'))
# Clearing the spatial filter by setting it to None. Now
# should indicate that there are 3 features in the Layer.
lyr.spatial_filter = None
self.assertEqual(3, len(lyr))
def test07_integer_overflow(self):
"Testing that OFTReal fields, treated as OFTInteger, do not overflow."
# Using *.dbf from Census 2010 TIGER Shapefile for Texas,
# which has land area ('ALAND10') stored in a Real field
# with no precision.
ds = DataSource(os.path.join(TEST_DATA, 'texas.dbf'))
feat = ds[0][0]
# Reference value obtained using `ogrinfo`.
self.assertEqual(676586997978, feat.get('ALAND10'))
| bsd-3-clause |
persandstrom/home-assistant | homeassistant/monkey_patch.py | 1 | 2319 | """Monkey patch Python to work around issues causing segfaults.
Under heavy threading operations that schedule calls into
the asyncio event loop, Task objects are created. Due to
a bug in Python, GC may have an issue when switching between
the threads and objects with __del__ (which various components
in HASS have).
This monkey-patch removes the weakref.WeakSet, and replaces it
with an object that ignores the only call utilizing it (the
Task.__init__ which calls _all_tasks.add(self)). It also removes
the __del__ which could trigger the future objects __del__ at
unpredictable times.
The side-effect of this manipulation of the Task is that
Task.all_tasks() is no longer accurate, and there will be no
warning emitted if a Task is GC'd while in use.
Related Python bugs:
- https://bugs.python.org/issue26617
"""
import sys
from typing import Any
def patch_weakref_tasks() -> None:
"""Replace weakref.WeakSet to address Python 3 bug."""
# pylint: disable=no-self-use, protected-access, bare-except
import asyncio.tasks
class IgnoreCalls:
"""Ignore add calls."""
def add(self, other: Any) -> None:
"""No-op add."""
return
asyncio.tasks.Task._all_tasks = IgnoreCalls() # type: ignore
try:
del asyncio.tasks.Task.__del__
except: # noqa: E722
pass
def disable_c_asyncio() -> None:
"""Disable using C implementation of asyncio.
Required to be able to apply the weakref monkey patch.
Requires Python 3.6+.
"""
class AsyncioImportFinder:
"""Finder that blocks C version of asyncio being loaded."""
PATH_TRIGGER = '_asyncio'
def __init__(self, path_entry: str) -> None:
if path_entry != self.PATH_TRIGGER:
raise ImportError()
def find_module(self, fullname: str, path: Any = None) -> None:
"""Find a module."""
if fullname == self.PATH_TRIGGER:
# We lint in Py35, exception is introduced in Py36
# pylint: disable=undefined-variable
raise ModuleNotFoundError() # type: ignore # noqa
sys.path_hooks.append(AsyncioImportFinder)
sys.path.insert(0, AsyncioImportFinder.PATH_TRIGGER)
try:
import _asyncio # noqa
except ImportError:
pass
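# Usage sketch (assumption, not taken from this file): a caller such as Home
# Assistant's entry point would apply these patches before asyncio is first
# imported and before any event loop exists, roughly:
#   from homeassistant import monkey_patch
#   monkey_patch.disable_c_asyncio()   # must run before `import asyncio`
#   monkey_patch.patch_weakref_tasks()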
| apache-2.0 |
loseblue/vim-ycm-windows-64 | python/ycm/client/base_request.py | 6 | 6840 | #!/usr/bin/env python
#
# Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
import requests
import urlparse
from base64 import b64decode, b64encode
from retries import retries
from requests_futures.sessions import FuturesSession
from ycm.unsafe_thread_pool_executor import UnsafeThreadPoolExecutor
from ycm import vimsupport
from ycmd import utils
from ycmd.utils import ToUtf8Json
from ycmd.responses import ServerError, UnknownExtraConf
_HEADERS = {'content-type': 'application/json'}
_EXECUTOR = UnsafeThreadPoolExecutor( max_workers = 30 )
# Setting this to None seems to screw up the Requests/urllib3 libs.
_DEFAULT_TIMEOUT_SEC = 30
_HMAC_HEADER = 'x-ycm-hmac'
class BaseRequest( object ):
def __init__( self ):
pass
def Start( self ):
pass
def Done( self ):
return True
def Response( self ):
return {}
# This method blocks
# |timeout| is num seconds to tolerate no response from server before giving
# up; see Requests docs for details (we just pass the param along).
@staticmethod
def GetDataFromHandler( handler, timeout = _DEFAULT_TIMEOUT_SEC ):
return JsonFromFuture( BaseRequest._TalkToHandlerAsync( '',
handler,
'GET',
timeout ) )
# This is the blocking version of the method. See below for async.
# |timeout| is num seconds to tolerate no response from server before giving
# up; see Requests docs for details (we just pass the param along).
@staticmethod
def PostDataToHandler( data, handler, timeout = _DEFAULT_TIMEOUT_SEC ):
return JsonFromFuture( BaseRequest.PostDataToHandlerAsync( data,
handler,
timeout ) )
# This returns a future! Use JsonFromFuture to get the value.
# |timeout| is num seconds to tolerate no response from server before giving
# up; see Requests docs for details (we just pass the param along).
@staticmethod
def PostDataToHandlerAsync( data, handler, timeout = _DEFAULT_TIMEOUT_SEC ):
return BaseRequest._TalkToHandlerAsync( data, handler, 'POST', timeout )
# This returns a future! Use JsonFromFuture to get the value.
# |method| is either 'POST' or 'GET'.
# |timeout| is num seconds to tolerate no response from server before giving
# up; see Requests docs for details (we just pass the param along).
@staticmethod
def _TalkToHandlerAsync( data,
handler,
method,
timeout = _DEFAULT_TIMEOUT_SEC ):
def SendRequest( data, handler, method, timeout ):
if method == 'POST':
sent_data = ToUtf8Json( data )
return BaseRequest.session.post(
_BuildUri( handler ),
data = sent_data,
headers = BaseRequest._ExtraHeaders( sent_data ),
timeout = timeout )
if method == 'GET':
return BaseRequest.session.get(
_BuildUri( handler ),
headers = BaseRequest._ExtraHeaders(),
timeout = timeout )
@retries( 5, delay = 0.5, backoff = 1.5 )
def DelayedSendRequest( data, handler, method ):
if method == 'POST':
sent_data = ToUtf8Json( data )
return requests.post( _BuildUri( handler ),
data = sent_data,
headers = BaseRequest._ExtraHeaders( sent_data ) )
if method == 'GET':
return requests.get( _BuildUri( handler ),
headers = BaseRequest._ExtraHeaders() )
if not _CheckServerIsHealthyWithCache():
return _EXECUTOR.submit( DelayedSendRequest, data, handler, method )
return SendRequest( data, handler, method, timeout )
@staticmethod
def _ExtraHeaders( request_body = None ):
if not request_body:
request_body = ''
headers = dict( _HEADERS )
headers[ _HMAC_HEADER ] = b64encode(
utils.CreateHexHmac( request_body, BaseRequest.hmac_secret ) )
return headers
session = FuturesSession( executor = _EXECUTOR )
server_location = ''
hmac_secret = ''
def BuildRequestData( include_buffer_data = True ):
line, column = vimsupport.CurrentLineAndColumn()
filepath = vimsupport.GetCurrentBufferFilepath()
request_data = {
'line_num': line + 1,
'column_num': column + 1,
'filepath': filepath
}
if include_buffer_data:
request_data[ 'file_data' ] = vimsupport.GetUnsavedAndCurrentBufferData()
return request_data
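# Illustrative shape of the dict built above (paths/values made up; the exact
# layout of 'file_data' comes from vimsupport and is an assumption here):
#   { 'line_num': 12, 'column_num': 5, 'filepath': '/tmp/foo.py',
#     'file_data': { '/tmp/foo.py': { 'contents': '...', 'filetypes': ['python'] } } }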
def JsonFromFuture( future ):
response = future.result()
_ValidateResponseObject( response )
if response.status_code == requests.codes.server_error:
_RaiseExceptionForData( response.json() )
# We let Requests handle the other status types, we only handle the 500
# error code.
response.raise_for_status()
if response.text:
return response.json()
return None
def _ValidateResponseObject( response ):
if not utils.ContentHexHmacValid(
response.content,
b64decode( response.headers[ _HMAC_HEADER ] ),
BaseRequest.hmac_secret ):
raise RuntimeError( 'Received invalid HMAC for response!' )
return True
def _BuildUri( handler ):
return urlparse.urljoin( BaseRequest.server_location, handler )
SERVER_HEALTHY = False
def _CheckServerIsHealthyWithCache():
global SERVER_HEALTHY
def _ServerIsHealthy():
response = requests.get( _BuildUri( 'healthy' ),
headers = BaseRequest._ExtraHeaders() )
_ValidateResponseObject( response )
response.raise_for_status()
return response.json()
if SERVER_HEALTHY:
return True
try:
SERVER_HEALTHY = _ServerIsHealthy()
return SERVER_HEALTHY
except:
return False
def _RaiseExceptionForData( data ):
if data[ 'exception' ][ 'TYPE' ] == UnknownExtraConf.__name__:
raise UnknownExtraConf( data[ 'exception' ][ 'extra_conf_file' ] )
raise ServerError( '{0}: {1}'.format( data[ 'exception' ][ 'TYPE' ],
data[ 'message' ] ) )
| gpl-3.0 |
jbedorf/tensorflow | tensorflow/contrib/boosted_trees/estimator_batch/custom_export_strategy_test.py | 47 | 9513 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the conversion code and for feature importances export.
Tests that cover conversion from TFBT format to a tensorflow.contrib.
decision_tree generic_tree_model format and feature importances export.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
from tensorflow.contrib.boosted_trees.estimator_batch import custom_export_strategy
from tensorflow.contrib.boosted_trees.proto import tree_config_pb2
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class ConvertModelTest(test_util.TensorFlowTestCase):
def _make_trees(self):
dtec_str = """
trees {
nodes {
leaf {
vector {
value: -1
}
}
}
}
trees {
nodes {
dense_float_binary_split {
feature_column: 0
threshold: 1740.0
left_id: 1
right_id: 2
}
node_metadata {
gain: 500
}
}
nodes {
leaf {
vector {
value: 0.6
}
}
}
nodes {
sparse_float_binary_split_default_left {
split {
feature_column: 0
threshold: 1500.0
left_id: 3
right_id: 4
}
}
node_metadata {
gain: 500
}
}
nodes {
categorical_id_binary_split {
feature_column: 0
feature_id: 5
left_id: 5
right_id: 6
}
node_metadata {
gain: 500
}
}
nodes {
leaf {
vector {
value: 0.8
}
}
}
nodes {
leaf {
vector {
value: 0.5
}
}
}
nodes {
sparse_float_binary_split_default_right {
split {
feature_column: 1
dimension_id:3
threshold: -0.4
left_id: 7
right_id: 8
}
}
node_metadata {
gain: 3600
}
}
nodes {
leaf {
vector {
value: 0.36
}
}
}
nodes {
leaf {
vector {
value: 18
}
}
}
}
tree_weights: 1.0
tree_weights: 0.1
"""
dtec = tree_config_pb2.DecisionTreeEnsembleConfig()
text_format.Merge(dtec_str, dtec)
feature_columns = [
"feature_b",
"feature_a",
"feature_a_m",
"feature_d",
]
return dtec, feature_columns
def testConvertModel(self):
dtec, feature_columns = self._make_trees()
# Assume 2 sparse float columns, one with 1 dimension, the second one with
# 5 dimensions.
# The feature columns in the order they were added.
out = custom_export_strategy.convert_to_universal_format(
dtec, feature_columns, 1, 2, 1)
# Features a and a_m are sparse float features, a_m is multidimensional.
expected_tree = """
features { key: "feature_a_0" }
features { key: "feature_a_m_3" }
features { key: "feature_b" }
features { key: "feature_d" }
model {
ensemble {
summation_combination_technique {
}
members {
submodel {
decision_tree {
nodes {
node_id {
}
leaf {
vector {
value {
float_value: -1.0
}
}
}
}
}
}
submodel_id {
}
}
members {
submodel {
decision_tree {
nodes {
node_id {
}
binary_node {
left_child_id {
value: 1
}
right_child_id {
value: 2
}
inequality_left_child_test {
feature_id {
id {
value: "feature_b"
}
}
threshold {
float_value: 1740.0
}
}
}
}
nodes {
node_id {
value: 1
}
leaf {
vector {
value {
float_value: 0.06
}
}
}
}
nodes {
node_id {
value: 2
}
binary_node {
left_child_id {
value: 3
}
right_child_id {
value: 4
}
inequality_left_child_test {
feature_id {
id {
value: "feature_a_0"
}
}
threshold {
float_value: 1500.0
}
}
}
}
nodes {
node_id {
value: 3
}
binary_node {
left_child_id {
value: 5
}
right_child_id {
value: 6
}
default_direction: RIGHT
custom_left_child_test {
[type.googleapis.com/tensorflow.decision_trees.MatchingValuesTest] {
feature_id {
id {
value: "feature_d"
}
}
value {
int64_value: 5
}
}
}
}
}
nodes {
node_id {
value: 4
}
leaf {
vector {
value {
float_value: 0.08
}
}
}
}
nodes {
node_id {
value: 5
}
leaf {
vector {
value {
float_value: 0.05
}
}
}
}
nodes {
node_id {
value: 6
}
binary_node {
left_child_id {
value: 7
}
right_child_id {
value: 8
}
default_direction: RIGHT
inequality_left_child_test {
feature_id {
id {
value: "feature_a_m_3"
}
}
threshold {
float_value: -0.4
}
}
}
}
nodes {
node_id {
value: 7
}
leaf {
vector {
value {
float_value: 0.036
}
}
}
}
nodes {
node_id {
value: 8
}
leaf {
vector {
value {
float_value: 1.8
}
}
}
}
}
}
submodel_id {
value: 1
}
}
}
}"""
self.assertProtoEquals(expected_tree, out)
def testFeatureImportance(self):
dtec, feature_columns = self._make_trees()
feature_importances = custom_export_strategy._get_feature_importances(
dtec, feature_columns, 1, 2, 1)
self.assertItemsEqual(
["feature_b", "feature_a_0", "feature_a_m_3", "feature_d"],
feature_importances.keys())
self.assertAlmostEqual(50.0, feature_importances["feature_b"], places=4)
self.assertAlmostEqual(50.0, feature_importances["feature_a_0"], places=4)
self.assertAlmostEqual(50.0, feature_importances["feature_d"], places=4)
self.assertAlmostEqual(
360.0, feature_importances["feature_a_m_3"], places=4)
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
alshedivat/tensorflow | tensorflow/python/debug/wrappers/local_cli_wrapper_test.py | 9 | 33763 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for local command-line-interface debug wrapper session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import ui_factory
from tensorflow.python.debug.wrappers import local_cli_wrapper
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import monitored_session
class LocalCLIDebuggerWrapperSessionForTest(
local_cli_wrapper.LocalCLIDebugWrapperSession):
"""Subclasses the wrapper class for testing.
Overrides its CLI-related methods for headless testing environments.
Inserts observer variables for assertions.
"""
def __init__(self,
command_sequence,
sess,
dump_root=None):
"""Constructor of the for-test subclass.
Args:
command_sequence: (list of list of str) A list of command arguments,
including the command prefix, each element of the list is such as:
["run", "-n"],
["print_feed", "input:0"].
sess: See the doc string of LocalCLIDebugWrapperSession.__init__.
dump_root: See the doc string of LocalCLIDebugWrapperSession.__init__.
"""
local_cli_wrapper.LocalCLIDebugWrapperSession.__init__(
self, sess, dump_root=dump_root, log_usage=False)
self._command_sequence = command_sequence
self._command_pointer = 0
# Observer variables.
self.observers = {
"debug_dumps": [],
"tf_errors": [],
"run_start_cli_run_numbers": [],
"run_end_cli_run_numbers": [],
"print_feed_responses": [],
"profiler_py_graphs": [],
"profiler_run_metadata": [],
}
def _prep_cli_for_run_start(self):
pass
def _prep_debug_cli_for_run_end(self,
debug_dump,
tf_error,
passed_filter,
passed_filter_exclude_op_names):
self.observers["debug_dumps"].append(debug_dump)
self.observers["tf_errors"].append(tf_error)
def _prep_profile_cli_for_run_end(self, py_graph, run_metadata):
self.observers["profiler_py_graphs"].append(py_graph)
self.observers["profiler_run_metadata"].append(run_metadata)
def _launch_cli(self):
if self._is_run_start:
self.observers["run_start_cli_run_numbers"].append(self._run_call_count)
else:
self.observers["run_end_cli_run_numbers"].append(self._run_call_count)
readline_cli = ui_factory.get_ui("readline")
self._register_this_run_info(readline_cli)
while True:
command = self._command_sequence[self._command_pointer]
self._command_pointer += 1
try:
if command[0] == "run":
self._run_handler(command[1:])
elif command[0] == "print_feed":
self.observers["print_feed_responses"].append(
self._print_feed_handler(command[1:]))
else:
raise ValueError("Unrecognized command prefix: %s" % command[0])
except debugger_cli_common.CommandLineExit as e:
return e.exit_token
class LocalCLIDebugWrapperSessionTest(test_util.TensorFlowTestCase):
def setUp(self):
self._tmp_dir = tempfile.mktemp()
self.v = variables.VariableV1(10.0, name="v")
self.w = variables.VariableV1(21.0, name="w")
self.delta = constant_op.constant(1.0, name="delta")
self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")
self.w_int = control_flow_ops.with_dependencies(
[self.inc_v],
math_ops.cast(self.w, dtypes.int32, name="w_int_inner"),
name="w_int_outer")
self.ph = array_ops.placeholder(dtypes.float32, name="ph")
self.xph = array_ops.transpose(self.ph, name="xph")
self.m = constant_op.constant(
[[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]], dtype=dtypes.float32, name="m")
self.y = math_ops.matmul(self.m, self.xph, name="y")
self.sparse_ph = array_ops.sparse_placeholder(
dtypes.float32, shape=([5, 5]), name="sparse_placeholder")
self.sparse_add = sparse_ops.sparse_add(self.sparse_ph, self.sparse_ph)
rewriter_config = rewriter_config_pb2.RewriterConfig(
disable_model_pruning=True,
arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
config_proto = config_pb2.ConfigProto(graph_options=graph_options)
self.sess = session.Session(config=config_proto)
# Initialize variable.
self.sess.run(variables.global_variables_initializer())
def tearDown(self):
ops.reset_default_graph()
if os.path.isdir(self._tmp_dir):
shutil.rmtree(self._tmp_dir)
def testConstructWrapper(self):
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), log_usage=False)
def testConstructWrapperWithExistingEmptyDumpRoot(self):
os.mkdir(self._tmp_dir)
self.assertTrue(os.path.isdir(self._tmp_dir))
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), dump_root=self._tmp_dir, log_usage=False)
def testConstructWrapperWithExistingNonEmptyDumpRoot(self):
os.mkdir(self._tmp_dir)
dir_path = os.path.join(self._tmp_dir, "foo")
os.mkdir(dir_path)
self.assertTrue(os.path.isdir(dir_path))
with self.assertRaisesRegexp(
ValueError, "dump_root path points to a non-empty directory"):
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), dump_root=self._tmp_dir, log_usage=False)
def testConstructWrapperWithExistingFileDumpRoot(self):
os.mkdir(self._tmp_dir)
file_path = os.path.join(self._tmp_dir, "foo")
open(file_path, "a").close() # Create the file
self.assertTrue(os.path.isfile(file_path))
with self.assertRaisesRegexp(ValueError, "dump_root path points to a file"):
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), dump_root=file_path, log_usage=False)
def testRunsUnderDebugMode(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
# Verify that the assign_add op did take effect.
self.assertAllClose(12.0, self.sess.run(self.v))
# Assert correct run call numbers for which the CLI has been launched at
# run-start and run-end.
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([1, 2], wrapped_sess.observers["run_end_cli_run_numbers"])
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
# Verify that the TensorFlow runtime errors are picked up and in this case,
# they should be both None.
self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
def testRunsWithEmptyStringDumpRootWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root="")
# run under debug mode.
wrapped_sess.run(self.inc_v)
self.assertAllClose(11.0, self.sess.run(self.v))
def testRunInfoOutputAtRunEndIsCorrect(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
wrapped_sess.run(self.inc_v)
run_info_output = wrapped_sess._run_info_handler([])
tfdbg_logo = cli_shared.get_tfdbg_logo()
# The run_info output in the first run() call should contain the tfdbg logo.
self.assertEqual(tfdbg_logo.lines,
run_info_output.lines[:len(tfdbg_logo.lines)])
menu = run_info_output.annotations[debugger_cli_common.MAIN_MENU_KEY]
self.assertIn("list_tensors", menu.captions())
wrapped_sess.run(self.inc_v)
run_info_output = wrapped_sess._run_info_handler([])
# The run_info output in the second run() call should NOT contain the logo.
self.assertNotEqual(tfdbg_logo.lines,
run_info_output.lines[:len(tfdbg_logo.lines)])
menu = run_info_output.annotations[debugger_cli_common.MAIN_MENU_KEY]
self.assertIn("list_tensors", menu.captions())
def testRunsUnderNonDebugMode(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-n"], ["run", "-n"], ["run", "-n"]],
self.sess, dump_root=self._tmp_dir)
# run three times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(13.0, self.sess.run(self.v))
self.assertEqual([1, 2, 3],
wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([], wrapped_sess.observers["run_end_cli_run_numbers"])
def testRunningWithSparsePlaceholderFeedWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
sparse_feed = ([[0, 1], [0, 2]], [10.0, 20.0])
sparse_result = wrapped_sess.run(
self.sparse_add, feed_dict={self.sparse_ph: sparse_feed})
self.assertAllEqual([[0, 1], [0, 2]], sparse_result.indices)
self.assertAllClose([20.0, 40.0], sparse_result.values)
def testRunsUnderNonDebugThenDebugMode(self):
# Do two NON_DEBUG_RUNs, followed by DEBUG_RUNs.
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-n"], ["run", "-n"], ["run"], ["run"]],
self.sess, dump_root=self._tmp_dir)
# run three times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(13.0, self.sess.run(self.v))
self.assertEqual([1, 2, 3],
wrapped_sess.observers["run_start_cli_run_numbers"])
# Here, the CLI should have been launched only under the third run,
# because the first and second runs are NON_DEBUG.
self.assertEqual([3], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None], wrapped_sess.observers["tf_errors"])
def testRunMultipleTimesWithinLimit(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-t", "3"], ["run"]],
self.sess, dump_root=self._tmp_dir)
# run three times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(13.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([3], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None], wrapped_sess.observers["tf_errors"])
def testRunMultipleTimesOverLimit(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-t", "3"]], self.sess, dump_root=self._tmp_dir)
# run twice, which is less than the number of times specified by the
# command.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(12.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(0, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([], wrapped_sess.observers["tf_errors"])
def testRunMixingDebugModeAndMultpleTimes(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-n"], ["run", "-t", "2"], ["run"], ["run"]],
self.sess, dump_root=self._tmp_dir)
# run four times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(14.0, self.sess.run(self.v))
self.assertEqual([1, 2],
wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([3, 4], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
def testDebuggingMakeCallableTensorRunnerWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
v = variables.VariableV1(42)
tensor_runner = wrapped_sess.make_callable(v)
self.sess.run(v.initializer)
self.assertAllClose(42, tensor_runner())
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
def testDebuggingMakeCallableTensorRunnerWithCustomRunOptionsWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
a = constant_op.constant(42)
tensor_runner = wrapped_sess.make_callable(a)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertAllClose(
42, tensor_runner(options=run_options, run_metadata=run_metadata))
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testDebuggingMakeCallableOperationRunnerWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
v = variables.VariableV1(10.0)
inc_v = state_ops.assign_add(v, 1.0)
op_runner = wrapped_sess.make_callable(inc_v.op)
self.sess.run(v.initializer)
op_runner()
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual(11.0, self.sess.run(v))
def testDebuggingMakeCallableRunnerWithFeedListWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
ph1 = array_ops.placeholder(dtypes.float32)
ph2 = array_ops.placeholder(dtypes.float32)
a = math_ops.add(ph1, ph2)
tensor_runner = wrapped_sess.make_callable(a, feed_list=[ph1, ph2])
self.assertAllClose(42.0, tensor_runner(41.0, 1.0))
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
def testDebuggingMakeCallableFromOptionsWithZeroFeedWorks(self):
variable_1 = variables.VariableV1(
10.5, dtype=dtypes.float32, name="variable_1")
a = math_ops.add(variable_1, variable_1, "callable_a")
math_ops.add(a, a, "callable_b")
self.sess.run(variable_1.initializer)
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"]] * 3, self.sess, dump_root=self._tmp_dir)
callable_options = config_pb2.CallableOptions()
callable_options.fetch.append("callable_b")
sess_callable = wrapped_sess._make_callable_from_options(callable_options)
for _ in range(2):
callable_output = sess_callable()
self.assertAllClose(np.array(42.0, dtype=np.float32), callable_output[0])
debug_dumps = wrapped_sess.observers["debug_dumps"]
self.assertEqual(2, len(debug_dumps))
for debug_dump in debug_dumps:
node_names = [datum.node_name for datum in debug_dump.dumped_tensor_data]
self.assertItemsEqual(
["callable_a", "callable_b", "variable_1", "variable_1/read"],
node_names)
def testDebuggingMakeCallableFromOptionsWithOneFeedWorks(self):
ph1 = array_ops.placeholder(dtypes.float32, name="callable_ph1")
a = math_ops.add(ph1, ph1, "callable_a")
math_ops.add(a, a, "callable_b")
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"]] * 3, self.sess, dump_root=self._tmp_dir)
callable_options = config_pb2.CallableOptions()
callable_options.feed.append("callable_ph1")
callable_options.fetch.append("callable_b")
sess_callable = wrapped_sess._make_callable_from_options(callable_options)
ph1_value = np.array([10.5, -10.5], dtype=np.float32)
for _ in range(2):
callable_output = sess_callable(ph1_value)
self.assertAllClose(
np.array([42.0, -42.0], dtype=np.float32), callable_output[0])
debug_dumps = wrapped_sess.observers["debug_dumps"]
self.assertEqual(2, len(debug_dumps))
for debug_dump in debug_dumps:
node_names = [datum.node_name for datum in debug_dump.dumped_tensor_data]
self.assertItemsEqual(["callable_a", "callable_b"], node_names)
def testDebuggingMakeCallableFromOptionsWithTwoFeedsWorks(self):
ph1 = array_ops.placeholder(dtypes.float32, name="callable_ph1")
ph2 = array_ops.placeholder(dtypes.float32, name="callable_ph2")
a = math_ops.add(ph1, ph2, "callable_a")
math_ops.add(a, a, "callable_b")
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"]] * 3, self.sess, dump_root=self._tmp_dir)
callable_options = config_pb2.CallableOptions()
callable_options.feed.append("callable_ph1")
callable_options.feed.append("callable_ph2")
callable_options.fetch.append("callable_b")
sess_callable = wrapped_sess._make_callable_from_options(callable_options)
ph1_value = np.array(5.0, dtype=np.float32)
ph2_value = np.array(16.0, dtype=np.float32)
for _ in range(2):
callable_output = sess_callable(ph1_value, ph2_value)
self.assertAllClose(np.array(42.0, dtype=np.float32), callable_output[0])
debug_dumps = wrapped_sess.observers["debug_dumps"]
self.assertEqual(2, len(debug_dumps))
for debug_dump in debug_dumps:
node_names = [datum.node_name for datum in debug_dump.dumped_tensor_data]
self.assertItemsEqual(["callable_a", "callable_b"], node_names)
def testDebugMakeCallableFromOptionsWithCustomOptionsAndMetadataWorks(self):
variable_1 = variables.VariableV1(
10.5, dtype=dtypes.float32, name="variable_1")
a = math_ops.add(variable_1, variable_1, "callable_a")
math_ops.add(a, a, "callable_b")
self.sess.run(variable_1.initializer)
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
callable_options = config_pb2.CallableOptions()
callable_options.fetch.append("callable_b")
callable_options.run_options.trace_level = config_pb2.RunOptions.FULL_TRACE
sess_callable = wrapped_sess._make_callable_from_options(callable_options)
run_metadata = config_pb2.RunMetadata()
# Call the callable with a custom run_metadata.
callable_output = sess_callable(run_metadata=run_metadata)
# Verify that step_stats is populated in the custom run_metadata.
self.assertTrue(run_metadata.step_stats)
self.assertAllClose(np.array(42.0, dtype=np.float32), callable_output[0])
debug_dumps = wrapped_sess.observers["debug_dumps"]
self.assertEqual(1, len(debug_dumps))
debug_dump = debug_dumps[0]
node_names = [datum.node_name for datum in debug_dump.dumped_tensor_data]
self.assertItemsEqual(
["callable_a", "callable_b", "variable_1", "variable_1/read"],
node_names)
def testRuntimeErrorShouldBeCaught(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
# Do a run that should lead to an TensorFlow runtime error.
wrapped_sess.run(self.y, feed_dict={self.ph: [[0.0], [1.0], [2.0]]})
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([1], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
# Verify that the runtime error is caught by the wrapped session properly.
self.assertEqual(1, len(wrapped_sess.observers["tf_errors"]))
tf_error = wrapped_sess.observers["tf_errors"][0]
self.assertEqual("y", tf_error.op.name)
def testRuntimeErrorBeforeGraphExecutionIsRaised(self):
# Use an impossible device name to cause an error before graph execution.
with ops.device("/device:GPU:1337"):
w = variables.VariableV1([1.0] * 10, name="w")
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"]], self.sess, dump_root=self._tmp_dir)
with self.assertRaisesRegexp(errors.OpError, r".*[Dd]evice.*1337.*"):
wrapped_sess.run(w)
def testRunTillFilterPassesShouldLaunchCLIAtCorrectRun(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-f", "v_greater_than_twelve"],
["run", "-f", "v_greater_than_twelve"],
["run"]],
self.sess,
dump_root=self._tmp_dir)
def v_greater_than_twelve(datum, tensor):
return datum.node_name == "v" and tensor > 12.0
# Verify that adding the same tensor filter more than once is tolerated
# (i.e., as if it were added only once).
wrapped_sess.add_tensor_filter("v_greater_than_twelve",
v_greater_than_twelve)
wrapped_sess.add_tensor_filter("v_greater_than_twelve",
v_greater_than_twelve)
# run five times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(15.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
# run-end CLI should NOT have been launched for run #2 and #3, because only
# starting from run #4 v becomes greater than 12.0.
self.assertEqual([4, 5], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
def testRunTillFilterPassesWithExcludeOpNames(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-f", "greater_than_twelve",
"--filter_exclude_node_names", "inc_v.*"],
["run"], ["run"]],
self.sess,
dump_root=self._tmp_dir)
def greater_than_twelve(datum, tensor):
del datum # Unused.
return tensor > 12.0
# Verify that adding the same tensor filter more than once is tolerated
# (i.e., as if it were added only once).
wrapped_sess.add_tensor_filter("greater_than_twelve", greater_than_twelve)
# run five times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(14.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
# Due to the --filter_exclude_op_names flag, the run-end CLI should show up
# not after run 3, but after run 4.
self.assertEqual([4], wrapped_sess.observers["run_end_cli_run_numbers"])
def testRunTillFilterPassesWorksInConjunctionWithOtherNodeNameFilter(self):
"""Test that --.*_filter flags work in conjunction with -f.
In other words, test that you can use a tensor filter on a subset of
the tensors.
"""
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-f", "v_greater_than_twelve", "--node_name_filter", "v$"],
["run", "-f", "v_greater_than_twelve", "--node_name_filter", "v$"],
["run"]],
self.sess,
dump_root=self._tmp_dir)
def v_greater_than_twelve(datum, tensor):
return datum.node_name == "v" and tensor > 12.0
wrapped_sess.add_tensor_filter("v_greater_than_twelve",
v_greater_than_twelve)
# run five times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(15.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
# run-end CLI should NOT have been launched for run #2 and #3, because only
# starting from run #4 v becomes greater than 12.0.
self.assertEqual([4, 5], wrapped_sess.observers["run_end_cli_run_numbers"])
debug_dumps = wrapped_sess.observers["debug_dumps"]
self.assertEqual(2, len(debug_dumps))
self.assertEqual(1, len(debug_dumps[0].dumped_tensor_data))
self.assertEqual("v:0", debug_dumps[0].dumped_tensor_data[0].tensor_name)
self.assertEqual(1, len(debug_dumps[1].dumped_tensor_data))
self.assertEqual("v:0", debug_dumps[1].dumped_tensor_data[0].tensor_name)
def testRunsUnderDebugModeWithWatchFnFilteringNodeNames(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "--node_name_filter", "inc.*"],
["run", "--node_name_filter", "delta"],
["run"]],
self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
# Verify that the assign_add op did take effect.
self.assertAllClose(12.0, self.sess.run(self.v))
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
dumps = wrapped_sess.observers["debug_dumps"][0]
self.assertEqual(1, dumps.size)
self.assertEqual("inc_v", dumps.dumped_tensor_data[0].node_name)
dumps = wrapped_sess.observers["debug_dumps"][1]
self.assertEqual(1, dumps.size)
self.assertEqual("delta", dumps.dumped_tensor_data[0].node_name)
def testRunsUnderDebugModeWithWatchFnFilteringOpTypes(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "--node_name_filter", "delta"],
["run", "--op_type_filter", "AssignAdd"],
["run"]],
self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
# Verify that the assign_add op did take effect.
self.assertAllClose(12.0, self.sess.run(self.v))
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
dumps = wrapped_sess.observers["debug_dumps"][0]
self.assertEqual(1, dumps.size)
self.assertEqual("delta", dumps.dumped_tensor_data[0].node_name)
dumps = wrapped_sess.observers["debug_dumps"][1]
self.assertEqual(1, dumps.size)
self.assertEqual("inc_v", dumps.dumped_tensor_data[0].node_name)
def testRunsUnderDebugModeWithWatchFnFilteringTensorDTypes(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "--op_type_filter", "Variable.*"],
["run", "--tensor_dtype_filter", "int32"],
["run"]],
self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.w_int)
wrapped_sess.run(self.w_int)
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
dumps = wrapped_sess.observers["debug_dumps"][0]
self.assertEqual(2, dumps.size)
self.assertItemsEqual(
["v", "w"], [dumps.dumped_tensor_data[i].node_name for i in [0, 1]])
dumps = wrapped_sess.observers["debug_dumps"][1]
self.assertEqual(2, dumps.size)
self.assertEqual(
["w_int_inner", "w_int_outer"],
[dumps.dumped_tensor_data[i].node_name for i in [0, 1]])
def testRunsUnderDebugModeWithWatchFnFilteringOpTypesAndTensorDTypes(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "--op_type_filter", "Cast", "--tensor_dtype_filter", "int32"],
["run"]],
self.sess, dump_root=self._tmp_dir)
    # run under debug mode once.
wrapped_sess.run(self.w_int)
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
dumps = wrapped_sess.observers["debug_dumps"][0]
self.assertEqual(1, dumps.size)
self.assertEqual("w_int_inner", dumps.dumped_tensor_data[0].node_name)
def testPrintFeedPrintsFeedValueForTensorFeedKey(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["print_feed", "ph:0"], ["run"], ["run"]], self.sess)
self.assertAllClose(
[[5.0], [-1.0]],
wrapped_sess.run(self.y, feed_dict={self.ph: [[0.0, 1.0, 2.0]]}))
print_feed_responses = wrapped_sess.observers["print_feed_responses"]
self.assertEqual(1, len(print_feed_responses))
self.assertEqual(
["Tensor \"ph:0 (feed)\":", "", "[[0.0, 1.0, 2.0]]"],
print_feed_responses[0].lines)
def testPrintFeedPrintsFeedValueForTensorNameFeedKey(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["print_feed", "ph:0"], ["run"], ["run"]], self.sess)
self.assertAllClose(
[[5.0], [-1.0]],
wrapped_sess.run(self.y, feed_dict={"ph:0": [[0.0, 1.0, 2.0]]}))
print_feed_responses = wrapped_sess.observers["print_feed_responses"]
self.assertEqual(1, len(print_feed_responses))
self.assertEqual(
["Tensor \"ph:0 (feed)\":", "", "[[0.0, 1.0, 2.0]]"],
print_feed_responses[0].lines)
def testPrintFeedPrintsErrorForInvalidFeedKey(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["print_feed", "spam"], ["run"], ["run"]], self.sess)
self.assertAllClose(
[[5.0], [-1.0]],
wrapped_sess.run(self.y, feed_dict={"ph:0": [[0.0, 1.0, 2.0]]}))
print_feed_responses = wrapped_sess.observers["print_feed_responses"]
self.assertEqual(1, len(print_feed_responses))
self.assertEqual(
["ERROR: The feed_dict of the current run does not contain the key "
"spam"], print_feed_responses[0].lines)
def testPrintFeedPrintsErrorWhenFeedDictIsNone(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["print_feed", "spam"], ["run"], ["run"]], self.sess)
wrapped_sess.run(self.w_int)
print_feed_responses = wrapped_sess.observers["print_feed_responses"]
self.assertEqual(1, len(print_feed_responses))
self.assertEqual(
["ERROR: The feed_dict of the current run is None or empty."],
print_feed_responses[0].lines)
def testRunUnderProfilerModeWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-p"], ["run"]], self.sess)
wrapped_sess.run(self.w_int)
self.assertEqual(1, len(wrapped_sess.observers["profiler_run_metadata"]))
self.assertTrue(
wrapped_sess.observers["profiler_run_metadata"][0].step_stats)
self.assertEqual(1, len(wrapped_sess.observers["profiler_py_graphs"]))
self.assertIsInstance(
wrapped_sess.observers["profiler_py_graphs"][0], ops.Graph)
def testCallingHookDelBeforeAnyRun(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess)
del wrapped_sess
def testCallingShouldStopMethodOnNonWrappedNonMonitoredSessionErrors(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess)
with self.assertRaisesRegexp(
ValueError,
r"The wrapped session .* does not have a method .*should_stop.*"):
wrapped_sess.should_stop()
def testLocalCLIDebugWrapperSessionWorksOnMonitoredSession(self):
monitored_sess = monitored_session.MonitoredSession()
wrapped_monitored_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], monitored_sess)
self.assertFalse(wrapped_monitored_sess.should_stop())
def testRunsWithEmptyFetchWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"]], self.sess, dump_root="")
run_output = wrapped_sess.run([])
self.assertEqual([], run_output)
def testRunsWithEmptyNestedFetchWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"]], self.sess, dump_root="")
run_output = wrapped_sess.run({"foo": {"baz": []}, "bar": ()})
self.assertEqual({"foo": {"baz": []}, "bar": ()}, run_output)
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
kartikp1995/gnuradio | gr-wxgui/python/wxgui/plotter/gltext.py | 37 | 16891 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Provides some text display functions for wx + ogl
# Copyright (C) 2007 Christian Brugger, Stefan Hacker
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import wx
from OpenGL.GL import *
"""
Optimize with psyco if possible, this gains us about 50% speed when
creating our textures in trade for about 4MBytes of additional memory usage for
psyco. If you don't like losing the memory you have to turn the lines following
"enable psyco" into a comment while uncommenting the line after "Disable psyco".
"""
#Try to enable psyco
try:
import psyco
psyco_optimized = False
except ImportError:
psyco = None
#Disable psyco
#psyco = None
class TextElement(object):
"""
A simple class for using system Fonts to display
text in an OpenGL scene
"""
def __init__(self,
text = '',
font = None,
foreground = wx.BLACK,
centered = False):
"""
text (String) - Text
font (wx.Font) - Font to draw with (None = System default)
foreground (wx.Color) - Color of the text
or (wx.Bitmap)- Bitmap to overlay the text with
centered (bool) - Center the text
Initializes the TextElement
"""
# save given variables
self._text = text
self._lines = text.split('\n')
self._font = font
self._foreground = foreground
self._centered = centered
# init own variables
self._owner_cnt = 0 #refcounter
self._texture = None #OpenGL texture ID
self._text_size = None #x/y size tuple of the text
self._texture_size= None #x/y Texture size tuple
# create Texture
self.createTexture()
#---Internal helpers
def _getUpper2Base(self, value):
"""
Returns the lowest value with the power of
2 greater than 'value' (2^n>value)
"""
base2 = 1
while base2 < value:
base2 *= 2
return base2
#---Functions
def draw_text(self, position = wx.Point(0,0), scale = 1.0, rotation = 0):
"""
position (wx.Point) - x/y Position to draw in scene
scale (float) - Scale
rotation (int) - Rotation in degree
Draws the text to the scene
"""
#Enable necessary functions
glColor(1,1,1,1)
glEnable(GL_TEXTURE_2D)
glEnable(GL_ALPHA_TEST) #Enable alpha test
glAlphaFunc(GL_GREATER, 0)
glEnable(GL_BLEND) #Enable blending
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
#Bind texture
glBindTexture(GL_TEXTURE_2D, self._texture)
ow, oh = self._text_size
w , h = self._texture_size
#Perform transformations
glPushMatrix()
glTranslated(position.x, position.y, 0)
glRotate(-rotation, 0, 0, 1)
glScaled(scale, scale, scale)
if self._centered:
glTranslate(-w/2, -oh/2, 0)
#Draw vertices
glBegin(GL_QUADS)
glTexCoord2f(0,0); glVertex2f(0,0)
glTexCoord2f(0,1); glVertex2f(0,h)
glTexCoord2f(1,1); glVertex2f(w,h)
glTexCoord2f(1,0); glVertex2f(w,0)
glEnd()
glPopMatrix()
#Disable features
glDisable(GL_BLEND)
glDisable(GL_ALPHA_TEST)
glDisable(GL_TEXTURE_2D)
def createTexture(self):
"""
Creates a texture from the settings saved in TextElement, to be able to use normal
    system fonts conveniently, a wx.MemoryDC is used to draw on a wx.Bitmap. As wxwidgets
device contexts don't support alpha at all it is necessary to apply a little hack
to preserve antialiasing without sticking to a fixed background color:
    We draw the bmp in b/w mode so we can use its data as an alpha channel for a solid
color bitmap which after GL_ALPHA_TEST and GL_BLEND will show a nicely antialiased
text on any surface.
To access the raw pixel data the bmp gets converted to a wx.Image. Now we just have
to merge our foreground color with the alpha data we just created and push it all
    into an OpenGL texture and we are DONE *inhales deeply*
DRAWBACK of the whole conversion thing is a really long time for creating the
texture. If you see any optimizations that could save time PLEASE CREATE A PATCH!!!
"""
# get a memory dc
dc = wx.MemoryDC()
# Select an empty bitmap into the MemoryDC - otherwise the call to
# GetMultiLineTextExtent() may fail below
dc.SelectObject(wx.EmptyBitmap(1,1))
# set our font
dc.SetFont(self._font)
# Approximate extend to next power of 2 and create our bitmap
# REMARK: You wouldn't believe how much fucking speed this little
# sucker gains compared to sizes not of the power of 2. It's like
# 500ms --> 0.5ms (on my ATI-GPU powered Notebook). On Sams nvidia
# machine there don't seem to occur any losses...bad drivers?
ow, oh = dc.GetMultiLineTextExtent(self._text)[:2]
w, h = self._getUpper2Base(ow), self._getUpper2Base(oh)
self._text_size = wx.Size(ow,oh)
self._texture_size = wx.Size(w,h)
bmp = wx.EmptyBitmap(w,h)
#Draw in b/w mode to bmp so we can use it as alpha channel
dc.SelectObject(bmp)
dc.SetBackground(wx.BLACK_BRUSH)
dc.Clear()
dc.SetTextForeground(wx.WHITE)
x,y = 0,0
centered = self.centered
for line in self._lines:
if not line: line = ' '
tw, th = dc.GetTextExtent(line)
if centered:
x = int(round((w-tw)/2))
dc.DrawText(line, x, y)
x = 0
y += th
#Release the dc
dc.SelectObject(wx.NullBitmap)
del dc
#Generate a correct RGBA data string from our bmp
"""
NOTE: You could also use wx.AlphaPixelData to access the pixel data
in 'bmp' directly, but the iterator given by it is much slower than
first converting to an image and using wx.Image.GetData().
"""
img = wx.ImageFromBitmap(bmp)
alpha = img.GetData()
if isinstance(self._foreground, wx.Colour):
"""
If we have a static color...
"""
r,g,b = self._foreground.Get()
color = "%c%c%c" % (chr(r), chr(g), chr(b))
data = ''
for i in xrange(0, len(alpha)-1, 3):
data += color + alpha[i]
elif isinstance(self._foreground, wx.Bitmap):
"""
If we have a bitmap...
"""
bg_img = wx.ImageFromBitmap(self._foreground)
bg = bg_img.GetData()
bg_width = self._foreground.GetWidth()
bg_height = self._foreground.GetHeight()
data = ''
for y in xrange(0, h):
for x in xrange(0, w):
if (y > (bg_height-1)) or (x > (bg_width-1)):
color = "%c%c%c" % (chr(0),chr(0),chr(0))
else:
pos = (x+y*bg_width) * 3
color = bg[pos:pos+3]
data += color + alpha[(x+y*w)*3]
# now convert it to ogl texture
self._texture = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, self._texture)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glPixelStorei(GL_UNPACK_ROW_LENGTH, 0)
glPixelStorei(GL_UNPACK_ALIGNMENT, 2)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, data)
def deleteTexture(self):
"""
Deletes the OpenGL texture object
"""
if self._texture:
if glIsTexture(self._texture):
glDeleteTextures(self._texture)
else:
self._texture = None
def bind(self):
"""
Increase refcount
"""
self._owner_cnt += 1
def release(self):
"""
Decrease refcount
"""
self._owner_cnt -= 1
def isBound(self):
"""
Return refcount
"""
return self._owner_cnt
def __del__(self):
"""
Destructor
"""
self.deleteTexture()
#---Getters/Setters
def getText(self): return self._text
def getFont(self): return self._font
def getForeground(self): return self._foreground
def getCentered(self): return self._centered
def getTexture(self): return self._texture
def getTexture_size(self): return self._texture_size
def getOwner_cnt(self): return self._owner_cnt
def setOwner_cnt(self, value):
self._owner_cnt = value
#---Properties
text = property(getText, None, None, "Text of the object")
font = property(getFont, None, None, "Font of the object")
foreground = property(getForeground, None, None, "Color of the text")
centered = property(getCentered, None, None, "Is text centered")
owner_cnt = property(getOwner_cnt, setOwner_cnt, None, "Owner count")
texture = property(getTexture, None, None, "Used texture")
texture_size = property(getTexture_size, None, None, "Size of the used texture")
class Text(object):
"""
A simple class for using System Fonts to display text in
an OpenGL scene. The Text adds a global Cache of already
created text elements to TextElement's base functionality
so you can save some memory and increase speed
"""
_texts = [] #Global cache for TextElements
def __init__(self,
text = 'Text',
font = None,
font_size = 8,
foreground = wx.BLACK,
centered = False,
bold = False):
"""
text (string) - displayed text
font (wx.Font) - if None, system default font will be used with font_size
font_size (int) - font size in points
foreground (wx.Color) - Color of the text
or (wx.Bitmap) - Bitmap to overlay the text with
centered (bool) - should the text drawn centered towards position?
Initializes the text object
"""
#Init/save variables
self._aloc_text = None
self._text = text
self._font_size = font_size
self._foreground= foreground
self._centered = centered
#Check if we are offered a font
if not font:
#if not use the system default
self._font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
else:
#save it
self._font = font
if bold: self._font.SetWeight(wx.FONTWEIGHT_BOLD)
#Bind us to our texture
self._initText()
#---Internal helpers
def _initText(self):
"""
Initializes/Reinitializes the Text object by binding it
to a TextElement suitable for its current settings
"""
#Check if we already bound to a texture
if self._aloc_text:
#if so release it
self._aloc_text.release()
if not self._aloc_text.isBound():
self._texts.remove(self._aloc_text)
self._aloc_text = None
#Adjust our font
self._font.SetPointSize(self._font_size)
#Search for existing element in our global buffer
for element in self._texts:
if element.text == self._text and\
element.font == self._font and\
element.foreground == self._foreground and\
element.centered == self._centered:
# We already exist in global buffer ;-)
element.bind()
self._aloc_text = element
break
if not self._aloc_text:
# We are not in the global buffer, let's create ourselves
aloc_text = self._aloc_text = TextElement(self._text,
self._font,
self._foreground,
self._centered)
aloc_text.bind()
self._texts.append(aloc_text)
def __del__(self):
"""
Destructor
"""
aloc_text = self._aloc_text
aloc_text.release()
if not aloc_text.isBound():
self._texts.remove(aloc_text)
#---Functions
def draw_text(self, position = wx.Point(0,0), scale = 1.0, rotation = 0):
"""
position (wx.Point) - x/y Position to draw in scene
scale (float) - Scale
rotation (int) - Rotation in degree
Draws the text to the scene
"""
self._aloc_text.draw_text(position, scale, rotation)
#---Setter/Getter
def getText(self): return self._text
def setText(self, value, reinit = True):
"""
        value (string) - New text
reinit (bool) - Create a new texture
Sets a new text
"""
self._text = value
if reinit:
self._initText()
def getFont(self): return self._font
def setFont(self, value, reinit = True):
"""
        value (wx.Font) - New font
reinit (bool) - Create a new texture
Sets a new font
"""
self._font = value
if reinit:
self._initText()
def getFont_size(self): return self._font_size
def setFont_size(self, value, reinit = True):
"""
        value (int) - New font size
reinit (bool) - Create a new texture
Sets a new font size
"""
self._font_size = value
if reinit:
self._initText()
def getForeground(self): return self._foreground
def setForeground(self, value, reinit = True):
"""
        value (wx.Colour or wx.Bitmap) - New foreground color or overlay bitmap
        reinit (bool) - Create a new texture
        Sets a new foreground
"""
self._foreground = value
if reinit:
self._initText()
def getCentered(self): return self._centered
def setCentered(self, value, reinit = True):
"""
value (bool) - New centered value
reinit (bool) - Create a new texture
Sets a new value for 'centered'
"""
self._centered = value
if reinit:
self._initText()
def get_size(self):
"""
Returns a text size tuple
"""
return self._aloc_text._text_size
def getTexture_size(self):
"""
Returns a texture size tuple
"""
return self._aloc_text.texture_size
def getTextElement(self):
"""
Returns the text element bound to the Text class
"""
return self._aloc_text
def getTexture(self):
"""
Returns the texture of the bound TextElement
"""
return self._aloc_text.texture
#---Properties
text = property(getText, setText, None, "Text of the object")
font = property(getFont, setFont, None, "Font of the object")
font_size = property(getFont_size, setFont_size, None, "Font size")
foreground = property(getForeground, setForeground, None, "Color/Overlay bitmap of the text")
centered = property(getCentered, setCentered, None, "Display the text centered")
texture_size = property(getTexture_size, None, None, "Size of the used texture")
texture = property(getTexture, None, None, "Texture of bound TextElement")
text_element = property(getTextElement,None , None, "TextElement bound to this class")
#Optimize critical functions
if psyco and not psyco_optimized:
psyco.bind(TextElement.createTexture)
psyco_optimized = True
| gpl-3.0 |
saratang/servo | tests/wpt/web-platform-tests/tools/manifest/update.py | 230 | 3336 | #!/usr/bin/env python
import argparse
import imp
import os
import sys
import manifest
import vcs
from log import get_logger
from tree import GitTree, NoVCSTree
here = os.path.dirname(__file__)
localpaths = imp.load_source("localpaths", os.path.abspath(os.path.join(here, os.pardir, "localpaths.py")))
def update(tests_root, url_base, manifest, ignore_local=False):
if vcs.is_git_repo(tests_root):
tests_tree = GitTree(tests_root, url_base)
remove_missing_local = False
else:
tests_tree = NoVCSTree(tests_root, url_base)
remove_missing_local = not ignore_local
if not ignore_local:
local_changes = tests_tree.local_changes()
else:
local_changes = None
manifest.update(tests_root,
url_base,
tests_tree.current_rev(),
tests_tree.committed_changes(manifest.rev),
local_changes,
remove_missing_local=remove_missing_local)
def update_from_cli(**kwargs):
tests_root = kwargs["tests_root"]
path = kwargs["path"]
assert tests_root is not None
m = None
logger = get_logger()
if not kwargs.get("rebuild", False):
try:
m = manifest.load(tests_root, path)
except manifest.ManifestVersionMismatch:
logger.info("Manifest version changed, rebuilding")
m = None
else:
logger.info("Updating manifest")
if m is None:
m = manifest.Manifest(None)
update(tests_root,
kwargs["url_base"],
m,
ignore_local=kwargs.get("ignore_local", False))
manifest.write(m, path)
def abs_path(path):
return os.path.abspath(os.path.expanduser(path))
def create_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-p", "--path", type=abs_path, help="Path to manifest file.")
parser.add_argument(
"--tests-root", type=abs_path, help="Path to root of tests.")
parser.add_argument(
"-r", "--rebuild", action="store_true", default=False,
help="Force a full rebuild of the manifest.")
parser.add_argument(
"--ignore-local", action="store_true", default=False,
help="Don't include uncommitted local changes in the manifest.")
parser.add_argument(
"--url-base", action="store", default="/",
help="Base url to use as the mount point for tests in this manifest.")
return parser
def find_top_repo():
path = here
rv = None
while path != "/":
if vcs.is_git_repo(path):
rv = path
path = os.path.abspath(os.path.join(path, os.pardir))
return rv
def main(default_tests_root=None):
opts = create_parser().parse_args()
if opts.tests_root is None:
tests_root = None
if default_tests_root is not None:
tests_root = default_tests_root
else:
tests_root = find_top_repo()
if tests_root is None:
print >> sys.stderr, """No git repo found; could not determine test root.
Run again with --test-root"""
sys.exit(1)
opts.tests_root = tests_root
if opts.path is None:
opts.path = os.path.join(opts.tests_root, "MANIFEST.json")
update_from_cli(**vars(opts))
if __name__ == "__main__":
main()
| mpl-2.0 |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/PIL/FpxImagePlugin.py | 7 | 6323 | #
# THIS IS WORK IN PROGRESS
#
# The Python Imaging Library.
# $Id$
#
# FlashPix support for PIL
#
# History:
# 97-01-25 fl Created (reads uncompressed RGB images only)
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
from PIL import Image, ImageFile
from PIL.OleFileIO import i8, i32, MAGIC, OleFileIO
__version__ = "0.1"
# we map from colour field tuples to (mode, rawmode) descriptors
MODES = {
# opacity
(0x00007ffe): ("A", "L"),
# monochrome
(0x00010000,): ("L", "L"),
(0x00018000, 0x00017ffe): ("RGBA", "LA"),
# photo YCC
(0x00020000, 0x00020001, 0x00020002): ("RGB", "YCC;P"),
(0x00028000, 0x00028001, 0x00028002, 0x00027ffe): ("RGBA", "YCCA;P"),
# standard RGB (NIFRGB)
(0x00030000, 0x00030001, 0x00030002): ("RGB", "RGB"),
(0x00038000, 0x00038001, 0x00038002, 0x00037ffe): ("RGBA", "RGBA"),
}
#
# --------------------------------------------------------------------
def _accept(prefix):
return prefix[:8] == MAGIC
##
# Image plugin for the FlashPix images.
class FpxImageFile(ImageFile.ImageFile):
format = "FPX"
format_description = "FlashPix"
def _open(self):
#
# read the OLE directory and see if this is a likely
# to be a FlashPix file
try:
self.ole = OleFileIO(self.fp)
except IOError:
raise SyntaxError("not an FPX file; invalid OLE file")
if self.ole.root.clsid != "56616700-C154-11CE-8553-00AA00A1F95B":
raise SyntaxError("not an FPX file; bad root CLSID")
self._open_index(1)
def _open_index(self, index=1):
#
# get the Image Contents Property Set
prop = self.ole.getproperties([
"Data Object Store %06d" % index,
"\005Image Contents"
])
# size (highest resolution)
self.size = prop[0x1000002], prop[0x1000003]
size = max(self.size)
i = 1
while size > 64:
size = size / 2
i += 1
self.maxid = i - 1
# mode. instead of using a single field for this, flashpix
# requires you to specify the mode for each channel in each
# resolution subimage, and leaves it to the decoder to make
# sure that they all match. for now, we'll cheat and assume
# that this is always the case.
id = self.maxid << 16
s = prop[0x2000002 | id]
colors = []
for i in range(i32(s, 4)):
# note: for now, we ignore the "uncalibrated" flag
colors.append(i32(s, 8+i*4) & 0x7fffffff)
self.mode, self.rawmode = MODES[tuple(colors)]
# load JPEG tables, if any
self.jpeg = {}
for i in range(256):
id = 0x3000001 | (i << 16)
if id in prop:
self.jpeg[i] = prop[id]
# print len(self.jpeg), "tables loaded"
self._open_subimage(1, self.maxid)
def _open_subimage(self, index=1, subimage=0):
#
# setup tile descriptors for a given subimage
stream = [
"Data Object Store %06d" % index,
"Resolution %04d" % subimage,
"Subimage 0000 Header"
]
fp = self.ole.openstream(stream)
# skip prefix
fp.read(28)
# header stream
s = fp.read(36)
size = i32(s, 4), i32(s, 8)
# tilecount = i32(s, 12)
tilesize = i32(s, 16), i32(s, 20)
# channels = i32(s, 24)
offset = i32(s, 28)
length = i32(s, 32)
# print size, self.mode, self.rawmode
if size != self.size:
raise IOError("subimage mismatch")
# get tile descriptors
fp.seek(28 + offset)
s = fp.read(i32(s, 12) * length)
x = y = 0
xsize, ysize = size
xtile, ytile = tilesize
self.tile = []
for i in range(0, len(s), length):
compression = i32(s, i+8)
if compression == 0:
self.tile.append(("raw", (x, y, x+xtile, y+ytile),
i32(s, i) + 28, (self.rawmode)))
elif compression == 1:
# FIXME: the fill decoder is not implemented
self.tile.append(("fill", (x, y, x+xtile, y+ytile),
i32(s, i) + 28, (self.rawmode, s[12:16])))
elif compression == 2:
internal_color_conversion = i8(s[14])
jpeg_tables = i8(s[15])
rawmode = self.rawmode
if internal_color_conversion:
# The image is stored as usual (usually YCbCr).
if rawmode == "RGBA":
# For "RGBA", data is stored as YCbCrA based on
# negative RGB. The following trick works around
# this problem :
jpegmode, rawmode = "YCbCrK", "CMYK"
else:
jpegmode = None # let the decoder decide
else:
# The image is stored as defined by rawmode
jpegmode = rawmode
self.tile.append(("jpeg", (x, y, x+xtile, y+ytile),
i32(s, i) + 28, (rawmode, jpegmode)))
# FIXME: jpeg tables are tile dependent; the prefix
# data must be placed in the tile descriptor itself!
if jpeg_tables:
self.tile_prefix = self.jpeg[jpeg_tables]
else:
raise IOError("unknown/invalid compression")
x = x + xtile
if x >= xsize:
x, y = 0, y + ytile
if y >= ysize:
break # isn't really required
self.stream = stream
self.fp = None
def load(self):
if not self.fp:
self.fp = self.ole.openstream(self.stream[:2] +
["Subimage 0000 Data"])
return ImageFile.ImageFile.load(self)
#
# --------------------------------------------------------------------
Image.register_open(FpxImageFile.format, FpxImageFile, _accept)
Image.register_extension(FpxImageFile.format, ".fpx")
| mit |
migueldiascosta/pymatgen | pymatgen/transformations/tests/test_defect_transformations.py | 2 | 4980 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Unit tests for defect transformations
"""
__author__ = "Bharat Medasani"
__copyright__ = "Copyright 2014, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Bharat Medasani"
__email__ = "[email protected]"
__date__ = "Jul 1 2014"
import unittest
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.transformations.defect_transformations import \
VacancyTransformation, SubstitutionDefectTransformation, \
AntisiteDefectTransformation, InterstitialTransformation
try:
import zeo
except ImportError:
zeo = None
class VacancyTransformationTest(unittest.TestCase):
def test_apply_transformation(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.75, 0.75])
coords.append([0.5, 0.5, 0.5])
coords.append([0.25, 0.25, 0.25])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["Li+", "Li+", "O2-", "O2-"], coords)
t = VacancyTransformation([2,2,2],species="Li")
structures = t.apply_transformation(struct,return_ranked_list=100)
self.assertEqual(len(structures),1)
for vac_struct in structures:
self.assertIn(vac_struct['structure'].composition.formula,
[ "Li15 O16"])
t = VacancyTransformation([2,2,2],species="O")
structures = t.apply_transformation(struct,return_ranked_list=100)
self.assertEqual(len(structures),1)
for vac_struct in structures:
self.assertIn(vac_struct['structure'].composition.formula,
[ "Li16 O15"])
t = VacancyTransformation([2,2,2])
structures = t.apply_transformation(struct,return_ranked_list=1)
self.assertEqual(len(structures),1)
for vac_struct in structures:
self.assertIn(vac_struct['structure'].composition.formula,
[ "Li16 O15","Li15 O16"])
class SubstitutionDefectTransformationTest(unittest.TestCase):
def test_apply_transformation(self):
t = SubstitutionDefectTransformation({"Li":"Na","O":"S"},[2,2,2])
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.75, 0.75])
coords.append([0.5, 0.5, 0.5])
coords.append([0.25, 0.25, 0.25])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["Li+", "Li+", "O2-", "O2-"], coords)
scs = t.apply_transformation(struct,return_ranked_list=100)
self.assertEqual(len(scs),2)
for sc in scs:
self.assertIn(sc['structure'].composition.formula,
["Li16 O16", "Na1 Li15 O16", "Li16 S1 O15"])
class AntisiteDefectTransformationTest(unittest.TestCase):
def test_apply_transformation(self):
t = AntisiteDefectTransformation([2,2,2])
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.75, 0.75])
coords.append([0.5, 0.5, 0.5])
coords.append([0.25, 0.25, 0.25])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["Li+", "Li+", "O2-", "O2-"], coords)
scs = t.apply_transformation(struct,return_ranked_list=100)
self.assertEqual(len(scs),2)
for sc in scs:
self.assertIn(sc['structure'].composition.formula,
["Li16 O16", "Li15 O17", "Li17 O15"])
@unittest.skipIf(not zeo, "zeo not present.")
class InterstitialTransformationTest(unittest.TestCase):
def test_apply_transformation(self):
t = InterstitialTransformation("Na+",[2,2,2])
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.75, 0.75])
coords.append([0.5, 0.5, 0.5])
coords.append([0.25, 0.25, 0.25])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["Li+", "Li+", "O2-", "O2-"], coords)
scs = t.apply_transformation(struct,return_ranked_list=100000)
#self.assertEqual(len(scs),3)
for sc in scs:
#print sc.composition.formula
self.assertIn(sc['structure'].composition.formula,
["Li16 O16", "Na1 Li16 O16", "Li16 Na1 O16"])
if __name__ == '__main__':
unittest.main()
| mit |
SteveDiamond/cvxpy | cvxpy/atoms/norm1.py | 2 | 3043 | """
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import scipy.sparse as sp
from cvxpy.atoms.axis_atom import AxisAtom
class norm1(AxisAtom):
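    """The L1 norm of an expression: the sum of the absolute values (moduli)
    of its entries, optionally computed along a given axis.
    """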
_allow_complex = True
def numeric(self, values):
"""Returns the one norm of x.
"""
if self.axis is None:
values = np.array(values[0]).flatten()
else:
values = np.array(values[0])
return np.linalg.norm(values, 1, axis=self.axis, keepdims=self.keepdims)
def sign_from_args(self):
"""Returns sign (is positive, is negative) of the expression.
"""
# Always positive.
return (True, False)
def is_atom_convex(self):
"""Is the atom convex?
"""
return True
def is_atom_concave(self):
"""Is the atom concave?
"""
return False
def is_incr(self, idx):
"""Is the composition non-decreasing in argument idx?
"""
return self.args[0].is_nonneg()
def is_decr(self, idx):
"""Is the composition non-increasing in argument idx?
"""
return self.args[0].is_nonpos()
def is_pwl(self):
"""Is the atom piecewise linear?
"""
return self.args[0].is_pwl() and \
(self.args[0].is_real() or self.args[0].is_imag())
def get_data(self):
return [self.axis]
def name(self):
return "%s(%s)" % (self.__class__.__name__,
self.args[0].name())
def _domain(self):
"""Returns constraints describing the domain of the node.
"""
return []
def _grad(self, values):
"""Gives the (sub/super)gradient of the atom w.r.t. each argument.
Matrix expressions are vectorized, so the gradient is a matrix.
Args:
values: A list of numeric values for the arguments.
Returns:
A list of SciPy CSC sparse matrices or None.
"""
return self._axis_grad(values)
def _column_grad(self, value):
"""Gives the (sub/super)gradient of the atom w.r.t. a column argument.
Matrix expressions are vectorized, so the gradient is a matrix.
Args:
value: A numeric value for a column.
Returns:
A NumPy ndarray matrix or None.
"""
rows = self.args[0].size
D_null = sp.csc_matrix((rows, 1), dtype='float64')
D_null += (value > 0)
D_null -= (value < 0)
return sp.csc_matrix(D_null.A.ravel(order='F')).T
| gpl-3.0 |
incaser/odoo-odoo | addons/product_extended/wizard/wizard_price.py | 270 | 3043 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2011 OpenERP S.A. (<http://www.openerp.com>).
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.exceptions import except_orm
from openerp.osv import fields, osv
from openerp.tools.translate import _
class wizard_price(osv.osv):
_name = "wizard.price"
_description = "Compute price wizard"
_columns = {
'info_field': fields.text('Info', readonly=True),
'real_time_accounting': fields.boolean("Generate accounting entries when real-time"),
'recursive': fields.boolean("Change prices of child BoMs too"),
}
def default_get(self, cr, uid, fields, context=None):
res = super(wizard_price, self).default_get(cr, uid, fields, context=context)
product_pool = self.pool.get('product.template')
        if context is None:
            context = {}
        product_obj = product_pool.browse(cr, uid, context.get('active_id', False))
rec_id = context and context.get('active_id', False)
assert rec_id, _('Active ID is not set in Context.')
res['info_field'] = str(product_pool.compute_price(cr, uid, [], template_ids=[product_obj.id], test=True, context=context))
return res
def compute_from_bom(self, cr, uid, ids, context=None):
assert len(ids) == 1
if context is None:
context = {}
model = context.get('active_model')
if model != 'product.template':
raise except_orm(_('Wrong model!'), _('This wizard is build for product templates, while you are currently running it from a product variant.'))
rec_id = context and context.get('active_id', False)
assert rec_id, _('Active ID is not set in Context.')
prod_obj = self.pool.get('product.template')
res = self.browse(cr, uid, ids, context=context)
prod = prod_obj.browse(cr, uid, rec_id, context=context)
prod_obj.compute_price(cr, uid, [], template_ids=[prod.id], real_time_accounting=res[0].real_time_accounting, recursive=res[0].recursive, test=False, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
blueboxgroup/horizon | horizon/utils/units.py | 71 | 4365 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import decimal
import pint
from horizon.utils import functions
# Mapping of units from Ceilometer to Pint
INFORMATION_UNITS = (
('B', 'byte'),
('KB', 'Kibyte'),
('MB', 'Mibyte'),
('GB', 'Gibyte'),
('TB', 'Tibyte'),
('PB', 'Pibyte'),
('EB', 'Eibyte'),
)
TIME_UNITS = ('ns', 's', 'min', 'hr', 'day', 'week', 'month', 'year')
ureg = pint.UnitRegistry()
def is_supported(unit):
"""Returns a bool indicating whether the unit specified is supported by
this module.
"""
return unit in functions.get_keys(INFORMATION_UNITS) + TIME_UNITS
def is_larger(unit_1, unit_2):
"""Returns a boolean indicating whether unit_1 is larger than unit_2.
E.g:
>>> is_larger('KB', 'B')
True
>>> is_larger('min', 'day')
False
"""
unit_1 = functions.value_for_key(INFORMATION_UNITS, unit_1)
unit_2 = functions.value_for_key(INFORMATION_UNITS, unit_2)
return ureg.parse_expression(unit_1) > ureg.parse_expression(unit_2)
def convert(value, source_unit, target_unit, fmt=False):
"""Converts value from source_unit to target_unit. Returns a tuple
containing the converted value and target_unit. Having fmt set to True
causes the value to be formatted to 1 decimal digit if it's a decimal or
be formatted as integer if it's an integer.
E.g:
>>> convert(2, 'hr', 'min')
(120.0, 'min')
>>> convert(2, 'hr', 'min', fmt=True)
(120, 'min')
>>> convert(30, 'min', 'hr', fmt=True)
(0.5, 'hr')
"""
orig_target_unit = target_unit
source_unit = functions.value_for_key(INFORMATION_UNITS, source_unit)
target_unit = functions.value_for_key(INFORMATION_UNITS, target_unit)
q = ureg.Quantity(value, source_unit)
q = q.to(ureg.parse_expression(target_unit))
value = functions.format_value(q.magnitude) if fmt else q.magnitude
return value, orig_target_unit
def normalize(value, unit):
"""Converts the value so that it belongs to some expected range.
Returns the new value and new unit.
E.g:
>>> normalize(1024, 'KB')
(1, 'MB')
>>> normalize(90, 'min')
(1.5, 'hr')
>>> normalize(1.0, 'object')
(1, 'object')
"""
if value < 0:
raise ValueError('Negative value: %s %s.' % (value, unit))
if unit in functions.get_keys(INFORMATION_UNITS):
return _normalize_information(value, unit)
elif unit in TIME_UNITS:
return _normalize_time(value, unit)
else:
# Unknown unit, just return it
return functions.format_value(value), unit
def _normalize_information(value, unit):
value = decimal.Decimal(str(value))
while value < 1:
prev_unit = functions.previous_key(INFORMATION_UNITS, unit)
if prev_unit is None:
break
value, unit = convert(value, unit, prev_unit)
while value >= 1024:
next_unit = functions.next_key(INFORMATION_UNITS, unit)
if next_unit is None:
break
value, unit = convert(value, unit, next_unit)
return functions.format_value(value), unit
def _normalize_time(value, unit):
# Normalize time by converting to next higher unit when value is
# at least 2 units
value, unit = convert(value, unit, 's')
if value >= 120:
value, unit = convert(value, 's', 'min')
if value >= 120:
value, unit = convert(value, 'min', 'hr')
if value >= 48:
value, unit = convert(value, 'hr', 'day')
if value >= 730:
value, unit = convert(value, 'day', 'year')
elif value >= 62:
value, unit = convert(value, 'day', 'month')
elif value >= 14:
value, unit = convert(value, 'day', 'week')
return functions.format_value(value), unit
| apache-2.0 |
apixandru/intellij-community | python/helpers/py3only/docutils/writers/odf_odt/pygmentsformatter.py | 244 | 4671 | # $Id: pygmentsformatter.py 5853 2009-01-19 21:02:02Z dkuhlman $
# Author: Dave Kuhlman <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Additional support for Pygments formatter.
"""
import pygments
import pygments.formatter
class OdtPygmentsFormatter(pygments.formatter.Formatter):
def __init__(self, rststyle_function, escape_function):
pygments.formatter.Formatter.__init__(self)
self.rststyle_function = rststyle_function
self.escape_function = escape_function
def rststyle(self, name, parameters=( )):
return self.rststyle_function(name, parameters)
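# Note: these formatter classes are typically handed, together with a lexer,
# to pygments.highlight(); Pygments then calls format() with the token stream
# and the resulting ODF <text:span> markup is written to `outfile`, styled via
# the writer-supplied rststyle() lookup.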
class OdtPygmentsProgFormatter(OdtPygmentsFormatter):
def format(self, tokensource, outfile):
tokenclass = pygments.token.Token
for ttype, value in tokensource:
value = self.escape_function(value)
if ttype == tokenclass.Keyword:
s2 = self.rststyle('codeblock-keyword')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Literal.String:
s2 = self.rststyle('codeblock-string')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype in (
tokenclass.Literal.Number.Integer,
tokenclass.Literal.Number.Integer.Long,
tokenclass.Literal.Number.Float,
tokenclass.Literal.Number.Hex,
tokenclass.Literal.Number.Oct,
tokenclass.Literal.Number,
):
s2 = self.rststyle('codeblock-number')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Operator:
s2 = self.rststyle('codeblock-operator')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Comment:
s2 = self.rststyle('codeblock-comment')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name.Class:
s2 = self.rststyle('codeblock-classname')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name.Function:
s2 = self.rststyle('codeblock-functionname')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name:
s2 = self.rststyle('codeblock-name')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
else:
s1 = value
outfile.write(s1)
class OdtPygmentsLaTeXFormatter(OdtPygmentsFormatter):
def format(self, tokensource, outfile):
tokenclass = pygments.token.Token
for ttype, value in tokensource:
value = self.escape_function(value)
if ttype == tokenclass.Keyword:
s2 = self.rststyle('codeblock-keyword')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype in (tokenclass.Literal.String,
tokenclass.Literal.String.Backtick,
):
s2 = self.rststyle('codeblock-string')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name.Attribute:
s2 = self.rststyle('codeblock-operator')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Comment:
if value[-1] == '\n':
s2 = self.rststyle('codeblock-comment')
s1 = '<text:span text:style-name="%s">%s</text:span>\n' % \
(s2, value[:-1], )
else:
s2 = self.rststyle('codeblock-comment')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name.Builtin:
s2 = self.rststyle('codeblock-name')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
else:
s1 = value
outfile.write(s1)
| apache-2.0 |
pranjulkansal/wordbase-solver-master | find.py | 3 | 2554 | #!/usr/bin/env python
import sys
import sqlite3 as db
import codecs
from copy import deepcopy
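# Overview (added note): this script reads a rectangular letter grid (one row
# per line of `infile`) and a per-language SQLite dictionary "<lang>.sqlite"
# holding a `words` table of full words and a `bits` table (apparently word
# prefixes, used to prune the search). It walks paths over the 8-neighbourhood
# of each cell, starting from the top row or from an optional start cell
# (sy, sx), optionally requiring that a target cell (ty, tx) be used, and
# tracks the longest word and the word reaching deepest into the grid;
# touching the last row prints "YOU WIN". The LEVEL 2/3 passes re-expand from
# every cell already reachable by a word, apparently modelling chained moves.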
sy,sx,ty,tx=0,0,0,0
nope, lang, infile = sys.argv[0:3]
if len(sys.argv)>3:
sy, sx = [int(x) for x in sys.argv[3:5]]
if len(sys.argv)>5:
ty, tx = [int(x) for x in sys.argv[5:7]]
conn = db.connect("%s.sqlite" % lang)
c = conn.cursor()
letters = [line.strip() for line in codecs.open(infile,'r','utf-8').read().strip().upper().split("\n")]
sizex = len(letters[0])
sizey = len(letters)
print sizex,sizey
used = [[False for x in xrange(sizex)] for y in xrange(sizey)]
reachable = [[False for x in xrange(sizex)] for y in xrange(sizey)]
longest = (0,[""])
deepest = (0,[""])
def continuable(root):
c.execute('select * from bits where bit = "%s" limit 1' % root)
return c.fetchone() and True or False
def exists(root):
c.execute('select * from words where word = "%s" limit 1' % root)
return c.fetchone() and True or False
def expand (y, x, root, used):
global longest, deepest
if used[y][x]: return
if not continuable(root) and not exists(root): return
used[y][x] = True
if exists(root):
reachable[y][x] = True
if (not ty and not tx) or (used[ty][tx]):
if len(root) > longest[0]:
longest=(len(root),[root])
if len(root) == longest[0]:
longest=(len(root), longest[1] + [root])
if y > deepest[0]:
deepest=(y,[root])
if y == deepest[0]:
deepest=(y, deepest[1] + [root])
for dx in [-1,0,1]:
for dy in [-1,0,1]:
nx = x+dx
ny = y+dy
if (ny<0) or (nx<0) or (ny>=sizey) or (nx>=sizex):
continue
else:
expand(ny, nx, root+letters[ny][nx],used)
used[y][x] = False
if sx or sy:
words = expand(sy,sx,letters[sy][sx],used)
else:
for x,start in enumerate(letters[0]):
y = 0
expand(y,x,letters[y][x],used)
print "longest", longest
print "deepest", deepest
if deepest[0] == sizey - 1:
print "YOU WIN"
sys.exit(0)
# level 2 analysis
reached = deepcopy(reachable)
for y2 in xrange(0,sizey):
for x2 in xrange(0,sizex):
if reached[y2][x2]:
expand(y2,x2, letters[y2][x2], used)
print "LEVEL 2"
print "longest", longest
print "deepest", deepest
if deepest[0] == sizey - 1:
print "YOU WIN"
sys.exit(0)
# level 3 analysis
reached = deepcopy(reachable)
for y2 in xrange(0,sizey):
for x2 in xrange(0,sizex):
if reached[y2][x2]:
expand(y2,x2, letters[y2][x2], used)
print "LEVEL 3"
print "longest", longest
print "deepest", deepest
if deepest[0] == sizey - 1:
print "YOU WIN"
| cc0-1.0 |
duniter/duniter-python-api | examples/save_and_load_private_key_file_ewif.py | 2 | 2435 | """
Copyright 2014-2021 Vincent Texier <[email protected]>
DuniterPy is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
DuniterPy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from duniterpy.key import SigningKey
import getpass
import os
import sys
if "XDG_CONFIG_HOME" in os.environ:
home_path = os.environ["XDG_CONFIG_HOME"]
elif "HOME" in os.environ:
home_path = os.environ["HOME"]
elif "APPDATA" in os.environ:
home_path = os.environ["APPDATA"]
else:
home_path = os.path.dirname(__file__)
# CONFIG #######################################
# WARNING : Hide this file in a safe and secure place
# If one day you forget your credentials,
# you'll have to use one of your private keys instead
PRIVATE_KEY_FILE_PATH = os.path.join(home_path, ".duniter_account_ewif_v1.duniterkey")
################################################
# prompt hidden user entry
salt = getpass.getpass("Enter your passphrase (salt): ")
# prompt hidden user entry
password = getpass.getpass("Enter your password: ")
# prompt public key
pubkey = input("Enter your public key: ")
# init signer instance
signer = SigningKey.from_credentials(salt, password)
# check public key
if signer.pubkey != pubkey:
print("Bad credentials!")
sys.exit(1)
# prompt hidden user entry
ewif_password = getpass.getpass("Enter an encryption password: ")
# save private key in a file (EWIF v1 format)
signer.save_ewif_file(PRIVATE_KEY_FILE_PATH, ewif_password)
# document saved
print(
"Private key for public key %s saved in %s" % (signer.pubkey, PRIVATE_KEY_FILE_PATH)
)
try:
# load private keys from file
loaded_signer = SigningKey.from_ewif_file(
PRIVATE_KEY_FILE_PATH, ewif_password
) # type: SigningKey
# check public key from file
print(
"Public key %s loaded from file %s"
% (loaded_signer.pubkey, PRIVATE_KEY_FILE_PATH)
)
except IOError as error:
print(error)
sys.exit(1)
sys.exit(0)
| gpl-3.0 |
teamtuga4/teamtuga4ever.repository | script.module.urlresolver/lib/urlresolver/plugins/vidspot.py | 3 | 2377 | '''
Vidspot urlresolver plugin
Copyright (C) 2013 Vinnydude
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
import urllib
import urlparse
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
class VidSpotResolver(UrlResolver):
name = "vidspot"
domains = ["vidspot.net"]
pattern = '(?://|\.)(vidspot\.net)/(?:embed-)?([0-9a-zA-Z]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
url = self.get_url(host, media_id)
html = self.net.http_GET(url).content
data = {}
r = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', html)
for name, value in r:
data[name] = value
html = self.net.http_POST(url, data).content
r = re.search('"sources"\s*:\s*\[(.*?)\]', html, re.DOTALL)
if r:
fragment = r.group(1)
stream_url = None
for match in re.finditer('"file"\s*:\s*"([^"]+)', fragment):
stream_url = match.group(1)
if stream_url:
stream_url = '%s?%s&direct=false' % (stream_url.split('?')[0], urlparse.urlparse(stream_url).query)
return stream_url + '|' + urllib.urlencode({'User-Agent': common.IE_USER_AGENT})
else:
raise ResolverError('could not find file')
else:
raise ResolverError('could not find sources')
def get_url(self, host, media_id):
return 'http://vidspot.net/%s' % media_id
def get_host_and_id(self, url):
r = re.search(self.pattern, url)
if r:
return r.groups()
else:
return False
def valid_url(self, url, host):
return re.search(self.pattern, url) or self.name in host
| gpl-2.0 |
dashea/anaconda | tests/gui/test_reclaim.py | 3 | 2568 | #!/usr/bin/python3
#
# Copyright (C) 2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Chris Lumens <[email protected]>
from . import base, welcome, summary, storage, progress, rootpassword
import subprocess
from blivet.size import Size
class BasicReclaimTestCase(base.DogtailTestCase):
drives = [("one", Size("8 GiB"))]
name = "basicreclaim"
# This does not test every spoke, as we only need to do enough to satisfy anaconda
# and get us onto the progress hub.
tests = [welcome.BasicWelcomeTestCase,
summary.SummaryTestCase,
storage.BasicReclaimTestCase,
progress.ProgressTestCase,
rootpassword.BasicRootPasswordTestCase,
progress.FinishTestCase]
def makeDrives(self):
base.DogtailTestCase.makeDrives(self)
# Put a partition and filesystem across the whole disk, which will
# force anaconda to display the reclaim dialog.
for (drive, size) in self.drives:
subprocess.call(["/sbin/parted", "-s", self._drivePaths[drive], "mklabel", "msdos"],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
subprocess.call(["/sbin/parted", "-s", self._drivePaths[drive], "mkpart", "p", "ext2", "0", str(size)],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
subprocess.call(["/sbin/mkfs.ext4", "-F", self._drivePaths[drive]],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
class CantReclaimTestCase(base.DogtailTestCase):
drives = [("one", Size("1 GiB"))]
name = "cantreclaim"
# We don't get to test much here, since the reclaim test shuts down anaconda.
tests = [welcome.BasicWelcomeTestCase,
summary.SummaryTestCase,
storage.CantReclaimTestCase]
| gpl-2.0 |
shepdelacreme/ansible | lib/ansible/parsing/vault/__init__.py | 12 | 52120 | # (c) 2014, James Tanner <[email protected]>
# (c) 2016, Adrian Likins <[email protected]>
# (c) 2016 Toshio Kuratomi <[email protected]>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import random
import shlex
import shutil
import subprocess
import sys
import tempfile
import warnings
from binascii import hexlify
from binascii import unhexlify
from binascii import Error as BinasciiError
HAS_CRYPTOGRAPHY = False
HAS_PYCRYPTO = False
HAS_SOME_PYCRYPTO = False
CRYPTOGRAPHY_BACKEND = None
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, padding
from cryptography.hazmat.primitives.hmac import HMAC
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.hazmat.primitives.ciphers import (
Cipher as C_Cipher, algorithms, modes
)
CRYPTOGRAPHY_BACKEND = default_backend()
HAS_CRYPTOGRAPHY = True
except ImportError:
pass
try:
from Crypto.Cipher import AES as AES_pycrypto
HAS_SOME_PYCRYPTO = True
# Note: Only used for loading obsolete VaultAES files. All files are written
# using the newer VaultAES256 which does not require md5
from Crypto.Hash import SHA256 as SHA256_pycrypto
from Crypto.Hash import HMAC as HMAC_pycrypto
# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
from Crypto.Util import Counter as Counter_pycrypto
# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
from Crypto.Protocol.KDF import PBKDF2 as PBKDF2_pycrypto
HAS_PYCRYPTO = True
except ImportError:
pass
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible import constants as C
from ansible.module_utils.six import PY3, binary_type
# Note: on py2, this zip is izip not the list based zip() builtin
from ansible.module_utils.six.moves import zip
from ansible.module_utils._text import to_bytes, to_text, to_native
from ansible.utils.path import makedirs_safe
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
b_HEADER = b'$ANSIBLE_VAULT'
CIPHER_WHITELIST = frozenset((u'AES', u'AES256'))
CIPHER_WRITE_WHITELIST = frozenset((u'AES256',))
# See also CIPHER_MAPPING at the bottom of the file which maps cipher strings
# (used in VaultFile header) to a cipher class
NEED_CRYPTO_LIBRARY = "ansible-vault requires either the cryptography library (preferred) or"
if HAS_SOME_PYCRYPTO:
NEED_CRYPTO_LIBRARY += " a newer version of"
NEED_CRYPTO_LIBRARY += " pycrypto in order to function."
class AnsibleVaultError(AnsibleError):
pass
class AnsibleVaultPasswordError(AnsibleVaultError):
pass
class AnsibleVaultFormatError(AnsibleError):
pass
def is_encrypted(data):
""" Test if this is vault encrypted data blob
:arg data: a byte or text string to test whether it is recognized as vault
encrypted data
:returns: True if it is recognized. Otherwise, False.
"""
try:
# Make sure we have a byte string and that it only contains ascii
# bytes.
b_data = to_bytes(to_text(data, encoding='ascii', errors='strict', nonstring='strict'), encoding='ascii', errors='strict')
except (UnicodeError, TypeError):
# The vault format is pure ascii so if we failed to encode to bytes
# via ascii we know that this is not vault data.
# Similarly, if it's not a string, it's not vault data
return False
if b_data.startswith(b_HEADER):
return True
return False
def is_encrypted_file(file_obj, start_pos=0, count=-1):
"""Test if the contents of a file obj are a vault encrypted data blob.
:arg file_obj: A file object that will be read from.
:kwarg start_pos: A byte offset in the file to start reading the header
from. Defaults to 0, the beginning of the file.
:kwarg count: Read up to this number of bytes from the file to determine
if it looks like encrypted vault data. The default is -1, read to the
end of file.
:returns: True if the file looks like a vault file. Otherwise, False.
"""
# read the header and reset the file stream to where it started
current_position = file_obj.tell()
try:
file_obj.seek(start_pos)
return is_encrypted(file_obj.read(count))
finally:
file_obj.seek(current_position)
def _parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id=None):
b_tmpdata = b_vaulttext_envelope.splitlines()
b_tmpheader = b_tmpdata[0].strip().split(b';')
b_version = b_tmpheader[1].strip()
cipher_name = to_text(b_tmpheader[2].strip())
vault_id = default_vault_id
# Only attempt to find vault_id if the vault file is version 1.2 or newer
# if self.b_version == b'1.2':
if len(b_tmpheader) >= 4:
vault_id = to_text(b_tmpheader[3].strip())
b_ciphertext = b''.join(b_tmpdata[1:])
return b_ciphertext, b_version, cipher_name, vault_id
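# Illustrative envelope layout (all values below are examples only):
#   $ANSIBLE_VAULT;1.2;AES256;dev
#   63346335653234386532373339...   <- hexlified payload, wrapped at 80 chars
# The header fields are ';'-separated: marker, format version, cipher name
# and (for 1.2+) the optional vault id.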
def parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id=None, filename=None):
"""Parse the vaulttext envelope
When data is saved, it has a header prepended and is formatted into 80
character lines. This method extracts the information from the header
and then removes the header and the inserted newlines. The string returned
is suitable for processing by the Cipher classes.
:arg b_vaulttext: byte str containing the data from a save file
:kwarg default_vault_id: The vault_id name to use if the vaulttext does not provide one.
:kwarg filename: The filename that the data came from. This is only
used to make better error messages in case the data cannot be
decrypted. This is optional.
:returns: A tuple of byte str of the vaulttext suitable to pass to parse_vaultext,
a byte str of the vault format version,
the name of the cipher used, and the vault_id.
:raises: AnsibleVaultFormatError: if the vaulttext_envelope format is invalid
"""
# used by decrypt
default_vault_id = default_vault_id or C.DEFAULT_VAULT_IDENTITY
try:
return _parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id)
except Exception as exc:
msg = "Vault envelope format error"
if filename:
msg += ' in %s' % (filename)
msg += ': %s' % exc
raise AnsibleVaultFormatError(msg)
def format_vaulttext_envelope(b_ciphertext, cipher_name, version=None, vault_id=None):
""" Add header and format to 80 columns
:arg b_ciphertext: the encrypted and hexlified data as a byte string
:arg cipher_name: unicode cipher name (for ex, u'AES256')
:arg version: unicode vault version (for ex, '1.2'). Optional ('1.1' is default)
:arg vault_id: unicode vault identifier. If provided, the version will be bumped to 1.2.
:returns: a byte str that should be dumped into a file. It's
formatted to 80 char columns and has the header prepended
"""
if not cipher_name:
raise AnsibleError("the cipher must be set before adding a header")
version = version or '1.1'
# If we specify a vault_id, use format version 1.2. For no vault_id, stick to 1.1
if vault_id and vault_id != u'default':
version = '1.2'
b_version = to_bytes(version, 'utf-8', errors='strict')
b_vault_id = to_bytes(vault_id, 'utf-8', errors='strict')
b_cipher_name = to_bytes(cipher_name, 'utf-8', errors='strict')
header_parts = [b_HEADER,
b_version,
b_cipher_name]
if b_version == b'1.2' and b_vault_id:
header_parts.append(b_vault_id)
header = b';'.join(header_parts)
b_vaulttext = [header]
b_vaulttext += [b_ciphertext[i:i + 80] for i in range(0, len(b_ciphertext), 80)]
b_vaulttext += [b'']
b_vaulttext = b'\n'.join(b_vaulttext)
return b_vaulttext
def _unhexlify(b_data):
try:
return unhexlify(b_data)
except (BinasciiError, TypeError) as exc:
raise AnsibleVaultFormatError('Vault format unhexlify error: %s' % exc)
def _parse_vaulttext(b_vaulttext):
b_vaulttext = _unhexlify(b_vaulttext)
b_salt, b_crypted_hmac, b_ciphertext = b_vaulttext.split(b"\n", 2)
b_salt = _unhexlify(b_salt)
b_ciphertext = _unhexlify(b_ciphertext)
return b_ciphertext, b_salt, b_crypted_hmac
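# After the outer unhexlify, the payload holds three newline-separated fields:
# hexlify(salt), the hex HMAC-SHA256 digest of the ciphertext, and
# hexlify(ciphertext) -- the same layout VaultAES256.encrypt() produces below.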
def parse_vaulttext(b_vaulttext):
"""Parse the vaulttext
:arg b_vaulttext: byte str containing the vaulttext (ciphertext, salt, crypted_hmac)
:returns: A tuple of byte str of the ciphertext suitable for passing to a
Cipher class's decrypt() function, a byte str of the salt,
and a byte str of the crypted_hmac
:raises: AnsibleVaultFormatError: if the vaulttext format is invalid
"""
# SPLIT SALT, DIGEST, AND DATA
try:
return _parse_vaulttext(b_vaulttext)
except AnsibleVaultFormatError:
raise
except Exception as exc:
msg = "Vault vaulttext format error: %s" % exc
raise AnsibleVaultFormatError(msg)
def verify_secret_is_not_empty(secret, msg=None):
'''Check the secret against minimal requirements.
Raises: AnsibleVaultPasswordError if the password does not meet requirements.
Currently, the only requirement is that the password is not None or an empty string.
'''
msg = msg or 'Invalid vault password was provided'
if not secret:
raise AnsibleVaultPasswordError(msg)
class VaultSecret:
'''Opaque/abstract objects for a single vault secret. ie, a password or a key.'''
def __init__(self, _bytes=None):
# FIXME: ? that seems wrong... Unset etc?
self._bytes = _bytes
@property
def bytes(self):
'''The secret as a bytestring.
Subclasses that store text types will need to override this to encode the text to bytes.
'''
return self._bytes
def load(self):
return self._bytes
class PromptVaultSecret(VaultSecret):
default_prompt_formats = ["Vault password (%s): "]
def __init__(self, _bytes=None, vault_id=None, prompt_formats=None):
super(PromptVaultSecret, self).__init__(_bytes=_bytes)
self.vault_id = vault_id
if prompt_formats is None:
self.prompt_formats = self.default_prompt_formats
else:
self.prompt_formats = prompt_formats
@property
def bytes(self):
return self._bytes
def load(self):
self._bytes = self.ask_vault_passwords()
def ask_vault_passwords(self):
b_vault_passwords = []
for prompt_format in self.prompt_formats:
prompt = prompt_format % {'vault_id': self.vault_id}
try:
vault_pass = display.prompt(prompt, private=True)
except EOFError:
raise AnsibleVaultError('EOFError (ctrl-d) on prompt for (%s)' % self.vault_id)
verify_secret_is_not_empty(vault_pass)
b_vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip()
b_vault_passwords.append(b_vault_pass)
# Make sure the passwords match by comparing them all to the first password
for b_vault_password in b_vault_passwords:
self.confirm(b_vault_passwords[0], b_vault_password)
if b_vault_passwords:
return b_vault_passwords[0]
return None
def confirm(self, b_vault_pass_1, b_vault_pass_2):
# enforce no newline chars at the end of passwords
if b_vault_pass_1 != b_vault_pass_2:
# FIXME: more specific exception
raise AnsibleError("Passwords do not match")
def script_is_client(filename):
'''Determine if a vault secret script is a client script that can be given --vault-id args'''
# if password script is 'something-client' or 'something-client.[sh|py|rb|etc]'
# script_name can still have '.' or could be entire filename if there is no ext
script_name, dummy = os.path.splitext(filename)
# TODO: for now, this is entirely based on filename
if script_name.endswith('-client'):
return True
return False
def get_file_vault_secret(filename=None, vault_id=None, encoding=None, loader=None):
this_path = os.path.realpath(os.path.expanduser(filename))
if not os.path.exists(this_path):
raise AnsibleError("The vault password file %s was not found" % this_path)
if loader.is_executable(this_path):
if script_is_client(filename):
display.vvvv('The vault password file %s is a client script.' % filename)
# TODO: pass vault_id_name to script via cli
return ClientScriptVaultSecret(filename=this_path, vault_id=vault_id,
encoding=encoding, loader=loader)
# just a plain vault password script. No args, returns a byte array
return ScriptVaultSecret(filename=this_path, encoding=encoding, loader=loader)
return FileVaultSecret(filename=this_path, encoding=encoding, loader=loader)
# TODO: mv these classes to a separate file so we don't pollute vault with 'subprocess' etc
class FileVaultSecret(VaultSecret):
def __init__(self, filename=None, encoding=None, loader=None):
super(FileVaultSecret, self).__init__()
self.filename = filename
self.loader = loader
self.encoding = encoding or 'utf8'
# We could load from file here, but that is eventually a pain to test
self._bytes = None
self._text = None
@property
def bytes(self):
if self._bytes:
return self._bytes
if self._text:
return self._text.encode(self.encoding)
return None
def load(self):
self._bytes = self._read_file(self.filename)
def _read_file(self, filename):
"""
Read a vault password from a file or if executable, execute the script and
retrieve password from STDOUT
"""
# TODO: replace with use of self.loader
try:
f = open(filename, "rb")
vault_pass = f.read().strip()
f.close()
except (OSError, IOError) as e:
raise AnsibleError("Could not read vault password file %s: %s" % (filename, e))
b_vault_data, dummy = self.loader._decrypt_if_vault_data(vault_pass, filename)
vault_pass = b_vault_data.strip(b'\r\n')
verify_secret_is_not_empty(vault_pass,
msg='Invalid vault password was provided from file (%s)' % filename)
return vault_pass
def __repr__(self):
if self.filename:
return "%s(filename='%s')" % (self.__class__.__name__, self.filename)
return "%s()" % (self.__class__.__name__)
class ScriptVaultSecret(FileVaultSecret):
def _read_file(self, filename):
if not self.loader.is_executable(filename):
raise AnsibleVaultError("The vault password script %s was not executable" % filename)
command = self._build_command()
stdout, stderr, p = self._run(command)
self._check_results(stdout, stderr, p)
vault_pass = stdout.strip(b'\r\n')
empty_password_msg = 'Invalid vault password was provided from script (%s)' % filename
verify_secret_is_not_empty(vault_pass,
msg=empty_password_msg)
return vault_pass
def _run(self, command):
try:
# STDERR not captured to make it easier for users to prompt for input in their scripts
p = subprocess.Popen(command, stdout=subprocess.PIPE)
except OSError as e:
msg_format = "Problem running vault password script %s (%s)." \
" If this is not a script, remove the executable bit from the file."
msg = msg_format % (self.filename, e)
raise AnsibleError(msg)
stdout, stderr = p.communicate()
return stdout, stderr, p
def _check_results(self, stdout, stderr, popen):
if popen.returncode != 0:
raise AnsibleError("Vault password script %s returned non-zero (%s): %s" %
(self.filename, popen.returncode, stderr))
def _build_command(self):
return [self.filename]
class ClientScriptVaultSecret(ScriptVaultSecret):
VAULT_ID_UNKNOWN_RC = 2
def __init__(self, filename=None, encoding=None, loader=None, vault_id=None):
super(ClientScriptVaultSecret, self).__init__(filename=filename,
encoding=encoding,
loader=loader)
self._vault_id = vault_id
display.vvvv('Executing vault password client script: %s --vault-id %s' % (filename, vault_id))
def _run(self, command):
try:
p = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError as e:
msg_format = "Problem running vault password client script %s (%s)." \
" If this is not a script, remove the executable bit from the file."
msg = msg_format % (self.filename, e)
raise AnsibleError(msg)
stdout, stderr = p.communicate()
return stdout, stderr, p
def _check_results(self, stdout, stderr, popen):
if popen.returncode == self.VAULT_ID_UNKNOWN_RC:
raise AnsibleError('Vault password client script %s did not find a secret for vault-id=%s: %s' %
(self.filename, self._vault_id, stderr))
if popen.returncode != 0:
raise AnsibleError("Vault password client script %s returned non-zero (%s) when getting secret for vault-id=%s: %s" %
(self.filename, popen.returncode, self._vault_id, stderr))
def _build_command(self):
command = [self.filename]
if self._vault_id:
command.extend(['--vault-id', self._vault_id])
return command
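# Illustrative result (filename and vault id are made-up examples): a client
# script 'vault-keyring-client.py' with vault_id 'dev' yields the argv
# ['vault-keyring-client.py', '--vault-id', 'dev'].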
def __repr__(self):
if self.filename:
return "%s(filename='%s', vault_id='%s')" % \
(self.__class__.__name__, self.filename, self._vault_id)
return "%s()" % (self.__class__.__name__)
def match_secrets(secrets, target_vault_ids):
'''Find all VaultSecret objects that are mapped to any of the target_vault_ids in secrets'''
if not secrets:
return []
matches = [(vault_id, secret) for vault_id, secret in secrets if vault_id in target_vault_ids]
return matches
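# Illustrative example (ids and secrets are made up): with
# secrets=[(u'dev', s1), (u'prod', s2)] and target_vault_ids=[u'prod'],
# match_secrets() returns [(u'prod', s2)].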
def match_best_secret(secrets, target_vault_ids):
'''Find the best secret from secrets that matches target_vault_ids
Since secrets should be ordered so the early secrets are 'better' than later ones, this
just finds all the matches, then returns the first secret'''
matches = match_secrets(secrets, target_vault_ids)
if matches:
return matches[0]
# raise exception?
return None
def match_encrypt_vault_id_secret(secrets, encrypt_vault_id=None):
# See if the --encrypt-vault-id matches a vault-id
display.vvvv('encrypt_vault_id=%s' % encrypt_vault_id)
if encrypt_vault_id is None:
raise AnsibleError('match_encrypt_vault_id_secret requires a non None encrypt_vault_id')
encrypt_vault_id_matchers = [encrypt_vault_id]
encrypt_secret = match_best_secret(secrets, encrypt_vault_id_matchers)
# return the best match for --encrypt-vault-id
if encrypt_secret:
return encrypt_secret
# If we specified an encrypt_vault_id and we couldn't find it, don't
# fall back to using the first/best secret
raise AnsibleVaultError('Did not find a match for --encrypt-vault-id=%s in the known vault-ids %s' % (encrypt_vault_id,
[_v for _v, _vs in secrets]))
def match_encrypt_secret(secrets, encrypt_vault_id=None):
'''Find the best/first/only secret in secrets to use for encrypting'''
display.vvvv('encrypt_vault_id=%s' % encrypt_vault_id)
# See if the --encrypt-vault-id matches a vault-id
if encrypt_vault_id:
return match_encrypt_vault_id_secret(secrets,
encrypt_vault_id=encrypt_vault_id)
# Find the best/first secret from secrets since we didn't specify otherwise
# ie, consider all of the available secrets as matches
_vault_id_matchers = [_vault_id for _vault_id, dummy in secrets]
best_secret = match_best_secret(secrets, _vault_id_matchers)
# may be None if 'secrets' was an empty list with no (vault_id, secret) tuples
return best_secret
class VaultLib:
def __init__(self, secrets=None):
self.secrets = secrets or []
self.cipher_name = None
self.b_version = b'1.2'
def encrypt(self, plaintext, secret=None, vault_id=None):
"""Vault encrypt a piece of data.
:arg plaintext: a text or byte string to encrypt.
:returns: a utf-8 encoded byte str of encrypted data. The string
contains a header identifying this as vault encrypted data and
formatted to newline terminated lines of 80 characters. This is
suitable for dumping as is to a vault file.
If the string passed in is a text string, it will be encoded to UTF-8
before encryption.
"""
if secret is None:
if self.secrets:
dummy, secret = match_encrypt_secret(self.secrets)
else:
raise AnsibleVaultError("A vault password must be specified to encrypt data")
b_plaintext = to_bytes(plaintext, errors='surrogate_or_strict')
if is_encrypted(b_plaintext):
raise AnsibleError("input is already encrypted")
if not self.cipher_name or self.cipher_name not in CIPHER_WRITE_WHITELIST:
self.cipher_name = u"AES256"
try:
this_cipher = CIPHER_MAPPING[self.cipher_name]()
except KeyError:
raise AnsibleError(u"{0} cipher could not be found".format(self.cipher_name))
# encrypt data
if vault_id:
display.vvvvv('Encrypting with vault_id "%s" and vault secret %s' % (vault_id, secret))
else:
display.vvvvv('Encrypting without a vault_id using vault secret %s' % secret)
b_ciphertext = this_cipher.encrypt(b_plaintext, secret)
# format the data for output to the file
b_vaulttext = format_vaulttext_envelope(b_ciphertext,
self.cipher_name,
vault_id=vault_id)
return b_vaulttext
def decrypt(self, vaulttext, filename=None):
'''Decrypt a piece of vault encrypted data.
:arg vaulttext: a string to decrypt. Since vault encrypted data is an
ascii text format this can be either a byte str or unicode string.
:kwarg filename: a filename that the data came from. This is only
used to make better error messages in case the data cannot be
decrypted.
:returns: a byte string containing the decrypted data
'''
plaintext, vault_id, vault_secret = self.decrypt_and_get_vault_id(vaulttext, filename=filename)
return plaintext
def decrypt_and_get_vault_id(self, vaulttext, filename=None):
"""Decrypt a piece of vault encrypted data.
:arg vaulttext: a string to decrypt. Since vault encrypted data is an
ascii text format this can be either a byte str or unicode string.
:kwarg filename: a filename that the data came from. This is only
used to make better error messages in case the data cannot be
decrypted.
:returns: a tuple of the decrypted data (bytes), the vault-id that matched, and the vault secret that was used
"""
b_vaulttext = to_bytes(vaulttext, errors='strict', encoding='utf-8')
if self.secrets is None:
raise AnsibleVaultError("A vault password must be specified to decrypt data")
if not is_encrypted(b_vaulttext):
msg = "input is not vault encrypted data"
if filename:
msg += "%s is not a vault encrypted file" % to_native(filename)
raise AnsibleError(msg)
b_vaulttext, dummy, cipher_name, vault_id = parse_vaulttext_envelope(b_vaulttext,
filename=filename)
# create the cipher object, note that the cipher used for decrypt can
# be different than the cipher used for encrypt
if cipher_name in CIPHER_WHITELIST:
this_cipher = CIPHER_MAPPING[cipher_name]()
else:
raise AnsibleError("{0} cipher could not be found".format(cipher_name))
b_plaintext = None
if not self.secrets:
raise AnsibleVaultError('Attempting to decrypt but no vault secrets found')
# WARNING: Currently, the vault id is not required to match the vault id in the vault blob to
# decrypt a vault properly. The vault id in the vault blob is not part of the encrypted
# or signed vault payload. There is no cryptographic checking/verification/validation of the
# vault blob's vault id. It can be tampered with and changed. The vault id is just a
# nickname used to pick the best secret and provide some ux/ui info.
# iterate over all the applicable secrets (all of them by default) until one works...
# if we specify a vault_id, only the corresponding vault secret is checked and
# we check it first.
vault_id_matchers = []
vault_id_used = None
vault_secret_used = None
if vault_id:
display.vvvvv('Found a vault_id (%s) in the vaulttext' % (vault_id))
vault_id_matchers.append(vault_id)
_matches = match_secrets(self.secrets, vault_id_matchers)
if _matches:
display.vvvvv('We have a secret associated with vault id (%s), will try to use to decrypt %s' % (vault_id, to_text(filename)))
else:
display.vvvvv('Found a vault_id (%s) in the vault text, but we do not have an associated secret (--vault-id)' % (vault_id))
# Not adding the other secrets to vault_secret_ids enforces a match between the vault_id from the vault_text and
# the known vault secrets.
if not C.DEFAULT_VAULT_ID_MATCH:
# Add all of the known vault_ids as candidates for decrypting a vault.
vault_id_matchers.extend([_vault_id for _vault_id, _dummy in self.secrets if _vault_id != vault_id])
matched_secrets = match_secrets(self.secrets, vault_id_matchers)
# for vault_secret_id in vault_secret_ids:
for vault_secret_id, vault_secret in matched_secrets:
display.vvvvv('Trying to use vault secret=(%s) id=%s to decrypt %s' % (vault_secret, vault_secret_id, to_text(filename)))
try:
# secret = self.secrets[vault_secret_id]
display.vvvv('Trying secret %s for vault_id=%s' % (vault_secret, vault_secret_id))
b_plaintext = this_cipher.decrypt(b_vaulttext, vault_secret)
if b_plaintext is not None:
vault_id_used = vault_secret_id
vault_secret_used = vault_secret
file_slug = ''
if filename:
file_slug = ' of "%s"' % filename
display.vvvvv('Decrypt%s successful with secret=%s and vault_id=%s' % (file_slug, vault_secret, vault_secret_id))
break
except AnsibleVaultFormatError as exc:
msg = "There was a vault format error"
if filename:
msg += ' in %s' % (to_text(filename))
msg += ': %s' % exc
display.warning(msg)
raise
except AnsibleError as e:
display.vvvv('Tried to use the vault secret (%s) to decrypt (%s) but it failed. Error: %s' %
(vault_secret_id, to_text(filename), e))
continue
else:
msg = "Decryption failed (no vault secrets were found that could decrypt)"
if filename:
msg += " on %s" % to_native(filename)
raise AnsibleVaultError(msg)
if b_plaintext is None:
msg = "Decryption failed"
if filename:
msg += " on %s" % to_native(filename)
raise AnsibleError(msg)
return b_plaintext, vault_id_used, vault_secret_used
class VaultEditor:
def __init__(self, vault=None):
# TODO: it may be more useful to just make VaultSecrets an index of VaultLib objects...
self.vault = vault or VaultLib()
# TODO: mv shred file stuff to its own class
def _shred_file_custom(self, tmp_path):
""""Destroy a file, when shred (core-utils) is not available
Unix `shred' destroys files "so that they can be recovered only with great difficulty with
specialised hardware, if at all". It is based on the method from the paper
"Secure Deletion of Data from Magnetic and Solid-State Memory",
Proceedings of the Sixth USENIX Security Symposium (San Jose, California, July 22-25, 1996).
We do not go to that length to re-implement shred in Python; instead, overwriting with a block
of random data should suffice.
See https://github.com/ansible/ansible/pull/13700 .
"""
file_len = os.path.getsize(tmp_path)
if file_len > 0: # avoid work when file was empty
max_chunk_len = min(1024 * 1024 * 2, file_len)
passes = 3
with open(tmp_path, "wb") as fh:
for _ in range(passes):
fh.seek(0, 0)
# get a random chunk of data, each pass with other length
chunk_len = random.randint(max_chunk_len // 2, max_chunk_len)
data = os.urandom(chunk_len)
for _ in range(0, file_len // chunk_len):
fh.write(data)
fh.write(data[:file_len % chunk_len])
# FIXME remove this assert once we have unittests to check its accuracy
if fh.tell() != file_len:
raise AnsibleAssertionError()
os.fsync(fh)
def _shred_file(self, tmp_path):
"""Securely destroy a decrypted file
Note standard limitations of GNU shred apply (For flash, overwriting would have no effect
due to wear leveling; for other storage systems, the async kernel->filesystem->disk calls never
guarantee data hits the disk; etc). Furthermore, if your tmp dir is on tmpfs (a ramdisk),
it is a non-issue.
Nevertheless, some form of overwriting the data (instead of just removing the fs index entry) is
a good idea. If shred is not available (e.g. on windows, or no core-utils installed), fall back on
a custom shredding method.
"""
if not os.path.isfile(tmp_path):
# file is already gone
return
try:
r = subprocess.call(['shred', tmp_path])
except (OSError, ValueError):
# shred is not available on this system, or some other error occurred.
# ValueError caught because macOS El Capitan is raising an
# exception big enough to hit a limit in python2-2.7.11 and below.
# Symptom is ValueError: insecure pickle when shred is not
# installed there.
r = 1
if r != 0:
# we could not successfully execute unix shred; therefore, do custom shred.
self._shred_file_custom(tmp_path)
os.remove(tmp_path)
def _edit_file_helper(self, filename, secret,
existing_data=None, force_save=False, vault_id=None):
# Create a tempfile
root, ext = os.path.splitext(os.path.realpath(filename))
fd, tmp_path = tempfile.mkstemp(suffix=ext)
os.close(fd)
cmd = self._editor_shell_command(tmp_path)
try:
if existing_data:
self.write_data(existing_data, tmp_path, shred=False)
# drop the user into an editor on the tmp file
subprocess.call(cmd)
except Exception as e:
# whatever happens, destroy the decrypted file
self._shred_file(tmp_path)
raise AnsibleError('Unable to execute the command "%s": %s' % (' '.join(cmd), to_native(e)))
b_tmpdata = self.read_data(tmp_path)
# Do nothing if the content has not changed
if existing_data == b_tmpdata and not force_save:
self._shred_file(tmp_path)
return
# encrypt new data and write out to tmp
# An existing vaultfile will always be UTF-8,
# so decode to unicode here
b_ciphertext = self.vault.encrypt(b_tmpdata, secret, vault_id=vault_id)
self.write_data(b_ciphertext, tmp_path)
# shuffle tmp file into place
self.shuffle_files(tmp_path, filename)
display.vvvvv('Saved edited file "%s" encrypted using %s and vault id "%s"' % (filename, secret, vault_id))
def _real_path(self, filename):
# '-' is special to VaultEditor, don't expand it.
if filename == '-':
return filename
real_path = os.path.realpath(filename)
return real_path
def encrypt_bytes(self, b_plaintext, secret, vault_id=None):
b_ciphertext = self.vault.encrypt(b_plaintext, secret, vault_id=vault_id)
return b_ciphertext
def encrypt_file(self, filename, secret, vault_id=None, output_file=None):
# A file to be encrypted into a vaultfile could be any encoding
# so treat the contents as a byte string.
# follow the symlink
filename = self._real_path(filename)
b_plaintext = self.read_data(filename)
b_ciphertext = self.vault.encrypt(b_plaintext, secret, vault_id=vault_id)
self.write_data(b_ciphertext, output_file or filename)
def decrypt_file(self, filename, output_file=None):
# follow the symlink
filename = self._real_path(filename)
ciphertext = self.read_data(filename)
try:
plaintext = self.vault.decrypt(ciphertext, filename=filename)
except AnsibleError as e:
raise AnsibleError("%s for %s" % (to_native(e), to_native(filename)))
self.write_data(plaintext, output_file or filename, shred=False)
def create_file(self, filename, secret, vault_id=None):
""" create a new encrypted file """
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
display.warning("%s does not exist, creating..." % dirname)
makedirs_safe(dirname)
# FIXME: If we can raise an error here, we can probably just make it
# behave like edit instead.
if os.path.isfile(filename):
raise AnsibleError("%s exists, please use 'edit' instead" % filename)
self._edit_file_helper(filename, secret, vault_id=vault_id)
def edit_file(self, filename):
vault_id_used = None
vault_secret_used = None
# follow the symlink
filename = self._real_path(filename)
b_vaulttext = self.read_data(filename)
# vault or yaml files are always utf8
vaulttext = to_text(b_vaulttext)
try:
# vaulttext gets converted back to bytes, but alas
# TODO: return the vault_id that worked?
plaintext, vault_id_used, vault_secret_used = self.vault.decrypt_and_get_vault_id(vaulttext)
except AnsibleError as e:
raise AnsibleError("%s for %s" % (to_native(e), to_native(filename)))
# Figure out the vault id from the file, to select the right secret to re-encrypt it
# (duplicates parts of decrypt, but alas...)
dummy, dummy, cipher_name, vault_id = parse_vaulttext_envelope(b_vaulttext,
filename=filename)
# vault id here may not be the vault id actually used for decrypting
# as when the edited file has no vault-id but is decrypted by non-default id in secrets
# (vault_id=default, while a different vault-id decrypted)
# Keep the same vault-id (and version) as in the header
if cipher_name not in CIPHER_WRITE_WHITELIST:
# we want to get rid of files encrypted with the AES cipher
self._edit_file_helper(filename, vault_secret_used, existing_data=plaintext,
force_save=True, vault_id=vault_id)
else:
self._edit_file_helper(filename, vault_secret_used, existing_data=plaintext,
force_save=False, vault_id=vault_id)
def plaintext(self, filename):
b_vaulttext = self.read_data(filename)
vaulttext = to_text(b_vaulttext)
try:
plaintext = self.vault.decrypt(vaulttext, filename=filename)
return plaintext
except AnsibleError as e:
raise AnsibleVaultError("%s for %s" % (to_native(e), to_native(filename)))
# FIXME/TODO: make this use VaultSecret
def rekey_file(self, filename, new_vault_secret, new_vault_id=None):
# follow the symlink
filename = self._real_path(filename)
prev = os.stat(filename)
b_vaulttext = self.read_data(filename)
vaulttext = to_text(b_vaulttext)
display.vvvvv('Rekeying file "%s" with new vault-id "%s" and vault secret %s' %
(filename, new_vault_id, new_vault_secret))
try:
plaintext, vault_id_used, _dummy = self.vault.decrypt_and_get_vault_id(vaulttext)
except AnsibleError as e:
raise AnsibleError("%s for %s" % (to_native(e), to_native(filename)))
# This is more or less an assert, see #18247
if new_vault_secret is None:
raise AnsibleError('The value for the new_password to rekey %s with is not valid' % filename)
# FIXME: VaultContext...? could rekey to a different vault_id in the same VaultSecrets
# Need a new VaultLib because the new vault data can be a different
# vault lib format or cipher (for ex, when we migrate 1.0 style vault data to
# 1.1 style data we change the version and the cipher). This is where a VaultContext might help
# the new vault will only be used for encrypting, so it doesn't need the vault secrets
# (we will pass one in directly to encrypt)
new_vault = VaultLib(secrets={})
b_new_vaulttext = new_vault.encrypt(plaintext, new_vault_secret, vault_id=new_vault_id)
self.write_data(b_new_vaulttext, filename)
# preserve permissions
os.chmod(filename, prev.st_mode)
os.chown(filename, prev.st_uid, prev.st_gid)
display.vvvvv('Rekeyed file "%s" (decrypted with vault id "%s") was encrypted with new vault-id "%s" and vault secret %s' %
(filename, vault_id_used, new_vault_id, new_vault_secret))
def read_data(self, filename):
try:
if filename == '-':
data = sys.stdin.read()
else:
with open(filename, "rb") as fh:
data = fh.read()
except Exception as e:
raise AnsibleError(str(e))
return data
# TODO: add docstrings for arg types since this code is picky about that
def write_data(self, data, filename, shred=True):
"""Write the data bytes to given path
This is used to write a byte string to a file or stdout. It is used for
writing the results of vault encryption or decryption. It is used for
saving the ciphertext after encryption and it is also used for saving the
plaintext after decrypting a vault. The type of the 'data' arg should be bytes,
since in the plaintext case, the original contents can be of any text encoding
or arbitrary binary data.
When used to write the result of vault encryption, the value of the 'data' arg
should be a utf-8 encoded byte string and not a text type.
When used to write the result of vault decryption, the value of the 'data' arg
should be a byte string and not a text type.
:arg data: the byte string (bytes) data
:arg filename: filename to save 'data' to.
:arg shred: if shred==True, make sure that the original data is first shredded so that it cannot be recovered.
:returns: None
"""
# FIXME: do we need this now? data_bytes should always be a utf-8 byte string
b_file_data = to_bytes(data, errors='strict')
# get a ref to either sys.stdout.buffer for py3 or plain old sys.stdout for py2
# We need sys.stdout.buffer on py3 so we can write bytes to it since the plaintext
# of the vaulted object could be anything/binary/etc
output = getattr(sys.stdout, 'buffer', sys.stdout)
if filename == '-':
output.write(b_file_data)
else:
if os.path.isfile(filename):
if shred:
self._shred_file(filename)
else:
os.remove(filename)
with open(filename, "wb") as fh:
fh.write(b_file_data)
def shuffle_files(self, src, dest):
prev = None
# overwrite dest with src
if os.path.isfile(dest):
prev = os.stat(dest)
# old file 'dest' was encrypted, no need to _shred_file
os.remove(dest)
shutil.move(src, dest)
# reset permissions if needed
if prev is not None:
# TODO: selinux, ACLs, xattr?
os.chmod(dest, prev.st_mode)
os.chown(dest, prev.st_uid, prev.st_gid)
def _editor_shell_command(self, filename):
env_editor = os.environ.get('EDITOR', 'vi')
editor = shlex.split(env_editor)
editor.append(filename)
return editor
########################################
# CIPHERS #
########################################
class VaultAES256:
"""
Vault implementation using AES-CTR with an HMAC-SHA256 authentication code.
Keys are derived using PBKDF2
"""
# http://www.daemonology.net/blog/2009-06-11-cryptographic-right-answers.html
# Note: strings in this class should be byte strings by default.
def __init__(self):
if not HAS_CRYPTOGRAPHY and not HAS_PYCRYPTO:
raise AnsibleError(NEED_CRYPTO_LIBRARY)
@staticmethod
def _create_key_cryptography(b_password, b_salt, key_length, iv_length):
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=2 * key_length + iv_length,
salt=b_salt,
iterations=10000,
backend=CRYPTOGRAPHY_BACKEND)
b_derivedkey = kdf.derive(b_password)
return b_derivedkey
@staticmethod
def _pbkdf2_prf(p, s):
hash_function = SHA256_pycrypto
return HMAC_pycrypto.new(p, s, hash_function).digest()
@classmethod
def _create_key_pycrypto(cls, b_password, b_salt, key_length, iv_length):
# make two keys and one iv
b_derivedkey = PBKDF2_pycrypto(b_password, b_salt, dkLen=(2 * key_length) + iv_length,
count=10000, prf=cls._pbkdf2_prf)
return b_derivedkey
@classmethod
def _gen_key_initctr(cls, b_password, b_salt):
# 16 for AES 128, 32 for AES256
key_length = 32
if HAS_CRYPTOGRAPHY:
# AES is a 128-bit block cipher, so IVs and counter nonces are 16 bytes
iv_length = algorithms.AES.block_size // 8
b_derivedkey = cls._create_key_cryptography(b_password, b_salt, key_length, iv_length)
b_iv = b_derivedkey[(key_length * 2):(key_length * 2) + iv_length]
elif HAS_PYCRYPTO:
# match the size used for counter.new to avoid extra work
iv_length = 16
b_derivedkey = cls._create_key_pycrypto(b_password, b_salt, key_length, iv_length)
b_iv = hexlify(b_derivedkey[(key_length * 2):(key_length * 2) + iv_length])
else:
raise AnsibleError(NEED_CRYPTO_LIBRARY + '(Detected in initctr)')
b_key1 = b_derivedkey[:key_length]
b_key2 = b_derivedkey[key_length:(key_length * 2)]
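# Layout of b_derivedkey with the defaults above (80 bytes in the
# cryptography case): bytes 0-31 -> AES key (b_key1), 32-63 -> HMAC key
# (b_key2), 64-79 -> CTR IV/nonce (b_iv); the pycrypto path hexlifies the IV
# so it can later be parsed with int(b_iv, 16).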
return b_key1, b_key2, b_iv
@staticmethod
def _encrypt_cryptography(b_plaintext, b_key1, b_key2, b_iv):
cipher = C_Cipher(algorithms.AES(b_key1), modes.CTR(b_iv), CRYPTOGRAPHY_BACKEND)
encryptor = cipher.encryptor()
padder = padding.PKCS7(algorithms.AES.block_size).padder()
b_ciphertext = encryptor.update(padder.update(b_plaintext) + padder.finalize())
b_ciphertext += encryptor.finalize()
# COMBINE SALT, DIGEST AND DATA
hmac = HMAC(b_key2, hashes.SHA256(), CRYPTOGRAPHY_BACKEND)
hmac.update(b_ciphertext)
b_hmac = hmac.finalize()
return to_bytes(hexlify(b_hmac), errors='surrogate_or_strict'), hexlify(b_ciphertext)
@staticmethod
def _encrypt_pycrypto(b_plaintext, b_key1, b_key2, b_iv):
# PKCS#7 PAD DATA http://tools.ietf.org/html/rfc5652#section-6.3
bs = AES_pycrypto.block_size
padding_length = (bs - len(b_plaintext) % bs) or bs
b_plaintext += to_bytes(padding_length * chr(padding_length), encoding='ascii', errors='strict')
# COUNTER.new PARAMETERS
# 1) nbits (integer) - Length of the counter, in bits.
# 2) initial_value (integer) - initial value of the counter. "iv" from _gen_key_initctr
ctr = Counter_pycrypto.new(128, initial_value=int(b_iv, 16))
# AES.new PARAMETERS
# 1) AES key, must be either 16, 24, or 32 bytes long -- "key" from _gen_key_initctr
# 2) MODE_CTR, is the recommended mode
# 3) counter=<CounterObject>
cipher = AES_pycrypto.new(b_key1, AES_pycrypto.MODE_CTR, counter=ctr)
# ENCRYPT PADDED DATA
b_ciphertext = cipher.encrypt(b_plaintext)
# COMBINE SALT, DIGEST AND DATA
hmac = HMAC_pycrypto.new(b_key2, b_ciphertext, SHA256_pycrypto)
return to_bytes(hmac.hexdigest(), errors='surrogate_or_strict'), hexlify(b_ciphertext)
@classmethod
def encrypt(cls, b_plaintext, secret):
if secret is None:
raise AnsibleVaultError('The secret passed to encrypt() was None')
b_salt = os.urandom(32)
b_password = secret.bytes
b_key1, b_key2, b_iv = cls._gen_key_initctr(b_password, b_salt)
if HAS_CRYPTOGRAPHY:
b_hmac, b_ciphertext = cls._encrypt_cryptography(b_plaintext, b_key1, b_key2, b_iv)
elif HAS_PYCRYPTO:
b_hmac, b_ciphertext = cls._encrypt_pycrypto(b_plaintext, b_key1, b_key2, b_iv)
else:
raise AnsibleError(NEED_CRYPTO_LIBRARY + '(Detected in encrypt)')
b_vaulttext = b'\n'.join([hexlify(b_salt), b_hmac, b_ciphertext])
# Unnecessary but getting rid of it is a backwards incompatible vault
# format change
b_vaulttext = hexlify(b_vaulttext)
return b_vaulttext
@classmethod
def _decrypt_cryptography(cls, b_ciphertext, b_crypted_hmac, b_key1, b_key2, b_iv):
# b_key1, b_key2, b_iv = self._gen_key_initctr(b_password, b_salt)
# EXIT EARLY IF DIGEST DOESN'T MATCH
hmac = HMAC(b_key2, hashes.SHA256(), CRYPTOGRAPHY_BACKEND)
hmac.update(b_ciphertext)
try:
hmac.verify(_unhexlify(b_crypted_hmac))
except InvalidSignature as e:
raise AnsibleVaultError('HMAC verification failed: %s' % e)
cipher = C_Cipher(algorithms.AES(b_key1), modes.CTR(b_iv), CRYPTOGRAPHY_BACKEND)
decryptor = cipher.decryptor()
unpadder = padding.PKCS7(128).unpadder()
b_plaintext = unpadder.update(
decryptor.update(b_ciphertext) + decryptor.finalize()
) + unpadder.finalize()
return b_plaintext
@staticmethod
def _is_equal(b_a, b_b):
"""
Compare two byte arrays in constant time
to avoid timing attacks.
It would be nice if there was a library for this but
hey.
"""
if not (isinstance(b_a, binary_type) and isinstance(b_b, binary_type)):
raise TypeError('_is_equal can only be used to compare two byte strings')
# http://codahale.com/a-lesson-in-timing-attacks/
if len(b_a) != len(b_b):
return False
result = 0
for b_x, b_y in zip(b_a, b_b):
if PY3:
result |= b_x ^ b_y
else:
result |= ord(b_x) ^ ord(b_y)
return result == 0
@classmethod
def _decrypt_pycrypto(cls, b_ciphertext, b_crypted_hmac, b_key1, b_key2, b_iv):
# EXIT EARLY IF DIGEST DOESN'T MATCH
hmac_decrypt = HMAC_pycrypto.new(b_key2, b_ciphertext, SHA256_pycrypto)
if not cls._is_equal(b_crypted_hmac, to_bytes(hmac_decrypt.hexdigest())):
return None
# SET THE COUNTER AND THE CIPHER
ctr = Counter_pycrypto.new(128, initial_value=int(b_iv, 16))
cipher = AES_pycrypto.new(b_key1, AES_pycrypto.MODE_CTR, counter=ctr)
# DECRYPT PADDED DATA
b_plaintext = cipher.decrypt(b_ciphertext)
# UNPAD DATA
if PY3:
padding_length = b_plaintext[-1]
else:
padding_length = ord(b_plaintext[-1])
b_plaintext = b_plaintext[:-padding_length]
return b_plaintext
@classmethod
def decrypt(cls, b_vaulttext, secret):
b_ciphertext, b_salt, b_crypted_hmac = parse_vaulttext(b_vaulttext)
# TODO: would be nice if a VaultSecret could be passed directly to _decrypt_*
# (move _gen_key_initctr() to a AES256 VaultSecret or VaultContext impl?)
# though, likely needs to be python cryptography specific impl that basically
# creates a Cipher() with b_key1, a Mode.CTR() with b_iv, and a HMAC() with sign key b_key2
b_password = secret.bytes
b_key1, b_key2, b_iv = cls._gen_key_initctr(b_password, b_salt)
if HAS_CRYPTOGRAPHY:
b_plaintext = cls._decrypt_cryptography(b_ciphertext, b_crypted_hmac, b_key1, b_key2, b_iv)
elif HAS_PYCRYPTO:
b_plaintext = cls._decrypt_pycrypto(b_ciphertext, b_crypted_hmac, b_key1, b_key2, b_iv)
else:
raise AnsibleError(NEED_CRYPTO_LIBRARY + '(Detected in decrypt)')
return b_plaintext
# Keys could be made bytes later if the code that gets the data is more
# naturally byte-oriented
CIPHER_MAPPING = {
u'AES256': VaultAES256,
}
| gpl-3.0 |
cliqz/socorro | alembic/versions/22ec34ad88fc_fixes_bug_963600_add.py | 14 | 1034 | """Fixes bug 963600 - add_new_release() and update_product_versions()
Revision ID: 22ec34ad88fc
Revises: 4bb277899d74
Create Date: 2014-01-24 17:17:06.598669
"""
# revision identifiers, used by Alembic.
revision = '22ec34ad88fc'
down_revision = '4bb277899d74'
from alembic import op
from socorro.lib import citexttype, jsontype, buildtype
from socorro.lib.migrations import fix_permissions, load_stored_proc
import sqlalchemy as sa
from sqlalchemy import types
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import table, column
def upgrade():
op.execute("""
DROP FUNCTION add_new_release(citext, citext, citext, numeric, citext, integer, text, boolean, boolean)
""")
load_stored_proc(op, ['add_new_release.sql', 'update_product_versions.sql'])
def downgrade():
op.execute("""
DROP FUNCTION add_new_release(citext, citext, citext, numeric, citext, integer, text, text, boolean, boolean)
""")
load_stored_proc(op, ['add_new_release.sql', 'update_product_versions.sql'])
| mpl-2.0 |
Javiercerna/MissionPlanner | Lib/site-packages/numpy/doc/broadcasting.py | 95 | 5511 | """
========================
Broadcasting over arrays
========================
The term broadcasting describes how numpy treats arrays with different
shapes during arithmetic operations. Subject to certain constraints,
the smaller array is "broadcast" across the larger array so that they
have compatible shapes. Broadcasting provides a means of vectorizing
array operations so that looping occurs in C instead of Python. It does
this without making needless copies of data and usually leads to
efficient algorithm implementations. There are, however, cases where
broadcasting is a bad idea because it leads to inefficient use of memory
that slows computation.
NumPy operations are usually done on pairs of arrays on an
element-by-element basis. In the simplest case, the two arrays must
have exactly the same shape, as in the following example:
>>> a = np.array([1.0, 2.0, 3.0])
>>> b = np.array([2.0, 2.0, 2.0])
>>> a * b
array([ 2., 4., 6.])
NumPy's broadcasting rule relaxes this constraint when the arrays'
shapes meet certain constraints. The simplest broadcasting example occurs
when an array and a scalar value are combined in an operation:
>>> a = np.array([1.0, 2.0, 3.0])
>>> b = 2.0
>>> a * b
array([ 2., 4., 6.])
The result is equivalent to the previous example where ``b`` was an array.
We can think of the scalar ``b`` being *stretched* during the arithmetic
operation into an array with the same shape as ``a``. The new elements in
``b`` are simply copies of the original scalar. The stretching analogy is
only conceptual. NumPy is smart enough to use the original scalar value
without actually making copies, so that broadcasting operations are as
memory and computationally efficient as possible.
The code in the second example is more efficient than that in the first
because broadcasting moves less memory around during the multiplication
(``b`` is a scalar rather than an array).
General Broadcasting Rules
==========================
When operating on two arrays, NumPy compares their shapes element-wise.
It starts with the trailing dimensions, and works its way forward. Two
dimensions are compatible when
1) they are equal, or
2) one of them is 1
If these conditions are not met, a
``ValueError: frames are not aligned`` exception is thrown, indicating that
the arrays have incompatible shapes. The size of the resulting array
is the maximum size along each dimension of the input arrays.
Arrays do not need to have the same *number* of dimensions. For example,
if you have a ``256x256x3`` array of RGB values, and you want to scale
each color in the image by a different value, you can multiply the image
by a one-dimensional array with 3 values. Lining up the sizes of the
trailing axes of these arrays according to the broadcast rules, shows that
they are compatible::
Image (3d array): 256 x 256 x 3
Scale (1d array): 3
Result (3d array): 256 x 256 x 3
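For instance, a minimal sketch of that case (array contents are arbitrary):
>>> image = np.ones((256, 256, 3))
>>> scale = np.array([0.5, 1.0, 2.0])
>>> (image * scale).shape
(256, 256, 3)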
When either of the dimensions compared is one, the larger of the two is
used. In other words, the smaller of two axes is stretched or "copied"
to match the other.
In the following example, both the ``A`` and ``B`` arrays have axes with
length one that are expanded to a larger size during the broadcast
operation::
A (4d array): 8 x 1 x 6 x 1
B (3d array): 7 x 1 x 5
Result (4d array): 8 x 7 x 6 x 5
Here are some more examples::
A (2d array): 5 x 4
B (1d array): 1
Result (2d array): 5 x 4
A (2d array): 5 x 4
B (1d array): 4
Result (2d array): 5 x 4
A (3d array): 15 x 3 x 5
B (3d array): 15 x 1 x 5
Result (3d array): 15 x 3 x 5
A (3d array): 15 x 3 x 5
B (2d array): 3 x 5
Result (3d array): 15 x 3 x 5
A (3d array): 15 x 3 x 5
B (2d array): 3 x 1
Result (3d array): 15 x 3 x 5
Here are examples of shapes that do not broadcast::
A (1d array): 3
B (1d array): 4 # trailing dimensions do not match
A (2d array): 2 x 1
B (3d array): 8 x 4 x 3 # second from last dimensions mismatched
An example of broadcasting in practice::
>>> x = np.arange(4)
>>> xx = x.reshape(4,1)
>>> y = np.ones(5)
>>> z = np.ones((3,4))
>>> x.shape
(4,)
>>> y.shape
(5,)
>>> x + y
<type 'exceptions.ValueError'>: shape mismatch: objects cannot be broadcast to a single shape
>>> xx.shape
(4, 1)
>>> y.shape
(5,)
>>> (xx + y).shape
(4, 5)
>>> xx + y
array([[ 1., 1., 1., 1., 1.],
[ 2., 2., 2., 2., 2.],
[ 3., 3., 3., 3., 3.],
[ 4., 4., 4., 4., 4.]])
>>> x.shape
(4,)
>>> z.shape
(3, 4)
>>> (x + z).shape
(3, 4)
>>> x + z
array([[ 1., 2., 3., 4.],
[ 1., 2., 3., 4.],
[ 1., 2., 3., 4.]])
Broadcasting provides a convenient way of taking the outer product (or
any other outer operation) of two arrays. The following example shows an
outer addition operation of two 1-d arrays::
>>> a = np.array([0.0, 10.0, 20.0, 30.0])
>>> b = np.array([1.0, 2.0, 3.0])
>>> a[:, np.newaxis] + b
array([[ 1., 2., 3.],
[ 11., 12., 13.],
[ 21., 22., 23.],
[ 31., 32., 33.]])
Here the ``newaxis`` index operator inserts a new axis into ``a``,
making it a two-dimensional ``4x1`` array. Combining the ``4x1`` array
with ``b``, which has shape ``(3,)``, yields a ``4x3`` array.
See `this article <http://www.scipy.org/EricsBroadcastingDoc>`_
for illustrations of broadcasting concepts.
"""
| gpl-3.0 |
davidnmurray/iris | lib/iris/experimental/regrid_conservative.py | 3 | 12088 | # (C) British Crown Copyright 2013 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Support for conservative regridding via ESMPy.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import cartopy.crs as ccrs
import numpy as np
from iris.analysis._interpolation import get_xy_dim_coords
import iris
from iris.analysis._regrid import RectilinearRegridder
#: A static Cartopy Geodetic() instance for transforming to true-lat-lons.
_CRS_TRUELATLON = ccrs.Geodetic()
def _convert_latlons(crs, x_array, y_array):
"""
Convert x+y coords in a given crs to (x,y) values in true-lat-lons.
.. note::
Uses a plain Cartopy Geodetic to convert to true-lat-lons. This makes
no allowance for a non-spherical earth. But then, neither does ESMF.
"""
ll_values = _CRS_TRUELATLON.transform_points(crs, x_array, y_array)
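# transform_points returns an (..., 3) array of [x, y, z]-like values; only
# the first two columns (true longitude, true latitude) are needed here.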
return ll_values[..., 0], ll_values[..., 1]
def _make_esmpy_field(x_coord, y_coord, ref_name='field',
data=None, mask=None):
"""
Create an ESMPy ESMF.Field on given coordinates.
Create a ESMF.Grid from the coordinates, defining corners and centre
positions as lats+lons.
Add a grid mask if provided.
Create and return a Field mapped on this Grid, setting data if provided.
Args:
* x_coord, y_coord (:class:`iris.coords.Coord`):
One-dimensional coordinates of shape (nx,) and (ny,).
Their contiguous bounds define an ESMF.Grid of shape (nx, ny).
Kwargs:
* data (:class:`numpy.ndarray`, shape (nx,ny)):
Set the Field data content.
* mask (:class:`numpy.ndarray`, boolean, shape (nx,ny)):
Add a mask item to the grid, assigning it 0/1 where mask=False/True.
"""
# Lazy import so we can build the docs with no ESMF.
import ESMF
# Create a Grid object describing the coordinate cells.
dims = [len(coord.points) for coord in (x_coord, y_coord)]
dims = np.array(dims, dtype=np.int32) # specific type required by ESMF.
grid = ESMF.Grid(dims)
# Get all cell corner coordinates as true-lat-lons
x_bounds, y_bounds = np.meshgrid(x_coord.contiguous_bounds(),
y_coord.contiguous_bounds())
grid_crs = x_coord.coord_system.as_cartopy_crs()
lon_bounds, lat_bounds = _convert_latlons(grid_crs, x_bounds, y_bounds)
# Add grid 'coord' element for corners, and fill with corner values.
grid.add_coords(staggerlocs=[ESMF.StaggerLoc.CORNER])
grid_corners_x = grid.get_coords(0, ESMF.StaggerLoc.CORNER)
grid_corners_x[:] = lon_bounds.T
grid_corners_y = grid.get_coords(1, ESMF.StaggerLoc.CORNER)
grid_corners_y[:] = lat_bounds.T
# calculate the cell centre-points
# NOTE: we don't care about Iris' idea of where the points 'really' are
# *but* ESMF requires the data in the CENTER for conservative regrid,
# according to the documentation :
# - http://www.earthsystemmodeling.org/
# esmf_releases/public/last/ESMF_refdoc.pdf
# - section 22.2.3 : ESMF_REGRIDMETHOD
#
# We are currently determining cell centres in native coords, then
# converting these into true-lat-lons.
# It is confirmed by experiment that moving these centre location *does*
# changes the regrid results.
# TODO: work out why this is needed, and whether these centres are 'right'.
# Average cell corners in native coordinates, then translate to lats+lons
# (more costly, but presumably 'more correct' than averaging lats+lons).
x_centres = x_coord.contiguous_bounds()
x_centres = 0.5 * (x_centres[:-1] + x_centres[1:])
y_centres = y_coord.contiguous_bounds()
y_centres = 0.5 * (y_centres[:-1] + y_centres[1:])
x_points, y_points = np.meshgrid(x_centres, y_centres)
lon_points, lat_points = _convert_latlons(grid_crs, x_points, y_points)
# Add grid 'coord' element for centres + fill with centre-points values.
grid.add_coords(staggerlocs=[ESMF.StaggerLoc.CENTER])
grid_centers_x = grid.get_coords(0, ESMF.StaggerLoc.CENTER)
grid_centers_x[:] = lon_points.T
grid_centers_y = grid.get_coords(1, ESMF.StaggerLoc.CENTER)
grid_centers_y[:] = lat_points.T
# Add a mask item, if requested
if mask is not None:
grid.add_item(ESMF.GridItem.MASK,
[ESMF.StaggerLoc.CENTER])
grid_mask = grid.get_item(ESMF.GridItem.MASK)
grid_mask[:] = np.where(mask, 1, 0)
# create a Field based on this grid
field = ESMF.Field(grid, ref_name)
# assign data content, if provided
if data is not None:
field.data[:] = data
return field
def regrid_conservative_via_esmpy(source_cube, grid_cube):
"""
Perform a conservative regridding with ESMPy.
Regrids the data of a source cube onto a new grid defined by a destination
cube.
Args:
* source_cube (:class:`iris.cube.Cube`):
Source data. Must have two identifiable horizontal dimension
coordinates.
* grid_cube (:class:`iris.cube.Cube`):
Define the target horizontal grid: Only the horizontal dimension
coordinates are actually used.
Returns:
A new cube derived from source_cube, regridded onto the specified
horizontal grid.
Any additional coordinates which map onto the horizontal dimensions are
removed, while all other metadata is retained.
If there are coordinate factories with 2d horizontal reference surfaces,
the reference surfaces are also regridded, using ordinary bilinear
interpolation.
.. note::
Both source and destination cubes must have two dimension coordinates
identified with axes 'X' and 'Y' which share a coord_system with a
Cartopy CRS.
The grids are defined by :meth:`iris.coords.Coord.contiguous_bounds` of
these.
.. note::
Initialises the ESMF Manager, if it was not already called.
This implements default Manager operations (e.g. logging).
To alter this, make a prior call to ESMF.Manager().
"""
# Lazy import so we can build the docs with no ESMF.
import ESMF
# Get source + target XY coordinate pairs and check they are suitable.
src_coords = get_xy_dim_coords(source_cube)
dst_coords = get_xy_dim_coords(grid_cube)
src_cs = src_coords[0].coord_system
grid_cs = dst_coords[0].coord_system
if src_cs is None or grid_cs is None:
raise ValueError("Both 'src' and 'grid' Cubes must have a"
" coordinate system for their rectilinear grid"
" coordinates.")
if src_cs.as_cartopy_crs() is None or grid_cs.as_cartopy_crs() is None:
raise ValueError("Both 'src' and 'grid' Cubes coord_systems must have "
"a valid associated Cartopy CRS.")
def _valid_units(coord):
if isinstance(coord.coord_system, (iris.coord_systems.GeogCS,
iris.coord_systems.RotatedGeogCS)):
valid_units = 'degrees'
else:
valid_units = 'm'
return coord.units == valid_units
if not all(_valid_units(coord) for coord in src_coords + dst_coords):
raise ValueError("Unsupported units: must be 'degrees' or 'm'.")
# Initialise the ESMF manager in case it was not already done.
ESMF.Manager()
# Create a data array for the output cube.
src_dims_xy = [source_cube.coord_dims(coord)[0] for coord in src_coords]
# Size matches source, except for X+Y dimensions
dst_shape = np.array(source_cube.shape)
dst_shape[src_dims_xy] = [coord.shape[0] for coord in dst_coords]
# NOTE: result array is masked -- fix this afterward if all unmasked
fullcube_data = np.ma.zeros(dst_shape)
# Iterate 2d slices over all possible indices of the 'other' dimensions
all_other_dims = [i_dim for i_dim in range(source_cube.ndim)
if i_dim not in src_dims_xy]
all_combinations_of_other_inds = np.ndindex(*dst_shape[all_other_dims])
for other_indices in all_combinations_of_other_inds:
# Construct a tuple of slices to address the 2d xy field
slice_indices_array = np.array([slice(None)] * source_cube.ndim)
slice_indices_array[all_other_dims] = other_indices
slice_indices_tuple = tuple(slice_indices_array)
# Get the source data, reformed into the right dimension order, (x,y).
src_data_2d = source_cube.data[slice_indices_tuple]
if (src_dims_xy[0] > src_dims_xy[1]):
src_data_2d = src_data_2d.transpose()
# Work out whether we have missing data to define a source grid mask.
if np.ma.is_masked(src_data_2d):
srcdata_mask = np.ma.getmask(src_data_2d)
else:
srcdata_mask = None
# Construct ESMF Field objects on source and destination grids.
src_field = _make_esmpy_field(src_coords[0], src_coords[1],
data=src_data_2d, mask=srcdata_mask)
dst_field = _make_esmpy_field(dst_coords[0], dst_coords[1])
# Make Field for destination coverage fraction (for missing data calc).
coverage_field = ESMF.Field(dst_field.grid, 'validmask_dst')
# Do the actual regrid with ESMF.
mask_flag_values = np.array([1], dtype=np.int32)
regrid_method = ESMF.Regrid(src_field, dst_field,
src_mask_values=mask_flag_values,
regrid_method=ESMF.RegridMethod.CONSERVE,
unmapped_action=ESMF.UnmappedAction.IGNORE,
dst_frac_field=coverage_field)
regrid_method(src_field, dst_field)
data = dst_field.data
# Convert destination 'coverage fraction' into a missing-data mask.
# Set the mask wherever part of a cell goes outside the source grid, or
# overlaps a masked source cell.
coverage_tolerance_threshold = 1.0 - 1.0e-8
data.mask = coverage_field.data < coverage_tolerance_threshold
# Transpose ESMF result dims (X,Y) back to the order of the source
if (src_dims_xy[0] > src_dims_xy[1]):
data = data.transpose()
# Paste regridded slice back into parent array
fullcube_data[slice_indices_tuple] = data
# Remove the data mask if completely unused.
if not np.ma.is_masked(fullcube_data):
fullcube_data = np.array(fullcube_data)
# Generate a full 2d sample grid, as required for regridding orography
# NOTE: as seen in "regrid_bilinear_rectilinear_src_and_grid"
# TODO: can this not also be wound into the _create_cube method ?
src_cs = src_coords[0].coord_system
sample_grid_x, sample_grid_y = RectilinearRegridder._sample_grid(
src_cs, dst_coords[0], dst_coords[1])
# Return result as a new cube based on the source.
# TODO: please tidy this interface !!!
return RectilinearRegridder._create_cube(
fullcube_data,
src=source_cube,
x_dim=src_dims_xy[0],
y_dim=src_dims_xy[1],
src_x_coord=src_coords[0],
src_y_coord=src_coords[1],
grid_x_coord=dst_coords[0],
grid_y_coord=dst_coords[1],
sample_grid_x=sample_grid_x,
sample_grid_y=sample_grid_y,
regrid_callback=RectilinearRegridder._regrid)
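# Added sketch (not part of the original module): the coverage-fraction masking
# applied to each regridded slice above, shown with plain numpy arrays. Any
# destination cell whose coverage drops below ~1.0 is flagged as missing; the
# array values here are invented purely for illustration.
def _coverage_mask_example():
    regridded_slice = np.ma.masked_array([[1.0, 2.0], [3.0, 4.0]])
    coverage_fraction = np.array([[1.0, 0.4], [1.0, 1.0]])
    # Same threshold as above: mask anything not (almost) fully covered.
    regridded_slice.mask = coverage_fraction < (1.0 - 1.0e-8)
    return regridded_slice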
| gpl-3.0 |
LaughingSun/three.js | utils/exporters/blender/addons/io_three/exporter/api/light.py | 104 | 1461 | from bpy import data, types
from .. import utilities, logger
def _lamp(func):
"""
:param func:
"""
def inner(name, *args, **kwargs):
"""
:param name:
:param *args:
:param **kwargs:
"""
if isinstance(name, types.Lamp):
lamp = name
else:
lamp = data.lamps[name]
return func(lamp, *args, **kwargs)
return inner
@_lamp
def angle(lamp):
"""
:param lamp:
:rtype: float
"""
logger.debug("light.angle(%s)", lamp)
return lamp.spot_size
@_lamp
def color(lamp):
"""
:param lamp:
:rtype: int
"""
logger.debug("light.color(%s)", lamp)
colour = (lamp.color.r, lamp.color.g, lamp.color.b)
return utilities.rgb2int(colour)
@_lamp
def distance(lamp):
"""
:param lamp:
:rtype: float
"""
logger.debug("light.distance(%s)", lamp)
return lamp.distance
@_lamp
def intensity(lamp):
"""
:param lamp:
:rtype: float
"""
logger.debug("light.intensity(%s)", lamp)
return round(lamp.energy, 2)
# mapping enum values to decay exponent
__FALLOFF_TO_EXP = {
'CONSTANT': 0,
'INVERSE_LINEAR': 1,
'INVERSE_SQUARE': 2,
'CUSTOM_CURVE': 0,
'LINEAR_QUADRATIC_WEIGHTED': 2
}
@_lamp
def falloff(lamp):
"""
:param lamp:
:rtype: float
"""
logger.debug("light.falloff(%s)", lamp)
return __FALLOFF_TO_EXP[lamp.falloff_type]
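# Added usage sketch (not part of the exporter): collecting the exported lamp
# attributes in one place. Requires Blender's bpy data; the lamp name "Lamp" is
# an assumed placeholder.
def _example_dump_lamp(name="Lamp"):
    return {
        'color': color(name),          # packed RGB int via utilities.rgb2int
        'intensity': intensity(name),  # lamp.energy rounded to 2 decimals
        'distance': distance(name),
        'falloff': falloff(name),      # decay exponent from the mapping above
    }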
| mit |
HomeRad/TorCleaner | tests/proxy/rfc2616/test_datedwarn.py | 1 | 4112 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2005-2010 Bastian Kleineidam
"""
"""
import time
from .. import ProxyTest
from wc.http.date import get_date_rfc1123
class test_datedwarn_1old_0cur_0fut(ProxyTest):
def test_datedwarn_1old_0cur_0fut(self):
self.start_test()
def get_response_headers(self, content):
now = time.time()
warndate = get_date_rfc1123(now - 5)
warning = '119 smee "hulla" "%s"' % warndate
date = get_date_rfc1123(now)
return [
"Content-Type: text/plain",
"Content-Length: %d" % len(content),
"Warning: %s" % warning,
"Date: %s" % date,
]
def check_response_headers(self, response):
self.assertTrue(not response.has_header("Warning"))
class test_datedwarn_0old_0cur_1fut(ProxyTest):
def test_datedwarn_0old_0cur_1fut(self):
self.start_test()
def get_response_headers(self, content):
now = time.time()
warndate = get_date_rfc1123(now + 5)
warning = '119 smee "hulla" "%s"' % warndate
date = get_date_rfc1123(now)
return [
"Content-Type: text/plain",
"Content-Length: %d" % len(content),
"Warning: %s" % warning,
"Date: %s" % date,
]
def check_response_headers(self, response):
self.assertTrue(not response.has_header("Warning"))
class test_datedwarn_1old_1cur_1fut(ProxyTest):
def test_datedwarn_1old_1cur_1fut(self):
self.start_test()
def get_response_headers(self, content):
now = time.time()
futdate = get_date_rfc1123(now + 5)
olddate = get_date_rfc1123(now - 5)
futwarn= '119 smee "hulla" "%s"' % futdate
oldwarn = '119 smee "bulla" "%s"' % olddate
warn = '119 smee "wulla"'
date = get_date_rfc1123(now)
return [
"Content-Type: text/plain",
"Content-Length: %d" % len(content),
"Warning: %s" % oldwarn,
"Warning: %s" % futwarn,
"Warning: %s" % warn,
"Date: %s" % date,
]
def check_response_headers(self, response):
self.assertEqual(response.num_headers('Warning'), 1)
class test_datedwarn_1old_continuation(ProxyTest):
def test_datedwarn_1old_continuation(self):
self.start_test()
def get_response_headers(self, content):
now = time.time()
olddate = get_date_rfc1123(now - 5)
oldwarn1 = '119 smee '
oldwarn2 = '"bulla" "%s"' % olddate
date = get_date_rfc1123(now)
return [
"Content-Type: text/plain",
"Content-Length: %d" % len(content),
"Warning: %s" % oldwarn1,
" %s" % oldwarn2,
"Date: %s" % date,
]
def check_response_headers(self, response):
self.assertTrue(not response.has_header('Warning'))
class test_datedwarn_1cur_continuation(ProxyTest):
def test_datedwarn_1cur_continuation(self):
self.start_test()
def get_response_headers(self, content):
now = time.time()
date = get_date_rfc1123(now)
warn1 = '119 smee '
warn2 = '"bulla" "%s"' % date
return [
"Content-Type: text/plain",
"Content-Length: %d" % len(content),
"Warning: %s" % warn1,
" %s" % warn2,
"Date: %s" % date,
]
def check_response_headers(self, response):
self.assertEqual(response.num_headers('Warning'), 1)
class test_datedwarn_1cur_noquotes(ProxyTest):
def test_datedwarn_1cur_noquotes(self):
self.start_test()
def get_response_headers(self, content):
now = time.time()
date = get_date_rfc1123(now)
warn = '110 DataReactor "Response is stale" %s' % date
return [
"Content-Type: text/plain",
"Content-Length: %d" % len(content),
"Warning: %s" % warn,
"Date: %s" % date,
]
def check_response_headers(self, response):
self.assertEqual(response.num_headers('Warning'), 1)
| gpl-2.0 |
FHannes/intellij-community | python/testData/MockSdk3.2/Lib/numbers.py | 110 | 10330 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for numbers, according to PEP 3141.
TODO: Fill out more detailed documentation on the operators."""
from abc import ABCMeta, abstractmethod, abstractproperty
__all__ = ["Number", "Complex", "Real", "Rational", "Integral"]
class Number(metaclass=ABCMeta):
"""All numbers inherit from this class.
If you just want to check if an argument x is a number, without
caring what kind, use isinstance(x, Number).
"""
__slots__ = ()
# Concrete numeric types must provide their own hash implementation
__hash__ = None
## Notes on Decimal
## ----------------
## Decimal has all of the methods specified by the Real abc, but it should
## not be registered as a Real because decimals do not interoperate with
## binary floats (i.e. Decimal('3.14') + 2.71828 is undefined). But,
## abstract reals are expected to interoperate (i.e. R1 + R2 should be
## expected to work if R1 and R2 are both Reals).
class Complex(Number):
"""Complex defines the operations that work on the builtin complex type.
In short, those are: a conversion to complex, .real, .imag, +, -,
*, /, abs(), .conjugate, ==, and !=.
    If it is given heterogeneous arguments, and doesn't have special
knowledge about them, it should fall back to the builtin complex
type as described below.
"""
__slots__ = ()
@abstractmethod
def __complex__(self):
"""Return a builtin complex instance. Called for complex(self)."""
def __bool__(self):
"""True if self != 0. Called for bool(self)."""
return self != 0
@abstractproperty
def real(self):
"""Retrieve the real component of this number.
This should subclass Real.
"""
raise NotImplementedError
@abstractproperty
def imag(self):
"""Retrieve the imaginary component of this number.
This should subclass Real.
"""
raise NotImplementedError
@abstractmethod
def __add__(self, other):
"""self + other"""
raise NotImplementedError
@abstractmethod
def __radd__(self, other):
"""other + self"""
raise NotImplementedError
@abstractmethod
def __neg__(self):
"""-self"""
raise NotImplementedError
@abstractmethod
def __pos__(self):
"""+self"""
raise NotImplementedError
def __sub__(self, other):
"""self - other"""
return self + -other
def __rsub__(self, other):
"""other - self"""
return -self + other
@abstractmethod
def __mul__(self, other):
"""self * other"""
raise NotImplementedError
@abstractmethod
def __rmul__(self, other):
"""other * self"""
raise NotImplementedError
@abstractmethod
def __truediv__(self, other):
"""self / other: Should promote to float when necessary."""
raise NotImplementedError
@abstractmethod
def __rtruediv__(self, other):
"""other / self"""
raise NotImplementedError
@abstractmethod
def __pow__(self, exponent):
"""self**exponent; should promote to float or complex when necessary."""
raise NotImplementedError
@abstractmethod
def __rpow__(self, base):
"""base ** self"""
raise NotImplementedError
@abstractmethod
def __abs__(self):
"""Returns the Real distance from 0. Called for abs(self)."""
raise NotImplementedError
@abstractmethod
def conjugate(self):
"""(x+y*i).conjugate() returns (x-y*i)."""
raise NotImplementedError
@abstractmethod
def __eq__(self, other):
"""self == other"""
raise NotImplementedError
def __ne__(self, other):
"""self != other"""
# The default __ne__ doesn't negate __eq__ until 3.0.
return not (self == other)
Complex.register(complex)
class Real(Complex):
"""To Complex, Real adds the operations that work on real numbers.
In short, those are: a conversion to float, trunc(), divmod,
%, <, <=, >, and >=.
Real also provides defaults for the derived operations.
"""
__slots__ = ()
@abstractmethod
def __float__(self):
"""Any Real can be converted to a native float object.
Called for float(self)."""
raise NotImplementedError
@abstractmethod
def __trunc__(self):
"""trunc(self): Truncates self to an Integral.
Returns an Integral i such that:
* i>0 iff self>0;
* abs(i) <= abs(self);
* for any Integral j satisfying the first two conditions,
abs(i) >= abs(j) [i.e. i has "maximal" abs among those].
i.e. "truncate towards 0".
"""
raise NotImplementedError
@abstractmethod
def __floor__(self):
"""Finds the greatest Integral <= self."""
raise NotImplementedError
@abstractmethod
def __ceil__(self):
"""Finds the least Integral >= self."""
raise NotImplementedError
@abstractmethod
def __round__(self, ndigits=None):
"""Rounds self to ndigits decimal places, defaulting to 0.
If ndigits is omitted or None, returns an Integral, otherwise
returns a Real. Rounds half toward even.
"""
raise NotImplementedError
def __divmod__(self, other):
"""divmod(self, other): The pair (self // other, self % other).
Sometimes this can be computed faster than the pair of
operations.
"""
return (self // other, self % other)
def __rdivmod__(self, other):
"""divmod(other, self): The pair (self // other, self % other).
Sometimes this can be computed faster than the pair of
operations.
"""
return (other // self, other % self)
@abstractmethod
def __floordiv__(self, other):
"""self // other: The floor() of self/other."""
raise NotImplementedError
@abstractmethod
def __rfloordiv__(self, other):
"""other // self: The floor() of other/self."""
raise NotImplementedError
@abstractmethod
def __mod__(self, other):
"""self % other"""
raise NotImplementedError
@abstractmethod
def __rmod__(self, other):
"""other % self"""
raise NotImplementedError
@abstractmethod
def __lt__(self, other):
"""self < other
< on Reals defines a total ordering, except perhaps for NaN."""
raise NotImplementedError
@abstractmethod
def __le__(self, other):
"""self <= other"""
raise NotImplementedError
# Concrete implementations of Complex abstract methods.
def __complex__(self):
"""complex(self) == complex(float(self), 0)"""
return complex(float(self))
@property
def real(self):
"""Real numbers are their real component."""
return +self
@property
def imag(self):
"""Real numbers have no imaginary component."""
return 0
def conjugate(self):
"""Conjugate is a no-op for Reals."""
return +self
Real.register(float)
class Rational(Real):
""".numerator and .denominator should be in lowest terms."""
__slots__ = ()
@abstractproperty
def numerator(self):
raise NotImplementedError
@abstractproperty
def denominator(self):
raise NotImplementedError
# Concrete implementation of Real's conversion to float.
def __float__(self):
"""float(self) = self.numerator / self.denominator
It's important that this conversion use the integer's "true"
division rather than casting one side to float before dividing
so that ratios of huge integers convert without overflowing.
"""
return self.numerator / self.denominator
class Integral(Rational):
"""Integral adds a conversion to int and the bit-string operations."""
__slots__ = ()
@abstractmethod
def __int__(self):
"""int(self)"""
raise NotImplementedError
def __index__(self):
"""someobject[self]"""
return int(self)
@abstractmethod
def __pow__(self, exponent, modulus=None):
"""self ** exponent % modulus, but maybe faster.
Accept the modulus argument if you want to support the
3-argument version of pow(). Raise a TypeError if exponent < 0
or any argument isn't Integral. Otherwise, just implement the
2-argument version described in Complex.
"""
raise NotImplementedError
@abstractmethod
def __lshift__(self, other):
"""self << other"""
raise NotImplementedError
@abstractmethod
def __rlshift__(self, other):
"""other << self"""
raise NotImplementedError
@abstractmethod
def __rshift__(self, other):
"""self >> other"""
raise NotImplementedError
@abstractmethod
def __rrshift__(self, other):
"""other >> self"""
raise NotImplementedError
@abstractmethod
def __and__(self, other):
"""self & other"""
raise NotImplementedError
@abstractmethod
def __rand__(self, other):
"""other & self"""
raise NotImplementedError
@abstractmethod
def __xor__(self, other):
"""self ^ other"""
raise NotImplementedError
@abstractmethod
def __rxor__(self, other):
"""other ^ self"""
raise NotImplementedError
@abstractmethod
def __or__(self, other):
"""self | other"""
raise NotImplementedError
@abstractmethod
def __ror__(self, other):
"""other | self"""
raise NotImplementedError
@abstractmethod
def __invert__(self):
"""~self"""
raise NotImplementedError
# Concrete implementations of Rational and Real abstract methods.
def __float__(self):
"""float(self) == float(int(self))"""
return float(int(self))
@property
def numerator(self):
"""Integers are their own numerators."""
return +self
@property
def denominator(self):
"""Integers have a denominator of 1."""
return 1
Integral.register(int)
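# Added illustration (not part of the stdlib module): the register() calls above
# let the builtin numeric types satisfy isinstance() checks against these ABCs
# without inheriting from them.
def _abc_registration_examples():
    assert isinstance(3, Integral) and isinstance(3, Number)
    assert isinstance(3.5, Real) and not isinstance(3.5, Integral)
    assert isinstance(2j, Complex) and not isinstance(2j, Real)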
| apache-2.0 |
Neural-Network/TicTacToe | examples/rl/environments/cartpole/cart_fem.py | 30 | 1688 | #!/usr/bin/env python
#########################################################################
# Reinforcement Learning with FEM on the CartPoleEnvironment
#
# Requirements: pylab (for plotting only). If not available, comment the
# last 3 lines out
#########################################################################
__author__ = "Thomas Rueckstiess, Frank Sehnke"
from pybrain.tools.example_tools import ExTools
from pybrain.tools.shortcuts import buildNetwork
from pybrain.rl.environments.cartpole import CartPoleEnvironment, BalanceTask
from pybrain.rl.agents import OptimizationAgent
from pybrain.optimization import FEM
from pybrain.rl.experiments import EpisodicExperiment
batch=2 #number of samples per learning step
prnts=100 #number of learning steps after results are printed
epis=4000/batch/prnts #number of roleouts
numbExp=10 #number of experiments
et = ExTools(batch, prnts) #tool for printing and plotting
for runs in range(numbExp):
# create environment
env = CartPoleEnvironment()
# create task
task = BalanceTask(env, 200, desiredValue=None)
# create controller network
net = buildNetwork(4, 1, bias=False)
# create agent with controller and learner (and its options)
agent = OptimizationAgent(net, FEM(storeAllEvaluations = True))
et.agent = agent
# create the experiment
experiment = EpisodicExperiment(task, agent)
#Do the experiment
for updates in range(epis):
for i in range(prnts):
experiment.doEpisodes(batch)
#print "Epsilon : ", agent.learner.sigma
et.printResults((agent.learner._allEvaluations)[-50:-1], runs, updates)
et.addExps()
et.showExps()
| bsd-3-clause |
susansls/zulip | analytics/tests/test_views.py | 9 | 16348 | from __future__ import absolute_import
from django.utils.timezone import get_fixed_timezone, utc
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.timestamp import ceiling_to_hour, ceiling_to_day, \
datetime_to_timestamp
from zerver.models import Realm, UserProfile, Client, get_realm, \
get_user_profile_by_email
from analytics.lib.counts import CountStat, COUNT_STATS
from analytics.lib.time_utils import time_range
from analytics.models import RealmCount, UserCount, BaseCount, \
FillState, last_successful_fill
from analytics.views import stats, get_chart_data, sort_by_totals, \
sort_client_labels, rewrite_client_arrays
from datetime import datetime, timedelta
import mock
import ujson
from six.moves import range
from typing import List, Dict
class TestStatsEndpoint(ZulipTestCase):
def test_stats(self):
# type: () -> None
self.user = get_user_profile_by_email('[email protected]')
self.login(self.user.email)
result = self.client_get('/stats')
self.assertEqual(result.status_code, 200)
# Check that we get something back
self.assert_in_response("Zulip Analytics for", result)
class TestGetChartData(ZulipTestCase):
def setUp(self):
# type: () -> None
self.realm = get_realm('zulip')
self.user = get_user_profile_by_email('[email protected]')
self.login(self.user.email)
self.end_times_hour = [ceiling_to_hour(self.realm.date_created) + timedelta(hours=i)
for i in range(4)]
self.end_times_day = [ceiling_to_day(self.realm.date_created) + timedelta(days=i)
for i in range(4)]
def data(self, i):
# type: (int) -> List[int]
return [0, 0, i, 0]
def insert_data(self, stat, realm_subgroups, user_subgroups):
# type: (CountStat, List[str], List[str]) -> None
if stat.frequency == CountStat.HOUR:
insert_time = self.end_times_hour[2]
fill_time = self.end_times_hour[-1]
if stat.frequency == CountStat.DAY:
insert_time = self.end_times_day[2]
fill_time = self.end_times_day[-1]
RealmCount.objects.bulk_create([
RealmCount(property=stat.property, subgroup=subgroup, end_time=insert_time,
value=100+i, realm=self.realm)
for i, subgroup in enumerate(realm_subgroups)])
UserCount.objects.bulk_create([
UserCount(property=stat.property, subgroup=subgroup, end_time=insert_time,
value=200+i, realm=self.realm, user=self.user)
for i, subgroup in enumerate(user_subgroups)])
FillState.objects.create(property=stat.property, end_time=fill_time, state=FillState.DONE)
def test_number_of_humans(self):
# type: () -> None
stat = COUNT_STATS['active_users:is_bot:day']
self.insert_data(stat, ['true', 'false'], [])
result = self.client_get('/json/analytics/chart_data',
{'chart_name': 'number_of_humans'})
self.assert_json_success(result)
data = ujson.loads(result.content)
self.assertEqual(data, {
'msg': '',
'end_times': [datetime_to_timestamp(dt) for dt in self.end_times_day],
'frequency': CountStat.DAY,
'interval': CountStat.GAUGE,
'realm': {'bot': self.data(100), 'human': self.data(101)},
'display_order': None,
'result': 'success',
})
def test_messages_sent_over_time(self):
# type: () -> None
stat = COUNT_STATS['messages_sent:is_bot:hour']
self.insert_data(stat, ['true', 'false'], ['false'])
result = self.client_get('/json/analytics/chart_data',
{'chart_name': 'messages_sent_over_time'})
self.assert_json_success(result)
data = ujson.loads(result.content)
self.assertEqual(data, {
'msg': '',
'end_times': [datetime_to_timestamp(dt) for dt in self.end_times_hour],
'frequency': CountStat.HOUR,
'interval': CountStat.HOUR,
'realm': {'bot': self.data(100), 'human': self.data(101)},
'user': {'human': self.data(200)},
'display_order': None,
'result': 'success',
})
def test_messages_sent_by_message_type(self):
# type: () -> None
stat = COUNT_STATS['messages_sent:message_type:day']
self.insert_data(stat, ['public_stream', 'private_message'],
['public_stream', 'private_stream'])
result = self.client_get('/json/analytics/chart_data',
{'chart_name': 'messages_sent_by_message_type'})
self.assert_json_success(result)
data = ujson.loads(result.content)
self.assertEqual(data, {
'msg': '',
'end_times': [datetime_to_timestamp(dt) for dt in self.end_times_day],
'frequency': CountStat.DAY,
'interval': CountStat.DAY,
'realm': {'Public Streams': self.data(100), 'Private Streams': self.data(0),
'PMs & Group PMs': self.data(101)},
'user': {'Public Streams': self.data(200), 'Private Streams': self.data(201),
'PMs & Group PMs': self.data(0)},
'display_order': ['PMs & Group PMs', 'Public Streams', 'Private Streams'],
'result': 'success',
})
def test_messages_sent_by_client(self):
# type: () -> None
stat = COUNT_STATS['messages_sent:client:day']
client1 = Client.objects.create(name='client 1')
_client1 = Client.objects.create(name='_client 1')
client2 = Client.objects.create(name='client 2')
client3 = Client.objects.create(name='client 3')
_client3 = Client.objects.create(name='_client 3')
client4 = Client.objects.create(name='client 4')
self.insert_data(stat, [client4.id, client3.id, client2.id],
[client1.id, _client1.id, client4.id, _client3.id])
result = self.client_get('/json/analytics/chart_data',
{'chart_name': 'messages_sent_by_client'})
self.assert_json_success(result)
data = ujson.loads(result.content)
self.assertEqual(data, {
'msg': '',
'end_times': [datetime_to_timestamp(dt) for dt in self.end_times_day],
'frequency': CountStat.DAY,
'interval': CountStat.DAY,
'realm': {'client 4': self.data(100), 'client 3': self.data(101),
'client 2': self.data(102)},
'user': {'client 1': self.data(401), 'client 4': self.data(202),
'client 3': self.data(203)},
'display_order': ['client 1', 'client 2', 'client 3', 'client 4'],
'result': 'success',
})
def test_include_empty_subgroups(self):
# type: () -> None
FillState.objects.create(
property='active_users:is_bot:day', end_time=self.end_times_day[0], state=FillState.DONE)
result = self.client_get('/json/analytics/chart_data',
{'chart_name': 'number_of_humans'})
self.assert_json_success(result)
data = ujson.loads(result.content)
self.assertEqual(data['realm'], {'human': [0], 'bot': [0]})
self.assertFalse('user' in data)
FillState.objects.create(
property='messages_sent:is_bot:hour', end_time=self.end_times_hour[0], state=FillState.DONE)
result = self.client_get('/json/analytics/chart_data',
{'chart_name': 'messages_sent_over_time'})
self.assert_json_success(result)
data = ujson.loads(result.content)
self.assertEqual(data['realm'], {'human': [0], 'bot': [0]})
self.assertEqual(data['user'], {})
FillState.objects.create(
property='messages_sent:message_type:day', end_time=self.end_times_day[0], state=FillState.DONE)
result = self.client_get('/json/analytics/chart_data',
{'chart_name': 'messages_sent_by_message_type'})
self.assert_json_success(result)
data = ujson.loads(result.content)
self.assertEqual(data['realm'], {
'Public Streams': [0], 'Private Streams': [0], 'PMs & Group PMs': [0]})
self.assertEqual(data['user'], {
'Public Streams': [0], 'Private Streams': [0], 'PMs & Group PMs': [0]})
FillState.objects.create(
property='messages_sent:client:day', end_time=self.end_times_day[0], state=FillState.DONE)
result = self.client_get('/json/analytics/chart_data',
{'chart_name': 'messages_sent_by_client'})
self.assert_json_success(result)
data = ujson.loads(result.content)
self.assertEqual(data['realm'], {})
self.assertEqual(data['user'], {})
def test_start_and_end(self):
# type: () -> None
stat = COUNT_STATS['active_users:is_bot:day']
self.insert_data(stat, ['true', 'false'], [])
end_time_timestamps = [datetime_to_timestamp(dt) for dt in self.end_times_day]
# valid start and end
result = self.client_get('/json/analytics/chart_data',
{'chart_name': 'number_of_humans',
'start': end_time_timestamps[1],
'end': end_time_timestamps[2]})
self.assert_json_success(result)
data = ujson.loads(result.content)
self.assertEqual(data['end_times'], end_time_timestamps[1:3])
self.assertEqual(data['realm'], {'bot': [0, 100], 'human': [0, 101]})
# start later then end
result = self.client_get('/json/analytics/chart_data',
{'chart_name': 'number_of_humans',
'start': end_time_timestamps[2],
'end': end_time_timestamps[1]})
self.assert_json_error_contains(result, 'Start time is later than')
def test_min_length(self):
# type: () -> None
stat = COUNT_STATS['active_users:is_bot:day']
self.insert_data(stat, ['true', 'false'], [])
# test min_length is too short to change anything
result = self.client_get('/json/analytics/chart_data',
{'chart_name': 'number_of_humans',
'min_length': 2})
self.assert_json_success(result)
data = ujson.loads(result.content)
self.assertEqual(data['end_times'], [datetime_to_timestamp(dt) for dt in self.end_times_day])
self.assertEqual(data['realm'], {'bot': self.data(100), 'human': self.data(101)})
# test min_length larger than filled data
result = self.client_get('/json/analytics/chart_data',
{'chart_name': 'number_of_humans',
'min_length': 5})
self.assert_json_success(result)
data = ujson.loads(result.content)
end_times = [ceiling_to_day(self.realm.date_created) + timedelta(days=i) for i in range(-1, 4)]
self.assertEqual(data['end_times'], [datetime_to_timestamp(dt) for dt in end_times])
self.assertEqual(data['realm'], {'bot': [0]+self.data(100), 'human': [0]+self.data(101)})
def test_non_existent_chart(self):
# type: () -> None
result = self.client_get('/json/analytics/chart_data',
{'chart_name': 'does_not_exist'})
self.assert_json_error_contains(result, 'Unknown chart name')
def test_analytics_not_running(self):
# type: () -> None
# try to get data for a valid chart, but before we've put anything in the database
# (e.g. before update_analytics_counts has been run)
with mock.patch('logging.warning'):
result = self.client_get('/json/analytics/chart_data',
{'chart_name': 'number_of_humans'})
self.assert_json_error_contains(result, 'No analytics data available')
class TestGetChartDataHelpers(ZulipTestCase):
# last_successful_fill is in analytics/models.py, but get_chart_data is
# the only function that uses it at the moment
def test_last_successful_fill(self):
# type: () -> None
        self.assertIsNone(last_successful_fill('non-existent'))
a_time = datetime(2016, 3, 14, 19).replace(tzinfo=utc)
one_hour_before = datetime(2016, 3, 14, 18).replace(tzinfo=utc)
fillstate = FillState.objects.create(property='property', end_time=a_time,
state=FillState.DONE)
self.assertEqual(last_successful_fill('property'), a_time)
fillstate.state = FillState.STARTED
fillstate.save()
self.assertEqual(last_successful_fill('property'), one_hour_before)
def test_sort_by_totals(self):
# type: () -> None
empty = [] # type: List[int]
value_arrays = {'c': [0, 1], 'a': [9], 'b': [1, 1, 1], 'd': empty}
self.assertEqual(sort_by_totals(value_arrays), ['a', 'b', 'c', 'd'])
def test_sort_client_labels(self):
# type: () -> None
data = {'realm': {'a': [16], 'c': [15], 'b': [14], 'e': [13], 'd': [12], 'h': [11]},
'user': {'a': [6], 'b': [5], 'd': [4], 'e': [3], 'f': [2], 'g': [1]}}
self.assertEqual(sort_client_labels(data), ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'])
class TestTimeRange(ZulipTestCase):
def test_time_range(self):
# type: () -> None
HOUR = timedelta(hours=1)
DAY = timedelta(days=1)
TZINFO = get_fixed_timezone(-100) # 100 minutes west of UTC
# Using 22:59 so that converting to UTC and applying floor_to_{hour,day} do not commute
a_time = datetime(2016, 3, 14, 22, 59).replace(tzinfo=TZINFO)
floor_hour = datetime(2016, 3, 14, 22).replace(tzinfo=TZINFO)
floor_day = datetime(2016, 3, 14).replace(tzinfo=TZINFO)
# test start == end
self.assertEqual(time_range(a_time, a_time, CountStat.HOUR, None), [])
self.assertEqual(time_range(a_time, a_time, CountStat.DAY, None), [])
# test start == end == boundary, and min_length == 0
self.assertEqual(time_range(floor_hour, floor_hour, CountStat.HOUR, 0), [floor_hour])
self.assertEqual(time_range(floor_day, floor_day, CountStat.DAY, 0), [floor_day])
# test start and end on different boundaries
self.assertEqual(time_range(floor_hour, floor_hour+HOUR, CountStat.HOUR, None),
[floor_hour, floor_hour+HOUR])
self.assertEqual(time_range(floor_day, floor_day+DAY, CountStat.DAY, None),
[floor_day, floor_day+DAY])
# test min_length
self.assertEqual(time_range(floor_hour, floor_hour+HOUR, CountStat.HOUR, 4),
[floor_hour-2*HOUR, floor_hour-HOUR, floor_hour, floor_hour+HOUR])
self.assertEqual(time_range(floor_day, floor_day+DAY, CountStat.DAY, 4),
[floor_day-2*DAY, floor_day-DAY, floor_day, floor_day+DAY])
class TestMapArrays(ZulipTestCase):
def test_map_arrays(self):
# type: () -> None
a = {'desktop app 1.0': [1, 2, 3],
'desktop app 2.0': [10, 12, 13],
'desktop app 3.0': [21, 22, 23],
'website': [1, 2, 3],
'ZulipiOS': [1, 2, 3],
'ZulipMobile': [1, 5, 7],
'ZulipPython': [1, 2, 3],
'API: Python': [1, 2, 3],
'SomethingRandom': [4, 5, 6],
'ZulipGitHubWebhook': [7, 7, 9],
'ZulipAndroid': [64, 63, 65]}
result = rewrite_client_arrays(a)
self.assertEqual(result,
{'Old desktop app': [32, 36, 39],
'Old iOS app': [1, 2, 3],
'New iOS app': [1, 5, 7],
'Website': [1, 2, 3],
'Python API': [2, 4, 6],
'SomethingRandom': [4, 5, 6],
'GitHub webhook': [7, 7, 9],
'Android app': [64, 63, 65]})
| apache-2.0 |
carletes/libcloud | libcloud/test/loadbalancer/test_gce.py | 32 | 9055 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for Google Compute Engine Load Balancer Driver
"""
import sys
import unittest
from libcloud.common.google import (GoogleBaseAuthConnection,
GoogleInstalledAppAuthConnection,
GoogleBaseConnection)
from libcloud.compute.drivers.gce import (GCENodeDriver)
from libcloud.loadbalancer.drivers.gce import (GCELBDriver)
from libcloud.test.common.test_google import GoogleAuthMockHttp
from libcloud.test.compute.test_gce import GCEMockHttp
from libcloud.test import LibcloudTestCase
from libcloud.test.secrets import GCE_PARAMS, GCE_KEYWORD_PARAMS
class GCELoadBalancerTest(LibcloudTestCase):
GoogleBaseConnection._get_token_info_from_file = lambda x: None
GoogleBaseConnection._write_token_info_to_file = lambda x: None
GoogleInstalledAppAuthConnection.get_code = lambda x: '1234'
datacenter = 'us-central1-a'
def setUp(self):
GCEMockHttp.test = self
GCELBDriver.connectionCls.conn_classes = (GCEMockHttp, GCEMockHttp)
GCENodeDriver.connectionCls.conn_classes = (GCEMockHttp, GCEMockHttp)
GoogleBaseAuthConnection.conn_classes = (GoogleAuthMockHttp,
GoogleAuthMockHttp)
GCEMockHttp.type = None
kwargs = GCE_KEYWORD_PARAMS.copy()
kwargs['auth_type'] = 'IA'
kwargs['datacenter'] = self.datacenter
self.driver = GCELBDriver(*GCE_PARAMS, **kwargs)
def test_get_node_from_ip(self):
ip = '23.236.58.15'
expected_name = 'node-name'
node = self.driver._get_node_from_ip(ip)
self.assertEqual(node.name, expected_name)
dummy_ip = '8.8.8.8'
node = self.driver._get_node_from_ip(dummy_ip)
self.assertTrue(node is None)
def test_list_protocols(self):
expected_protocols = ['TCP', 'UDP']
protocols = self.driver.list_protocols()
self.assertEqual(protocols, expected_protocols)
def test_list_balancers(self):
balancers = self.driver.list_balancers()
balancers_all = self.driver.list_balancers(ex_region='all')
balancer_name = 'lcforwardingrule'
self.assertEqual(len(balancers), 2)
self.assertEqual(len(balancers_all), 2)
self.assertEqual(balancers[0].name, balancer_name)
def test_create_balancer(self):
balancer_name = 'libcloud-lb-demo-lb'
tp_name = '%s-tp' % (balancer_name)
port = '80'
protocol = 'tcp'
algorithm = None
node0 = self.driver.gce.ex_get_node('libcloud-lb-demo-www-000',
'us-central1-b')
node1 = self.driver.gce.ex_get_node('libcloud-lb-demo-www-001',
'us-central1-b')
members = [node0, node1]
balancer = self.driver.create_balancer(balancer_name, port, protocol,
algorithm, members)
self.assertEqual(balancer.name, balancer_name)
self.assertEqual(balancer.extra['targetpool'].name, tp_name)
self.assertEqual(len(balancer.list_members()), 3)
    def test_destroy_balancer(self):
balancer_name = 'lcforwardingrule'
balancer = self.driver.get_balancer(balancer_name)
destroyed = balancer.destroy()
self.assertTrue(destroyed)
def test_get_balancer(self):
balancer_name = 'lcforwardingrule'
tp_name = 'lctargetpool'
balancer_ip = '173.255.119.224'
balancer = self.driver.get_balancer(balancer_name)
self.assertEqual(balancer.name, balancer_name)
self.assertEqual(balancer.extra['forwarding_rule'].name, balancer_name)
self.assertEqual(balancer.ip, balancer_ip)
self.assertEqual(balancer.extra['targetpool'].name, tp_name)
def test_attach_compute_node(self):
node = self.driver.gce.ex_get_node('libcloud-lb-demo-www-001',
'us-central1-b')
balancer = self.driver.get_balancer('lcforwardingrule')
member = self.driver._node_to_member(node, balancer)
# Detach member first
balancer.detach_member(member)
self.assertEqual(len(balancer.list_members()), 1)
# Attach Node
balancer.attach_compute_node(node)
self.assertEqual(len(balancer.list_members()), 2)
def test_detach_attach_member(self):
node = self.driver.gce.ex_get_node('libcloud-lb-demo-www-001',
'us-central1-b')
balancer = self.driver.get_balancer('lcforwardingrule')
member = self.driver._node_to_member(node, balancer)
# Check that balancer has 2 members
self.assertEqual(len(balancer.list_members()), 2)
# Remove a member and check that it now has 1 member
balancer.detach_member(member)
self.assertEqual(len(balancer.list_members()), 1)
# Reattach member and check that it has 2 members again
balancer.attach_member(member)
self.assertEqual(len(balancer.list_members()), 2)
def test_balancer_list_members(self):
balancer = self.driver.get_balancer('lcforwardingrule')
members = balancer.list_members()
self.assertEqual(len(members), 2)
member_ips = [m.ip for m in members]
self.assertTrue('23.236.58.15' in member_ips)
def test_ex_create_healthcheck(self):
healthcheck_name = 'lchealthcheck'
kwargs = {'host': 'lchost',
'path': '/lc',
'port': 8000,
'interval': 10,
'timeout': 10,
'unhealthy_threshold': 4,
'healthy_threshold': 3}
hc = self.driver.ex_create_healthcheck(healthcheck_name, **kwargs)
self.assertEqual(hc.name, healthcheck_name)
self.assertEqual(hc.path, '/lc')
self.assertEqual(hc.port, 8000)
self.assertEqual(hc.interval, 10)
def test_ex_list_healthchecks(self):
healthchecks = self.driver.ex_list_healthchecks()
self.assertEqual(len(healthchecks), 3)
self.assertEqual(healthchecks[0].name, 'basic-check')
def test_ex_balancer_detach_attach_healthcheck(self):
healthcheck = self.driver.gce.ex_get_healthcheck(
'libcloud-lb-demo-healthcheck')
balancer = self.driver.get_balancer('lcforwardingrule')
healthchecks = self.driver.ex_balancer_list_healthchecks(balancer)
self.assertEqual(len(healthchecks), 1)
# Detach Healthcheck
detach_healthcheck = self.driver.ex_balancer_detach_healthcheck(
balancer, healthcheck)
self.assertTrue(detach_healthcheck)
healthchecks = self.driver.ex_balancer_list_healthchecks(balancer)
self.assertEqual(len(healthchecks), 0)
# Reattach Healthcheck
attach_healthcheck = self.driver.ex_balancer_attach_healthcheck(
balancer, healthcheck)
self.assertTrue(attach_healthcheck)
healthchecks = self.driver.ex_balancer_list_healthchecks(balancer)
self.assertEqual(len(healthchecks), 1)
def test_ex_balancer_list_healthchecks(self):
balancer = self.driver.get_balancer('lcforwardingrule')
healthchecks = self.driver.ex_balancer_list_healthchecks(balancer)
self.assertEqual(healthchecks[0].name, 'libcloud-lb-demo-healthcheck')
def test_node_to_member(self):
node = self.driver.gce.ex_get_node('libcloud-lb-demo-www-001',
'us-central1-b')
balancer = self.driver.get_balancer('lcforwardingrule')
member = self.driver._node_to_member(node, balancer)
self.assertEqual(member.ip, node.public_ips[0])
self.assertEqual(member.id, node.name)
self.assertEqual(member.port, balancer.port)
def test_forwarding_rule_to_loadbalancer(self):
fwr = self.driver.gce.ex_get_forwarding_rule('lcforwardingrule')
balancer = self.driver._forwarding_rule_to_loadbalancer(fwr)
self.assertEqual(fwr.name, balancer.name)
self.assertEqual(fwr.address, balancer.ip)
self.assertEqual(fwr.extra['portRange'], balancer.port)
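# Added sketch (not part of the test module): what the driver calls exercised by
# these tests would look like outside the mock layer. GCE_PARAMS and
# GCE_KEYWORD_PARAMS are placeholders from the test secrets, not real
# credentials.
def _example_live_usage():
    kwargs = GCE_KEYWORD_PARAMS.copy()
    kwargs['auth_type'] = 'IA'
    kwargs['datacenter'] = 'us-central1-a'
    driver = GCELBDriver(*GCE_PARAMS, **kwargs)        # same construction as setUp()
    balancer = driver.get_balancer('lcforwardingrule')
    return [member.ip for member in balancer.list_members()]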
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 |
dan1/horizon-proto | openstack_dashboard/contrib/sahara/content/data_processing/nodegroup_templates/tests.py | 12 | 12171 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IgnoreArg # noqa
from mox3.mox import IsA # noqa
from openstack_dashboard import api as dash_api
from openstack_dashboard.contrib.sahara import api
from openstack_dashboard.contrib.sahara.content.data_processing.utils \
import workflow_helpers
from openstack_dashboard.test import helpers as test
INDEX_URL = reverse(
'horizon:project:data_processing.nodegroup_templates:index')
DETAILS_URL = reverse(
'horizon:project:data_processing.nodegroup_templates:details',
args=['id'])
CREATE_URL = reverse(
'horizon:project:data_processing.nodegroup_templates:' +
'configure-nodegroup-template')
class DataProcessingNodeGroupTests(test.TestCase):
@test.create_stubs({api.sahara: ('nodegroup_template_list',)})
def test_index(self):
api.sahara.nodegroup_template_list(IsA(http.HttpRequest), {}) \
.AndReturn(self.nodegroup_templates.list())
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res,
'project/data_processing.nodegroup_templates/'
'nodegroup_templates.html')
self.assertContains(res, 'Node Group Templates')
self.assertContains(res, 'Name')
self.assertContains(res, 'Plugin')
@test.create_stubs({api.sahara: ('nodegroup_template_get',),
dash_api.nova: ('flavor_get',)})
def test_details(self):
flavor = self.flavors.first()
ngt = self.nodegroup_templates.first()
dash_api.nova.flavor_get(IsA(http.HttpRequest), flavor.id) \
.AndReturn(flavor)
api.sahara.nodegroup_template_get(IsA(http.HttpRequest),
IsA(unicode)) \
.MultipleTimes().AndReturn(ngt)
self.mox.ReplayAll()
res = self.client.get(DETAILS_URL)
self.assertTemplateUsed(res,
'project/data_processing.nodegroup_templates/'
'details.html')
self.assertContains(res, 'sample-template')
self.assertContains(res, 'Template Overview')
@test.create_stubs({api.sahara: ('nodegroup_template_list',
'nodegroup_template_delete')})
def test_delete(self):
ngt = self.nodegroup_templates.first()
api.sahara.nodegroup_template_list(IsA(http.HttpRequest), {}) \
.AndReturn(self.nodegroup_templates.list())
api.sahara.nodegroup_template_delete(IsA(http.HttpRequest), ngt.id)
self.mox.ReplayAll()
form_data = {'action': 'nodegroup_templates__delete__%s' % ngt.id}
res = self.client.post(INDEX_URL, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.assertMessageCount(success=1)
@test.create_stubs({api.sahara: ('nodegroup_template_get',
'plugin_get_version_details'),
dash_api.network: ('floating_ip_pools_list',
'security_group_list'),
dash_api.cinder: ('extension_supported',
'availability_zone_list')})
def test_copy(self):
ngt = self.nodegroup_templates.first()
configs = self.plugins_configs.first()
dash_api.cinder.extension_supported(IsA(http.HttpRequest),
'AvailabilityZones') \
.AndReturn(True)
dash_api.cinder.availability_zone_list(IsA(http.HttpRequest))\
.AndReturn(self.availability_zones.list())
api.sahara.nodegroup_template_get(IsA(http.HttpRequest),
ngt.id) \
.AndReturn(ngt)
api.sahara.plugin_get_version_details(IsA(http.HttpRequest),
ngt.plugin_name,
ngt.hadoop_version) \
.MultipleTimes().AndReturn(configs)
dash_api.network.floating_ip_pools_list(IsA(http.HttpRequest)) \
.AndReturn([])
dash_api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn([])
self.mox.ReplayAll()
url = reverse(
'horizon:project:data_processing.nodegroup_templates:copy',
args=[ngt.id])
res = self.client.get(url)
workflow = res.context['workflow']
step = workflow.get_step("generalconfigaction")
self.assertEqual(step.action['nodegroup_name'].field.initial,
ngt.name + "-copy")
@test.create_stubs({api.sahara: ('client',
'nodegroup_template_create',
'plugin_get_version_details'),
dash_api.network: ('floating_ip_pools_list',
'security_group_list'),
dash_api.nova: ('flavor_list',),
dash_api.cinder: ('extension_supported',
'availability_zone_list')})
def test_create(self):
flavor = self.flavors.first()
ngt = self.nodegroup_templates.first()
configs = self.plugins_configs.first()
new_name = ngt.name + '-new'
self.mox.StubOutWithMock(
workflow_helpers, 'parse_configs_from_context')
dash_api.cinder.extension_supported(IsA(http.HttpRequest),
'AvailabilityZones') \
.AndReturn(True)
dash_api.cinder.availability_zone_list(IsA(http.HttpRequest))\
.AndReturn(self.availability_zones.list())
dash_api.nova.flavor_list(IsA(http.HttpRequest)).AndReturn([flavor])
api.sahara.plugin_get_version_details(IsA(http.HttpRequest),
ngt.plugin_name,
ngt.hadoop_version) \
.MultipleTimes().AndReturn(configs)
dash_api.network.floating_ip_pools_list(IsA(http.HttpRequest)) \
.AndReturn([])
dash_api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn([])
workflow_helpers.parse_configs_from_context(
IgnoreArg(), IgnoreArg()).AndReturn({})
api.sahara.nodegroup_template_create(
IsA(http.HttpRequest),
**{'name': new_name,
'plugin_name': ngt.plugin_name,
'hadoop_version': ngt.hadoop_version,
'description': ngt.description,
'flavor_id': flavor.id,
'volumes_per_node': None,
'volumes_size': None,
'volumes_availability_zone': None,
'node_processes': ['namenode'],
'node_configs': {},
'floating_ip_pool': None,
'security_groups': [],
'auto_security_group': True,
'availability_zone': None,
'is_proxy_gateway': False}) \
.AndReturn(True)
self.mox.ReplayAll()
res = self.client.post(
CREATE_URL,
{'nodegroup_name': new_name,
'plugin_name': ngt.plugin_name,
ngt.plugin_name + '_version': '1.2.1',
'hadoop_version': ngt.hadoop_version,
'description': ngt.description,
'flavor': flavor.id,
'availability_zone': None,
'storage': 'ephemeral_drive',
'volumes_per_node': 0,
'volumes_size': 0,
'volumes_availability_zone': None,
'floating_ip_pool': None,
'security_autogroup': True,
'processes': 'HDFS:namenode'})
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.assertMessageCount(success=1)
@test.create_stubs({api.sahara: ('client',
'nodegroup_template_create',
'nodegroup_template_update',
'nodegroup_template_get',
'plugin_get_version_details'),
dash_api.network: ('floating_ip_pools_list',
'security_group_list'),
dash_api.nova: ('flavor_list',),
dash_api.cinder: ('extension_supported',
'availability_zone_list')})
def test_update(self):
flavor = self.flavors.first()
ngt = self.nodegroup_templates.first()
configs = self.plugins_configs.first()
new_name = ngt.name + '-updated'
UPDATE_URL = reverse(
'horizon:project:data_processing.nodegroup_templates:edit',
kwargs={'template_id': ngt.id})
self.mox.StubOutWithMock(
workflow_helpers, 'parse_configs_from_context')
dash_api.cinder.extension_supported(IsA(http.HttpRequest),
'AvailabilityZones') \
.AndReturn(True)
dash_api.cinder.availability_zone_list(IsA(http.HttpRequest)) \
.AndReturn(self.availability_zones.list())
dash_api.nova.flavor_list(IsA(http.HttpRequest)).AndReturn([flavor])
api.sahara.plugin_get_version_details(IsA(http.HttpRequest),
ngt.plugin_name,
ngt.hadoop_version) \
.MultipleTimes().AndReturn(configs)
dash_api.network.floating_ip_pools_list(IsA(http.HttpRequest)) \
.AndReturn([])
dash_api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn([])
workflow_helpers.parse_configs_from_context(
IgnoreArg(), IgnoreArg()).AndReturn({})
api.sahara.nodegroup_template_get(IsA(http.HttpRequest),
ngt.id) \
.AndReturn(ngt)
api.sahara.nodegroup_template_update(
request=IsA(http.HttpRequest),
ngt_id=ngt.id,
name=new_name,
plugin_name=ngt.plugin_name,
hadoop_version=ngt.hadoop_version,
flavor_id=flavor.id,
description=ngt.description,
volumes_per_node=None,
volumes_size=None,
volumes_availability_zone=None,
node_processes=['namenode'],
node_configs={},
floating_ip_pool=None,
security_groups=[],
auto_security_group=True,
availability_zone=None).AndReturn(True)
self.mox.ReplayAll()
res = self.client.post(
UPDATE_URL,
{'ng_id': ngt.id,
'nodegroup_name': new_name,
'plugin_name': ngt.plugin_name,
ngt.plugin_name + '_version': '1.2.1',
'hadoop_version': ngt.hadoop_version,
'description': ngt.description,
'flavor': flavor.id,
'availability_zone': None,
'storage': 'ephemeral_drive',
'volumes_per_node': 0,
'volumes_size': 0,
'volumes_availability_zone': None,
'floating_ip_pool': None,
'security_autogroup': True,
'processes': 'HDFS:namenode'})
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.assertMessageCount(success=1)
| apache-2.0 |
jindongh/boto | tests/unit/ec2/elb/test_listener.py | 114 | 4478 | #!/usr/bin/env python
import xml.sax
from tests.unit import unittest
import boto.resultset
from boto.ec2.elb.loadbalancer import LoadBalancer
from boto.ec2.elb.listener import Listener
LISTENERS_RESPONSE = b"""<?xml version="1.0" encoding="UTF-8"?>
<DescribeLoadBalancersResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/">
<DescribeLoadBalancersResult>
<LoadBalancerDescriptions>
<member>
<SecurityGroups/>
<CreatedTime>2013-07-09T19:18:00.520Z</CreatedTime>
<LoadBalancerName>elb-boto-unit-test</LoadBalancerName>
<HealthCheck>
<Interval>30</Interval>
<Target>TCP:8000</Target>
<HealthyThreshold>10</HealthyThreshold>
<Timeout>5</Timeout>
<UnhealthyThreshold>2</UnhealthyThreshold>
</HealthCheck>
<ListenerDescriptions>
<member>
<PolicyNames/>
<Listener>
<Protocol>HTTP</Protocol>
<LoadBalancerPort>80</LoadBalancerPort>
<InstanceProtocol>HTTP</InstanceProtocol>
<InstancePort>8000</InstancePort>
</Listener>
</member>
<member>
<PolicyNames/>
<Listener>
<Protocol>HTTP</Protocol>
<LoadBalancerPort>8080</LoadBalancerPort>
<InstanceProtocol>HTTP</InstanceProtocol>
<InstancePort>80</InstancePort>
</Listener>
</member>
<member>
<PolicyNames/>
<Listener>
<Protocol>TCP</Protocol>
<LoadBalancerPort>2525</LoadBalancerPort>
<InstanceProtocol>TCP</InstanceProtocol>
<InstancePort>25</InstancePort>
</Listener>
</member>
</ListenerDescriptions>
<Instances/>
<Policies>
<AppCookieStickinessPolicies/>
<OtherPolicies/>
<LBCookieStickinessPolicies/>
</Policies>
<AvailabilityZones>
<member>us-east-1a</member>
</AvailabilityZones>
<CanonicalHostedZoneName>elb-boto-unit-test-408121642.us-east-1.elb.amazonaws.com</CanonicalHostedZoneName>
<CanonicalHostedZoneNameID>Z3DZXE0Q79N41H</CanonicalHostedZoneNameID>
<Scheme>internet-facing</Scheme>
<SourceSecurityGroup>
<OwnerAlias>amazon-elb</OwnerAlias>
<GroupName>amazon-elb-sg</GroupName>
</SourceSecurityGroup>
<DNSName>elb-boto-unit-test-408121642.us-east-1.elb.amazonaws.com</DNSName>
<BackendServerDescriptions/>
<Subnets/>
</member>
</LoadBalancerDescriptions>
</DescribeLoadBalancersResult>
<ResponseMetadata>
<RequestId>5763d932-e8cc-11e2-a940-11136cceffb8</RequestId>
</ResponseMetadata>
</DescribeLoadBalancersResponse>
"""
class TestListenerResponseParsing(unittest.TestCase):
def test_parse_complex(self):
rs = boto.resultset.ResultSet([
('member', LoadBalancer)
])
h = boto.handler.XmlHandler(rs, None)
xml.sax.parseString(LISTENERS_RESPONSE, h)
listeners = rs[0].listeners
self.assertEqual(
sorted([l.get_complex_tuple() for l in listeners]),
[
(80, 8000, 'HTTP', 'HTTP'),
(2525, 25, 'TCP', 'TCP'),
(8080, 80, 'HTTP', 'HTTP'),
]
)
class TestListenerGetItem(unittest.TestCase):
def test_getitem_for_http_listener(self):
listener = Listener(load_balancer_port=80,
instance_port=80,
protocol='HTTP',
instance_protocol='HTTP')
self.assertEqual(listener[0], 80)
self.assertEqual(listener[1], 80)
self.assertEqual(listener[2], 'HTTP')
self.assertEqual(listener[3], 'HTTP')
def test_getitem_for_https_listener(self):
listener = Listener(load_balancer_port=443,
instance_port=80,
protocol='HTTPS',
instance_protocol='HTTP',
ssl_certificate_id='look_at_me_im_an_arn')
self.assertEqual(listener[0], 443)
self.assertEqual(listener[1], 80)
self.assertEqual(listener[2], 'HTTPS')
self.assertEqual(listener[3], 'HTTP')
self.assertEqual(listener[4], 'look_at_me_im_an_arn')
if __name__ == '__main__':
unittest.main()
| mit |
tavaresdong/courses | ucb_cs61A/projects/scheme/buffer.py | 3 | 3806 | """The buffer module assists in iterating through lines and tokens."""
import math
class Buffer:
"""A Buffer provides a way of accessing a sequence of tokens across lines.
Its constructor takes an iterator, called "the source", that returns the
next line of tokens as a list each time it is queried, or None to indicate
the end of data.
The Buffer in effect concatenates the sequences returned from its source
and then supplies the items from them one at a time through its pop()
method, calling the source for more sequences of items only when needed.
In addition, Buffer provides a current method to look at the
next item to be supplied, without sequencing past it.
The __str__ method prints all tokens read so far, up to the end of the
current line, and marks the current token with >>.
>>> buf = Buffer(iter([['(', '+'], [15], [12, ')']]))
>>> buf.pop()
'('
>>> buf.pop()
'+'
>>> buf.current()
15
>>> print(buf)
1: ( +
2: >> 15
>>> buf.pop()
15
>>> buf.current()
12
>>> buf.pop()
12
>>> print(buf)
1: ( +
2: 15
3: 12 >> )
>>> buf.pop()
')'
>>> print(buf)
1: ( +
2: 15
3: 12 ) >>
>>> buf.pop() # returns None
"""
def __init__(self, source):
self.index = 0
self.lines = []
self.source = source
self.current_line = ()
self.current()
def pop(self):
"""Remove the next item from self and return it. If self has
exhausted its source, returns None."""
current = self.current()
self.index += 1
return current
def current(self):
"""Return the current element, or None if none exists."""
while not self.more_on_line:
self.index = 0
try:
self.current_line = next(self.source)
self.lines.append(self.current_line)
except StopIteration:
self.current_line = ()
return None
return self.current_line[self.index]
@property
def more_on_line(self):
return self.index < len(self.current_line)
def __str__(self):
"""Return recently read contents; current element marked with >>."""
# Format string for right-justified line numbers
n = len(self.lines)
msg = '{0:>' + str(math.floor(math.log10(n))+1) + "}: "
# Up to three previous lines and current line are included in output
s = ''
for i in range(max(0, n-4), n-1):
s += msg.format(i+1) + ' '.join(map(str, self.lines[i])) + '\n'
s += msg.format(n)
s += ' '.join(map(str, self.current_line[:self.index]))
s += ' >> '
s += ' '.join(map(str, self.current_line[self.index:]))
return s.strip()
# Try to import readline for interactive history
try:
import readline
except:
pass
class InputReader:
"""An InputReader is an iterable that prompts the user for input."""
def __init__(self, prompt):
self.prompt = prompt
def __iter__(self):
while True:
yield input(self.prompt)
self.prompt = ' ' * len(self.prompt)
class LineReader:
"""A LineReader is an iterable that prints lines after a prompt."""
def __init__(self, lines, prompt, comment=";"):
self.lines = lines
self.prompt = prompt
self.comment = comment
def __iter__(self):
while self.lines:
line = self.lines.pop(0).strip('\n')
if (self.prompt is not None and line != "" and
not line.lstrip().startswith(self.comment)):
print(self.prompt + line)
self.prompt = ' ' * len(self.prompt)
yield line
raise EOFError
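# Added wiring example (not part of the original module): feeding a Buffer from a
# LineReader through a trivial whitespace tokenizer. The project's real tokenizer
# lives elsewhere and is not shown in this excerpt.
def _buffer_from_lines(lines, prompt='> '):
    def tokenized(reader):
        try:
            for line in reader:
                yield line.split()
        except EOFError:
            return
    return Buffer(tokenized(LineReader(lines, prompt)))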
| mit |
dreibh/planetlab-lxc-plcapi | PLC/Timestamp.py | 1 | 5841 | #
# Utilities to handle timestamps / durations from/to integers and strings
#
# datetime.{datetime,timedelta} are powerful tools, but these objects are not
# natively marshalled over xmlrpc
#
import time, calendar
import datetime
from PLC.Faults import *
from PLC.Parameter import Parameter, Mixed
# a dummy class mostly used as a namespace
class Timestamp:
debug = False
# debug=True
# this is how we expose times to SQL
sql_format = "%Y-%m-%d %H:%M:%S"
sql_format_utc = "%Y-%m-%d %H:%M:%S UTC"
# this one (datetime.isoformat) would work too but that's less readable - we support this input though
iso_format = "%Y-%m-%dT%H:%M:%S"
# sometimes it's convenient to understand more formats
input_formats = [ sql_format,
sql_format_utc,
iso_format,
"%Y-%m-%d %H:%M",
"%Y-%m-%d %H:%M UTC",
]
# for timestamps we usually accept either an int, or an ISO string,
# the datetime.datetime stuff can in general be used locally,
# but not sure it can be marshalled over xmlrpc though
@staticmethod
def Parameter (doc):
return Mixed (Parameter (int, doc + " (unix timestamp)"),
Parameter (str, doc + " (formatted as %s)"%Timestamp.sql_format),
)
@staticmethod
def sql_validate (input, timezone=False, check_future = False):
"""
Validates the specified GMT timestamp, returns a
standardized string suitable for SQL input.
        Input may be a number (seconds since UNIX epoch back in 1970),
        or a string (in one of the supported input formats).
If timezone is True, the resulting string contains
timezone information, which is hard-wired as 'UTC'
If check_future is True, raises an exception if timestamp is in
the past.
Returns a GMT timestamp string suitable to feed SQL.
"""
output_format = (Timestamp.sql_format_utc if timezone
else Timestamp.sql_format)
if Timestamp.debug:
print('sql_validate, in:', input, end=' ')
if isinstance(input, str):
sql = ''
# calendar.timegm() is the inverse of time.gmtime()
for time_format in Timestamp.input_formats:
try:
timestamp = calendar.timegm(time.strptime(input, time_format))
sql = time.strftime(output_format, time.gmtime(timestamp))
break
# wrong format: ignore
except ValueError:
pass
# could not parse it
if not sql:
raise PLCInvalidArgument("Cannot parse timestamp %r - not in any of %r formats"%(input,Timestamp.input_formats))
elif isinstance(input, (int, float)):
try:
timestamp = int(input)
sql = time.strftime(output_format, time.gmtime(timestamp))
except Exception as e:
raise PLCInvalidArgument("Timestamp %r not recognized -- %r"%(input,e))
else:
raise PLCInvalidArgument("Timestamp %r - unsupported type %r"%(input,type(input)))
        if check_future and timestamp < time.time():
raise PLCInvalidArgument("'%s' not in the future" % sql)
if Timestamp.debug: print('sql_validate, out:',sql)
return sql
@staticmethod
def sql_validate_utc (timestamp):
"For convenience, return sql_validate(intput, timezone=True, check_future=False)"
return Timestamp.sql_validate (timestamp, timezone=True, check_future=False)
@staticmethod
def cast_long(input):
"""
Translates input timestamp as a unix timestamp.
Input may be a number (seconds since UNIX epoch, i.e., 1970-01-01
00:00:00 GMT), a string (in one of the supported input formats above).
"""
if Timestamp.debug:
print('cast_long, in:', input, end=' ')
if isinstance(input, str):
timestamp = 0
for time_format in Timestamp.input_formats:
try:
result = calendar.timegm(time.strptime(input, time_format))
if Timestamp.debug:
print('out:', result)
return result
# wrong format: ignore
except ValueError:
pass
raise PLCInvalidArgument("Cannot parse timestamp %r - not in any of %r formats"%(input, Timestamp.input_formats))
elif isinstance(input, (int, float)):
result = int(input)
if Timestamp.debug:
print('out:',result)
return result
else:
raise PLCInvalidArgument("Timestamp %r - unsupported type %r"%(input,type(input)))
# utility for displaying durations
# be consistent in avoiding the datetime stuff
class Duration:
MINUTE = 60
HOUR = 3600
DAY = 3600*24
@staticmethod
def to_string(duration):
result=[]
left = duration
(days, left) = divmod(left, Duration.DAY)
if days:
result.append("%d d)"%td.days)
(hours, left) = divmod (left, Duration.HOUR)
if hours:
result.append("%d h"%hours)
(minutes, seconds) = divmod (left, Duration.MINUTE)
if minutes: result.append("%d m"%minutes)
if seconds: result.append("%d s"%seconds)
if not result: result = ['void']
return "-".join(result)
@staticmethod
def validate (duration):
# support seconds only for now, works for int/long/str
try:
return int(duration)
except:
raise PLCInvalidArgument("Could not parse duration %r"%duration)
| bsd-3-clause |
mitsuhiko/click | src/click/shell_completion.py | 1 | 16736 | import os
import re
from .core import Argument
from .core import MultiCommand
from .core import Option
from .core import ParameterSource
from .parser import split_arg_string
from .utils import echo
def shell_complete(cli, ctx_args, prog_name, complete_var, instruction):
"""Perform shell completion for the given CLI program.
:param cli: Command being called.
:param ctx_args: Extra arguments to pass to
``cli.make_context``.
:param prog_name: Name of the executable in the shell.
:param complete_var: Name of the environment variable that holds
the completion instruction.
:param instruction: Value of ``complete_var`` with the completion
instruction and shell, in the form ``instruction_shell``.
:return: Status code to exit with.
"""
shell, _, instruction = instruction.partition("_")
comp_cls = get_completion_class(shell)
if comp_cls is None:
return 1
comp = comp_cls(cli, ctx_args, prog_name, complete_var)
if instruction == "source":
echo(comp.source())
return 0
if instruction == "complete":
echo(comp.complete())
return 0
return 1
class CompletionItem:
"""Represents a completion value and metadata about the value. The
default metadata is ``type`` to indicate special shell handling,
and ``help`` if a shell supports showing a help string next to the
value.
Arbitrary parameters can be passed when creating the object, and
accessed using ``item.attr``. If an attribute wasn't passed,
accessing it returns ``None``.
:param value: The completion suggestion.
:param type: Tells the shell script to provide special completion
support for the type. Click uses ``"dir"`` and ``"file"``.
:param help: String shown next to the value if supported.
:param kwargs: Arbitrary metadata. The built-in implementations
don't use this, but custom type completions paired with custom
shell support could use it.
"""
__slots__ = ("value", "type", "help", "_info")
def __init__(self, value, type="plain", help=None, **kwargs):
self.value = value
self.type = type
self.help = help
self._info = kwargs
def __getattr__(self, name):
return self._info.get(name)
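# Illustrative sketch (not part of click's API and never called): arbitrary
# keyword metadata passed to CompletionItem is reachable as attributes, and
# attributes that were never passed resolve to None via __getattr__. The
# "nargs" attribute below is just an example of an unset attribute.
def _completion_item_example():
    item = CompletionItem("src/", type="dir", help="Source directory")
    assert item.value == "src/"
    assert item.type == "dir"
    assert item.help == "Source directory"
    assert item.nargs is None  # never set, so __getattr__ returns None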
# Only Bash >= 4.4 has the nosort option.
_SOURCE_BASH = """\
%(complete_func)s() {
local IFS=$'\\n'
local response
response=$(env COMP_WORDS="${COMP_WORDS[*]}" COMP_CWORD=$COMP_CWORD \
%(complete_var)s=bash_complete $1)
for completion in $response; do
IFS=',' read type value <<< "$completion"
if [[ $type == 'dir' ]]; then
            COMPREPLY=()
compopt -o dirnames
elif [[ $type == 'file' ]]; then
            COMPREPLY=()
compopt -o default
elif [[ $type == 'plain' ]]; then
COMPREPLY+=($value)
fi
done
return 0
}
%(complete_func)s_setup() {
complete -o nosort -F %(complete_func)s %(prog_name)s
}
%(complete_func)s_setup;
"""
_SOURCE_ZSH = """\
#compdef %(prog_name)s
%(complete_func)s() {
local -a completions
local -a completions_with_descriptions
local -a response
(( ! $+commands[%(prog_name)s] )) && return 1
response=("${(@f)$(env COMP_WORDS="${words[*]}" COMP_CWORD=$((CURRENT-1)) \
%(complete_var)s=zsh_complete %(prog_name)s)}")
for type key descr in ${response}; do
if [[ "$type" == "plain" ]]; then
if [[ "$descr" == "_" ]]; then
completions+=("$key")
else
completions_with_descriptions+=("$key":"$descr")
fi
elif [[ "$type" == "dir" ]]; then
_path_files -/
elif [[ "$type" == "file" ]]; then
_path_files -f
fi
done
if [ -n "$completions_with_descriptions" ]; then
_describe -V unsorted completions_with_descriptions -U
fi
if [ -n "$completions" ]; then
compadd -U -V unsorted -a completions
fi
}
compdef %(complete_func)s %(prog_name)s;
"""
_SOURCE_FISH = """\
function %(complete_func)s;
set -l response;
for value in (env %(complete_var)s=fish_complete COMP_WORDS=(commandline -cp) \
COMP_CWORD=(commandline -t) %(prog_name)s);
set response $response $value;
end;
for completion in $response;
set -l metadata (string split "," $completion);
if test $metadata[1] = "dir";
__fish_complete_directories $metadata[2];
else if test $metadata[1] = "file";
__fish_complete_path $metadata[2];
else if test $metadata[1] = "plain";
echo $metadata[2];
end;
end;
end;
complete --no-files --command %(prog_name)s --arguments \
"(%(complete_func)s)";
"""
class ShellComplete:
"""Base class for providing shell completion support. A subclass for
a given shell will override attributes and methods to implement the
completion instructions (``source`` and ``complete``).
:param cli: Command being called.
:param prog_name: Name of the executable in the shell.
:param complete_var: Name of the environment variable that holds
the completion instruction.
.. versionadded:: 8.0
"""
name = None
"""Name to register the shell as with :func:`add_completion_class`.
This is used in completion instructions (``{name}_source`` and
``{name}_complete``).
"""
source_template = None
"""Completion script template formatted by :meth:`source`. This must
be provided by subclasses.
"""
def __init__(self, cli, ctx_args, prog_name, complete_var):
self.cli = cli
self.ctx_args = ctx_args
self.prog_name = prog_name
self.complete_var = complete_var
@property
def func_name(self):
"""The name of the shell function defined by the completion
script.
"""
        safe_name = re.sub(r"\W*", "", self.prog_name.replace("-", "_"), flags=re.ASCII)
return f"_{safe_name}_completion"
def source_vars(self):
"""Vars for formatting :attr:`source_template`.
By default this provides ``complete_func``, ``complete_var``,
and ``prog_name``.
"""
return {
"complete_func": self.func_name,
"complete_var": self.complete_var,
"prog_name": self.prog_name,
}
def source(self):
"""Produce the shell script that defines the completion
function. By default this ``%``-style formats
:attr:`source_template` with the dict returned by
:meth:`source_vars`.
"""
return self.source_template % self.source_vars()
def get_completion_args(self):
"""Use the env vars defined by the shell script to return a
tuple of ``args, incomplete``. This must be implemented by
subclasses.
"""
raise NotImplementedError
def get_completions(self, args, incomplete):
"""Determine the context and last complete command or parameter
from the complete args. Call that object's ``shell_complete``
method to get the completions for the incomplete value.
:param args: List of complete args before the incomplete value.
:param incomplete: Value being completed. May be empty.
"""
ctx = _resolve_context(self.cli, self.ctx_args, self.prog_name, args)
if ctx is None:
return []
obj, incomplete = _resolve_incomplete(ctx, args, incomplete)
return obj.shell_complete(ctx, incomplete)
def format_completion(self, item):
"""Format a completion item into the form recognized by the
shell script. This must be implemented by subclasses.
:param item: Completion item to format.
"""
raise NotImplementedError
def complete(self):
"""Produce the completion data to send back to the shell.
By default this calls :meth:`get_completion_args`, gets the
completions, then calls :meth:`format_completion` for each
completion.
"""
args, incomplete = self.get_completion_args()
completions = self.get_completions(args, incomplete)
out = [self.format_completion(item) for item in completions]
return "\n".join(out)
class BashComplete(ShellComplete):
"""Shell completion for Bash."""
name = "bash"
source_template = _SOURCE_BASH
def _check_version(self):
import subprocess
output = subprocess.run(["bash", "--version"], stdout=subprocess.PIPE)
match = re.search(r"version (\d)\.(\d)\.\d", output.stdout.decode())
if match is not None:
major, minor = match.groups()
if major < "4" or major == "4" and minor < "4":
raise RuntimeError(
"Shell completion is not supported for Bash"
" versions older than 4.4."
)
else:
raise RuntimeError(
"Couldn't detect Bash version, shell completion is not supported."
)
def source(self):
self._check_version()
return super().source()
def get_completion_args(self):
cwords = split_arg_string(os.environ["COMP_WORDS"])
cword = int(os.environ["COMP_CWORD"])
args = cwords[1:cword]
try:
incomplete = cwords[cword]
except IndexError:
incomplete = ""
return args, incomplete
def format_completion(self, item: CompletionItem):
return f"{item.type},{item.value}"
class ZshComplete(ShellComplete):
"""Shell completion for Zsh."""
name = "zsh"
source_template = _SOURCE_ZSH
def get_completion_args(self):
cwords = split_arg_string(os.environ["COMP_WORDS"])
cword = int(os.environ["COMP_CWORD"])
args = cwords[1:cword]
try:
incomplete = cwords[cword]
except IndexError:
incomplete = ""
return args, incomplete
def format_completion(self, item: CompletionItem):
return f"{item.type}\n{item.value}\n{item.help if item.help else '_'}"
class FishComplete(ShellComplete):
"""Shell completion for Fish."""
name = "fish"
source_template = _SOURCE_FISH
def get_completion_args(self):
cwords = split_arg_string(os.environ["COMP_WORDS"])
incomplete = os.environ["COMP_CWORD"]
args = cwords[1:]
# Fish stores the partial word in both COMP_WORDS and
# COMP_CWORD, remove it from complete args.
if incomplete and args and args[-1] == incomplete:
args.pop()
return args, incomplete
def format_completion(self, item: CompletionItem):
if item.help:
return f"{item.type},{item.value}\t{item.help}"
return f"{item.type},{item.value}"
_available_shells = {
"bash": BashComplete,
"fish": FishComplete,
"zsh": ZshComplete,
}
def add_completion_class(cls, name=None):
"""Register a :class:`ShellComplete` subclass under the given name.
The name will be provided by the completion instruction environment
variable during completion.
:param cls: The completion class that will handle completion for the
shell.
:param name: Name to register the class under. Defaults to the
class's ``name`` attribute.
"""
if name is None:
name = cls.name
_available_shells[name] = cls
def get_completion_class(shell):
"""Look up a registered :class:`ShellComplete` subclass by the name
provided by the completion instruction environment variable. If the
name isn't registered, returns ``None``.
:param shell: Name the class is registered under.
"""
return _available_shells.get(shell)
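# Hypothetical registration sketch (the "mysh" name and its template are
# invented for illustration; this function is never called): a custom shell
# needs a ShellComplete subclass whose get_completion_args() and
# format_completion() agree with how its completion script parses the
# program's output, followed by a call to add_completion_class().
def _register_custom_shell_example():
    class MyShellComplete(ShellComplete):
        name = "mysh"
        source_template = "# completion setup for %(prog_name)s\n"
        def get_completion_args(self):
            args = split_arg_string(os.environ.get("COMP_WORDS", ""))
            return args[1:], os.environ.get("COMP_CWORD", "")
        def format_completion(self, item):
            return f"{item.type},{item.value}"
    add_completion_class(MyShellComplete)
    assert get_completion_class("mysh") is MyShellComplete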
def _is_incomplete_argument(ctx, param):
"""Determine if the given parameter is an argument that can still
accept values.
:param ctx: Invocation context for the command represented by the
parsed complete args.
:param param: Argument object being checked.
"""
if not isinstance(param, Argument):
return False
value = ctx.params[param.name]
return (
param.nargs == -1
or ctx.get_parameter_source(param.name) is not ParameterSource.COMMANDLINE
or (
param.nargs > 1
and isinstance(value, (tuple, list))
and len(value) < param.nargs
)
)
def _start_of_option(value):
"""Check if the value looks like the start of an option."""
return value and not value[0].isalnum()
def _is_incomplete_option(args, param):
"""Determine if the given parameter is an option that needs a value.
:param args: List of complete args before the incomplete value.
:param param: Option object being checked.
"""
if not isinstance(param, Option):
return False
if param.is_flag:
return False
last_option = None
for index, arg in enumerate(reversed(args)):
if index + 1 > param.nargs:
break
if _start_of_option(arg):
last_option = arg
return last_option is not None and last_option in param.opts
def _resolve_context(cli, ctx_args, prog_name, args):
"""Produce the context hierarchy starting with the command and
traversing the complete arguments. This only follows the commands,
it doesn't trigger input prompts or callbacks.
:param cli: Command being called.
:param prog_name: Name of the executable in the shell.
:param args: List of complete args before the incomplete value.
"""
ctx_args["resilient_parsing"] = True
ctx = cli.make_context(prog_name, args.copy(), **ctx_args)
args = ctx.protected_args + ctx.args
while args:
if isinstance(ctx.command, MultiCommand):
if not ctx.command.chain:
name, cmd, args = ctx.command.resolve_command(ctx, args)
if cmd is None:
return ctx
ctx = cmd.make_context(name, args, parent=ctx, resilient_parsing=True)
args = ctx.protected_args + ctx.args
else:
while args:
name, cmd, args = ctx.command.resolve_command(ctx, args)
if cmd is None:
return ctx
sub_ctx = cmd.make_context(
name,
args,
parent=ctx,
allow_extra_args=True,
allow_interspersed_args=False,
resilient_parsing=True,
)
args = sub_ctx.args
ctx = sub_ctx
args = sub_ctx.protected_args + sub_ctx.args
else:
break
return ctx
def _resolve_incomplete(ctx, args, incomplete):
"""Find the Click object that will handle the completion of the
incomplete value. Return the object and the incomplete value.
:param ctx: Invocation context for the command represented by
the parsed complete args.
:param args: List of complete args before the incomplete value.
:param incomplete: Value being completed. May be empty.
"""
# Different shells treat an "=" between a long option name and
# value differently. Might keep the value joined, return the "="
# as a separate item, or return the split name and value. Always
# split and discard the "=" to make completion easier.
if incomplete == "=":
incomplete = ""
elif "=" in incomplete and _start_of_option(incomplete):
name, _, incomplete = incomplete.partition("=")
args.append(name)
# The "--" marker tells Click to stop treating values as options
# even if they start with the option character. If it hasn't been
# given and the incomplete arg looks like an option, the current
# command will provide option name completions.
if "--" not in args and _start_of_option(incomplete):
return ctx.command, incomplete
params = ctx.command.get_params(ctx)
# If the last complete arg is an option name with an incomplete
# value, the option will provide value completions.
for param in params:
if _is_incomplete_option(args, param):
return param, incomplete
# It's not an option name or value. The first argument without a
# parsed value will provide value completions.
for param in params:
if _is_incomplete_argument(ctx, param):
return param, incomplete
# There were no unparsed arguments, the command may be a group that
# will provide command name completions.
return ctx.command, incomplete
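# End-to-end sketch with a hypothetical command (never called; "mycli" and its
# --color option are invented for illustration): the bash script above sets
# COMP_WORDS/COMP_CWORD plus the "bash_complete" instruction, after which
# shell_complete() prints "type,value" lines such as "plain,red".
def _shell_complete_example():
    import click
    @click.command()
    @click.option("--color", type=click.Choice(["red", "green"]))
    def mycli(color):
        pass
    os.environ["COMP_WORDS"] = "mycli --color "
    os.environ["COMP_CWORD"] = "2"
    return shell_complete(mycli, {}, "mycli", "_MYCLI_COMPLETE", "bash_complete")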
| bsd-3-clause |
luanlv/ResizeImage | node_modules/grunt-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/input.py | 457 | 112827 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import copy
import gyp.common
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
# A list of types that are treated as linkable.
linkable_types = ['executable', 'shared_library', 'loadable_module']
# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']
# base_path_sections is a list of sections defined by GYP that contain
# pathnames. The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
'destination',
'files',
'include_dirs',
'inputs',
'libraries',
'outputs',
'sources',
]
path_sections = []
is_path_section_charset = set('=+?!')
is_path_section_match_re = re.compile('_(dir|file|path)s?$')
def IsPathSection(section):
# If section ends in one of these characters, it's applied to a section
# without the trailing characters. '/' is notably absent from this list,
# because there's no way for a regular expression to be treated as a path.
while section[-1:] in is_path_section_charset:
section = section[:-1]
return section in path_sections or is_path_section_match_re.search(section)
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations. It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
# Sections that must exist inside targets and not configurations.
'actions',
'configurations',
'copies',
'default_configuration',
'dependencies',
'dependencies_original',
'libraries',
'postbuilds',
'product_dir',
'product_extension',
'product_name',
'product_prefix',
'rules',
'run_as',
'sources',
'standalone_static_library',
'suppress_wildcard',
'target_name',
'toolset',
'toolsets',
'type',
# Sections that can be found inside targets or configurations, but that
# should not be propagated from targets into their configurations.
'variables',
]
non_configuration_keys = []
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
'actions',
'all_dependent_settings',
'configurations',
'dependencies',
'direct_dependent_settings',
'libraries',
'link_settings',
'sources',
'standalone_static_library',
'target_name',
'type',
]
# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False
# Paths for converting filelist paths to output paths: {
# toplevel,
# qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
"""Return a list of all build files included into build_file_path.
The returned list will contain build_file_path as well as all other files
that it included, either directly or indirectly. Note that the list may
contain files that were included into a conditional section that evaluated
to false and was not merged into build_file_path's dict.
aux_data is a dict containing a key for each build file or included build
file. Those keys provide access to dicts whose "included" keys contain
lists of all other files included by the build file.
included should be left at its default None value by external callers. It
is used for recursion.
The returned list will not contain any duplicate entries. Each build file
in the list will be relative to the current directory.
"""
if included == None:
included = []
if build_file_path in included:
return included
included.append(build_file_path)
for included_build_file in aux_data[build_file_path].get('included', []):
GetIncludedBuildFiles(included_build_file, aux_data, included)
return included
def CheckedEval(file_contents):
"""Return the eval of a gyp file.
The gyp file is restricted to dictionaries and lists only, and
repeated keys are not allowed.
Note that this is slower than eval() is.
"""
ast = compiler.parse(file_contents)
assert isinstance(ast, Module)
c1 = ast.getChildren()
assert c1[0] is None
assert isinstance(c1[1], Stmt)
c2 = c1[1].getChildren()
assert isinstance(c2[0], Discard)
c3 = c2[0].getChildren()
assert len(c3) == 1
return CheckNode(c3[0], [])
def CheckNode(node, keypath):
if isinstance(node, Dict):
c = node.getChildren()
dict = {}
for n in range(0, len(c), 2):
assert isinstance(c[n], Const)
key = c[n].getChildren()[0]
if key in dict:
raise GypError("Key '" + key + "' repeated at level " +
repr(len(keypath) + 1) + " with key path '" +
'.'.join(keypath) + "'")
kp = list(keypath) # Make a copy of the list for descending this node.
kp.append(key)
dict[key] = CheckNode(c[n + 1], kp)
return dict
elif isinstance(node, List):
c = node.getChildren()
children = []
for index, child in enumerate(c):
kp = list(keypath) # Copy list.
kp.append(repr(index))
children.append(CheckNode(child, kp))
return children
elif isinstance(node, Const):
return node.getChildren()[0]
else:
raise TypeError, "Unknown AST node at key path '" + '.'.join(keypath) + \
"': " + repr(node)
def LoadOneBuildFile(build_file_path, data, aux_data, variables, includes,
is_target, check):
if build_file_path in data:
return data[build_file_path]
if os.path.exists(build_file_path):
build_file_contents = open(build_file_path).read()
else:
raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
build_file_data = None
try:
if check:
build_file_data = CheckedEval(build_file_contents)
else:
build_file_data = eval(build_file_contents, {'__builtins__': None},
None)
except SyntaxError, e:
e.filename = build_file_path
raise
except Exception, e:
gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
raise
if not isinstance(build_file_data, dict):
raise GypError("%s does not evaluate to a dictionary." % build_file_path)
data[build_file_path] = build_file_data
aux_data[build_file_path] = {}
# Scan for includes and merge them in.
if ('skip_includes' not in build_file_data or
not build_file_data['skip_includes']):
try:
if is_target:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, variables, includes, check)
else:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, variables, None, check)
except Exception, e:
gyp.common.ExceptionAppend(e,
'while reading includes of ' + build_file_path)
raise
return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
variables, includes, check):
includes_list = []
if includes != None:
includes_list.extend(includes)
if 'includes' in subdict:
for include in subdict['includes']:
# "include" is specified relative to subdict_path, so compute the real
# path to include by appending the provided "include" to the directory
# in which subdict_path resides.
relative_include = \
os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
includes_list.append(relative_include)
# Unhook the includes list, it's no longer needed.
del subdict['includes']
# Merge in the included files.
for include in includes_list:
if not 'included' in aux_data[subdict_path]:
aux_data[subdict_path]['included'] = []
aux_data[subdict_path]['included'].append(include)
gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)
MergeDicts(subdict,
LoadOneBuildFile(include, data, aux_data, variables, None,
False, check),
subdict_path, include)
# Recurse into subdictionaries.
for k, v in subdict.iteritems():
if v.__class__ == dict:
LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data, variables,
None, check)
elif v.__class__ == list:
LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data, variables,
check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data,
variables, check):
for item in sublist:
if item.__class__ == dict:
LoadBuildFileIncludesIntoDict(item, sublist_path, data, aux_data,
variables, None, check)
elif item.__class__ == list:
LoadBuildFileIncludesIntoList(item, sublist_path, data, aux_data,
variables, check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
if 'targets' in data:
target_list = data['targets']
new_target_list = []
for target in target_list:
# If this target already has an explicit 'toolset', and no 'toolsets'
# list, don't modify it further.
if 'toolset' in target and 'toolsets' not in target:
new_target_list.append(target)
continue
if multiple_toolsets:
toolsets = target.get('toolsets', ['target'])
else:
toolsets = ['target']
# Make sure this 'toolsets' definition is only processed once.
if 'toolsets' in target:
del target['toolsets']
if len(toolsets) > 0:
# Optimization: only do copies if more than one toolset is specified.
for build in toolsets[1:]:
new_target = copy.deepcopy(target)
new_target['toolset'] = build
new_target_list.append(new_target)
target['toolset'] = toolsets[0]
new_target_list.append(target)
data['targets'] = new_target_list
if 'conditions' in data:
for condition in data['conditions']:
if isinstance(condition, list):
for condition_dict in condition[1:]:
ProcessToolsetsInDict(condition_dict)
# TODO(mark): I don't love this name. It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
depth, check, load_dependencies):
# If depth is set, predefine the DEPTH variable to be a relative path from
# this build file's directory to the directory identified by depth.
if depth:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
if d == '':
variables['DEPTH'] = '.'
else:
variables['DEPTH'] = d.replace('\\', '/')
if build_file_path in data['target_build_files']:
# Already loaded.
return False
data['target_build_files'].add(build_file_path)
gyp.DebugOutput(gyp.DEBUG_INCLUDES,
"Loading Target Build File '%s'", build_file_path)
build_file_data = LoadOneBuildFile(build_file_path, data, aux_data, variables,
includes, True, check)
# Store DEPTH for later use in generators.
build_file_data['_DEPTH'] = depth
# Set up the included_files key indicating which .gyp files contributed to
# this target dict.
if 'included_files' in build_file_data:
raise GypError(build_file_path + ' must not contain included_files key')
included = GetIncludedBuildFiles(build_file_path, aux_data)
build_file_data['included_files'] = []
for included_file in included:
# included_file is relative to the current directory, but it needs to
# be made relative to build_file_path's directory.
included_relative = \
gyp.common.RelativePath(included_file,
os.path.dirname(build_file_path))
build_file_data['included_files'].append(included_relative)
# Do a first round of toolsets expansion so that conditions can be defined
# per toolset.
ProcessToolsetsInDict(build_file_data)
# Apply "pre"/"early" variable expansions and condition evaluations.
ProcessVariablesAndConditionsInDict(
build_file_data, PHASE_EARLY, variables, build_file_path)
# Since some toolsets might have been defined conditionally, perform
# a second round of toolsets expansion now.
ProcessToolsetsInDict(build_file_data)
# Look at each project's target_defaults dict, and merge settings into
# targets.
if 'target_defaults' in build_file_data:
if 'targets' not in build_file_data:
raise GypError("Unable to find targets in build file %s" %
build_file_path)
index = 0
while index < len(build_file_data['targets']):
# This procedure needs to give the impression that target_defaults is
# used as defaults, and the individual targets inherit from that.
# The individual targets need to be merged into the defaults. Make
# a deep copy of the defaults for each target, merge the target dict
# as found in the input file into that copy, and then hook up the
# copy with the target-specific data merged into it as the replacement
# target dict.
old_target_dict = build_file_data['targets'][index]
new_target_dict = copy.deepcopy(build_file_data['target_defaults'])
MergeDicts(new_target_dict, old_target_dict,
build_file_path, build_file_path)
build_file_data['targets'][index] = new_target_dict
index += 1
# No longer needed.
del build_file_data['target_defaults']
# Look for dependencies. This means that dependency resolution occurs
# after "pre" conditionals and variable expansion, but before "post" -
# in other words, you can't put a "dependencies" section inside a "post"
# conditional within a target.
dependencies = []
if 'targets' in build_file_data:
for target_dict in build_file_data['targets']:
if 'dependencies' not in target_dict:
continue
for dependency in target_dict['dependencies']:
dependencies.append(
gyp.common.ResolveTarget(build_file_path, dependency, None)[0])
if load_dependencies:
for dependency in dependencies:
try:
LoadTargetBuildFile(dependency, data, aux_data, variables,
includes, depth, check, load_dependencies)
except Exception, e:
gyp.common.ExceptionAppend(
e, 'while loading dependencies of %s' % build_file_path)
raise
else:
return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
build_file_path, data,
aux_data, variables,
includes, depth, check,
generator_input_info):
"""Wrapper around LoadTargetBuildFile for parallel processing.
This wrapper is used when LoadTargetBuildFile is executed in
a worker process.
"""
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Apply globals so that the worker process behaves the same.
for key, value in global_flags.iteritems():
globals()[key] = value
# Save the keys so we can return data that changed.
data_keys = set(data)
aux_data_keys = set(aux_data)
SetGeneratorGlobals(generator_input_info)
result = LoadTargetBuildFile(build_file_path, data,
aux_data, variables,
includes, depth, check, False)
if not result:
return result
(build_file_path, dependencies) = result
data_out = {}
for key in data:
if key == 'target_build_files':
continue
if key not in data_keys:
data_out[key] = data[key]
aux_data_out = {}
for key in aux_data:
if key not in aux_data_keys:
aux_data_out[key] = aux_data[key]
# This gets serialized and sent back to the main process via a pipe.
# It's handled in LoadTargetBuildFileCallback.
return (build_file_path,
data_out,
aux_data_out,
dependencies)
except GypError, e:
sys.stderr.write("gyp: %s\n" % e)
return None
except Exception, e:
print >>sys.stderr, 'Exception:', e
print >>sys.stderr, traceback.format_exc()
return None
class ParallelProcessingError(Exception):
pass
class ParallelState(object):
"""Class to keep track of state when processing input files in parallel.
If build files are loaded in parallel, use this to keep track of
state during farming out and processing parallel jobs. It's stored
in a global so that the callback function can have access to it.
"""
def __init__(self):
# The multiprocessing pool.
self.pool = None
# The condition variable used to protect this object and notify
# the main loop when there might be more data to process.
self.condition = None
# The "data" dict that was passed to LoadTargetBuildFileParallel
self.data = None
# The "aux_data" dict that was passed to LoadTargetBuildFileParallel
self.aux_data = None
# The number of parallel calls outstanding; decremented when a response
# was received.
self.pending = 0
# The set of all build files that have been scheduled, so we don't
# schedule the same one twice.
self.scheduled = set()
# A list of dependency build file paths that haven't been scheduled yet.
self.dependencies = []
# Flag to indicate if there was an error in a child process.
self.error = False
def LoadTargetBuildFileCallback(self, result):
"""Handle the results of running LoadTargetBuildFile in another process.
"""
self.condition.acquire()
if not result:
self.error = True
self.condition.notify()
self.condition.release()
return
(build_file_path0, data0, aux_data0, dependencies0) = result
self.data['target_build_files'].add(build_file_path0)
for key in data0:
self.data[key] = data0[key]
for key in aux_data0:
self.aux_data[key] = aux_data0[key]
for new_dependency in dependencies0:
if new_dependency not in self.scheduled:
self.scheduled.add(new_dependency)
self.dependencies.append(new_dependency)
self.pending -= 1
self.condition.notify()
self.condition.release()
def LoadTargetBuildFilesParallel(build_files, data, aux_data,
variables, includes, depth, check,
generator_input_info):
parallel_state = ParallelState()
parallel_state.condition = threading.Condition()
# Make copies of the build_files argument that we can modify while working.
parallel_state.dependencies = list(build_files)
parallel_state.scheduled = set(build_files)
parallel_state.pending = 0
parallel_state.data = data
parallel_state.aux_data = aux_data
try:
parallel_state.condition.acquire()
while parallel_state.dependencies or parallel_state.pending:
if parallel_state.error:
break
if not parallel_state.dependencies:
parallel_state.condition.wait()
continue
dependency = parallel_state.dependencies.pop()
parallel_state.pending += 1
data_in = {}
data_in['target_build_files'] = data['target_build_files']
aux_data_in = {}
global_flags = {
'path_sections': globals()['path_sections'],
'non_configuration_keys': globals()['non_configuration_keys'],
'multiple_toolsets': globals()['multiple_toolsets']}
if not parallel_state.pool:
parallel_state.pool = multiprocessing.Pool(8)
parallel_state.pool.apply_async(
CallLoadTargetBuildFile,
args = (global_flags, dependency,
data_in, aux_data_in,
variables, includes, depth, check, generator_input_info),
callback = parallel_state.LoadTargetBuildFileCallback)
except KeyboardInterrupt, e:
parallel_state.pool.terminate()
raise e
parallel_state.condition.release()
parallel_state.pool.close()
parallel_state.pool.join()
parallel_state.pool = None
if parallel_state.error:
sys.exit(1)
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple. For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
LBRACKETS= set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}
def FindEnclosingBracketGroup(input_str):
stack = []
start = -1
for index, char in enumerate(input_str):
if char in LBRACKETS:
stack.append(char)
if start == -1:
start = index
elif char in BRACKETS:
if not stack:
return (-1, -1)
if stack.pop() != BRACKETS[char]:
return (-1, -1)
if not stack:
return (start, index + 1)
return (-1, -1)
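# Tiny illustration of the helper above (not used by gyp itself): the result
# is (index of the first opening bracket, index one past its matching closer),
# or (-1, -1) when nothing matches.
def _FindEnclosingBracketGroupExamples():
  assert FindEnclosingBracketGroup('<(foo <(bar)) blah') == (1, 13)
  assert FindEnclosingBracketGroup('no brackets here') == (-1, -1)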
canonical_int_re = re.compile('(0|-?[1-9][0-9]*)$')
def IsStrCanonicalInt(string):
"""Returns True if |string| is in its canonical integer form.
The canonical form is such that str(int(string)) == string.
"""
return isinstance(string, str) and canonical_int_re.match(string)
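# Quick illustration (not used by gyp itself): a string is a canonical int
# only when str(int(s)) round-trips exactly, so leading zeros, floats,
# whitespace and non-string inputs all fail the check.
def _IsStrCanonicalIntExamples():
  assert IsStrCanonicalInt('42')
  assert IsStrCanonicalInt('-7')
  assert not IsStrCanonicalInt('042')   # leading zero
  assert not IsStrCanonicalInt('4.2')   # not an integer literal
  assert not IsStrCanonicalInt(' 42')   # whitespace
  assert not IsStrCanonicalInt(42)      # only str instances qualify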
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
early_variable_re = re.compile(
'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
'(?P<command_string>[-a-zA-Z0-9_.]+)?'
'\((?P<is_array>\s*\[?)'
'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
'(?P<command_string>[-a-zA-Z0-9_.]+)?'
'\((?P<is_array>\s*\[?)'
'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
'(?P<command_string>[-a-zA-Z0-9_.]+)?'
'\((?P<is_array>\s*\[?)'
'(?P<content>.*?)(\]?)\))')
# Global cache of results from running commands so they don't have to be run
# more then once.
cached_command_results = {}
def FixupPlatformCommand(cmd):
if sys.platform == 'win32':
if type(cmd) == list:
cmd = [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
else:
cmd = re.sub('^cat ', 'type ', cmd)
return cmd
PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2
def ExpandVariables(input, phase, variables, build_file):
# Look for the pattern that gets expanded into variables
if phase == PHASE_EARLY:
variable_re = early_variable_re
expansion_symbol = '<'
elif phase == PHASE_LATE:
variable_re = late_variable_re
expansion_symbol = '>'
elif phase == PHASE_LATELATE:
variable_re = latelate_variable_re
expansion_symbol = '^'
else:
assert False
input_str = str(input)
if IsStrCanonicalInt(input_str):
return int(input_str)
# Do a quick scan to determine if an expensive regex search is warranted.
if expansion_symbol not in input_str:
return input_str
# Get the entire list of matches as a list of MatchObject instances.
# (using findall here would return strings instead of MatchObjects).
matches = list(variable_re.finditer(input_str))
if not matches:
return input_str
output = input_str
# Reverse the list of matches so that replacements are done right-to-left.
# That ensures that earlier replacements won't mess up the string in a
# way that causes later calls to find the earlier substituted text instead
# of what's intended for replacement.
matches.reverse()
for match_group in matches:
match = match_group.groupdict()
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
# match['replace'] is the substring to look for, match['type']
# is the character code for the replacement type (< > <! >! <| >| <@
# >@ <!@ >!@), match['is_array'] contains a '[' for command
# arrays, and match['content'] is the name of the variable (< >)
# or command to run (<! >!). match['command_string'] is an optional
# command string. Currently, only 'pymod_do_main' is supported.
# run_command is true if a ! variant is used.
run_command = '!' in match['type']
command_string = match['command_string']
# file_list is true if a | variant is used.
file_list = '|' in match['type']
# Capture these now so we can adjust them later.
replace_start = match_group.start('replace')
replace_end = match_group.end('replace')
# Find the ending paren, and re-evaluate the contained string.
(c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])
# Adjust the replacement range to match the entire command
# found by FindEnclosingBracketGroup (since the variable_re
# probably doesn't match the entire command if it contained
# nested variables).
replace_end = replace_start + c_end
# Find the "real" replacement, matching the appropriate closing
# paren, and adjust the replacement start and end.
replacement = input_str[replace_start:replace_end]
# Figure out what the contents of the variable parens are.
contents_start = replace_start + c_start + 1
contents_end = replace_end - 1
contents = input_str[contents_start:contents_end]
# Do filter substitution now for <|().
# Admittedly, this is different than the evaluation order in other
# contexts. However, since filtration has no chance to run on <|(),
# this seems like the only obvious way to give them access to filters.
if file_list:
processed_variables = copy.deepcopy(variables)
ProcessListFiltersInDict(contents, processed_variables)
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase,
processed_variables, build_file)
else:
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase, variables, build_file)
# Strip off leading/trailing whitespace so that variable matches are
# simpler below (and because they are rarely needed).
contents = contents.strip()
# expand_to_list is true if an @ variant is used. In that case,
# the expansion should result in a list. Note that the caller
    # must be expecting a list in return; not all callers do, because not
    # all are working in list context. Also, for list
# expansions, there can be no other text besides the variable
# expansion in the input string.
expand_to_list = '@' in match['type'] and input_str == replacement
if run_command or file_list:
# Find the build file's directory, so commands can be run or file lists
# generated relative to it.
build_file_dir = os.path.dirname(build_file)
if build_file_dir == '' and not file_list:
# If build_file is just a leaf filename indicating a file in the
# current directory, build_file_dir might be an empty string. Set
# it to None to signal to subprocess.Popen that it should run the
# command in the current directory.
build_file_dir = None
# Support <|(listfile.txt ...) which generates a file
# containing items from a gyp list, generated at gyp time.
# This works around actions/rules which have more inputs than will
# fit on the command line.
if file_list:
if type(contents) == list:
contents_list = contents
else:
contents_list = contents.split(' ')
replacement = contents_list[0]
if os.path.isabs(replacement):
raise GypError('| cannot handle absolute paths, got "%s"' % replacement)
if not generator_filelist_paths:
path = os.path.join(build_file_dir, replacement)
else:
if os.path.isabs(build_file_dir):
toplevel = generator_filelist_paths['toplevel']
rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
else:
rel_build_file_dir = build_file_dir
qualified_out_dir = generator_filelist_paths['qualified_out_dir']
path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
gyp.common.EnsureDirExists(path)
replacement = gyp.common.RelativePath(path, build_file_dir)
f = gyp.common.WriteOnDiff(path)
for i in contents_list[1:]:
f.write('%s\n' % i)
f.close()
elif run_command:
use_shell = True
if match['is_array']:
contents = eval(contents)
use_shell = False
# Check for a cached value to avoid executing commands, or generating
# file lists more than once.
# TODO(http://code.google.com/p/gyp/issues/detail?id=112): It is
# possible that the command being invoked depends on the current
# directory. For that case the syntax needs to be extended so that the
# directory is also used in cache_key (it becomes a tuple).
# TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
# someone could author a set of GYP files where each time the command
# is invoked it produces different output by design. When the need
# arises, the syntax should be extended to support no caching off a
# command's output so it is run every time.
cache_key = str(contents)
cached_value = cached_command_results.get(cache_key, None)
if cached_value is None:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Executing command '%s' in directory '%s'",
contents, build_file_dir)
replacement = ''
if command_string == 'pymod_do_main':
# <!pymod_do_main(modulename param eters) loads |modulename| as a
# python module and then calls that module's DoMain() function,
# passing ["param", "eters"] as a single list argument. For modules
# that don't load quickly, this can be faster than
# <!(python modulename param eters). Do this in |build_file_dir|.
oldwd = os.getcwd() # Python doesn't like os.open('.'): no fchdir.
if build_file_dir: # build_file_dir may be None (see above).
os.chdir(build_file_dir)
try:
parsed_contents = shlex.split(contents)
try:
py_module = __import__(parsed_contents[0])
except ImportError as e:
raise GypError("Error importing pymod_do_main"
"module (%s): %s" % (parsed_contents[0], e))
replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
finally:
os.chdir(oldwd)
assert replacement != None
elif command_string:
raise GypError("Unknown command string '%s' in '%s'." %
(command_string, contents))
else:
# Fix up command with platform specific workarounds.
contents = FixupPlatformCommand(contents)
p = subprocess.Popen(contents, shell=use_shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
cwd=build_file_dir)
p_stdout, p_stderr = p.communicate('')
if p.wait() != 0 or p_stderr:
sys.stderr.write(p_stderr)
# Simulate check_call behavior, since check_call only exists
# in python 2.5 and later.
raise GypError("Call to '%s' returned exit status %d." %
(contents, p.returncode))
replacement = p_stdout.rstrip()
cached_command_results[cache_key] = replacement
else:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Had cache value for command '%s' in directory '%s'",
contents,build_file_dir)
replacement = cached_value
else:
if not contents in variables:
if contents[-1] in ['!', '/']:
# In order to allow cross-compiles (nacl) to happen more naturally,
# we will allow references to >(sources/) etc. to resolve to
          # an empty list if undefined. This allows actions to:
# 'action!': [
# '>@(_sources!)',
# ],
# 'action/': [
# '>@(_sources/)',
# ],
replacement = []
else:
raise GypError('Undefined variable ' + contents +
' in ' + build_file)
else:
replacement = variables[contents]
if isinstance(replacement, list):
for item in replacement:
if (not contents[-1] == '/' and
not isinstance(item, str) and not isinstance(item, int)):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'list contains a ' +
item.__class__.__name__)
# Run through the list and handle variable expansions in it. Since
# the list is guaranteed not to contain dicts, this won't do anything
# with conditions sections.
ProcessVariablesAndConditionsInList(replacement, phase, variables,
build_file)
elif not isinstance(replacement, str) and \
not isinstance(replacement, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'found a ' + replacement.__class__.__name__)
if expand_to_list:
# Expanding in list context. It's guaranteed that there's only one
# replacement to do in |input_str| and that it's this replacement. See
# above.
if isinstance(replacement, list):
# If it's already a list, make a copy.
output = replacement[:]
else:
# Split it the same way sh would split arguments.
output = shlex.split(str(replacement))
else:
# Expanding in string context.
encoded_replacement = ''
if isinstance(replacement, list):
# When expanding a list into string context, turn the list items
# into a string in a way that will work with a subprocess call.
#
# TODO(mark): This isn't completely correct. This should
# call a generator-provided function that observes the
# proper list-to-argument quoting rules on a specific
# platform instead of just calling the POSIX encoding
# routine.
encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
else:
encoded_replacement = replacement
output = output[:replace_start] + str(encoded_replacement) + \
output[replace_end:]
# Prepare for the next match iteration.
input_str = output
# Look for more matches now that we've replaced some, to deal with
# expanding local variables (variables defined in the same
# variables block as this one).
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
if isinstance(output, list):
if output and isinstance(output[0], list):
# Leave output alone if it's a list of lists.
# We don't want such lists to be stringified.
pass
else:
new_output = []
for item in output:
new_output.append(
ExpandVariables(item, phase, variables, build_file))
output = new_output
else:
output = ExpandVariables(output, phase, variables, build_file)
# Convert all strings that are canonically-represented integers into integers.
if isinstance(output, list):
for index in xrange(0, len(output)):
if IsStrCanonicalInt(output[index]):
output[index] = int(output[index])
elif IsStrCanonicalInt(output):
output = int(output)
return output
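# Worked example of the expansion syntax handled above (illustrative only,
# never called): '<(name)' substitutes a variable inside a string, and the
# '<@(...)' form expands a list-valued variable in list context.
def _ExpandVariablesExamples():
  variables = {'name': 'hello', 'deps': ['a', 'b']}
  assert ExpandVariables('lib<(name).a', PHASE_EARLY,
                         variables, 'example.gyp') == 'libhello.a'
  assert ExpandVariables('<@(deps)', PHASE_EARLY,
                         variables, 'example.gyp') == ['a', 'b']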
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
# Process a 'conditions' or 'target_conditions' section in the_dict,
# depending on phase.
# early -> conditions
# late -> target_conditions
# latelate -> no conditions
#
# Each item in a conditions list consists of cond_expr, a string expression
# evaluated as the condition, and true_dict, a dict that will be merged into
# the_dict if cond_expr evaluates to true. Optionally, a third item,
# false_dict, may be present. false_dict is merged into the_dict if
# cond_expr evaluates to false.
#
# Any dict merged into the_dict will be recursively processed for nested
# conditionals and other expansions, also according to phase, immediately
# prior to being merged.
if phase == PHASE_EARLY:
conditions_key = 'conditions'
elif phase == PHASE_LATE:
conditions_key = 'target_conditions'
elif phase == PHASE_LATELATE:
return
else:
assert False
if not conditions_key in the_dict:
return
conditions_list = the_dict[conditions_key]
# Unhook the conditions list, it's no longer needed.
del the_dict[conditions_key]
for condition in conditions_list:
if not isinstance(condition, list):
raise GypError(conditions_key + ' must be a list')
if len(condition) != 2 and len(condition) != 3:
# It's possible that condition[0] won't work in which case this
# attempt will raise its own IndexError. That's probably fine.
raise GypError(conditions_key + ' ' + condition[0] +
' must be length 2 or 3, not ' + str(len(condition)))
[cond_expr, true_dict] = condition[0:2]
false_dict = None
if len(condition) == 3:
false_dict = condition[2]
    # Do expansions on the condition itself. Since the condition can naturally
# contain variable references without needing to resort to GYP expansion
# syntax, this is of dubious value for variables, but someone might want to
# use a command expansion directly inside a condition.
cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
build_file)
if not isinstance(cond_expr_expanded, str) and \
not isinstance(cond_expr_expanded, int):
raise ValueError, \
'Variable expansion in this context permits str and int ' + \
            'only, found ' + cond_expr_expanded.__class__.__name__
try:
ast_code = compile(cond_expr_expanded, '<string>', 'eval')
if eval(ast_code, {'__builtins__': None}, variables):
merge_dict = true_dict
else:
merge_dict = false_dict
except SyntaxError, e:
syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
'at character %d.' %
(str(e.args[0]), e.text, build_file, e.offset),
e.filename, e.lineno, e.offset, e.text)
raise syntax_error
except NameError, e:
gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
(cond_expr_expanded, build_file))
raise GypError(e)
if merge_dict != None:
      # Expand variables and nested conditionals in the merge_dict before
# merging it.
ProcessVariablesAndConditionsInDict(merge_dict, phase,
variables, build_file)
MergeDicts(the_dict, merge_dict, build_file, build_file)
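# Minimal illustration of the conditions format handled above (never called):
# each entry is [cond_expr, true_dict] or [cond_expr, true_dict, false_dict],
# and the dict selected by evaluating cond_expr against the variables is
# merged into the enclosing dict while the 'conditions' key is removed.
def _ProcessConditionsInDictExample():
  the_dict = {
    'defines': ['COMMON'],
    'conditions': [
      ['OS=="linux"', {'defines': ['IS_LINUX']}, {'defines': ['NOT_LINUX']}],
    ],
  }
  ProcessConditionsInDict(the_dict, PHASE_EARLY, {'OS': 'linux'}, 'example.gyp')
  assert 'IS_LINUX' in the_dict['defines']
  assert 'NOT_LINUX' not in the_dict['defines']
  assert 'conditions' not in the_dict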
def LoadAutomaticVariablesFromDict(variables, the_dict):
# Any keys with plain string values in the_dict become automatic variables.
# The variable name is the key name with a "_" character prepended.
for key, value in the_dict.iteritems():
if isinstance(value, str) or isinstance(value, int) or \
isinstance(value, list):
variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
# Any keys in the_dict's "variables" dict, if it has one, becomes a
# variable. The variable name is the key name in the "variables" dict.
# Variables that end with the % character are set only if they are unset in
# the variables dict. the_dict_key is the name of the key that accesses
# the_dict in the_dict's parent dict. If the_dict's parent is not a dict
# (it could be a list or it could be parentless because it is a root dict),
# the_dict_key will be None.
for key, value in the_dict.get('variables', {}).iteritems():
if not isinstance(value, str) and not isinstance(value, int) and \
not isinstance(value, list):
continue
if key.endswith('%'):
variable_name = key[:-1]
if variable_name in variables:
# If the variable is already set, don't set it.
continue
      if the_dict_key == 'variables' and variable_name in the_dict:
# If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
# variables dict), use the_dict's definition.
value = the_dict[variable_name]
else:
variable_name = key
variables[variable_name] = value
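# Worked example of the '%' default behavior described above (illustrative
# only, never called): a trailing '%' marks a variable as a default that is
# applied only when nothing has set it yet.
def _LoadVariablesFromVariablesDictExample():
  variables = {'flavor': 'release'}
  the_dict = {'variables': {'flavor%': 'debug', 'arch%': 'x64'}}
  LoadVariablesFromVariablesDict(variables, the_dict, None)
  assert variables['flavor'] == 'release'  # existing value wins over the default
  assert variables['arch'] == 'x64'        # unset, so the '%' default applies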
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
build_file, the_dict_key=None):
"""Handle all variable and command expansion and conditional evaluation.
This function is the public entry point for all variable expansions and
conditional evaluations. The variables_in dictionary will not be modified
by this function.
"""
# Make a copy of the variables_in dict that can be modified during the
# loading of automatics and the loading of the variables dict.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
if 'variables' in the_dict:
# Make sure all the local variables are added to the variables
# list before we process them so that you can reference one
# variable from another. They will be fully expanded by recursion
# in ExpandVariables.
for key, value in the_dict['variables'].iteritems():
variables[key] = value
# Handle the associated variables dict first, so that any variable
# references within can be resolved prior to using them as variables.
# Pass a copy of the variables dict to avoid having it be tainted.
# Otherwise, it would have extra automatics added for everything that
# should just be an ordinary variable in this scope.
ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
variables, build_file, 'variables')
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
for key, value in the_dict.iteritems():
# Skip "variables", which was already processed if present.
if key != 'variables' and isinstance(value, str):
expanded = ExpandVariables(value, phase, variables, build_file)
if not isinstance(expanded, str) and not isinstance(expanded, int):
raise ValueError, \
'Variable expansion in this context permits str and int ' + \
'only, found ' + expanded.__class__.__name__ + ' for ' + key
the_dict[key] = expanded
# Variable expansion may have resulted in changes to automatics. Reload.
# TODO(mark): Optimization: only reload if no changes were made.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Process conditions in this dict. This is done after variable expansion
# so that conditions may take advantage of expanded variables. For example,
# if the_dict contains:
# {'type': '<(library_type)',
# 'conditions': [['_type=="static_library"', { ... }]]},
# _type, as used in the condition, will only be set to the value of
# library_type if variable expansion is performed before condition
# processing. However, condition processing should occur prior to recursion
# so that variables (both automatic and "variables" dict type) may be
# adjusted by conditions sections, merged into the_dict, and have the
# intended impact on contained dicts.
#
# This arrangement means that a "conditions" section containing a "variables"
# section will only have those variables effective in subdicts, not in
# the_dict. The workaround is to put a "conditions" section within a
# "variables" section. For example:
# {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will not result in "IS_MAC" being appended to the "defines" list in the
# current scope but would result in it being appended to the "defines" list
# within "my_subdict". By comparison:
# {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will append "IS_MAC" to both "defines" lists.
# Evaluate conditions sections, allowing variable expansions within them
# as well as nested conditionals. This will process a 'conditions' or
# 'target_conditions' section, perform appropriate merging and recursive
# conditional and variable processing, and then remove the conditions section
# from the_dict if it is present.
ProcessConditionsInDict(the_dict, phase, variables, build_file)
# Conditional processing may have resulted in changes to automatics or the
# variables dict. Reload.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Recurse into child dicts, or process child lists which may result in
# further recursion into descendant dicts.
for key, value in the_dict.iteritems():
# Skip "variables" and string values, which were already processed if
# present.
if key == 'variables' or isinstance(value, str):
continue
if isinstance(value, dict):
# Pass a copy of the variables dict so that subdicts can't influence
# parents.
ProcessVariablesAndConditionsInDict(value, phase, variables,
build_file, key)
elif isinstance(value, list):
# The list itself can't influence the variables dict, and
# ProcessVariablesAndConditionsInList will make copies of the variables
# dict if it needs to pass it to something that can influence it. No
# copy is necessary here.
ProcessVariablesAndConditionsInList(value, phase, variables,
build_file)
elif not isinstance(value, int):
raise TypeError, 'Unknown type ' + value.__class__.__name__ + \
' for ' + key
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
build_file):
# Iterate using an index so that new values can be assigned into the_list.
index = 0
while index < len(the_list):
item = the_list[index]
if isinstance(item, dict):
# Make a copy of the variables dict so that it won't influence anything
# outside of its own scope.
ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
elif isinstance(item, list):
ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
elif isinstance(item, str):
expanded = ExpandVariables(item, phase, variables, build_file)
if isinstance(expanded, str) or isinstance(expanded, int):
the_list[index] = expanded
elif isinstance(expanded, list):
the_list[index:index+1] = expanded
index += len(expanded)
# index now identifies the next item to examine. Continue right now
# without falling into the index increment below.
continue
else:
raise ValueError, \
'Variable expansion in this context permits strings and ' + \
'lists only, found ' + expanded.__class__.__name__ + ' at ' + \
              str(index)
elif not isinstance(item, int):
raise TypeError, 'Unknown type ' + item.__class__.__name__ + \
                       ' at index ' + str(index)
index = index + 1
def BuildTargetsDict(data):
"""Builds a dict mapping fully-qualified target names to their target dicts.
|data| is a dict mapping loaded build files by pathname relative to the
current directory. Values in |data| are build file contents. For each
|data| value with a "targets" key, the value of the "targets" key is taken
as a list containing target dicts. Each target's fully-qualified name is
constructed from the pathname of the build file (|data| key) and its
"target_name" property. These fully-qualified names are used as the keys
in the returned dict. These keys provide access to the target dicts,
the dicts in the "targets" lists.
"""
targets = {}
for build_file in data['target_build_files']:
for target in data[build_file].get('targets', []):
target_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if target_name in targets:
raise GypError('Duplicate target definitions for ' + target_name)
targets[target_name] = target
return targets
def QualifyDependencies(targets):
"""Make dependency links fully-qualified relative to the current directory.
|targets| is a dict mapping fully-qualified target names to their target
dicts. For each target in this dict, keys known to contain dependency
links are examined, and any dependencies referenced will be rewritten
so that they are fully-qualified and relative to the current directory.
All rewritten dependencies are suitable for use as keys to |targets| or a
similar dict.
"""
all_dependency_sections = [dep + op
for dep in dependency_sections
for op in ('', '!', '/')]
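  # For instance, if dependency_sections were ['dependencies',
  # 'export_dependent_settings'], the list built above would be
  # ['dependencies', 'dependencies!', 'dependencies/',
  #  'export_dependent_settings', 'export_dependent_settings!',
  #  'export_dependent_settings/'].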
for target, target_dict in targets.iteritems():
target_build_file = gyp.common.BuildFile(target)
toolset = target_dict['toolset']
for dependency_key in all_dependency_sections:
dependencies = target_dict.get(dependency_key, [])
for index in xrange(0, len(dependencies)):
dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
target_build_file, dependencies[index], toolset)
if not multiple_toolsets:
# Ignore toolset specification in the dependency if it is specified.
dep_toolset = toolset
dependency = gyp.common.QualifiedTarget(dep_file,
dep_target,
dep_toolset)
dependencies[index] = dependency
# Make sure anything appearing in a list other than "dependencies" also
# appears in the "dependencies" list.
if dependency_key != 'dependencies' and \
dependency not in target_dict['dependencies']:
raise GypError('Found ' + dependency + ' in ' + dependency_key +
' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
"""Expands dependencies specified as build_file:*.
For each target in |targets|, examines sections containing links to other
targets. If any such section contains a link of the form build_file:*, it
is taken as a wildcard link, and is expanded to list each target in
build_file. The |data| dict provides access to build file dicts.
Any target that does not wish to be included by wildcard can provide an
optional "suppress_wildcard" key in its target dict. When present and
true, a wildcard dependency link will not include such targets.
All dependency names, including the keys to |targets| and the values in each
dependency list, must be qualified when this function is called.
"""
for target, target_dict in targets.iteritems():
toolset = target_dict['toolset']
target_build_file = gyp.common.BuildFile(target)
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
# Loop this way instead of "for dependency in" or "for index in xrange"
# because the dependencies list will be modified within the loop body.
index = 0
while index < len(dependencies):
(dependency_build_file, dependency_target, dependency_toolset) = \
gyp.common.ParseQualifiedTarget(dependencies[index])
if dependency_target != '*' and dependency_toolset != '*':
# Not a wildcard. Keep it moving.
index = index + 1
continue
if dependency_build_file == target_build_file:
# It's an error for a target to depend on all other targets in
# the same file, because a target cannot depend on itself.
raise GypError('Found wildcard in ' + dependency_key + ' of ' +
target + ' referring to same build file')
# Take the wildcard out and adjust the index so that the next
# dependency in the list will be processed the next time through the
# loop.
del dependencies[index]
index = index - 1
# Loop through the targets in the other build file, adding them to
# this target's list of dependencies in place of the removed
# wildcard.
dependency_target_dicts = data[dependency_build_file]['targets']
for dependency_target_dict in dependency_target_dicts:
if int(dependency_target_dict.get('suppress_wildcard', False)):
continue
dependency_target_name = dependency_target_dict['target_name']
if (dependency_target != '*' and
dependency_target != dependency_target_name):
continue
dependency_target_toolset = dependency_target_dict['toolset']
if (dependency_toolset != '*' and
dependency_toolset != dependency_target_toolset):
continue
dependency = gyp.common.QualifiedTarget(dependency_build_file,
dependency_target_name,
dependency_target_toolset)
index = index + 1
dependencies.insert(index, dependency)
index = index + 1
def Unify(l):
"""Removes duplicate elements from l, keeping the first element."""
seen = {}
return [seen.setdefault(e, e) for e in l if e not in seen]
def RemoveDuplicateDependencies(targets):
"""Makes sure every dependency appears only once in all targets's dependency
lists."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
target_dict[dependency_key] = Unify(dependencies)
def Filter(l, item):
"""Removes item from l."""
res = {}
return [res.setdefault(e, e) for e in l if e != item]
def RemoveSelfDependencies(targets):
"""Remove self dependencies from targets that have the prune_self_dependency
variable set."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if t == target_name:
if targets[t].get('variables', {}).get('prune_self_dependency', 0):
target_dict[dependency_key] = Filter(dependencies, target_name)
class DependencyGraphNode(object):
"""
Attributes:
ref: A reference to an object that this DependencyGraphNode represents.
dependencies: List of DependencyGraphNodes on which this one depends.
dependents: List of DependencyGraphNodes that depend on this one.
"""
class CircularException(GypError):
pass
def __init__(self, ref):
self.ref = ref
self.dependencies = []
self.dependents = []
def __repr__(self):
return '<DependencyGraphNode: %r>' % self.ref
def FlattenToList(self):
# flat_list is the sorted list of dependencies - actually, the list items
# are the "ref" attributes of DependencyGraphNodes. Every target will
# appear in flat_list after all of its dependencies, and before all of its
# dependents.
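    # For example, if A depends on B and B depends on C, the resulting order
    # is [C, B, A]: each ref appears only after everything it depends on.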
flat_list = []
    # in_degree_zeros is the set of DependencyGraphNodes that have no
# dependencies not in flat_list. Initially, it is a copy of the children
# of this node, because when the graph was built, nodes with no
# dependencies were made implicit dependents of the root node.
in_degree_zeros = set(self.dependents[:])
while in_degree_zeros:
# Nodes in in_degree_zeros have no dependencies not in flat_list, so they
# can be appended to flat_list. Take these nodes out of in_degree_zeros
# as work progresses, so that the next node to process from the list can
# always be accessed at a consistent position.
node = in_degree_zeros.pop()
flat_list.append(node.ref)
# Look at dependents of the node just added to flat_list. Some of them
# may now belong in in_degree_zeros.
for node_dependent in node.dependents:
is_in_degree_zero = True
for node_dependent_dependency in node_dependent.dependencies:
if not node_dependent_dependency.ref in flat_list:
            # The dependent has one or more dependencies not in flat_list. There
# will be more chances to add it to flat_list when examining
# it again as a dependent of those other dependencies, provided
# that there are no cycles.
is_in_degree_zero = False
break
if is_in_degree_zero:
# All of the dependent's dependencies are already in flat_list. Add
# it to in_degree_zeros where it will be processed in a future
# iteration of the outer loop.
in_degree_zeros.add(node_dependent)
return flat_list
def FindCycles(self, path=None):
"""
Returns a list of cycles in the graph, where each cycle is its own list.
"""
if path is None:
path = [self]
results = []
for node in self.dependents:
if node in path:
cycle = [node]
for part in path:
cycle.append(part)
if part == node:
break
results.append(tuple(cycle))
else:
results.extend(node.FindCycles([node] + path))
return list(set(results))
def DirectDependencies(self, dependencies=None):
"""Returns a list of just direct dependencies."""
if dependencies == None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref != None and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
return dependencies
def _AddImportedDependencies(self, targets, dependencies=None):
"""Given a list of direct dependencies, adds indirect dependencies that
other dependencies have declared to export their settings.
This method does not operate on self. Rather, it operates on the list
of dependencies in the |dependencies| argument. For each dependency in
that list, if any declares that it exports the settings of one of its
own dependencies, those dependencies whose settings are "passed through"
are added to the list. As new items are added to the list, they too will
be processed, so it is possible to import settings through multiple levels
of dependencies.
    This method is not terribly useful on its own; it depends on being
"primed" with a list of direct dependencies such as one provided by
DirectDependencies. DirectAndImportedDependencies is intended to be the
public entry point.
"""
if dependencies == None:
dependencies = []
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Add any dependencies whose settings should be imported to the list
# if not already present. Newly-added items will be checked for
# their own imports when the list iteration reaches them.
# Rather than simply appending new items, insert them after the
# dependency that exported them. This is done to more closely match
# the depth-first method used by DeepDependencies.
add_index = 1
for imported_dependency in \
dependency_dict.get('export_dependent_settings', []):
if imported_dependency not in dependencies:
dependencies.insert(index + add_index, imported_dependency)
add_index = add_index + 1
index = index + 1
return dependencies
def DirectAndImportedDependencies(self, targets, dependencies=None):
"""Returns a list of a target's direct dependencies and all indirect
dependencies that a dependency has advertised settings should be exported
through the dependency for.
"""
dependencies = self.DirectDependencies(dependencies)
return self._AddImportedDependencies(targets, dependencies)
def DeepDependencies(self, dependencies=None):
"""Returns a list of all of a target's dependencies, recursively."""
if dependencies == None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref != None and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
dependency.DeepDependencies(dependencies)
return dependencies
def _LinkDependenciesInternal(self, targets, include_shared_libraries,
dependencies=None, initial=True):
"""Returns a list of dependency targets that are linked into this target.
This function has a split personality, depending on the setting of
|initial|. Outside callers should always leave |initial| at its default
setting.
When adding a target to the list of dependencies, this function will
recurse into itself with |initial| set to False, to collect dependencies
that are linked into the linkable target for which the list is being built.
If |include_shared_libraries| is False, the resulting dependencies will not
include shared_library targets that are linked into this target.
"""
if dependencies == None:
dependencies = []
# Check for None, corresponding to the root node.
if self.ref == None:
return dependencies
# It's kind of sucky that |targets| has to be passed into this function,
# but that's presently the easiest way to access the target dicts so that
# this function can find target types.
if 'target_name' not in targets[self.ref]:
raise GypError("Missing 'target_name' field in target.")
if 'type' not in targets[self.ref]:
raise GypError("Missing 'type' field in target %s" %
targets[self.ref]['target_name'])
target_type = targets[self.ref]['type']
is_linkable = target_type in linkable_types
if initial and not is_linkable:
# If this is the first target being examined and it's not linkable,
# return an empty list of link dependencies, because the link
# dependencies are intended to apply to the target itself (initial is
# True) and this target won't be linked.
return dependencies
# Don't traverse 'none' targets if explicitly excluded.
if (target_type == 'none' and
not targets[self.ref].get('dependencies_traverse', True)):
if self.ref not in dependencies:
dependencies.append(self.ref)
return dependencies
# Executables and loadable modules are already fully and finally linked.
# Nothing else can be a link dependency of them, there can only be
# dependencies in the sense that a dependent target might run an
# executable or load the loadable_module.
if not initial and target_type in ('executable', 'loadable_module'):
return dependencies
# Shared libraries are already fully linked. They should only be included
# in |dependencies| when adjusting static library dependencies (in order to
# link against the shared_library's import lib), but should not be included
# in |dependencies| when propagating link_settings.
# The |include_shared_libraries| flag controls which of these two cases we
# are handling.
if (not initial and target_type == 'shared_library' and
not include_shared_libraries):
return dependencies
# The target is linkable, add it to the list of link dependencies.
if self.ref not in dependencies:
dependencies.append(self.ref)
if initial or not is_linkable:
      # If this is a subsequent target and it's linkable, don't look any
      # further for linkable dependencies, as they'll already be linked into
      # this linkable target. Always look at dependencies of the initial
      # target, and always look at dependencies of non-linkables.
for dependency in self.dependencies:
dependency._LinkDependenciesInternal(targets,
include_shared_libraries,
dependencies, False)
return dependencies
def DependenciesForLinkSettings(self, targets):
"""
Returns a list of dependency targets whose link_settings should be merged
into this target.
"""
# TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
# link_settings are propagated. So for now, we will allow it, unless the
# 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
# False. Once chrome is fixed, we can remove this flag.
include_shared_libraries = \
targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
return self._LinkDependenciesInternal(targets, include_shared_libraries)
def DependenciesToLinkAgainst(self, targets):
"""
Returns a list of dependency targets that are linked into this target.
"""
return self._LinkDependenciesInternal(targets, True)
def BuildDependencyList(targets):
# Create a DependencyGraphNode for each target. Put it into a dict for easy
# access.
dependency_nodes = {}
for target, spec in targets.iteritems():
if target not in dependency_nodes:
dependency_nodes[target] = DependencyGraphNode(target)
# Set up the dependency links. Targets that have no dependencies are treated
# as dependent on root_node.
root_node = DependencyGraphNode(None)
for target, spec in targets.iteritems():
target_node = dependency_nodes[target]
target_build_file = gyp.common.BuildFile(target)
dependencies = spec.get('dependencies')
if not dependencies:
target_node.dependencies = [root_node]
root_node.dependents.append(target_node)
else:
for dependency in dependencies:
dependency_node = dependency_nodes.get(dependency)
if not dependency_node:
raise GypError("Dependency '%s' not found while "
"trying to load target %s" % (dependency, target))
target_node.dependencies.append(dependency_node)
dependency_node.dependents.append(target_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle). If you need to figure out what's wrong, look for elements of
# targets that are not in flat_list.
if len(flat_list) != len(targets):
raise DependencyGraphNode.CircularException(
'Some targets not reachable, cycle in dependency graph detected: ' +
' '.join(set(flat_list) ^ set(targets)))
return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
# Create a DependencyGraphNode for each gyp file containing a target. Put
# it into a dict for easy access.
dependency_nodes = {}
for target in targets.iterkeys():
build_file = gyp.common.BuildFile(target)
if not build_file in dependency_nodes:
dependency_nodes[build_file] = DependencyGraphNode(build_file)
# Set up the dependency links.
for target, spec in targets.iteritems():
build_file = gyp.common.BuildFile(target)
build_file_node = dependency_nodes[build_file]
target_dependencies = spec.get('dependencies', [])
for dependency in target_dependencies:
try:
dependency_build_file = gyp.common.BuildFile(dependency)
except GypError, e:
gyp.common.ExceptionAppend(
e, 'while computing dependencies of .gyp file %s' % build_file)
raise
if dependency_build_file == build_file:
# A .gyp file is allowed to refer back to itself.
continue
dependency_node = dependency_nodes.get(dependency_build_file)
if not dependency_node:
raise GypError("Dependancy '%s' not found" % dependency_build_file)
if dependency_node not in build_file_node.dependencies:
build_file_node.dependencies.append(dependency_node)
dependency_node.dependents.append(build_file_node)
# Files that have no dependencies are treated as dependent on root_node.
root_node = DependencyGraphNode(None)
for build_file_node in dependency_nodes.itervalues():
if len(build_file_node.dependencies) == 0:
build_file_node.dependencies.append(root_node)
root_node.dependents.append(build_file_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(dependency_nodes):
bad_files = []
for file in dependency_nodes.iterkeys():
if not file in flat_list:
bad_files.append(file)
common_path_prefix = os.path.commonprefix(dependency_nodes)
cycles = []
for cycle in root_node.FindCycles():
simplified_paths = []
for node in cycle:
assert(node.ref.startswith(common_path_prefix))
simplified_paths.append(node.ref[len(common_path_prefix):])
cycles.append('Cycle: %s' % ' -> '.join(simplified_paths))
raise DependencyGraphNode.CircularException, \
'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles)
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
# key should be one of all_dependent_settings, direct_dependent_settings,
# or link_settings.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
if key == 'all_dependent_settings':
dependencies = dependency_nodes[target].DeepDependencies()
elif key == 'direct_dependent_settings':
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
elif key == 'link_settings':
dependencies = \
dependency_nodes[target].DependenciesForLinkSettings(targets)
else:
raise GypError("DoDependentSettings doesn't know how to determine "
'dependencies for ' + key)
for dependency in dependencies:
dependency_dict = targets[dependency]
if not key in dependency_dict:
continue
dependency_build_file = gyp.common.BuildFile(dependency)
MergeDicts(target_dict, dependency_dict[key],
build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
sort_dependencies):
# Recompute target "dependencies" properties. For each static library
# target, remove "dependencies" entries referring to other static libraries,
# unless the dependency has the "hard_dependency" attribute set. For each
# linkable target, add a "dependencies" entry referring to all of the
  # target's computed list of link dependencies (including static libraries)
  # if no such entry is already present.
for target in flat_list:
target_dict = targets[target]
target_type = target_dict['type']
if target_type == 'static_library':
if not 'dependencies' in target_dict:
continue
target_dict['dependencies_original'] = target_dict.get(
'dependencies', [])[:]
# A static library should not depend on another static library unless
# the dependency relationship is "hard," which should only be done when
# a dependent relies on some side effect other than just the build
# product, like a rule or action output. Further, if a target has a
# non-hard dependency, but that dependency exports a hard dependency,
# the non-hard dependency can safely be removed, but the exported hard
# dependency must be added to the target to keep the same dependency
# ordering.
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Remove every non-hard static library dependency and remove every
# non-static library dependency that isn't a direct dependency.
if (dependency_dict['type'] == 'static_library' and \
not dependency_dict.get('hard_dependency', False)) or \
(dependency_dict['type'] != 'static_library' and \
not dependency in target_dict['dependencies']):
# Take the dependency out of the list, and don't increment index
# because the next dependency to analyze will shift into the index
# formerly occupied by the one being removed.
del dependencies[index]
else:
index = index + 1
# Update the dependencies. If the dependencies list is empty, it's not
# needed, so unhook it.
if len(dependencies) > 0:
target_dict['dependencies'] = dependencies
else:
del target_dict['dependencies']
elif target_type in linkable_types:
# Get a list of dependency targets that should be linked into this
# target. Add them to the dependencies list if they're not already
# present.
link_dependencies = \
dependency_nodes[target].DependenciesToLinkAgainst(targets)
for dependency in link_dependencies:
if dependency == target:
continue
if not 'dependencies' in target_dict:
target_dict['dependencies'] = []
if not dependency in target_dict['dependencies']:
target_dict['dependencies'].append(dependency)
# Sort the dependencies list in the order from dependents to dependencies.
# e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D.
# Note: flat_list is already sorted in the order from dependencies to
# dependents.
if sort_dependencies and 'dependencies' in target_dict:
target_dict['dependencies'] = [dep for dep in reversed(flat_list)
if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
# If item is a relative path, it's relative to the build file dict that it's
# coming from. Fix it up to make it relative to the build file dict that
# it's going into.
# Exception: any |item| that begins with these special characters is
# returned without modification.
# / Used when a path is already absolute (shortcut optimization;
# such paths would be returned as absolute anyway)
# $ Used for build environment variables
# - Used for some build environment flags (such as -lapr-1 in a
# "libraries" section)
# < Used for our own variable and command expansions (see ExpandVariables)
# > Used for our own variable and command expansions (see ExpandVariables)
# ^ Used for our own variable and command expansions (see ExpandVariables)
#
# "/' Used when a value is quoted. If these are present, then we
# check the second character instead.
#
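  # Illustrative example (hypothetical paths): moving the item 'icon.png'
  # from 'src/a/a.gyp' into 'src/b/b.gyp' yields '../a/icon.png'.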
if to_file == fro_file or exception_re.match(item):
return item
else:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
ret = os.path.normpath(os.path.join(
gyp.common.RelativePath(os.path.dirname(fro_file),
os.path.dirname(to_file)),
item)).replace('\\', '/')
if item[-1] == '/':
ret += '/'
return ret
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  # The Python documentation recommends that objects which do not support
  # hashing set __hash__ to None; Python library objects follow this rule.
is_hashable = lambda val: val.__hash__
# If x is hashable, returns whether x is in s. Else returns whether x is in l.
def is_in_set_or_list(x, s, l):
if is_hashable(x):
return x in s
return x in l
prepend_index = 0
# Make membership testing of hashables in |to| (in particular, strings)
# faster.
hashable_to_set = set(x for x in to if is_hashable(x))
for item in fro:
singleton = False
if isinstance(item, str) or isinstance(item, int):
# The cheap and easy case.
if is_paths:
to_item = MakePathRelative(to_file, fro_file, item)
else:
to_item = item
if not isinstance(item, str) or not item.startswith('-'):
# Any string that doesn't begin with a "-" is a singleton - it can
# only appear once in a list, to be enforced by the list merge append
# or prepend.
singleton = True
elif isinstance(item, dict):
# Make a copy of the dictionary, continuing to look for paths to fix.
# The other intelligent aspects of merge processing won't apply because
# item is being merged into an empty dict.
to_item = {}
MergeDicts(to_item, item, to_file, fro_file)
elif isinstance(item, list):
# Recurse, making a copy of the list. If the list contains any
# descendant dicts, path fixing will occur. Note that here, custom
# values for is_paths and append are dropped; those are only to be
# applied to |to| and |fro|, not sublists of |fro|. append shouldn't
# matter anyway because the new |to_item| list is empty.
to_item = []
MergeLists(to_item, item, to_file, fro_file)
else:
raise TypeError, \
'Attempt to merge list item of unsupported type ' + \
item.__class__.__name__
if append:
# If appending a singleton that's already in the list, don't append.
# This ensures that the earliest occurrence of the item will stay put.
if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
to.append(to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
else:
# If prepending a singleton that's already in the list, remove the
# existing instance and proceed with the prepend. This ensures that the
# item appears at the earliest possible position in the list.
while singleton and to_item in to:
to.remove(to_item)
# Don't just insert everything at index 0. That would prepend the new
# items to the list in reverse order, which would be an unwelcome
# surprise.
to.insert(prepend_index, to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
prepend_index = prepend_index + 1
def MergeDicts(to, fro, to_file, fro_file):
# I wanted to name the parameter "from" but it's a Python keyword...
for k, v in fro.iteritems():
# It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
# copy semantics. Something else may want to merge from the |fro| dict
# later, and having the same dict ref pointed to twice in the tree isn't
# what anyone wants considering that the dicts may subsequently be
# modified.
if k in to:
bad_merge = False
if isinstance(v, str) or isinstance(v, int):
if not (isinstance(to[k], str) or isinstance(to[k], int)):
bad_merge = True
elif v.__class__ != to[k].__class__:
bad_merge = True
if bad_merge:
raise TypeError, \
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[k].__class__.__name__ + \
' for key ' + k
if isinstance(v, str) or isinstance(v, int):
# Overwrite the existing value, if any. Cheap and easy.
is_path = IsPathSection(k)
if is_path:
to[k] = MakePathRelative(to_file, fro_file, v)
else:
to[k] = v
elif isinstance(v, dict):
# Recurse, guaranteeing copies will be made of objects that require it.
if not k in to:
to[k] = {}
MergeDicts(to[k], v, to_file, fro_file)
elif isinstance(v, list):
# Lists in dicts can be merged with different policies, depending on
# how the key in the "from" dict (k, the from-key) is written.
#
# If the from-key has ...the to-list will have this action
# this character appended:... applied when receiving the from-list:
# = replace
# + prepend
# ? set, only if to-list does not yet exist
# (none) append
#
# This logic is list-specific, but since it relies on the associated
# dict key, it's checked in this dict-oriented function.
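      # For example, a from-dict entry {'sources+': ['a.cc']} prepends
      # 'a.cc' to to['sources'], {'sources=': ['a.cc']} replaces the list
      # outright, and {'sources?': ['a.cc']} only takes effect if
      # to['sources'] does not already exist.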
ext = k[-1]
append = True
if ext == '=':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '?']
to[list_base] = []
elif ext == '+':
list_base = k[:-1]
lists_incompatible = [list_base + '=', list_base + '?']
append = False
elif ext == '?':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '=', list_base + '+']
else:
list_base = k
lists_incompatible = [list_base + '=', list_base + '?']
# Some combinations of merge policies appearing together are meaningless.
# It's stupid to replace and append simultaneously, for example. Append
# and prepend are the only policies that can coexist.
for list_incompatible in lists_incompatible:
if list_incompatible in fro:
raise GypError('Incompatible list policies ' + k + ' and ' +
list_incompatible)
if list_base in to:
if ext == '?':
# If the key ends in "?", the list will only be merged if it doesn't
# already exist.
continue
if not isinstance(to[list_base], list):
# This may not have been checked above if merging in a list with an
# extension character.
raise TypeError, \
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[list_base].__class__.__name__ + \
' for key ' + list_base + '(' + k + ')'
else:
to[list_base] = []
# Call MergeLists, which will make copies of objects that require it.
      # MergeLists can recurse back into MergeDicts, although this will only
      # be to make copies of dicts (with paths fixed); there will be no
      # subsequent dict "merging" once entering a list, because lists are
      # always replaced, appended to, or prepended to.
is_paths = IsPathSection(list_base)
MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
else:
raise TypeError, \
'Attempt to merge dict value of unsupported type ' + \
v.__class__.__name__ + ' for key ' + k
def MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, visited):
  # Skip if previously visited.
if configuration in visited:
return
# Look at this configuration.
configuration_dict = target_dict['configurations'][configuration]
# Merge in parents.
for parent in configuration_dict.get('inherit_from', []):
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, parent, visited + [configuration])
# Merge it into the new config.
MergeDicts(new_configuration_dict, configuration_dict,
build_file, build_file)
# Drop abstract.
if 'abstract' in new_configuration_dict:
del new_configuration_dict['abstract']
def SetUpConfigurations(target, target_dict):
# key_suffixes is a list of key suffixes that might appear on key names.
# These suffixes are handled in conditional evaluations (for =, +, and ?)
# and rules/exclude processing (for ! and /). Keys with these suffixes
# should be treated the same as keys without.
key_suffixes = ['=', '+', '?', '!', '/']
build_file = gyp.common.BuildFile(target)
# Provide a single configuration by default if none exists.
# TODO(mark): Signal an error if default_configurations exists but
# configurations does not.
if not 'configurations' in target_dict:
target_dict['configurations'] = {'Default': {}}
if not 'default_configuration' in target_dict:
concrete = [i for i in target_dict['configurations'].iterkeys()
if not target_dict['configurations'][i].get('abstract')]
target_dict['default_configuration'] = sorted(concrete)[0]
for configuration in target_dict['configurations'].keys():
old_configuration_dict = target_dict['configurations'][configuration]
# Skip abstract configurations (saves work only).
if old_configuration_dict.get('abstract'):
continue
# Configurations inherit (most) settings from the enclosing target scope.
# Get the inheritance relationship right by making a copy of the target
# dict.
new_configuration_dict = copy.deepcopy(target_dict)
# Take out the bits that don't belong in a "configurations" section.
# Since configuration setup is done before conditional, exclude, and rules
# processing, be careful with handling of the suffix characters used in
# those phases.
delete_keys = []
for key in new_configuration_dict:
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if key_base in non_configuration_keys:
delete_keys.append(key)
for key in delete_keys:
del new_configuration_dict[key]
# Merge in configuration (with all its parents first).
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, [])
# Put the new result back into the target dict as a configuration.
target_dict['configurations'][configuration] = new_configuration_dict
# Now drop all the abstract ones.
for configuration in target_dict['configurations'].keys():
old_configuration_dict = target_dict['configurations'][configuration]
if old_configuration_dict.get('abstract'):
del target_dict['configurations'][configuration]
# Now that all of the target's configurations have been built, go through
# the target dict's keys and remove everything that's been moved into a
# "configurations" section.
delete_keys = []
for key in target_dict:
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
delete_keys.append(key)
for key in delete_keys:
del target_dict[key]
# Check the configurations to see if they contain invalid keys.
for configuration in target_dict['configurations'].keys():
configuration_dict = target_dict['configurations'][configuration]
for key in configuration_dict.keys():
if key in invalid_configuration_keys:
raise GypError('%s not allowed in the %s configuration, found in '
'target %s' % (key, configuration, target))
def ProcessListFiltersInDict(name, the_dict):
"""Process regular expression and exclusion-based filters on lists.
An exclusion list is in a dict key named with a trailing "!", like
"sources!". Every item in such a list is removed from the associated
main list, which in this example, would be "sources". Removed items are
placed into a "sources_excluded" list in the dict.
Regular expression (regex) filters are contained in dict keys named with a
trailing "/", such as "sources/" to operate on the "sources" list. Regex
filters in a dict take the form:
'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
['include', '_mac\\.cc$'] ],
The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
_win.cc. The second filter then includes all files ending in _mac.cc that
are now or were once in the "sources" list. Items matching an "exclude"
filter are subject to the same processing as would occur if they were listed
by name in an exclusion list (ending in "!"). Items matching an "include"
filter are brought back into the main list if previously excluded by an
exclusion list or exclusion regex filter. Subsequent matching "exclude"
patterns can still cause items to be excluded after matching an "include".
"""
# Look through the dictionary for any lists whose keys end in "!" or "/".
# These are lists that will be treated as exclude lists and regular
# expression-based exclude/include lists. Collect the lists that are
# needed first, looking for the lists that they operate on, and assemble
  # them into |lists|. This is done in a separate loop up front, because
# the _included and _excluded keys need to be added to the_dict, and that
# can't be done while iterating through it.
lists = []
del_lists = []
for key, value in the_dict.iteritems():
operation = key[-1]
if operation != '!' and operation != '/':
continue
if not isinstance(value, list):
raise ValueError, name + ' key ' + key + ' must be list, not ' + \
value.__class__.__name__
list_key = key[:-1]
if list_key not in the_dict:
# This happens when there's a list like "sources!" but no corresponding
# "sources" list. Since there's nothing for it to operate on, queue up
# the "sources!" list for deletion now.
del_lists.append(key)
continue
if not isinstance(the_dict[list_key], list):
value = the_dict[list_key]
raise ValueError, name + ' key ' + list_key + \
' must be list, not ' + \
value.__class__.__name__ + ' when applying ' + \
{'!': 'exclusion', '/': 'regex'}[operation]
if not list_key in lists:
lists.append(list_key)
# Delete the lists that are known to be unneeded at this point.
for del_list in del_lists:
del the_dict[del_list]
for list_key in lists:
the_list = the_dict[list_key]
# Initialize the list_actions list, which is parallel to the_list. Each
# item in list_actions identifies whether the corresponding item in
# the_list should be excluded, unconditionally preserved (included), or
# whether no exclusion or inclusion has been applied. Items for which
# no exclusion or inclusion has been applied (yet) have value -1, items
# excluded have value 0, and items included have value 1. Includes and
# excludes override previous actions. All items in list_actions are
# initialized to -1 because no excludes or includes have been processed
# yet.
list_actions = list((-1,) * len(the_list))
exclude_key = list_key + '!'
if exclude_key in the_dict:
for exclude_item in the_dict[exclude_key]:
for index in xrange(0, len(the_list)):
if exclude_item == the_list[index]:
# This item matches the exclude_item, so set its action to 0
# (exclude).
list_actions[index] = 0
# The "whatever!" list is no longer needed, dump it.
del the_dict[exclude_key]
regex_key = list_key + '/'
if regex_key in the_dict:
for regex_item in the_dict[regex_key]:
[action, pattern] = regex_item
pattern_re = re.compile(pattern)
if action == 'exclude':
# This item matches an exclude regex, so set its value to 0 (exclude).
action_value = 0
elif action == 'include':
# This item matches an include regex, so set its value to 1 (include).
action_value = 1
else:
# This is an action that doesn't make any sense.
raise ValueError, 'Unrecognized action ' + action + ' in ' + name + \
' key ' + regex_key
for index in xrange(0, len(the_list)):
list_item = the_list[index]
if list_actions[index] == action_value:
# Even if the regex matches, nothing will change so continue (regex
# searches are expensive).
continue
if pattern_re.search(list_item):
# Regular expression match.
list_actions[index] = action_value
# The "whatever/" list is no longer needed, dump it.
del the_dict[regex_key]
# Add excluded items to the excluded list.
#
# Note that exclude_key ("sources!") is different from excluded_key
# ("sources_excluded"). The exclude_key list is input and it was already
# processed and deleted; the excluded_key list is output and it's about
# to be created.
excluded_key = list_key + '_excluded'
if excluded_key in the_dict:
raise GypError(name + ' key ' + excluded_key +
                     ' must not be present prior '
                     'to applying exclusion/regex filters for ' + list_key)
excluded_list = []
# Go backwards through the list_actions list so that as items are deleted,
# the indices of items that haven't been seen yet don't shift. That means
# that things need to be prepended to excluded_list to maintain them in the
# same order that they existed in the_list.
for index in xrange(len(list_actions) - 1, -1, -1):
if list_actions[index] == 0:
# Dump anything with action 0 (exclude). Keep anything with action 1
# (include) or -1 (no include or exclude seen for the item).
excluded_list.insert(0, the_list[index])
del the_list[index]
# If anything was excluded, put the excluded list into the_dict at
# excluded_key.
if len(excluded_list) > 0:
the_dict[excluded_key] = excluded_list
# Now recurse into subdicts and lists that may contain dicts.
for key, value in the_dict.iteritems():
if isinstance(value, dict):
ProcessListFiltersInDict(key, value)
elif isinstance(value, list):
ProcessListFiltersInList(key, value)
def ProcessListFiltersInList(name, the_list):
for item in the_list:
if isinstance(item, dict):
ProcessListFiltersInDict(name, item)
elif isinstance(item, list):
ProcessListFiltersInList(name, item)
def ValidateTargetType(target, target_dict):
"""Ensures the 'type' field on the target is one of the known types.
Arguments:
target: string, name of target.
target_dict: dict, target spec.
Raises an exception on error.
"""
VALID_TARGET_TYPES = ('executable', 'loadable_module',
'static_library', 'shared_library',
'none')
target_type = target_dict.get('type', None)
if target_type not in VALID_TARGET_TYPES:
raise GypError("Target %s has an invalid target type '%s'. "
"Must be one of %s." %
(target, target_type, '/'.join(VALID_TARGET_TYPES)))
if (target_dict.get('standalone_static_library', 0) and
not target_type == 'static_library'):
raise GypError('Target %s has type %s but standalone_static_library flag is'
' only valid for static_library type.' % (target,
target_type))
def ValidateSourcesInTarget(target, target_dict, build_file):
# TODO: Check if MSVC allows this for loadable_module targets.
if target_dict.get('type', None) not in ('static_library', 'shared_library'):
return
sources = target_dict.get('sources', [])
basenames = {}
for source in sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
target + error + 'Some build systems, e.g. MSVC08, '
'cannot handle that.')
raise GypError('Duplicate basenames in sources section, see list above')
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
"""Ensures that the rules sections in target_dict are valid and consistent,
and determines which sources they apply to.
Arguments:
target: string, name of target.
target_dict: dict, target spec containing "rules" and "sources" lists.
extra_sources_for_rules: a list of keys to scan for rule matches in
addition to 'sources'.
"""
# Dicts to map between values found in rules' 'rule_name' and 'extension'
# keys and the rule dicts themselves.
rule_names = {}
rule_extensions = {}
rules = target_dict.get('rules', [])
for rule in rules:
# Make sure that there's no conflict among rule names and extensions.
rule_name = rule['rule_name']
if rule_name in rule_names:
raise GypError('rule %s exists in duplicate, target %s' %
(rule_name, target))
rule_names[rule_name] = rule
rule_extension = rule['extension']
if rule_extension.startswith('.'):
rule_extension = rule_extension[1:]
if rule_extension in rule_extensions:
raise GypError(('extension %s associated with multiple rules, ' +
'target %s rules %s and %s') %
(rule_extension, target,
rule_extensions[rule_extension]['rule_name'],
rule_name))
rule_extensions[rule_extension] = rule
# Make sure rule_sources isn't already there. It's going to be
# created below if needed.
if 'rule_sources' in rule:
raise GypError(
'rule_sources must not exist in input, target %s rule %s' %
(target, rule_name))
rule_sources = []
source_keys = ['sources']
source_keys.extend(extra_sources_for_rules)
for source_key in source_keys:
for source in target_dict.get(source_key, []):
(source_root, source_extension) = os.path.splitext(source)
if source_extension.startswith('.'):
source_extension = source_extension[1:]
if source_extension == rule_extension:
rule_sources.append(source)
if len(rule_sources) > 0:
rule['rule_sources'] = rule_sources
def ValidateRunAsInTarget(target, target_dict, build_file):
target_name = target_dict.get('target_name')
run_as = target_dict.get('run_as')
if not run_as:
return
if not isinstance(run_as, dict):
raise GypError("The 'run_as' in target %s from file %s should be a "
"dictionary." %
(target_name, build_file))
action = run_as.get('action')
if not action:
raise GypError("The 'run_as' in target %s from file %s must have an "
"'action' section." %
(target_name, build_file))
if not isinstance(action, list):
raise GypError("The 'action' for 'run_as' in target %s from file %s "
"must be a list." %
(target_name, build_file))
working_directory = run_as.get('working_directory')
if working_directory and not isinstance(working_directory, str):
raise GypError("The 'working_directory' for 'run_as' in target %s "
"in file %s should be a string." %
(target_name, build_file))
environment = run_as.get('environment')
if environment and not isinstance(environment, dict):
raise GypError("The 'environment' for 'run_as' in target %s "
"in file %s should be a dictionary." %
(target_name, build_file))
def ValidateActionsInTarget(target, target_dict, build_file):
'''Validates the inputs to the actions in a target.'''
target_name = target_dict.get('target_name')
actions = target_dict.get('actions', [])
for action in actions:
action_name = action.get('action_name')
if not action_name:
raise GypError("Anonymous action in target %s. "
"An action must have an 'action_name' field." %
target_name)
inputs = action.get('inputs', None)
if inputs is None:
raise GypError('Action in target %s has no inputs.' % target_name)
action_command = action.get('action')
if action_command and not action_command[0]:
raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
"""Given dict the_dict, recursively converts all integers into strings.
"""
# Use items instead of iteritems because there's no need to try to look at
# reinserted keys and their associated values.
for k, v in the_dict.items():
if isinstance(v, int):
v = str(v)
the_dict[k] = v
elif isinstance(v, dict):
TurnIntIntoStrInDict(v)
elif isinstance(v, list):
TurnIntIntoStrInList(v)
if isinstance(k, int):
the_dict[str(k)] = v
del the_dict[k]
def TurnIntIntoStrInList(the_list):
"""Given list the_list, recursively converts all integers into strings.
"""
for index in xrange(0, len(the_list)):
item = the_list[index]
if isinstance(item, int):
the_list[index] = str(item)
elif isinstance(item, dict):
TurnIntIntoStrInDict(item)
elif isinstance(item, list):
TurnIntIntoStrInList(item)
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
data):
"""Return only the targets that are deep dependencies of |root_targets|."""
qualified_root_targets = []
for target in root_targets:
target = target.strip()
qualified_targets = gyp.common.FindQualifiedTargets(target, flat_list)
if not qualified_targets:
raise GypError("Could not find target %s" % target)
qualified_root_targets.extend(qualified_targets)
wanted_targets = {}
for target in qualified_root_targets:
wanted_targets[target] = targets[target]
for dependency in dependency_nodes[target].DeepDependencies():
wanted_targets[dependency] = targets[dependency]
wanted_flat_list = [t for t in flat_list if t in wanted_targets]
# Prune unwanted targets from each build_file's data dict.
for build_file in data['target_build_files']:
if not 'targets' in data[build_file]:
continue
new_targets = []
for target in data[build_file]['targets']:
qualified_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if qualified_name in wanted_targets:
new_targets.append(target)
data[build_file]['targets'] = new_targets
return wanted_targets, wanted_flat_list
def VerifyNoCollidingTargets(targets):
"""Verify that no two targets in the same directory share the same name.
Arguments:
targets: A list of targets in the form 'path/to/file.gyp:target_name'.
"""
# Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'.
used = {}
for target in targets:
# Separate out 'path/to/file.gyp, 'target_name' from
# 'path/to/file.gyp:target_name'.
path, name = target.rsplit(':', 1)
# Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'.
subdir, gyp = os.path.split(path)
# Use '.' for the current directory '', so that the error messages make
# more sense.
if not subdir:
subdir = '.'
# Prepare a key like 'path/to:target_name'.
key = subdir + ':' + name
if key in used:
# Complain if this target is already used.
raise GypError('Duplicate target name "%s" in directory "%s" used both '
'in "%s" and "%s".' % (name, subdir, gyp, used[key]))
used[key] = gyp
def SetGeneratorGlobals(generator_input_info):
# Set up path_sections and non_configuration_keys with the default data plus
# the generator-specific data.
global path_sections
path_sections = base_path_sections[:]
path_sections.extend(generator_input_info['path_sections'])
global non_configuration_keys
non_configuration_keys = base_non_configuration_keys[:]
non_configuration_keys.extend(generator_input_info['non_configuration_keys'])
global multiple_toolsets
multiple_toolsets = generator_input_info[
'generator_supports_multiple_toolsets']
global generator_filelist_paths
generator_filelist_paths = generator_input_info['generator_filelist_paths']
def Load(build_files, variables, includes, depth, generator_input_info, check,
circular_check, parallel, root_targets):
SetGeneratorGlobals(generator_input_info)
# A generator can have other lists (in addition to sources) be processed
# for rules.
extra_sources_for_rules = generator_input_info['extra_sources_for_rules']
# Load build files. This loads every target-containing build file into
# the |data| dictionary such that the keys to |data| are build file names,
# and the values are the entire build file contents after "early" or "pre"
# processing has been done and includes have been resolved.
# NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
# well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
# track of the keys corresponding to "target" files.
data = {'target_build_files': set()}
aux_data = {}
# Normalize paths everywhere. This is important because paths will be
# used as keys to the data dict and for references between input files.
build_files = set(map(os.path.normpath, build_files))
if parallel:
LoadTargetBuildFilesParallel(build_files, data, aux_data,
variables, includes, depth, check,
generator_input_info)
else:
for build_file in build_files:
try:
LoadTargetBuildFile(build_file, data, aux_data,
variables, includes, depth, check, True)
except Exception, e:
gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
raise
# Build a dict to access each target's subdict by qualified name.
targets = BuildTargetsDict(data)
# Fully qualify all dependency links.
QualifyDependencies(targets)
# Remove self-dependencies from targets that have 'prune_self_dependencies'
# set to 1.
RemoveSelfDependencies(targets)
# Expand dependencies specified as build_file:*.
ExpandWildcardDependencies(targets, data)
# Apply exclude (!) and regex (/) list filters only for dependency_sections.
for target_name, target_dict in targets.iteritems():
tmp_dict = {}
for key_base in dependency_sections:
for op in ('', '!', '/'):
key = key_base + op
if key in target_dict:
tmp_dict[key] = target_dict[key]
del target_dict[key]
ProcessListFiltersInDict(target_name, tmp_dict)
# Write the results back to |target_dict|.
for key in tmp_dict:
target_dict[key] = tmp_dict[key]
# Make sure every dependency appears at most once.
RemoveDuplicateDependencies(targets)
if circular_check:
# Make sure that any targets in a.gyp don't contain dependencies in other
# .gyp files that further depend on a.gyp.
VerifyNoGYPFileCircularDependencies(targets)
[dependency_nodes, flat_list] = BuildDependencyList(targets)
if root_targets:
# Remove, from |targets| and |flat_list|, the targets that are not deep
# dependencies of the targets specified in |root_targets|.
targets, flat_list = PruneUnwantedTargets(
targets, flat_list, dependency_nodes, root_targets, data)
# Check that no two targets in the same directory have the same name.
VerifyNoCollidingTargets(flat_list)
# Handle dependent settings of various types.
for settings_type in ['all_dependent_settings',
'direct_dependent_settings',
'link_settings']:
DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)
# Take out the dependent settings now that they've been published to all
# of the targets that require them.
for target in flat_list:
if settings_type in targets[target]:
del targets[target][settings_type]
# Make sure static libraries don't declare dependencies on other static
# libraries, but that linkables depend on all unlinked static libraries
# that they need so that their link steps will be correct.
gii = generator_input_info
if gii['generator_wants_static_library_dependencies_adjusted']:
AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
gii['generator_wants_sorted_dependencies'])
# Apply "post"/"late"/"target" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATE, variables, build_file)
# Move everything that can go into a "configurations" section into one.
for target in flat_list:
target_dict = targets[target]
SetUpConfigurations(target, target_dict)
# Apply exclude (!) and regex (/) list filters.
for target in flat_list:
target_dict = targets[target]
ProcessListFiltersInDict(target, target_dict)
# Apply "latelate" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATELATE, variables, build_file)
# Make sure that the rules make sense, and build up rule_sources lists as
# needed. Not all generators will need to use the rule_sources lists, but
# some may, and it seems best to build the list in a common spot.
# Also validate actions and run_as elements in targets.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ValidateTargetType(target, target_dict)
# TODO(thakis): Get vpx_scale/arm/scalesystemdependent.c to be renamed to
# scalesystemdependent_arm_additions.c or similar.
if 'arm' not in variables.get('target_arch', ''):
ValidateSourcesInTarget(target, target_dict, build_file)
ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
ValidateRunAsInTarget(target, target_dict, build_file)
ValidateActionsInTarget(target, target_dict, build_file)
# Generators might not expect ints. Turn them into strs.
TurnIntIntoStrInDict(data)
# TODO(mark): Return |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
return [flat_list, targets, data]
| apache-2.0 |
daizhengy/RDS | trove/tests/unittests/guestagent/test_backups.py | 4 | 18819 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
import mock
from mock import patch, ANY
import trove.guestagent.strategies.backup.base as backupBase
import trove.guestagent.strategies.restore.base as restoreBase
from trove.guestagent.strategies.restore.mysql_impl import MySQLRestoreMixin
from trove.guestagent.strategies.backup import mysql_impl
from trove.common import utils
from trove.common import exception
BACKUP_XTRA_CLS = ("trove.guestagent.strategies.backup."
"mysql_impl.InnoBackupEx")
RESTORE_XTRA_CLS = ("trove.guestagent.strategies.restore."
"mysql_impl.InnoBackupEx")
BACKUP_XTRA_INCR_CLS = ("trove.guestagent.strategies.backup."
"mysql_impl.InnoBackupExIncremental")
RESTORE_XTRA_INCR_CLS = ("trove.guestagent.strategies.restore."
"mysql_impl.InnoBackupExIncremental")
BACKUP_SQLDUMP_CLS = ("trove.guestagent.strategies.backup."
"mysql_impl.MySQLDump")
RESTORE_SQLDUMP_CLS = ("trove.guestagent.strategies.restore."
"mysql_impl.MySQLDump")
BACKUP_CBBACKUP_CLS = ("trove.guestagent.strategies.backup."
"experimental.couchbase_impl.CbBackup")
RESTORE_CBBACKUP_CLS = ("trove.guestagent.strategies.restore."
"experimental.couchbase_impl.CbBackup")
PIPE = " | "
ZIP = "gzip"
UNZIP = "gzip -d -c"
ENCRYPT = "openssl enc -aes-256-cbc -salt -pass pass:default_aes_cbc_key"
DECRYPT = "openssl enc -d -aes-256-cbc -salt -pass pass:default_aes_cbc_key"
XTRA_BACKUP_RAW = ("sudo innobackupex --stream=xbstream %(extra_opts)s"
" /var/lib/mysql 2>/tmp/innobackupex.log")
XTRA_BACKUP = XTRA_BACKUP_RAW % {'extra_opts': ''}
XTRA_BACKUP_EXTRA_OPTS = XTRA_BACKUP_RAW % {'extra_opts': '--no-lock'}
XTRA_BACKUP_INCR = ('sudo innobackupex --stream=xbstream'
' --incremental --incremental-lsn=%(lsn)s'
' %(extra_opts)s /var/lib/mysql 2>/tmp/innobackupex.log')
SQLDUMP_BACKUP_RAW = ("mysqldump --all-databases %(extra_opts)s "
"--opt --password=password -u os_admin"
" 2>/tmp/mysqldump.log")
SQLDUMP_BACKUP = SQLDUMP_BACKUP_RAW % {'extra_opts': ''}
SQLDUMP_BACKUP_EXTRA_OPTS = (SQLDUMP_BACKUP_RAW %
{'extra_opts': '--events --routines --triggers'})
XTRA_RESTORE_RAW = "sudo xbstream -x -C %(restore_location)s"
XTRA_RESTORE = XTRA_RESTORE_RAW % {'restore_location': '/var/lib/mysql'}
XTRA_INCR_PREPARE = ("sudo innobackupex --apply-log"
" --redo-only /var/lib/mysql"
" --defaults-file=/var/lib/mysql/backup-my.cnf"
" --ibbackup xtrabackup %(incr)s"
" 2>/tmp/innoprepare.log")
SQLDUMP_RESTORE = "sudo mysql"
PREPARE = ("sudo innobackupex --apply-log /var/lib/mysql "
"--defaults-file=/var/lib/mysql/backup-my.cnf "
"--ibbackup xtrabackup 2>/tmp/innoprepare.log")
CRYPTO_KEY = "default_aes_cbc_key"
CBBACKUP_CMD = "tar cpPf - /tmp/backups"
CBBACKUP_RESTORE = "sudo tar xpPf -"
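# The expected command strings asserted in the tests below are built by piping
# these pieces together; for example, an encrypted and zipped xtrabackup backup
# command is expected to be XTRA_BACKUP + PIPE + ZIP + PIPE + ENCRYPT.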
class GuestAgentBackupTest(testtools.TestCase):
def setUp(self):
super(GuestAgentBackupTest, self).setUp()
self.orig = mysql_impl.get_auth_password
mysql_impl.get_auth_password = mock.Mock(
return_value='password')
self.orig_exec_with_to = utils.execute_with_timeout
def tearDown(self):
super(GuestAgentBackupTest, self).tearDown()
mysql_impl.get_auth_password = self.orig
utils.execute_with_timeout = self.orig_exec_with_to
def test_backup_decrypted_xtrabackup_command(self):
backupBase.BackupRunner.is_zipped = True
backupBase.BackupRunner.is_encrypted = False
RunnerClass = utils.import_class(BACKUP_XTRA_CLS)
bkup = RunnerClass(12345, extra_opts="")
self.assertEqual(XTRA_BACKUP + PIPE + ZIP, bkup.command)
self.assertEqual("12345.xbstream.gz", bkup.manifest)
def test_backup_decrypted_xtrabackup_with_extra_opts_command(self):
backupBase.BackupRunner.is_zipped = True
backupBase.BackupRunner.is_encrypted = False
RunnerClass = utils.import_class(BACKUP_XTRA_CLS)
bkup = RunnerClass(12345, extra_opts="--no-lock")
self.assertEqual(XTRA_BACKUP_EXTRA_OPTS + PIPE + ZIP, bkup.command)
self.assertEqual("12345.xbstream.gz", bkup.manifest)
def test_backup_encrypted_xtrabackup_command(self):
backupBase.BackupRunner.is_zipped = True
backupBase.BackupRunner.is_encrypted = True
backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
RunnerClass = utils.import_class(BACKUP_XTRA_CLS)
bkup = RunnerClass(12345, extra_opts="")
self.assertEqual(XTRA_BACKUP + PIPE + ZIP + PIPE + ENCRYPT,
bkup.command)
self.assertEqual("12345.xbstream.gz.enc", bkup.manifest)
def test_backup_xtrabackup_incremental(self):
backupBase.BackupRunner.is_zipped = True
backupBase.BackupRunner.is_encrypted = False
RunnerClass = utils.import_class(BACKUP_XTRA_INCR_CLS)
opts = {'lsn': '54321', 'extra_opts': ''}
expected = (XTRA_BACKUP_INCR % opts) + PIPE + ZIP
bkup = RunnerClass(12345, extra_opts="", lsn="54321")
self.assertEqual(expected, bkup.command)
self.assertEqual("12345.xbstream.gz", bkup.manifest)
def test_backup_xtrabackup_incremental_with_extra_opts_command(self):
backupBase.BackupRunner.is_zipped = True
backupBase.BackupRunner.is_encrypted = False
RunnerClass = utils.import_class(BACKUP_XTRA_INCR_CLS)
opts = {'lsn': '54321', 'extra_opts': '--no-lock'}
expected = (XTRA_BACKUP_INCR % opts) + PIPE + ZIP
bkup = RunnerClass(12345, extra_opts="--no-lock", lsn="54321")
self.assertEqual(expected, bkup.command)
self.assertEqual("12345.xbstream.gz", bkup.manifest)
def test_backup_xtrabackup_incremental_encrypted(self):
backupBase.BackupRunner.is_zipped = True
backupBase.BackupRunner.is_encrypted = True
backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
RunnerClass = utils.import_class(BACKUP_XTRA_INCR_CLS)
opts = {'lsn': '54321', 'extra_opts': ''}
expected = (XTRA_BACKUP_INCR % opts) + PIPE + ZIP + PIPE + ENCRYPT
bkup = RunnerClass(12345, extra_opts="", lsn="54321")
self.assertEqual(expected, bkup.command)
self.assertEqual("12345.xbstream.gz.enc", bkup.manifest)
def test_backup_decrypted_mysqldump_command(self):
backupBase.BackupRunner.is_zipped = True
backupBase.BackupRunner.is_encrypted = False
RunnerClass = utils.import_class(BACKUP_SQLDUMP_CLS)
bkup = RunnerClass(12345, extra_opts="")
self.assertEqual(SQLDUMP_BACKUP + PIPE + ZIP, bkup.command)
self.assertEqual("12345.gz", bkup.manifest)
def test_backup_decrypted_mysqldump_with_extra_opts_command(self):
backupBase.BackupRunner.is_zipped = True
backupBase.BackupRunner.is_encrypted = False
RunnerClass = utils.import_class(BACKUP_SQLDUMP_CLS)
bkup = RunnerClass(12345, extra_opts="--events --routines --triggers")
self.assertEqual(SQLDUMP_BACKUP_EXTRA_OPTS + PIPE + ZIP, bkup.command)
self.assertEqual("12345.gz", bkup.manifest)
def test_backup_encrypted_mysqldump_command(self):
backupBase.BackupRunner.is_zipped = True
backupBase.BackupRunner.is_encrypted = True
backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
RunnerClass = utils.import_class(BACKUP_SQLDUMP_CLS)
bkup = RunnerClass(12345, user="user",
password="password", extra_opts="")
self.assertEqual(SQLDUMP_BACKUP + PIPE + ZIP + PIPE + ENCRYPT,
bkup.command)
self.assertEqual("12345.gz.enc", bkup.manifest)
def test_restore_decrypted_xtrabackup_command(self):
restoreBase.RestoreRunner.is_zipped = True
restoreBase.RestoreRunner.is_encrypted = False
RunnerClass = utils.import_class(RESTORE_XTRA_CLS)
restr = RunnerClass(None, restore_location="/var/lib/mysql",
location="filename", checksum="md5")
self.assertEqual(UNZIP + PIPE + XTRA_RESTORE, restr.restore_cmd)
self.assertEqual(PREPARE, restr.prepare_cmd)
def test_restore_encrypted_xtrabackup_command(self):
restoreBase.RestoreRunner.is_zipped = True
restoreBase.RestoreRunner.is_encrypted = True
restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY
RunnerClass = utils.import_class(RESTORE_XTRA_CLS)
restr = RunnerClass(None, restore_location="/var/lib/mysql",
location="filename", checksum="md5")
self.assertEqual(DECRYPT + PIPE + UNZIP + PIPE + XTRA_RESTORE,
restr.restore_cmd)
self.assertEqual(PREPARE, restr.prepare_cmd)
def test_restore_xtrabackup_incremental_prepare_command(self):
RunnerClass = utils.import_class(RESTORE_XTRA_INCR_CLS)
restr = RunnerClass(None, restore_location="/var/lib/mysql",
location="filename", checksum="m5d")
# Final prepare command (same as normal xtrabackup)
self.assertEqual(PREPARE, restr.prepare_cmd)
# Incremental backup prepare command
expected = XTRA_INCR_PREPARE % {'incr': '--incremental-dir=/foo/bar/'}
observed = restr._incremental_prepare_cmd('/foo/bar/')
self.assertEqual(expected, observed)
# Full backup prepare command
expected = XTRA_INCR_PREPARE % {'incr': ''}
observed = restr._incremental_prepare_cmd(None)
self.assertEqual(expected, observed)
def test_restore_decrypted_xtrabackup_incremental_command(self):
restoreBase.RestoreRunner.is_zipped = True
restoreBase.RestoreRunner.is_encrypted = False
RunnerClass = utils.import_class(RESTORE_XTRA_INCR_CLS)
restr = RunnerClass(None, restore_location="/var/lib/mysql",
location="filename", checksum="m5d")
# Full restore command
expected = UNZIP + PIPE + XTRA_RESTORE
self.assertEqual(expected, restr.restore_cmd)
# Incremental backup restore command
opts = {'restore_location': '/foo/bar/'}
expected = UNZIP + PIPE + (XTRA_RESTORE_RAW % opts)
observed = restr._incremental_restore_cmd('/foo/bar/')
self.assertEqual(expected, observed)
def test_restore_encrypted_xtrabackup_incremental_command(self):
restoreBase.RestoreRunner.is_zipped = True
restoreBase.RestoreRunner.is_encrypted = True
restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY
RunnerClass = utils.import_class(RESTORE_XTRA_INCR_CLS)
restr = RunnerClass(None, restore_location="/var/lib/mysql",
location="filename", checksum="md5")
# Full restore command
expected = DECRYPT + PIPE + UNZIP + PIPE + XTRA_RESTORE
self.assertEqual(expected, restr.restore_cmd)
# Incremental backup restore command
opts = {'restore_location': '/foo/bar/'}
expected = DECRYPT + PIPE + UNZIP + PIPE + (XTRA_RESTORE_RAW % opts)
observed = restr._incremental_restore_cmd('/foo/bar/')
self.assertEqual(expected, observed)
def test_restore_decrypted_mysqldump_command(self):
restoreBase.RestoreRunner.is_zipped = True
restoreBase.RestoreRunner.is_encrypted = False
RunnerClass = utils.import_class(RESTORE_SQLDUMP_CLS)
restr = RunnerClass(None, restore_location="/var/lib/mysql",
location="filename", checksum="md5")
self.assertEqual(UNZIP + PIPE + SQLDUMP_RESTORE, restr.restore_cmd)
def test_restore_encrypted_mysqldump_command(self):
restoreBase.RestoreRunner.is_zipped = True
restoreBase.RestoreRunner.is_encrypted = True
restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY
RunnerClass = utils.import_class(RESTORE_SQLDUMP_CLS)
restr = RunnerClass(None, restore_location="/var/lib/mysql",
location="filename", checksum="md5")
self.assertEqual(DECRYPT + PIPE + UNZIP + PIPE + SQLDUMP_RESTORE,
restr.restore_cmd)
def test_backup_encrypted_cbbackup_command(self):
backupBase.BackupRunner.is_encrypted = True
backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
RunnerClass = utils.import_class(BACKUP_CBBACKUP_CLS)
utils.execute_with_timeout = mock.Mock(return_value=None)
bkp = RunnerClass(12345)
self.assertIsNotNone(bkp)
self.assertEqual(
CBBACKUP_CMD + PIPE + ZIP + PIPE + ENCRYPT, bkp.command)
self.assertIn("gz.enc", bkp.manifest)
def test_backup_not_encrypted_cbbackup_command(self):
backupBase.BackupRunner.is_encrypted = False
backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
RunnerClass = utils.import_class(BACKUP_CBBACKUP_CLS)
utils.execute_with_timeout = mock.Mock(return_value=None)
bkp = RunnerClass(12345)
self.assertIsNotNone(bkp)
self.assertEqual(CBBACKUP_CMD + PIPE + ZIP, bkp.command)
self.assertIn("gz", bkp.manifest)
def test_restore_decrypted_cbbackup_command(self):
restoreBase.RestoreRunner.is_zipped = True
restoreBase.RestoreRunner.is_encrypted = False
RunnerClass = utils.import_class(RESTORE_CBBACKUP_CLS)
restr = RunnerClass(None, restore_location="/tmp",
location="filename", checksum="md5")
self.assertEqual(UNZIP + PIPE + CBBACKUP_RESTORE, restr.restore_cmd)
def test_restore_encrypted_cbbackup_command(self):
restoreBase.RestoreRunner.is_zipped = True
restoreBase.RestoreRunner.is_encrypted = True
restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY
RunnerClass = utils.import_class(RESTORE_CBBACKUP_CLS)
restr = RunnerClass(None, restore_location="/tmp",
location="filename", checksum="md5")
self.assertEqual(DECRYPT + PIPE + UNZIP + PIPE + CBBACKUP_RESTORE,
restr.restore_cmd)
def test_reset_root_password_on_mysql_restore(self):
with patch.object(utils, 'execute_with_timeout',
return_value=True) as exec_call:
with patch.object(MySQLRestoreMixin,
'_start_mysqld_safe_with_init_file',
return_value=True):
inst = MySQLRestoreMixin()
inst.reset_root_password()
self.assertEqual(2, exec_call.call_count,
"'execute_with_timeout' "
"called an unexpected number of times")
exec_call.assert_any_call("sudo", "chmod", "a+r",
ANY)
# Make sure the temporary error log got deleted as root
# (see bug/1423759).
exec_call.assert_any_call("rm", "-f", ANY, run_as_root=True,
root_helper="sudo")
class CouchbaseBackupTests(testtools.TestCase):
def setUp(self):
super(CouchbaseBackupTests, self).setUp()
self.backup_runner = utils.import_class(
BACKUP_CBBACKUP_CLS)
def tearDown(self):
super(CouchbaseBackupTests, self).tearDown()
def test_backup_success(self):
self.backup_runner.__exit__ = mock.Mock()
self.backup_runner.run = mock.Mock()
self.backup_runner._run_pre_backup = mock.Mock()
self.backup_runner._run_post_backup = mock.Mock()
utils.execute_with_timeout = mock.Mock(return_value=None)
with self.backup_runner(12345):
pass
self.assertTrue(self.backup_runner.run)
self.assertTrue(self.backup_runner._run_pre_backup)
self.assertTrue(self.backup_runner._run_post_backup)
def test_backup_failed_due_to_run_backup(self):
self.backup_runner.run = mock.Mock(
side_effect=exception.ProcessExecutionError('test'))
self.backup_runner._run_pre_backup = mock.Mock()
self.backup_runner._run_post_backup = mock.Mock()
utils.execute_with_timeout = mock.Mock(return_value=None)
self.assertRaises(exception.ProcessExecutionError,
self.backup_runner(12345).__enter__)
class CouchbaseRestoreTests(testtools.TestCase):
def setUp(self):
super(CouchbaseRestoreTests, self).setUp()
self.restore_runner = utils.import_class(
RESTORE_CBBACKUP_CLS)(
'swift', location='http://some.where',
checksum='True_checksum',
restore_location='/tmp/somewhere')
def tearDown(self):
super(CouchbaseRestoreTests, self).tearDown()
def test_restore_success(self):
expected_content_length = 123
self.restore_runner._run_restore = mock.Mock(
return_value=expected_content_length)
self.restore_runner.pre_restore = mock.Mock()
self.restore_runner.post_restore = mock.Mock()
actual_content_length = self.restore_runner.restore()
self.assertEqual(
expected_content_length, actual_content_length)
def test_restore_failed_due_to_pre_restore(self):
self.restore_runner.post_restore = mock.Mock()
self.restore_runner.pre_restore = mock.Mock(
side_effect=exception.ProcessExecutionError('Error'))
self.restore_runner._run_restore = mock.Mock()
self.assertRaises(exception.ProcessExecutionError,
self.restore_runner.restore)
def test_restore_failed_due_to_run_restore(self):
self.restore_runner.pre_restore = mock.Mock()
self.restore_runner._run_restore = mock.Mock(
side_effect=exception.ProcessExecutionError('Error'))
self.restore_runner.post_restore = mock.Mock()
self.assertRaises(exception.ProcessExecutionError,
self.restore_runner.restore)
| apache-2.0 |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/numpy/compat/_inspect.py | 114 | 7553 | """Subset of inspect module from upstream python
We use this instead of upstream because upstream inspect is slow to import, and
significantly contributes to numpy import times. Importing this copy has almost
no overhead.
"""
from __future__ import division, absolute_import, print_function
import types
__all__ = ['getargspec', 'formatargspec']
# ----------------------------------------------------------- type-checking
def ismethod(object):
"""Return true if the object is an instance method.
Instance method objects provide these attributes:
__doc__ documentation string
__name__ name with which this method was defined
im_class class object in which this method belongs
im_func function object containing implementation of method
im_self instance to which this method is bound, or None
"""
return isinstance(object, types.MethodType)
def isfunction(object):
"""Return true if the object is a user-defined function.
Function objects provide these attributes:
__doc__ documentation string
__name__ name with which this function was defined
func_code code object containing compiled function bytecode
func_defaults tuple of any default values for arguments
func_doc (same as __doc__)
func_globals global namespace in which this function was defined
func_name (same as __name__)
"""
return isinstance(object, types.FunctionType)
def iscode(object):
"""Return true if the object is a code object.
Code objects provide these attributes:
co_argcount number of arguments (not including * or ** args)
co_code string of raw compiled bytecode
co_consts tuple of constants used in the bytecode
co_filename name of file in which this code object was created
co_firstlineno number of first line in Python source code
co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
co_lnotab encoded mapping of line numbers to bytecode indices
co_name name with which this code object was defined
co_names tuple of names of local variables
co_nlocals number of local variables
co_stacksize virtual machine stack space required
co_varnames tuple of names of arguments and local variables
"""
return isinstance(object, types.CodeType)
# ------------------------------------------------ argument list extraction
# These constants are from Python's compile.h.
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8
def getargs(co):
"""Get information about the arguments accepted by a code object.
Three things are returned: (args, varargs, varkw), where 'args' is
a list of argument names (possibly containing nested lists), and
'varargs' and 'varkw' are the names of the * and ** arguments or None.
"""
if not iscode(co):
raise TypeError('arg is not a code object')
nargs = co.co_argcount
names = co.co_varnames
args = list(names[:nargs])
# The following acrobatics are for anonymous (tuple) arguments.
# Which we do not need to support, so remove to avoid importing
# the dis module.
for i in range(nargs):
if args[i][:1] in ['', '.']:
raise TypeError("tuple function arguments are not supported")
varargs = None
if co.co_flags & CO_VARARGS:
varargs = co.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if co.co_flags & CO_VARKEYWORDS:
varkw = co.co_varnames[nargs]
return args, varargs, varkw
def getargspec(func):
"""Get the names and default values of a function's arguments.
A tuple of four things is returned: (args, varargs, varkw, defaults).
'args' is a list of the argument names (it may contain nested lists).
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'defaults' is an n-tuple of the default values of the last n arguments.
"""
if ismethod(func):
func = func.__func__
if not isfunction(func):
raise TypeError('arg is not a Python function')
args, varargs, varkw = getargs(func.__code__)
return args, varargs, varkw, func.__defaults__
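# Example (illustrative): the returned tuple mirrors the return statement above.
#   >>> def f(a, b=1, *args, **kw): pass
#   >>> getargspec(f)
#   (['a', 'b'], 'args', 'kw', (1,))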
def getargvalues(frame):
"""Get information about arguments passed into a particular frame.
A tuple of four things is returned: (args, varargs, varkw, locals).
'args' is a list of the argument names (it may contain nested lists).
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'locals' is the locals dictionary of the given frame.
"""
args, varargs, varkw = getargs(frame.f_code)
return args, varargs, varkw, frame.f_locals
def joinseq(seq):
if len(seq) == 1:
return '(' + seq[0] + ',)'
else:
return '(' + ', '.join(seq) + ')'
def strseq(object, convert, join=joinseq):
"""Recursively walk a sequence, stringifying each element.
"""
if type(object) in [list, tuple]:
return join([strseq(_o, convert, join) for _o in object])
else:
return convert(object)
def formatargspec(args, varargs=None, varkw=None, defaults=None,
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
join=joinseq):
"""Format an argument spec from the 4 values returned by getargspec.
The first four arguments are (args, varargs, varkw, defaults). The
other four arguments are the corresponding optional formatting functions
that are called to turn names and values into strings. The ninth
argument is an optional function to format the sequence of arguments.
"""
specs = []
if defaults:
firstdefault = len(args) - len(defaults)
for i in range(len(args)):
spec = strseq(args[i], formatarg, join)
if defaults and i >= firstdefault:
spec = spec + formatvalue(defaults[i - firstdefault])
specs.append(spec)
if varargs is not None:
specs.append(formatvarargs(varargs))
if varkw is not None:
specs.append(formatvarkw(varkw))
return '(' + ', '.join(specs) + ')'
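# Example (illustrative), formatting the argspec shown in the getargspec example:
#   >>> formatargspec(['a', 'b'], 'args', 'kw', (1,))
#   '(a, b=1, *args, **kw)'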
def formatargvalues(args, varargs, varkw, locals,
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
join=joinseq):
"""Format an argument spec from the 4 values returned by getargvalues.
The first four arguments are (args, varargs, varkw, locals). The
next four arguments are the corresponding optional formatting functions
that are called to turn names and values into strings. The ninth
argument is an optional function to format the sequence of arguments.
"""
def convert(name, locals=locals,
formatarg=formatarg, formatvalue=formatvalue):
return formatarg(name) + formatvalue(locals[name])
specs = []
for i in range(len(args)):
specs.append(strseq(args[i], convert, join))
if varargs:
specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
if varkw:
specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
return '(' + ', '.join(specs) + ')'
| gpl-2.0 |
erh3cq/hyperspy | hyperspy/_signals/signal1d.py | 2 | 61717 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
import math
import matplotlib.pyplot as plt
import numpy as np
import dask.array as da
import scipy.interpolate
import scipy as sp
from scipy.signal import savgol_filter
from scipy.ndimage.filters import gaussian_filter1d
from hyperspy.signal import BaseSignal
from hyperspy._signals.common_signal1d import CommonSignal1D
from hyperspy.signal_tools import SpikesRemoval, SpikesRemovalInteractive
from hyperspy.models.model1d import Model1D
from hyperspy.misc.lowess_smooth import lowess
from hyperspy.defaults_parser import preferences
from hyperspy.signal_tools import (
Signal1DCalibration,
SmoothingSavitzkyGolay,
SmoothingLowess,
SmoothingTV,
ButterworthFilter)
from hyperspy.ui_registry import DISPLAY_DT, TOOLKIT_DT
from hyperspy.misc.tv_denoise import _tv_denoise_1d
from hyperspy.signal_tools import BackgroundRemoval
from hyperspy.decorators import interactive_range_selector
from hyperspy.signal_tools import IntegrateArea, _get_background_estimator
from hyperspy._signals.lazy import LazySignal
from hyperspy.docstrings.signal1d import CROP_PARAMETER_DOC, SPIKES_REMOVAL_TOOL_DOCSTRING
from hyperspy.docstrings.signal import (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG,
SIGNAL_MASK_ARG, NAVIGATION_MASK_ARG)
from hyperspy.docstrings.plot import (
BASE_PLOT_DOCSTRING, BASE_PLOT_DOCSTRING_PARAMETERS, PLOT1D_DOCSTRING)
_logger = logging.getLogger(__name__)
def find_peaks_ohaver(y, x=None, slope_thresh=0., amp_thresh=None,
medfilt_radius=5, maxpeakn=30000, peakgroup=10,
subchannel=True,):
"""Find peaks along a 1D line.
Function to locate the positive peaks in a noisy x-y data set.
Detects peaks by looking for downward zero-crossings in the first
derivative that exceed 'slope_thresh'.
Returns an array containing position, height, and width of each peak.
Sorted by position.
'slope_thresh' and 'amp_thresh', control sensitivity: higher values
will neglect wider peaks (slope) and smaller features (amp),
respectively.
Parameters
----------
y : array
1D input array, e.g. a spectrum
x : array (optional)
1D array describing the calibration of y (must have same shape as y)
slope_thresh : float (optional)
1st derivative threshold to count the peak;
higher values will neglect broader features;
default is set to 0.
amp_thresh : float (optional)
intensity threshold below which peaks are ignored;
higher values will neglect smaller features;
default is set to 10% of max(y).
medfilt_radius : int (optional)
median filter window to apply to smooth the data
(see scipy.signal.medfilt);
if 0, no filter will be applied;
default is set to 5.
peakgroup : int (optional)
number of points around the "top part" of the peak that
are taken to estimate the peak height; for spikes or
very narrow peaks, keep PeakGroup=1 or 2; for broad or
noisy peaks, make PeakGroup larger to reduce the effect
of noise;
default is set to 10.
maxpeakn : int (optional)
number of maximum detectable peaks;
default is set to 30000.
subchannel : bool (optional)
default is set to True.
Returns
-------
P : structured array of shape (npeaks)
contains fields: 'position', 'width', and 'height' for each peak.
Examples
--------
>>> x = np.arange(0,50,0.01)
>>> y = np.cos(x)
>>> peaks = find_peaks_ohaver(y, x, 0, 0)
Notes
-----
Original code from T. C. O'Haver, 1995.
Version 2 Last revised Oct 27, 2006 Converted to Python by
Michael Sarahan, Feb 2011.
Revised to handle edges better. MCS, Mar 2011
"""
if x is None:
x = np.arange(len(y), dtype=np.int64)
if not amp_thresh:
amp_thresh = 0.1 * y.max()
peakgroup = np.round(peakgroup)
if medfilt_radius:
d = np.gradient(scipy.signal.medfilt(y, medfilt_radius))
else:
d = np.gradient(y)
n = np.round(peakgroup / 2 + 1)
peak_dt = np.dtype([('position', np.float),
('height', np.float),
('width', np.float)])
P = np.array([], dtype=peak_dt)
peak = 0
for j in range(len(y) - 4):
if np.sign(d[j]) > np.sign(d[j + 1]): # Detects zero-crossing
if np.sign(d[j + 1]) == 0:
continue
# if slope of derivative is larger than slope_thresh
if d[j] - d[j + 1] > slope_thresh:
# if height of peak is larger than amp_thresh
if y[j] > amp_thresh:
# the next section is very slow, and actually messes
# things up for images (discrete pixels),
# so by default, don't do subchannel precision in the
# 1D peakfind step.
if subchannel:
xx = np.zeros(peakgroup)
yy = np.zeros(peakgroup)
s = 0
for k in range(peakgroup):
groupindex = int(j + k - n + 1)
if groupindex < 1:
xx = xx[1:]
yy = yy[1:]
s += 1
continue
elif groupindex > y.shape[0] - 1:
xx = xx[:groupindex - 1]
yy = yy[:groupindex - 1]
break
xx[k - s] = x[groupindex]
yy[k - s] = y[groupindex]
avg = np.average(xx)
stdev = np.std(xx)
xxf = (xx - avg) / stdev
# Fit parabola to log10 of sub-group with
# centering and scaling
yynz = yy != 0
coef = np.polyfit(
xxf[yynz], np.log10(np.abs(yy[yynz])), 2)
c1 = coef[2]
c2 = coef[1]
c3 = coef[0]
with np.errstate(invalid='ignore'):
width = np.linalg.norm(stdev * 2.35703 /
(np.sqrt(2) * np.sqrt(-1 *
c3)))
# if the peak is too narrow for least-squares
# technique to work well, just use the max value
# of y in the sub-group of points near peak.
if peakgroup < 7:
height = np.max(yy)
position = xx[np.argmin(np.abs(yy - height))]
else:
position = - ((stdev * c2 / (2 * c3)) - avg)
height = np.exp(c1 - c3 * (c2 / (2 * c3)) ** 2)
# Fill results array P. One row for each peak
# detected, containing the
# peak position (x-value) and peak height (y-value).
else:
position = x[j]
height = y[j]
# no way to know peak width without
# the above measurements.
width = 0
if (not np.isnan(position) and 0 < position < x[-1]):
P = np.hstack((P,
np.array([(position, height, width)],
dtype=peak_dt)))
peak += 1
# return only the part of the array that contains peaks
# (not the whole maxpeakn x 3 array)
if len(P) > maxpeakn:
minh = np.sort(P['height'])[-maxpeakn]
P = P[P['height'] >= minh]
# Sorts the values as a function of position
P.sort(0)
return P
def interpolate1D(number_of_interpolation_points, data):
ip = number_of_interpolation_points
ch = len(data)
old_ax = np.linspace(0, 100, ch)
new_ax = np.linspace(0, 100, ch * ip - (ip - 1))
interpolator = scipy.interpolate.interp1d(old_ax, data)
return interpolator(new_ax)
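# Example (illustrative): with number_of_interpolation_points=2, a 3-sample
# array is resampled onto 3 * 2 - 1 = 5 evenly spaced samples:
#   >>> interpolate1D(2, np.array([0., 1., 2.]))
#   array([0. , 0.5, 1. , 1.5, 2. ])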
def _estimate_shift1D(data, **kwargs):
mask = kwargs.get('mask', None)
ref = kwargs.get('ref', None)
interpolate = kwargs.get('interpolate', True)
ip = kwargs.get('ip', 5)
data_slice = kwargs.get('data_slice', slice(None))
if bool(mask):
        # asarray is required for consistency as argmax
        # returns a numpy scalar array
return np.asarray(np.nan)
data = data[data_slice]
if interpolate is True:
data = interpolate1D(ip, data)
return np.argmax(np.correlate(ref, data, 'full')) - len(ref) + 1
def _shift1D(data, **kwargs):
shift = kwargs.get('shift', 0.)
original_axis = kwargs.get('original_axis', None)
fill_value = kwargs.get('fill_value', np.nan)
kind = kwargs.get('kind', 'linear')
offset = kwargs.get('offset', 0.)
scale = kwargs.get('scale', 1.)
size = kwargs.get('size', 2)
if np.isnan(shift) or shift == 0:
return data
axis = np.linspace(offset, offset + scale * (size - 1), size)
si = sp.interpolate.interp1d(original_axis,
data,
bounds_error=False,
fill_value=fill_value,
kind=kind)
offset = float(offset - shift)
axis = np.linspace(offset, offset + scale * (size - 1), size)
return si(axis)
class Signal1D(BaseSignal, CommonSignal1D):
"""
"""
_signal_dimension = 1
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.axes_manager.signal_dimension != 1:
self.axes_manager.set_signal_dimension(1)
def _get_spikes_diagnosis_histogram_data(self, signal_mask=None,
navigation_mask=None,
**kwargs):
self._check_signal_dimension_equals_one()
dc = self.data
if signal_mask is not None:
dc = dc[..., ~signal_mask]
if navigation_mask is not None:
dc = dc[~navigation_mask, :]
der = np.abs(np.diff(dc, 1, -1))
n = ((~navigation_mask).sum() if navigation_mask else
self.axes_manager.navigation_size)
# arbitrary cutoff for number of spectra necessary before histogram
# data is compressed by finding maxima of each spectrum
tmp = BaseSignal(der) if n < 2000 else BaseSignal(
np.ravel(der.max(-1)))
# get histogram signal using smart binning and plot
return tmp.get_histogram(**kwargs)
def spikes_diagnosis(self, signal_mask=None,
navigation_mask=None,
**kwargs):
"""Plots a histogram to help in choosing the threshold for
spikes removal.
Parameters
----------
%s
%s
**kwargs : dict
Keyword arguments pass to
:py:meth:`~hyperspy.signal.signal.BaseSignal.get_histogram`
See also
--------
spikes_removal_tool
"""
tmph = self._get_spikes_diagnosis_histogram_data(signal_mask,
navigation_mask,
**kwargs)
tmph.plot()
# Customize plot appearance
plt.gca().set_title('')
plt.gca().fill_between(tmph.axes_manager[0].axis,
tmph.data,
facecolor='#fddbc7',
interpolate=True,
color='none')
ax = tmph._plot.signal_plot.ax
axl = tmph._plot.signal_plot.ax_lines[0]
axl.set_line_properties(color='#b2182b')
plt.xlabel('Derivative magnitude')
plt.ylabel('Log(Counts)')
ax.set_yscale('log')
ax.set_ylim(10 ** -1, plt.ylim()[1])
ax.set_xlim(plt.xlim()[0], 1.1 * plt.xlim()[1])
plt.draw()
spikes_diagnosis.__doc__ %= (SIGNAL_MASK_ARG, NAVIGATION_MASK_ARG)
def spikes_removal_tool(self, signal_mask=None, navigation_mask=None,
threshold='auto', interactive=True,
display=True, toolkit=None):
self._check_signal_dimension_equals_one()
if interactive:
sr = SpikesRemovalInteractive(self,
signal_mask=signal_mask,
navigation_mask=navigation_mask,
threshold=threshold)
return sr.gui(display=display, toolkit=toolkit)
else:
SpikesRemoval(self,
signal_mask=signal_mask,
navigation_mask=navigation_mask,
threshold=threshold)
spikes_removal_tool.__doc__ = SPIKES_REMOVAL_TOOL_DOCSTRING % (
SIGNAL_MASK_ARG, NAVIGATION_MASK_ARG, "", DISPLAY_DT, TOOLKIT_DT)
def create_model(self, dictionary=None):
"""Create a model for the current data.
Returns
-------
model : `Model1D` instance.
"""
model = Model1D(self, dictionary=dictionary)
return model
def shift1D(
self,
shift_array,
interpolation_method='linear',
crop=True,
expand=False,
fill_value=np.nan,
parallel=None,
show_progressbar=None,
max_workers=None,
):
"""Shift the data in place over the signal axis by the amount specified
by an array.
Parameters
----------
shift_array : numpy array
An array containing the shifting amount. It must have
`axes_manager._navigation_shape_in_array` shape.
interpolation_method : str or int
Specifies the kind of interpolation as a string ('linear',
            'nearest', 'zero', 'slinear', 'quadratic', 'cubic') or as an
integer specifying the order of the spline interpolator to
use.
%s
expand : bool
If True, the data will be expanded to fit all data after alignment.
Overrides `crop`.
fill_value : float
If crop is False fill the data outside of the original
interval with the given value where needed.
%s
%s
%s
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
if not np.any(shift_array):
# Nothing to do, the shift array if filled with zeros
return
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
axis = self.axes_manager.signal_axes[0]
# Figure out min/max shifts, and translate to shifts in index as well
minimum, maximum = np.nanmin(shift_array), np.nanmax(shift_array)
if minimum < 0:
ihigh = 1 + axis.value2index(
axis.high_value + minimum,
rounding=math.floor)
else:
ihigh = axis.high_index + 1
if maximum > 0:
ilow = axis.value2index(axis.offset + maximum,
rounding=math.ceil)
else:
ilow = axis.low_index
if expand:
if self._lazy:
ind = axis.index_in_array
pre_shape = list(self.data.shape)
post_shape = list(self.data.shape)
pre_chunks = list(self.data.chunks)
post_chunks = list(self.data.chunks)
pre_shape[ind] = axis.high_index - ihigh + 1
post_shape[ind] = ilow - axis.low_index
for chunks, shape in zip((pre_chunks, post_chunks),
(pre_shape, post_shape)):
maxsize = min(np.max(chunks[ind]), shape[ind])
num = np.ceil(shape[ind] / maxsize)
chunks[ind] = tuple(len(ar) for ar in
np.array_split(np.arange(shape[ind]),
num))
pre_array = da.full(tuple(pre_shape),
fill_value,
chunks=tuple(pre_chunks))
post_array = da.full(tuple(post_shape),
fill_value,
chunks=tuple(post_chunks))
self.data = da.concatenate((pre_array, self.data, post_array),
axis=ind)
else:
padding = []
for i in range(self.data.ndim):
if i == axis.index_in_array:
padding.append((axis.high_index - ihigh + 1,
ilow - axis.low_index))
else:
padding.append((0, 0))
self.data = np.pad(self.data, padding, mode='constant',
constant_values=(fill_value,))
axis.offset += minimum
axis.size += axis.high_index - ihigh + 1 + ilow - axis.low_index
self._map_iterate(_shift1D, (('shift', shift_array.ravel()),),
original_axis=axis.axis,
fill_value=fill_value,
kind=interpolation_method,
offset=axis.offset,
scale=axis.scale,
size=axis.size,
show_progressbar=show_progressbar,
parallel=parallel,
max_workers=max_workers,
ragged=False)
if crop and not expand:
_logger.debug("Cropping %s from index %i to %i"
% (self, ilow, ihigh))
self.crop(axis.index_in_axes_manager,
ilow,
ihigh)
self.events.data_changed.trigger(obj=self)
shift1D.__doc__ %= (CROP_PARAMETER_DOC, SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
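    # Illustrative usage sketch (assumes ``import hyperspy.api as hs``; shapes
    # and shift values are arbitrary):
    #   >>> s = hs.signals.Signal1D(np.random.random((4, 100)))
    #   >>> shifts = np.full((4,), 2.5)      # one shift per navigation position
    #   >>> s.shift1D(shifts, expand=True)   # shifts are in signal-axis units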
def interpolate_in_between(
self,
start,
end,
delta=3,
show_progressbar=None,
parallel=None,
max_workers=None,
**kwargs,
):
"""Replace the data in a given range by interpolation.
The operation is performed in place.
Parameters
----------
start, end : int or float
The limits of the interval. If int they are taken as the
axis index. If float they are taken as the axis value.
delta : int or float
The windows around the (start, end) to use for interpolation
%s
%s
%s
**kwargs :
All extra keyword arguments are passed to
:py:func:`scipy.interpolate.interp1d`. See the function documentation
for details.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
axis = self.axes_manager.signal_axes[0]
i1 = axis._get_index(start)
i2 = axis._get_index(end)
if isinstance(delta, float):
delta = int(delta / axis.scale)
i0 = int(np.clip(i1 - delta, 0, np.inf))
i3 = int(np.clip(i2 + delta, 0, axis.size))
def interpolating_function(dat):
dat_int = sp.interpolate.interp1d(
list(range(i0, i1)) + list(range(i2, i3)),
dat[i0:i1].tolist() + dat[i2:i3].tolist(),
**kwargs)
dat[i1:i2] = dat_int(list(range(i1, i2)))
return dat
self._map_iterate(interpolating_function,
ragged=False,
parallel=parallel,
show_progressbar=show_progressbar,
max_workers=max_workers)
self.events.data_changed.trigger(obj=self)
interpolate_in_between.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
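    # Illustrative usage sketch (assumes ``import hyperspy.api as hs``):
    #   >>> s = hs.signals.Signal1D(np.arange(100.))
    #   >>> s.data[40:50] = 0                 # damage a region
    #   >>> s.interpolate_in_between(40, 50)  # re-fill it from the surrounding windows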
def estimate_shift1D(
self,
start=None,
end=None,
reference_indices=None,
max_shift=None,
interpolate=True,
number_of_interpolation_points=5,
mask=None,
show_progressbar=None,
parallel=None,
max_workers=None,
):
"""Estimate the shifts in the current signal axis using
cross-correlation.
This method can only estimate the shift by comparing
unidimensional features that should not change the position in
        the signal axis. To decrease the memory usage and computation time
        and to improve the accuracy of the results, it is convenient to
        select the feature of interest by providing sensible values for
`start` and `end`. By default interpolation is used to obtain
subpixel precision.
Parameters
----------
start, end : int, float or None
The limits of the interval. If int they are taken as the
axis index. If float they are taken as the axis value.
reference_indices : tuple of ints or None
Defines the coordinates of the spectrum that will be used
            as reference. If None the spectrum at the current
coordinates is used for this purpose.
max_shift : int
"Saturation limit" for the shift.
interpolate : bool
If True, interpolation is used to provide sub-pixel
accuracy.
number_of_interpolation_points : int
Number of interpolation points. Warning: making this number
too big can saturate the memory
mask : `BaseSignal` of bool.
It must have signal_dimension = 0 and navigation_shape equal to the
current signal. Where mask is True the shift is not computed
and set to nan.
%s
%s
%s
Returns
-------
An array with the result of the estimation in the axis units.
Although the computation is performed in batches if the signal is
lazy, the result is computed in memory because it depends on the
current state of the axes that could change later on in the workflow.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
ip = number_of_interpolation_points + 1
axis = self.axes_manager.signal_axes[0]
self._check_navigation_mask(mask)
# we compute for now
if isinstance(start, da.Array):
start = start.compute()
if isinstance(end, da.Array):
end = end.compute()
i1, i2 = axis._get_index(start), axis._get_index(end)
if reference_indices is None:
reference_indices = self.axes_manager.indices
ref = self.inav[reference_indices].data[i1:i2]
if interpolate is True:
ref = interpolate1D(ip, ref)
iterating_kwargs = ()
if mask is not None:
iterating_kwargs += (('mask', mask),)
shift_signal = self._map_iterate(
_estimate_shift1D,
iterating_kwargs=iterating_kwargs,
data_slice=slice(i1, i2),
ref=ref,
ip=ip,
interpolate=interpolate,
ragged=False,
parallel=parallel,
inplace=False,
show_progressbar=show_progressbar,
max_workers=max_workers,
)
shift_array = shift_signal.data
if max_shift is not None:
if interpolate is True:
max_shift *= ip
shift_array.clip(-max_shift, max_shift)
if interpolate is True:
shift_array = shift_array / ip
shift_array *= axis.scale
if self._lazy:
# We must compute right now because otherwise any changes to the
# axes_manager of the signal later in the workflow may result in
# a wrong shift_array
shift_array = shift_array.compute()
return shift_array
estimate_shift1D.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
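    # Illustrative usage sketch (random data, arbitrary signal range):
    #   >>> s = hs.signals.Signal1D(np.random.random((10, 500)))
    #   >>> shifts = s.estimate_shift1D(start=100, end=400)
    #   >>> shifts.shape     # one shift, in axis units, per navigation position
    #   (10,)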
def align1D(self,
start=None,
end=None,
reference_indices=None,
max_shift=None,
interpolate=True,
number_of_interpolation_points=5,
interpolation_method='linear',
crop=True,
expand=False,
fill_value=np.nan,
also_align=None,
mask=None,
show_progressbar=None):
"""Estimate the shifts in the signal axis using
cross-correlation and use the estimation to align the data in place.
        This method can only estimate the shift by comparing unidimensional
        features that should not change the position.
        To decrease memory usage and computation time and to improve
        accuracy, it is convenient to select the feature of interest by
setting the `start` and `end` keywords. By default interpolation is
used to obtain subpixel precision.
Parameters
----------
start, end : int, float or None
The limits of the interval. If int they are taken as the
axis index. If float they are taken as the axis value.
reference_indices : tuple of ints or None
Defines the coordinates of the spectrum that will be used
            as reference. If None the spectrum at the current
coordinates is used for this purpose.
max_shift : int
"Saturation limit" for the shift.
interpolate : bool
If True, interpolation is used to provide sub-pixel
accuracy.
number_of_interpolation_points : int
Number of interpolation points. Warning: making this number
too big can saturate the memory
interpolation_method : str or int
Specifies the kind of interpolation as a string ('linear',
            'nearest', 'zero', 'slinear', 'quadratic', 'cubic') or as an
integer specifying the order of the spline interpolator to
use.
%s
expand : bool
If True, the data will be expanded to fit all data after alignment.
Overrides `crop`.
fill_value : float
If crop is False fill the data outside of the original
interval with the given value where needed.
also_align : list of signals, None
A list of BaseSignal instances that has exactly the same
dimensions as this one and that will be aligned using the shift map
estimated using the this signal.
mask : `BaseSignal` or bool data type.
It must have signal_dimension = 0 and navigation_shape equal to the
current signal. Where mask is True the shift is not computed
and set to nan.
%s
Returns
-------
An array with the result of the estimation.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
See also
--------
estimate_shift1D
"""
if also_align is None:
also_align = []
self._check_signal_dimension_equals_one()
if self._lazy:
_logger.warning('In order to properly expand, the lazy '
'reference signal will be read twice (once to '
                            'estimate shifts, and a second time to shift '
                            'appropriately), which might take a long time. '
'Use expand=False to only pass through the data '
'once.')
shift_array = self.estimate_shift1D(
start=start,
end=end,
reference_indices=reference_indices,
max_shift=max_shift,
interpolate=interpolate,
number_of_interpolation_points=number_of_interpolation_points,
mask=mask,
show_progressbar=show_progressbar)
signals_to_shift = [self] + also_align
for signal in signals_to_shift:
signal.shift1D(shift_array=shift_array,
interpolation_method=interpolation_method,
crop=crop,
fill_value=fill_value,
expand=expand,
show_progressbar=show_progressbar)
align1D.__doc__ %= (CROP_PARAMETER_DOC, SHOW_PROGRESSBAR_ARG)
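    # Illustrative usage sketch (``s`` as in the estimate_shift1D example above):
    # estimate the shifts on a feature between the given limits and apply them
    # in place, expanding the signal axis so that no data is cropped:
    #   >>> s.align1D(start=100, end=400, expand=True)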
def integrate_in_range(self, signal_range='interactive',
display=True, toolkit=None):
"""Sums the spectrum over an energy range, giving the integrated
area.
The energy range can either be selected through a GUI or the command
line.
Parameters
----------
signal_range : a tuple of this form (l, r) or "interactive"
l and r are the left and right limits of the range. They can be
numbers or None, where None indicates the extremes of the interval.
If l and r are floats the `signal_range` will be in axis units (for
example eV). If l and r are integers the `signal_range` will be in
index units. When `signal_range` is "interactive" (default) the
range is selected using a GUI. Note that ROIs can be used
in place of a tuple.
Returns
--------
integrated_spectrum : `BaseSignal` subclass
See Also
--------
integrate_simpson
Examples
--------
Using the GUI
>>> s = hs.signals.Signal1D(range(1000))
>>> s.integrate_in_range() #doctest: +SKIP
Using the CLI
>>> s_int = s.integrate_in_range(signal_range=(560,None))
Selecting a range in the axis units, by specifying the
signal range with floats.
>>> s_int = s.integrate_in_range(signal_range=(560.,590.))
Selecting a range using the index, by specifying the
signal range with integers.
>>> s_int = s.integrate_in_range(signal_range=(100,120))
"""
from hyperspy.misc.utils import deprecation_warning
msg = (
"The `Signal1D.integrate_in_range` method is deprecated and will "
"be removed in v2.0. Use a `roi.SpanRoi` followed by `integrate1D` "
"instead.")
deprecation_warning(msg)
if signal_range == 'interactive':
self_copy = self.deepcopy()
ia = IntegrateArea(self_copy, signal_range)
ia.gui(display=display, toolkit=toolkit)
integrated_signal1D = self_copy
else:
integrated_signal1D = self._integrate_in_range_commandline(
signal_range)
return integrated_signal1D
def _integrate_in_range_commandline(self, signal_range):
e1 = signal_range[0]
e2 = signal_range[1]
integrated_signal1D = self.isig[e1:e2].integrate1D(-1)
return integrated_signal1D
def calibrate(self, display=True, toolkit=None):
"""
Calibrate the spectral dimension using a gui.
It displays a window where the new calibration can be set by:
* setting the values of offset, units and scale directly
* or selecting a range by dragging the mouse on the spectrum figure
and setting the new values for the given range limits
Parameters
----------
%s
%s
Notes
-----
For this method to work the output_dimension must be 1.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
calibration = Signal1DCalibration(self)
return calibration.gui(display=display, toolkit=toolkit)
calibrate.__doc__ %= (DISPLAY_DT, TOOLKIT_DT)
def smooth_savitzky_golay(
self,
polynomial_order=None,
window_length=None,
differential_order=0,
parallel=None,
max_workers=None,
display=True,
toolkit=None,
):
"""
Apply a Savitzky-Golay filter to the data in place.
If `polynomial_order` or `window_length` or `differential_order` are
None the method is run in interactive mode.
Parameters
----------
polynomial_order : int, optional
The order of the polynomial used to fit the samples.
`polyorder` must be less than `window_length`.
window_length : int, optional
The length of the filter window (i.e. the number of coefficients).
`window_length` must be a positive odd integer.
differential_order: int, optional
The order of the derivative to compute. This must be a
nonnegative integer. The default is 0, which means to filter
the data without differentiating.
%s
%s
%s
%s
Notes
-----
More information about the filter in `scipy.signal.savgol_filter`.
"""
self._check_signal_dimension_equals_one()
if (polynomial_order is not None and
window_length is not None):
axis = self.axes_manager.signal_axes[0]
self.map(savgol_filter, window_length=window_length,
polyorder=polynomial_order, deriv=differential_order,
delta=axis.scale, ragged=False, parallel=parallel, max_workers=max_workers)
else:
# Interactive mode
smoother = SmoothingSavitzkyGolay(self)
smoother.differential_order = differential_order
if polynomial_order is not None:
smoother.polynomial_order = polynomial_order
if window_length is not None:
smoother.window_length = window_length
return smoother.gui(display=display, toolkit=toolkit)
smooth_savitzky_golay.__doc__ %= (PARALLEL_ARG, MAX_WORKERS_ARG, DISPLAY_DT, TOOLKIT_DT)
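    # Illustrative usage sketch (``s`` is any Signal1D; the window length must be
    # odd and larger than the polynomial order):
    #   >>> s.smooth_savitzky_golay(polynomial_order=3, window_length=15)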
def smooth_lowess(
self,
smoothing_parameter=None,
number_of_iterations=None,
show_progressbar=None,
parallel=None,
max_workers=None,
display=True,
toolkit=None,
):
"""
Lowess data smoothing in place.
If `smoothing_parameter` or `number_of_iterations` are None the method
is run in interactive mode.
Parameters
----------
smoothing_parameter: float or None
Between 0 and 1. The fraction of the data used
when estimating each y-value.
number_of_iterations: int or None
The number of residual-based reweightings
to perform.
%s
%s
%s
%s
%s
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
if smoothing_parameter is None or number_of_iterations is None:
smoother = SmoothingLowess(self)
if smoothing_parameter is not None:
smoother.smoothing_parameter = smoothing_parameter
if number_of_iterations is not None:
smoother.number_of_iterations = number_of_iterations
return smoother.gui(display=display, toolkit=toolkit)
else:
self.map(lowess,
x=self.axes_manager[-1].axis,
f=smoothing_parameter,
n_iter=number_of_iterations,
show_progressbar=show_progressbar,
ragged=False,
parallel=parallel,
max_workers=max_workers)
smooth_lowess.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG, DISPLAY_DT, TOOLKIT_DT)
def smooth_tv(
self,
smoothing_parameter=None,
show_progressbar=None,
parallel=None,
max_workers=None,
display=True,
toolkit=None,
):
"""
Total variation data smoothing in place.
Parameters
----------
smoothing_parameter: float or None
Denoising weight relative to L2 minimization. If None the method
is run in interactive mode.
%s
%s
%s
%s
%s
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
if smoothing_parameter is None:
smoother = SmoothingTV(self)
return smoother.gui(display=display, toolkit=toolkit)
else:
self.map(_tv_denoise_1d, weight=smoothing_parameter,
ragged=False,
show_progressbar=show_progressbar,
parallel=parallel,
max_workers=max_workers)
smooth_tv.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG, DISPLAY_DT, TOOLKIT_DT)
def filter_butterworth(self,
cutoff_frequency_ratio=None,
type='low',
order=2, display=True, toolkit=None):
"""
Butterworth filter in place.
Parameters
----------
        cutoff_frequency_ratio : float or None
            Between 0 and 1. If None the method is run in interactive mode.
        type : 'low' or 'high'
            The type of filter to apply: low-pass or high-pass.
        order : int
            The order of the Butterworth filter.
        %s
%s
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
smoother = ButterworthFilter(self)
if cutoff_frequency_ratio is not None:
smoother.cutoff_frequency_ratio = cutoff_frequency_ratio
smoother.type = type
smoother.order = order
smoother.apply()
else:
return smoother.gui(display=display, toolkit=toolkit)
filter_butterworth.__doc__ %= (DISPLAY_DT, TOOLKIT_DT)
def _remove_background_cli(
self, signal_range, background_estimator, fast=True,
zero_fill=False, show_progressbar=None, model=None,
return_model=False):
""" See :py:meth:`~hyperspy._signal1d.signal1D.remove_background`. """
if model is None:
from hyperspy.models.model1d import Model1D
model = Model1D(self)
if background_estimator not in model:
model.append(background_estimator)
background_estimator.estimate_parameters(
self,
signal_range[0],
signal_range[1],
only_current=False)
if not fast:
model.set_signal_range(signal_range[0], signal_range[1])
model.multifit(show_progressbar=show_progressbar,
iterpath='serpentine')
model.reset_signal_range()
if self._lazy:
result = self - model.as_signal(show_progressbar=show_progressbar)
else:
try:
axis = self.axes_manager.signal_axes[0]
scale_factor = axis.scale if self.metadata.Signal.binned else 1
bkg = background_estimator.function_nd(axis.axis) * scale_factor
result = self - bkg
except MemoryError:
result = self - model.as_signal(
show_progressbar=show_progressbar)
if zero_fill:
if self._lazy:
low_idx = result.axes_manager[-1].value2index(signal_range[0])
z = da.zeros(low_idx, chunks=(low_idx,))
cropped_da = result.data[low_idx:]
result.data = da.concatenate([z, cropped_da])
else:
result.isig[:signal_range[0]] = 0
if return_model:
if fast:
# Calculate the variance for each navigation position only when
# using fast, otherwise the chisq is already calculated when
# doing the multifit
d = result.data[..., np.where(model.channel_switches)[0]]
variance = model._get_variance(only_current=False)
d *= d / (1. * variance) # d = difference^2 / variance.
model.chisq.data = d.sum(-1)
result = (result, model)
return result
def remove_background(
self,
signal_range='interactive',
background_type='Power law',
polynomial_order=2,
fast=True,
zero_fill=False,
plot_remainder=True,
show_progressbar=None,
return_model=False,
display=True,
toolkit=None):
"""
Remove the background, either in place using a GUI or returned as a new
spectrum using the command line. The fast option is not accurate for
most background types - except Gaussian, Offset and
Power law - but it is useful to estimate the initial fitting parameters
before performing a full fit.
Parameters
----------
signal_range : "interactive", tuple of ints or floats, optional
If this argument is not specified, the signal range has to be
            selected using a GUI, and the original spectrum will be replaced.
            If a tuple is given, a spectrum will be returned.
background_type : str
The type of component which should be used to fit the background.
Possible components: Doniach, Gaussian, Lorentzian, Offset,
Polynomial, PowerLaw, Exponential, SkewNormal, SplitVoigt, Voigt.
If Polynomial is used, the polynomial order can be specified
polynomial_order : int, default 2
Specify the polynomial order if a Polynomial background is used.
fast : bool
            If True, perform an approximate estimation of the parameters.
If False, the signal is fitted using non-linear least squares
afterwards. This is slower compared to the estimation but
often more accurate.
zero_fill : bool
If True, all spectral channels lower than the lower bound of the
fitting range will be set to zero (this is the default behavior
of Gatan's DigitalMicrograph). Setting this value to False
allows for inspection of the quality of background fit throughout
the pre-fitting region.
plot_remainder : bool
If True, add a (green) line previewing the remainder signal after
            background removal. This preview is obtained from a fast calculation,
            so the result may be different if an NLLS calculation is finally
performed.
return_model : bool
If True, the background model is returned. The chi² can be obtained
from this model using
            :py:attr:`~hyperspy.models.model1d.Model1D.chisq`.
%s
%s
%s
Returns
-------
{None, signal, background_model or (signal, background_model)}
If signal_range is not 'interactive', the signal with background
            subtracted is returned. If return_model is True, returns the
background model, otherwise, the GUI widget dictionary is returned
if `display=False` - see the display parameter documentation.
Examples
--------
Using GUI, replaces spectrum s
>>> s = hs.signals.Signal1D(range(1000))
>>> s.remove_background() #doctest: +SKIP
Using command line, returns a Signal1D:
>>> s.remove_background(signal_range=(400,450),
background_type='PowerLaw')
<Signal1D, title: , dimensions: (|1000)>
Using a full model to fit the background:
>>> s.remove_background(signal_range=(400,450), fast=False)
<Signal1D, title: , dimensions: (|1000)>
        Returns the background-subtracted signal and the model:
>>> s.remove_background(signal_range=(400,450),
fast=False,
return_model=True)
(<Signal1D, title: , dimensions: (|1000)>, <Model1D>)
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
# Create model here, so that we can return it
from hyperspy.models.model1d import Model1D
model = Model1D(self)
if signal_range == 'interactive':
br = BackgroundRemoval(self, background_type=background_type,
polynomial_order=polynomial_order,
fast=fast,
plot_remainder=plot_remainder,
show_progressbar=show_progressbar,
zero_fill=zero_fill,
model=model)
gui_dict = br.gui(display=display, toolkit=toolkit)
if return_model:
return model
else:
# for testing purposes
return gui_dict
else:
background_estimator = _get_background_estimator(
background_type, polynomial_order)[0]
result = self._remove_background_cli(
signal_range=signal_range,
background_estimator=background_estimator,
fast=fast,
zero_fill=zero_fill,
show_progressbar=show_progressbar,
model=model,
return_model=return_model)
return result
remove_background.__doc__ %= (SHOW_PROGRESSBAR_ARG, DISPLAY_DT, TOOLKIT_DT)
@interactive_range_selector
def crop_signal1D(self, left_value=None, right_value=None,):
"""Crop in place the spectral dimension.
Parameters
----------
        left_value, right_value : int, float or None
            If int the values are taken as indices. If float they are
            converted to indices using the spectral axis calibration.
            If left_value is None crops from the beginning of the axis.
            If right_value is None crops up to the end of the axis.
            If both are None the interactive cropping interface is
            activated, enabling cropping the spectrum using a span
            selector in the signal plot.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
try:
left_value, right_value = left_value
except TypeError:
# It was not a ROI, we carry on
pass
self.crop(axis=self.axes_manager.signal_axes[0].index_in_axes_manager,
start=left_value, end=right_value)
def gaussian_filter(self, FWHM):
"""Applies a Gaussian filter in the spectral dimension in place.
Parameters
----------
FWHM : float
The Full Width at Half Maximum of the gaussian in the
spectral axis units
Raises
------
ValueError
If FWHM is equal or less than zero.
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
if FWHM <= 0:
raise ValueError(
"FWHM must be greater than zero")
axis = self.axes_manager.signal_axes[0]
FWHM *= 1 / axis.scale
self.map(gaussian_filter1d, sigma=FWHM / 2.35482, ragged=False)
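    # Illustrative usage sketch: FWHM is given in signal-axis units and is
    # converted above to a sigma in pixels (sigma = FWHM / (scale * 2.35482)):
    #   >>> s.gaussian_filter(FWHM=1.5)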
def hanning_taper(self, side='both', channels=None, offset=0):
"""Apply a hanning taper to the data in place.
Parameters
----------
side : 'left', 'right' or 'both'
Specify which side to use.
channels : None or int
            The number of channels to taper. If None 2% of the total
            number of channels are tapered (with a minimum of 20 channels).
        offset : int
            The number of channels at the signal edge that are set to zero
            before the taper is applied (the taper is shifted inward by this
            amount).
Returns
-------
channels
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
if not np.issubdtype(self.data.dtype, np.floating):
raise TypeError("The data dtype should be `float`. It can be "
"changed by using the `change_dtype('float')` "
"method of the signal.")
# TODO: generalize it
self._check_signal_dimension_equals_one()
if channels is None:
channels = int(round(len(self()) * 0.02))
if channels < 20:
channels = 20
dc = self._data_aligned_with_axes
if self._lazy and offset != 0:
shp = dc.shape
if len(shp) == 1:
nav_shape = ()
nav_chunks = ()
else:
nav_shape = shp[:-1]
nav_chunks = dc.chunks[:-1]
zeros = da.zeros(nav_shape + (offset,),
chunks=nav_chunks + ((offset,),))
if side == 'left' or side == 'both':
if self._lazy:
tapered = dc[..., offset:channels + offset]
tapered *= np.hanning(2 * channels)[:channels]
therest = dc[..., channels + offset:]
thelist = [] if offset == 0 else [zeros]
thelist.extend([tapered, therest])
dc = da.concatenate(thelist, axis=-1)
else:
dc[..., offset:channels + offset] *= (
np.hanning(2 * channels)[:channels])
dc[..., :offset] *= 0.
if side == 'right' or side == 'both':
rl = None if offset == 0 else -offset
if self._lazy:
therest = dc[..., :-channels - offset]
tapered = dc[..., -channels - offset:rl]
tapered *= np.hanning(2 * channels)[-channels:]
thelist = [therest, tapered]
if offset != 0:
thelist.append(zeros)
dc = da.concatenate(thelist, axis=-1)
else:
dc[..., -channels - offset:rl] *= (
np.hanning(2 * channels)[-channels:])
if offset != 0:
dc[..., -offset:] *= 0.
if self._lazy:
self.data = dc
self.events.data_changed.trigger(obj=self)
return channels
def find_peaks1D_ohaver(self, xdim=None,
slope_thresh=0,
amp_thresh=None,
subchannel=True,
medfilt_radius=5,
maxpeakn=30000,
peakgroup=10,
parallel=None,
max_workers=None):
"""Find positive peaks along a 1D Signal. It detects peaks by looking
for downward zero-crossings in the first derivative that exceed
'slope_thresh'.
'slope_thresh' and 'amp_thresh', control sensitivity: higher
values will neglect broad peaks (slope) and smaller features (amp),
respectively.
`peakgroup` is the number of points around the top of the peak
that are taken to estimate the peak height. For spikes or very
narrow peaks, set `peakgroup` to 1 or 2; for broad or noisy peaks,
make `peakgroup` larger to reduce the effect of noise.
Parameters
----------
slope_thresh : float, optional
1st derivative threshold to count the peak;
higher values will neglect broader features;
default is set to 0.
amp_thresh : float, optional
intensity threshold below which peaks are ignored;
higher values will neglect smaller features;
default is set to 10%% of max(y).
medfilt_radius : int, optional
median filter window to apply to smooth the data
(see :py:func:`scipy.signal.medfilt`);
if 0, no filter will be applied;
default is set to 5.
peakgroup : int, optional
number of points around the "top part" of the peak
that are taken to estimate the peak height;
default is set to 10
maxpeakn : int, optional
number of maximum detectable peaks;
            default is set to 30000.
        subchannel : bool, optional
            if True, the peak position is refined to sub-channel
            precision; default is set to True.
%s
%s
Returns
-------
structured array of shape (npeaks) containing fields: 'position',
'width', and 'height' for each peak.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
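        Examples
        --------
        A minimal sketch; the signal name ``s`` is assumed for
        illustration only:
        >>> peaks = s.find_peaks1D_ohaver(slope_thresh=0.5)
        >>> # structured arrays with 'position', 'width' and 'height' fields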
"""
# TODO: add scipy.signal.find_peaks_cwt
self._check_signal_dimension_equals_one()
axis = self.axes_manager.signal_axes[0].axis
peaks = self.map(find_peaks_ohaver,
x=axis,
slope_thresh=slope_thresh,
amp_thresh=amp_thresh,
medfilt_radius=medfilt_radius,
maxpeakn=maxpeakn,
peakgroup=peakgroup,
subchannel=subchannel,
ragged=True,
parallel=parallel,
max_workers=max_workers,
inplace=False)
return peaks.data
find_peaks1D_ohaver.__doc__ %= (PARALLEL_ARG, MAX_WORKERS_ARG)
def estimate_peak_width(
self,
factor=0.5,
window=None,
return_interval=False,
parallel=None,
show_progressbar=None,
max_workers=None,
):
"""Estimate the width of the highest intensity of peak
of the spectra at a given fraction of its maximum.
It can be used with asymmetric peaks. For accurate results any
background must be previously substracted.
The estimation is performed by interpolation using cubic splines.
Parameters
----------
factor : 0 < float < 1
The default, 0.5, estimates the FWHM.
window : None or float
The size of the window centred at the peak maximum
used to perform the estimation.
The window size must be chosen with care: if it is narrower
than the width of the peak at some positions or if it is
so wide that it includes other more intense peaks this
method cannot compute the width and a NaN is stored instead.
        return_interval : bool
If True, returns 2 extra signals with the positions of the
desired height fraction at the left and right of the
peak.
%s
%s
%s
Returns
-------
width or [width, left, right], depending on the value of
`return_interval`.
Notes
-----
Parallel operation of this function is not supported
on Windows platforms.
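        Examples
        --------
        A minimal sketch; the signal name ``s`` is assumed for
        illustration only:
        >>> fwhm = s.estimate_peak_width(factor=0.5)
        >>> fwhm, left, right = s.estimate_peak_width(factor=0.5,
        ...                                           return_interval=True)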
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
if not 0 < factor < 1:
raise ValueError("factor must be between 0 and 1.")
if parallel != False and os.name in ["nt", "dos"]: # pragma: no cover
# Due to a scipy bug where scipy.interpolate.UnivariateSpline
# appears to not be thread-safe on Windows, we raise a warning
# here. See https://github.com/hyperspy/hyperspy/issues/2320
# Until/if the scipy bug is fixed, we should do this.
_logger.warning(
"Parallel operation is not supported on Windows. "
"Setting `parallel=False`"
)
parallel = False
axis = self.axes_manager.signal_axes[0]
# x = axis.axis
maxval = self.axes_manager.navigation_size
show_progressbar = show_progressbar and maxval > 0
def estimating_function(spectrum,
window=None,
factor=0.5,
axis=None):
x = axis.axis
if window is not None:
vmax = axis.index2value(spectrum.argmax())
slices = axis._get_array_slices(
slice(vmax - window * 0.5, vmax + window * 0.5))
spectrum = spectrum[slices]
x = x[slices]
spline = scipy.interpolate.UnivariateSpline(
x,
spectrum - factor * spectrum.max(),
s=0)
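            # The roots of the shifted spline are the positions where the
            # spectrum crosses factor * max; exactly two roots bracket a
            # well-defined peak, anything else is reported as NaN below.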
roots = spline.roots()
if len(roots) == 2:
return np.array(roots)
else:
return np.full((2,), np.nan)
both = self._map_iterate(estimating_function,
window=window,
factor=factor,
axis=axis,
ragged=False,
inplace=False,
parallel=parallel,
show_progressbar=show_progressbar,
                                 max_workers=max_workers)
left, right = both.T.split()
width = right - left
if factor == 0.5:
width.metadata.General.title = (
self.metadata.General.title + " FWHM")
left.metadata.General.title = (
self.metadata.General.title + " FWHM left position")
right.metadata.General.title = (
self.metadata.General.title + " FWHM right position")
else:
width.metadata.General.title = (
self.metadata.General.title +
" full-width at %.1f maximum" % factor)
left.metadata.General.title = (
self.metadata.General.title +
" full-width at %.1f maximum left position" % factor)
right.metadata.General.title = (
self.metadata.General.title +
" full-width at %.1f maximum right position" % factor)
for signal in (left, width, right):
signal.axes_manager.set_signal_dimension(0)
signal.set_signal_type("")
if return_interval is True:
return [width, left, right]
else:
return width
estimate_peak_width.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
def plot(self,
navigator="auto",
plot_markers=True,
autoscale='v',
norm="auto",
axes_manager=None,
navigator_kwds={},
**kwargs):
"""%s
%s
%s
"""
for c in autoscale:
if c not in ['x', 'v']:
raise ValueError("`autoscale` only accepts 'x', 'v' as "
"valid characters.")
super().plot(navigator=navigator,
plot_markers=plot_markers,
autoscale=autoscale,
norm=norm,
axes_manager=axes_manager,
navigator_kwds=navigator_kwds,
**kwargs)
plot.__doc__ %= (BASE_PLOT_DOCSTRING, BASE_PLOT_DOCSTRING_PARAMETERS,
PLOT1D_DOCSTRING)
class LazySignal1D(LazySignal, Signal1D):
"""
"""
_lazy = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.axes_manager.set_signal_dimension(1)
| gpl-3.0 |
rahushen/ansible | test/units/modules/network/nxos/nxos_module.py | 46 | 3583 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase
from units.modules.utils import set_module_args as _set_module_args
def set_module_args(args):
if 'provider' not in args:
args['provider'] = {'transport': args.get('transport') or 'cli'}
return _set_module_args(args)
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(module_name, name, device=''):
path = os.path.join(fixture_path, module_name, device, name)
if not os.path.exists(path):
path = os.path.join(fixture_path, module_name, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
    except ValueError:
        # not valid JSON; keep the raw string contents
        pass
fixture_data[path] = data
return data
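# Example (module and fixture names are illustrative only):
#   config = load_fixture('nxos_config', 'show_running-config', device='N9K')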
class TestNxosModule(ModuleTestCase):
def execute_module_devices(self, failed=False, changed=False, commands=None, sort=True, defaults=False):
module_name = self.module.__name__.rsplit('.', 1)[1]
local_fixture_path = os.path.join(fixture_path, module_name)
models = []
for path in os.listdir(local_fixture_path):
path = os.path.join(local_fixture_path, path)
if os.path.isdir(path):
models.append(os.path.basename(path))
if not models:
models = ['']
retvals = {}
for model in models:
retvals[model] = self.execute_module(failed, changed, commands, sort, device=model)
return retvals
def execute_module(self, failed=False, changed=False, commands=None, sort=True, device=''):
self.load_fixtures(commands, device=device)
if failed:
result = self.failed()
self.assertTrue(result['failed'], result)
else:
result = self.changed(changed)
self.assertEqual(result['changed'], changed, result)
if commands is not None:
if sort:
self.assertEqual(sorted(commands), sorted(result['commands']), result['commands'])
else:
self.assertEqual(commands, result['commands'], result['commands'])
return result
def failed(self):
with self.assertRaises(AnsibleFailJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertTrue(result['failed'], result)
return result
def changed(self, changed=False):
with self.assertRaises(AnsibleExitJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertEqual(result['changed'], changed, result)
return result
def load_fixtures(self, commands=None, device=''):
pass
| gpl-3.0 |
40423132/2017springcd_hw | local_publishconf.py | 188 | 1674 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
# Because publishconf.py is read after pelicanconf.py, any variable set in both files takes the value from the later-loaded publishconf.py.
# Note: for the Tipue search result links to work correctly locally, './' must be used.
SITEURL = './'
# This setting is used for checking the static pages locally, hence the relative URLs
RELATIVE_URLS = True
# To let Tipue search work both locally and on gh-pages, a different theme may be needed for each
THEME = 'theme/pelican-bootstrap3_local'
#BOOTSTRAP_THEME = 'readable'
#BOOTSTRAP_THEME = 'readable-old'
BOOTSTRAP_THEME = 'united'
#PYGMENTS_STYLE = 'paraiso-drak'
#PYGMENTS_STYLE = 'fruity'
# To stay compatible with render_math, fruity has to be dropped
PYGMENTS_STYLE = 'monokai'
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
#DISQUS_SITENAME = "kmolab"
#GOOGLE_ANALYTICS = ""
# Posts take their date from the file system date of the md files; no manual setting is needed
DEFAULT_DATE = 'fs'
# Local code highlighting
MD_EXTENSIONS = ['fenced_code', 'extra', 'codehilite(linenums=True)']
# To archive posts by date, use:
#ARTICLE_URL = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html'
#ARTICLE_SAVE_AS = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html'
PAGE_URL = 'pages/{slug}/'
PAGE_SAVE_AS = 'pages/{slug}/index.html'
SHOW_ARTICLE_AUTHOR = True
| agpl-3.0 |
edquist/autopyfactory | autopyfactory/plugins/batchsubmit/CondorGRAMBatchSubmitPlugin.py | 1 | 3021 | #!/bin/env python
#
# AutoPyfactory batch plugin for Condor
#
from CondorCEBatchSubmitPlugin import CondorCEBatchSubmitPlugin
from autopyfactory import jsd
class CondorGRAMBatchSubmitPlugin(CondorCEBatchSubmitPlugin):
def __init__(self, apfqueue, config=None):
if not config:
qcl = apfqueue.factory.qcl
else:
qcl = config
newqcl = qcl.clone().filterkeys('batchsubmit.condorgram', 'batchsubmit.condorce')
super(CondorGRAMBatchSubmitPlugin, self).__init__(apfqueue, config=newqcl)
try:
self.globus = self._globusrsl(apfqueue, qcl)
except Exception, e:
self.log.error("Caught exception: %s " % str(e))
raise
self.log.info('CondorGRAMBatchSubmitPlugin: Object initialized.')
def _globusrsl(self, apfqueue, qcl):
'''
        Tries to build the globusrsl line.
Entries have been renamed by the subplugins (e.g. CondorGT2), with new patterns:
-- batchsubmit.condorgram.gram.XYZ
-- batchsubmit.condorgram.gram.globusrsl
-- batchsubmit.condorgram.gram.globusrsladd
'''
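        # Illustrative outcome (option names and values are made up): with
        # batchsubmit.condorgram.gram.queue=prod and ...gram.maxtime=60 set,
        # and no explicit globusrsl, this returns "(queue=prod)(maxtime=60)".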
self.log.debug('Starting.') # with new architecture there is no logger yet
globus = ""
optlist = []
for opt in qcl.options(self.apfqname):
if opt.startswith('batchsubmit.condorgram.gram.') and\
opt != 'batchsubmit.condorgram.gram.globusrsl' and\
opt != 'batchsubmit.condorgram.gram.globusrsladd':
optlist.append(opt)
globusrsl = qcl.generic_get(self.apfqname, 'batchsubmit.condorgram.gram.globusrsl')
globusrsladd = qcl.generic_get(self.apfqname, 'batchsubmit.condorgram.gram.globusrsladd')
if globusrsl:
globus = globusrsl
else:
for opt in optlist:
key = opt.split('batchsubmit.condorgram.gram.')[1]
value = qcl.generic_get(self.apfqname, opt)
if value != "":
globus += '(%s=%s)' %(key, value)
if globusrsladd:
globus += globusrsladd
self.log.debug('Leaving with value = %s.' %globus) # with new architecture there is no logger yet
return globus
def _addJSD(self):
'''
add things to the JSD object
'''
self.log.debug('CondorGRAMBatchSubmitPlugin.addJSD: Starting.')
# -- globusrsl --
if self.globus:
self.JSD.add('globusrsl', '%s' %self.globus)
###globusrsl = "globusrsl=(jobtype=%s)" %self.jobtype
###if self.queue:
### globusrsl += "(queue=%s)" % self.queue
###self.JSD.add(globusrsl)
# -- fixed stuffs --
self.JSD.add('copy_to_spool', 'True')
super(CondorGRAMBatchSubmitPlugin, self)._addJSD()
self.log.debug('CondorGRAMBatchSubmitPlugin.addJSD: Leaving.')
| gpl-3.0 |
ff94315/hiwifi-openwrt-HC5661-HC5761 | staging_dir/host/lib/python2.7/lib2to3/btm_matcher.py | 326 | 6834 | """A bottom-up tree matching algorithm implementation meant to speed
up 2to3's matching process. After the tree patterns are reduced to
their rarest linear path, a linear Aho-Corasick automaton is
created. The linear automaton traverses the linear paths from the
leaves to the root of the AST and returns a set of nodes for further
matching. This reduces significantly the number of candidate nodes."""
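# A minimal usage sketch (fixer objects and the parse tree come from lib2to3's
# RefactoringTool machinery; the names below are illustrative only):
#
#   matcher = BottomMatcher()
#   for fixer in fixers:                  # each fixer exposes .pattern_tree
#       matcher.add_fixer(fixer)
#   matches = matcher.run(tree.leaves())  # dict: fixer -> [matching nodes]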
__author__ = "George Boutsioukis <[email protected]>"
import logging
import itertools
from collections import defaultdict
from . import pytree
from .btm_utils import reduce_tree
class BMNode(object):
"""Class for a node of the Aho-Corasick automaton used in matching"""
count = itertools.count()
def __init__(self):
self.transition_table = {}
self.fixers = []
self.id = next(BMNode.count)
self.content = ''
class BottomMatcher(object):
"""The main matcher class. After instantiating the patterns should
be added using the add_fixer method"""
def __init__(self):
self.match = set()
self.root = BMNode()
self.nodes = [self.root]
self.fixers = []
self.logger = logging.getLogger("RefactoringTool")
def add_fixer(self, fixer):
"""Reduces a fixer's pattern tree to a linear path and adds it
to the matcher(a common Aho-Corasick automaton). The fixer is
appended on the matching states and called when they are
reached"""
self.fixers.append(fixer)
tree = reduce_tree(fixer.pattern_tree)
linear = tree.get_linear_subpattern()
match_nodes = self.add(linear, start=self.root)
for match_node in match_nodes:
match_node.fixers.append(fixer)
def add(self, pattern, start):
"Recursively adds a linear pattern to the AC automaton"
#print("adding pattern", pattern, "to", start)
if not pattern:
#print("empty pattern")
return [start]
if isinstance(pattern[0], tuple):
#alternatives
#print("alternatives")
match_nodes = []
for alternative in pattern[0]:
#add all alternatives, and add the rest of the pattern
#to each end node
end_nodes = self.add(alternative, start=start)
for end in end_nodes:
match_nodes.extend(self.add(pattern[1:], end))
return match_nodes
else:
#single token
#not last
if pattern[0] not in start.transition_table:
#transition did not exist, create new
next_node = BMNode()
start.transition_table[pattern[0]] = next_node
else:
#transition exists already, follow
next_node = start.transition_table[pattern[0]]
if pattern[1:]:
end_nodes = self.add(pattern[1:], start=next_node)
else:
end_nodes = [next_node]
return end_nodes
def run(self, leaves):
"""The main interface with the bottom matcher. The tree is
traversed from the bottom using the constructed
automaton. Nodes are only checked once as the tree is
retraversed. When the automaton fails, we give it one more
shot(in case the above tree matches as a whole with the
rejected leaf), then we break for the next leaf. There is the
special case of multiple arguments(see code comments) where we
recheck the nodes
Args:
The leaves of the AST tree to be matched
Returns:
A dictionary of node matches with fixers as the keys
"""
current_ac_node = self.root
results = defaultdict(list)
for leaf in leaves:
current_ast_node = leaf
while current_ast_node:
current_ast_node.was_checked = True
for child in current_ast_node.children:
# multiple statements, recheck
if isinstance(child, pytree.Leaf) and child.value == u";":
current_ast_node.was_checked = False
break
if current_ast_node.type == 1:
#name
node_token = current_ast_node.value
else:
node_token = current_ast_node.type
if node_token in current_ac_node.transition_table:
#token matches
current_ac_node = current_ac_node.transition_table[node_token]
for fixer in current_ac_node.fixers:
                        if fixer not in results:
results[fixer] = []
results[fixer].append(current_ast_node)
else:
#matching failed, reset automaton
current_ac_node = self.root
if (current_ast_node.parent is not None
and current_ast_node.parent.was_checked):
#the rest of the tree upwards has been checked, next leaf
break
#recheck the rejected node once from the root
if node_token in current_ac_node.transition_table:
#token matches
current_ac_node = current_ac_node.transition_table[node_token]
for fixer in current_ac_node.fixers:
                        if fixer not in results:
results[fixer] = []
results[fixer].append(current_ast_node)
current_ast_node = current_ast_node.parent
return results
def print_ac(self):
"Prints a graphviz diagram of the BM automaton(for debugging)"
print("digraph g{")
def print_node(node):
for subnode_key in node.transition_table.keys():
subnode = node.transition_table[subnode_key]
print("%d -> %d [label=%s] //%s" %
(node.id, subnode.id, type_repr(subnode_key), str(subnode.fixers)))
if subnode_key == 1:
print(subnode.content)
print_node(subnode)
print_node(self.root)
print("}")
# taken from pytree.py for debugging; only used by print_ac
_type_reprs = {}
def type_repr(type_num):
global _type_reprs
if not _type_reprs:
from .pygram import python_symbols
# printing tokens is possible but not as useful
# from .pgen2 import token // token.__dict__.items():
for name, val in python_symbols.__dict__.items():
if type(val) == int: _type_reprs[val] = name
return _type_reprs.setdefault(type_num, type_num)
| gpl-2.0 |
manishpatell/erpcustomizationssaiimpex123qwe | addons/purchase_requisition/wizard/purchase_requisition_partner.py | 373 | 2320 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class purchase_requisition_partner(osv.osv_memory):
_name = "purchase.requisition.partner"
_description = "Purchase Requisition Partner"
_columns = {
'partner_id': fields.many2one('res.partner', 'Supplier', required=True,domain=[('supplier', '=', True)]),
}
def view_init(self, cr, uid, fields_list, context=None):
if context is None:
context = {}
res = super(purchase_requisition_partner, self).view_init(cr, uid, fields_list, context=context)
record_id = context and context.get('active_id', False) or False
tender = self.pool.get('purchase.requisition').browse(cr, uid, record_id, context=context)
if not tender.line_ids:
raise osv.except_osv(_('Error!'), _('Define product(s) you want to include in the call for bids.'))
return res
def create_order(self, cr, uid, ids, context=None):
active_ids = context and context.get('active_ids', [])
data = self.browse(cr, uid, ids, context=context)[0]
self.pool.get('purchase.requisition').make_purchase_order(cr, uid, active_ids, data.partner_id.id, context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
noobnl/android_kernel_samsung_d2-jb_2.5.1 | Documentation/target/tcm_mod_builder.py | 4981 | 41422 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
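# fabric_mod_port / fabric_mod_init_port hold the per-protocol naming prefixes
# filled in by the include builders below: "lport"/"nport" for FC and
# "tport"/"iport" for SAS and iSCSI.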
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
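	# buf accumulates the *_fabric.c stub bodies and bufi the matching
	# *_fabric.h prototypes; one stub is emitted per fabric_ops entry matched below.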
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
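#
# Illustrative invocation (assuming the script is saved as tcm_mod_builder.py;
# the module name below is made up, and -p must be one of FC, SAS or iSCSI):
#
#   python tcm_mod_builder.py -m tcm_nab5000 -p iSCSI
#
# Under those assumptions it creates drivers/target/tcm_nab5000/ and fills it
# with the base includes, fabric ops stubs, configfs glue, Makefile and Kconfig
# generated above, optionally appending the new module to
# drivers/target/Makefile and drivers/target/Kconfig.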
| gpl-2.0 |
lynxis/tunneldigger-testing | test_nose.py | 1 | 1858 | #!/usr/bin/env python3
import logging
import lxc
import os

import tunneldigger

# random hash
CONTEXT = None
# lxc container
SERVER = None
CLIENT = None
# pids of tunneldigger client and server
SERVER_PID = None
CLIENT_PID = None

LOG = logging.getLogger("test_nose")


def setup_module():
    global CONTEXT, SERVER, CLIENT, SERVER_PID, CLIENT_PID
    CONTEXT = tunneldigger.get_random_context()
    LOG.info("using context %s", CONTEXT)
    CLIENT, SERVER = tunneldigger.prepare_containers(CONTEXT, os.environ['CLIENT_REV'], os.environ['SERVER_REV'])
    SERVER_PID = tunneldigger.run_server(SERVER)
    CLIENT_PID = tunneldigger.run_client(CLIENT)

    # explicitly no Exception when the ping fails here;
    # it's better to poll the client for a ping than to do a long sleep
    tunneldigger.check_ping(CLIENT, '192.168.254.1', 20)


def teardown_module():
    tunneldigger.clean_up(CONTEXT, CLIENT, SERVER)


class TestTunneldigger(object):
    def test_ping_tunneldigger_server(self):
        """Even though the ping is already checked in setup_module, we want the failure to be
        reported here: if the check fails in setup_module, nose returns an UNKNOWN state,
        because the setup failed and not a "test"."""
        if not tunneldigger.check_ping(CLIENT, '192.168.254.1', 3):
            raise RuntimeError("fail to ping server")

    def test_wget_tunneldigger_server(self):
        ret = CLIENT.attach_wait(lxc.attach_run_command, ["wget", "-t", "2", "-T", "4", "http://192.168.254.1:8080/test_8m", '-O', '/dev/null'])
        if ret != 0:
            raise RuntimeError("failed to run the tests")

    def test_ensure_tunnel_up_for_5m(self):
        # get id of l2tp0 iface
        ## ip -o l | awk -F: '{ print $1 }'
        # sleep 5 minutes
        # get id of l2tp0 iface
        ## ip -o l | awk -F: '{ print $1 }'
        # assert early_id == later_id
        pass
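# Illustrative only: one way to run this suite locally (assumes nose and the
# helper tunneldigger module are available, and that CLIENT_REV/SERVER_REV
# name the git revisions to build inside the LXC containers):
#
#   CLIENT_REV=master SERVER_REV=master nosetests -v test_nose.py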
| mit |
ganeshgore/myremolab | server/src/weblab/admin/bot/information_retriever.py | 2 | 3812 | #!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
#
# Copyright (C) 2005-2009 University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <[email protected]>
#
import os, glob
from weblab.admin.bot.misc import show_time, flush
import cPickle as pickle
FILE_NAME_TEMPLATE = "logs" + os.sep + "botclient_%s__SCEN_%s_CONFIG_0.pickle"
FILL_NUMBER = 2
RANGES = [
    lambda : xrange(1, 5),
    lambda : xrange(5, 151, 5)
]

def get_raw_information(date, verbose = True):
    file_name = FILE_NAME_TEMPLATE % (date, '%s')
    raw_information_filename = 'raw_information_%s.dump' % date

    if os.path.exists(raw_information_filename):
        if verbose:
            print "Retrieving cache", show_time()
            flush()
        raw_information = pickle.load(open(raw_information_filename))
        if verbose:
            print "Retrieved cached raw_information", show_time()
            flush()
        return raw_information

    if verbose:
        print "Generating new raw_information", show_time()
        flush()

    # else generate this information file
    def calculate_positions(initial):
        data = {}
        for r in RANGES:
            for i in r():
                data[i] = str(initial).zfill(FILL_NUMBER)
                initial += 1
        return data, initial

    def calculate_protocols():
        # returns
        #   { "JSON" : { 1 : "01", 2 : "02", 5 : "03" ... } ...}
        # being each "01", "02" the name of the file for
        # that protocol and for that number of users
        protocols = {
            "JSON"   : {},
            "SOAP"   : {},
            "XMLRPC" : {}
        }
        protocols = {}

        data, initial = calculate_positions(0)
        protocols["JSON"] = data
        data, initial = calculate_positions(initial)
        protocols["SOAP"] = data
        data, _ = calculate_positions(initial)
        protocols["XMLRPC"] = data
        return protocols

    protocols = calculate_protocols()

    def get_results(protocol, number):
        #
        # Given a protocol and a number of users (1,2,3,4,5,10,15...),
        # it returns the results stored in that scenario
        #
        found_resources = sorted(glob.glob(file_name % "*"), lambda x,y: len(x) - len(y))
        if len(found_resources) == 0:
            raise Exception("No similar file found: %s" % file_name)
        regular_length = len(found_resources[len(found_resources)/2]) # Take the one in the middle
        number_length = regular_length - len(file_name % '')
        filename = file_name % str(int(protocols[protocol][number])).zfill(number_length)
        results = pickle.load(open(filename))
        return results

    def generate_data(protocol):
        # Given a protocol, it returns the following tuple
        #
        # x = [1,2,3,4,5,10,15,20,25]
        # y = [results_of_1, results_of_2 ...]
        #
        x = []
        y = []
        for r in RANGES:
            for n in r():
                x.append(n)
                results = get_results(protocol, n)
                y.append(results)
        return x,y

    raw_information = {}
    for protocol in protocols.keys():
        x, y = generate_data(protocol)
        raw_information[protocol] = (x,y)

    # raw_information stores:
    #  { "JSON" : ([1,2,3,4,5,10...], [results_of_1, results_of_2, results_of_3 ...]) }

    # Save as a cache
    pickle.dump(raw_information, open(raw_information_filename,'w'))

    if verbose:
        print "Raw_information generated", show_time()
        flush()

    return raw_information
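# Sketch of the expected usage (the date string is hypothetical, and the
# per-scenario pickle files under logs/ must already exist for this to work):
#
#   raw = get_raw_information("2009_11_27")
#   xs, ys = raw["JSON"]   # xs: user counts, ys: result objects per scenario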
| bsd-2-clause |
captiosus/treadmill | tests/runtime/linux/finish_test.py | 1 | 29795 | """Unit test for treadmill.runtime.linux._finish.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import io
import json
import os
import shutil
import tarfile
import tempfile
import time
import unittest
import mock
import treadmill
import treadmill.rulefile
from treadmill import firewall
from treadmill import fs
from treadmill import iptables
from treadmill import supervisor
from treadmill.apptrace import events
from treadmill.runtime.linux import _finish as app_finish
class LinuxRuntimeFinishTest(unittest.TestCase):
"""Tests for treadmill.runtime.linux._finish"""
def setUp(self):
# Access protected module _base_service
# pylint: disable=W0212
self.root = tempfile.mkdtemp()
self.tm_env = mock.Mock(
root=self.root,
# nfs_dir=os.path.join(self.root, 'mnt', 'nfs'),
apps_dir=os.path.join(self.root, 'apps'),
archives_dir=os.path.join(self.root, 'archives'),
metrics_dir=os.path.join(self.root, 'metrics'),
svc_cgroup=mock.Mock(
spec_set=treadmill.services._base_service.ResourceService,
),
svc_localdisk=mock.Mock(
spec_set=treadmill.services._base_service.ResourceService,
),
svc_network=mock.Mock(
spec_set=treadmill.services._base_service.ResourceService,
),
rules=mock.Mock(
spec_set=treadmill.rulefile.RuleMgr,
)
)
def tearDown(self):
if self.root and os.path.isdir(self.root):
shutil.rmtree(self.root)
@mock.patch('shutil.copy', mock.Mock())
@mock.patch('treadmill.appevents.post', mock.Mock())
@mock.patch('treadmill.utils.datetime_utcnow', mock.Mock(
return_value=datetime.datetime(2015, 1, 22, 14, 14, 36, 537918)))
@mock.patch('treadmill.appcfg.manifest.read', mock.Mock())
@mock.patch('treadmill.runtime.linux._finish._kill_apps_by_root',
mock.Mock())
@mock.patch('treadmill.sysinfo.hostname',
mock.Mock(return_value='xxx.xx.com'))
@mock.patch('treadmill.fs.archive_filesystem',
mock.Mock(return_value=True))
@mock.patch('treadmill.apphook.cleanup', mock.Mock())
@mock.patch('treadmill.iptables.rm_ip_set', mock.Mock())
@mock.patch('treadmill.rrdutils.flush_noexc', mock.Mock())
@mock.patch('treadmill.subproc.check_call', mock.Mock())
@mock.patch('treadmill.iptables.rm_ip_set', mock.Mock())
@mock.patch('treadmill.supervisor.control_service', mock.Mock())
@mock.patch('treadmill.zkutils.get',
mock.Mock(return_value={
'server': 'nonexist',
'auth': 'nonexist',
}))
def test_finish(self):
"""Tests container finish procedure and freeing of the resources.
"""
# Access protected module _kill_apps_by_root
# pylint: disable=W0212
manifest = {
'app': 'proid.myapp',
'cell': 'test',
'cpu': '100%',
'disk': '100G',
'environment': 'dev',
'memory': '100M',
'name': 'proid.myapp#001',
'proid': 'foo',
'shared_network': False,
'task': '001',
'uniqueid': '0000000ID1234',
'archive': [
'/var/tmp/treadmill'
],
'endpoints': [
{
'port': 8000,
'name': 'http',
'real_port': 5000,
'proto': 'tcp',
},
{
'port': 54321,
'type': 'infra',
'name': 'ssh',
'real_port': 54321,
'proto': 'tcp',
}
],
'ephemeral_ports': {
'tcp': [45024],
'udp': [62422],
},
'services': [
{
'name': 'web_server',
'command': '/bin/false',
'restart': {
'limit': 3,
'interval': 60,
},
}
],
'vring': {
'some': 'settings'
}
}
treadmill.appcfg.manifest.read.return_value = manifest
app_unique_name = 'proid.myapp-001-0000000ID1234'
mock_cgroup_client = self.tm_env.svc_cgroup.make_client.return_value
mock_ld_client = self.tm_env.svc_localdisk.make_client.return_value
mock_nwrk_client = self.tm_env.svc_network.make_client.return_value
localdisk = {
'block_dev': '/dev/foo',
}
mock_ld_client.get.return_value = localdisk
network = {
'vip': '192.168.0.2',
'gateway': '192.168.254.254',
'veth': 'testveth.0',
'external_ip': '172.31.81.67',
}
mock_nwrk_client.get.return_value = network
app_dir = os.path.join(self.tm_env.apps_dir, app_unique_name)
data_dir = os.path.join(app_dir, 'data')
# Create content in app root directory, verify that it is archived.
fs.mkdir_safe(os.path.join(data_dir, 'root', 'xxx'))
fs.mkdir_safe(os.path.join(data_dir, 'services'))
# Simulate daemontools finish script, marking the app is done.
with io.open(os.path.join(data_dir, 'exitinfo'), 'wb') as f:
json.dump(
{'service': 'web_server', 'return_code': 0, 'signal': 0},
f
)
app_finish.finish(self.tm_env, app_dir)
treadmill.supervisor.control_service.assert_called_with(
app_dir, supervisor.ServiceControlAction.down,
wait=supervisor.ServiceWaitAction.down
)
# All resource service clients are properly created
self.tm_env.svc_cgroup.make_client.assert_called_with(
os.path.join(data_dir, 'resources', 'cgroups')
)
self.tm_env.svc_localdisk.make_client.assert_called_with(
os.path.join(data_dir, 'resources', 'localdisk')
)
self.tm_env.svc_network.make_client.assert_called_with(
os.path.join(data_dir, 'resources', 'network')
)
treadmill.runtime.linux._finish._kill_apps_by_root.assert_called_with(
os.path.join(data_dir, 'root')
)
# Verify that we tested the archiving for the app root volume
treadmill.fs.archive_filesystem.assert_called_with(
'/dev/foo',
os.path.join(data_dir, 'root'),
os.path.join(data_dir,
'001_xxx.xx.com_20150122_141436537918.tar'),
mock.ANY
)
# Cleanup the block device
mock_ld_client.delete.assert_called_with(app_unique_name)
# Cleanup the cgroup resource
mock_cgroup_client.delete.assert_called_with(app_unique_name)
# Cleanup network resources
mock_nwrk_client.get.assert_called_with(app_unique_name)
self.tm_env.rules.unlink_rule.assert_has_calls(
[
mock.call(chain=iptables.PREROUTING_DNAT,
rule=firewall.DNATRule(
proto='tcp',
dst_ip='172.31.81.67', dst_port=5000,
new_ip='192.168.0.2', new_port=8000
),
owner=app_unique_name),
mock.call(chain=iptables.POSTROUTING_SNAT,
rule=firewall.SNATRule(
proto='tcp',
src_ip='192.168.0.2', src_port=8000,
new_ip='172.31.81.67', new_port=5000
),
owner=app_unique_name),
mock.call(chain=iptables.PREROUTING_DNAT,
rule=firewall.DNATRule(
proto='tcp',
dst_ip='172.31.81.67', dst_port=54321,
new_ip='192.168.0.2', new_port=54321
),
owner=app_unique_name),
mock.call(chain=iptables.POSTROUTING_SNAT,
rule=firewall.SNATRule(
proto='tcp',
src_ip='192.168.0.2', src_port=54321,
new_ip='172.31.81.67', new_port=54321
),
owner=app_unique_name),
mock.call(chain=iptables.PREROUTING_DNAT,
rule=firewall.DNATRule(
proto='tcp',
dst_ip='172.31.81.67', dst_port=45024,
new_ip='192.168.0.2', new_port=45024
),
owner=app_unique_name),
mock.call(chain=iptables.PREROUTING_DNAT,
rule=firewall.DNATRule(
proto='udp',
dst_ip='172.31.81.67', dst_port=62422,
new_ip='192.168.0.2', new_port=62422
),
owner=app_unique_name),
],
any_order=True
)
self.assertEqual(self.tm_env.rules.unlink_rule.call_count, 6)
treadmill.iptables.rm_ip_set.assert_has_calls(
[
mock.call(treadmill.iptables.SET_INFRA_SVC,
'192.168.0.2,tcp:54321'),
mock.call(treadmill.iptables.SET_INFRA_SVC,
'192.168.0.2,tcp:45024'),
mock.call(treadmill.iptables.SET_INFRA_SVC,
'192.168.0.2,udp:62422'),
mock.call(treadmill.iptables.SET_VRING_CONTAINERS,
'192.168.0.2'),
],
any_order=True
)
self.assertEqual(treadmill.iptables.rm_ip_set.call_count, 4)
mock_nwrk_client.delete.assert_called_with(app_unique_name)
treadmill.appevents.post.assert_called_with(
mock.ANY,
events.FinishedTraceEvent(
instanceid='proid.myapp#001',
rc=0,
signal=0,
payload={
'service': 'web_server',
'signal': 0,
'return_code': 0
}
)
)
treadmill.rrdutils.flush_noexc.assert_called_with(
os.path.join(self.root, 'metrics', 'apps',
app_unique_name + '.rrd')
)
shutil.copy.assert_called_with(
os.path.join(self.root, 'metrics', 'apps',
app_unique_name + '.rrd'),
os.path.join(data_dir, 'metrics.rrd')
)
@mock.patch('shutil.copy', mock.Mock())
@mock.patch('treadmill.appevents.post', mock.Mock())
@mock.patch('treadmill.apphook.cleanup', mock.Mock())
@mock.patch('treadmill.runtime.linux._finish._kill_apps_by_root',
mock.Mock())
@mock.patch('treadmill.appcfg.manifest.read', mock.Mock())
@mock.patch('treadmill.sysinfo.hostname',
mock.Mock(return_value='myhostname'))
@mock.patch('treadmill.cgroups.delete', mock.Mock())
@mock.patch('treadmill.cgutils.reset_memory_limit_in_bytes',
mock.Mock(return_value=[]))
@mock.patch('treadmill.fs.archive_filesystem',
mock.Mock(return_value=True))
@mock.patch('treadmill.subproc.check_call', mock.Mock())
@mock.patch('treadmill.iptables.rm_ip_set', mock.Mock())
@mock.patch('treadmill.supervisor.control_service', mock.Mock())
@mock.patch('treadmill.zkutils.get', mock.Mock(return_value=None))
@mock.patch('treadmill.rrdutils.flush_noexc', mock.Mock())
def test_finish_error(self):
"""Tests container finish procedure when app is improperly finished."""
manifest = {
'app': 'proid.myapp',
'cell': 'test',
'cpu': '100%',
'disk': '100G',
'environment': 'dev',
'memory': '100M',
'name': 'proid.myapp#001',
'proid': 'foo',
'shared_network': False,
'task': '001',
'uniqueid': '0000000001234',
'archive': [
'/var/tmp/treadmill'
],
'endpoints': [
{
'port': 8000,
'name': 'http',
'real_port': 5000,
'proto': 'tcp',
}
],
'services': [
{
'name': 'web_server',
'command': '/bin/false',
'restart': {
'limit': 3,
'interval': 60,
},
}
],
'ephemeral_ports': {
'tcp': [],
'udp': [],
},
'vring': {
'some': 'settings'
}
}
treadmill.appcfg.manifest.read.return_value = manifest
app_unique_name = 'proid.myapp-001-0000000001234'
mock_ld_client = self.tm_env.svc_localdisk.make_client.return_value
localdisk = {
'block_dev': '/dev/foo',
}
mock_ld_client.get.return_value = localdisk
mock_nwrk_client = self.tm_env.svc_network.make_client.return_value
network = {
'vip': '192.168.0.2',
'gateway': '192.168.254.254',
'veth': 'testveth.0',
'external_ip': '172.31.81.67',
}
mock_nwrk_client.get.return_value = network
app_dir = os.path.join(self.tm_env.apps_dir, app_unique_name)
data_dir = os.path.join(app_dir, 'data')
# Create content in app root directory, verify that it is archived.
fs.mkdir_safe(os.path.join(data_dir, 'root', 'xxx'))
fs.mkdir_safe(os.path.join(data_dir, 'services'))
# Simulate daemontools finish script, marking the app is done.
with io.open(os.path.join(data_dir, 'exitinfo'), 'wb') as f:
json.dump(
{'service': 'web_server', 'return_code': 1, 'signal': 3},
fp=f
)
app_finish.finish(self.tm_env, app_dir)
treadmill.appevents.post.assert_called_with(
mock.ANY,
events.FinishedTraceEvent(
instanceid='proid.myapp#001',
rc=1,
signal=3,
payload={
'service': 'web_server',
'signal': 3,
'return_code': 1,
}
)
)
treadmill.rrdutils.flush_noexc.assert_called_with(
os.path.join(self.root, 'metrics', 'apps',
app_unique_name + '.rrd')
)
shutil.copy.assert_called_with(
os.path.join(self.tm_env.metrics_dir, 'apps',
app_unique_name + '.rrd'),
os.path.join(data_dir, 'metrics.rrd')
)
@mock.patch('shutil.copy', mock.Mock())
@mock.patch('treadmill.appevents.post', mock.Mock())
@mock.patch('treadmill.appcfg.manifest.read', mock.Mock())
@mock.patch('treadmill.apphook.cleanup', mock.Mock())
@mock.patch('treadmill.runtime.linux._finish._kill_apps_by_root',
mock.Mock())
@mock.patch('treadmill.sysinfo.hostname',
mock.Mock(return_value='hostname'))
@mock.patch('treadmill.fs.archive_filesystem',
mock.Mock(return_value=True))
@mock.patch('treadmill.rulefile.RuleMgr.unlink_rule', mock.Mock())
@mock.patch('treadmill.subproc.check_call', mock.Mock())
@mock.patch('treadmill.iptables.rm_ip_set', mock.Mock())
@mock.patch('treadmill.zkutils.get', mock.Mock(return_value=None))
@mock.patch('treadmill.rrdutils.flush_noexc', mock.Mock())
def test_finish_aborted(self):
"""Tests container finish procedure when node is aborted.
"""
manifest = {
'app': 'proid.myapp',
'cell': 'test',
'cpu': '100%',
'disk': '100G',
'environment': 'dev',
'host_ip': '172.31.81.67',
'memory': '100M',
'name': 'proid.myapp#001',
'proid': 'foo',
'shared_network': False,
'task': '001',
'uniqueid': '0000000ID1234',
'archive': [
'/var/tmp/treadmill'
],
'endpoints': [
{
'port': 8000,
'name': 'http',
'real_port': 5000,
'proto': 'tcp',
}
],
'services': [
{
'name': 'web_server',
'command': '/bin/false',
'restart': {
'limit': 3,
'interval': 60,
},
}
],
'ephemeral_ports': {
'tcp': [],
'udp': [],
},
'vring': {
'some': 'settings'
}
}
treadmill.appcfg.manifest.read.return_value = manifest
app_unique_name = 'proid.myapp-001-0000000ID1234'
mock_ld_client = self.tm_env.svc_localdisk.make_client.return_value
localdisk = {
'block_dev': '/dev/foo',
}
mock_ld_client.get.return_value = localdisk
mock_nwrk_client = self.tm_env.svc_network.make_client.return_value
network = {
'vip': '192.168.0.2',
'gateway': '192.168.254.254',
'veth': 'testveth.0',
'external_ip': '172.31.81.67',
}
mock_nwrk_client.get.return_value = network
app_dir = os.path.join(self.root, 'apps', app_unique_name)
data_dir = os.path.join(app_dir, 'data')
# Create content in app root directory, verify that it is archived.
fs.mkdir_safe(os.path.join(data_dir, 'root', 'xxx'))
fs.mkdir_safe(os.path.join(data_dir, 'services'))
# Simulate daemontools finish script, marking the app is done.
with io.open(os.path.join(data_dir, 'aborted'), 'wb') as aborted:
aborted.write('{"why": "reason", "payload": "test"}')
app_finish.finish(self.tm_env, app_dir)
treadmill.appevents.post.assert_called_with(
mock.ANY,
events.AbortedTraceEvent(
instanceid='proid.myapp#001',
why='reason',
payload='test'
)
)
treadmill.rrdutils.flush_noexc.assert_called_with(
os.path.join(self.root, 'metrics', 'apps',
app_unique_name + '.rrd')
)
shutil.copy.assert_called_with(
os.path.join(self.root, 'metrics', 'apps',
app_unique_name + '.rrd'),
os.path.join(data_dir, 'metrics.rrd')
)
treadmill.appevents.post.reset()
with io.open(os.path.join(data_dir, 'aborted'), 'w') as aborted:
aborted.write('')
app_finish.finish(self.tm_env, app_dir)
treadmill.appevents.post.assert_called_with(
mock.ANY,
events.AbortedTraceEvent(
instanceid='proid.myapp#001',
why='unknown',
payload=None
)
)
@mock.patch('treadmill.subproc.check_call', mock.Mock(return_value=0))
def test_finish_no_manifest(self):
"""Test app finish on directory with no app.json.
"""
app_finish.finish(self.tm_env, self.root)
@mock.patch('shutil.copy', mock.Mock())
@mock.patch('treadmill.appevents.post', mock.Mock())
@mock.patch('treadmill.apphook.cleanup', mock.Mock())
@mock.patch('treadmill.utils.datetime_utcnow', mock.Mock(
return_value=datetime.datetime(2015, 1, 22, 14, 14, 36, 537918)))
@mock.patch('treadmill.appcfg.manifest.read', mock.Mock())
@mock.patch('treadmill.runtime.linux._finish._kill_apps_by_root',
mock.Mock())
@mock.patch('treadmill.sysinfo.hostname',
mock.Mock(return_value='xxx.ms.com'))
@mock.patch('treadmill.fs.archive_filesystem',
mock.Mock(return_value=True))
@mock.patch('treadmill.iptables.rm_ip_set', mock.Mock())
@mock.patch('treadmill.rrdutils.flush_noexc', mock.Mock())
@mock.patch('treadmill.subproc.check_call', mock.Mock())
@mock.patch('treadmill.iptables.rm_ip_set', mock.Mock())
@mock.patch('treadmill.supervisor.control_service', mock.Mock())
@mock.patch('treadmill.zkutils.get',
mock.Mock(return_value={
'server': 'nonexist',
'auth': 'nonexist',
}))
@mock.patch('treadmill.zkutils.put', mock.Mock())
def test_finish_no_resources(self):
"""Test app finish on directory when all resources are already freed.
"""
# Access protected module _kill_apps_by_root
# pylint: disable=W0212
manifest = {
'app': 'proid.myapp',
'cell': 'test',
'cpu': '100%',
'disk': '100G',
'environment': 'dev',
'memory': '100M',
'name': 'proid.myapp#001',
'proid': 'foo',
'shared_network': False,
'task': '001',
'uniqueid': '0000000ID1234',
'archive': [
'/var/tmp/treadmill'
],
'endpoints': [
{
'port': 8000,
'name': 'http',
'real_port': 5000
},
{
'port': 54321,
'type': 'infra',
'name': 'ssh',
'real_port': 54321
}
],
'ephemeral_ports': {
'tcp': [45024],
'udp': [62422],
},
'services': [
{
'command': '/bin/false',
'restart_count': 3,
'name': 'web_server'
}
],
'vring': {
'some': 'settings'
}
}
treadmill.appcfg.manifest.read.return_value = manifest
app_unique_name = 'proid.myapp-001-0000000ID1234'
mock_cgroup_client = self.tm_env.svc_cgroup.make_client.return_value
mock_ld_client = self.tm_env.svc_localdisk.make_client.return_value
mock_nwrk_client = self.tm_env.svc_network.make_client.return_value
# All resource managers return None
mock_cgroup_client.get.return_value = None
mock_ld_client.get.return_value = None
mock_nwrk_client.get.return_value = None
app_dir = os.path.join(self.tm_env.apps_dir, app_unique_name)
data_dir = os.path.join(app_dir, 'data')
# Create content in app root directory, verify that it is archived.
fs.mkdir_safe(os.path.join(data_dir, 'root', 'xxx'))
fs.mkdir_safe(os.path.join(data_dir, 'services'))
# Simulate daemontools finish script, marking the app is done.
with io.open(os.path.join(data_dir, 'exitinfo'), 'wb') as f:
json.dump(
{'service': 'web_server', 'return_code': 0, 'signal': 0},
f
)
treadmill.runtime.linux._finish.finish(self.tm_env, app_dir)
treadmill.supervisor.control_service.assert_called_with(
app_dir, supervisor.ServiceControlAction.down,
wait=supervisor.ServiceWaitAction.down
)
# All resource service clients are properly created
self.tm_env.svc_cgroup.make_client.assert_called_with(
os.path.join(data_dir, 'resources', 'cgroups')
)
self.tm_env.svc_localdisk.make_client.assert_called_with(
os.path.join(data_dir, 'resources', 'localdisk')
)
self.tm_env.svc_network.make_client.assert_called_with(
os.path.join(data_dir, 'resources', 'network')
)
treadmill.runtime.linux._finish._kill_apps_by_root.assert_called_with(
os.path.join(data_dir, 'root')
)
# Cleanup the network resources
mock_nwrk_client.get.assert_called_with(app_unique_name)
# Cleanup the block device
mock_ld_client.delete.assert_called_with(app_unique_name)
# Cleanup the cgroup resource
mock_cgroup_client.delete.assert_called_with(app_unique_name)
treadmill.appevents.post.assert_called_with(
mock.ANY,
events.FinishedTraceEvent(
instanceid='proid.myapp#001',
rc=0,
signal=0,
payload={
'service': 'web_server',
'signal': 0,
'return_code': 0
}
)
)
treadmill.rrdutils.flush_noexc.assert_called_with(
os.path.join(self.root, 'metrics', 'apps',
app_unique_name + '.rrd')
)
shutil.copy.assert_called_with(
os.path.join(self.root, 'metrics', 'apps',
app_unique_name + '.rrd'),
os.path.join(data_dir, 'metrics.rrd')
)
def test__copy_metrics(self):
"""Test that metrics are copied safely.
"""
# Access protected module _copy_metrics
# pylint: disable=W0212
with io.open(os.path.join(self.root, 'in.rrd'), 'w'):
pass
app_finish._copy_metrics(os.path.join(self.root, 'in.rrd'),
self.root)
self.assertTrue(os.path.exists(os.path.join(self.root, 'metrics.rrd')))
os.unlink(os.path.join(self.root, 'metrics.rrd'))
app_finish._copy_metrics(os.path.join(self.root, 'nosuchthing.rrd'),
self.root)
self.assertFalse(
os.path.exists(os.path.join(self.root, 'metrics.rrd')))
def test__archive_logs(self):
"""Tests archiving local logs."""
# Access protected module _archive_logs
#
# pylint: disable=W0212
data_dir = os.path.join(self.root, 'xxx.yyy-1234-qwerty', 'data')
fs.mkdir_safe(data_dir)
archives_dir = os.path.join(self.root, 'archives')
fs.mkdir_safe(archives_dir)
sys_archive = os.path.join(archives_dir,
'xxx.yyy-1234-qwerty.sys.tar.gz')
app_archive = os.path.join(archives_dir,
'xxx.yyy-1234-qwerty.app.tar.gz')
app_finish._archive_logs(self.tm_env, 'xxx.yyy-1234-qwerty', data_dir)
self.assertTrue(os.path.exists(sys_archive))
self.assertTrue(os.path.exists(app_archive))
os.unlink(sys_archive)
os.unlink(app_archive)
def _touch_file(path):
"""Touch file, appending path to container_dir."""
fpath = os.path.join(data_dir, path)
fs.mkdir_safe(os.path.dirname(fpath))
io.open(fpath, 'w').close()
_touch_file('sys/foo/data/log/current')
_touch_file('sys/bla/data/log/current')
_touch_file('sys/bla/data/log/xxx')
_touch_file('services/xxx/data/log/current')
_touch_file('services/xxx/data/log/whatever')
_touch_file('a.json')
_touch_file('a.rrd')
_touch_file('log/current')
_touch_file('whatever')
app_finish._archive_logs(self.tm_env, 'xxx.yyy-1234-qwerty', data_dir)
tar = tarfile.open(sys_archive)
files = sorted([member.name for member in tar.getmembers()])
self.assertEqual(
files,
['a.json', 'a.rrd', 'log/current',
'sys/bla/data/log/current', 'sys/foo/data/log/current']
)
tar.close()
tar = tarfile.open(app_archive)
files = sorted([member.name for member in tar.getmembers()])
self.assertEqual(
files,
['services/xxx/data/log/current']
)
tar.close()
def test__archive_cleanup(self):
"""Tests cleanup of local logs."""
# Access protected module _ARCHIVE_LIMIT, _cleanup_archive_dir
#
# pylint: disable=W0212
fs.mkdir_safe(self.tm_env.archives_dir)
# Cleanup does not care about file extensions, it will cleanup
# oldest file if threshold is exceeded.
app_finish._ARCHIVE_LIMIT = 20
file1 = os.path.join(self.tm_env.archives_dir, '1')
with io.open(file1, 'w') as f:
f.write('x' * 10)
app_finish._cleanup_archive_dir(self.tm_env)
self.assertTrue(os.path.exists(file1))
os.utime(file1, (time.time() - 1, time.time() - 1))
file2 = os.path.join(self.tm_env.archives_dir, '2')
with io.open(file2, 'w') as f:
f.write('x' * 10)
app_finish._cleanup_archive_dir(self.tm_env)
self.assertTrue(os.path.exists(file1))
with io.open(os.path.join(self.tm_env.archives_dir, '2'), 'w') as f:
f.write('x' * 15)
app_finish._cleanup_archive_dir(self.tm_env)
self.assertFalse(os.path.exists(file1))
self.assertTrue(os.path.exists(file2))
if __name__ == '__main__':
unittest.main()
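# Running this module directly invokes unittest.main() above; it can also be
# collected by a test runner such as pytest (runner choice is an assumption,
# the treadmill package and its test dependencies must be importable):
#
#   python tests/runtime/linux/finish_test.py
#   # or: python -m pytest tests/runtime/linux/finish_test.py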
| apache-2.0 |
terhorst/psmcpp | smcpp/commands/command.py | 2 | 7728 | # Base class; subclasses will automatically show up as subcommands
import numpy as np
import argparse
import os
import os.path
import sys
from .. import logging, _smcpp
import smcpp.defaults
logger = logging.getLogger(__name__)
def check_positive(value):
ivalue = int(value)
if ivalue <= 0:
raise argparse.ArgumentTypeError("%s is an invalid positive int value" % value)
return ivalue
class ConsoleCommand:
def __init__(self, parser):
pass
class Command:
def __init__(self, parser):
'''Configure parser and parse args.'''
parser.add_argument('-v', '--verbose', action='count', default=0,
help="increase debugging output, specify multiply times for more")
parser.add_argument('--seed', type=int, default=0, help=argparse.SUPPRESS)
parser.add_argument('--cores', type=int, default=None,
help="Number of worker processes / threads "
"to use in parallel calculations")
def main(self, args):
np.random.seed(args.seed)
logging.setup_logging(args.verbose)
smcpp.defaults.cores = args.cores
class EstimationCommand(Command):
def __init__(self, parser):
super().__init__(parser)
add_common_estimation_args(parser)
def main(self, args):
if not os.path.isdir(args.outdir):
os.makedirs(args.outdir)
# Initialize the logger
# Do this before calling super().main() so that
# any debugging output generated there gets logged
logging.add_debug_log(os.path.join(args.outdir, ".debug.txt"))
super().main(args)
logger.debug(sys.argv)
logger.debug(args)
def add_common_estimation_args(parser):
parser.add_argument("-o", "--outdir", help="output directory", default=".")
parser.add_argument("--base", help="base for file output. outputted files will have the form {base}.final.json, etc.",
default="model")
parser.add_argument('--timepoints', type=float, default=None, nargs=2,
help="start time of model (in generations)")
data = parser.add_argument_group('data parameters')
data.add_argument('--length-cutoff', help=argparse.SUPPRESS, type=int, default=None)
data.add_argument('--nonseg-cutoff', '-c',
help="recode nonsegregating spans > cutoff as missing. "
"default: do not recode.",
type=int)
data.add_argument('--thinning', help="only emit full SFS every <k>th site. (k > 0)",
default=None, type=check_positive, metavar="k")
data.add_argument('-w', default=100, help="window size. sites are grouped into blocks of size <w>. "
"each block is coded as polymorphic or nonpolymorphic. "
"the default w=100 matches PSMC. setting w=1 performs no windowing "
"but may be more susceptible to model violations, in particular tracts "
" of hypermutated sites.", type=int)
optimizer = parser.add_argument_group("Optimization parameters")
optimizer.add_argument("--no-initialize", action="store_true", default=False, help=argparse.SUPPRESS)
optimizer.add_argument('--em-iterations', type=int,
help="number of EM steps to perform", default=20)
optimizer.add_argument('--algorithm',
choices=["Powell", "L-BFGS-B", "TNC"],
default="L-BFGS-B", help="optimization algorithm. Powell's method "
"is used by {P,MS}MC and does not require gradients. It may "
"be faster in some cases.")
optimizer.add_argument('--multi', default=False, action="store_true",
help="update multiple blocks of coordinates at once")
optimizer.add_argument("--ftol", type=float,
default=smcpp.defaults.ftol,
help="stopping criterion for relative improvement in loglik "
"in EM algorithm. algorithm will terminate when "
"|loglik' - loglik| / loglik < ftol")
optimizer.add_argument('--xtol', type=float,
default=smcpp.defaults.xtol,
help=r"x tolerance for optimizer. "
"optimizer will stop when |x' - x|_\infty < xtol")
optimizer.add_argument('--Nmax', type=float,
default=smcpp.defaults.maximum_population_size,
help="upper bound on scaled effective population size")
optimizer.add_argument('--Nmin', type=float,
default=smcpp.defaults.minimum_population_size,
help="lower bound on scaled effective population size")
optimizer.add_argument('--regularization-penalty', '-rp',
type=float, help="regularization penalty",
default=smcpp.defaults.regularization_penalty)
optimizer.add_argument('--lambda', dest="lambda_", type=float, help=argparse.SUPPRESS)
add_hmm_args(parser)
def add_hmm_args(parser):
polarization = parser.add_mutually_exclusive_group(required=False)
polarization.add_argument("--unfold", action="store_true", default=False,
help="use unfolded SFS (alias for -p 0.0)")
polarization.add_argument('--polarization-error', '-p',
metavar='p', type=float, default=0.5,
help="uncertainty parameter for polarized SFS: observation (a,b) "
"has probability [(1-p)*CSFS_{a,b} + p*CSFS_{2-a,n-2-b}]. "
"default: 0.5")
def add_model_parameters(parser):
model = parser.add_argument_group('Model parameters')
# model.add_argument('--timepoints', type=str, default="h",
# help="starting and ending time points of model. "
# "this can be either a comma separated list of two numbers `t1,tK`"
# "indicating starting and ending generations, "
# "a single value, indicating the starting time point, "
# "or the special value 'h' "
# "indicating that they should be determined based on the data using an "
# "heuristic calculation.")
model.add_argument('--knots', type=int,
default=smcpp.defaults.knots,
help="number of knots to use in internal representation")
# model.add_argument('--hs', type=int,
# default=2,
# help="ratio of (# hidden states) / (# knots). Must "
# "be an integer >= 1. Larger values will consume more "
# "memory and CPU but are potentially more accurate. ")
model.add_argument('--spline',
choices=["cubic", "pchip", "piecewise"],
default=smcpp.defaults.spline,
help="type of model representation "
"(smooth spline or piecewise constant) to use")
return model
def add_pop_parameters(parser):
pop_params = parser.add_argument_group('Population-genetic parameters')
pop_params.add_argument('mu', type=float,
help="mutation rate per base pair per generation")
pop_params.add_argument('-r', type=float,
help="recombination rate per base pair per generation. "
"default: estimate from data.")
return pop_params
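# A minimal, hypothetical sketch of how these helpers are meant to be combined
# by a subcommand (the wiring below is an assumption for illustration, not the
# actual smc++ command line):
#
#   import argparse
#
#   parser = argparse.ArgumentParser()
#   sub = parser.add_subparsers()
#   est_parser = sub.add_parser('estimate')
#   cmd = EstimationCommand(est_parser)   # adds -v, --seed, --cores plus estimation args
#   add_model_parameters(est_parser)
#   add_pop_parameters(est_parser)
#   args = parser.parse_args(['estimate', '1.25e-8'])
#   cmd.main(args)                        # sets seed, logging and output directory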
| gpl-3.0 |
lowandrew/OLCTools | olctools/accessoryFunctions/accessoryFunctions.py | 1 | 43913 | #!/usr/bin/env python3
# noinspection PyProtectedMember
from Bio.Application import _Option, AbstractCommandline, _Switch
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
from Bio import SeqIO
from subprocess import Popen, PIPE, STDOUT
from collections import defaultdict
import subprocess
import datetime
import logging
import shutil
import shlex
import time
import glob
import os
import re
import sys
__author__ = 'adamkoziol', 'andrewlow'
def dependency_check(dependency):
"""
Checks a program to see if it's installed (or at least, checks whether or not some sort of executable
for it is on your path).
:param dependency: Name of program you want to check, as a string.
:return: True if dependency is present, False if it isn't.
"""
check = shutil.which(dependency)
if not check:
return False
else:
return True
def find_paired_reads(fastq_directory, forward_id='_R1', reverse_id='_R2'):
"""
Looks at a directory to try to find paired fastq files. Should be able to find anything fastq.
:param fastq_directory: Complete path to directory containing fastq files.
:param forward_id: Identifier for forward reads. Default R1.
:param reverse_id: Identifier for reverse reads. Default R2.
:return: List containing pairs of fastq files, in format [[forward_1, reverse_1], [forward_2, reverse_2]], etc.
"""
pair_list = list()
fastq_files = glob.glob(os.path.join(fastq_directory, '*.f*q*'))
for name in fastq_files:
if forward_id in name and os.path.isfile(name.replace(forward_id, reverse_id)):
pair_list.append([name, name.replace(forward_id, reverse_id)])
return pair_list
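# Example (directory layout is hypothetical): given /data/run1 containing
# sample1_R1.fastq.gz and sample1_R2.fastq.gz, this returns
# [['/data/run1/sample1_R1.fastq.gz', '/data/run1/sample1_R2.fastq.gz']].
#
#   pairs = find_paired_reads('/data/run1')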
def find_unpaired_reads(fastq_directory, forward_id='_R1', reverse_id='_R2'):
"""
Looks at a directory to try to find unpaired fastq files.
:param fastq_directory: Complete path to directory containing fastq files.
:param forward_id: Identifier for paired reads, forward.
:param reverse_id: Identifier for paired reads, reverse.
:return: List of paths to unpaired files.
"""
unpaired_list = list()
fastq_files = glob.glob(os.path.join(fastq_directory, '*.f*q*'))
for name in fastq_files:
if forward_id not in name and reverse_id not in name:
unpaired_list.append(name)
elif forward_id in name and not os.path.isfile(name.replace(forward_id, reverse_id)):
unpaired_list.append(name)
elif reverse_id in name and not os.path.isfile(name.replace(reverse_id, forward_id)):
unpaired_list.append(name)
return unpaired_list
def download_file(address, output_name, hour_start=18, hour_end=6, day_start=5, day_end=6, timeout=600):
"""
Downloads a file, between specified hours. (Hour start has to be greater than hour end for this to work in current
iteration).
:param address: Address of file that you want to download.
:param output_name: Where you want to save the file to.
:param hour_start: Start of window where downloading is acceptable. Default 6PM (1800h)
:param hour_end: End of window where downloading is acceptable. Default 6AM (600h)
:param day_start: Start of window where it's always OK to download. Default Saturday (day 5).
:param day_end: End of window where it's always OK to download. Default Sunday (day 6).
:param timeout: How often to check if you're outside the acceptable download window (default 600 seconds).
:return:
"""
out = open(os.devnull, 'w')
returncode = 28 # While loop is based on returncode given by curl, so need to initialize it to something.
while returncode != 0: # 0 means that the file has already been downloaded completely, so stop looping then.
# Figure out what hour it is. If not in acceptable download window, wait a while before checking again.
hour = datetime.datetime.now().time().hour
minute = datetime.datetime.now().time().minute
day = datetime.datetime.today().weekday()
acceptable_hour = not(hour_end < hour < hour_start) # True if current hour is between start and end.
acceptable_day = day_start <= day <= day_end # True if current day is a weekend day.
if not(acceptable_hour or acceptable_day):
print('Current time is {hour}:{minute}. I am not allowed to start downloading until'
' {start_hour}:00.'.format(hour=hour, minute=minute, start_hour=hour_start))
time.sleep(timeout)
# If the file doesn't already exist, start downloading it.
elif not os.path.exists(output_name):
cmd = 'curl -o {outname} --max-time {timeout} {address}'.format(timeout=timeout,
address=address,
outname=output_name)
returncode = subprocess.call(cmd, shell=True, stdout=out, stderr=out)
# If the file does already exist, download it starting from filesize offset.
else:
file_size = os.path.getsize(output_name)
cmd = 'curl -o {outname} --max-time {timeout} -C {file_size} {address}'.format(timeout=timeout,
address=address,
outname=output_name,
file_size=file_size)
returncode = subprocess.call(cmd, shell=True, stdout=out, stderr=out)
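# Hedged usage sketch (URL and output path are placeholders): with the default
# arguments this only downloads between 18:00 and 06:00, or at any hour on
# Saturday/Sunday, and resumes partial files using curl's -C offset option.
#
#   download_file('https://example.com/big_database.tar.gz',
#                 '/data/big_database.tar.gz')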
def write_to_logfile(out, err, logfile, samplelog=None, sampleerr=None, analysislog=None, analysiserr=None):
"""
Writes out and err (both should be strings) to logfile.
"""
# Run log
with open(logfile + '_out.txt', 'a+') as outfile:
outfile.write(out + '\n')
with open(logfile + '_err.txt', 'a+') as outfile:
outfile.write(err + '\n')
# Sample log
if samplelog:
with open(samplelog, 'a+') as outfile:
outfile.write(out + '\n')
with open(sampleerr, 'a+') as outfile:
outfile.write(err + '\n')
# Analysis log
if analysislog:
with open(analysislog, 'a+') as outfile:
outfile.write(out + '\n')
with open(analysiserr, 'a+') as outfile:
outfile.write(err + '\n')
def clear_logfile(logfile):
"""
As logfiles are appended to each time the same data are processed, sometimes it is desirable to clear out
logsfiles from previous iterations
:param logfile: Base name of logfile
"""
try:
os.remove(logfile + '_out.txt')
except IOError:
pass
try:
os.remove(logfile + '_err.txt')
except IOError:
pass
def run_subprocess(command):
"""
command is the command to run, as a string.
runs a subprocess, returns stdout and stderr from the subprocess as strings.
"""
x = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
out, err = x.communicate()
out = out.decode('utf-8')
err = err.decode('utf-8')
return out, err
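# Typical pattern in this package (command and paths are illustrative): run a
# command, then persist its stdout/stderr with write_to_logfile for later
# troubleshooting.
#
#   out, err = run_subprocess('samtools --version')
#   write_to_logfile(out, err, '/analysis/logs/pipeline')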
def get_version(exe):
"""
:param exe: :type list required
"""
assert isinstance(exe, list)
return Popen(exe, stdout=PIPE, stderr=STDOUT).stdout.read()
def logstr(*args):
yield "{}\n".__add__("-".__mul__(60)).__mul__(len(args)).format(*args)
def make_path(inpath):
"""
from: http://stackoverflow.com/questions/273192/check-if-a-directory-exists-and-create-it-if-necessary \
does what is indicated by the URL
:param inpath: string of the supplied path
"""
if not os.path.isfile(inpath):
try:
os.makedirs(inpath)
except FileExistsError:
pass
else:
raise OSError
def make_dict():
"""Makes Perl-style dictionaries"""
return defaultdict(make_dict)
class CustomLogs(logging.StreamHandler):
"""
Uses the logging module to create custom-coloured logs. The colours correspond to the level
Modified from:
http://uran198.github.io/en/python/2016/07/12/colorful-python-logging.html
https://plumberjack.blogspot.com/2010/12/colorizing-logging-output-in-terminals.html
"""
# Dictionary mapping logging level to colours
level_map = {
logging.DEBUG: '\033[1;92m',
logging.INFO: '\033[1;94m',
logging.ERROR: '\033[1;91m',
logging.WARNING: '\033[1;93m',
logging.CRITICAL: '\033[1;95m'
}
def emit(self, record):
try:
# Format the record
try:
self.stream.write(self.format(record))
# If several variables are passed to the logger, try to flatten everything into a string
except TypeError:
record.msg = '{msg} {args}'.format(msg=record.msg, args=' '.join(record.args))
record.args = list()
self.stream.write(self.format(record))
# Write the formatted record to the stream
self.stream.write(getattr(self, 'terminator', '\n'))
# Flush the output to terminal
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def colorize(self, message, record):
if record.levelno in self.level_map:
# Extract the colour corresponding to the current level
color = self.level_map[record.levelno]
# Add the colour to the message. Reset the formatting with '\x1b[0m'
message = ''.join((color, message, '\x1b[0m'))
return message
def format(self, record):
message = logging.StreamHandler.format(self, record)
parts = message.split('\n', 1)
# Add the custom formatted date to the message
parts[0] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ' ' + parts[0]
parts[0] = self.colorize(parts[0], record)
# Reconstitute the message from its updated parts
message = '\n'.join(parts)
return message
class SetupLogging(object):
"""
Runs the CustomLogs class
"""
def __init__(self, log_level=logging.INFO, debug=False, filehandle=str(), logfile_level=None):
# Create a logging object
logger = logging.getLogger()
# Set whether debug level messages should be displayed
if debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
# Use CustomLogs to modify the handler
# Only add a handler if it hasn't been added by another script in the pipeline
if not logger.handlers:
logger.addHandler(CustomLogs())
if filehandle:
# create a file handler
handler = logging.FileHandler(filename=filehandle,
mode='w')
if logfile_level:
handler.setLevel(logfile_level)
else:
handler.setLevel(log_level)
logger.addHandler(handler)
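# Minimal example: initialise coloured console logging once at program start
# (the file path is a placeholder; debug=True enables DEBUG-level messages):
#
#   SetupLogging(debug=True, filehandle='/tmp/pipeline.log')
#   logging.info('Starting analysis')   # coloured on the console, also written to the file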
def printtime(string, start, option=None, output=None):
"""Prints a string with colour options with the elapsed time
# Reset
Color_Off='\033[0m' # Text Reset
# Regular Colors
Black='\033[0;30m' # Black
Red='\033[0;31m' # Red
Green='\033[0;32m' # Green
Yellow='\033[0;33m' # Yellow
Blue='\033[0;34m' # Blue
Purple='\033[0;35m' # Purple
Cyan='\033[0;36m' # Cyan
White='\033[0;37m' # White
# Bold
BBlack='\033[1;30m' # Black
BRed='\033[1;31m' # Red
BGreen='\033[1;32m' # Green
BYellow='\033[1;33m' # Yellow
BBlue='\033[1;34m' # Blue
BPurple='\033[1;35m' # Purple
BCyan='\033[1;36m' # Cyan
BWhite='\033[1;37m' # White
# Underline
UBlack='\033[4;30m' # Black
URed='\033[4;31m' # Red
UGreen='\033[4;32m' # Green
UYellow='\033[4;33m' # Yellow
UBlue='\033[4;34m' # Blue
UPurple='\033[4;35m' # Purple
UCyan='\033[4;36m' # Cyan
UWhite='\033[4;37m' # White
# Background
On_Black='\033[40m' # Black
On_Red='\033[41m' # Red
On_Green='\033[42m' # Green
On_Yellow='\033[43m' # Yellow
On_Blue='\033[44m' # Blue
On_Purple='\033[45m' # Purple
On_Cyan='\033[46m' # Cyan
On_White='\033[47m' # White
# High Intensity
IBlack='\033[0;90m' # Black
IRed='\033[0;91m' # Red
IGreen='\033[0;92m' # Green
IYellow='\033[0;93m' # Yellow
IBlue='\033[0;94m' # Blue
IPurple='\033[0;95m' # Purple
ICyan='\033[0;96m' # Cyan
IWhite='\033[0;97m' # White
# Bold High Intensity
BIBlack='\033[1;90m' # Black
BIRed='\033[1;91m' # Red
BIGreen='\033[1;92m' # Green
BIYellow='\033[1;93m' # Yellow
BIBlue='\033[1;94m' # Blue
BIPurple='\033[1;95m' # Purple
BICyan='\033[1;96m' # Cyan
BIWhite='\033[1;97m' # White
# High Intensity backgrounds
On_IBlack='\033[0;100m' # Black
On_IRed='\033[0;101m' # Red
On_IGreen='\033[0;102m' # Green
On_IYellow='\033[0;103m' # Yellow
On_IBlue='\033[0;104m' # Blue
On_IPurple='\033[0;105m' # Purple
On_ICyan='\033[0;106m' # Cyan
On_IWhite='\033[0;107m' # White
:param string: a string to be printed
:param start: integer of the starting time
:param option: Additional option for the text style
:param output: name and path of the logfile to store the message
"""
# If not option is provided, default to bold high-intensity white
if not option:
# option = '\033[1;97m'
option = '\033[1;94m'
# Add the string formatting option to the message. Reset the format back to normal at the end with \033[0m
print('{} [Elapsed Time: {:.2f} seconds] {} \033[0m'.format(option, time.time() - start, string))
if output:
try:
with open(output, 'a') as log:
log.write('[Elapsed Time: {:.2f} seconds] {}\n'.format(time.time() - start, string))
except FileNotFoundError:
pass
class Dotter(object):
def globalcounter(self):
"""Resets the globalcount to 0"""
self.globalcount = 0
def dotter(self):
"""Prints formatted time to stdout at the start of a line, as well as a "."
whenever the length of the line is equal or lesser than 80 "." long"""
if self.globalcount <= 80:
sys.stdout.write('.')
self.globalcount += 1
else:
sys.stdout.write('\n.')
self.globalcount = 1
def __init__(self):
self.globalcount = 0
# Initialise globalcount
globalcount = 0
def globalcounter():
"""Resets the globalcount to 0"""
global globalcount
globalcount = 0
def dotter():
"""Prints formatted time to stdout at the start of a line, as well as a "."
whenever the length of the line is equal or lesser than 80 "." long"""
# Use a global variable
global globalcount
if globalcount <= 80:
sys.stdout.write('.')
globalcount += 1
else:
sys.stdout.write('\n.')
globalcount = 1
def execute(command, outfile=""):
"""
Allows for dots to be printed to the terminal while waiting for a long system call to run
:param command: the command to be executed
:param outfile: optional string of an output file
from https://stackoverflow.com/questions/4417546/constantly-print-subprocess-output-while-process-is-running
"""
# Initialise count
count = 0
# Initialise the starting time
start = int(time.time())
maxtime = 0
# Removing Shell=True to prevent excess memory use thus shlex split if needed
if type(command) is not list:
command = shlex.split(command)
# Run the commands - direct stdout to PIPE and stderr to stdout
# DO NOT USE subprocess.PIPE if not writing it!
if outfile:
process = Popen(command, stdout=PIPE, stderr=STDOUT)
else:
devnull = open(os.devnull, 'wb')
process = Popen(command, stdout=devnull, stderr=STDOUT)
# Write the initial time
sys.stdout.write('[{:}] '.format(time.strftime('%H:%M:%S')))
# Create the output file - if not provided, then nothing should happen
writeout = open(outfile, "ab+") if outfile else ""
# Poll process for new output until finished
while True:
# If an output file name is provided
if outfile:
# Get stdout into a variable
nextline = process.stdout.readline()
# Print stdout to the file
writeout.write(nextline)
# Break from the loop if the command is finished
if process.poll() is not None:
break
# Adding sleep commands slowed down this method when there was lots of output. Difference between the start time
# of the analysis and the current time. Action on each second passed
currenttime = int(time.time())
if currenttime - start > maxtime:
# Set the max time for each iteration
maxtime = currenttime - start
# Print up to 80 dots on a line, with a one second delay between each dot
if count <= 80:
sys.stdout.write('.')
count += 1
# Once there are 80 dots on a line, start a new line with the the time
else:
sys.stdout.write('\n[{:}] .'.format(time.strftime('%H:%M:%S')))
count = 1
# Close the output file
writeout.close() if outfile else ""
sys.stdout.write('\n')
def filer(filelist, extension='fastq', returndict=False):
"""
Helper script that creates a set of the stain names created by stripping off parts of the filename.
Hopefully handles different naming conventions (e.g. 2015-SEQ-001_S1_L001_R1_001.fastq(.gz),
2015-SEQ-001_R1_001.fastq.gz, 2015-SEQ-001_R1.fastq.gz, 2015-SEQ-001_1.fastq.gz, and 2015-SEQ-001_1.fastq.gz
all become 2015-SEQ-001)
:param filelist: List of files to parse
:param extension: the file extension to use. Default value is 'fastq'
:param returndict: type BOOL: Option to return a dictionary of file name: fastq files associated with that name
rather than a set of the file names
"""
# Initialise the variables
fileset = set()
filedict = dict()
for seqfile in filelist:
# Search for the conventional motifs present following strain names
# _S\d+_L001_R\d_001.fastq(.gz) is a typical unprocessed Illumina fastq file
if re.search("_S\\d+_L001", seqfile):
file_name = re.split("_S\\d+_L001", seqfile)[0]
# HiSeq names are different: _S\\d+_R\\d_\\d{3}
# 2019-SEQ-0001_S1_R1_001.fastq.gz
elif re.search("_S\\d+_R\\d_\\d{3}", seqfile):
file_name = re.split("_S\\d+_R\\d_\\d{3}", seqfile)[0]
# Files with _R\d_001.fastq(.gz) are created in the SPAdes assembly pipeline
elif re.search("_R\\d_001", seqfile):
file_name = re.split("_R\\d_001", seqfile)[0]
# _R\d.fastq(.gz) represents a simple naming scheme for paired end reads
elif re.search("R\\d.{}".format(extension), seqfile):
file_name = re.split("_R\\d.{}".format(extension), seqfile)[0]
# _\d.fastq is always possible
elif re.search("[-_]\\d.{}".format(extension), seqfile):
file_name = re.split("[-_]\\d.{}".format(extension), seqfile)[0]
# .fastq is the last option
else:
file_name = re.split(".{}".format(extension), seqfile)[0]
# Add the calculated file name to the set
fileset.add(file_name)
# Populate the dictionary with the file name: seq file pair
try:
filedict[file_name].append(seqfile)
except KeyError:
filedict[file_name] = [seqfile]
# Return the appropriate variable
if not returndict:
return fileset
else:
return filedict
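# Example of the intended normalisation (file names are hypothetical):
#
#   filer(['2015-SEQ-001_S1_L001_R1_001.fastq.gz',
#          '2015-SEQ-001_S1_L001_R2_001.fastq.gz',
#          '2015-SEQ-002_R1.fastq.gz'])
#   # -> {'2015-SEQ-001', '2015-SEQ-002'}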
# def relativesymlink(src_file, dest_file):
# """
# https://stackoverflow.com/questions/9793631/creating-a-relative-symlink-in-python-without-using-os-chdir
# :param src_file: the file to be linked
# :param dest_file: the path and filename to which the file is to be linked
# """
# # Perform relative symlinking
# try:
# print(os.path.relpath(src_file), os.path.relpath(dest_file))
# os.symlink(
# # Find the relative path for the source file and the destination file
# os.path.relpath(src_file),
# os.path.relpath(dest_file)
# )
# # Except os errors
# except OSError as exception:
# # If the os error is anything but directory exists, then raise
# if exception.errno != errno.EEXIST:
# raise
def relative_symlink(src_file, output_dir, output_name=None, export_output=False):
"""
Create relative symlinks files - use the relative path from the desired output directory to the storage path
e.g. ../../2013-SEQ-0072/simulated/40/50_150/simulated_trimmed/2013-SEQ-0072_simulated_40_50_150_R1.fastq.gz
is the relative path to the output_dir. The link name is the base name of the source file joined to the desired
output directory e.g. output_dir/2013-SEQ-0072/2013-SEQ-0072_simulated_40_50_150_R1.fastq.gz
https://stackoverflow.com/questions/9793631/creating-a-relative-symlink-in-python-without-using-os-chdir
:param src_file: Source file to be symbolically linked
:param output_dir: Destination folder for the link
:param output_name: Optionally allow for the link to have a different name
:param export_output: type BOOL: Optionally return the absolute path of the new, linked file
:return output_file: type STR: Absolute path of the newly-created symlink
"""
if output_name:
file_name = output_name
else:
file_name = os.path.basename(src_file)
#
output_file = os.path.join(output_dir, file_name)
try:
os.symlink(
os.path.relpath(
src_file,
output_dir),
output_file
)
# Ignore FileExistsErrors
except FileExistsError:
pass
# Return the absolute path of the symlink if requested
if export_output:
return output_file
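# Sketch (paths are made up): link a stored FASTQ into a working directory
# without copying it, keeping the link relative so the tree stays portable.
#
#   linked = relative_symlink('/storage/2013-SEQ-0072/reads_R1.fastq.gz',
#                             '/analysis/working_dir',
#                             export_output=True)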
class GenObject(object):
"""Object to store static variables"""
def __init__(self, x=None):
start = x if x else {}
super(GenObject, self).__setattr__('datastore', start)
def __getattr__(self, key):
if key in self.datastore:
return self.datastore[key]
else:
raise AttributeError('The GenObject has not been initialised with the following key: {key}'
.format(key=key))
def __setattr__(self, key, value):
try:
self.datastore[key] = value
except TypeError:
raise AttributeError('The GenObject cannot accept the following key:value pair provided {key}:{value}'
.format(key=key,
value=value))
def __delattr__(self, key):
try:
del self.datastore[key]
except KeyError:
raise AttributeError('The GenObject does not contain the following key: {key}'
.format(key=key))
def returnattr(self, key, number=False):
"""
Returns a string of either datastore[key], or 'ND' if datastore[key] doesn't exist formatted for a CSV report
Replace any commas with semicolons.
:param key: Dictionary key to be used to return the value from datastore[key]
:param number: Boolean whether the type of the attribute is a number (int, float, etc). Will return 0
instead of ND
"""
# String to return if the key is not in the datastore
negative_return = 'ND,' if not number else '0,'
try:
if key in self.datastore:
# Return the string of the value with any commas replaced by semicolons. Append a comma to the
# end of the string for the CSV format
return_key = '{},'.format(str(self.datastore[key]).replace(',', ';'))
if not number:
return return_key
else:
if return_key == 'ND,':
return negative_return
else:
return return_key
else:
return negative_return
except AttributeError:
return negative_return
def isattr(self, key):
"""
Checks to see if an attribute exists. If it does, returns True, otherwise returns False
:param key: Dictionary key to be checked for presence in the datastore
:return: True/False depending on whether an attribute exists
"""
try:
if key in self.datastore:
return True
else:
return False
except AttributeError:
return False
def __getitem__(self, key):
"""
Make GenObjects subscriptable in order to allow for nested GenObject
"""
return getattr(self, key)
def __setitem__(self, key, value):
"""
Allow item assignment for GenObjects
"""
try:
self.datastore[key] = value
except TypeError:
raise AttributeError('The GenObject cannot accept the following key:value pair provided {key}:{value}'
.format(key=key,
value=value))
def dump(self):
"""
        Returns the datastore as nested dictionaries; attributes beginning with '__' are ignored
"""
metadata = dict()
for attr in sorted(self.datastore):
# Initialise the attribute (e.g. sample.general) key in the metadata dictionary
metadata[attr] = dict()
# Ignore attributes that begin with '__'
if not attr.startswith('__'):
# If self.datastore[attribute] is a primitive datatype, populate the metadata dictionary with
# the attr: self.datastore[attr] pair
# e.g. attr: name, self.datastore[attr]: 2013-SEQ-0072
if isinstance(self.datastore[attr], str) or \
isinstance(self.datastore[attr], list) or \
isinstance(self.datastore[attr], dict) or \
isinstance(self.datastore[attr], int):
metadata[attr] = self.datastore[attr]
else:
# Otherwise, recursively convert GenObjects to nested dictionaries
metadata.update(self.nested_genobject(metadata, attr, self.datastore))
return metadata
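# A minimal sketch of how GenObject behaves (the values are hypothetical):
#     >>> obj = GenObject()
#     >>> obj.name = '2013-SEQ-0072'
#     >>> obj.name
#     '2013-SEQ-0072'
#     >>> obj.returnattr('name')
#     '2013-SEQ-0072,'
#     >>> obj.returnattr('coverage', number=True)  # missing numeric attribute
#     '0,'
#     >>> obj.isattr('coverage')
#     False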
class MetadataObject(object):
"""Object to store static variables"""
def __init__(self):
"""Create datastore attr with empty dict"""
super(MetadataObject, self).__setattr__('datastore', {})
# Initialise a list of keys that will not be printed to the .json file with the dump method
self.unwanted_keys = ['allelenames', 'alleles', 'faidict', 'gaplocations', 'maxcoverage',
'mincoverage', 'profiledata', 'resultsgap', 'averagedepth', 'avgdepth',
'resultssnp', 'sequences', 'sequence', 'snplocations', 'standarddev',
'totaldepth', 'blastlist', 'targetsequence', 'queryranges', 'querypercent',
'queryscore', 'results', 'blastresults', 'report_dict', 'sampledata', 'meta_dict']
def __getattr__(self, key):
""":key is retrieved from datastore if exists, for nested attr recursively :self.__setattr__"""
try:
return self.datastore[key]
except KeyError:
raise AttributeError('The MetadataObject has not been initialised with the following key: {key}'
.format(key=key))
def __setattr__(self, key, value=GenObject(), **args):
"""Add :value to :key in datastore or create GenObject for nested attr"""
if args:
self.datastore[key].value = args
else:
try:
self.datastore[key] = value
except TypeError:
raise AttributeError('The MetadataObject cannot accept the following key:value pair '
'provided {key}:{value}'.format(key=key,
value=value))
def __getitem__(self, key):
try:
return self.datastore[key]
except KeyError:
raise AttributeError('The MetadataObject has not been initialised with the following key: {key}'
.format(key=key))
def dump(self):
"""Prints only the nested dictionary values; removes __methods__ and __members__ attributes"""
metadata = dict()
for attr in sorted(self.datastore):
# Initialise the attribute (e.g. sample.general) key in the metadata dictionary
metadata[attr] = dict()
# Ignore attributes that begin with '__'
if not attr.startswith('__'):
# If self.datastore[attribute] is a primitive datatype, populate the metadata dictionary with
# the attr: self.datastore[attr] pair
# e.g. attr: name, self.datastore[attr]: 2013-SEQ-0072
if isinstance(self.datastore[attr], str) or \
isinstance(self.datastore[attr], list) or \
isinstance(self.datastore[attr], dict) or \
isinstance(self.datastore[attr], int):
metadata[attr] = self.datastore[attr]
else:
# Otherwise, recursively convert GenObjects to nested dictionaries
metadata.update(self.nested_genobject(metadata, attr, self.datastore))
return metadata
def nested_genobject(self, metadata, attr, datastore):
"""
Allow for the printing of nested GenObjects
:param metadata: Nested dictionary containing the metadata. Will be further populated by this method
:param attr: Current attribute being evaluated. Must be a GenObject e.g. sample.general
:param datastore: The dictionary of the current attribute. Will be converted to nested dictionaries
:return: Updated nested metadata dictionary with all GenObjects safely converted to dictionaries
"""
# Iterate through all the key: value pairs of the current datastore[attr] datastore
# e.g. reverse_reads <accessoryFunctions.accessoryFunctions.GenObject object at 0x7fe153b725f8>
for key, value in sorted(datastore[attr].datastore.items()):
# If the type(value) is a GenObject, then JSON serialization will not work
if 'GenObject' in str(type(value)):
# Initialise the nested attribute: key nested dictionary within the metadata dictionary
# e.g. attr: 100_100, key: reverse_reads
metadata[attr][key] = dict()
# Iterate through the nested keys and nested values within the value datastore
# e.g. nested_key: length, nested_value: 100
for nested_key, nested_datastore in sorted(value.datastore.items()):
# Create an additional dictionary layer within the metadata dictionary
metadata[attr][key][nested_key] = dict()
# If the type(nested_datastore) is a GenObject, recursively run this method to update the
# metadata dictionary, supply the newly created nested dictionary: metadata[attr][key] as
# the input metadata dictionary, the nested key as the input attribute, and the datastore of
# value as the input datastore
# e.g. key: 100_100,
# datastore: <accessoryFunctions.accessoryFunctions.GenObject object at 0x7fc526001e80>
if 'GenObject' in str(type(nested_datastore)):
metadata[attr][key].update(
self.nested_genobject(metadata[attr][key], nested_key, value.datastore))
# If the nested datastore is not a GenObject, populate the nested metadata dictionary with
# the attribute, key, nested key, and nested datastore
# e.g. attr: 100_100, key: reverse_reads, nested_key: length, nested_datastore: 100
else:
metadata[attr][key][nested_key] = nested_datastore
# Non-GenObjects can (usually) be added to the metadata dictionary without issues
else:
try:
if key not in self.unwanted_keys:
metadata[attr][key] = value
except AttributeError:
print('dumperror', attr)
# Return the metadata
return metadata
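# A minimal sketch of MetadataObject holding a nested GenObject (the values are hypothetical):
#     >>> sample = MetadataObject()
#     >>> sample.name = 'sampleA'
#     >>> sample.general = GenObject()
#     >>> sample.general.outputdirectory = '/tmp/sampleA'
#     >>> sample.dump()['general']['outputdirectory']
#     '/tmp/sampleA'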
class MakeBlastDB(AbstractCommandline):
"""Base makeblastdb wrapper"""
def __init__(self, cmd='makeblastdb', **kwargs):
assert cmd is not None
extra_parameters = [
# Core:
_Switch(["-h", "h"],
"Print USAGE and DESCRIPTION; ignore other arguments."),
_Switch(["-help", "help"],
"Print USAGE, DESCRIPTION and ARGUMENTS description; "
"ignore other arguments."),
_Switch(["-version", "version"],
"Print version number; ignore other arguments."),
# Output configuration options
_Option(["-out", "out"],
"Output file prefix for db.",
filename=True,
equate=False),
_Option(["-in", "db"],
"The sequence create db with.",
filename=True,
equate=False), # Should this be required?
_Option(["-dbtype", "dbtype"],
"Molecule type of target db (string, 'nucl' or 'prot').",
equate=False)]
try:
# Insert extra parameters - at the start just in case there
# are any arguments which must come last:
self.parameters = extra_parameters + self.parameters
except AttributeError:
# Should we raise an error? The subclass should have set this up!
self.parameters = extra_parameters
AbstractCommandline.__init__(self, cmd, **kwargs)
def combinetargets(targets, targetpath, mol_type='nt', clear_format=False):
"""
Creates a set of all unique sequences in a list of supplied FASTA files. Properly formats headers and sequences
to be compatible with local pipelines. Splits hybrid entries. Removes illegal characters.
:param targets: fasta gene targets to combine
:param targetpath: folder containing the targets
:param mol_type: type STR: nt or prot sequence. Default is nt
:param clear_format: type BOOL: Remove any NCBI-like formatting, and attempt to use the accession/gi as the
record.id. Default is False
"""
# As part of the automated pipeline, this method can be called without having target files. Ensure that
# there actually are files before proceeding
if targets:
make_path(targetpath)
with open(os.path.join(targetpath, 'combinedtargets.fasta'), 'w') as combined:
idset = set()
for target in targets:
# Remove non-unicode characters present in the FASTA files
cleanedstring = str()
# Read in the file as binary
with open(target, 'rb') as fasta:
# Import all the text
text = fasta.read()
# Convert the binary variable to a string, ignoring non-UTF-8 characters
cleanedstring += text.decode('utf-8', 'ignore')
# Overwrite the file with the clean string
with open(target, 'w') as fasta:
fasta.write(cleanedstring)
# Clean up each record
for record in SeqIO.parse(target, 'fasta'):
# In case FASTA records have been spliced together, allow for the splitting of
# these records
if '>' in record.seq:
# Split the two records apart on '>' symbols
record.seq, hybrid = record.seq.split('>')
# Split the header from the sequence e.g. sspC:6:CP003808.1ATGGAAAGTACATTAGA...
# will be split into sspC:6:CP003808.1 and ATGGAAAGTACATTAGA
hybridid, seq = re.findall('(.+\\d+\\.\\d)(.+)', str(hybrid))[0]
                        # Replace any dashes in the record.id with underscores
hybridid = hybridid.replace('-', '_')
                        # Convert the string to a Seq object; construction is currently
                        # the same for both nt and prot molecule types
                        hybridseq = Seq(seq)
# Create a SeqRecord of the sequence - use the sequence object and id
hybridrecord = SeqRecord(hybridseq,
description='',
id=hybridid)
                        # Remove any dashes or 'N's from the sequence data - makeblastdb can't handle sequences
# with gaps
# noinspection PyProtectedMember
hybridrecord.seq._data = hybridrecord.seq._data.replace('-', '').replace('N', '')
# Write the original record to the file
# Extract the sequence record from each entry in the multifasta
                        # Replace any dashes in the record.id with underscores
record.id = record.id.replace('-', '_')
                        # Remove any dashes or 'N's from the sequence data - makeblastdb can't handle sequences
# with gaps
# noinspection PyProtectedMember
record.seq._data = record.seq._data.replace('-', '').replace('N', '')
# Clear the name and description attributes of the record
record.name = ''
record.description = ''
if record.id not in idset:
SeqIO.write(record, combined, 'fasta')
if hybridrecord.id not in idset:
# Write the second record to file
SeqIO.write(hybridrecord, combined, 'fasta')
idset.add(hybridrecord.id)
else:
# Extract the sequence record from each entry in the multifasta
                        # Replace any dashes in the record.id with underscores
record.id = record.id.replace('-', '_')
# Remove any NCBI formatting, e.g. gi|07PF0776_00001|ref|1234| becomes 07PF0776_00001
if '|' in record.id and clear_format:
record.id = record.id.split('|')[1]
                        # Remove any dashes or 'N's from the sequence data - makeblastdb can't handle sequences
# with gaps
# noinspection PyProtectedMember
record.seq._data = record.seq._data.replace('-', '').replace('N', '')
# Clear the name and description attributes of the record
record.name = ''
record.description = ''
if record.id not in idset:
SeqIO.write(record, combined, 'fasta')
idset.add(record.id)
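# A minimal usage sketch for combinetargets (the paths are hypothetical):
#     combinetargets(targets=sorted(glob.glob('/data/targets/*.fasta')),
#                    targetpath='/data/targets')
#     # writes /data/targets/combinedtargets.fasta containing the cleaned,
#     # deduplicated records from every input FASTA file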
class KeyboardInterruptError(Exception):
pass
def strainer(sequencepath):
"""
Locate all the FASTA files in the supplied sequence path. Create basic metadata objects for
each sample
"""
metadata_list = list()
assert os.path.isdir(sequencepath), 'Cannot locate sequence path as specified: {}' \
.format(sequencepath)
# Get the sequences in the sequences folder into a list. Note that they must have a file extension that
# begins with .fa
strains = sorted(glob.glob(os.path.join(sequencepath, '*.fa*')))
# Populate the metadata object. This object will be populated to mirror the objects created in the
# genome assembly pipeline. This way this script will be able to be used as a stand-alone, or as part
# of a pipeline
assert strains, 'Could not find any files with an extension starting with "fa" in {}. Please check ' \
'to ensure that your sequence path is correct'.format(sequencepath)
for sample in strains:
# Create the object
metadata = MetadataObject()
# Set the base file name of the sequence. Just remove the file extension
filename = os.path.splitext(os.path.split(sample)[1])[0]
# Set the .name attribute to be the file name
metadata.name = filename
# Create the .general attribute
metadata.general = GenObject()
metadata.commands = GenObject()
metadata.general.outputdirectory = os.path.join(sequencepath, filename)
# Set the .general.bestassembly file to be the name and path of the sequence file
metadata.general.bestassemblyfile = os.path.join(metadata.general.outputdirectory, '{sn}.fasta'
.format(sn=filename))
make_path(metadata.general.outputdirectory)
# Create a symlink to the directory
relative_symlink(sample,
metadata.general.outputdirectory)
metadata.general.logout = os.path.join(metadata.general.outputdirectory, 'out')
metadata.general.logerr = os.path.join(metadata.general.outputdirectory, 'err')
# Append the metadata for each sample to the list of samples
metadata_list.append(metadata)
return strains, metadata_list
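# A minimal usage sketch for strainer (the folder and file names are hypothetical):
#     strains, samples = strainer('/data/sequences')  # folder holds e.g. 2013-SEQ-0072.fasta
#     samples[0].name                      # '2013-SEQ-0072'
#     samples[0].general.bestassemblyfile  # '/data/sequences/2013-SEQ-0072/2013-SEQ-0072.fasta'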
# noinspection PyProtectedMember
def modify_usage_error(subcommand, program_list):
"""
Method to append the help menu to a modified usage error when a subcommand is specified, but options are missing
:param subcommand: subcommand function
:param program_list: list of acceptable sub-programs
"""
import click
from click._compat import get_text_stderr
from click.utils import echo
def show(self, file=None):
import sys
if file is None:
file = get_text_stderr()
color = None
if self.ctx is not None:
color = self.ctx.color
echo('Error: %s\n' % self.format_message(), file=file, color=color)
# Set the sys.argv to be the first two arguments passed to the script if the subcommand was specified
arg2 = sys.argv[1] if sys.argv[1] in program_list else str()
sys.argv = [' '.join([sys.argv[0], arg2])] if arg2 else [sys.argv[0]]
# Call the help
subcommand(['--help'])
click.exceptions.UsageError.show = show
| mit |
firebitsbr/infernal-twin | build/pillow/build/lib.linux-i686-2.7/PIL/SunImagePlugin.py | 26 | 1965 | #
# The Python Imaging Library.
# $Id$
#
# Sun image file handling
#
# History:
# 1995-09-10 fl Created
# 1996-05-28 fl Fixed 32-bit alignment
# 1998-12-29 fl Import ImagePalette module
# 2001-12-18 fl Fixed palette loading (from Jean-Claude Rimbault)
#
# Copyright (c) 1997-2001 by Secret Labs AB
# Copyright (c) 1995-1996 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.3"
from PIL import Image, ImageFile, ImagePalette, _binary
i16 = _binary.i16be
i32 = _binary.i32be
def _accept(prefix):
return len(prefix) >= 4 and i32(prefix) == 0x59a66a95
##
# Image plugin for Sun raster files.
class SunImageFile(ImageFile.ImageFile):
format = "SUN"
format_description = "Sun Raster File"
def _open(self):
# HEAD
s = self.fp.read(32)
if i32(s) != 0x59a66a95:
raise SyntaxError("not an SUN raster file")
offset = 32
self.size = i32(s[4:8]), i32(s[8:12])
depth = i32(s[12:16])
if depth == 1:
self.mode, rawmode = "1", "1;I"
elif depth == 8:
self.mode = rawmode = "L"
elif depth == 24:
self.mode, rawmode = "RGB", "BGR"
else:
raise SyntaxError("unsupported mode")
compression = i32(s[20:24])
if i32(s[24:28]) != 0:
length = i32(s[28:32])
offset = offset + length
self.palette = ImagePalette.raw("RGB;L", self.fp.read(length))
if self.mode == "L":
self.mode = rawmode = "P"
stride = (((self.size[0] * depth + 7) // 8) + 3) & (~3)
if compression == 1:
self.tile = [("raw", (0, 0)+self.size, offset, (rawmode, stride))]
elif compression == 2:
self.tile = [("sun_rle", (0, 0)+self.size, offset, rawmode)]
#
# registry
Image.register_open("SUN", SunImageFile, _accept)
Image.register_extension("SUN", ".ras")
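# With the registrations above in place, Image.open() dispatches Sun raster files
# to SunImageFile. A minimal sketch ('image.ras' is a hypothetical file):
#     from PIL import Image
#     im = Image.open('image.ras')
#     im.format, im.mode, im.size  # e.g. ('SUN', 'RGB', (640, 480))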
| gpl-3.0 |
manazhao/tf_recsys | tensorflow/python/training/proximal_gradient_descent_test.py | 102 | 8096 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Proximal Gradient Descent operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import proximal_gradient_descent
class ProximalGradientDescentOptimizerTest(test.TestCase):
def doTestProximalGradientDescentwithoutRegularization(
self, use_resource=False):
with self.test_session() as sess:
if use_resource:
var0 = resource_variable_ops.ResourceVariable([0.0, 0.0])
var1 = resource_variable_ops.ResourceVariable([0.0, 0.0])
else:
var0 = variables.Variable([0.0, 0.0])
var1 = variables.Variable([0.0, 0.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
3.0, l1_regularization_strength=0.0, l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose([0.0, 0.0], v0_val)
self.assertAllClose([0.0, 0.0], v1_val)
# Run 3 steps Proximal Gradient Descent.
for _ in range(3):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose(np.array([-0.9, -1.8]), v0_val)
self.assertAllClose(np.array([-0.09, -0.18]), v1_val)
def testProximalGradientDescentwithoutRegularization(self):
self.doTestProximalGradientDescentwithoutRegularization(use_resource=False)
def testResourceProximalGradientDescentwithoutRegularization(self):
self.doTestProximalGradientDescentwithoutRegularization(use_resource=True)
def testProximalGradientDescentwithoutRegularization2(self):
with self.test_session() as sess:
var0 = variables.Variable([1.0, 2.0])
var1 = variables.Variable([4.0, 3.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
3.0, l1_regularization_strength=0.0, l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([4.0, 3.0], v1_val)
# Run 3 steps Proximal Gradient Descent
for _ in range(3):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose(np.array([0.1, 0.2]), v0_val)
self.assertAllClose(np.array([3.91, 2.82]), v1_val)
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.test_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = proximal_gradient_descent.ProximalGradientDescentOptimizer(
1.0).minimize(loss)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[[-111, -138]], var0.eval(), atol=0.01)
def testProximalGradientDescentWithL1_L2(self):
with self.test_session() as sess:
var0 = variables.Variable([1.0, 2.0])
var1 = variables.Variable([4.0, 3.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
3.0, l1_regularization_strength=0.001, l2_regularization_strength=2.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([4.0, 3.0], v1_val)
# Run 10 steps Proximal Gradient Descent
for _ in range(10):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose(np.array([-0.0495, -0.0995]), v0_val)
self.assertAllClose(np.array([-0.0045, -0.0095]), v1_val)
def applyOptimizer(self, opt, steps=5, is_sparse=False):
if is_sparse:
var0 = variables.Variable([[1.0], [2.0]])
var1 = variables.Variable([[3.0], [4.0]])
grads0 = ops.IndexedSlices(
constant_op.constant(
[0.1], shape=[1, 1]),
constant_op.constant([0]),
constant_op.constant([2, 1]))
grads1 = ops.IndexedSlices(
constant_op.constant(
[0.02], shape=[1, 1]),
constant_op.constant([1]),
constant_op.constant([2, 1]))
else:
var0 = variables.Variable([1.0, 2.0])
var1 = variables.Variable([3.0, 4.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
sess = ops.get_default_session()
v0_val, v1_val = sess.run([var0, var1])
if is_sparse:
self.assertAllClose([[1.0], [2.0]], v0_val)
self.assertAllClose([[3.0], [4.0]], v1_val)
else:
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([3.0, 4.0], v1_val)
    # Run the optimizer for a few steps
for _ in range(steps):
update.run()
v0_val, v1_val = sess.run([var0, var1])
return v0_val, v1_val
def testEquivSparseGradientDescentwithoutRegularization(self):
with self.test_session():
val0, val1 = self.applyOptimizer(
proximal_gradient_descent.ProximalGradientDescentOptimizer(
3.0,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
is_sparse=True)
with self.test_session():
val2, val3 = self.applyOptimizer(
gradient_descent.GradientDescentOptimizer(3.0), is_sparse=True)
self.assertAllClose(val0, val2)
self.assertAllClose(val1, val3)
def testEquivGradientDescentwithoutRegularization(self):
with self.test_session():
val0, val1 = self.applyOptimizer(
proximal_gradient_descent.ProximalGradientDescentOptimizer(
3.0,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0))
with self.test_session():
val2, val3 = self.applyOptimizer(
gradient_descent.GradientDescentOptimizer(3.0))
self.assertAllClose(val0, val2)
self.assertAllClose(val1, val3)
if __name__ == "__main__":
test.main()
| apache-2.0 |
joshowen/django-allauth | allauth/socialaccount/providers/weixin/views.py | 6 | 2451 | import requests
from allauth.account import app_settings
from allauth.compat import reverse
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from allauth.utils import build_absolute_uri
from .client import WeixinOAuth2Client
from .provider import WeixinProvider
class WeixinOAuth2Adapter(OAuth2Adapter):
provider_id = WeixinProvider.id
access_token_url = 'https://api.weixin.qq.com/sns/oauth2/access_token'
profile_url = 'https://api.weixin.qq.com/sns/userinfo'
@property
def authorize_url(self):
settings = self.get_provider().get_settings()
url = settings.get(
'AUTHORIZE_URL', 'https://open.weixin.qq.com/connect/qrconnect')
return url
def complete_login(self, request, app, token, **kwargs):
openid = kwargs.get('response', {}).get('openid')
resp = requests.get(self.profile_url,
params={'access_token': token.token,
'openid': openid})
extra_data = resp.json()
nickname = extra_data.get('nickname')
if nickname:
extra_data['nickname'] = nickname.encode(
'raw_unicode_escape').decode('utf-8')
return self.get_provider().sociallogin_from_response(request,
extra_data)
class WeixinOAuth2ClientMixin(object):
def get_client(self, request, app):
callback_url = reverse(self.adapter.provider_id + "_callback")
protocol = (
self.adapter.redirect_uri_protocol or
app_settings.DEFAULT_HTTP_PROTOCOL)
callback_url = build_absolute_uri(
request, callback_url,
protocol=protocol)
provider = self.adapter.get_provider()
scope = provider.get_scope(request)
client = WeixinOAuth2Client(
self.request, app.client_id, app.secret,
self.adapter.access_token_method,
self.adapter.access_token_url,
callback_url,
scope)
return client
class WeixinOAuth2LoginView(WeixinOAuth2ClientMixin, OAuth2LoginView):
pass
class WeixinOAuth2CallbackView(WeixinOAuth2ClientMixin, OAuth2CallbackView):
pass
oauth2_login = WeixinOAuth2LoginView.adapter_view(WeixinOAuth2Adapter)
oauth2_callback = WeixinOAuth2CallbackView.adapter_view(WeixinOAuth2Adapter)
| mit |
biospi/seamass-windeps | src/boost_1_57_0/libs/python/test/vector_indexing_suite.py | 46 | 9555 | # Copyright Joel de Guzman 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
'''
#####################################################################
# Check an object that we will use as container element
#####################################################################
>>> from vector_indexing_suite_ext import *
>>> x = X('hi')
>>> x
hi
>>> x.reset() # a member function that modifies X
>>> x
reset
>>> x.foo() # another member function that modifies X
>>> x
foo
# test that a string is implicitly convertible
# to an X
>>> x_value('bochi bochi')
'gotya bochi bochi'
#####################################################################
# Iteration
#####################################################################
>>> def print_xvec(xvec):
... s = '[ '
... for x in xvec:
... s += repr(x)
... s += ' '
... s += ']'
... print s
#####################################################################
# Replace all the contents using slice syntax
#####################################################################
>>> v = XVec()
>>> v[:] = [X('a'),X('b'),X('c'),X('d'),X('e')]
>>> print_xvec(v)
[ a b c d e ]
#####################################################################
# Indexing
#####################################################################
>>> len(v)
5
>>> v[0]
a
>>> v[1]
b
>>> v[2]
c
>>> v[3]
d
>>> v[4]
e
>>> v[-1]
e
>>> v[-2]
d
>>> v[-3]
c
>>> v[-4]
b
>>> v[-5]
a
#####################################################################
# Deleting an element
#####################################################################
>>> del v[0]
>>> v[0] = 'yaba' # must do implicit conversion
>>> print_xvec(v)
[ yaba c d e ]
#####################################################################
# Calling a mutating function of a container element
#####################################################################
>>> v[3].reset()
>>> v[3]
reset
#####################################################################
# Copying a container element
#####################################################################
>>> x = X(v[3])
>>> x
reset
>>> x.foo()
>>> x
foo
>>> v[3] # should not be changed to 'foo'
reset
#####################################################################
# Referencing a container element
#####################################################################
>>> x = v[3]
>>> x
reset
>>> x.foo()
>>> x
foo
>>> v[3] # should be changed to 'foo'
foo
#####################################################################
# Slice
#####################################################################
>>> sl = v[0:2]
>>> print_xvec(sl)
[ yaba c ]
>>> sl[0].reset()
>>> sl[0]
reset
#####################################################################
# Reset the container again
#####################################################################
>>> v[:] = ['a','b','c','d','e'] # perform implicit conversion to X
>>> print_xvec(v)
[ a b c d e ]
#####################################################################
# Slice: replace [1:3] with an element
#####################################################################
>>> v[1:3] = X('z')
>>> print_xvec(v)
[ a z d e ]
#####################################################################
# Slice: replace [0:2] with a list
#####################################################################
>>> v[0:2] = ['1','2','3','4'] # perform implicit conversion to X
>>> print_xvec(v)
[ 1 2 3 4 d e ]
#####################################################################
# Slice: delete [3:4]
#####################################################################
>>> del v[3:4]
>>> print_xvec(v)
[ 1 2 3 d e ]
#####################################################################
# Slice: set [3:] to a list
#####################################################################
>>> v[3:] = [X('trailing'), X('stuff')] # a list
>>> print_xvec(v)
[ 1 2 3 trailing stuff ]
#####################################################################
# Slice: delete [:3]
#####################################################################
>>> del v[:3]
>>> print_xvec(v)
[ trailing stuff ]
#####################################################################
# Slice: insert a tuple to [0:0]
#####################################################################
>>> v[0:0] = ('leading','stuff') # can also be a tuple
>>> print_xvec(v)
[ leading stuff trailing stuff ]
#####################################################################
# Reset the container again
#####################################################################
>>> v[:] = ['a','b','c','d','e']
#####################################################################
# Some references to the container elements
#####################################################################
>>> z0 = v[0]
>>> z1 = v[1]
>>> z2 = v[2]
>>> z3 = v[3]
>>> z4 = v[4]
>>> z0 # proxy
a
>>> z1 # proxy
b
>>> z2 # proxy
c
>>> z3 # proxy
d
>>> z4 # proxy
e
#####################################################################
# Delete a container element
#####################################################################
>>> del v[2]
>>> print_xvec(v)
[ a b d e ]
#####################################################################
# Show that the references are still valid
#####################################################################
>>> z0 # proxy
a
>>> z1 # proxy
b
>>> z2 # proxy detached
c
>>> z3 # proxy index adjusted
d
>>> z4 # proxy index adjusted
e
#####################################################################
# Delete all container elements
#####################################################################
>>> del v[:]
>>> print_xvec(v)
[ ]
#####################################################################
# Show that the references are still valid
#####################################################################
>>> z0 # proxy detached
a
>>> z1 # proxy detached
b
>>> z2 # proxy detached
c
>>> z3 # proxy detached
d
>>> z4 # proxy detached
e
#####################################################################
# Reset the container again
#####################################################################
>>> v[:] = ['a','b','c','d','e']
#####################################################################
# renew the references to the container elements
#####################################################################
>>> z0 = v[0]
>>> z1 = v[1]
>>> z2 = v[2]
>>> z3 = v[3]
>>> z4 = v[4]
>>> z0 # proxy
a
>>> z1 # proxy
b
>>> z2 # proxy
c
>>> z3 # proxy
d
>>> z4 # proxy
e
#####################################################################
# Set [2:4] to a list such that there will be more elements
#####################################################################
>>> v[2:4] = ['x','y','v']
>>> print_xvec(v)
[ a b x y v e ]
#####################################################################
# Show that the references are still valid
#####################################################################
>>> z0 # proxy
a
>>> z1 # proxy
b
>>> z2 # proxy detached
c
>>> z3 # proxy detached
d
>>> z4 # proxy index adjusted
e
#####################################################################
# Contains
#####################################################################
>>> v[:] = ['a','b','c','d','e'] # reset again
>>> assert 'a' in v
>>> assert 'b' in v
>>> assert 'c' in v
>>> assert 'd' in v
>>> assert 'e' in v
>>> assert not 'X' in v
>>> assert not 12345 in v
#####################################################################
# Show that iteration allows mutable access to the elements
#####################################################################
>>> v[:] = ['a','b','c','d','e'] # reset again
>>> for x in v:
... x.reset()
>>> print_xvec(v)
[ reset reset reset reset reset ]
#####################################################################
# append
#####################################################################
>>> v[:] = ['a','b','c','d','e'] # reset again
>>> v.append('f')
>>> print_xvec(v)
[ a b c d e f ]
#####################################################################
# extend
#####################################################################
>>> v[:] = ['a','b','c','d','e'] # reset again
>>> v.extend(['f','g','h','i','j'])
>>> print_xvec(v)
[ a b c d e f g h i j ]
#####################################################################
# extend using a generator expression
#####################################################################
>>> v[:] = ['a','b','c','d','e'] # reset again
>>> def generator():
... addlist = ['f','g','h','i','j']
... for i in addlist:
... if i != 'g':
... yield i
>>> v.extend(generator())
>>> print_xvec(v)
[ a b c d e f h i j ]
#####################################################################
# vector of strings
#####################################################################
>>> sv = StringVec()
>>> sv.append('a')
>>> print sv[0]
a
#####################################################################
# END....
#####################################################################
'''
def run(args = None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
print 'running...'
import sys
status = run()[0]
if (status == 0): print "Done."
sys.exit(status)
| apache-2.0 |
memo/tensorflow | third_party/llvm/expand_cmake_vars.py | 168 | 2679 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Expands CMake variables in a text file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
_CMAKE_DEFINE_REGEX = re.compile(r"\s*#cmakedefine\s+([A-Za-z_0-9]*)(\s.*)?$")
_CMAKE_DEFINE01_REGEX = re.compile(r"\s*#cmakedefine01\s+([A-Za-z_0-9]*)")
_CMAKE_VAR_REGEX = re.compile(r"\${([A-Za-z_0-9]*)}")
def _parse_args(argv):
"""Parses arguments with the form KEY=VALUE into a dictionary."""
result = {}
for arg in argv:
k, v = arg.split("=")
result[k] = v
return result
def _expand_variables(input_str, cmake_vars):
"""Expands ${VARIABLE}s in 'input_str', using dictionary 'cmake_vars'.
Args:
input_str: the string containing ${VARIABLE} expressions to expand.
cmake_vars: a dictionary mapping variable names to their values.
Returns:
The expanded string.
"""
def replace(match):
if match.group(1) in cmake_vars:
return cmake_vars[match.group(1)]
return ""
return _CMAKE_VAR_REGEX.sub(replace, input_str)
def _expand_cmakedefines(line, cmake_vars):
"""Expands #cmakedefine declarations, using a dictionary 'cmake_vars'."""
# Handles #cmakedefine lines
match = _CMAKE_DEFINE_REGEX.match(line)
if match:
name = match.group(1)
suffix = match.group(2) or ""
if name in cmake_vars:
return "#define {}{}\n".format(name,
_expand_variables(suffix, cmake_vars))
else:
return "/* #undef {} */\n".format(name)
# Handles #cmakedefine01 lines
match = _CMAKE_DEFINE01_REGEX.match(line)
if match:
name = match.group(1)
value = cmake_vars.get(name, "0")
return "#define {} {}\n".format(name, value)
# Otherwise return the line unchanged.
return _expand_variables(line, cmake_vars)
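# A few illustrative expansions (the inputs are hypothetical):
#     >>> _expand_cmakedefines('#cmakedefine HAVE_FOO', {'HAVE_FOO': '1'})
#     '#define HAVE_FOO\n'
#     >>> _expand_cmakedefines('#cmakedefine HAVE_BAR', {})
#     '/* #undef HAVE_BAR */\n'
#     >>> _expand_cmakedefines('#cmakedefine01 HAVE_BAZ', {'HAVE_BAZ': '1'})
#     '#define HAVE_BAZ 1\n'
#     >>> _expand_cmakedefines('prefix is ${PREFIX}', {'PREFIX': '/usr'})
#     'prefix is /usr'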
def main():
cmake_vars = _parse_args(sys.argv[1:])
for line in sys.stdin:
sys.stdout.write(_expand_cmakedefines(line, cmake_vars))
if __name__ == "__main__":
main()
| apache-2.0 |
savoirfairelinux/OpenUpgrade | addons/purchase/wizard/purchase_line_invoice.py | 205 | 5419 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class purchase_line_invoice(osv.osv_memory):
""" To create invoice for purchase order line"""
_name = 'purchase.order.line_invoice'
_description = 'Purchase Order Line Make Invoice'
def makeInvoices(self, cr, uid, ids, context=None):
"""
To get Purchase Order line and create Invoice
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
        @return : returns the view of the created invoices
"""
if context is None:
context={}
record_ids = context.get('active_ids',[])
if record_ids:
res = False
invoices = {}
invoice_obj = self.pool.get('account.invoice')
purchase_obj = self.pool.get('purchase.order')
purchase_line_obj = self.pool.get('purchase.order.line')
invoice_line_obj = self.pool.get('account.invoice.line')
account_jrnl_obj = self.pool.get('account.journal')
def multiple_order_invoice_notes(orders):
notes = ""
for order in orders:
notes += "%s \n" % order.notes
return notes
def make_invoice_by_partner(partner, orders, lines_ids):
"""
create a new invoice for one supplier
@param partner : The object partner
@param orders : The set of orders to add in the invoice
@param lines : The list of line's id
"""
name = orders and orders[0].name or ''
journal_id = account_jrnl_obj.search(cr, uid, [('type', '=', 'purchase')], context=None)
journal_id = journal_id and journal_id[0] or False
a = partner.property_account_payable.id
inv = {
'name': name,
'origin': name,
'type': 'in_invoice',
'journal_id':journal_id,
'reference' : partner.ref,
'account_id': a,
'partner_id': partner.id,
'invoice_line': [(6,0,lines_ids)],
'currency_id' : orders[0].currency_id.id,
'comment': multiple_order_invoice_notes(orders),
'payment_term': orders[0].payment_term_id.id,
'fiscal_position': partner.property_account_position.id
}
inv_id = invoice_obj.create(cr, uid, inv)
for order in orders:
order.write({'invoice_ids': [(4, inv_id)]})
return inv_id
for line in purchase_line_obj.browse(cr, uid, record_ids, context=context):
if (not line.invoiced) and (line.state not in ('draft', 'cancel')):
if not line.partner_id.id in invoices:
invoices[line.partner_id.id] = []
acc_id = purchase_obj._choose_account_from_po_line(cr, uid, line, context=context)
inv_line_data = purchase_obj._prepare_inv_line(cr, uid, acc_id, line, context=context)
inv_line_data.update({'origin': line.order_id.name})
inv_id = invoice_line_obj.create(cr, uid, inv_line_data, context=context)
purchase_line_obj.write(cr, uid, [line.id], {'invoiced': True, 'invoice_lines': [(4, inv_id)]})
invoices[line.partner_id.id].append((line,inv_id))
res = []
for result in invoices.values():
il = map(lambda x: x[1], result)
orders = list(set(map(lambda x : x[0].order_id, result)))
res.append(make_invoice_by_partner(orders[0].partner_id, orders, il))
return {
'domain': "[('id','in', ["+','.join(map(str,res))+"])]",
'name': _('Supplier Invoices'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.invoice',
'view_id': False,
'context': "{'type':'in_invoice', 'journal_type': 'purchase'}",
'type': 'ir.actions.act_window'
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
reminisce/mxnet | tools/coreml/converter/utils.py | 46 | 3583 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
def load_model(model_name, epoch_num, data_shapes, label_shapes, label_names, gpus=''):
"""Returns a module loaded with the provided model.
Parameters
----------
model_name: str
Prefix of the MXNet model name as stored on the local directory.
epoch_num : int
Epoch number of model we would like to load.
data_shapes: list of tuples.
List of tuples where each tuple is a pair of input variable name and its shape.
label_shapes: list of (str, tuple)
Typically is ``data_iter.provide_label``.
label_names: list of str
Name of the output labels in the MXNet symbolic graph.
gpus: str
Comma separated string of gpu ids on which inferences are executed. E.g. 3,5,6 would refer to GPUs 3, 5 and 6.
If empty, we use CPU.
Returns
-------
MXNet module
"""
sym, arg_params, aux_params = mx.model.load_checkpoint(model_name, epoch_num)
mod = create_module(sym, data_shapes, label_shapes, label_names, gpus)
mod.set_params(
arg_params=arg_params,
aux_params=aux_params,
allow_missing=True
)
return mod
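# A minimal usage sketch (the checkpoint prefix, shapes and label names are hypothetical):
#     mod = load_model(model_name='resnet-18', epoch_num=0,
#                      data_shapes=[('data', (1, 3, 224, 224))],
#                      label_shapes=None,
#                      label_names=['softmax_label'])
#     # mod is now bound for inference with the checkpoint parameters loaded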
def create_module(sym, data_shapes, label_shapes, label_names, gpus=''):
"""Creates a new MXNet module.
Parameters
----------
sym : Symbol
An MXNet symbol.
data_shapes: list of tuples.
List of tuples where each tuple is a pair of input variable name and its shape.
label_shapes: list of (str, tuple)
Typically is ``data_iter.provide_label``.
label_names: list of str
Name of the output labels in the MXNet symbolic graph.
gpus: str
Comma separated string of gpu ids on which inferences are executed. E.g. 3,5,6 would refer to GPUs 3, 5 and 6.
If empty, we use CPU.
Returns
-------
MXNet module
"""
if gpus == '':
devices = mx.cpu()
else:
devices = [mx.gpu(int(i)) for i in gpus.split(',')]
data_names = [data_shape[0] for data_shape in data_shapes]
mod = mx.mod.Module(
symbol=sym,
data_names=data_names,
context=devices,
label_names=label_names
)
mod.bind(
for_training=False,
data_shapes=data_shapes,
label_shapes=label_shapes
)
return mod
| apache-2.0 |
mulllhausen/btc-inquisitor | mysql_grunt.py | 1 | 1676 | """module containing some general mysql-related functions"""
import MySQLdb, re
import config_grunt
import email_grunt
import filesystem_grunt
def connect():
"connect and do setup"
global cursor, mysql_db
mysql_params = config_grunt.config_dict["mysql"]
mysql_connection_params = {
"host": mysql_params["host"],
"db": mysql_params["db"],
"user": mysql_params["user"],
"passwd": mysql_params["passwd"]
}
try:
mysql_db = MySQLdb.connect(**mysql_connection_params)
except:
# don't email the exception message in case it contains the password
msg = "failed to connect to mysql database"
email_grunt.send(msg)
filesystem_grunt.update_errorlog(msg)
print "\n%s\n" % msg
raise
mysql_db.autocommit(True)
cursor = mysql_db.cursor(MySQLdb.cursors.DictCursor)
# connect when this module is imported
connect()
def disconnect():
mysql_db.close()
def quick_fetch(cmd, parameters = None):
"function for select statements where the result is required"
global cursor
cursor.execute(cmd, parameters)
return cursor.fetchall()
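# A minimal usage sketch (the table and column names are hypothetical):
#     rows = quick_fetch("SELECT hash FROM blocks WHERE height = %s", (123,))
#     for row in rows:
#         print row["hash"]  # DictCursor returns each row as a dict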
def execute(cmd, do_clean_query):
"""
function for any mysql statements where there may be no result, or we don't
care about the result. eg: updates, inserts, selects for a rowcount only.
"""
global cursor
if do_clean_query:
cmd = clean_query(cmd)
cursor.execute(cmd)
def clean_query(cmd):
"only use this function if the data contains no whitespace to preserve"
return re.sub("\s+", " ", cmd).strip()
#return cmd.replace("\n", " ").replace("\t", "").strip() # quicker?
| gpl-2.0 |
mosesfistos1/beetbox | beets/util/hidden.py | 20 | 2824 | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Simple library to work out if a file is hidden on different platforms."""
from __future__ import division, absolute_import, print_function
import os
import stat
import ctypes
import sys
import beets.util
def _is_hidden_osx(path):
"""Return whether or not a file is hidden on OS X.
This uses os.lstat to work out if a file has the "hidden" flag.
"""
file_stat = os.lstat(beets.util.syspath(path))
if hasattr(file_stat, 'st_flags') and hasattr(stat, 'UF_HIDDEN'):
return bool(file_stat.st_flags & stat.UF_HIDDEN)
else:
return False
def _is_hidden_win(path):
"""Return whether or not a file is hidden on Windows.
This uses GetFileAttributes to work out if a file has the "hidden" flag
(FILE_ATTRIBUTE_HIDDEN).
"""
# FILE_ATTRIBUTE_HIDDEN = 2 (0x2) from GetFileAttributes documentation.
hidden_mask = 2
# Retrieve the attributes for the file.
attrs = ctypes.windll.kernel32.GetFileAttributesW(beets.util.syspath(path))
    # Ensure we have valid attributes and compare them against the mask.
return attrs >= 0 and attrs & hidden_mask
def _is_hidden_dot(path):
"""Return whether or not a file starts with a dot.
Files starting with a dot are seen as "hidden" files on Unix-based OSes.
"""
return os.path.basename(path).startswith(b'.')
def is_hidden(path):
"""Return whether or not a file is hidden. `path` should be a
bytestring filename.
This method works differently depending on the platform it is called on.
On OS X, it uses both the result of `is_hidden_osx` and `is_hidden_dot` to
work out if a file is hidden.
On Windows, it uses the result of `is_hidden_win` to work out if a file is
hidden.
On any other operating systems (i.e. Linux), it uses `is_hidden_dot` to
work out if a file is hidden.
"""
# Run platform specific functions depending on the platform
if sys.platform == 'darwin':
return _is_hidden_osx(path) or _is_hidden_dot(path)
elif sys.platform == 'win32':
return _is_hidden_win(path)
else:
return _is_hidden_dot(path)
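# A minimal sketch on a Linux-style platform (the paths are hypothetical bytestrings):
#     >>> is_hidden(b'/tmp/.config')
#     True
#     >>> is_hidden(b'/tmp/visible.mp3')
#     False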
__all__ = ['is_hidden']
| mit |
SHOAHDAN/-tg-station | bot/Timeconverter.py | 67 | 7310 | #Sources:
# http://wwp.greenwichmeantime.com/time-zone/usa/eastern-time/convert/
# http://www.timeanddate.com/library/abbreviations/timezones/na/
# Times are GMT +- x
# For example:
# EST = -5
# GMT = 0
# UTC = 0
#Times are in hours,
#2.5 = 2 and a half hours
global times
times = {"ADT":-3,"HAA":-3, #Synonyms on the same line
"AKDT":-8,"HAY":-8,
"AKST":-9,"HNY":-9,
"AST":-4,"HNA":-4,
"CDT":-5,"HAC":-5,
"CST":-6,"HNC":-6,
"EDT":-4,"HAE":-4,
"EGST":0,
"EGT":-1,
"EST":-5,"HNE":-5,"ET":-5,
"HADT":-9,
"HAST":-10,
"MDT":-6,"HAR":-6,
"MST":-7,"HNR":-7,
"NDT":-2.5,"HAT":-2.5,
"NST":-3.5,"HNT":-3.5,
"PDT":-7,"HAP":-7,
"PMDT":-2,
"PMST":-3,
"PST":-8,"HNP":-8,"PT":-8,
"WGST":-2,
"WGT":-3,
"GMT":0,
"UTC":0}
def converter(zones,time):
#Zones should be a list containing
# ( From zone
# To zone )
global times
#from_z = for example UTC+00:00, WGT or GMT-05:30
#to_z = same style as above.
from_z,to_z = zones
from_z = from_z.upper()
to_z = to_z.upper()
if from_z.find("+") != -1:
from_zone_offset = from_z[from_z.find("+"):]
if ":" in from_zone_offset:
try:
from_zone_offset1,from_zone_offset2 = from_zone_offset.split(":")
except ValueError:
return "Too many or too small amount of values"
try:
from_zone_offset = int(from_zone_offset1) + int(from_zone_offset2)/60.0
except:
return "Error, the 'From Zone' variable has an incorrect offset number"
else:
try:
from_zone_offset = float(from_zone_offset)
except:
return "Error, the 'From Zone' variable has an incorrect offset number"
try:
from_zone_realtime = from_zone_offset + times[from_z[:from_z.find("+")]]
except KeyError:
return "Incorrect From zone"
elif "-" in from_z:
from_zone_offset = from_z[from_z.find("-"):]
if ":" in from_zone_offset:
from_zone_offset1,from_zone_offset2 = from_zone_offset.split(":")
try:
from_zone_offset = -int(from_zone_offset1) + int(from_zone_offset2)/60.0
except:
return "Error, the 'From Zone' variable has an incorrect offset number"
else:
try:
from_zone_offset = -float(from_zone_offset)
except:
return "Error, the 'From Zone' variable has an incorrect offset number"
from_zone_realtime = times[from_z[:from_z.find("-")]] - from_zone_offset
pass
else:
from_zone_offset = 0
try:
from_zone_realtime = from_zone_offset + times[from_z]
except KeyError:
return "Incorrect From zone"
if to_z.find("+") != -1:
to_zone_offset = to_z[to_z.find("+"):]
if ":" in to_zone_offset:
try:
to_zone_offset1,to_zone_offset2 = to_zone_offset.split(":")
except ValueError:
return "Too many or too small amount of values"
try:
to_zone_offset = int(to_zone_offset1) + int(to_zone_offset2)/60.0
except:
return "Error, the 'To Zone' variable has an incorrect offset number"
else:
try:
to_zone_offset = float(to_zone_offset)
except:
return "Error, the 'To Zone' variable has an incorrect offset number"
try:
to_zone_realtime = to_zone_offset + times[to_z[:to_z.find("+")]]
except KeyError:
return "The zone you want the time to be changed to is not found"
elif "-" in to_z:
to_zone_offset = to_z[to_z.find("-"):]
if ":" in to_zone_offset:
to_zone_offset1,to_zone_offset2 = to_zone_offset.split(":")
try:
to_zone_offset = -int(to_zone_offset1) + int(to_zone_offset2)/60.0
except:
return "Error, the 'To Zone' variable has an incorrect offset number"
else:
try:
to_zone_offset = -float(to_zone_offset)
except:
return "Error, the 'To Zone' variable has an incorrect offset number"
to_zone_realtime = times[to_z[:to_z.find("-")]] -to_zone_offset
pass
else:
to_zone_offset = 0
try:
to_zone_realtime = to_zone_offset + times[to_z]
except KeyError:
return "Incorrect To zone"
try:
time_hour,time_minute = time.split(":")
time_hour,time_minute = int(time_hour),int(time_minute)
string = ":"
except:
try:
time_hour,time_minute = time.split(".")
time_hour,time_minute = int(time_hour),int(time_minute)
string = "."
except ValueError:
return "The time was input in an odd way"
if to_zone_realtime % 1.0 == 0.0 and from_zone_realtime % 1.0 == 0.0:
time_hour = time_hour + (to_zone_realtime - from_zone_realtime)
return str(int(time_hour))+string+str(int(time_minute))
else:
if to_zone_realtime % 1.0 != 0.0 and from_zone_realtime % 1.0 != 0.0:
time_minute = time_minute + (((to_zone_realtime % 1.0) * 60) - ((from_zone_realtime % 1.0) * 60))
elif to_zone_realtime % 1.0 != 0.0 and from_zone_realtime % 1.0 == 0.0:
time_minute = time_minute + (((to_zone_realtime % 1.0) * 60) - 0)
elif to_zone_realtime % 1.0 == 0.0 and from_zone_realtime % 1.0 != 0.0:
time_minute = time_minute + (0 - ((from_zone_realtime % 1.0) * 60))
else:
print "Wut?"
time_hour = time_hour + (int(to_zone_realtime//1) - int(from_zone_realtime//1))
return str(int(time_hour))+string+str(int(time_minute))
def formatter(time):
if "." in time:
string = "."
elif ":" in time:
string = ":"
else:
return time
hours,minutes = time.split(string)
days = 0
if int(minutes) < 0:
buphours = int(hours)
hours,minutes = divmod(int(minutes),60)
hours += buphours
if int(minutes) > 60:
hours,minutes = divmod(int(minutes),60)
hours += int(hours)
if int(hours) < 0:
days = 0
days,hours = divmod(int(hours),24)
if int(hours) > 24:
days = 0
days,hours = divmod(int(hours),24)
if int(hours) == 24 and int(minutes) > 0:
days += 1
hours = int(hours) - 24
hours = str(hours)
minutes = str(minutes)
if len(minutes) == 1:
minutes = "0"+minutes
if len(hours) == 1:
hours = "0"+hours
if days > 0:
if days == 1:
return hours+string+minutes+" (Tomorrow)"
else:
return hours+string+minutes+" (After "+str(days)+" days)"
elif days < 0:
if days == -1:
return hours+string+minutes+" (Yesterday)"
else:
return hours+string+minutes+" ("+str(abs(days))+" days ago)"
return hours+string+minutes
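# A minimal sketch of chaining converter() and formatter() (the zones and times are examples):
#     >>> converter(("EST", "GMT"), "12:00")
#     '17:0'
#     >>> formatter(converter(("EST", "GMT"), "12:00"))
#     '17:00'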
| agpl-3.0 |
Y3K/django | django/contrib/admin/utils.py | 49 | 16855 | from __future__ import unicode_literals
import datetime
import decimal
from collections import defaultdict
from django.contrib.auth import get_permission_codename
from django.core.exceptions import FieldDoesNotExist
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.sql.constants import QUERY_TERMS
from django.forms.forms import pretty_name
from django.utils import formats, six, timezone
from django.utils.encoding import force_str, force_text, smart_text
from django.utils.html import format_html
from django.utils.text import capfirst
from django.utils.translation import ungettext
def lookup_needs_distinct(opts, lookup_path):
"""
Returns True if 'distinct()' should be used to query the given lookup path.
"""
lookup_fields = lookup_path.split('__')
# Remove the last item of the lookup path if it is a query term
if lookup_fields[-1] in QUERY_TERMS:
lookup_fields = lookup_fields[:-1]
# Now go through the fields (following all relations) and look for an m2m
for field_name in lookup_fields:
field = opts.get_field(field_name)
if hasattr(field, 'get_path_info'):
# This field is a relation, update opts to follow the relation
path_info = field.get_path_info()
opts = path_info[-1].to_opts
if any(path.m2m for path in path_info):
# This field is a m2m relation so we know we need to call distinct
return True
return False
def prepare_lookup_value(key, value):
"""
Returns a lookup value prepared to be used in queryset filtering.
"""
# if key ends with __in, split parameter into separate values
if key.endswith('__in'):
value = value.split(',')
# if key ends with __isnull, special case '' and the string literals 'false' and '0'
if key.endswith('__isnull'):
if value.lower() in ('', 'false', '0'):
value = False
else:
value = True
return value
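# A few illustrative conversions (the field names are hypothetical):
#     >>> prepare_lookup_value('id__in', '1,2,3')
#     ['1', '2', '3']
#     >>> prepare_lookup_value('pub_date__isnull', 'false')
#     False
#     >>> prepare_lookup_value('pub_date__isnull', '1')
#     True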
def quote(s):
"""
Ensure that primary key values do not confuse the admin URLs by escaping
any '/', '_' and ':' and similarly problematic characters.
Similar to urllib.quote, except that the quoting is slightly different so
that it doesn't get automatically unquoted by the Web browser.
"""
if not isinstance(s, six.string_types):
return s
res = list(s)
for i in range(len(res)):
c = res[i]
if c in """:/_#?;@&=+$,"[]<>%\n\\""":
res[i] = '_%02X' % ord(c)
return ''.join(res)
def unquote(s):
"""
Undo the effects of quote(). Based heavily on urllib.unquote().
"""
mychr = chr
myatoi = int
list = s.split('_')
res = [list[0]]
myappend = res.append
del list[0]
for item in list:
if item[1:2]:
try:
myappend(mychr(myatoi(item[:2], 16)) + item[2:])
except ValueError:
myappend('_' + item)
else:
myappend('_' + item)
return "".join(res)
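# Editorial example of the admin-URL quoting round trip ('/' is 0x2F, '_' is 0x5F):
#   quote('jack/bill')     -> 'jack_2Fbill'
#   quote('a_b')           -> 'a_5Fb'
#   unquote('jack_2Fbill') -> 'jack/bill'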
def flatten(fields):
"""Returns a list which is a single level of flattening of the
original list."""
flat = []
for field in fields:
if isinstance(field, (list, tuple)):
flat.extend(field)
else:
flat.append(field)
return flat
def flatten_fieldsets(fieldsets):
"""Returns a list of field names from an admin fieldsets structure."""
field_names = []
for name, opts in fieldsets:
field_names.extend(
flatten(opts['fields'])
)
return field_names
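# For instance (editor's illustration), a typical admin fieldsets structure flattens to:
#   flatten_fieldsets([(None, {'fields': ('name', ('street', 'city'))})])
#   -> ['name', 'street', 'city']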
def get_deleted_objects(objs, opts, user, admin_site, using):
"""
Find all objects related to ``objs`` that should also be deleted. ``objs``
must be a homogeneous iterable of objects (e.g. a QuerySet).
Returns a nested list of strings suitable for display in the
template with the ``unordered_list`` filter.
"""
collector = NestedObjects(using=using)
collector.collect(objs)
perms_needed = set()
def format_callback(obj):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
no_edit_link = '%s: %s' % (capfirst(opts.verbose_name),
force_text(obj))
if has_admin:
try:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.model_name),
None, (quote(obj._get_pk_val()),))
except NoReverseMatch:
# Change url doesn't exist -- don't display link to edit
return no_edit_link
p = '%s.%s' % (opts.app_label,
get_permission_codename('delete', opts))
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return format_html('{}: <a href="{}">{}</a>',
capfirst(opts.verbose_name),
admin_url,
obj)
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return no_edit_link
to_delete = collector.nested(format_callback)
protected = [format_callback(obj) for obj in collector.protected]
return to_delete, collector.model_count, perms_needed, protected
class NestedObjects(Collector):
def __init__(self, *args, **kwargs):
super(NestedObjects, self).__init__(*args, **kwargs)
self.edges = {} # {from_instance: [to_instances]}
self.protected = set()
self.model_count = defaultdict(int)
def add_edge(self, source, target):
self.edges.setdefault(source, []).append(target)
def collect(self, objs, source=None, source_attr=None, **kwargs):
for obj in objs:
if source_attr and not source_attr.endswith('+'):
related_name = source_attr % {
'class': source._meta.model_name,
'app_label': source._meta.app_label,
}
self.add_edge(getattr(obj, related_name), obj)
else:
self.add_edge(None, obj)
self.model_count[obj._meta.verbose_name_plural] += 1
try:
return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
except models.ProtectedError as e:
self.protected.update(e.protected_objects)
def related_objects(self, related, objs):
qs = super(NestedObjects, self).related_objects(related, objs)
return qs.select_related(related.field.name)
def _nested(self, obj, seen, format_callback):
if obj in seen:
return []
seen.add(obj)
children = []
for child in self.edges.get(obj, ()):
children.extend(self._nested(child, seen, format_callback))
if format_callback:
ret = [format_callback(obj)]
else:
ret = [obj]
if children:
ret.append(children)
return ret
def nested(self, format_callback=None):
"""
Return the graph as a nested list.
"""
seen = set()
roots = []
for root in self.edges.get(None, ()):
roots.extend(self._nested(root, seen, format_callback))
return roots
def can_fast_delete(self, *args, **kwargs):
"""
We always want to load the objects into memory so that we can display
them to the user in confirm page.
"""
return False
def model_format_dict(obj):
"""
Return a `dict` with keys 'verbose_name' and 'verbose_name_plural',
typically for use with string formatting.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
"""
if isinstance(obj, (models.Model, models.base.ModelBase)):
opts = obj._meta
elif isinstance(obj, models.query.QuerySet):
opts = obj.model._meta
else:
opts = obj
return {
'verbose_name': force_text(opts.verbose_name),
'verbose_name_plural': force_text(opts.verbose_name_plural)
}
def model_ngettext(obj, n=None):
"""
Return the appropriate `verbose_name` or `verbose_name_plural` value for
`obj` depending on the count `n`.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
If `obj` is a `QuerySet` instance, `n` is optional and the length of the
`QuerySet` is used.
"""
if isinstance(obj, models.query.QuerySet):
if n is None:
n = obj.count()
obj = obj.model
d = model_format_dict(obj)
singular, plural = d["verbose_name"], d["verbose_name_plural"]
return ungettext(singular, plural, n or 0)
def lookup_field(name, obj, model_admin=None):
opts = obj._meta
try:
f = _get_non_gfk_field(opts, name)
except FieldDoesNotExist:
# For non-field values, the value is either a method, property or
# returned via a callable.
if callable(name):
attr = name
value = attr(obj)
elif (model_admin is not None and
hasattr(model_admin, name) and
not name == '__str__' and
not name == '__unicode__'):
attr = getattr(model_admin, name)
value = attr(obj)
else:
attr = getattr(obj, name)
if callable(attr):
value = attr()
else:
value = attr
f = None
else:
attr = None
value = getattr(obj, name)
return f, attr, value
def _get_non_gfk_field(opts, name):
"""
For historical reasons, the admin app relies on GenericForeignKeys as being
"not found" by get_field(). This could likely be cleaned up.
Reverse relations should also be excluded as these aren't attributes of the
model (rather something like `foo_set`).
"""
field = opts.get_field(name)
if (field.is_relation and
# Generic foreign keys OR reverse relations
((field.many_to_one and not field.related_model) or field.one_to_many)):
raise FieldDoesNotExist()
return field
def label_for_field(name, model, model_admin=None, return_attr=False):
"""
Returns a sensible label for a field name. The name can be a callable,
property (but not created with @property decorator) or the name of an
    object's attribute, as well as a genuine field. If return_attr is
True, the resolved attribute (which could be a callable) is also returned.
This will be None if (and only if) the name refers to a field.
"""
attr = None
try:
field = _get_non_gfk_field(model._meta, name)
try:
label = field.verbose_name
except AttributeError:
# field is likely a ForeignObjectRel
label = field.related_model._meta.verbose_name
except FieldDoesNotExist:
if name == "__unicode__":
label = force_text(model._meta.verbose_name)
attr = six.text_type
elif name == "__str__":
label = force_str(model._meta.verbose_name)
attr = bytes
else:
if callable(name):
attr = name
elif model_admin is not None and hasattr(model_admin, name):
attr = getattr(model_admin, name)
elif hasattr(model, name):
attr = getattr(model, name)
else:
message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
if model_admin:
message += " or %s" % (model_admin.__class__.__name__,)
raise AttributeError(message)
if hasattr(attr, "short_description"):
label = attr.short_description
elif (isinstance(attr, property) and
hasattr(attr, "fget") and
hasattr(attr.fget, "short_description")):
label = attr.fget.short_description
elif callable(attr):
if attr.__name__ == "<lambda>":
label = "--"
else:
label = pretty_name(attr.__name__)
else:
label = pretty_name(name)
if return_attr:
return (label, attr)
else:
return label
def help_text_for_field(name, model):
help_text = ""
try:
field = _get_non_gfk_field(model._meta, name)
except FieldDoesNotExist:
pass
else:
if hasattr(field, 'help_text'):
help_text = field.help_text
return smart_text(help_text)
def display_for_field(value, field, empty_value_display):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
if field.flatchoices:
return dict(field.flatchoices).get(value, empty_value_display)
# NullBooleanField needs special-case null-handling, so it comes
# before the general null test.
elif isinstance(field, models.BooleanField) or isinstance(field, models.NullBooleanField):
return _boolean_icon(value)
elif value is None:
return empty_value_display
elif isinstance(field, models.DateTimeField):
return formats.localize(timezone.template_localtime(value))
elif isinstance(field, (models.DateField, models.TimeField)):
return formats.localize(value)
elif isinstance(field, models.DecimalField):
return formats.number_format(value, field.decimal_places)
elif isinstance(field, (models.IntegerField, models.FloatField)):
return formats.number_format(value)
elif isinstance(field, models.FileField) and value:
return format_html('<a href="{}">{}</a>', value.url, value)
else:
return smart_text(value)
def display_for_value(value, empty_value_display, boolean=False):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
if boolean:
return _boolean_icon(value)
elif value is None:
return empty_value_display
elif isinstance(value, datetime.datetime):
return formats.localize(timezone.template_localtime(value))
elif isinstance(value, (datetime.date, datetime.time)):
return formats.localize(value)
elif isinstance(value, six.integer_types + (decimal.Decimal, float)):
return formats.number_format(value)
else:
return smart_text(value)
class NotRelationField(Exception):
pass
def get_model_from_relation(field):
if hasattr(field, 'get_path_info'):
return field.get_path_info()[-1].to_opts.model
else:
raise NotRelationField
def reverse_field_path(model, path):
""" Create a reversed field path.
E.g. Given (Order, "user__groups"),
return (Group, "user__order").
Final field must be a related model, not a data field.
"""
reversed_path = []
parent = model
pieces = path.split(LOOKUP_SEP)
for piece in pieces:
field = parent._meta.get_field(piece)
# skip trailing data field if extant:
if len(reversed_path) == len(pieces) - 1: # final iteration
try:
get_model_from_relation(field)
except NotRelationField:
break
# Field should point to another model
if field.is_relation and not (field.auto_created and not field.concrete):
related_name = field.related_query_name()
parent = field.remote_field.model
else:
related_name = field.field.name
parent = field.related_model
reversed_path.insert(0, related_name)
return (parent, LOOKUP_SEP.join(reversed_path))
def get_fields_from_path(model, path):
""" Return list of Fields given path relative to model.
e.g. (ModelX, "user__groups__name") -> [
<django.db.models.fields.related.ForeignKey object at 0x...>,
<django.db.models.fields.related.ManyToManyField object at 0x...>,
<django.db.models.fields.CharField object at 0x...>,
]
"""
pieces = path.split(LOOKUP_SEP)
fields = []
for piece in pieces:
if fields:
parent = get_model_from_relation(fields[-1])
else:
parent = model
fields.append(parent._meta.get_field(piece))
return fields
def remove_trailing_data_field(fields):
""" Discard trailing non-relation field if extant. """
try:
get_model_from_relation(fields[-1])
except NotRelationField:
fields = fields[:-1]
return fields
| bsd-3-clause |
pombredanne/catawampus | tr/vendor/tornado/tornado/platform/interface.py | 15 | 2092 | #!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Interfaces for platform-specific functionality.
This module exists primarily for documentation purposes and as base classes
for other tornado.platform modules. Most code should import the appropriate
implementation from `tornado.platform.auto`.
"""
from __future__ import absolute_import, division, with_statement
def set_close_exec(fd):
"""Sets the close-on-exec bit (``FD_CLOEXEC``)for a file descriptor."""
raise NotImplementedError()
class Waker(object):
"""A socket-like object that can wake another thread from ``select()``.
The `~tornado.ioloop.IOLoop` will add the Waker's `fileno()` to
its ``select`` (or ``epoll`` or ``kqueue``) calls. When another
thread wants to wake up the loop, it calls `wake`. Once it has woken
up, it will call `consume` to do any necessary per-wake cleanup. When
the ``IOLoop`` is closed, it closes its waker too.
"""
def fileno(self):
"""Returns a file descriptor for this waker.
Must be suitable for use with ``select()`` or equivalent on the
local platform.
"""
raise NotImplementedError()
def wake(self):
"""Triggers activity on the waker's file descriptor."""
raise NotImplementedError()
def consume(self):
"""Called after the listen has woken up to do any necessary cleanup."""
raise NotImplementedError()
def close(self):
"""Closes the waker's file descriptor(s)."""
raise NotImplementedError()
| apache-2.0 |
breznak/nupic.biodat | ECG/MIT-BIH-Arrhythmia/model/swarm/permutations.py | 1 | 3870 | ECG_MIN = 850
ECG_MAX = 1311
NCOLS = 2048
NCELLS = 4
HZ=360
AHEAD=1
DATA_FILE=u'file://./inputdata.csv'
ITERATIONS=15000 # or -1 for whole dataset #override for swarming
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from nupic.swarming.permutationhelpers import *
# The name of the field being predicted. Any allowed permutation MUST contain
# the prediction field.
# (generated from PREDICTION_FIELD)
predictedField = 'ecg'
permutations = {
'modelParams': {
'sensorParams': {
'encoders': {
'ecgScalar': PermuteEncoder(fieldName='ecg', encoderClass='ScalarEncoder', resolution=PermuteChoices([0.2]), w=51, minval=ECG_MIN, maxval=ECG_MAX),
'ecgDelta': PermuteEncoder(fieldName='ecg', encoderClass='DeltaEncoder', n=PermuteChoices([512, 1024, 2048]), w=51, minval=ECG_MIN, maxval=ECG_MAX),
},
},
'spParams': {
'columnCount': PermuteChoices([256, 512, 1024, 2048]),
},
'tpParams': {
'cellsPerColumn': PermuteChoices([2, 4, 8, 16]),
'pamLength': 5*HZ, #PermuteChoices([5, 10, 50, 100, 1*HZ, 5*HZ, 10*HZ, 30*HZ]),
},
# 'anomalyParams': {
# 'mode': PermuteChoices(["pure", "likelihood", "weighted"]),
# 'slidingWindowSize': PermuteInt(0, 20),
# 'binaryAnomalyThreshold': PermuteFloat(0.0, 0.9),
# },
}
}
# Fields selected for final hypersearch report;
# NOTE: These values are used as regular expressions by RunPermutations.py's
# report generator
# (fieldname values generated from PERM_PREDICTED_FIELD_NAME)
report = [
'.*ecg.*',
]
# Permutation optimization setting: either minimize or maximize metric
# used by RunPermutations.
# NOTE: The value is used as a regular expressions by RunPermutations.py's
# report generator
# (generated from minimize = 'prediction:aae:window=1000:field=consumption')
minimize = "multiStepBestPredictions:multiStep:errorMetric='altMAPE':steps=1:window=1800:field=ecg" #1800=5*HZ
#minimize = "prediction:anomaly:desiredPct=0.1:errorMetric='altMAPE':modelName='hotgymAnomalySwarmingDemo':steps=1:window=100:field=consumption"
#############################################################################
def permutationFilter(perm):
""" This function can be used to selectively filter out specific permutation
combinations. It is called by RunPermutations for every possible permutation
of the variables in the permutations dict. It should return True for valid a
combination of permutation values and False for an invalid one.
Parameters:
---------------------------------------------------------
perm: dict of one possible combination of name:value
pairs chosen from permutations.
"""
# An example of how to use this
#if perm['__consumption_encoder']['maxval'] > 300:
# return False;
#
print perm
perm['modelParams']['tpParams']['columnCount']=perm['modelParams']['spParams']['columnCount']
return True
| gpl-2.0 |
FireBladeNooT/Medusa_1_6 | lib/tornado/test/util.py | 17 | 3023 | from __future__ import absolute_import, division, print_function, with_statement
import os
import platform
import socket
import sys
import textwrap
from tornado.testing import bind_unused_port
# Encapsulate the choice of unittest or unittest2 here.
# To be used as 'from tornado.test.util import unittest'.
if sys.version_info < (2, 7):
# In py26, we must always use unittest2.
import unittest2 as unittest
else:
# Otherwise, use whichever version of unittest was imported in
# tornado.testing.
from tornado.testing import unittest
skipIfNonUnix = unittest.skipIf(os.name != 'posix' or sys.platform == 'cygwin',
"non-unix platform")
# travis-ci.org runs our tests in an overworked virtual machine, which makes
# timing-related tests unreliable.
skipOnTravis = unittest.skipIf('TRAVIS' in os.environ,
'timing tests unreliable on travis')
skipOnAppEngine = unittest.skipIf('APPENGINE_RUNTIME' in os.environ,
'not available on Google App Engine')
# Set the environment variable NO_NETWORK=1 to disable any tests that
# depend on an external network.
skipIfNoNetwork = unittest.skipIf('NO_NETWORK' in os.environ,
'network access disabled')
skipIfNoIPv6 = unittest.skipIf(not socket.has_ipv6, 'ipv6 support not present')
skipBefore33 = unittest.skipIf(sys.version_info < (3, 3), 'PEP 380 (yield from) not available')
skipBefore35 = unittest.skipIf(sys.version_info < (3, 5), 'PEP 492 (async/await) not available')
skipNotCPython = unittest.skipIf(platform.python_implementation() != 'CPython',
'Not CPython implementation')
def refusing_port():
"""Returns a local port number that will refuse all connections.
Return value is (cleanup_func, port); the cleanup function
must be called to free the port to be reused.
"""
# On travis-ci, port numbers are reassigned frequently. To avoid
# collisions with other tests, we use an open client-side socket's
# ephemeral port number to ensure that nothing can listen on that
# port.
server_socket, port = bind_unused_port()
server_socket.setblocking(1)
client_socket = socket.socket()
client_socket.connect(("127.0.0.1", port))
conn, client_addr = server_socket.accept()
conn.close()
server_socket.close()
return (client_socket.close, client_addr[1])
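# Hypothetical usage sketch (added by the editor, not part of tornado):
#   cleanup, port = refusing_port()
#   try:
#       pass  # connecting to ("127.0.0.1", port) should now be refused (ECONNREFUSED)
#   finally:
#       cleanup()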
def exec_test(caller_globals, caller_locals, s):
"""Execute ``s`` in a given context and return the result namespace.
Used to define functions for tests in particular python
versions that would be syntax errors in older versions.
"""
# Flatten the real global and local namespace into our fake
# globals: it's all global from the perspective of code defined
# in s.
global_namespace = dict(caller_globals, **caller_locals)
local_namespace = {}
exec(textwrap.dedent(s), global_namespace, local_namespace)
return local_namespace
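# For instance (editor's illustration):
#   ns = exec_test(globals(), locals(), "def f(): return 42")
#   ns["f"]()  -> 42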
| gpl-3.0 |
woogers/volatility | volatility/plugins/linux/pkt_queues.py | 44 | 3171 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: [email protected]
@organization:
"""
import os
import volatility.obj as obj
import volatility.debug as debug
import volatility.plugins.linux.netstat as linux_netstat
import volatility.plugins.linux.common as linux_common
class linux_pkt_queues(linux_netstat.linux_netstat):
"""Writes per-process packet queues out to disk"""
def __init__(self, config, *args, **kwargs):
linux_netstat.linux_netstat.__init__(self, config, *args, **kwargs)
self._config.add_option('DUMP-DIR', short_option = 'D', default = None, help = 'output directory for recovered packets', action = 'store', type = 'str')
def process_queue(self, name, pid, fd_num, queue):
if queue.qlen == 0:
return
wrote = 0
fname = "{0:s}.{1:d}.{2:d}".format(name, pid, fd_num)
fd = None
sk_buff = queue.m("next")
while sk_buff and sk_buff != queue.v():
pkt_len = sk_buff.len
if pkt_len > 0 and pkt_len != 0xffffffff:
# only open once we have a packet with data
# otherwise we get 0 sized files
if fd == None:
fd = open(os.path.join(self.edir, fname), "wb")
start = sk_buff.data
data = self.addr_space.zread(start, pkt_len)
fd.write(data)
wrote = wrote + pkt_len
sk_buff = sk_buff.next
if wrote:
yield "Wrote {0:d} bytes to {1:s}".format(wrote, fname)
if fd:
fd.close()
def calculate(self):
linux_common.set_plugin_members(self)
self.edir = self._config.DUMP_DIR
if not self.edir:
debug.error("No output directory given.")
if not os.path.isdir(self.edir):
debug.error(self.edir + " is not a directory")
for (task, fd_num, inet_sock) in linux_netstat.linux_netstat(self._config).calculate():
sk = inet_sock.sk
for msg in self.process_queue("receive", task.pid, fd_num, sk.sk_receive_queue):
yield msg
for msg in self.process_queue("write", task.pid, fd_num, sk.sk_write_queue):
yield msg
def render_text(self, outfd, data):
for msg in data:
outfd.write(msg + "\n")
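# Typical invocation from the Volatility CLI (editorial note; the profile and image
# names below are placeholders, not taken from this file):
#   python vol.py --profile=<LinuxProfile> -f memory.lime linux_pkt_queues -D ./packets/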
| gpl-2.0 |
NoahFlowa/glowing-spoon | venv/lib/python2.7/site-packages/pip/_vendor/packaging/version.py | 1151 | 11556 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import collections
import itertools
import re
from ._structures import Infinity
__all__ = [
"parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
]
_Version = collections.namedtuple(
"_Version",
["epoch", "release", "dev", "pre", "post", "local"],
)
def parse(version):
"""
Parse the given version string and return either a :class:`Version` object
or a :class:`LegacyVersion` object depending on if the given version is
a valid PEP 440 version or a legacy version.
"""
try:
return Version(version)
except InvalidVersion:
return LegacyVersion(version)
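# Editorial illustration of the fallback behaviour (not part of the vendored module):
#   parse("1.0.post1")    -> Version object        (valid PEP 440)
#   parse("1.0-SNAPSHOT") -> LegacyVersion object  (not PEP 440, so it falls back)
#   parse("1.0a1") < parse("1.0")   -> True        (pre-releases sort before the final)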
class InvalidVersion(ValueError):
"""
An invalid version was found, users should refer to PEP 440.
"""
class _BaseVersion(object):
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, _BaseVersion):
return NotImplemented
return method(self._key, other._key)
class LegacyVersion(_BaseVersion):
def __init__(self, version):
self._version = str(version)
self._key = _legacy_cmpkey(self._version)
def __str__(self):
return self._version
def __repr__(self):
return "<LegacyVersion({0})>".format(repr(str(self)))
@property
def public(self):
return self._version
@property
def base_version(self):
return self._version
@property
def local(self):
return None
@property
def is_prerelease(self):
return False
@property
def is_postrelease(self):
return False
_legacy_version_component_re = re.compile(
r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)
_legacy_version_replacement_map = {
"pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}
def _parse_version_parts(s):
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _legacy_cmpkey(version):
# We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch
# greater than or equal to 0. This will effectively put the LegacyVersion,
# which uses the defacto standard originally implemented by setuptools,
# as before all PEP 440 versions.
epoch = -1
    # This scheme is taken from pkg_resources.parse_version in setuptools prior
    # to its adoption of the packaging library.
parts = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
if part < "*final":
while parts and parts[-1] == "*final-":
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == "00000000":
parts.pop()
parts.append(part)
parts = tuple(parts)
return epoch, parts
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
class Version(_BaseVersion):
_regex = re.compile(
r"^\s*" + VERSION_PATTERN + r"\s*$",
re.VERBOSE | re.IGNORECASE,
)
def __init__(self, version):
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion("Invalid version: '{0}'".format(version))
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(
match.group("pre_l"),
match.group("pre_n"),
),
post=_parse_letter_version(
match.group("post_l"),
match.group("post_n1") or match.group("post_n2"),
),
dev=_parse_letter_version(
match.group("dev_l"),
match.group("dev_n"),
),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self):
return "<Version({0})>".format(repr(str(self)))
def __str__(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
# Pre-release
if self._version.pre is not None:
parts.append("".join(str(x) for x in self._version.pre))
# Post-release
if self._version.post is not None:
parts.append(".post{0}".format(self._version.post[1]))
# Development release
if self._version.dev is not None:
parts.append(".dev{0}".format(self._version.dev[1]))
# Local version segment
if self._version.local is not None:
parts.append(
"+{0}".format(".".join(str(x) for x in self._version.local))
)
return "".join(parts)
@property
def public(self):
return str(self).split("+", 1)[0]
@property
def base_version(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
return "".join(parts)
@property
def local(self):
version_string = str(self)
if "+" in version_string:
return version_string.split("+", 1)[1]
@property
def is_prerelease(self):
return bool(self._version.dev or self._version.pre)
@property
def is_postrelease(self):
return bool(self._version.post)
def _parse_letter_version(letter, number):
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume if we are given a number, but we are not given a letter
# then this is using the implicit post release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
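# A few normalisations performed above (editorial annotation):
#   _parse_letter_version("alpha", None) -> ("a", 0)
#   _parse_letter_version("rev", "2")    -> ("post", 2)
#   _parse_letter_version(None, "1")     -> ("post", 1)   # implicit post release, e.g. "1.0-1"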
_local_version_seperators = re.compile(r"[\._-]")
def _parse_local_version(local):
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_seperators.split(local)
)
def _cmpkey(epoch, release, pre, post, dev, local):
# When we compare a release version, we want to compare it with all of the
# trailing zeros removed. So we'll use a reverse the list, drop all the now
# leading zeros until we come to something non zero, then take the rest
# re-reverse it back into the correct order and make it a tuple and use
# that for our sorting key.
release = tuple(
reversed(list(
itertools.dropwhile(
lambda x: x == 0,
reversed(release),
)
))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre segment, but we _only_ want to do this
# if there is not a pre or a post segment. If we have one of those then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
pre = -Infinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
pre = Infinity
# Versions without a post segment should sort before those with one.
if post is None:
post = -Infinity
# Versions without a development segment should sort after those with one.
if dev is None:
dev = Infinity
if local is None:
# Versions without a local segment should sort before those with one.
local = -Infinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alpha numeric segments sort before numeric segments
# - Alpha numeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
local = tuple(
(i, "") if isinstance(i, int) else (-Infinity, i)
for i in local
)
return epoch, release, pre, post, dev, local
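# Net effect of the key construction above (editor's note):
#   Version("1.0.dev0") < Version("1.0a0") < Version("1.0") < Version("1.0.post0")
# and Version("1.0") == Version("1.0.0"), since trailing zeros are dropped from the release tuple.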
| apache-2.0 |
lofar-astron/factor | factor/scripts/combine_skymodels.py | 2 | 1661 | #! /usr/bin/env python
"""
Script to combine two makesourcedb sky models
"""
import argparse
from argparse import RawTextHelpFormatter
import lsmtool
import sys
import os
def main(model1, model2, skymodel):
"""
Combines makesourcedb sky models
Parameters
----------
model1 : str
Filename of the input makesourcedb sky model 1
model2 : str
Filename of the input makesourcedb sky model 2
skymodel : str
Filename of the output makesourcedb sky model
"""
try:
s1 = lsmtool.load(model1)
except:
# If first sky model is empty or cannot be loaded, just copy second one
# to output file
os.system('cp -f {0} {1}'.format(model2, skymodel))
return
# Now try to load second sky model and combine with first one
try:
s2 = lsmtool.load(model2)
# Combine sky models, keeping all sources
s1.ungroup()
s2.ungroup()
s1.concatenate(s2, keep='all')
except:
# If second sky model is empty or cannot be loaded, just save s1 to output
pass
s1.write(skymodel, clobber=True)
if __name__ == '__main__':
descriptiontext = "Combine two makesourcedb sky models.\n"
parser = argparse.ArgumentParser(description=descriptiontext, formatter_class=RawTextHelpFormatter)
parser.add_argument('model1', help='name of input makesourcedb sky model 1')
parser.add_argument('model2', help='name of input makesourcedb sky model 2')
parser.add_argument('skymodel', help='name of the output makesourcedb sky model')
args = parser.parse_args()
main(args.model1, args.model2, args.skymodel)
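# Example invocation (editorial note; file names are illustrative only):
#   python combine_skymodels.py field_part1.skymodel field_part2.skymodel combined.skymodel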
| gpl-2.0 |
google-code-export/pyglet | tests/window/EVENT_KEY.py | 33 | 1087 | #!/usr/bin/env python
'''Test that key press and release events work correctly.
Expected behaviour:
One window will be opened. Type into this window and check the console
output for key press and release events. Check that the correct
key symbol and modifiers are reported.
Close the window or press ESC to end the test.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import unittest
from pyglet import window
from pyglet.window import key
class EVENT_KEYPRESS(unittest.TestCase):
def on_key_press(self, symbol, modifiers):
print 'Pressed %s with modifiers %s' % \
(key.symbol_string(symbol), key.modifiers_string(modifiers))
def on_key_release(self, symbol, modifiers):
print 'Released %s with modifiers %s' % \
(key.symbol_string(symbol), key.modifiers_string(modifiers))
def test_keypress(self):
w = window.Window(200, 200)
w.push_handlers(self)
while not w.has_exit:
w.dispatch_events()
w.close()
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
regul8/pupy | pupy/modules/pyexec.py | 27 | 1118 | # -*- coding: UTF8 -*-
from pupylib.PupyModule import *
import StringIO
import pupylib.utils
__class_name__="PythonExec"
class PythonExec(PupyModule):
""" execute python code on a remote system """
def init_argparse(self):
self.arg_parser = PupyArgumentParser(prog='pyexec', description=self.__doc__)
group=self.arg_parser.add_mutually_exclusive_group(required=True)
group.add_argument('--file', metavar="<path>", help="execute code from .py file")
group.add_argument('-c','--code', metavar='<code string>', help="execute python oneliner code. ex : 'import platform;print platform.uname()'")
def run(self, args):
code=""
if args.file:
self.info("loading code from %s ..."%args.file)
with open(args.file,'r') as f:
code=f.read()
else:
code=args.code
stdout=StringIO.StringIO()
stderr=StringIO.StringIO()
try:
with pupylib.utils.redirected_stdo(self.client.conn, stdout, stderr):
self.client.conn.execute(code+"\n")
res=stdout.getvalue()
err=stderr.getvalue()
if err.strip():
err="\n"+err
self.rawlog(res+err)
finally:
stdout.close()
stderr.close()
| bsd-3-clause |
sonictk/MARI-Extension-Pack | Scripts/Tools/View/extPack_screenshot_all_channels.py | 1 | 3102 | # ------------------------------------------------------------------------------
# Screenshot All Channels
# ------------------------------------------------------------------------------
# Will do Screenshots of all Channels and export them based on the path provided
# in Screenshot Settings
# ------------------------------------------------------------------------------
# http://mari.ideascale.com
# http://cg-cnu.blogspot.in/
# ------------------------------------------------------------------------------
# Written by Sreenivas Alapati, 2014
# ------------------------------------------------------------------------------
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
import mari
def screenshotAllChannels():
'''Take screenshot of all the channels for the current view '''
if mari.projects.current() == None:
mari.utils.message("No project currently open", title = "Error")
return
mari.utils.message("Snapshotting multiple Channels requires Incremental Screenshot Setting to be enabled")
mari.history.startMacro('Snapshot all Channels')
curGeo = mari.geo.current()
curChannel = curGeo.currentChannel()
chanList = curGeo.channelList()
curCanvas = mari.canvases.current()
mari.app.setWaitCursor()
for chan in chanList:
curGeo.setCurrentChannel(chan)
curCanvas.repaint()
snapAction = mari.actions.find ('/Mari/Canvas/Take Screenshot')
snapAction.trigger()
curGeo.setCurrentChannel(curChannel)
curCanvas.repaint()
mari.app.restoreCursor()
mari.history.stopMacro()
return
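# Editor's note: in practice this function is registered as a Mari menu action or
# run from the Python console, e.g.:
#   screenshotAllChannels()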
| bsd-3-clause |
CeltonMcGrath/TACTIC | src/pyasm/prod/web/asset_info_wdg.py | 6 | 9944 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['AssetInfoWdg', 'CondensedAssetInfoWdg', 'ShotInfoWdg', 'CondensedShotInfoWdg', 'SubmissionInfoWdg', 'GeneralInfoWdg']
from pyasm.web import *
from pyasm.widget import BaseTableElementWdg, ExpandableTextWdg, PublishLinkWdg, ThumbWdg
from frame_info_wdg import FrameRangeWdg
class AssetInfoWdg(BaseTableElementWdg):
'''widget to display the code, name and description in one column'''
def init(my):
my.thumb = None
def get_display(my):
my.sobject = my.get_current_sobject()
custom_css = my.get_option('css')
table = Table(css='embed')
if custom_css:
table.add_class(custom_css)
table.add_style("width: 200px")
table.add_col(css='large')
table.add_col()
table.add_color('color','color')
my._add_code(table)
my._add_name(table)
my._add_description(table)
return table
def get_simple_display(my):
sobject = my.get_current_sobject()
code = sobject.get_code()
name = sobject.get_name()
description = sobject.get_value("description")
return "%s, %s, %s" % (code, name, description)
def _add_code(my, table):
table.add_row()
table.add_cell(HtmlElement.i("Code:"))
table.add_cell( "<b>%s</b>" % my.sobject.get_code() )
def _add_name(my, table):
name = my.sobject.get_value("name", no_exception=True)
if not name:
return
table.add_row()
table.add_cell("<i>Name:</i>")
table.add_cell( my.sobject.get_value("name") )
def _add_description(my, table):
table.add_row()
table.add_cell("<i>Description:</i>")
expand = ExpandableTextWdg()
expand.set_id('asset_info_desc')
description = my.sobject.get_value("description", no_exception=True)
expand.set_value( WikiUtil().convert(description) )
expand.set_max_length(240)
table.add_cell( expand )
class CondensedAssetInfoWdg(AssetInfoWdg):
'''widget to display the code, name and description in one column'''
def get_display(my):
my.sobject = my.get_current_sobject()
custom_css = my.get_option('css')
div = DivWdg(css=custom_css)
div.add_color('color','color')
div.add_style('width', '18em')
code_span = SpanWdg('%s <i>%s</i>' \
%(my.sobject.get_code(), my.sobject.get_value('name')))
expand = ExpandableTextWdg()
expand.set_id('asset_info_desc')
description = my.sobject.get_value("description")
expand.set_value( WikiUtil().convert(description) )
desc_span = SpanWdg('Desc: ')
desc_span.add(expand)
div.add(code_span)
div.add(HtmlElement.br())
div.add(desc_span)
return div
class ShotInfoWdg(AssetInfoWdg):
'''widget to display the code, name and description in one column'''
def preprocess(my):
my.thumb = ThumbWdg()
my.thumb.set_icon_size('60')
my.thumb.set_sobjects(my.sobjects)
my.thumb.preprocess()
def get_display(my):
if not my.thumb:
my.preprocess()
my.sobject = my.get_current_sobject()
table = Table(css='embed')
table.add_color('color','color')
table.add_style("width: 300px")
table.add_row()
th = table.add_header("<i>Code: </i> <b style='font-size: 1.2em'>%s</b>" % my.sobject.get_code() )
# add status
th.add_style('text-align','left')
status_span = SpanWdg("", css='large')
th.add(status_span)
status = my.sobject.get_value("status")
if status:
status_span.add(my.sobject.get_value("status"))
table.add_row()
my.thumb.set_current_index(my.get_current_index())
thumb_td = table.add_cell(my.thumb)
row_span = 2
if my.sobject.has_value("priority"):
row_span = 3
# add priority
table.add_cell("<i>Priority: </i>")
priority = my.sobject.get_value("priority")
if not priority:
table.add_cell("None")
else:
table.add_cell(my.sobject.get_value("priority") )
# this row should be added only if priority is added
table.add_row()
thumb_td.set_attr('rowspan', row_span)
# add pipeline
table.add_cell("<i>Pipeline: </i>")
status = my.sobject.get_value("pipeline_code")
if not status:
table.add_cell("None")
else:
table.add_cell(my.sobject.get_value("pipeline_code") )
my._add_frame_range(table)
table.add_row()
td = table.add_cell( "<i>Description: </i>")
description = my.sobject.get_value("description")
expand = ExpandableTextWdg()
expand.set_id('asset_info_desc')
expand.set_value( WikiUtil().convert(description) )
expand.set_max_length(300)
td.add(expand)
main_div = DivWdg(table)
if my.get_option("publish") == "false":
return main_div
#my._add_publish_link(main_div)
return main_div
def get_simple_display(my):
sobject = my.get_current_sobject()
code = sobject.get_code()
description = sobject.get_value("description")
status = sobject.get_value("status")
return "%s, %s, %s" % (code, status, description)
def _add_frame_range(my, table):
frame_wdg = FrameRangeWdg()
frame_wdg.set_sobject(my.sobject)
table.add_row()
table.add_cell("<i>Frame Info:</i>")
table.add_cell( frame_wdg )
def _add_publish_link(my, main_div):
publish_link = PublishLinkWdg(my.sobject.get_search_type(), my.sobject.get_id())
div = DivWdg(publish_link)
div.add_style('padding-top','5px')
main_div.add(div)
# build an iframe to show publish browsing
search_type = my.sobject.get_search_type()
search_id = my.sobject.get_id()
from pyasm.widget import IconButtonWdg, IconWdg
browse_link = IconButtonWdg("Publish Browser", IconWdg.CONTENTS)
iframe = WebContainer.get_iframe()
iframe.set_width(100)
url = WebContainer.get_web().get_widget_url()
url.set_option("widget", "pyasm.prod.web.PublishBrowserWdg")
url.set_option("search_type", search_type)
url.set_option("search_id", search_id)
script = iframe.get_on_script(url.to_string())
browse_link.add_event("onclick", script)
div.add(browse_link)
div.set_style('padding-top: 6px')
class CondensedShotInfoWdg(ShotInfoWdg):
'''widget to display the code, name and description in one column'''
def get_display(my):
my.sobject = my.get_current_sobject()
custom_css = my.get_option('css')
div = DivWdg(css=custom_css)
div.add_color('color','color')
div.add_style('width', '18em')
code_span = SpanWdg('%s' % (my.sobject.get_code()))
expand = ExpandableTextWdg()
expand.set_id('shot_info_desc')
description = my.sobject.get_value("description")
expand.set_value( WikiUtil().convert(description) )
desc_span = SpanWdg()
desc_span.add(expand)
div.add(code_span)
div.add(HtmlElement.br())
div.add(desc_span)
return div
class SubmissionInfoWdg(AssetInfoWdg):
'''widget information about a submission in a condensed manner'''
def preprocess(my):
my.thumb = ThumbWdg()
my.thumb.set_sobjects(my.sobjects)
my.thumb.preprocess()
def get_display(my):
my.sobject = my.get_current_sobject()
table = Table(css='embed')
table.add_style("width: 300px")
table.add_color('color','color')
table.add_row()
td = table.add_cell("<i>Code: </i> <b style='font-size: 1.2em'>%s</b>" % my.sobject.get_code() )
td.add_style("background: #e0e0e0")
table.add_row()
my.thumb.set_current_index(my.get_current_index())
table.add_cell(my.thumb)
table2 = Table(css='embed')
table2.add_row()
table2.add_cell("<i>Status: </i>")
status = my.sobject.get_value("status")
if not status:
table2.add_cell("<i style='color: #c0c0c0'>None</i>")
else:
table2.add_cell(my.sobject.get_value("status") )
my._add_frame_range(table2)
table.add_cell( table2 )
table.add_row()
td = table.add_cell( "<i>Description: </i>")
description = my.sobject.get_value("description")
#td.add(WikiUtil().convert(description))
expand = ExpandableTextWdg()
expand.set_id('asset_info_desc')
expand.set_value( WikiUtil().convert(description) )
expand.set_max_length(300)
td.add(expand)
return table
class GeneralInfoWdg(AssetInfoWdg):
'''widget to display the code, name and description in one column'''
def get_display(my):
my.sobject = my.get_current_sobject()
custom_css = my.get_option('css')
table = Table(css='embed')
table.add_color('color','color')
if custom_css:
table.add_class(custom_css)
table.add_style("width: 200px")
table.add_col(css='large')
table.add_col()
my._add_code(table)
my._add_description(table)
| epl-1.0 |
mx3L/enigma2 | lib/python/Plugins/SystemPlugins/NetworkWizard/NetworkWizard.py | 18 | 13858 | from Screens.Wizard import wizardManager, WizardSummary
from Screens.WizardLanguage import WizardLanguage
from Screens.Rc import Rc
from Screens.MessageBox import MessageBox
from Components.Pixmap import Pixmap, MovingPixmap, MultiPixmap
from Components.Sources.Boolean import Boolean
from Components.Network import iNetwork
from Tools.Directories import resolveFilename, SCOPE_PLUGINS, SCOPE_SKIN_IMAGE
from enigma import eTimer
from os import system
class NetworkWizard(WizardLanguage, Rc):
skin = """
<screen position="0,0" size="720,576" title="Welcome..." flags="wfNoBorder" >
<widget name="text" position="153,40" size="340,300" font="Regular;22" />
<widget source="list" render="Listbox" position="53,340" size="440,180" scrollbarMode="showOnDemand" >
<convert type="StringList" />
</widget>
<widget name="config" position="53,340" zPosition="1" size="440,180" transparent="1" scrollbarMode="showOnDemand" />
<ePixmap pixmap="skin_default/buttons/button_red.png" position="40,225" zPosition="0" size="15,16" transparent="1" alphatest="on" />
<widget name="languagetext" position="55,225" size="95,30" font="Regular;18" />
<widget name="wizard" pixmap="skin_default/wizard.png" position="40,50" zPosition="10" size="110,174" alphatest="on" />
<widget name="rc" pixmaps="skin_default/rc.png,skin_default/rcold.png" position="500,50" zPosition="10" size="154,500" alphatest="on" />
<widget name="arrowdown" pixmap="skin_default/arrowdown.png" position="-100,-100" zPosition="11" size="37,70" alphatest="on" />
<widget name="arrowdown2" pixmap="skin_default/arrowdown.png" position="-100,-100" zPosition="11" size="37,70" alphatest="on" />
<widget name="arrowup" pixmap="skin_default/arrowup.png" position="-100,-100" zPosition="11" size="37,70" alphatest="on" />
<widget name="arrowup2" pixmap="skin_default/arrowup.png" position="-100,-100" zPosition="11" size="37,70" alphatest="on" />
<widget source="VKeyIcon" render="Pixmap" pixmap="skin_default/buttons/key_text.png" position="40,260" zPosition="0" size="35,25" transparent="1" alphatest="on" >
<convert type="ConditionalShowHide" />
</widget>
<widget name="HelpWindow" pixmap="skin_default/buttons/key_text.png" position="125,170" zPosition="1" size="1,1" transparent="1" alphatest="on" />
</screen>"""
def __init__(self, session, interface = None):
self.xmlfile = resolveFilename(SCOPE_PLUGINS, "SystemPlugins/NetworkWizard/networkwizard.xml")
WizardLanguage.__init__(self, session, showSteps = False, showStepSlider = False)
Rc.__init__(self)
self.session = session
self["wizard"] = Pixmap()
self["HelpWindow"] = Pixmap()
self["HelpWindow"].hide()
self["VKeyIcon"] = Boolean(False)
self.InstalledInterfaceCount = None
self.Adapterlist = None
self.InterfaceState = None
self.isInterfaceUp = None
self.WlanPluginInstalled = False
self.ap = None
self.w = None
if interface is not None:
self.selectedInterface = interface
else:
self.selectedInterface = None
self.NextStep = None
self.resetRef = None
self.checkRef = None
self.AdapterRef = None
self.APList = None
self.newAPlist = None
self.oldlist = None
self.originalInterfaceState = {}
self.originalInterfaceStateChanged = False
self.Text = None
self.rescanTimer = eTimer()
self.rescanTimer.callback.append(self.rescanTimerFired)
self.getInstalledInterfaceCount()
self.isWlanPluginInstalled()
def exitWizardQuestion(self, ret = False):
if (ret):
self.markDone()
self.close()
def markDone(self):
self.stopScan()
del self.rescanTimer
self.checkOldInterfaceState()
pass
def back(self):
self.stopScan()
self.ap = None
WizardLanguage.back(self)
def stopScan(self):
self.rescanTimer.stop()
if self.w is not None:
from Plugins.SystemPlugins.WirelessLan.Wlan import iWlan
iWlan.stopGetNetworkList()
self.w = None
def getInstalledInterfaceCount(self):
self.originalInterfaceState = {}
self.Adapterlist = iNetwork.getAdapterList()
self.InstalledInterfaceCount = len(self.Adapterlist)
if self.Adapterlist is not None:
if self.InstalledInterfaceCount == 1 and self.selectedInterface is None:
self.selectedInterface = self.Adapterlist[0]
for interface in iNetwork.getAdapterList():
self.originalInterfaceState[interface] = {}
self.originalInterfaceState[interface]["up"] = iNetwork.getAdapterAttribute(interface, 'up')
def selectInterface(self):
self.InterfaceState = None
if self.selectedInterface is None:
if self.InstalledInterfaceCount <= 1:
if not iNetwork.isWirelessInterface(self.selectedInterface):
self.NextStep = 'nwconfig'
else:
self.NextStep = 'asknetworktype'
self.checkInterface(self.selectedInterface)
else:
self.NextStep = 'selectinterface'
self.currStep = self.getStepWithID(self.NextStep)
self.afterAsyncCode()
else:
if not iNetwork.isWirelessInterface(self.selectedInterface):
self.NextStep = 'nwconfig'
else:
self.NextStep = 'asknetworktype'
self.checkInterface(self.selectedInterface)
def checkOldInterfaceState(self):
# disable up interface if it was originally down and config is unchanged.
if self.originalInterfaceStateChanged is False:
for interface in self.originalInterfaceState.keys():
if interface == self.selectedInterface:
if self.originalInterfaceState[interface]["up"] is False:
if iNetwork.checkforInterface(interface) is True:
system("ifconfig " + interface + " down")
def listInterfaces(self):
self.checkOldInterfaceState()
list = [(iNetwork.getFriendlyAdapterName(x),x) for x in iNetwork.getAdapterList()]
list.append((_("Exit network wizard"), "end"))
return list
def InterfaceSelectionMade(self, index):
self.selectedInterface = index
self.InterfaceSelect(index)
def InterfaceSelect(self, index):
if index == 'end':
self.NextStep = 'end'
elif index == 'eth0':
self.NextStep = 'nwconfig'
else:
self.NextStep = 'asknetworktype'
def InterfaceSelectionMoved(self):
self.InterfaceSelect(self.selection)
def checkInterface(self,iface):
self.stopScan()
if self.Adapterlist is None:
self.Adapterlist = iNetwork.getAdapterList()
if self.NextStep is not 'end':
if len(self.Adapterlist) == 0:
#Reset Network to defaults if network broken
iNetwork.resetNetworkConfig('lan', self.resetNetworkConfigCB)
self.resetRef = self.session.openWithCallback(self.resetNetworkConfigFinished, MessageBox, _("Please wait while we prepare your network interfaces..."), type = MessageBox.TYPE_INFO, enable_input = False)
if iface in iNetwork.getInstalledAdapters():
if iface in iNetwork.configuredNetworkAdapters and len(iNetwork.configuredNetworkAdapters) == 1:
if iNetwork.getAdapterAttribute(iface, 'up') is True:
self.isInterfaceUp = True
else:
self.isInterfaceUp = False
self.currStep = self.getStepWithID(self.NextStep)
self.afterAsyncCode()
else:
self.isInterfaceUp = iNetwork.checkforInterface(iface)
self.currStep = self.getStepWithID(self.NextStep)
self.afterAsyncCode()
else:
self.resetNetworkConfigFinished(False)
def resetNetworkConfigFinished(self,data):
if data is True:
self.currStep = self.getStepWithID(self.NextStep)
self.afterAsyncCode()
else:
self.currStep = self.getStepWithID(self.NextStep)
self.afterAsyncCode()
def resetNetworkConfigCB(self,callback,iface):
if callback is not None:
if callback is True:
iNetwork.getInterfaces(self.getInterfacesFinished)
def getInterfacesFinished(self, data):
if data is True:
if iNetwork.getAdapterAttribute(self.selectedInterface, 'up') is True:
self.isInterfaceUp = True
else:
self.isInterfaceUp = False
self.resetRef.close(True)
else:
print "we should never come here!"
def AdapterSetupEnd(self, iface):
self.originalInterfaceStateChanged = True
if iNetwork.getAdapterAttribute(iface, "dhcp") is True:
iNetwork.checkNetworkState(self.AdapterSetupEndFinished)
self.AdapterRef = self.session.openWithCallback(self.AdapterSetupEndCB, MessageBox, _("Please wait while we test your network..."), type = MessageBox.TYPE_INFO, enable_input = False)
else:
self.currStep = self.getStepWithID("confdns")
self.afterAsyncCode()
def AdapterSetupEndCB(self,data):
if data is True:
if iNetwork.isWirelessInterface(self.selectedInterface):
if self.WlanPluginInstalled == True:
from Plugins.SystemPlugins.WirelessLan.Wlan import iStatus
iStatus.getDataForInterface(self.selectedInterface,self.checkWlanStateCB)
else:
self.currStep = self.getStepWithID("checklanstatusend")
self.afterAsyncCode()
else:
self.currStep = self.getStepWithID("checklanstatusend")
self.afterAsyncCode()
def AdapterSetupEndFinished(self,data):
if data <= 2:
self.InterfaceState = True
else:
self.InterfaceState = False
self.AdapterRef.close(True)
def checkWlanStateCB(self,data,status):
if data is not None:
if data is True:
if status is not None:
text1 = _("Your receiver is now ready to be used.\n\nYour internet connection is working.\n\n")
text2 = _('Accesspoint:') + "\t" + str(status[self.selectedInterface]["accesspoint"]) + "\n"
text3 = _('SSID:') + "\t" + str(status[self.selectedInterface]["essid"]) + "\n"
text4 = _('Link quality:') + "\t" + str(status[self.selectedInterface]["quality"])+ "\n"
text5 = _('Signal strength:') + "\t" + str(status[self.selectedInterface]["signal"]) + "\n"
text6 = _('Bitrate:') + "\t" + str(status[self.selectedInterface]["bitrate"]) + "\n"
text7 = _('Encryption:') + " " + str(status[self.selectedInterface]["encryption"]) + "\n"
text8 = _("Please press OK to continue.")
infotext = text1 + text2 + text3 + text4 + text5 + text7 +"\n" + text8
self.currStep = self.getStepWithID("checkWlanstatusend")
self.Text = infotext
if str(status[self.selectedInterface]["accesspoint"]) == "Not-Associated":
self.InterfaceState = False
self.afterAsyncCode()
def checkNetwork(self):
iNetwork.checkNetworkState(self.checkNetworkStateCB)
self.checkRef = self.session.openWithCallback(self.checkNetworkCB, MessageBox, _("Please wait while we test your network..."), type = MessageBox.TYPE_INFO, enable_input = False)
def checkNetworkCB(self,data):
if data is True:
if iNetwork.isWirelessInterface(self.selectedInterface):
if self.WlanPluginInstalled == True:
from Plugins.SystemPlugins.WirelessLan.Wlan import iStatus
iStatus.getDataForInterface(self.selectedInterface,self.checkWlanStateCB)
else:
self.currStep = self.getStepWithID("checklanstatusend")
self.afterAsyncCode()
else:
self.currStep = self.getStepWithID("checklanstatusend")
self.afterAsyncCode()
def checkNetworkStateCB(self,data):
if data <= 2:
self.InterfaceState = True
else:
self.InterfaceState = False
self.checkRef.close(True)
def rescanTimerFired(self):
self.rescanTimer.stop()
self.updateAPList()
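# updateAPList: merge the freshly scanned access points with the previously shown entries and keep the cursor on the entry that was selected before the rescan.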
def updateAPList(self):
self.oldlist = self.APList
self.newAPlist = []
newList = []
newListIndex = None
currentListEntry = None
newList = self.listAccessPoints()
for oldentry in self.oldlist:
if oldentry not in newList:
newList.append(oldentry)
for newentry in newList:
self.newAPlist.append(newentry)
if len(self.newAPlist):
if (self.wizard[self.currStep].has_key("dynamiclist")):
currentListEntry = self["list"].getCurrent()
if currentListEntry is not None:
idx = 0
for entry in self.newAPlist:
if entry == currentListEntry:
newListIndex = idx
idx +=1
self.wizard[self.currStep]["evaluatedlist"] = self.newAPlist
self['list'].setList(self.newAPlist)
if newListIndex is not None:
self["list"].setIndex(newListIndex)
self["list"].updateList(self.newAPlist)
def listAccessPoints(self):
self.APList = []
if self.WlanPluginInstalled is False:
self.APList.append( ( _("No networks found"), None ) )
else:
from Plugins.SystemPlugins.WirelessLan.Wlan import iWlan
iWlan.setInterface(self.selectedInterface)
self.w = iWlan.getInterface()
aps = iWlan.getNetworkList()
if aps is not None:
print "[NetworkWizard.py] got Accespoints!"
tmplist = []
complist = []
for ap in aps:
a = aps[ap]
if a['active']:
tmplist.append( (a['bssid'], a['essid']) )
complist.append( (a['bssid'], a['essid']) )
for entry in tmplist:
if entry[1] == "":
for compentry in complist:
if compentry[0] == entry[0]:
complist.remove(compentry)
for entry in complist:
self.APList.append( (entry[1], entry[1]) )
if not len(aps):
self.APList.append( ( _("No networks found"), None ) )
self.rescanTimer.start(4000)
return self.APList
def AccessPointsSelectionMoved(self):
self.ap = self.selection
self.NextStep = 'wlanconfig'
def checkWlanSelection(self):
self.stopScan()
self.currStep = self.getStepWithID(self.NextStep)
def isWlanPluginInstalled(self):
try:
from Plugins.SystemPlugins.WirelessLan.Wlan import iWlan
except ImportError:
self.WlanPluginInstalled = False
else:
self.WlanPluginInstalled = True
def listChoices(self):
self.stopScan()
list = []
if self.WlanPluginInstalled == True:
list.append((_("Configure your wireless LAN again"), "scanwlan"))
list.append((_("Configure your internal LAN"), "nwconfig"))
list.append((_("Exit network wizard"), "end"))
return list
def ChoicesSelectionMade(self, index):
self.ChoicesSelect(index)
def ChoicesSelect(self, index):
if index == 'end':
self.NextStep = 'end'
elif index == 'nwconfig':
self.selectedInterface = "eth0"
self.NextStep = 'nwconfig'
else:
self.NextStep = 'asknetworktype'
def ChoicesSelectionMoved(self):
pass
| gpl-2.0 |
draugiskisprendimai/odoo | addons/website_crm_partner_assign/controllers/main.py | 271 | 7541 | # -*- coding: utf-8 -*-
import werkzeug
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.addons.website.models.website import slug, unslug
from openerp.tools.translate import _
class WebsiteCrmPartnerAssign(http.Controller):
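# Website controller for the public partner directory: a paginated "/partners" listing filterable by grade and country, plus the partner detail page below.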
_references_per_page = 40
@http.route([
'/partners',
'/partners/page/<int:page>',
'/partners/grade/<model("res.partner.grade"):grade>',
'/partners/grade/<model("res.partner.grade"):grade>/page/<int:page>',
'/partners/country/<model("res.country"):country>',
'/partners/country/<model("res.country"):country>/page/<int:page>',
'/partners/grade/<model("res.partner.grade"):grade>/country/<model("res.country"):country>',
'/partners/grade/<model("res.partner.grade"):grade>/country/<model("res.country"):country>/page/<int:page>',
], type='http', auth="public", website=True)
def partners(self, country=None, grade=None, page=0, **post):
country_all = post.pop('country_all', False)
partner_obj = request.registry['res.partner']
country_obj = request.registry['res.country']
search = post.get('search', '')
base_partner_domain = [('is_company', '=', True), ('grade_id.website_published', '=', True), ('website_published', '=', True)]
if search:
base_partner_domain += ['|', ('name', 'ilike', search), ('website_description', 'ilike', search)]
# group by grade
grade_domain = list(base_partner_domain)
if not country and not country_all:
country_code = request.session['geoip'].get('country_code')
if country_code:
country_ids = country_obj.search(request.cr, request.uid, [('code', '=', country_code)], context=request.context)
if country_ids:
country = country_obj.browse(request.cr, request.uid, country_ids[0], context=request.context)
if country:
grade_domain += [('country_id', '=', country.id)]
grades = partner_obj.read_group(
request.cr, SUPERUSER_ID, grade_domain, ["id", "grade_id"],
groupby="grade_id", orderby="grade_id DESC", context=request.context)
grades_partners = partner_obj.search(
request.cr, SUPERUSER_ID, grade_domain,
context=request.context, count=True)
# flag active grade
for grade_dict in grades:
grade_dict['active'] = grade and grade_dict['grade_id'][0] == grade.id
grades.insert(0, {
'grade_id_count': grades_partners,
'grade_id': (0, _("All Categories")),
'active': bool(grade is None),
})
# group by country
country_domain = list(base_partner_domain)
if grade:
country_domain += [('grade_id', '=', grade.id)]
countries = partner_obj.read_group(
request.cr, SUPERUSER_ID, country_domain, ["id", "country_id"],
groupby="country_id", orderby="country_id", context=request.context)
countries_partners = partner_obj.search(
request.cr, SUPERUSER_ID, country_domain,
context=request.context, count=True)
# flag active country
for country_dict in countries:
country_dict['active'] = country and country_dict['country_id'] and country_dict['country_id'][0] == country.id
countries.insert(0, {
'country_id_count': countries_partners,
'country_id': (0, _("All Countries")),
'active': bool(country is None),
})
# current search
if grade:
base_partner_domain += [('grade_id', '=', grade.id)]
if country:
base_partner_domain += [('country_id', '=', country.id)]
# format pager
if grade and not country:
url = '/partners/grade/' + slug(grade)
elif country and not grade:
url = '/partners/country/' + slug(country)
elif country and grade:
url = '/partners/grade/' + slug(grade) + '/country/' + slug(country)
else:
url = '/partners'
url_args = {}
if search:
url_args['search'] = search
if country_all:
url_args['country_all'] = True
partner_count = partner_obj.search_count(
request.cr, SUPERUSER_ID, base_partner_domain,
context=request.context)
pager = request.website.pager(
url=url, total=partner_count, page=page, step=self._references_per_page, scope=7,
url_args=url_args)
# search partners matching current search parameters
partner_ids = partner_obj.search(
request.cr, SUPERUSER_ID, base_partner_domain,
order="grade_id DESC",
context=request.context) # todo in trunk: order="grade_id DESC, implemented_count DESC", offset=pager['offset'], limit=self._references_per_page
partners = partner_obj.browse(request.cr, SUPERUSER_ID, partner_ids, request.context)
# remove me in trunk
partners = sorted(partners, key=lambda x: (x.grade_id.sequence if x.grade_id else 0, len([i for i in x.implemented_partner_ids if i.website_published])), reverse=True)
partners = partners[pager['offset']:pager['offset'] + self._references_per_page]
google_map_partner_ids = ','.join(map(str, [p.id for p in partners]))
values = {
'countries': countries,
'current_country': country,
'grades': grades,
'current_grade': grade,
'partners': partners,
'google_map_partner_ids': google_map_partner_ids,
'pager': pager,
'searches': post,
'search_path': "%s" % werkzeug.url_encode(post),
}
return request.website.render("website_crm_partner_assign.index", values)
# Do not use semantic controller due to SUPERUSER_ID
@http.route(['/partners/<partner_id>'], type='http', auth="public", website=True)
def partners_detail(self, partner_id, partner_name='', **post):
_, partner_id = unslug(partner_id)
current_grade, current_country = None, None
grade_id = post.get('grade_id')
country_id = post.get('country_id')
if grade_id:
grade_ids = request.registry['res.partner.grade'].exists(request.cr, request.uid, int(grade_id), context=request.context)
if grade_ids:
current_grade = request.registry['res.partner.grade'].browse(request.cr, request.uid, grade_ids[0], context=request.context)
if country_id:
country_ids = request.registry['res.country'].exists(request.cr, request.uid, int(country_id), context=request.context)
if country_ids:
current_country = request.registry['res.country'].browse(request.cr, request.uid, country_ids[0], context=request.context)
if partner_id:
partner = request.registry['res.partner'].browse(request.cr, SUPERUSER_ID, partner_id, context=request.context)
if partner.exists() and partner.website_published:
values = {
'main_object': partner,
'partner': partner,
'current_grade': current_grade,
'current_country': current_country
}
return request.website.render("website_crm_partner_assign.partner", values)
return self.partners(**post)
| agpl-3.0 |
trondeau/gnuradio | gr-digital/python/digital/qa_probe_density.py | 57 | 2132 | #!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, digital, blocks
class test_probe_density(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_001(self):
src_data = [0, 1, 0, 1]
expected_data = 1
src = blocks.vector_source_b(src_data)
op = digital.probe_density_b(1)
self.tb.connect(src, op)
self.tb.run()
result_data = op.density()
self.assertEqual(expected_data, result_data)
def test_002(self):
src_data = [1, 1, 1, 1]
expected_data = 1
src = blocks.vector_source_b(src_data)
op = digital.probe_density_b(0.01)
self.tb.connect(src, op)
self.tb.run()
result_data = op.density()
self.assertEqual(expected_data, result_data)
def test_003(self):
src_data = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
expected_data = 0.95243
src = blocks.vector_source_b(src_data)
op = digital.probe_density_b(0.01)
self.tb.connect(src, op)
self.tb.run()
result_data = op.density()
print result_data
self.assertAlmostEqual(expected_data, result_data, 5)
if __name__ == '__main__':
gr_unittest.run(test_probe_density, "test_probe_density.xml")
| gpl-3.0 |
ferabra/edx-platform | cms/djangoapps/contentstore/tests/utils.py | 56 | 17991 | '''
Utilities for contentstore tests
'''
import json
import textwrap
from mock import Mock
from django.conf import settings
from django.contrib.auth.models import User
from django.test.client import Client
from opaque_keys.edx.locations import SlashSeparatedCourseKey, AssetLocation
from contentstore.utils import reverse_url # pylint: disable=import-error
from student.models import Registration # pylint: disable=import-error
from xmodule.modulestore.split_mongo.split import SplitMongoModuleStore
from xmodule.contentstore.django import contentstore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.inheritance import own_metadata
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.xml_importer import import_course_from_xml
from xmodule.modulestore.tests.utils import ProceduralCourseTestMixin
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
def parse_json(response):
"""Parse response, which is assumed to be json"""
return json.loads(response.content)
def user(email):
"""look up a user by email"""
return User.objects.get(email=email)
def registration(email):
"""look up registration object by email"""
return Registration.objects.get(user__email=email)
class AjaxEnabledTestClient(Client):
"""
Convenience class to make testing easier.
"""
def ajax_post(self, path, data=None, content_type="application/json", **kwargs):
"""
Convenience method for client post which serializes the data into json and sets the accept type
to json
"""
if not isinstance(data, basestring):
data = json.dumps(data or {})
kwargs.setdefault("HTTP_X_REQUESTED_WITH", "XMLHttpRequest")
kwargs.setdefault("HTTP_ACCEPT", "application/json")
return self.post(path=path, data=data, content_type=content_type, **kwargs)
def get_html(self, path, data=None, follow=False, **extra):
"""
Convenience method for client.get which sets the accept type to html
"""
return self.get(path, data or {}, follow, HTTP_ACCEPT="text/html", **extra)
def get_json(self, path, data=None, follow=False, **extra):
"""
Convenience method for client.get which sets the accept type to json
"""
return self.get(path, data or {}, follow, HTTP_ACCEPT="application/json", **extra)
class CourseTestCase(ProceduralCourseTestMixin, ModuleStoreTestCase):
"""
Base class for Studio tests that require a logged in user and a course.
Also provides helper methods for manipulating and verifying the course.
"""
def setUp(self):
"""
These tests need a user in the DB so that the django Test Client can log them in.
The test user is created in the ModuleStoreTestCase setUp method.
They inherit from the ModuleStoreTestCase class so that the mongodb collection
will be cleared out before each test case execution and deleted
afterwards.
"""
self.user_password = super(CourseTestCase, self).setUp()
self.client = AjaxEnabledTestClient()
self.client.login(username=self.user.username, password=self.user_password)
self.course = CourseFactory.create()
def create_non_staff_authed_user_client(self, authenticate=True):
"""
Create a non-staff user, log them in (if authenticate=True), and return the client, user to use for testing.
"""
nonstaff, password = self.create_non_staff_user()
client = AjaxEnabledTestClient()
if authenticate:
client.login(username=nonstaff.username, password=password)
nonstaff.is_authenticated = lambda: authenticate
return client, nonstaff
def reload_course(self):
"""
Reloads the course object from the database
"""
self.course = self.store.get_course(self.course.id)
def save_course(self):
"""
Updates the course object in the database
"""
self.course.save()
self.store.update_item(self.course, self.user.id)
TEST_VERTICAL = 'vertical_test'
ORPHAN_DRAFT_VERTICAL = 'orphan_draft_vertical'
ORPHAN_DRAFT_HTML = 'orphan_draft_html'
PRIVATE_VERTICAL = 'a_private_vertical'
PUBLISHED_VERTICAL = 'a_published_vertical'
SEQUENTIAL = 'vertical_sequential'
DRAFT_HTML = 'draft_html'
DRAFT_VIDEO = 'draft_video'
LOCKED_ASSET_KEY = AssetLocation.from_deprecated_string('/c4x/edX/toy/asset/sample_static.txt')
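# Block and asset names created by import_and_populate_course() and verified again in check_populated_course().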
def import_and_populate_course(self):
"""
Imports the test toy course and populates it with additional test data
"""
content_store = contentstore()
import_course_from_xml(self.store, self.user.id, TEST_DATA_DIR, ['toy'], static_content_store=content_store)
course_id = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
# create an Orphan
# We had a bug where orphaned draft nodes caused export to fail. This is here to cover that case.
vertical = self.store.get_item(course_id.make_usage_key('vertical', self.TEST_VERTICAL), depth=1)
vertical.location = vertical.location.replace(name='no_references')
self.store.update_item(vertical, self.user.id, allow_not_found=True)
orphan_vertical = self.store.get_item(vertical.location)
self.assertEqual(orphan_vertical.location.name, 'no_references')
self.assertEqual(len(orphan_vertical.children), len(vertical.children))
# create an orphan vertical and html; we already don't try to import
# the orphaned vertical, but we should make sure we don't import
# the orphaned vertical's child html, too
orphan_draft_vertical = self.store.create_item(
self.user.id, course_id, 'vertical', self.ORPHAN_DRAFT_VERTICAL
)
orphan_draft_html = self.store.create_item(
self.user.id, course_id, 'html', self.ORPHAN_DRAFT_HTML
)
orphan_draft_vertical.children.append(orphan_draft_html.location)
self.store.update_item(orphan_draft_vertical, self.user.id)
# create a Draft vertical
vertical = self.store.get_item(course_id.make_usage_key('vertical', self.TEST_VERTICAL), depth=1)
draft_vertical = self.store.convert_to_draft(vertical.location, self.user.id)
self.assertTrue(self.store.has_published_version(draft_vertical))
# create a Private (draft only) vertical
private_vertical = self.store.create_item(self.user.id, course_id, 'vertical', self.PRIVATE_VERTICAL)
self.assertFalse(self.store.has_published_version(private_vertical))
# create a Published (no draft) vertical
public_vertical = self.store.create_item(self.user.id, course_id, 'vertical', self.PUBLISHED_VERTICAL)
public_vertical = self.store.publish(public_vertical.location, self.user.id)
self.assertTrue(self.store.has_published_version(public_vertical))
# add the new private and new public as children of the sequential
sequential = self.store.get_item(course_id.make_usage_key('sequential', self.SEQUENTIAL))
sequential.children.append(private_vertical.location)
sequential.children.append(public_vertical.location)
self.store.update_item(sequential, self.user.id)
# create an html and video component to make drafts:
draft_html = self.store.create_item(self.user.id, course_id, 'html', self.DRAFT_HTML)
draft_video = self.store.create_item(self.user.id, course_id, 'video', self.DRAFT_VIDEO)
# add them as children to the public_vertical
public_vertical.children.append(draft_html.location)
public_vertical.children.append(draft_video.location)
self.store.update_item(public_vertical, self.user.id)
# publish changes to vertical
self.store.publish(public_vertical.location, self.user.id)
# convert html/video to draft
self.store.convert_to_draft(draft_html.location, self.user.id)
self.store.convert_to_draft(draft_video.location, self.user.id)
# lock an asset
content_store.set_attr(self.LOCKED_ASSET_KEY, 'locked', True)
# create a non-portable link - should be rewritten in new courses
html_module = self.store.get_item(course_id.make_usage_key('html', 'nonportable'))
new_data = html_module.data = html_module.data.replace(
'/static/',
'/c4x/{0}/{1}/asset/'.format(course_id.org, course_id.course)
)
self.store.update_item(html_module, self.user.id)
html_module = self.store.get_item(html_module.location)
self.assertEqual(new_data, html_module.data)
return course_id
def check_populated_course(self, course_id):
"""
Verifies the content of the given course, per data that was populated in import_and_populate_course
"""
items = self.store.get_items(
course_id,
qualifiers={'category': 'vertical'},
revision=ModuleStoreEnum.RevisionOption.published_only
)
self.check_verticals(items)
def verify_item_publish_state(item, publish_state):
"""Verifies the publish state of the item is as expected."""
self.assertEqual(self.store.has_published_version(item), publish_state)
def get_and_verify_publish_state(item_type, item_name, publish_state):
"""
Gets the given item from the store and verifies the publish state
of the item is as expected.
"""
item = self.store.get_item(course_id.make_usage_key(item_type, item_name))
verify_item_publish_state(item, publish_state)
return item
# verify draft vertical has a published version with published children
vertical = get_and_verify_publish_state('vertical', self.TEST_VERTICAL, True)
for child in vertical.get_children():
verify_item_publish_state(child, True)
# verify that it has a draft too
self.assertTrue(getattr(vertical, "is_draft", False))
# make sure that we don't have a sequential that is in draft mode
sequential = get_and_verify_publish_state('sequential', self.SEQUENTIAL, True)
self.assertFalse(getattr(sequential, "is_draft", False))
# verify that we have the private vertical
private_vertical = get_and_verify_publish_state('vertical', self.PRIVATE_VERTICAL, False)
# verify that we have the public vertical
public_vertical = get_and_verify_publish_state('vertical', self.PUBLISHED_VERTICAL, True)
# verify that we have the draft html
draft_html = self.store.get_item(course_id.make_usage_key('html', self.DRAFT_HTML))
self.assertTrue(getattr(draft_html, 'is_draft', False))
# verify that we have the draft video
draft_video = self.store.get_item(course_id.make_usage_key('video', self.DRAFT_VIDEO))
self.assertTrue(getattr(draft_video, 'is_draft', False))
# verify verticals are children of sequential
for vert in [vertical, private_vertical, public_vertical]:
self.assertIn(vert.location, sequential.children)
# verify draft html is the child of the public vertical
self.assertIn(draft_html.location, public_vertical.children)
# verify draft video is the child of the public vertical
self.assertIn(draft_video.location, public_vertical.children)
# verify textbook exists
course = self.store.get_course(course_id)
self.assertGreater(len(course.textbooks), 0)
# verify asset attributes of locked asset key
self.assertAssetsEqual(self.LOCKED_ASSET_KEY, self.LOCKED_ASSET_KEY.course_key, course_id)
# verify non-portable links are rewritten
html_module = self.store.get_item(course_id.make_usage_key('html', 'nonportable'))
self.assertIn('/static/foo.jpg', html_module.data)
return course
def assertCoursesEqual(self, course1_id, course2_id):
"""
Verifies the content of the two given courses are equal
"""
course1_items = self.store.get_items(course1_id)
course2_items = self.store.get_items(course2_id)
self.assertGreater(len(course1_items), 0) # ensure it found content instead of [] == []
if len(course1_items) != len(course2_items):
course1_block_ids = set([item.location.block_id for item in course1_items])
course2_block_ids = set([item.location.block_id for item in course2_items])
raise AssertionError(
u"Course1 extra blocks: {}; course2 extra blocks: {}".format(
course1_block_ids - course2_block_ids, course2_block_ids - course1_block_ids
)
)
for course1_item in course1_items:
course1_item_loc = course1_item.location
course2_item_loc = course2_id.make_usage_key(course1_item_loc.block_type, course1_item_loc.block_id)
if course1_item_loc.block_type == 'course':
# mongo uses the run as the name, split uses 'course'
store = self.store._get_modulestore_for_courselike(course2_id) # pylint: disable=protected-access
new_name = 'course' if isinstance(store, SplitMongoModuleStore) else course2_item_loc.run
course2_item_loc = course2_item_loc.replace(name=new_name)
course2_item = self.store.get_item(course2_item_loc)
# compare published state
self.assertEqual(
self.store.has_published_version(course1_item),
self.store.has_published_version(course2_item)
)
# compare data
self.assertEqual(hasattr(course1_item, 'data'), hasattr(course2_item, 'data'))
if hasattr(course1_item, 'data'):
self.assertEqual(course1_item.data, course2_item.data)
# compare meta-data
self.assertEqual(own_metadata(course1_item), own_metadata(course2_item))
# compare children
self.assertEqual(course1_item.has_children, course2_item.has_children)
if course1_item.has_children:
expected_children = []
for course1_item_child in course1_item.children:
expected_children.append(
course2_id.make_usage_key(course1_item_child.block_type, course1_item_child.block_id)
)
self.assertEqual(expected_children, course2_item.children)
# compare assets
content_store = self.store.contentstore
course1_assets, count_course1_assets = content_store.get_all_content_for_course(course1_id)
_, count_course2_assets = content_store.get_all_content_for_course(course2_id)
self.assertEqual(count_course1_assets, count_course2_assets)
for asset in course1_assets:
asset_son = asset.get('content_son', asset['_id'])
self.assertAssetsEqual(asset_son, course1_id, course2_id)
def check_verticals(self, items):
""" Test getting the editing HTML for each vertical. """
# assert is here to make sure that the course being tested actually has verticals (units) to check.
self.assertGreater(len(items), 0, "Course has no verticals (units) to check")
for descriptor in items:
resp = self.client.get_html(get_url('container_handler', descriptor.location))
self.assertEqual(resp.status_code, 200)
def assertAssetsEqual(self, asset_son, course1_id, course2_id):
"""Verifies the asset of the given key has the same attributes in both given courses."""
content_store = contentstore()
category = asset_son.block_type if hasattr(asset_son, 'block_type') else asset_son['category']
filename = asset_son.block_id if hasattr(asset_son, 'block_id') else asset_son['name']
course1_asset_attrs = content_store.get_attrs(course1_id.make_asset_key(category, filename))
course2_asset_attrs = content_store.get_attrs(course2_id.make_asset_key(category, filename))
self.assertEqual(len(course1_asset_attrs), len(course2_asset_attrs))
for key, value in course1_asset_attrs.iteritems():
if key in ['_id', 'filename', 'uploadDate', 'content_son', 'thumbnail_location']:
pass
else:
self.assertEqual(value, course2_asset_attrs[key])
def mock_requests_get(*args, **kwargs):
"""
Returns mock responses for the youtube API.
"""
# pylint: disable=unused-argument
response_transcript_list = """
<transcript_list>
<track id="1" name="Custom" lang_code="en" />
<track id="0" name="Custom1" lang_code="en-GB"/>
</transcript_list>
"""
response_transcript = textwrap.dedent("""
<transcript>
<text start="100" dur="100">subs #1</text>
<text start="200" dur="40">subs #2</text>
<text start="240" dur="140">subs #3</text>
</transcript>
""")
if kwargs == {'params': {'lang': 'en', 'v': 'good_id_2'}}:
return Mock(status_code=200, text='')
elif kwargs == {'params': {'type': 'list', 'v': 'good_id_2'}}:
return Mock(status_code=200, text=response_transcript_list, content=response_transcript_list)
elif kwargs == {'params': {'lang': 'en', 'v': 'good_id_2', 'name': 'Custom'}}:
return Mock(status_code=200, text=response_transcript, content=response_transcript)
return Mock(status_code=404, text='')
def get_url(handler_name, key_value, key_name='usage_key_string', kwargs=None):
"""
Helper function for getting HTML for a page in Studio and checking that it does not error.
"""
return reverse_url(handler_name, key_name, key_value, kwargs)
| agpl-3.0 |
Lujeni/ansible | lib/ansible/modules/cloud/amazon/rds.py | 13 | 56969 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rds
version_added: "1.3"
short_description: create, delete, or modify Amazon rds instances, rds snapshots, and related facts
description:
- Creates, deletes, or modifies rds resources.
- When creating an instance it can be either a new instance or a read-only replica of an existing instance.
- This module has a dependency on python-boto >= 2.5 and will soon be deprecated.
- The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely on boto.rds2 (boto >= 2.26.0).
- Please use boto3 based M(rds_instance) instead.
options:
command:
description:
- Specifies the action to take. The 'reboot' option is available starting at version 2.0.
required: true
choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'reboot', 'restore' ]
type: str
instance_name:
description:
- Database instance identifier.
- Required except when using I(command=facts) or I(command=delete) on just a snapshot.
type: str
source_instance:
description:
- Name of the database to replicate.
- Used only when I(command=replicate).
type: str
db_engine:
description:
- The type of database.
- Used only when I(command=create).
- mariadb was added in version 2.2.
choices: ['mariadb', 'MySQL', 'oracle-se1', 'oracle-se2', 'oracle-se', 'oracle-ee',
'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres', 'aurora']
type: str
size:
description:
- Size in gigabytes of the initial storage for the DB instance.
- Used only when I(command=create) or I(command=modify).
type: str
instance_type:
description:
- The instance type of the database.
- If not specified then the replica inherits the same instance type as the source instance.
- Required when I(command=create).
- Optional when I(command=replicate), I(command=modify) or I(command=restore).
aliases: ['type']
type: str
username:
description:
- Master database username.
- Used only when I(command=create).
type: str
password:
description:
- Password for the master database username.
- Used only when I(command=create) or I(command=modify).
type: str
db_name:
description:
- Name of a database to create within the instance.
- If not specified then no database is created.
- Used only when I(command=create).
type: str
engine_version:
description:
- Version number of the database engine to use.
- If not specified then the current Amazon RDS default engine version is used
- Used only when I(command=create).
type: str
parameter_group:
description:
- Name of the DB parameter group to associate with this instance.
- If omitted then the RDS default DBParameterGroup will be used.
- Used only when I(command=create) or I(command=modify).
type: str
license_model:
description:
- The license model for this DB instance.
- Used only when I(command=create) or I(command=restore).
choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ]
type: str
multi_zone:
description:
- Specifies if this is a Multi-availability-zone deployment.
- Can not be used in conjunction with I(zone) parameter.
- Used only when I(command=create) or I(command=modify).
type: bool
iops:
description:
- Specifies the number of IOPS for the instance.
- Used only when I(command=create) or I(command=modify).
- Must be an integer greater than 1000.
type: str
security_groups:
description:
- Comma separated list of one or more security groups.
- Used only when I(command=create) or I(command=modify).
type: str
vpc_security_groups:
description:
- Comma separated list of one or more vpc security group ids.
- Also requires I(subnet) to be specified.
- Used only when I(command=create) or I(command=modify).
type: list
elements: str
port:
description:
- Port number that the DB instance uses for connections.
- Used only when I(command=create) or I(command=replicate).
- 'Defaults to the standard ports for each I(db_engine): C(3306) for MySQL and MariaDB, C(1521) for Oracle,
C(1433) for SQL Server, C(5432) for PostgreSQL.'
type: int
upgrade:
description:
- Indicates that minor version upgrades should be applied automatically.
- Used only when I(command=create) or I(command=modify) or I(command=restore) or I(command=replicate).
type: bool
default: false
option_group:
description:
- The name of the option group to use.
- If not specified then the default option group is used.
- Used only when I(command=create).
type: str
maint_window:
description:
- 'Maintenance window in format of C(ddd:hh24:mi-ddd:hh24:mi). (Example: C(Mon:22:00-Mon:23:15))'
- Times are specified in UTC.
- If not specified then a random maintenance window is assigned.
- Used only when I(command=create) or I(command=modify).
type: str
backup_window:
description:
- 'Backup window in format of C(hh24:mi-hh24:mi). (Example: C(18:00-20:30))'
- Times are specified in UTC.
- If not specified then a random backup window is assigned.
- Used only when I(command=create) or I(command=modify).
type: str
backup_retention:
description:
- Number of days backups are retained.
- Set to 0 to disable backups.
- Default is 1 day.
- 'Valid range: 0-35.'
- Used only when I(command=create) or I(command=modify).
type: str
zone:
description:
- availability zone in which to launch the instance.
- Used only when I(command=create), I(command=replicate) or I(command=restore).
- Can not be used in conjunction with I(multi_zone) parameter.
aliases: ['aws_zone', 'ec2_zone']
type: str
subnet:
description:
- VPC subnet group.
- If specified then a VPC instance is created.
- Used only when I(command=create).
type: str
snapshot:
description:
- Name of snapshot to take.
- When I(command=delete), if no I(snapshot) name is provided then no snapshot is taken.
- When I(command=delete), if no I(instance_name) is provided the snapshot is deleted.
- Used with I(command=facts), I(command=delete) or I(command=snapshot).
type: str
wait:
description:
- When I(command=create), I(command=replicate), I(command=modify) or I(command=restore), wait for the database to enter the 'available' state.
- When I(command=delete), wait for the database to be terminated.
type: bool
default: false
wait_timeout:
description:
- How long before wait gives up, in seconds.
- Used when I(wait=true).
default: 300
type: int
apply_immediately:
description:
- When I(apply_immediately=true), the modifications will be applied as soon as possible rather than waiting for the
next preferred maintenance window.
- Used only when I(command=modify).
type: bool
default: false
force_failover:
description:
- If enabled, the reboot is done using a MultiAZ failover.
- Used only when I(command=reboot).
type: bool
default: false
version_added: "2.0"
new_instance_name:
description:
- Name to rename an instance to.
- Used only when I(command=modify).
type: str
version_added: "1.5"
character_set_name:
description:
- Associate the DB instance with a specified character set.
- Used with I(command=create).
version_added: "1.9"
type: str
publicly_accessible:
description:
- Explicitly set whether the resource should be publicly accessible or not.
- Used with I(command=create), I(command=replicate).
- Requires boto >= 2.26.0
type: str
version_added: "1.9"
tags:
description:
- tags dict to apply to a resource.
- Used with I(command=create), I(command=replicate), I(command=restore).
- Requires boto >= 2.26.0
type: dict
version_added: "1.9"
requirements:
- "python >= 2.6"
- "boto"
author:
- "Bruce Pennypacker (@bpennypacker)"
- "Will Thames (@willthames)"
extends_documentation_fragment:
- aws
- ec2
'''
# FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD
EXAMPLES = '''
# Basic mysql provisioning example
- rds:
command: create
instance_name: new-database
db_engine: MySQL
size: 10
instance_type: db.m1.small
username: mysql_admin
password: 1nsecure
tags:
Environment: testing
Application: cms
# Create a read-only replica and wait for it to become available
- rds:
command: replicate
instance_name: new-database-replica
source_instance: new_database
wait: yes
wait_timeout: 600
# Delete an instance, but create a snapshot before doing so
- rds:
command: delete
instance_name: new-database
snapshot: new_database_snapshot
# Get facts about an instance
- rds:
command: facts
instance_name: new-database
register: new_database_facts
# Rename an instance and wait for the change to take effect
- rds:
command: modify
instance_name: new-database
new_instance_name: renamed-database
wait: yes
# Reboot an instance and wait for it to become available again
- rds:
command: reboot
instance_name: database
wait: yes
# Restore a Postgres db instance from a snapshot, wait for it to become available again, and
# then modify it to add your security group. Also, display the new endpoint.
# Note that the "publicly_accessible" option is allowed here just as it is in the AWS CLI
- local_action:
module: rds
command: restore
snapshot: mypostgres-snapshot
instance_name: MyNewInstanceName
region: us-west-2
zone: us-west-2b
subnet: default-vpc-xx441xxx
publicly_accessible: yes
wait: yes
wait_timeout: 600
tags:
Name: pg1_test_name_tag
register: rds
- local_action:
module: rds
command: modify
instance_name: MyNewInstanceName
region: us-west-2
vpc_security_groups: sg-xxx945xx
- debug:
msg: "The new db endpoint is {{ rds.instance.endpoint }}"
'''
RETURN = '''
instance:
description: the rds instance
returned: always
type: complex
contains:
engine:
description: the name of the database engine
returned: when RDS instance exists
type: str
sample: "oracle-se"
engine_version:
description: the version of the database engine
returned: when RDS instance exists
type: str
sample: "11.2.0.4.v6"
license_model:
description: the license model information
returned: when RDS instance exists
type: str
sample: "bring-your-own-license"
character_set_name:
description: the name of the character set that this instance is associated with
returned: when RDS instance exists
type: str
sample: "AL32UTF8"
allocated_storage:
description: the allocated storage size in gigabytes (GB)
returned: when RDS instance exists
type: str
sample: "100"
publicly_accessible:
description: the accessibility options for the DB instance
returned: when RDS instance exists
type: bool
sample: "true"
latest_restorable_time:
description: the latest time to which a database can be restored with point-in-time restore
returned: when RDS instance exists
type: str
sample: "1489707802.0"
secondary_availability_zone:
description: the name of the secondary AZ for a DB instance with multi-AZ support
returned: when RDS instance exists and is multi-AZ
type: str
sample: "eu-west-1b"
backup_window:
description: the daily time range during which automated backups are created if automated backups are enabled
returned: when RDS instance exists and automated backups are enabled
type: str
sample: "03:00-03:30"
auto_minor_version_upgrade:
description: indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window
returned: when RDS instance exists
type: bool
sample: "true"
read_replica_source_dbinstance_identifier:
description: the identifier of the source DB instance if this RDS instance is a read replica
returned: when read replica RDS instance exists
type: str
sample: "null"
db_name:
description: the name of the database to create when the DB instance is created
returned: when RDS instance exists
type: str
sample: "ASERTG"
endpoint:
description: the endpoint uri of the database instance
returned: when RDS instance exists
type: str
sample: "my-ansible-database.asdfaosdgih.us-east-1.rds.amazonaws.com"
port:
description: the listening port of the database instance
returned: when RDS instance exists
type: int
sample: 3306
parameter_groups:
description: the list of DB parameter groups applied to this RDS instance
returned: when RDS instance exists and parameter groups are defined
type: complex
contains:
parameter_apply_status:
description: the status of parameter updates
returned: when RDS instance exists
type: str
sample: "in-sync"
parameter_group_name:
description: the name of the DP parameter group
returned: when RDS instance exists
type: str
sample: "testawsrpprodb01spfile-1ujg7nrs7sgyz"
option_groups:
description: the list of option group memberships for this RDS instance
returned: when RDS instance exists
type: complex
contains:
option_group_name:
description: the option group name for this RDS instance
returned: when RDS instance exists
type: str
sample: "default:oracle-se-11-2"
status:
description: the status of the RDS instance's option group membership
returned: when RDS instance exists
type: str
sample: "in-sync"
pending_modified_values:
description: a dictionary of changes to the RDS instance that are pending
returned: when RDS instance exists
type: complex
contains:
db_instance_class:
description: the new DB instance class for this RDS instance that will be applied or is in progress
returned: when RDS instance exists
type: str
sample: "null"
db_instance_identifier:
description: the new DB instance identifier for this RDS instance that will be applied or is in progress
returned: when RDS instance exists
type: str
sample: "null"
allocated_storage:
description: the new allocated storage size for this RDS instance that will be applied or is in progress
returned: when RDS instance exists
type: str
sample: "null"
backup_retention_period:
description: the pending number of days for which automated backups are retained
returned: when RDS instance exists
type: str
sample: "null"
engine_version:
description: indicates the database engine version
returned: when RDS instance exists
type: str
sample: "null"
iops:
description: the new provisioned IOPS value for this RDS instance that will be applied or is being applied
returned: when RDS instance exists
type: str
sample: "null"
master_user_password:
description: the pending or in-progress change of the master credentials for this RDS instance
returned: when RDS instance exists
type: str
sample: "null"
multi_az:
description: indicates that the single-AZ RDS instance is to change to a multi-AZ deployment
returned: when RDS instance exists
type: str
sample: "null"
port:
description: specifies the pending port for this RDS instance
returned: when RDS instance exists
type: str
sample: "null"
db_subnet_groups:
description: information on the subnet group associated with this RDS instance
returned: when RDS instance exists
type: complex
contains:
description:
description: the subnet group associated with the DB instance
returned: when RDS instance exists
type: str
sample: "Subnets for the UAT RDS SQL DB Instance"
name:
description: the name of the DB subnet group
returned: when RDS instance exists
type: str
sample: "samplesubnetgrouprds-j6paiqkxqp4z"
status:
description: the status of the DB subnet group
returned: when RDS instance exists
type: str
sample: "complete"
subnets:
description: the list of subnets associated with the DB subnet group
returned: when RDS instance exists
type: complex
contains:
availability_zone:
description: subnet availability zone information
returned: when RDS instance exists
type: complex
contains:
name:
description: availability zone
returned: when RDS instance exists
type: str
sample: "eu-west-1b"
provisioned_iops_capable:
description: whether provisioned iops are available in AZ subnet
returned: when RDS instance exists
type: bool
sample: "false"
identifier:
description: the identifier of the subnet
returned: when RDS instance exists
type: str
sample: "subnet-3fdba63e"
status:
description: the status of the subnet
returned: when RDS instance exists
type: str
sample: "active"
'''
import time
try:
import boto.rds
import boto.exception
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
try:
import boto.rds2
import boto.rds2.exceptions
HAS_RDS2 = True
except ImportError:
HAS_RDS2 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import AWSRetry
from ansible.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info
DEFAULT_PORTS = {
'aurora': 3306,
'mariadb': 3306,
'mysql': 3306,
'oracle': 1521,
'sqlserver': 1433,
'postgres': 5432,
}
class RDSException(Exception):
def __init__(self, exc):
if hasattr(exc, 'error_message') and exc.error_message:
self.message = exc.error_message
self.code = exc.error_code
elif hasattr(exc, 'body') and 'Error' in exc.body:
self.message = exc.body['Error']['Message']
self.code = exc.body['Error']['Code']
else:
self.message = str(exc)
self.code = 'Unknown Error'
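# RDSConnection and RDS2Connection wrap boto.rds and boto.rds2 behind the same method names; each boto call converts BotoServerError into RDSException so callers can report failures uniformly.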
class RDSConnection:
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = connect_to_aws(boto.rds, region, **aws_connect_params)
except boto.exception.BotoServerError as e:
module.fail_json(msg=e.error_message)
def get_db_instance(self, instancename):
try:
return RDSDBInstance(self.connection.get_all_dbinstances(instancename)[0])
except boto.exception.BotoServerError:
return None
def get_db_snapshot(self, snapshotid):
try:
return RDSSnapshot(self.connection.get_all_dbsnapshots(snapshot_id=snapshotid)[0])
except boto.exception.BotoServerError:
return None
def create_db_instance(self, instance_name, size, instance_class, db_engine,
username, password, **params):
params['engine'] = db_engine
try:
result = self.connection.create_dbinstance(instance_name, size, instance_class,
username, password, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def create_db_instance_read_replica(self, instance_name, source_instance, **params):
try:
result = self.connection.create_dbinstance_read_replica(instance_name, source_instance, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def delete_db_instance(self, instance_name, **params):
try:
result = self.connection.delete_dbinstance(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def delete_db_snapshot(self, snapshot):
try:
result = self.connection.delete_dbsnapshot(snapshot)
return RDSSnapshot(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def modify_db_instance(self, instance_name, **params):
try:
result = self.connection.modify_dbinstance(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def reboot_db_instance(self, instance_name, **params):
try:
result = self.connection.reboot_dbinstance(instance_name)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
try:
result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def create_db_snapshot(self, snapshot, instance_name, **params):
try:
result = self.connection.create_dbsnapshot(snapshot, instance_name)
return RDSSnapshot(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def promote_read_replica(self, instance_name, **params):
try:
result = self.connection.promote_read_replica(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
class RDS2Connection:
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = connect_to_aws(boto.rds2, region, **aws_connect_params)
except boto.exception.BotoServerError as e:
module.fail_json(msg=e.error_message)
def get_db_instance(self, instancename):
try:
dbinstances = self.connection.describe_db_instances(
db_instance_identifier=instancename
)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']
result = RDS2DBInstance(dbinstances[0])
return result
except boto.rds2.exceptions.DBInstanceNotFound as e:
return None
except Exception as e:
raise e
def get_db_snapshot(self, snapshotid):
try:
snapshots = self.connection.describe_db_snapshots(
db_snapshot_identifier=snapshotid,
snapshot_type='manual'
)['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots']
result = RDS2Snapshot(snapshots[0])
return result
except boto.rds2.exceptions.DBSnapshotNotFound:
return None
def create_db_instance(self, instance_name, size, instance_class, db_engine,
username, password, **params):
try:
result = self.connection.create_db_instance(instance_name, size, instance_class, db_engine, username, password,
**params)['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def create_db_instance_read_replica(self, instance_name, source_instance, **params):
try:
result = self.connection.create_db_instance_read_replica(
instance_name,
source_instance,
**params
)['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def delete_db_instance(self, instance_name, **params):
try:
result = self.connection.delete_db_instance(instance_name, **params)['DeleteDBInstanceResponse']['DeleteDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def delete_db_snapshot(self, snapshot):
try:
result = self.connection.delete_db_snapshot(snapshot)['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
return RDS2Snapshot(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def modify_db_instance(self, instance_name, **params):
try:
result = self.connection.modify_db_instance(instance_name, **params)['ModifyDBInstanceResponse']['ModifyDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def reboot_db_instance(self, instance_name, **params):
try:
result = self.connection.reboot_db_instance(instance_name, **params)['RebootDBInstanceResponse']['RebootDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
try:
result = self.connection.restore_db_instance_from_db_snapshot(
instance_name,
snapshot,
**params
)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def create_db_snapshot(self, snapshot, instance_name, **params):
try:
result = self.connection.create_db_snapshot(snapshot, instance_name, **params)['CreateDBSnapshotResponse']['CreateDBSnapshotResult']['DBSnapshot']
return RDS2Snapshot(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def promote_read_replica(self, instance_name, **params):
try:
result = self.connection.promote_read_replica(instance_name, **params)['PromoteReadReplicaResponse']['PromoteReadReplicaResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
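# RDSDBInstance/RDS2DBInstance and RDSSnapshot/RDS2Snapshot normalise the differing boto.rds and boto.rds2 result objects into the flat dictionaries returned to the user via get_data().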
class RDSDBInstance:
def __init__(self, dbinstance):
self.instance = dbinstance
self.name = dbinstance.id
self.status = dbinstance.status
def get_data(self):
d = {
'id': self.name,
'create_time': self.instance.create_time,
'status': self.status,
'availability_zone': self.instance.availability_zone,
'backup_retention': self.instance.backup_retention_period,
'backup_window': self.instance.preferred_backup_window,
'maintenance_window': self.instance.preferred_maintenance_window,
'multi_zone': self.instance.multi_az,
'instance_type': self.instance.instance_class,
'username': self.instance.master_username,
'iops': self.instance.iops
}
# Only assign an Endpoint if one is available
if hasattr(self.instance, 'endpoint'):
d["endpoint"] = self.instance.endpoint[0]
d["port"] = self.instance.endpoint[1]
if self.instance.vpc_security_groups is not None:
d["vpc_security_groups"] = ','.join(x.vpc_group for x in self.instance.vpc_security_groups)
else:
d["vpc_security_groups"] = None
else:
d["endpoint"] = None
d["port"] = None
d["vpc_security_groups"] = None
d['DBName'] = self.instance.DBName if hasattr(self.instance, 'DBName') else None
# ReadReplicaSourceDBInstanceIdentifier may or may not exist
try:
d["replication_source"] = self.instance.ReadReplicaSourceDBInstanceIdentifier
except Exception:
d["replication_source"] = None
return d
class RDS2DBInstance:
def __init__(self, dbinstance):
self.instance = dbinstance
if 'DBInstanceIdentifier' not in dbinstance:
self.name = None
else:
self.name = self.instance.get('DBInstanceIdentifier')
self.status = self.instance.get('DBInstanceStatus')
def get_data(self):
d = {
'id': self.name,
'create_time': self.instance['InstanceCreateTime'],
'engine': self.instance['Engine'],
'engine_version': self.instance['EngineVersion'],
'license_model': self.instance['LicenseModel'],
'character_set_name': self.instance['CharacterSetName'],
'allocated_storage': self.instance['AllocatedStorage'],
'publicly_accessible': self.instance['PubliclyAccessible'],
'latest_restorable_time': self.instance['LatestRestorableTime'],
'status': self.status,
'availability_zone': self.instance['AvailabilityZone'],
'secondary_availability_zone': self.instance['SecondaryAvailabilityZone'],
'backup_retention': self.instance['BackupRetentionPeriod'],
'backup_window': self.instance['PreferredBackupWindow'],
'maintenance_window': self.instance['PreferredMaintenanceWindow'],
'auto_minor_version_upgrade': self.instance['AutoMinorVersionUpgrade'],
'read_replica_source_dbinstance_identifier': self.instance['ReadReplicaSourceDBInstanceIdentifier'],
'multi_zone': self.instance['MultiAZ'],
'instance_type': self.instance['DBInstanceClass'],
'username': self.instance['MasterUsername'],
'db_name': self.instance['DBName'],
'iops': self.instance['Iops'],
'replication_source': self.instance['ReadReplicaSourceDBInstanceIdentifier']
}
if self.instance['DBParameterGroups'] is not None:
parameter_groups = []
for x in self.instance['DBParameterGroups']:
parameter_groups.append({'parameter_group_name': x['DBParameterGroupName'], 'parameter_apply_status': x['ParameterApplyStatus']})
d['parameter_groups'] = parameter_groups
if self.instance['OptionGroupMemberships'] is not None:
option_groups = []
for x in self.instance['OptionGroupMemberships']:
option_groups.append({'status': x['Status'], 'option_group_name': x['OptionGroupName']})
d['option_groups'] = option_groups
if self.instance['PendingModifiedValues'] is not None:
pdv = self.instance['PendingModifiedValues']
d['pending_modified_values'] = {
'multi_az': pdv['MultiAZ'],
'master_user_password': pdv['MasterUserPassword'],
'port': pdv['Port'],
'iops': pdv['Iops'],
'allocated_storage': pdv['AllocatedStorage'],
'engine_version': pdv['EngineVersion'],
'backup_retention_period': pdv['BackupRetentionPeriod'],
'db_instance_class': pdv['DBInstanceClass'],
'db_instance_identifier': pdv['DBInstanceIdentifier']
}
if self.instance["DBSubnetGroup"] is not None:
dsg = self.instance["DBSubnetGroup"]
db_subnet_groups = {}
db_subnet_groups['vpc_id'] = dsg['VpcId']
db_subnet_groups['name'] = dsg['DBSubnetGroupName']
db_subnet_groups['status'] = dsg['SubnetGroupStatus'].lower()
db_subnet_groups['description'] = dsg['DBSubnetGroupDescription']
db_subnet_groups['subnets'] = []
for x in dsg["Subnets"]:
db_subnet_groups['subnets'].append({
'status': x['SubnetStatus'].lower(),
'identifier': x['SubnetIdentifier'],
'availability_zone': {
'name': x['SubnetAvailabilityZone']['Name'],
'provisioned_iops_capable': x['SubnetAvailabilityZone']['ProvisionedIopsCapable']
}
})
d['db_subnet_groups'] = db_subnet_groups
if self.instance["VpcSecurityGroups"] is not None:
d['vpc_security_groups'] = ','.join(x['VpcSecurityGroupId'] for x in self.instance['VpcSecurityGroups'])
if "Endpoint" in self.instance and self.instance["Endpoint"] is not None:
d['endpoint'] = self.instance["Endpoint"].get('Address', None)
d['port'] = self.instance["Endpoint"].get('Port', None)
else:
d['endpoint'] = None
d['port'] = None
d['DBName'] = self.instance['DBName'] if hasattr(self.instance, 'DBName') else None
return d
class RDSSnapshot:
def __init__(self, snapshot):
self.snapshot = snapshot
self.name = snapshot.id
self.status = snapshot.status
def get_data(self):
d = {
'id': self.name,
'create_time': self.snapshot.snapshot_create_time,
'status': self.status,
'availability_zone': self.snapshot.availability_zone,
'instance_id': self.snapshot.instance_id,
'instance_created': self.snapshot.instance_create_time,
}
# needs boto >= 2.21.0
if hasattr(self.snapshot, 'snapshot_type'):
d["snapshot_type"] = self.snapshot.snapshot_type
if hasattr(self.snapshot, 'iops'):
d["iops"] = self.snapshot.iops
return d
class RDS2Snapshot:
def __init__(self, snapshot):
if 'DeleteDBSnapshotResponse' in snapshot:
self.snapshot = snapshot['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
else:
self.snapshot = snapshot
self.name = self.snapshot.get('DBSnapshotIdentifier')
self.status = self.snapshot.get('Status')
def get_data(self):
d = {
'id': self.name,
'create_time': self.snapshot['SnapshotCreateTime'],
'status': self.status,
'availability_zone': self.snapshot['AvailabilityZone'],
'instance_id': self.snapshot['DBInstanceIdentifier'],
'instance_created': self.snapshot['InstanceCreateTime'],
'snapshot_type': self.snapshot['SnapshotType'],
'iops': self.snapshot['Iops'],
}
return d
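# Poll the instance or snapshot until it reaches the requested status or wait_timeout expires; checks back off from every 5 seconds to every 20 seconds after the first 90 seconds to avoid API throttling.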
def await_resource(conn, resource, status, module):
start_time = time.time()
wait_timeout = module.params.get('wait_timeout') + start_time
check_interval = 5
while wait_timeout > time.time() and resource.status != status:
time.sleep(check_interval)
if wait_timeout <= time.time():
module.fail_json(msg="Timeout waiting for RDS resource %s" % resource.name)
if module.params.get('command') == 'snapshot':
# Temporary until all the rds2 commands have their responses parsed
if resource.name is None:
module.fail_json(msg="There was a problem waiting for RDS snapshot %s" % resource.snapshot)
# Back off if we're getting throttled, since we're just waiting anyway
resource = AWSRetry.backoff(tries=5, delay=20, backoff=1.5)(conn.get_db_snapshot)(resource.name)
else:
# Temporary until all the rds2 commands have their responses parsed
if resource.name is None:
module.fail_json(msg="There was a problem waiting for RDS instance %s" % resource.instance)
# Back off if we're getting throttled, since we're just waiting anyway
resource = AWSRetry.backoff(tries=5, delay=20, backoff=1.5)(conn.get_db_instance)(resource.name)
if resource is None:
break
# Some RDS resources take much longer than others to be ready. Check
# less aggressively for slow ones to avoid throttling.
if time.time() > start_time + 90:
check_interval = 20
return resource
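# Illustrative playbook task for the 'create' command (values are placeholders,
# not defaults; parameter names match the argument_spec built in main()):
#   - rds:
#       command: create
#       instance_name: new-database
#       db_engine: MySQL
#       size: 10
#       instance_type: db.m1.small
#       username: mysql_admin
#       password: 1nsecure
#       wait: yes
#       wait_timeout: 600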
def create_db_instance(module, conn):
required_vars = ['instance_name', 'db_engine', 'size', 'instance_type', 'username', 'password']
valid_vars = ['backup_retention', 'backup_window',
'character_set_name', 'db_name', 'engine_version',
'instance_type', 'iops', 'license_model', 'maint_window',
'multi_zone', 'option_group', 'parameter_group', 'port',
'subnet', 'upgrade', 'zone']
if module.params.get('subnet'):
valid_vars.append('vpc_security_groups')
else:
valid_vars.append('security_groups')
if HAS_RDS2:
valid_vars.extend(['publicly_accessible', 'tags'])
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
if result:
changed = False
else:
try:
result = conn.create_db_instance(instance_name, module.params.get('size'),
module.params.get('instance_type'), module.params.get('db_engine'),
module.params.get('username'), module.params.get('password'), **params)
changed = True
except RDSException as e:
module.fail_json(msg="Failed to create instance: %s" % e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def replicate_db_instance(module, conn):
required_vars = ['instance_name', 'source_instance']
valid_vars = ['instance_type', 'port', 'upgrade', 'zone']
if HAS_RDS2:
valid_vars.extend(['iops', 'option_group', 'publicly_accessible', 'tags'])
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
source_instance = module.params.get('source_instance')
result = conn.get_db_instance(instance_name)
if result:
changed = False
else:
try:
result = conn.create_db_instance_read_replica(instance_name, source_instance, **params)
changed = True
except RDSException as e:
module.fail_json(msg="Failed to create replica instance: %s " % e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
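# Deletion handles both instances and snapshots: with instance_name set, the
# instance is removed (taking a final snapshot first when 'snapshot' is also
# given); with only 'snapshot' set, that snapshot itself is deleted.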
def delete_db_instance_or_snapshot(module, conn):
required_vars = []
valid_vars = ['instance_name', 'snapshot', 'skip_final_snapshot']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
if not instance_name:
result = conn.get_db_snapshot(snapshot)
else:
result = conn.get_db_instance(instance_name)
if not result:
module.exit_json(changed=False)
if result.status == 'deleting':
module.exit_json(changed=False)
try:
if instance_name:
if snapshot:
params["skip_final_snapshot"] = False
if HAS_RDS2:
params["final_db_snapshot_identifier"] = snapshot
else:
params["final_snapshot_id"] = snapshot
else:
params["skip_final_snapshot"] = True
result = conn.delete_db_instance(instance_name, **params)
else:
result = conn.delete_db_snapshot(snapshot)
except RDSException as e:
module.fail_json(msg="Failed to delete instance: %s" % e.message)
# If we're not waiting for a delete to complete then we're all done
# so just return
if not module.params.get('wait'):
module.exit_json(changed=True)
try:
await_resource(conn, result, 'deleted', module)
module.exit_json(changed=True)
except RDSException as e:
if e.code == 'DBInstanceNotFound':
module.exit_json(changed=True)
else:
module.fail_json(msg=e.message)
except Exception as e:
module.fail_json(msg=str(e))
def facts_db_instance_or_snapshot(module, conn):
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
if instance_name and snapshot:
module.fail_json(msg="Facts must be called with either instance_name or snapshot, not both")
if instance_name:
resource = conn.get_db_instance(instance_name)
if not resource:
module.fail_json(msg="DB instance %s does not exist" % instance_name)
if snapshot:
resource = conn.get_db_snapshot(snapshot)
if not resource:
module.fail_json(msg="DB snapshot %s does not exist" % snapshot)
module.exit_json(changed=False, instance=resource.get_data())
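# Note on 'modify': when new_instance_name is combined with apply_immediately,
# the code below waits for the renamed instance to appear and then to reach
# 'rebooting' before honoring 'wait', because the instance briefly reports
# 'available' during the rename.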
def modify_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = ['apply_immediately', 'backup_retention', 'backup_window',
'db_name', 'engine_version', 'instance_type', 'iops', 'license_model',
'maint_window', 'multi_zone', 'new_instance_name',
'option_group', 'parameter_group', 'password', 'size', 'upgrade']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
new_instance_name = module.params.get('new_instance_name')
try:
result = conn.modify_db_instance(instance_name, **params)
except RDSException as e:
module.fail_json(msg=e.message)
if params.get('apply_immediately'):
if new_instance_name:
# Wait until the new instance name is valid
new_instance = None
while not new_instance:
new_instance = conn.get_db_instance(new_instance_name)
time.sleep(5)
# Found instance but it briefly flicks to available
# before rebooting so let's wait until we see it rebooting
# before we check whether to 'wait'
result = await_resource(conn, new_instance, 'rebooting', module)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
# guess that this changed the DB, need a way to check
module.exit_json(changed=True, instance=resource.get_data())
def promote_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = ['backup_retention', 'backup_window']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
if not result:
module.fail_json(msg="DB Instance %s does not exist" % instance_name)
if result.get_data().get('replication_source'):
try:
result = conn.promote_read_replica(instance_name, **params)
changed = True
except RDSException as e:
module.fail_json(msg=e.message)
else:
changed = False
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def snapshot_db_instance(module, conn):
required_vars = ['instance_name', 'snapshot']
valid_vars = ['tags']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
changed = False
result = conn.get_db_snapshot(snapshot)
if not result:
try:
result = conn.create_db_snapshot(snapshot, instance_name, **params)
changed = True
except RDSException as e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_snapshot(snapshot)
module.exit_json(changed=changed, snapshot=resource.get_data())
def reboot_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = []
if HAS_RDS2:
valid_vars.append('force_failover')
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
changed = False
try:
result = conn.reboot_db_instance(instance_name, **params)
changed = True
except RDSException as e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def restore_db_instance(module, conn):
required_vars = ['instance_name', 'snapshot']
valid_vars = ['db_name', 'iops', 'license_model', 'multi_zone',
'option_group', 'port', 'publicly_accessible',
'subnet', 'tags', 'upgrade', 'zone']
if HAS_RDS2:
valid_vars.append('instance_type')
else:
required_vars.append('instance_type')
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
instance_type = module.params.get('instance_type')
snapshot = module.params.get('snapshot')
changed = False
result = conn.get_db_instance(instance_name)
if not result:
try:
result = conn.restore_db_instance_from_db_snapshot(instance_name, snapshot, instance_type, **params)
changed = True
except RDSException as e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
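# validate_parameters() checks the options required by the current command and
# translates module option names into the keyword arguments expected by boto
# (rds or rds2). For example, with rds2 installed, 'instance_type' becomes
# 'db_instance_class' and 'password' becomes 'master_user_password'.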
def validate_parameters(required_vars, valid_vars, module):
command = module.params.get('command')
for v in required_vars:
if not module.params.get(v):
module.fail_json(msg="Parameter %s required for %s command" % (v, command))
# map to convert rds module options to boto rds and rds2 options
optional_params = {
'port': 'port',
'db_name': 'db_name',
'zone': 'availability_zone',
'maint_window': 'preferred_maintenance_window',
'backup_window': 'preferred_backup_window',
'backup_retention': 'backup_retention_period',
'multi_zone': 'multi_az',
'engine_version': 'engine_version',
'upgrade': 'auto_minor_version_upgrade',
'subnet': 'db_subnet_group_name',
'license_model': 'license_model',
'option_group': 'option_group_name',
'size': 'allocated_storage',
'iops': 'iops',
'new_instance_name': 'new_instance_id',
'apply_immediately': 'apply_immediately',
}
# map to convert rds module options to boto rds options
optional_params_rds = {
'db_engine': 'engine',
'password': 'master_password',
'parameter_group': 'param_group',
'instance_type': 'instance_class',
}
# map to convert rds module options to boto rds2 options
optional_params_rds2 = {
'tags': 'tags',
'publicly_accessible': 'publicly_accessible',
'parameter_group': 'db_parameter_group_name',
'character_set_name': 'character_set_name',
'instance_type': 'db_instance_class',
'password': 'master_user_password',
'new_instance_name': 'new_db_instance_identifier',
'force_failover': 'force_failover',
}
if HAS_RDS2:
optional_params.update(optional_params_rds2)
sec_group = 'db_security_groups'
else:
optional_params.update(optional_params_rds)
sec_group = 'security_groups'
# Check for options only supported with rds2
for k in set(optional_params_rds2.keys()) - set(optional_params_rds.keys()):
if module.params.get(k):
module.fail_json(msg="Parameter %s requires boto.rds (boto >= 2.26.0)" % k)
params = {}
for (k, v) in optional_params.items():
if module.params.get(k) is not None and k not in required_vars:
if k in valid_vars:
params[v] = module.params[k]
else:
if module.params.get(k) is False:
pass
else:
module.fail_json(msg="Parameter %s is not valid for %s command" % (k, command))
if module.params.get('security_groups'):
params[sec_group] = module.params.get('security_groups').split(',')
vpc_groups = module.params.get('vpc_security_groups')
if vpc_groups:
if HAS_RDS2:
params['vpc_security_group_ids'] = vpc_groups
else:
groups_list = []
for x in vpc_groups:
groups_list.append(boto.rds.VPCSecurityGroupMembership(vpc_group=x))
params['vpc_security_groups'] = groups_list
# Convert tags dict to list of tuples that rds2 expects
if 'tags' in params:
params['tags'] = module.params['tags'].items()
return params
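# main() wires everything together: it builds the argument_spec, picks the boto
# backend (RDS2Connection when boto.rds2 is available, RDSConnection otherwise)
# and dispatches to the handler matching the 'command' parameter. The default
# port lookup strips any edition suffix, so e.g. 'sqlserver-ex' is looked up
# under 'sqlserver' in DEFAULT_PORTS.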
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
command=dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True),
instance_name=dict(required=False),
source_instance=dict(required=False),
db_engine=dict(choices=['mariadb', 'MySQL', 'oracle-se1', 'oracle-se2', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex',
'sqlserver-web', 'postgres', 'aurora'], required=False),
size=dict(required=False),
instance_type=dict(aliases=['type'], required=False),
username=dict(required=False),
password=dict(no_log=True, required=False),
db_name=dict(required=False),
engine_version=dict(required=False),
parameter_group=dict(required=False),
license_model=dict(choices=['license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license'], required=False),
multi_zone=dict(type='bool', required=False),
iops=dict(required=False),
security_groups=dict(required=False),
vpc_security_groups=dict(type='list', required=False),
port=dict(required=False, type='int'),
upgrade=dict(type='bool', default=False),
option_group=dict(required=False),
maint_window=dict(required=False),
backup_window=dict(required=False),
backup_retention=dict(required=False),
zone=dict(aliases=['aws_zone', 'ec2_zone'], required=False),
subnet=dict(required=False),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=300),
snapshot=dict(required=False),
apply_immediately=dict(type='bool', default=False),
new_instance_name=dict(required=False),
tags=dict(type='dict', required=False),
publicly_accessible=dict(required=False),
character_set_name=dict(required=False),
force_failover=dict(type='bool', required=False, default=False)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
invocations = {
'create': create_db_instance,
'replicate': replicate_db_instance,
'delete': delete_db_instance_or_snapshot,
'facts': facts_db_instance_or_snapshot,
'modify': modify_db_instance,
'promote': promote_db_instance,
'snapshot': snapshot_db_instance,
'reboot': reboot_db_instance,
'restore': restore_db_instance,
}
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
# set port to per db defaults if not specified
if module.params['port'] is None and module.params['db_engine'] is not None and module.params['command'] == 'create':
if '-' in module.params['db_engine']:
engine = module.params['db_engine'].split('-')[0]
else:
engine = module.params['db_engine']
module.params['port'] = DEFAULT_PORTS[engine.lower()]
# connect to the rds endpoint
if HAS_RDS2:
conn = RDS2Connection(module, region, **aws_connect_params)
else:
conn = RDSConnection(module, region, **aws_connect_params)
invocations[module.params.get('command')](module, conn)
if __name__ == '__main__':
main()
| gpl-3.0 |
mrquim/mrquimrepo | script.module.schism.common/lib/js2py/constructors/jsdate.py | 33 | 10192 | from js2py.base import *
from time_helpers import *
TZ_OFFSET = (time.altzone/3600)
ABS_OFFSET = abs(TZ_OFFSET)
TZ_NAME = time.tzname[1]
ISO_FORMAT = '%s-%s-%sT%s:%s:%s.%sZ'
@Js
def Date(year, month, date, hours, minutes, seconds, ms):
return now().to_string()
Date.Class = 'Date'
def now():
return PyJsDate(int(time.time()*1000), prototype=DatePrototype)
@Js
def UTC(year, month, date, hours, minutes, seconds, ms): # todo complete this
args = arguments
y = args[0].to_number()
m = args[1].to_number()
l = len(args)
dt = args[2].to_number() if l>2 else Js(1)
h = args[3].to_number() if l>3 else Js(0)
mi = args[4].to_number() if l>4 else Js(0)
sec = args[5].to_number() if l>5 else Js(0)
mili = args[6].to_number() if l>6 else Js(0)
if not y.is_nan() and 0<=y.value<=99:
y = y + Js(1900)
t = TimeClip(MakeDate(MakeDay(y, m, dt), MakeTime(h, mi, sec, mili)))
return PyJsDate(t, prototype=DatePrototype)
@Js
def parse(string):
return PyJsDate(TimeClip(parse_date(string.to_string().value)), prototype=DatePrototype)
Date.define_own_property('now', {'value': Js(now),
'enumerable': False,
'writable': True,
'configurable': True})
Date.define_own_property('parse', {'value': parse,
'enumerable': False,
'writable': True,
'configurable': True})
Date.define_own_property('UTC', {'value': UTC,
'enumerable': False,
'writable': True,
'configurable': True})
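# PyJsDate is the runtime representation of a JS Date: 'value' holds the time
# value in milliseconds since the Unix epoch (or NaN for an invalid date), and
# the helpers below convert it to naive datetime objects for strftime-based
# formatting.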
class PyJsDate(PyJs):
Class = 'Date'
extensible = True
def __init__(self, value, prototype=None):
self.value = value
self.own = {}
self.prototype = prototype
# todo fix this problematic datetime part
def to_local_dt(self):
return datetime.datetime.utcfromtimestamp(UTCToLocal(self.value)/1000)
def to_utc_dt(self):
return datetime.datetime.utcfromtimestamp(self.value/1000)
def local_strftime(self, pattern):
if self.value is NaN:
return 'Invalid Date'
try:
dt = self.to_local_dt()
except:
raise MakeError('TypeError', 'unsupported date range. Will fix in future versions')
try:
return dt.strftime(pattern)
except:
raise MakeError('TypeError', 'Could not generate date string from this date (limitations of python.datetime)')
def utc_strftime(self, pattern):
if self.value is NaN:
return 'Invalid Date'
try:
dt = self.to_utc_dt()
except:
raise MakeError('TypeError', 'unsupported date range. Will fix in future versions')
try:
return dt.strftime(pattern)
except:
raise MakeError('TypeError', 'Could not generate date string from this date (limitations of python.datetime)')
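# Date string parsing is still a stub (see parse_date below), so constructing a
# Date from a string or calling Date.parse() is not supported yet.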
def parse_date(py_string):
raise NotImplementedError()
def date_constructor(*args):
if len(args)>=2:
return date_constructor2(*args)
elif len(args)==1:
return date_constructor1(args[0])
else:
return date_constructor0()
def date_constructor0():
return now()
def date_constructor1(value):
v = value.to_primitive()
if v._type()=='String':
v = parse_date(v.value)
else:
v = v.to_int()
return PyJsDate(TimeClip(v), prototype=DatePrototype)
def date_constructor2(*args):
y = args[0].to_number()
m = args[1].to_number()
l = len(args)
dt = args[2].to_number() if l>2 else Js(1)
h = args[3].to_number() if l>3 else Js(0)
mi = args[4].to_number() if l>4 else Js(0)
sec = args[5].to_number() if l>5 else Js(0)
mili = args[6].to_number() if l>6 else Js(0)
if not y.is_nan() and 0<=y.value<=99:
y = y + Js(1900)
t = TimeClip(LocalToUTC(MakeDate(MakeDay(y, m, dt), MakeTime(h, mi, sec, mili))))
return PyJsDate(t, prototype=DatePrototype)
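# date_constructor dispatches on argument count, mirroring 'new Date(...)' in JS:
#   new Date()                  -> current time (date_constructor0)
#   new Date(value)             -> ms timestamp, or a date string (unsupported) (date_constructor1)
#   new Date(y, m[, d, h, ...]) -> local-time components (date_constructor2)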
Date.create = date_constructor
DatePrototype = PyJsDate(float('nan'), prototype=ObjectPrototype)
def check_date(obj):
if obj.Class!='Date':
raise MakeError('TypeError', 'this is not a Date object')
class DateProto:
def toString():
check_date(this)
if this.value is NaN:
return 'Invalid Date'
offset = (UTCToLocal(this.value) - this.value)/msPerHour
return this.local_strftime('%a %b %d %Y %H:%M:%S GMT') + '%s00 (%s)' % (pad(offset, 2, True), GetTimeZoneName(this.value))
def toDateString():
check_date(this)
return this.local_strftime('%d %B %Y')
def toTimeString():
check_date(this)
return this.local_strftime('%H:%M:%S')
def toLocaleString():
check_date(this)
return this.local_strftime('%d %B %Y %H:%M:%S')
def toLocaleDateString():
check_date(this)
return this.local_strftime('%d %B %Y')
def toLocaleTimeString():
check_date(this)
return this.local_strftime('%H:%M:%S')
def valueOf():
check_date(this)
return this.value
def getTime():
check_date(this)
return this.value
def getFullYear():
check_date(this)
if this.value is NaN:
return NaN
return YearFromTime(UTCToLocal(this.value))
def getUTCFullYear():
check_date(this)
if this.value is NaN:
return NaN
return YearFromTime(this.value)
def getMonth():
check_date(this)
if this.value is NaN:
return NaN
return MonthFromTime(UTCToLocal(this.value))
def getDate():
check_date(this)
if this.value is NaN:
return NaN
return DateFromTime(UTCToLocal(this.value))
def getUTCMonth():
check_date(this)
if this.value is NaN:
return NaN
return MonthFromTime(this.value)
def getUTCDate():
check_date(this)
if this.value is NaN:
return NaN
return DateFromTime(this.value)
def getDay():
check_date(this)
if this.value is NaN:
return NaN
return WeekDay(UTCToLocal(this.value))
def getUTCDay():
check_date(this)
if this.value is NaN:
return NaN
return WeekDay(this.value)
def getHours():
check_date(this)
if this.value is NaN:
return NaN
return HourFromTime(UTCToLocal(this.value))
def getUTCHours():
check_date(this)
if this.value is NaN:
return NaN
return HourFromTime(this.value)
def getMinutes():
check_date(this)
if this.value is NaN:
return NaN
return MinFromTime(UTCToLocal(this.value))
def getUTCMinutes():
check_date(this)
if this.value is NaN:
return NaN
return MinFromTime(this.value)
def getSeconds():
check_date(this)
if this.value is NaN:
return NaN
return SecFromTime(UTCToLocal(this.value))
def getUTCSeconds():
check_date(this)
if this.value is NaN:
return NaN
return SecFromTime(this.value)
def getMilliseconds():
check_date(this)
if this.value is NaN:
return NaN
return msFromTime(UTCToLocal(this.value))
def getUTCMilliseconds():
check_date(this)
if this.value is NaN:
return NaN
return msFromTime(this.value)
def getTimezoneOffset():
check_date(this)
if this.value is NaN:
return NaN
return (UTCToLocal(this.value) - this.value)/60000
def setTime(time):
check_date(this)
this.value = TimeClip(time.to_number().to_int())
return this.value
def setMilliseconds(ms):
check_date(this)
t = UTCToLocal(this.value)
tim = MakeTime(HourFromTime(t), MinFromTime(t), SecFromTime(t), ms.to_int())
u = TimeClip(LocalToUTC(MakeDate(Day(t), tim)))
this.value = u
return u
def setUTCMilliseconds(ms):
check_date(this)
t = this.value
tim = MakeTime(HourFromTime(t), MinFromTime(t), SecFromTime(t), ms.to_int())
u = TimeClip(MakeDate(Day(t), tim))
this.value = u
return u
# todo Complete all setters!
def toUTCString():
check_date(this)
return this.utc_strftime('%d %B %Y %H:%M:%S')
def toISOString():
check_date(this)
t = this.value
year = YearFromTime(t)
month, day, hour, minute, second, milli = pad(MonthFromTime(t)+1), pad(DateFromTime(t)), pad(HourFromTime(t)), pad(MinFromTime(t)), pad(SecFromTime(t)), pad(msFromTime(t))
return ISO_FORMAT % (unicode(year) if 0<=year<=9999 else pad(year, 6, True), month, day, hour, minute, second, milli)
def toJSON(key):
o = this.to_object()
tv = o.to_primitive('Number')
if tv.Class=='Number' and not tv.is_finite():
return this.null
toISO = o.get('toISOString')
if not toISO.is_callable():
raise this.MakeError('TypeError', 'toISOString is not callable')
return toISO.call(o, ())
def pad(num, n=2, sign=False):
'''Return the n-digit, zero-padded string representation of num; with sign=True, prefix '+' or '-'.'''
s = unicode(abs(num))
if len(s)<n:
s = '0'*(n-len(s)) + s
if not sign:
return s
if num>=0:
return '+'+s
else:
return '-'+s
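# pad() examples: pad(5) -> '05', pad(-7, 2, True) -> '-07',
# pad(2016, 6, True) -> '+002016' (used by toISOString for out-of-range years).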
fill_prototype(DatePrototype, DateProto, default_attrs)
Date.define_own_property('prototype', {'value': DatePrototype,
'enumerable': False,
'writable': False,
'configurable': False})
DatePrototype.define_own_property('constructor', {'value': Date,
'enumerable': False,
'writable': True,
'configurable': True}) | gpl-2.0 |