rem (stringlengths 0-322k) | add (stringlengths 0-2.05M) | context (stringlengths 8-228k) |
---|---|---|
if sys.exc_info() != (None,None,None): | if capture_traceback and sys.exc_info() != (None,None,None): | def __init__(self,fn='rl_dbgmemo.dbg',mode='w',getScript=1,modules=(),**kw): import time, socket self.fn = fn if mode!='w': return self.store = store = {} if sys.exc_info() != (None,None,None): import traceback s = getStringIO() traceback.print_exc(None,s) store['__traceback'] = s.getvalue() cwd=os.getcwd() lcwd = os.listdir(cwd) exed = os.path.abspath(os.path.dirname(sys.argv[0])) store.update({ 'gmt': time.asctime(time.gmtime(time.time())), 'platform': sys.platform, 'version': sys.version, 'executable': sys.executable, 'prefix': sys.prefix, 'path': sys.path, 'argv': sys.argv, 'cwd': cwd, 'hostname': socket.gethostname(), 'lcwd': lcwd, }) if exed!=cwd: store.update({'exed': exed, 'lexed': os.listdir(exed), }) if hasattr(os,'uname'): store.update({ 'uname': os.uname(), 'ctermid': os.ctermid(), 'getgid': os.getgid(), 'getuid': os.getuid(), 'getegid': os.getegid(), 'geteuid': os.geteuid(), 'getlogin': os.getlogin(), 'getgroups': os.getgroups(), 'getpgrp': os.getpgrp(), 'getpid': os.getpid(), 'getppid': os.getppid(), }) if getScript: fn = os.path.abspath(sys.argv[0]) if os.path.isfile(fn): store['__script'] = open(fn,'r').read() module_versions = {} for n,m in sys.modules.items(): if n=='reportlab' or n=='rlextra' or n[:10]=='reportlab.' or n[:8]=='rlextra.': v = getattr(m,'__version__',None) if v: module_versions[n] = v store['__module_versions'] = module_versions self.store['__payload'] = {} self._add(kw) |
S = self._CPage and [CondPageBreak(aH+1)] or [] | S = getattr(self,'_CPage',1) and [CondPageBreak(aH+1)] or [] | def split(self, aW, aH): S = self._CPage and [CondPageBreak(aH+1)] or [] for f in self._flowables: S.append(f) return S |
f.write(str(self.startxref) + LINEEND) | f.write(('%d' % self.startxref) + LINEEND) | def writeTrailer(self, f): f.write('trailer' + LINEEND) f.write('<< /Size %d /Root %d 0 R /Info %d 0 R>>' % (len(self.objects) + 1, 1, self.infopos) + LINEEND) f.write('startxref' + LINEEND) f.write(str(self.startxref) + LINEEND) |
fn = float(len(P)) return reduce(lambda x,y: (x[0]+y[0]/fn,x[1]+y[1]/fn),P,(0,0)) | return reduce(lambda x,y, fn=float(len(P)): (x[0]+y[0]/fn,x[1]+y[1]/fn),P,(0,0)) | def centroid(P): '''compute average point of a set of points''' fn = float(len(P)) return reduce(lambda x,y: (x[0]+y[0]/fn,x[1]+y[1]/fn),P,(0,0)) |
def goodTest(x,t,tb=0,**kw): | def goodTest(x,t,tb=0,inOnly=0,**kw): | def goodTest(x,t,tb=0,**kw): try: P=_pyRXP.Parser(**kw) r = P(x) rb = 0 except: et, ev, _unused = sys.exc_info() r = '%s %s' % (et.__name__, str(ev)) rb = 1 s = '' for k,v in kw.items(): s = s+', %s=%s' % (k,str(v)) if type(t) is type(''): t = t.replace('\r','\\r') t = t.replace('\n','\\n') if type(r) is type(''): r = r.replace('\r','\\r') r = r.replace('\n','\\n') print >>_logf, '%s.Parser(%s)(%s)'%(_pyRXP.__name__,s[2:],repr(x)), if r==t and rb==tb: print >>_logf, 'OK' _dot('.') else: _dot('E') print >>_logf,'\nBAD got ', r print >>_logf,'Expected', t |
if r==t and rb==tb: | if (inOnly and t in r) or (r==t) and rb==tb: | def goodTest(x,t,tb=0,**kw): try: P=_pyRXP.Parser(**kw) r = P(x) rb = 0 except: et, ev, _unused = sys.exc_info() r = '%s %s' % (et.__name__, str(ev)) rb = 1 s = '' for k,v in kw.items(): s = s+', %s=%s' % (k,str(v)) if type(t) is type(''): t = t.replace('\r','\\r') t = t.replace('\n','\\n') if type(r) is type(''): r = r.replace('\r','\\r') r = r.replace('\n','\\n') print >>_logf, '%s.Parser(%s)(%s)'%(_pyRXP.__name__,s[2:],repr(x)), if r==t and rb==tb: print >>_logf, 'OK' _dot('.') else: _dot('E') print >>_logf,'\nBAD got ', r print >>_logf,'Expected', t |
def failTest(x,t,tb=1,**kw): goodTest(x,t,tb,**kw) | def failTest(x,t,tb=1,inOnly=0,**kw): goodTest(x,t,tb,inOnly=inOnly,**kw) def bigDepth(n): return n and '<tag%d>%s</tag%d>' % (n,bigDepth(n-1),n) or 'middle' | def failTest(x,t,tb=1,**kw): goodTest(x,t,tb,**kw) |
failTest(bigDepth(257),"""Error Internal error, stack limit reached!\n""", inOnly=1) | def _runTests(pyRXP): global _pyRXP _pyRXP = pyRXP print >>_logf, '############# Testing',pyRXP.__name__ try: for k,v in pyRXP.parser_flags.items(): eval('pyRXP.Parser(%s=%d)' % (k,v)) print >>_logf,'Parser keywords OK' _dot('.') except: traceback.print_exc() print >>_logf,'Parser keywords BAD' _dot('E') try: for k,v in pyRXP.parser_flags.items(): eval('pyRXP.Parser()("<a/>",%s=%d)' % (k,v)) print >>_logf,'Parser().parse keywords OK' _dot('.') except: traceback.print_exc() print >>_logf,'Parser().parse keywords BAD' _dot('E') goodTest('<a></a>',('a', None, [], None)) goodTest('<a></a>',('a', {}, [], None),ExpandEmpty=1) goodTest('<a></a>',['a', None, [], None],MakeMutableTree=1) goodTest('<a/>',('a', None, None, None)) goodTest('<a/>',('a', {}, [], None),ExpandEmpty=1) goodTest('<a/>',['a', None, None, None],MakeMutableTree=1) goodTest('<a/>',['a', {}, [], None],ExpandEmpty=1,MakeMutableTree=1) failTest('</a>',"Error Error: End tag </a> outside of any element\n in unnamed entity at line 1 char 4 of [unknown]\nEnd tag </a> outside of any element\nParse Failed!\n") goodTest('<a>A<!--comment--></a>',('a', None, ['A'], None)) goodTest('<a>A<!--comment--></a>',('a', {}, ['A'], None),ExpandEmpty=1) goodTest('<a>A<!--comment--></a>', ('a', None, ['A', ('<!--', None, ['comment'], None)], None), ReturnComments=1) goodTest('<a>A<&></a>',('a', None, ['A<&>'], None)) goodTest('<a>A<&></a>',('a', None, ['A', '<', '&', '>'], None), MergePCData=0) goodTest('<!--comment--><a/>',('a', None, None, None),ReturnComments=1) goodTest('<!--comment--><a/>',[('<!--',None,['comment'],None),('a', None, None, None)],ReturnComments=1,ReturnList=1) goodTest('<!--comment--><a/>',('a', None, None, None),ReturnComments=1) failTest('<?xml version="1.0" encoding="LATIN-1"?></a>',"Error Unknown declared encoding LATIN-1\nInternal error, ParserPush failed!\n") goodTest('<?work version="1.0" encoding="utf-8"?><a/>',[('<?',{'name':'work'}, ['version="1.0" encoding="utf-8"'],None), ('a', None, None, None)],IgnorePlacementErrors=1,ReturnList=1,ReturnProcessingInstructions=1,ReturnComments=1) goodTest('<a>\nHello\n<b>cruel\n</b>\nWorld\n</a>',('a', None, ['\nHello\n', ('b', None, ['cruel\n'], (('aaa', 2, 3), ('aaa', 3, 4))), '\nWorld\n'], (('aaa', 0, 3), ('aaa', 5, 4))),fourth=pyRXP.recordLocation,srcName='aaa') goodTest('<a aname="ANAME" aother="AOTHER">\nHello\n<b bname="BNAME" bother="BOTHER">cruel\n</b>\nWorld\n</a>',('a', {"aname": "ANAME", "aother": "AOTHER"}, ['\nHello\n', ('b', {"bname": "BNAME", "bother": "BOTHER"}, ['cruel\n'], (('aaa', 2, 33), ('aaa', 3, 4))), '\nWorld\n'], (('aaa', 0, 33), ('aaa', 5, 4))),fourth=pyRXP.recordLocation,srcName='aaa') goodTest('<a><![CDATA[<a>]]></a>',('a', None, ['<a>'], None)) goodTest('<a><![CDATA[<a>]]></a>',('a', None, [('<![CDATA[', None, ['<a>'], None)], None),ReturnCDATASectionsAsTuples=1) goodTest('''<foo:A xmlns:foo="http://www.foo.org/"><foo:B><foo:C xmlns:foo="http://www.bar.org/"><foo:D>abcd</foo:D></foo:C></foo:B><foo:B/><A>bare A<C>bare C</C><B>bare B</B></A><A xmlns="http://default.reportlab.com/" xmlns:bongo="http://bongo.reportlab.com/">default ns A<bongo:A>bongo A</bongo:A><B>default NS B</B></A></foo:A>''',('{http://www.foo.org/}A', {'xmlns:foo': 'http://www.foo.org/'}, [('{http://www.foo.org/}B', None, [('{http://www.bar.org/}C', {'xmlns:foo': 'http://www.bar.org/'}, [('{http://www.bar.org/}D', None, ['abcd'], None)], None)], None), ('{http://www.foo.org/}B', None, None, None), ('A', 
None, ['bare A', ('C', None, ['bare C'], None), ('B', None, ['bare B'], None)], None), ('{http://default.reportlab.com/}A', {'xmlns': 'http://default.reportlab.com/', 'xmlns:bongo': 'http://bongo.reportlab.com/'}, ['default ns A', ('{http://bongo.reportlab.com/}A', None, ['bongo A'], None), ('{http://default.reportlab.com/}B', None, ['default NS B'], None)], None)], None),XMLNamespaces=1,ReturnNamespaceAttributes=1) |
|
c=s.fontsize,d=self.canv: d.stringWidth(a,b,c), v)) | c=s.fontsize,d=d.stringWidth: d(a,b,c), v)) | def _calc(self): |
if op == 'GRID': self._drawBox( (sc, sr), (ec, er), weight, color) self._drawInnerGrid( (sc, sr), (ec, er), weight, color) elif op in ('BOX', 'OUTLINE',): self._drawBox( (sc, sr), (ec, er), weight, color) elif op == 'INNERGRID': self._drawInnerGrid( (sc, sr), (ec, er), weight, color) elif op == 'LINEBELOW': self._drawHLines((sc, sr+1), (ec, er+1), weight, color) elif op == 'LINEABOVE': self._drawHLines((sc, sr), (ec, er), weight, color) elif op == 'LINEBEFORE': self._drawVLines((sc, sr), (ec, er), weight, color) elif op == 'LINEAFTER': self._drawVLines((sc+1, sr), (ec+1, er), weight, color) else: raise ValueError, "Unknown line style %s" % op | getattr(self,_LineOpMap.get(op, '_drawUnknown' ))( (sc, sr), (ec, er), weight, color) | def _drawLines(self): for op, (sc, sr), (ec, er), weight, color in self._linecmds: if sc < 0: sc = sc + self._ncols if ec < 0: ec = ec + self._ncols if sr < 0: sr = sr + self._nrows if er < 0: er = er + self._nrows if op == 'GRID': self._drawBox( (sc, sr), (ec, er), weight, color) self._drawInnerGrid( (sc, sr), (ec, er), weight, color) elif op in ('BOX', 'OUTLINE',): self._drawBox( (sc, sr), (ec, er), weight, color) elif op == 'INNERGRID': self._drawInnerGrid( (sc, sr), (ec, er), weight, color) elif op == 'LINEBELOW': self._drawHLines((sc, sr+1), (ec, er+1), weight, color) elif op == 'LINEABOVE': self._drawHLines((sc, sr), (ec, er), weight, color) elif op == 'LINEBEFORE': self._drawVLines((sc, sr), (ec, er), weight, color) elif op == 'LINEAFTER': self._drawVLines((sc+1, sr), (ec+1, er), weight, color) else: raise ValueError, "Unknown line style %s" % op self._curcolor = None |
self._calc() | def _splitRows(self,availHeight): self._calc() h = 0 n = 0 lim = len(self._rowHeights) while n<lim: hn = h + self._rowHeights[n] if hn>availHeight: break h = hn n = n + 1 |
|
y = rowpos + (cellstyle.bottomPadding + rowheight-cellstyle.topPadding+(n-1)*leading)/2.0+leading-fontsize | y = rowpos + (cellstyle.bottomPadding + rowheight-cellstyle.topPadding+(n-1)*leading)/2.0 | def _drawCell(self, cellval, cellstyle, (colpos, rowpos), (colwidth, rowheight)): #print "cellstyle is ", repr(cellstyle), id(cellstyle) if self._curcellstyle is not cellstyle: cur = self._curcellstyle if cur is None or cellstyle.color != cur.color: #print "setting cell color to %s" % `cellstyle.color` self.canv.setFillColor(cellstyle.color) if cur is None or cellstyle.leading != cur.leading or cellstyle.fontname != cur.fontname or cellstyle.fontsize != cur.fontsize: #print "setting font: %s, %s, %s" % (cellstyle.fontname, cellstyle.fontsize, cellstyle.leading) self.canv.setFont(cellstyle.fontname, cellstyle.fontsize, cellstyle.leading) self._curcellstyle = cellstyle #print "leading is ", cellstyle.leading, "size is", cellstyle.fontsize just = cellstyle.alignment #print "alignment is ", just if just == 'LEFT': draw = self.canv.drawString x = colpos + cellstyle.leftPadding elif just in ('CENTRE', 'CENTER'): draw = self.canv.drawCentredString x = colpos + colwidth * 0.5 elif just == 'RIGHT': draw = self.canv.drawRightString x = colpos + colwidth - cellstyle.rightPadding else: raise ValueError, 'Invalid justification %s' % just |
LINECOMMANDS = ( 'GRID', 'BOX', 'OUTLINE', 'INNERGRID', 'BOXGRID', 'LINEBELOW', 'LINEABOVE', 'LINEBEFORE', 'LINEAFTER', ) | _LineOpMap = { 'GRID':'_drawGrid', 'BOX':'_drawBox', 'OUTLINE':'_drawBox', 'INNERGRID':'_drawInnerGrid', 'LINEBELOW':'_drawHLinesB', 'LINEABOVE':'_drawHLines', 'LINEBEFORE':'_drawVLines', 'LINEAFTER':'_drawVLinesA', } LINECOMMANDS = _LineOpMap.keys() | def _drawCell(self, cellval, cellstyle, (colpos, rowpos), (colwidth, rowheight)): #print "cellstyle is ", repr(cellstyle), id(cellstyle) if self._curcellstyle is not cellstyle: cur = self._curcellstyle if cur is None or cellstyle.color != cur.color: #print "setting cell color to %s" % `cellstyle.color` self.canv.setFillColor(cellstyle.color) if cur is None or cellstyle.leading != cur.leading or cellstyle.fontname != cur.fontname or cellstyle.fontsize != cur.fontsize: #print "setting font: %s, %s, %s" % (cellstyle.fontname, cellstyle.fontsize, cellstyle.leading) self.canv.setFont(cellstyle.fontname, cellstyle.fontsize, cellstyle.leading) self._curcellstyle = cellstyle #print "leading is ", cellstyle.leading, "size is", cellstyle.fontsize just = cellstyle.alignment #print "alignment is ", just if just == 'LEFT': draw = self.canv.drawString x = colpos + cellstyle.leftPadding elif just in ('CENTRE', 'CENTER'): draw = self.canv.drawCentredString x = colpos + colwidth * 0.5 elif just == 'RIGHT': draw = self.canv.drawRightString x = colpos + colwidth - cellstyle.rightPadding else: raise ValueError, 'Invalid justification %s' % just |
t=apply(Table,([('Attribute', 'Synonyms'), ('alignment', 'align, alignment'), ('bulletColor', 'bulletcolor, bcolor'), ('bulletFontName', 'bfont, bulletfontname'), ('bulletFontSize', 'bfontsize, bulletfontsize'), ('bulletIndent', 'bindent, bulletindent'), ('firstLineIndent', 'findent, firstlineindent'), ('fontName', 'face, fontname, font'), ('fontSize', 'size, fontsize'), ('leading', 'leading'), ('leftIndent', 'leftindent, lindent'), ('rightIndent', 'rightindent, rindent'), ('spaceAfter', 'spaceafter, spacea'), ('spaceBefore', 'spacebefore, spaceb'), ('textColor', 'fg, textcolor, color')],)) | t = Table([ ('Attribute', 'Synonyms'), ('alignment', 'align, alignment'), ('bulletColor', 'bulletcolor, bcolor'), ('bulletFontName', 'bfont, bulletfontname'), ('bulletFontSize', 'bfontsize, bulletfontsize'), ('bulletIndent', 'bindent, bulletindent'), ('firstLineIndent', 'findent, firstlineindent'), ('fontName', 'face, fontname, font'), ('fontSize', 'size, fontsize'), ('leading', 'leading'), ('leftIndent', 'leftindent, lindent'), ('rightIndent', 'rightindent, rindent'), ('spaceAfter', 'spaceafter, spacea'), ('spaceBefore', 'spacebefore, spaceb'), ('textColor', 'fg, textcolor, color')]) | def test(): from reportlab.lib.units import inch rowheights = (24, 16, 16, 16, 16) rowheights2 = (24, 16, 16, 16, 30) colwidths = (50, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32) data = ( ('', 'Jan', 'Feb', 'Mar','Apr','May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'), ('Mugs', 0, 4, 17, 3, 21, 47, 12, 33, 2, -2, 44, 89), ('T-Shirts', 0, 42, 9, -3, 16, 4, 72, 89, 3, 19, 32, 119), ('Key Ring', 0,0,0,0,0,0,1,0,0,0,2,13), ('Hats', 893, 912, '1,212', 643, 789, 159, 888, '1,298', 832, 453, '1,344','2,843') ) data2 = ( ('', 'Jan', 'Feb', 'Mar','Apr','May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'), ('Mugs', 0, 4, 17, 3, 21, 47, 12, 33, 2, -2, 44, 89), ('T-Shirts', 0, 42, 9, -3, 16, 4, 72, 89, 3, 19, 32, 119), ('Key Ring', 0,0,0,0,0,0,1,0,0,0,2,13), ('Hats\nLarge', 893, 912, '1,212', 643, 789, 159, 888, '1,298', 832, 453, '1,344','2,843') ) styleSheet = getSampleStyleSheet() lst = [] lst.append(Paragraph("Tables", styleSheet['Heading1'])) lst.append(Paragraph(__doc__, styleSheet['BodyText'])) lst.append(Paragraph("The Tables (shown in different styles below) were created using the following code:", styleSheet['BodyText'])) lst.append(Preformatted(""" colwidths = (50, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32) rowheights = (24, 16, 16, 16, 16) data = ( ('', 'Jan', 'Feb', 'Mar','Apr','May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'), ('Mugs', 0, 4, 17, 3, 21, 47, 12, 33, 2, -2, 44, 89), ('T-Shirts', 0, 42, 9, -3, 16, 4, 72, 89, 3, 19, 32, 119), ('Key Ring', 0,0,0,0,0,0,1,0,0,0,2,13), ('Hats', 893, 912, '1,212', 643, 789, 159, 888, '1,298', 832, 453, '1,344','2,843') ) t = Table(data, colwidths, rowheights) """, styleSheet['Code'], dedent=4)) lst.append(Paragraph(""" You can then give the Table a TableStyle object to control its format. The first TableStyle used was created as follows: """, styleSheet['BodyText'])) lst.append(Preformatted(""" |
if W>maxWidth: | if W>maxWidth+_FUZZ: | def func(x): W, H = _listWrapOn(self._content,x*availWidth,self.canv) W /= x H /= x return W, H |
if H<=maxHeight: | if H<=maxHeight-_FUZZ: | def func(x): W, H = _listWrapOn(self._content,x*availWidth,self.canv) W /= x H /= x return W, H |
import glob | def bruteForceSearchForAFM(faceName): """Looks in all AFM files on path for face with given name. Returns AFM file name or None. Ouch!""" import glob from reportlab.rl_config import T1SearchPath for dirname in T1SearchPath: if not rl_isdir(dirname): continue possibles = glob.glob(dirname + os.sep + '*.[aA][fF][mM]') for possible in possibles: (topDict, glyphDict) = parseAFMFile(possible) if topDict['FontName'] == faceName: return possible return None |
|
possibles = glob.glob(dirname + os.sep + '*.[aA][fF][mM]') | possibles = rl_glob(dirname + os.sep + '*.[aA][fF][mM]') | def bruteForceSearchForAFM(faceName): """Looks in all AFM files on path for face with given name. Returns AFM file name or None. Ouch!""" import glob from reportlab.rl_config import T1SearchPath for dirname in T1SearchPath: if not rl_isdir(dirname): continue possibles = glob.glob(dirname + os.sep + '*.[aA][fF][mM]') for possible in possibles: (topDict, glyphDict) = parseAFMFile(possible) if topDict['FontName'] == faceName: return possible return None |
g.add(String(x+self.boxWidth/2.,(self.boxHeight-ascent)/2.), | g.add(String(x+self.boxWidth/2.,(self.boxHeight-ascent)/2., | def draw(self): g = Group() ys = self.bottomPadding+(self.triangleHeight/2)+self.sourceLabelOffset+self.sourceLabelFontSize if self.background: x,y = self._getDrawingDimensions() g.add(Rect(-self.leftPadding,-ys,x,y, strokeColor=None, strokeWidth=0, fillColor=self.background)) |
fontSize = self.labelFontSize) | fontSize = self.labelFontSize)) | def draw(self): g = Group() ys = self.bottomPadding+(self.triangleHeight/2)+self.sourceLabelOffset+self.sourceLabelFontSize if self.background: x,y = self._getDrawingDimensions() g.add(Rect(-self.leftPadding,-ys,x,y, strokeColor=None, strokeWidth=0, fillColor=self.background)) |
print 'Error: %s '+ cmdname or cmd | print 'Error: '+ cmdname or cmd | def do_exec(cmd, cmdname=None): i=os.popen(cmd,'r') print i.read() i = i.close() if i is not None: if cmdname is not None: print 'Error: %s '+ cmdname or cmd sys.exit(1) |
_fonts[font.fontName] = font | fontName = str(font.fontName) _fonts[fontName] = font | def registerFont(font): "Registers a font, including setting up info for accelerated stringWidth" #assert isinstance(font, Font), 'Not a Font: %s' % font _fonts[font.fontName] = font if not font._multiByte: if _stringWidth: _rl_accel.setFontInfo(string.lower(font.fontName), _dummyEncoding, font.face.ascent, font.face.descent, font.widths) |
_rl_accel.setFontInfo(string.lower(font.fontName), | _rl_accel.setFontInfo(string.lower(fontName), | def registerFont(font): "Registers a font, including setting up info for accelerated stringWidth" #assert isinstance(font, Font), 'Not a Font: %s' % font _fonts[font.fontName] = font if not font._multiByte: if _stringWidth: _rl_accel.setFontInfo(string.lower(font.fontName), _dummyEncoding, font.face.ascent, font.face.descent, font.widths) |
text = self.labels[i] if text: si = self.strands[i] labelRadius = si.labelRadius ex = centerx + labelRadius*car ey = centery + labelRadius*sar L = Label() L.setText(text) L.x = ex L.y = ey L.boxAnchor = _findNearestAngleValue(angle*180/pi,_ANGLE2ANCHOR) L.fontName = si.fontName L.fontSize = si.fontSize L.fillColor = si.fontColor L.textAnchor = 'boxauto' spokes.append(L) | if labels: text = labels[i] if text: si = self.strands[i] labelRadius = si.labelRadius ex = centerx + labelRadius*car ey = centery + labelRadius*sar L = Label() L.setText(text) L.x = ex L.y = ey L.boxAnchor = _findNearestAngleValue(angle*180/pi,_ANGLE2ANCHOR) L.fontName = si.fontName L.fontSize = si.fontSize L.fillColor = si.fontColor L.textAnchor = 'boxauto' spokes.append(L) | def draw(self): # normalize slice data g = self.makeBackground() or Group() |
p = [self.CX(i,0),self.CY(i,0), self.CX(i,1),self.CY(i,1), self.OX(i,angle,1),self.OY(i,angle,1), self.OX(i,angle,0),self.OY(i,angle,0)] L.append((rd,Polygon(p, strokeColor=strokeColor, fillColor=fillColor,strokeWidth=strokeWidth,strokeLineJoin=1))) | if abs(angle-_270r)>1e-6: p = [self.CX(i,0),self.CY(i,0), self.CX(i,1),self.CY(i,1), self.OX(i,angle,1),self.OY(i,angle,1), self.OX(i,angle,0),self.OY(i,angle,0)] L.append((rd,Polygon(p, strokeColor=strokeColor, fillColor=fillColor,strokeWidth=strokeWidth,strokeLineJoin=1))) | def _fillSide(self,L,i,angle,strokeColor,strokeWidth,fillColor): rd = self.rad_dist(angle) if rd<self.rad_dist(self._sl3d[i].mid): p = [self.CX(i,0),self.CY(i,0), self.CX(i,1),self.CY(i,1), self.OX(i,angle,1),self.OY(i,angle,1), self.OX(i,angle,0),self.OY(i,angle,0)] L.append((rd,Polygon(p, strokeColor=strokeColor, fillColor=fillColor,strokeWidth=strokeWidth,strokeLineJoin=1))) |
stripe = Rect(x, self.y, self.delta, h) | if x + self.delta > self.x + w: w1 = self.x + w - x else: w1 = self.delta stripe = Rect(x, self.y, w1, h) | def makeInnerTiles(self): # inner grid lines group = Group() |
rmStripe = Rect(self.x + w - self.delta0, self.y, self.delta0, h) | rmStripe = Rect(x, self.y, self.x + w - x, h) | def makeInnerTiles(self): # inner grid lines group = Group() |
stripe = Rect(self.x, y, w, self.delta) | if y + self.delta > self.y + w: h1 = self.y + w - y else: h1 = self.delta stripe = Rect(self.x, y, w, h1) | def makeInnerTiles(self): # inner grid lines group = Group() |
umStripe = Rect(self.x, self.y + w - self.delta0, w, self.delta0) | umStripe = Rect(self.x, self.y + w - self.delta0, w, self.y + h - y) | def makeInnerTiles(self): # inner grid lines group = Group() |
D = Drawing(450,650) for row in range(5): y = 530 - row*120 | D = Drawing(450, 650) d = 80 s = 60 for row in range(10): y = 530 - row*d | def test(): D = Drawing(450,650) for row in range(5): y = 530 - row*120 if row == 0: for col in range(3): x = 20 + col*120 g = Grid0() g.x = x g.y = y g.useRects = 0 g.useLines = 1 if col == 0: pass elif col == 1: g.delta0 = 10 elif col == 2: g.orientation = 'horizontal' g.demo() D.add(g) elif row == 1: for col in range(3): x = 20 + col*120 g = Grid0() g.y = y g.x = x if col == 0: pass elif col == 1: g.delta0 = 10 elif col == 2: g.orientation = 'horizontal' g.demo() D.add(g) elif row == 2: for col in range(3): x = 20 + col*120 g = Grid0() g.x = x g.y = y g.useLines = 1 g.useRects = 1 if col == 0: pass elif col == 1: g.delta0 = 10 elif col == 2: g.orientation = 'horizontal' g.demo() D.add(g) elif row == 3: for col in range(3): x = 20 + col*120 sr = ShadedRect0() sr.x = x sr.y = y sr.fillColorStart = colors.Color(0, 0, 0) sr.fillColorEnd = colors.Color(1, 1, 1) if col == 0: sr.numShades = 5 elif col == 1: sr.numShades = 2 elif col == 2: sr.numShades = 1 sr.demo() D.add(sr) elif row == 4: for col in range(3): x = 20 + col*120 sr = ShadedRect0() sr.x = x sr.y = y sr.fillColorStart = colors.red sr.fillColorEnd = colors.blue sr.orientation = 'horizontal' if col == 0: sr.numShades = 10 elif col == 1: sr.numShades = 20 elif col == 2: sr.numShades = 50 sr.demo() D.add(sr) renderPDF.drawToFile(D, 'grids.pdf', 'grids.py') print 'wrote file: grids.pdf' |
x = 20 + col*120 | x = 20 + col*d | def test(): D = Drawing(450,650) for row in range(5): y = 530 - row*120 if row == 0: for col in range(3): x = 20 + col*120 g = Grid0() g.x = x g.y = y g.useRects = 0 g.useLines = 1 if col == 0: pass elif col == 1: g.delta0 = 10 elif col == 2: g.orientation = 'horizontal' g.demo() D.add(g) elif row == 1: for col in range(3): x = 20 + col*120 g = Grid0() g.y = y g.x = x if col == 0: pass elif col == 1: g.delta0 = 10 elif col == 2: g.orientation = 'horizontal' g.demo() D.add(g) elif row == 2: for col in range(3): x = 20 + col*120 g = Grid0() g.x = x g.y = y g.useLines = 1 g.useRects = 1 if col == 0: pass elif col == 1: g.delta0 = 10 elif col == 2: g.orientation = 'horizontal' g.demo() D.add(g) elif row == 3: for col in range(3): x = 20 + col*120 sr = ShadedRect0() sr.x = x sr.y = y sr.fillColorStart = colors.Color(0, 0, 0) sr.fillColorEnd = colors.Color(1, 1, 1) if col == 0: sr.numShades = 5 elif col == 1: sr.numShades = 2 elif col == 2: sr.numShades = 1 sr.demo() D.add(sr) elif row == 4: for col in range(3): x = 20 + col*120 sr = ShadedRect0() sr.x = x sr.y = y sr.fillColorStart = colors.red sr.fillColorEnd = colors.blue sr.orientation = 'horizontal' if col == 0: sr.numShades = 10 elif col == 1: sr.numShades = 20 elif col == 2: sr.numShades = 50 sr.demo() D.add(sr) renderPDF.drawToFile(D, 'grids.pdf', 'grids.py') print 'wrote file: grids.pdf' |
canv = self.canv | canv = getattr(self,'canv',None) | def _listCellGeom(self, V,w,s,W=None,H=None): aW = w-s.leftPadding-s.rightPadding t = 0 w = 0 canv = self.canv for v in V: vw, vh = v.wrapOn(canv,aW, 72000) if W is not None: W.append(vw) if H is not None: H.append(vh) w = max(w,vw) t = t + vh + v.getSpaceBefore()+v.getSpaceAfter() return w, t - V[0].getSpaceBefore()-V[-1].getSpaceAfter() |
path = [baseDir] | if type(baseDir) not in SeqTypes path = [baseDir] else: path = list(baseDir) path = filter(None,basDir) | def recursiveImport(modulename, baseDir=None, noCWD=0, debug=0): """Dynamically imports possible packagized module, or raises ImportError""" import imp parts = string.split(modulename, '.') name = parts[0] #this hosed my brain..redo it slowly (AR). #path = list(baseDir and (type(baseDir) not in SeqTypes and [baseDir] or filter(None,baseDir)) or None) if baseDir is None: path = sys.path[:] else: path = [baseDir] if noCWD: if '.' in path: path.remove('.') abspath = os.path.abspath('.') if abspath in path: path.remove(abspath) else: if '.' not in path: path.insert(0,'.') if debug: import pprint pp = pprint.pprint print 'path=',pp(path) #make import errors a bit more informative fullName = name try: (file, pathname, description) = imp.find_module(name, path) childModule = parentModule = imp.load_module(name, file, pathname, description) if debug: print 'imported module = %s' % parentModule for name in parts[1:]: fullName = fullName + '.' + name if debug: print 'trying part %s' % name (file, pathname, description) = imp.find_module(name, [os.path.dirname(parentModule.__file__)]) childModule = imp.load_module(fullName, file, pathname, description) if debug: print 'imported module = %s' % childModule setattr(parentModule, name, childModule) parentModule = childModule except ImportError: msg = "cannot import '%s' while attempting recursive import of '%s'" % (fullName, modulename) if baseDir: msg = msg + " under paths '%s'" % `path` raise ImportError, msg return childModule |
datafile=qpath+job[:-3]+"sff" | fileext="sff" if os.path.splitext(datafilename)[1].lower()==".cff": fileext="cff" datafile=qpath+job[:-3]+fileext | def removejob(user,jobid,cslist): if (checkconfig() == -1) or (checkfaxuser(user,1) == 0): raise CSConfigError if not listtypes.has_key(cslist) or CheckJobID(jobid)==-1: raise -1 qpath=BuildListPath(cslist,user) job ="" if listtypes[cslist][1]==1: job=user+"-" job=job+listtypes[cslist][0]+"-"+jobid+".txt" #job=prefix+"-"+jobid+".txt" if (not os.access(qpath+job,os.W_OK)): print '<p><b> Job file "%s" (ID:%s) is not valid job to remove (List:%s)</b></p>' % (job,jobid,cslist) return -1 control=cs_helpers.readConfig(qpath+job) # in capisuite 0.4.3, the filename options in failed and done store the original file path # (e.g. /var/spool/capisuite/users/me/senq/fax-12.sff). if cslist!="faxdone" and cslist!="faxfailed": datafile=control.get("GLOBAL","filename") else: datafile=qpath+job[:-3]+"sff" if not datafile: return -1 try: lockfile=open(qpath+job[:-3]+"lock","w") # lock so that it isn't deleted while sending (or else) fcntl.lockf(lockfile,fcntl.LOCK_EX | fcntl.LOCK_NB) os.unlink(qpath+job) os.unlink(datafile) fcntl.lockf(lockfile,fcntl.LOCK_UN) os.unlink(qpath+job[:-3]+"lock") except IOError,err: if (err.errno in (errno.EACCES,errno.EAGAIN)): print "<p><b>Job is currently in transmission or in similar use. Can't remove.</b></p>" |
raise CSUserInputError("nvalid input (fax) file") | raise CSUserInputError("Invalid input (fax) file") | def sendfax(user,dialstring,sourcefile,cstarttime="",addressee="",subject="",useprefix=None): if (checkconfig == -1) or (checkfaxuser(user,1) == 0) or (not sourcefile): raise CSConfigError if not dialstring: raise CSUserInputError("empty dialstring") if ((cs_helpers.getOption(CAPI_config,user,"outgoing_MSN","")=="") and \ (CAPI_config.get(user,"fax_numbers","")=="")): raise CSGeneralError("Sorry, your are not allowed to send a fax") filetype = os.path.splitext(sourcefile)[1].lower()[1:] # splittext returns a list of 2, so no "None" check needed if not filetype: raise CSUserInputError("nvalid input (fax) file") # Convert to empty string, if set to "None" if addressee==None: addressee="" if subject==None: subject="" # filter out common separators from dialstring, check it dialstring=dialstring.translate(string.maketrans("",""),"-/ ()") if re.compile("[^0-9\+]+").search(dialstring): raise CSUserInputError("Invalid dialstring") prefix=cs_helpers.getOption(CAPI_config,user,"dial_prefix","") if (useprefix): dialstring=prefix+dialstring if (not os.access(sourcefile,os.R_OK)): raise CSInternalError("Cannot read fax source file:"+cgi.escape(sourcefile,1)) sendq = os.path.join(UsersFax_Path,user,"sendq")+"/" newname=cs_helpers.uniqueName(sendq,"fax",filetype) # --TODO--Error check!!!! shutil.copy(sourcefile,newname) if not cstarttime: cstarttime = time.ctime() cs_helpers.writeDescription(newname,"dialstring=\""+dialstring+"\"\n" +"starttime=\""+cstarttime+"\"\ntries=\"0\"\n" +"user=\""+user+"\"\naddressee=\""+addressee+"\"\nsubject=\"" +subject+"\"\n") os.chmod(newname,0600) os.chmod(newname[:-3]+"txt",0600) if (os.getuid()==0): user_entry=pwd.getpwnam(user) os.chown(newname,user_entry[2],user_entry[3]) os.chown(newname[:-3]+"txt",user_entry[2],user_entry[3]) #print "<p>",sourcefile,"successful enqueued as",newname,"for",dialstring,"</p>" |
self.SaveRankingData() self.LoadRegattaRanking() | pass | def UpdateRegattaRanking(self): self.SaveRankingData() self.LoadRegattaRanking() |
if str(value) == str(self.T_DetailRanking.item(s, 0).text()) and s != pos: return 0 | if self.T_DetailRanking.item(s, 0): newVal = self.T_DetailRanking.item(s, 0).text() if str(value) == str(newVal) and s != pos: return 0 | def ChechForSkipper(self, value, pos): for s in range(0, self.T_DetailRanking.rowCount()): if str(value) == str(self.T_DetailRanking.item(s, 0).text()) and s != pos: return 0 |
Ranks = {} | Ranks = {} risultati = datafile.options('result') if len(risultati) == 0: return | def LoadRegattaRanking(self): try: current_class = str(self.L_Regattas.currentItem().text()) if current_class[-1:] == '*': k = self.ShowQuestionDialog("Save data ?") if k == QtGui.QMessageBox.Yes: self.SaveRankingData(Regatta=current_class[:-1]) else: self.L_Regattas.currentItem().setText(current_class[:-1]) except: pass current_class = '' if self.T_DetailRanking.rowCount() > 1: self.ClearRankTable() for c in range(0, self.L_Regattas.count()): if self.L_Regattas.isItemSelected(self.L_Regattas.item(c)) == True: current_class = self.L_Regattas.item(c).text() break if current_class != '': File = self.inputpath+re.sub(' ','_',str(current_class))+".cla" Header = self.tr("Race") try: datafile = ConfigParser.ConfigParser() datafile.readfp(open(File)) except: return x = 0 TableHeader = ["Skipper"] points = {} Ranks = {} for sk in datafile.options('result'): _pt = datafile.get('result', sk) pt = _pt.split(',') tot = 0 Ranks[sk] = pt for p in pt: if p == 'DNF': tot = tot + self.DNF else: tot = tot + int(p) Ranks[sk].append(str(tot)) points[sk] = tot it = points.items() it = [(v, k) for (k, v) in it] it.sort() it = [(k, v) for (v, k) in it] for c in range(0,len(Ranks[sk])): self.T_DetailRanking.insertColumn(c+1) TableHeader.append("%s %d"%(Header,(c+1))) TableHeader.remove(TableHeader[len(TableHeader)-1]) TableHeader.append("Tot") self.T_DetailRanking.setHorizontalHeaderLabels(TableHeader) for sk_ in it: sk = sk_[0] self.T_DetailRanking.insertRow(self.T_DetailRanking.rowCount()) ski = QtGui.QTableWidgetItem(sk) self.T_DetailRanking.setItem(x,0,ski) c = 1 for p in Ranks[sk]: if p == '999': p = 'DNF' pt = QtGui.QTableWidgetItem(p) pt.setTextAlignment(QtCore.Qt.AlignCenter) self.T_DetailRanking.setItem(x,c,pt) c = c + 1 x = x + 1 self.T_DetailRanking.resizeColumnsToContents() |
for c in range(0,len(Ranks[sk])): | lenRanks = len(Ranks[sk]) for c in range(0,lenRanks): | def LoadRegattaRanking(self): try: current_class = str(self.L_Regattas.currentItem().text()) if current_class[-1:] == '*': k = self.ShowQuestionDialog("Save data ?") if k == QtGui.QMessageBox.Yes: self.SaveRankingData(Regatta=current_class[:-1]) else: self.L_Regattas.currentItem().setText(current_class[:-1]) except: pass current_class = '' if self.T_DetailRanking.rowCount() > 1: self.ClearRankTable() for c in range(0, self.L_Regattas.count()): if self.L_Regattas.isItemSelected(self.L_Regattas.item(c)) == True: current_class = self.L_Regattas.item(c).text() break if current_class != '': File = self.inputpath+re.sub(' ','_',str(current_class))+".cla" Header = self.tr("Race") try: datafile = ConfigParser.ConfigParser() datafile.readfp(open(File)) except: return x = 0 TableHeader = ["Skipper"] points = {} Ranks = {} for sk in datafile.options('result'): _pt = datafile.get('result', sk) pt = _pt.split(',') tot = 0 Ranks[sk] = pt for p in pt: if p == 'DNF': tot = tot + self.DNF else: tot = tot + int(p) Ranks[sk].append(str(tot)) points[sk] = tot it = points.items() it = [(v, k) for (k, v) in it] it.sort() it = [(k, v) for (v, k) in it] for c in range(0,len(Ranks[sk])): self.T_DetailRanking.insertColumn(c+1) TableHeader.append("%s %d"%(Header,(c+1))) TableHeader.remove(TableHeader[len(TableHeader)-1]) TableHeader.append("Tot") self.T_DetailRanking.setHorizontalHeaderLabels(TableHeader) for sk_ in it: sk = sk_[0] self.T_DetailRanking.insertRow(self.T_DetailRanking.rowCount()) ski = QtGui.QTableWidgetItem(sk) self.T_DetailRanking.setItem(x,0,ski) c = 1 for p in Ranks[sk]: if p == '999': p = 'DNF' pt = QtGui.QTableWidgetItem(p) pt.setTextAlignment(QtCore.Qt.AlignCenter) self.T_DetailRanking.setItem(x,c,pt) c = c + 1 x = x + 1 self.T_DetailRanking.resizeColumnsToContents() |
self.T_DetailRanking.insertColumn(self.T_DetailRanking.columnCount()-1) | def AddRace(self): self.T_DetailRanking.insertColumn(self.T_DetailRanking.columnCount()-1) cols = self.T_DetailRanking.columnCount() if cols == 2: self.T_DetailRanking.insertColumn(self.T_DetailRanking.columnCount()-1) cols += 1 TableHeader=[] for c in range(0, cols -1): TableHeader.append("%s %d"%("Race",(c))) TableHeader[0] = "Skipper" TableHeader.append("Tot") self.T_DetailRanking.setHorizontalHeaderLabels(TableHeader) self.T_DetailRanking.resizeColumnsToContents() |
|
def _element_content(self, element): """ Distill the textual content recursively. Added since I do not know how to use the lib for this. (Tuttle) """ out = "" for child in element.childNodes: if child.nodeType == ELEMENT_NODE: out += self._element_content(child) elif child.nodeType == xml.dom.minidom.Node.TEXT_NODE: out += child.data return out | def _element_content(self, element): """ Distill the textual content recursively. Added since I do not know how to use the lib for this. (Tuttle) """ |
|
print >> sys.stderr, 'Assuming rendered msgid in %s, not included:\n%s\n' % \ (self._curr_fn, element.toprettyxml(' ')) | print >> sys.stderr, 'Assuming rendered msgid in %s, not included:\n %s\n' % \ (self._curr_fn, element.toprettyxml(' ', '\n ')) | def _do_translate(self, element, domain): filename = self._curr_fn excerpt = self._make_excerpt(element) msgid = element.getAttribute('i18n:translate') |
msgid = self._element_content(element) print >> sys.stderr, 'Warning: Literal msgids should be avoided in %s, still adding:\n%s\n' % \ (self._curr_fn, element.toprettyxml(' ')) | msgid = self._make_msgstr(element, shrink = False) print >> sys.stderr, 'Warning: Literal msgids should be avoided in %s, still adding:\n %s\n' % \ (self._curr_fn, element.toprettyxml(' ', '\n ')) | def _do_translate(self, element, domain): filename = self._curr_fn excerpt = self._make_excerpt(element) msgid = element.getAttribute('i18n:translate') |
msgstr = self._make_msgstr(element) | msgstr = self._make_msgstr(element, shrink = True) | def _do_translate(self, element, domain): filename = self._curr_fn excerpt = self._make_excerpt(element) msgid = element.getAttribute('i18n:translate') |
print >> sys.stderr, 'Assuming rendered msgid in %s:\n%s\n' % \ | print >> sys.stderr, 'Assuming rendered msgid in %s, not included:\n%s\n' % \ | def _do_attributes(self, element, domain): rendered = [] if element.hasAttribute('tal:attributes'): attrs = element.getAttribute('tal:attributes').split(';') attrs = [attr.strip() for attr in attrs if attr.strip()] rendered = [attr.split()[0] for attr in attrs] |
def _make_msgstr(self, element): | def _make_msgstr(self, element, shrink = True): | def _make_msgstr(self, element): node = copy.deepcopy(element) self._make_pretty(node) msgstr = '' for child in node.childNodes: chunk = child.toxml() # XXX Do we need to escape anything else? chunk = chunk.replace('"', '\\"') chunk = ' '.join(chunk.split()) msgstr += chunk + ' ' |
chunk = ' '.join(chunk.split()) msgstr += chunk + ' ' | if shrink: chunk = ' '.join(chunk.split()) + ' ' msgstr += chunk | def _make_msgstr(self, element): node = copy.deepcopy(element) self._make_pretty(node) msgstr = '' for child in node.childNodes: chunk = child.toxml() # XXX Do we need to escape anything else? chunk = chunk.replace('"', '\\"') chunk = ' '.join(chunk.split()) msgstr += chunk + ' ' |
msgstr = msgstr.strip() | lines = msgstr.split("\n") for i in range(len(lines)): lines[i] = ' ' + lines[i].strip() msgstr = ''.join(lines).strip() | def read(self): """Reads in from all given ZPTs and builds up MessageCatalogs accordingly. |
if msgid: | else: | def _do_translate(self, element, domain): filename = self._curr_fn excerpt = self._make_excerpt(element) msgid = element.getAttribute('i18n:translate') |
attrs['i18n:attributes'].split(';')] | attrs['i18n:attributes'].split(';') if i18nattr] | def _valid_i18ned_attr(attr, attrs): """This returns 1 for attributes attr that are part of attrs and are translated using i18n:attributes. It also returns 1 for any attr that does not exist at all in attrs.""" if attrs.has_key(attr) and _translatable(attrs[attr]): if attrs.has_key('i18n:attributes'): if attrs['i18n:attributes'].find(';') == -1: # old syntax i18nattrs = [i18nattr.strip() for i18nattr in \ attrs['i18n:attributes'].split()] else: # new syntax i18nattrs = [i18nattr.strip().split()[0] for i18nattr in \ attrs['i18n:attributes'].split(';')] if not (attr in i18nattrs): return 0 else: return 1 else: return 0 return 1 |
for key in data_dict.keys(): | for key in data_dict: | def merge(self,__loc_data__=None,__conflict_solve=None,**kw): """S.merge(data,conflict,k=v1,k=v2,...) -> merge data and k=v into S. |
origins.append([sx[i]/dar[0], center[1], center[2]]) | origins.append([sx[i], center[1], center[2]]) | def _add_slices(self, item, sgrid, contours=False): cvector = item.get('cvector') center = sgrid.GetCenter() dar = self._axis.get('daspect') sx, sy, sz = item.get('slices') if len(shape(sx)) == 2 and shape(sx) == shape(sy) == shape(sz): s = Surface(sx,sy,sz) sgrid2 = self._get_2d_structured_grid(s) plane = vtk.vtkStructuredGridGeometryFilter() plane.SetInput(sgrid2) plane.Update() data = self._cut_data(plane) implds = vtk.vtkImplicitDataSet() implds.SetDataSet(data.GetOutput()) implds.Modified() cut = vtk.vtkCutter() cut.SetInput(sgrid) cut.SetCutFunction(implds) cut.GenerateValues(10, -2,2) cut.GenerateCutScalarsOn() cut.Update() mapper = vtk.vtkPolyDataMapper() mapper.SetInput(cut.GetOutput()) mapper.SetLookupTable(self._axis._vtk_colormap) caxis = self._axis.get('caxis') if None in caxis: caxis = data.GetOutput().GetScalarRange() mapper.SetScalarRange(caxis) mapper.Update() actor = vtk.vtkActor() actor.SetMapper(mapper) self._set_shading(item, data, actor) self._set_actor_properties(item, actor) self._axis._renderer.AddActor(actor) self._axis._vtk_apd.AddInput(cut.GetOutput()) self._axis._vtk_apd.AddInput(data.GetOutput()) else: origins = [] normals = [] sx = ravel(sx)/dar[0] sy = ravel(sy)/dar[1] sz = ravel(sz)/dar[2] for i in range(len(sx)): normals.append([1,0,0]) origins.append([sx[i]/dar[0], center[1], center[2]]) for i in range(len(sy)): normals.append([0,1,0]) origins.append([center[0], sy[i]/dar[1], center[2]]) for i in range(len(sz)): normals.append([0,0,1]) origins.append([center[0], center[1], sz[i]/dar[2]]) for i in range(len(normals)): plane = vtk.vtkPlane() plane.SetOrigin(origins[i]) plane.SetNormal(normals[i]) cut = vtk.vtkCutter() cut.SetInput(sgrid) cut.SetCutFunction(plane) cut.Update() data = self._cut_data(cut) mapper = vtk.vtkPolyDataMapper() if contours: iso = vtk.vtkContourFilter() iso.SetInput(data.GetOutput()) if cvector is not None: for i in range(len(cvector)): iso.SetValue(i, cvector[i]) else: zmin, zmax = data.GetOutput().GetScalarRange() iso.GenerateValues(item.get('clevels'), zmin, zmax) iso.Update() mapper.SetInput(iso.GetOutput()) else: mapper.SetInput(data.GetOutput()) mapper.SetLookupTable(self._axis._vtk_colormap) caxis = self._axis.get('caxis') if None in caxis: caxis = sgrid.GetScalarRange() mapper.SetScalarRange(caxis) mapper.Update() actor = vtk.vtkActor() actor.SetMapper(mapper) if not contours: self._set_shading(item, data, actor) self._set_actor_properties(item, actor) self._axis._renderer.AddActor(actor) self._axis._vtk_apd.AddInput(cut.GetOutput()) |
origins.append([center[0], sy[i]/dar[1], center[2]]) | origins.append([center[0], sy[i], center[2]]) | def _add_slices(self, item, sgrid, contours=False): cvector = item.get('cvector') center = sgrid.GetCenter() dar = self._axis.get('daspect') sx, sy, sz = item.get('slices') if len(shape(sx)) == 2 and shape(sx) == shape(sy) == shape(sz): s = Surface(sx,sy,sz) sgrid2 = self._get_2d_structured_grid(s) plane = vtk.vtkStructuredGridGeometryFilter() plane.SetInput(sgrid2) plane.Update() data = self._cut_data(plane) implds = vtk.vtkImplicitDataSet() implds.SetDataSet(data.GetOutput()) implds.Modified() cut = vtk.vtkCutter() cut.SetInput(sgrid) cut.SetCutFunction(implds) cut.GenerateValues(10, -2,2) cut.GenerateCutScalarsOn() cut.Update() mapper = vtk.vtkPolyDataMapper() mapper.SetInput(cut.GetOutput()) mapper.SetLookupTable(self._axis._vtk_colormap) caxis = self._axis.get('caxis') if None in caxis: caxis = data.GetOutput().GetScalarRange() mapper.SetScalarRange(caxis) mapper.Update() actor = vtk.vtkActor() actor.SetMapper(mapper) self._set_shading(item, data, actor) self._set_actor_properties(item, actor) self._axis._renderer.AddActor(actor) self._axis._vtk_apd.AddInput(cut.GetOutput()) self._axis._vtk_apd.AddInput(data.GetOutput()) else: origins = [] normals = [] sx = ravel(sx)/dar[0] sy = ravel(sy)/dar[1] sz = ravel(sz)/dar[2] for i in range(len(sx)): normals.append([1,0,0]) origins.append([sx[i]/dar[0], center[1], center[2]]) for i in range(len(sy)): normals.append([0,1,0]) origins.append([center[0], sy[i]/dar[1], center[2]]) for i in range(len(sz)): normals.append([0,0,1]) origins.append([center[0], center[1], sz[i]/dar[2]]) for i in range(len(normals)): plane = vtk.vtkPlane() plane.SetOrigin(origins[i]) plane.SetNormal(normals[i]) cut = vtk.vtkCutter() cut.SetInput(sgrid) cut.SetCutFunction(plane) cut.Update() data = self._cut_data(cut) mapper = vtk.vtkPolyDataMapper() if contours: iso = vtk.vtkContourFilter() iso.SetInput(data.GetOutput()) if cvector is not None: for i in range(len(cvector)): iso.SetValue(i, cvector[i]) else: zmin, zmax = data.GetOutput().GetScalarRange() iso.GenerateValues(item.get('clevels'), zmin, zmax) iso.Update() mapper.SetInput(iso.GetOutput()) else: mapper.SetInput(data.GetOutput()) mapper.SetLookupTable(self._axis._vtk_colormap) caxis = self._axis.get('caxis') if None in caxis: caxis = sgrid.GetScalarRange() mapper.SetScalarRange(caxis) mapper.Update() actor = vtk.vtkActor() actor.SetMapper(mapper) if not contours: self._set_shading(item, data, actor) self._set_actor_properties(item, actor) self._axis._renderer.AddActor(actor) self._axis._vtk_apd.AddInput(cut.GetOutput()) |
origins.append([center[0], center[1], sz[i]/dar[2]]) | origins.append([center[0], center[1], sz[i]]) | def _add_slices(self, item, sgrid, contours=False): cvector = item.get('cvector') center = sgrid.GetCenter() dar = self._axis.get('daspect') sx, sy, sz = item.get('slices') if len(shape(sx)) == 2 and shape(sx) == shape(sy) == shape(sz): s = Surface(sx,sy,sz) sgrid2 = self._get_2d_structured_grid(s) plane = vtk.vtkStructuredGridGeometryFilter() plane.SetInput(sgrid2) plane.Update() data = self._cut_data(plane) implds = vtk.vtkImplicitDataSet() implds.SetDataSet(data.GetOutput()) implds.Modified() cut = vtk.vtkCutter() cut.SetInput(sgrid) cut.SetCutFunction(implds) cut.GenerateValues(10, -2,2) cut.GenerateCutScalarsOn() cut.Update() mapper = vtk.vtkPolyDataMapper() mapper.SetInput(cut.GetOutput()) mapper.SetLookupTable(self._axis._vtk_colormap) caxis = self._axis.get('caxis') if None in caxis: caxis = data.GetOutput().GetScalarRange() mapper.SetScalarRange(caxis) mapper.Update() actor = vtk.vtkActor() actor.SetMapper(mapper) self._set_shading(item, data, actor) self._set_actor_properties(item, actor) self._axis._renderer.AddActor(actor) self._axis._vtk_apd.AddInput(cut.GetOutput()) self._axis._vtk_apd.AddInput(data.GetOutput()) else: origins = [] normals = [] sx = ravel(sx)/dar[0] sy = ravel(sy)/dar[1] sz = ravel(sz)/dar[2] for i in range(len(sx)): normals.append([1,0,0]) origins.append([sx[i]/dar[0], center[1], center[2]]) for i in range(len(sy)): normals.append([0,1,0]) origins.append([center[0], sy[i]/dar[1], center[2]]) for i in range(len(sz)): normals.append([0,0,1]) origins.append([center[0], center[1], sz[i]/dar[2]]) for i in range(len(normals)): plane = vtk.vtkPlane() plane.SetOrigin(origins[i]) plane.SetNormal(normals[i]) cut = vtk.vtkCutter() cut.SetInput(sgrid) cut.SetCutFunction(plane) cut.Update() data = self._cut_data(cut) mapper = vtk.vtkPolyDataMapper() if contours: iso = vtk.vtkContourFilter() iso.SetInput(data.GetOutput()) if cvector is not None: for i in range(len(cvector)): iso.SetValue(i, cvector[i]) else: zmin, zmax = data.GetOutput().GetScalarRange() iso.GenerateValues(item.get('clevels'), zmin, zmax) iso.Update() mapper.SetInput(iso.GetOutput()) else: mapper.SetInput(data.GetOutput()) mapper.SetLookupTable(self._axis._vtk_colormap) caxis = self._axis.get('caxis') if None in caxis: caxis = sgrid.GetScalarRange() mapper.SetScalarRange(caxis) mapper.Update() actor = vtk.vtkActor() actor.SetMapper(mapper) if not contours: self._set_shading(item, data, actor) self._set_actor_properties(item, actor) self._axis._renderer.AddActor(actor) self._axis._vtk_apd.AddInput(cut.GetOutput()) |
assert shape == cdata.shape, \ | assert v.shape == cdata.shape, \ | def _parseargs_isosurface(self, *args): nargs = len(args) if nargs >= 5 and nargs <= 6: # isosurface(X,Y,Z,V,isovalue) x, y, z, v = _check_xyzv(*args[:4]) isovalue = float(args[4]) elif nargs >= 2 and nargs <= 3: # isosurface(V,isovalue) x, y, z, v = _check_xyzv(args[0]) isovalue = float(args[1]) else: raise TypeError, "Wrong number of arguments" |
legendcounter=len(ax.get('plotitems')) | def plot(self, *args, **kwargs): """Draw line and scatter plots. |
|
legendcounter = len(ax.get('plotitems'))-legendcounter | no_lines = len(lines) | def plot(self, *args, **kwargs): """Draw line and scatter plots. |
if len(legends) == legendcounter: for i in range(legendcounter): legend = legends[legendcounter-i-1] | if len(legends) == no_lines: for i in range(no_lines): legend = legends[no_lines-i-1] | def plot(self, *args, **kwargs): """Draw line and scatter plots. |
(len(legends), legendcounter) | (len(legends), no_lines) | def plot(self, *args, **kwargs): """Draw line and scatter plots. |
for arg in ['--Numeric', '--numarray', ' --numpy']: | for arg in ['--Numeric', '--numarray', '--numpy']: try: __import__(arg[2:]) except: print "You don't have %s installed" %arg[2:] continue | def testMeshGrid_DenseFromNodenseMeshgridOutput(self): # sparse fails for dense output when input has singleton dimensions x = seq(-2,2,0.1) y = seq(-4,4,1) xx, yy = meshgrid(x,y) # xx and yy now has singleton dimension self.assertEqual(rank(xx), 2) self.assertEqual(rank(yy), 2) self.assertEqual(multiply.reduce(xx.shape), size(xx)) self.assertEqual(multiply.reduce(yy.shape), size(yy)) # This one should fail when xx and yy is not flat as well xx, yy = meshgrid(xx.flat, yy.flat, sparse=False) # no singleton self.assertEqual(shape(xx), (size(y), size(x))) self.assertEqual(shape(yy), (size(y), size(x))) xx, yy = meshgrid(x,y) # Add singleton dimensions xx, yy = meshgrid(xx, yy, sparse=False) self.assertEqual(shape(xx), (size(y), size(x))) self.assertEqual(shape(yy), (size(y), size(x))) |
from easyviz.blt_ import * | from scitools.easyviz.blt_ import * | def test1(self): try: if 'pyx_' in _plts: use(_plts['pyx_'], globals()) figure() plot((2, 2, 3, 4), 'kv-', (3, 3, 4, 6), 'kv', x='auto') axis(-1, 5, -1, 10) title('A very long title') legend('line 1', 'line 2') #latextext = r"$2\pi\gamma k\Omega$" #$plt._g.text(0, 0, latextext) hold('on') plot((5, 3, 5, 5)) legend('line 3') hardcopy('testpyx.eps') except: print 'pyx_ error: \nDebugging plt' #debug(plt) |
from easyviz.matplotlib_ import plt | from scitools.easyviz.matplotlib_ import plt | def test(self): self.setUp() try: figure() plot((1,2,3), (4,6,5), 'ro--') legend('tril') axis(0, 5, 0, 10) hold('on') plot((2,4,5), (4,4,4), '') legend('notrill', 'trill2') plot((1,2,3,4,5,6), (1,3,2,4,3,6), 'k:x') legend('nils') import Numeric x=Numeric.array((1,2,3,4)) plot(x, x**2, 'y') axis(0, 10, 0, 10) hardcopy('blt_test1.ps') figure() plot((1,2,3), 'r:', (2,3,4), 'b-', (3,4,5), 'k-.',\ (4,6,6), 'g--')#, x='auto') legend('dotted', 'solid', 'dotdashed', 'dashed') hardcopy('blt_test2.ps') figure() plot((1,10,100), log='y') hold('on') loglog((1,19,199), (10,100,1000)) legend('loglog', 'loglog') title('one scale') xlabel('x') ylabel('y') hardcopy('blt_test3.ps') except: print "Error: running debug(plt)" #debug(plt) |
from easyviz.examples import * | from scitools.easyviz.examples import * | def test(self): self.setUp() try: figure() plot((1,2,3), (4,6,5), 'ro--') legend('tril') axis(0, 5, 0, 10) hold('on') plot((2,4,5), (4,4,4), '') legend('notrill', 'trill2') plot((1,2,3,4,5,6), (1,3,2,4,3,6), 'k:x') legend('nils') import Numeric x=Numeric.array((1,2,3,4)) plot(x, x**2, 'y') axis(0, 10, 0, 10) hardcopy('blt_test1.ps') figure() plot((1,2,3), 'r:', (2,3,4), 'b-', (3,4,5), 'k-.',\ (4,6,6), 'g--')#, x='auto') legend('dotted', 'solid', 'dotdashed', 'dashed') hardcopy('blt_test2.ps') figure() plot((1,10,100), log='y') hold('on') loglog((1,19,199), (10,100,1000)) legend('loglog', 'loglog') title('one scale') xlabel('x') ylabel('y') hardcopy('blt_test3.ps') except: print "Error: running debug(plt)" #debug(plt) |
"python -c '''from easyviz.unittest_ import *;unittest.main()'''") | "python -c '''from scitools.easyviz.unittest_ import *;unittest.main()'''") | def main(): postscriptfiles = '' + \ " ".join(glob.glob('*.eps')) + \ " ".join(glob.glob('*.ps')) if len(postscriptfiles) > 0: print "Warning: you have old ps/eps files in testdir." print "These files might be overwritten by this test" print "The files are: ",postscriptfiles #rm *.ps *.eps *.pyc *~ -f #unittest.main() # There is a sys.exit() inside here.... if False: os.system( "python -c '''from easyviz.unittest_ import *;unittest.main()'''") suite = unittest.makeSuite(easyvizTest, 'test') suite = unittest.makeSuite(bltTest, 'test') suite = unittest.makeSuite(gnuplotTest, 'test') else: unittest.TextTestRunner(verbosity=2).run( unittest.makeSuite(easyvizTest)) if False: if os.uname()[0] == 'Linux': psviewer = 'gv' for file in glob.glob('*.ps'): os.system('%s %s &' %(psviewer, file)) for file in glob.glob('*.eps'): os.system('%s %s &' %(psviewer, file)) #raw_input('Press return when files are examined') else: show() |
self.set(**kwargs) | def __init__(self, *args, **kwargs): PlotProperties.__init__(self, **kwargs) self._prop.update(Streams._local_prop) self._parseargs(*args) self.set(**kwargs) |
|
for key in 'stepsize tubescale ribbonwith'.split(): | for key in 'stepsize tubescale ribbonwidth'.split(): | def set(self, **kwargs): PlotProperties.set(self, **kwargs) |
def system(command, verbose=True, failure_handling='exit') | def system(command, verbose=True, failure_handling='exit'): | def system(command, verbose=True, failure_handling='exit') """ Wrapping of the os.system command. Actually, the commands.getstatusoutput function is used, and the output from the system command is fetched. @param command: operating system command to be executed. @param verbose: False: no output, True: print command. @param failure_handling: one of 'exit', 'warning', 'exception', or 'silent'. In case of failure, the output from the command is always displayed. @return: the same as commands.getstatusoutput, i.e., a boolean failure variable and the output string (result of command). """ if verbose: print 'Running operating system command\n %s' % command failure, output = commands.getstatusoutput(command) if failure: msg = 'Failure when running operating system command'\ '\n %s\nOutput:\n%s' % (command, output) if failure_handling == 'exit': print msg, '\nExecution aborted!' sys.exit(1) if failure_handling == 'warning': print 'Warning:', msg elif failure_handling == 'exception': raise OSError, msg elif failure_handling == 'silent': pass else: raise ValueError, 'wrong value "%s" of failure_handling' % \ failure_handling return failure, output |
- plot(y1,...,x=x) | - plot(y1,y2,...,x=x) | def plot(self, *args, **kwargs): """Draw line and scatter plots. |
for i in range(len(args)-1): if not isinstance(args[i], str): if isinstance(args[i+1], str): lines.append(Line(x=kwargs['x'], y=args[i], format=args[1+i])) else: lines.append(Line(x=kwargs['x'], y=args[i], format='')) if i == nargs-2: | if nargs == 1 or (nargs == 2 and isinstance(args[1], str)): if nargs == 1: lines.append(Line(x=kwargs['x'], y=args[0], format='')) else: lines.append(Line(x=kwargs['x'], y=args[0], format=args[1])) else: for i in range(len(args)-1): if not isinstance(args[i], str): if isinstance(args[i+1], str): | def plot(self, *args, **kwargs): """Draw line and scatter plots. |
y=args[i+1], | y=args[i], format=args[1+i])) else: lines.append(Line(x=kwargs['x'], y=args[i], | def plot(self, *args, **kwargs): """Draw line and scatter plots. |
for i in range(len(args)-1): if not isinstance(args[i], str): if isinstance(args[i+1], str): lines.append(Line(x=kwargs['x'], y=kwargs['y'], z=args[i], format=args[1+i])) else: lines.append(Line(x=kwargs['x'], y=kwargs['y'], z=args[i], format='')) if i == nargs-2: | if nargs == 1 or (nargs == 2 and isinstance(args[1], str)): if nargs == 1: lines.append(Line(x=kwargs['x'], y=kwargs['y'], z=args[0], format='')) else: lines.append(Line(x=kwargs['x'], y=kwargs['y'], z=args[0], format=args[1])) else: for i in range(len(args)-1): if not isinstance(args[i], str): if isinstance(args[i+1], str): | def plot3(self, *args, **kwargs): """Draw lines and points in 3D space. |
z=args[i+1], | z=args[i], format=args[1+i])) else: lines.append(Line(x=kwargs['x'], y=kwargs['y'], z=args[i], | def plot3(self, *args, **kwargs): """Draw lines and points in 3D space. |
self._browser = WebBrowser() | self._browser = WebView() | def __init__(self, browser=None): Activity.__init__(self) |
s = re.sub('&[A-Za-z.-]*;', ' ', s) s = re.sub('<[a-zA-Z0-9 =\\\\":/.-_?]*>', ' ', s) s = re.sub('<[a-zA-Z0-9 =\\\\":/.-_?]*/>', ' ', s) s = re.sub('</[a-zA-Z0-9]*>', ' ', s) | s = re.sub('&[^;]*;', ' ', s) s = re.sub('<[^>]*>', ' ', s) s = re.sub('<[^>]*/>', ' ', s) s = re.sub('</[^>]*>', ' ', s) | def getCleanMsgstr(self): s = self.getMsgstr() space = ('\\n', '\\t') empty = ('&',) import re s = re.sub('&[A-Za-z.-]*;', ' ', s) s = re.sub('<[a-zA-Z0-9 =\\\\":/.-_?]*>', ' ', s) s = re.sub('<[a-zA-Z0-9 =\\\\":/.-_?]*/>', ' ', s) s = re.sub('</[a-zA-Z0-9]*>', ' ', s) for t in space: s = s.replace(t, ' ') for t in empty: s = s.replace(t, '') return s |
if x < lts - lwf + 1 and textToSearch[x + lwf] in string.letters: | if x < lts - lwf - 1 and textToSearch[x + lwf] in string.letters: | def searchWordInText(self, wordToFind, textToSearch, context, index): x = textToSearch.find(wordToFind, index) if x < 0: return (x, None) lwf = len(wordToFind) lts = len(textToSearch) if x > 0 and textToSearch[x - 1] in string.letters: return (x, None) if x < lts - lwf + 1 and textToSearch[x + lwf] in string.letters: return (x, None) xa, ea = x - context, '...' if xa < 0: xa, ea = 0, '' xb, eb = x + lwf + context, '...' if xb > lts: xb, eb = lts, '' return (x, ea + textToSearch[xa:xb] + eb) |
print "usage: %s connstring [-d dblib] outdir [propsfile]" % progname | print "usage: %s [-d dbmodule] connstring outdir [propsfile]" % progname | def usage_exit(progname, msg=None): if msg: print msg print print "usage: %s connstring [-d dblib] outdir [propsfile]" % progname sys.exit(2) |
nullable = (notnull != 't') | assert notnull in ('t', 'f', 1, 0), notnull nullable = (notnull not in ('t', 1)) | def _get_column_info(conn): Q = """SELECT c.relname, a.attname, t.typname, a.attlen, a.attnotnull, a.atthasdef, a.atttypmod FROM pg_class c, pg_attribute a, pg_type t WHERE c.relname !~ '^pg_' and c.relname !~ '^Inv' and c.relkind = 'r' and a.attnum > 0 and a.attrelid = c.oid and a.atttypid = t.oid""" tables = {} for table, attr, typ, length, notnull, hasdef, typmod in _query(conn, Q): t = tables.get(table, None) if not t: t = [] tables[table] = t nullable = (notnull != 't') hasdef = (hasdef == 't') if length == -1: length = typmod t.append((attr, typ, nullable, hasdef, length)) return tables |
self._index_items.append((table.name, "table", tablefilename)) | self._index_items.append((table.name, "table", "table-%s.html" % table.name)) | def _generate_table_pages(self): for table in self.tables: print "doing table", table.name tablefilename = os.path.join(self.outdir, "table-%s.html" % table.name) self._index_items.append((table.name, "table", tablefilename)) f = open(tablefilename, 'w') nav = '<a href="index.html">Table index</a> | <a href="symbol-index.html">Symbol index</a> | %s' % table.name f.write(self._standard_header(table.name, nav)) f.write('<h1>Table %s</h1>\n' % table.name) f.write('<hr noshade size=1>\n') shortdesc = self.descs.get('table.%s.shortdesc' % table.name, None) if shortdesc: f.write('<p>%s</p>\n' % shortdesc) notes = self.descs.get('table.%s.notes' % table.name, None) if notes: f.write('<h2>Notes</h2>\n') f.write(notes) # allows html f.write('<h2>Columns</h2>\n') f.write('<table border=1>\n<tr bgcolor="%s"><th>Column</th><th>Type</th><th>Nullable</th><th>Default</th><th>Description</th></tr>\n' % self.heading_bg_colour) for col in table.get_columns(): self._index_items.append((col.name, "column in table %s" % table.name, "table-%s.html#col-%s" % (table.name, col.name))) f.write('<tr>') pkey = (col.name == table.primary_key_name) if pkey: name_str = '<strong>%s</strong>' % col.name else: name_str = col.name if col.references is not None: other_table, other_col = col.references f.write('<td><a href="table-%s.html#col-%s">%s</a></td>' % (other_table, other_col, name_str)) else: f.write('<td>%s</td>' % name_str) f.write('<td>%s (%s)</td>' % (col.type, col.length)) f.write('<td>%s</td>' % (col.nullable and 'yes' or 'no')) f.write('<td>%s</td>' % (col.default_value)) col_desc = self.descs.get('table.%s.column.%s.shortdesc' % (table.name, col.name), " ") f.write('<td>%s</td>' % col_desc) f.write('</tr>\n') |
tgargs = string.split(tgargs, '\\000') | if string.find(tgargs, '\000') != -1: tgargs = string.split(tgargs, '\000') else: tgargs = string.split(tgargs, '\\000') if len(tgargs) != 7: raise RuntimeError, "error parsing trigger args for foreign key: %s" \ % repr(tgargs) | def _get_foreign_keys(conn): """Find foreign keys by looking at triggers. (Query adapted from query posted to pgsql-general by Michael Fork according to http://www.geocrawler.com/mail/msg.php3?msg_id=4895586&list=12) """ fkeys = {} for (tgargs,) in _query(conn, ''' SELECT pt.tgargs FROM pg_class pc, pg_proc pg_proc, pg_proc pg_proc_1, pg_trigger pg_trigger, pg_trigger pg_trigger_1, pg_proc pp, pg_trigger pt WHERE pt.tgrelid = pc.oid AND pp.oid = pt.tgfoid AND pg_trigger.tgconstrrelid = pc.oid AND pg_proc.oid = pg_trigger.tgfoid AND pg_trigger_1.tgfoid = pg_proc_1.oid AND pg_trigger_1.tgconstrrelid = pc.oid AND ((pp.proname LIKE '%ins') AND (pg_proc.proname LIKE '%upd') AND (pg_proc_1.proname LIKE '%del') AND (pg_trigger.tgrelid=pt.tgconstrrelid) AND (pg_trigger_1.tgrelid = pt.tgconstrrelid))'''): tgargs = string.split(tgargs, '\\000') (name, owner_table, referenced_table, unknown, column, referenced_table_pkey, blank) = tgargs t = fkeys.get(owner_table, None) if not t: t = {} fkeys[owner_table] = t t[column] = (referenced_table, referenced_table_pkey) return fkeys |
cols = string.replace(cols, ' ', ',') | def first(row): return row[0] |
|
decode(data_type, 'DATE', 11, | decode(data_type, 'DATE', '11', | def _get_column_info(conn): "Get a dictionary of (table, [list of column details]) tuples for all tables" # AJT 13.11.2001 - Note date is hard coded to '11'. Probably should special # case this in the documentation class to ignore the length of dates. stmt = """SELECT table_name, column_name, data_type, nullable, decode(default_length, NULL, 0, 1) hasdef, decode(data_type, 'DATE', 11, 'NUMBER', nvl(data_precision,38)||'.'||data_scale, data_length) data_length FROM user_tab_columns""" tables = {} for table, attr, typ, notnull, hasdef, length in _query(conn, stmt): t = tables.get(table, None) if not t: t = [] tables[table] = t # If notnull is not one of 'Y' or 'N' raise AssertionError assert notnull in ('Y', 'N'), notnull nullable = (notnull == 'Y') t.append((attr, typ, nullable, hasdef, length)) return tables |
for table, attr, typ, notnull, hasdef, length in _query(conn, stmt): | for table, attr, typ, nullable, hasdef, length in _query(conn, stmt): | def _get_column_info(conn): "Get a dictionary of (table, [list of column details]) tuples for all tables" # AJT 13.11.2001 - Note date is hard coded to '11'. Probably should special # case this in the documentation class to ignore the length of dates. stmt = """SELECT table_name, column_name, data_type, nullable, decode(default_length, NULL, 0, 1) hasdef, decode(data_type, 'DATE', 11, 'NUMBER', nvl(data_precision,38)||'.'||data_scale, data_length) data_length FROM user_tab_columns""" tables = {} for table, attr, typ, notnull, hasdef, length in _query(conn, stmt): t = tables.get(table, None) if not t: t = [] tables[table] = t # If notnull is not one of 'Y' or 'N' raise AssertionError assert notnull in ('Y', 'N'), notnull nullable = (notnull == 'Y') t.append((attr, typ, nullable, hasdef, length)) return tables |
assert notnull in ('Y', 'N'), notnull nullable = (notnull == 'Y') | assert nullable in ('Y', 'N'), nullable nullable = (nullable == 'Y') | def _get_column_info(conn): "Get a dictionary of (table, [list of column details]) tuples for all tables" # AJT 13.11.2001 - Note date is hard coded to '11'. Probably should special # case this in the documentation class to ignore the length of dates. stmt = """SELECT table_name, column_name, data_type, nullable, decode(default_length, NULL, 0, 1) hasdef, decode(data_type, 'DATE', 11, 'NUMBER', nvl(data_precision,38)||'.'||data_scale, data_length) data_length FROM user_tab_columns""" tables = {} for table, attr, typ, notnull, hasdef, length in _query(conn, stmt): t = tables.get(table, None) if not t: t = [] tables[table] = t # If notnull is not one of 'Y' or 'N' raise AssertionError assert notnull in ('Y', 'N'), notnull nullable = (notnull == 'Y') t.append((attr, typ, nullable, hasdef, length)) return tables |
if self.protocol not in [ '1910' ] : | if self.protocol not in [ '0014' ] : | def __init__( self , host = '127.0.0.1' , port = 5154 ) : |
data = struct.unpack( '>18H' , data ) | data = struct.unpack( '>21H' , data ) | def queryGame( self ) : |
redSize , greenSize , blueSize , purpleSize , rogueMax , \ redMax , greenMax , blueMax , purpleMax , shakeWins , \ shakeTimeout , maxPlayerScore , maxTeamScore , maxTime \ | redSize , greenSize , blueSize , purpleSize , obsSize, \ rogueMax , redMax , greenMax , blueMax , purpleMax , obsMax, \ shakeWins , shakeTimeout , maxPlayerScore , maxTeamScore , \ maxTime , elapsedTime \ | def queryGame( self ) : |
'rogue' : ( rogueSize , rogueMax ) , 'red' : ( redSize , redMax ) , 'green' : ( greenSize , greenMax ) , 'blue' : ( blueSize , blueMax ) , 'purple' : ( purpleSize , purpleMax ) , | 'rogue' : ( rogueSize , rogueMax ) , 'red' : ( redSize , redMax ) , 'green' : ( greenSize , greenMax ) , 'blue' : ( blueSize , blueMax ) , 'purple' : ( purpleSize , purpleMax ) , 'observer' : ( obsSize , obsMax ) , | def queryGame( self ) : |
'maxTime' : maxTime / 10. | 'maxTime' : maxTime / 10 , 'elapsedTime' : elapsedTime / 10 , | def queryGame( self ) : |
sys.stderr.write(filename + '\n') | def check_file(filename, lines): sys.stderr.write(filename + '\n') line_checks = [ check_tab_characters, check_long_lines ] token_checks = [ check_double_semicolons, check_missing_spaces_around, check_missing_spaces_after, check_extra_spaces_after, check_missing_spaces_before, check_extra_spaces_before, check_spaced_unary_pm, check_singlular_opening_braces, check_keyword_spacing, check_multistatements, check_oneliners, check_eol_operators, check_function_call_spaces, check_return_case_parentheses, check_boolean_comparisons, check_boolean_arguments ] # Check trailing spaces and then pre-rstrip lines, after tokenization # we can lstrip them too warnings = [] check_trailing_spaces(lines, warnings) lines = [l.rstrip() for l in lines] for check in line_checks: check(lines, warnings) tokens = tokenize(lines, warnings) find_matching_parentheses(tokens) lines = [l.lstrip() for l in lines] for check in token_checks: check(tokens, lines, warnings) warnings.sort() for w in warnings: print '%s:%d: %s' % (filename, w[0]+1, w[1]) |
|
line = re.sub(r'[bB]ug '<a href="' + bugzilla_url + '\\1">\\g<0></a>', line) | def format_date(d): # Don't depend on locale months = (None, 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December') d = time.strptime(d, '%Y-%m-%d') return '%s %d, %d' % (months[d.tm_mon], d.tm_mday, d.tm_year) |
|
re_enum = re.compile(r'^\s+(?P<ident>[A-Z][A-Z0-9_]+)\s*=', re.M) | re_enum = re.compile(r'^\s+(?P<ident>[A-Z][A-Z0-9_]+)\s*[=,]', re.M) | command -nargs=+ HiLink hi def link <args> |
def print_decls(decldict): | def print_decls(decldict, value): | def print_decls(decldict): for t, d in decldict.items(): d = d.keys() if not d: continue d.sort() print 'syn keyword %s%s %s' % (syntax_name, normalize(t), ' '.join(d)) |
d = d.keys() | d = [k for k, v in d.items() if v == value] | def print_decls(decldict): for t, d in decldict.items(): d = d.keys() if not d: continue d.sort() print 'syn keyword %s%s %s' % (syntax_name, normalize(t), ' '.join(d)) |
depdecls = dict([(x, {}) for x in types]) | deprecated_found = False identdefs = {} | def override(decldict, overides): for o, v in overides.items(): v = v.upper() has_it = False for k, d in decldict.items(): if d.has_key(o): has_it = True del d[o] if has_it: decldict[v][o] = 1 |
insert_to = depdecls | value = 'deprecated' deprecated_found = True | def override(decldict, overides): for o, v in overides.items(): v = v.upper() has_it = False for k, d in decldict.items(): if d.has_key(o): has_it = True del d[o] if has_it: decldict[v][o] = 1 |
insert_to = decls | value = None | def override(decldict, overides): for o, v in overides.items(): v = v.upper() has_it = False for k, d in decldict.items(): if d.has_key(o): has_it = True del d[o] if has_it: decldict[v][o] = 1 |
insert_to[d['type']][d['ident']] = 1 | m = re_ident_macro.search(d['body']) if m: identdefs[d['ident']] = m.group('ident') decls[d['type']][d['ident']] = value | def override(decldict, overides): for o, v in overides.items(): v = v.upper() has_it = False for k, d in decldict.items(): if d.has_key(o): has_it = True del d[o] if has_it: decldict[v][o] = 1 |
insert_to['CONSTANT'][e.group('ident')] = 1 | decls['CONSTANT'][e.group('ident')] = value for macro, body in identdefs.items(): for k, d in decls.items(): if not d.has_key(body): continue if k == 'FUNCTION' or k == 'MACRO': decls['MACRO'][macro] = decls['DEFINE'][macro] del decls['DEFINE'][macro] | def override(decldict, overides): for o, v in overides.items(): v = v.upper() has_it = False for k, d in decldict.items(): if d.has_key(o): has_it = True del d[o] if has_it: decldict[v][o] = 1 |
override(depdecls, options['override']) | def override(decldict, overides): for o, v in overides.items(): v = v.upper() has_it = False for k, d in decldict.items(): if d.has_key(o): has_it = True del d[o] if has_it: decldict[v][o] = 1 |
|
print_decls(decls) if [x for x in depdecls.values() if x]: | print_decls(decls, None) if deprecated_found: | def override(decldict, overides): for o, v in overides.items(): v = v.upper() has_it = False for k, d in decldict.items(): if d.has_key(o): has_it = True del d[o] if has_it: decldict[v][o] = 1 |