rem (string, lengths 0–322k) | add (string, lengths 0–2.05M) | context (string, lengths 8–228k) |
---|---|---|
'<IMG SRC="%s/p_/mi" BORDER=0></A>' % | '<IMG SRC="%s/p_/mi" BORDER=0></A></A>' % | def tpRenderTABLE(self, id, root_url, url, state, substate, diff, data, colspan, section, md, treeData, level=0, args=None, simple_type={type(''):0, type(1):0, type(1.0):0}.has_key, ): "Render a tree as a table" have_arg=args.has_key exp=0 if level >= 0: urlattr=args['url'] if urlattr and hasattr(self, urlattr): tpUrl=getattr(self, urlattr) if not simple_type(type(tpUrl)): tpUrl=tpUrl() url = (url and ('%s/%s' % (url, tpUrl))) or tpUrl root_url = root_url or tpUrl treeData['tree-item-url']=url treeData['tree-level']=level treeData['tree-item-expanded']=0 idattr=args['id'] output=data.append items=None if (have_arg('assume_children') and args['assume_children'] and substate is not state): # We should not compute children unless we have to. # See if we've been asked to expand our children. for i in range(len(substate)): sub=substate[i] if sub[0]==id: exp=i+1 break if not exp: items=1 if items is None: validate=md.validate if have_arg('branches') and hasattr(self, args['branches']): if validate is None or not hasattr(self, 'aq_acquire'): items=getattr(self, args['branches']) else: items=self.aq_acquire(args['branches'],validate,md) items=items() elif have_arg('branches_expr'): items=args['branches_expr'](md) if not items and have_arg('leaves'): items=1 if items and items != 1: if validate is not None: unauth=[] index=0 for i in items: try: v=validate(items,items,index,i,md) except: v=0 if not v: unauth.append(index) index=index+1 if unauth: if have_arg('skip_unauthorized') and args['skip_unauthorized']: items=list(items) unauth.reverse() for i in unauth: del items[i] else: raise ValidationError, unauth if have_arg('sort'): # Faster/less mem in-place sort if type(items)==type(()): items=list(items) sort=args['sort'] size=range(len(items)) for i in size: v=items[i] k=getattr(v,sort) try: k=k() except: pass items[i]=(k,v) items.sort() for i in size: items[i]=items[i][1] diff.append(id) sub=None if substate is state: output('<TABLE CELLSPACING="0">\n') sub=substate[0] exp=items else: # Add prefix output('<TR>\n') # Add +/- icon if items: if level: if level > 3: output( '<TD COLSPAN="%s"></TD>' % (level-1)) elif level > 1: output('<TD></TD>' * (level-1)) output('<TD WIDTH="16"></TD>\n') output('<TD WIDTH="16" VALIGN="TOP">') for i in range(len(substate)): sub=substate[i] if sub[0]==id: exp=i+1 break #################################### # Mostly inline encode_seq for speed s=compress(str(diff)) if len(s) > 57: s=encode_str(s) else: s=b2a_base64(s)[:-1] l=find(s,'=') if l >= 0: s=s[:l] s=translate(s, tplus) #################################### script=md['SCRIPT_NAME'] if exp: treeData['tree-item-expanded']=1 output('<A NAME="%s">' '<A HREF="%s?tree-c=%s#%s">' '<IMG SRC="%s/p_/mi" BORDER=0></A>' % (id, root_url, s, id, script)) else: output('<A NAME="%s">' '<A HREF="%s?tree-e=%s#%s">' '<IMG SRC="%s/p_/pl" BORDER=0></A>' % (id, root_url, s, id, script)) output('</TD>\n') else: if level > 2: output('<TD COLSPAN="%s"></TD>' % level) elif level > 0: output('<TD></TD>' * level) output('<TD WIDTH="16"></TD>\n') # add item text dataspan=colspan-level output('<TD%s%s VALIGN="TOP" ALIGN="LEFT">' % ((dataspan > 1 and (' COLSPAN="%s"' % dataspan) or ''), (have_arg('nowrap') and args['nowrap'] and ' NOWRAP' or '')) ) output(render_blocks(section, md)) output('</TD>\n</TR>\n') if exp: level=level+1 dataspan=colspan-level if level > 3: h='<TD COLSPAN="%s"></TD>' % (level-1) elif level > 1: h='<TD></TD>' * (level-1) else: h='' if 
have_arg('header'): doc=args['header'] if md.has_key(doc): doc=md.getitem(doc,0) else: doc=None if doc is not None: output(doc( None, md, standard_html_header=( '<TR>%s<TD WIDTH="16"></TD>' '<TD%s VALIGN="TOP">' % (h, (dataspan > 1 and (' COLSPAN="%s"' % dataspan) or ''))), standard_html_footer='</TD></TR>', )) if items==1: # leaves if have_arg('leaves'): doc=args['leaves'] if md.has_key(doc): doc=md.getitem(doc,0) else: doc=None if doc is not None: treeData['-tree-substate-']=sub treeData['tree-level']=level md._push(treeData) try: output(doc( None,md, standard_html_header=( '<TR>%s<TD WIDTH="16"></TD>' '<TD%s VALIGN="TOP">' % (h, (dataspan > 1 and (' COLSPAN="%s"' % dataspan) or ''))), standard_html_footer='</TD></TR>', )) finally: md._pop(1) elif have_arg('expand'): doc=args['expand'] if md.has_key(doc): doc=md.getitem(doc,0) else: doc=None if doc is not None: treeData['-tree-substate-']=sub treeData['tree-level']=level md._push(treeData) try: output(doc( None,md, standard_html_header=( '<TR>%s<TD WIDTH="16"></TD>' '<TD%s VALIGN="TOP">' % (h, (dataspan > 1 and (' COLSPAN="%s"' % dataspan) or ''))), standard_html_footer='</TD></TR>', )) finally: md._pop(1) else: __traceback_info__=sub, args, state, substate ids={} for item in items: if hasattr(item, idattr): id=getattr(item, idattr) if not simple_type(type(id)): id=id() elif hasattr(item, '_p_oid'): id=item._p_oid else: id=pyid(item) if len(sub)==1: sub.append([]) substate=sub[1] ids[id]=1 md._push(InstanceDict(item,md)) try: data=tpRenderTABLE( item,id,root_url,url,state,substate,diff,data, colspan, section, md, treeData, level, args) finally: md._pop() if not sub[1]: del sub[1] ids=ids.has_key for i in range(len(substate)-1,-1): if not ids(substate[i][0]): del substate[i] if have_arg('footer'): doc=args['footer'] if md.has_key(doc): doc=md.getitem(doc,0) else: doc=None if doc is not None: output(doc( None, md, standard_html_header=( '<TR>%s<TD WIDTH="16"></TD>' '<TD%s VALIGN="TOP">' % (h, (dataspan > 1 and (' COLSPAN="%s"' % dataspan) or ''))), standard_html_footer='</TD></TR>', )) del diff[-1] if not diff: output('</TABLE>\n') return data |
'<IMG SRC="%s/p_/pl" BORDER=0></A>' % | '<IMG SRC="%s/p_/pl" BORDER=0></A></A>' % | def tpRenderTABLE(self, id, root_url, url, state, substate, diff, data, colspan, section, md, treeData, level=0, args=None, simple_type={type(''):0, type(1):0, type(1.0):0}.has_key, ): "Render a tree as a table" have_arg=args.has_key exp=0 if level >= 0: urlattr=args['url'] if urlattr and hasattr(self, urlattr): tpUrl=getattr(self, urlattr) if not simple_type(type(tpUrl)): tpUrl=tpUrl() url = (url and ('%s/%s' % (url, tpUrl))) or tpUrl root_url = root_url or tpUrl treeData['tree-item-url']=url treeData['tree-level']=level treeData['tree-item-expanded']=0 idattr=args['id'] output=data.append items=None if (have_arg('assume_children') and args['assume_children'] and substate is not state): # We should not compute children unless we have to. # See if we've been asked to expand our children. for i in range(len(substate)): sub=substate[i] if sub[0]==id: exp=i+1 break if not exp: items=1 if items is None: validate=md.validate if have_arg('branches') and hasattr(self, args['branches']): if validate is None or not hasattr(self, 'aq_acquire'): items=getattr(self, args['branches']) else: items=self.aq_acquire(args['branches'],validate,md) items=items() elif have_arg('branches_expr'): items=args['branches_expr'](md) if not items and have_arg('leaves'): items=1 if items and items != 1: if validate is not None: unauth=[] index=0 for i in items: try: v=validate(items,items,index,i,md) except: v=0 if not v: unauth.append(index) index=index+1 if unauth: if have_arg('skip_unauthorized') and args['skip_unauthorized']: items=list(items) unauth.reverse() for i in unauth: del items[i] else: raise ValidationError, unauth if have_arg('sort'): # Faster/less mem in-place sort if type(items)==type(()): items=list(items) sort=args['sort'] size=range(len(items)) for i in size: v=items[i] k=getattr(v,sort) try: k=k() except: pass items[i]=(k,v) items.sort() for i in size: items[i]=items[i][1] diff.append(id) sub=None if substate is state: output('<TABLE CELLSPACING="0">\n') sub=substate[0] exp=items else: # Add prefix output('<TR>\n') # Add +/- icon if items: if level: if level > 3: output( '<TD COLSPAN="%s"></TD>' % (level-1)) elif level > 1: output('<TD></TD>' * (level-1)) output('<TD WIDTH="16"></TD>\n') output('<TD WIDTH="16" VALIGN="TOP">') for i in range(len(substate)): sub=substate[i] if sub[0]==id: exp=i+1 break #################################### # Mostly inline encode_seq for speed s=compress(str(diff)) if len(s) > 57: s=encode_str(s) else: s=b2a_base64(s)[:-1] l=find(s,'=') if l >= 0: s=s[:l] s=translate(s, tplus) #################################### script=md['SCRIPT_NAME'] if exp: treeData['tree-item-expanded']=1 output('<A NAME="%s">' '<A HREF="%s?tree-c=%s#%s">' '<IMG SRC="%s/p_/mi" BORDER=0></A>' % (id, root_url, s, id, script)) else: output('<A NAME="%s">' '<A HREF="%s?tree-e=%s#%s">' '<IMG SRC="%s/p_/pl" BORDER=0></A>' % (id, root_url, s, id, script)) output('</TD>\n') else: if level > 2: output('<TD COLSPAN="%s"></TD>' % level) elif level > 0: output('<TD></TD>' * level) output('<TD WIDTH="16"></TD>\n') # add item text dataspan=colspan-level output('<TD%s%s VALIGN="TOP" ALIGN="LEFT">' % ((dataspan > 1 and (' COLSPAN="%s"' % dataspan) or ''), (have_arg('nowrap') and args['nowrap'] and ' NOWRAP' or '')) ) output(render_blocks(section, md)) output('</TD>\n</TR>\n') if exp: level=level+1 dataspan=colspan-level if level > 3: h='<TD COLSPAN="%s"></TD>' % (level-1) elif level > 1: h='<TD></TD>' * (level-1) else: h='' if 
have_arg('header'): doc=args['header'] if md.has_key(doc): doc=md.getitem(doc,0) else: doc=None if doc is not None: output(doc( None, md, standard_html_header=( '<TR>%s<TD WIDTH="16"></TD>' '<TD%s VALIGN="TOP">' % (h, (dataspan > 1 and (' COLSPAN="%s"' % dataspan) or ''))), standard_html_footer='</TD></TR>', )) if items==1: # leaves if have_arg('leaves'): doc=args['leaves'] if md.has_key(doc): doc=md.getitem(doc,0) else: doc=None if doc is not None: treeData['-tree-substate-']=sub treeData['tree-level']=level md._push(treeData) try: output(doc( None,md, standard_html_header=( '<TR>%s<TD WIDTH="16"></TD>' '<TD%s VALIGN="TOP">' % (h, (dataspan > 1 and (' COLSPAN="%s"' % dataspan) or ''))), standard_html_footer='</TD></TR>', )) finally: md._pop(1) elif have_arg('expand'): doc=args['expand'] if md.has_key(doc): doc=md.getitem(doc,0) else: doc=None if doc is not None: treeData['-tree-substate-']=sub treeData['tree-level']=level md._push(treeData) try: output(doc( None,md, standard_html_header=( '<TR>%s<TD WIDTH="16"></TD>' '<TD%s VALIGN="TOP">' % (h, (dataspan > 1 and (' COLSPAN="%s"' % dataspan) or ''))), standard_html_footer='</TD></TR>', )) finally: md._pop(1) else: __traceback_info__=sub, args, state, substate ids={} for item in items: if hasattr(item, idattr): id=getattr(item, idattr) if not simple_type(type(id)): id=id() elif hasattr(item, '_p_oid'): id=item._p_oid else: id=pyid(item) if len(sub)==1: sub.append([]) substate=sub[1] ids[id]=1 md._push(InstanceDict(item,md)) try: data=tpRenderTABLE( item,id,root_url,url,state,substate,diff,data, colspan, section, md, treeData, level, args) finally: md._pop() if not sub[1]: del sub[1] ids=ids.has_key for i in range(len(substate)-1,-1): if not ids(substate[i][0]): del substate[i] if have_arg('footer'): doc=args['footer'] if md.has_key(doc): doc=md.getitem(doc,0) else: doc=None if doc is not None: output(doc( None, md, standard_html_header=( '<TR>%s<TD WIDTH="16"></TD>' '<TD%s VALIGN="TOP">' % (h, (dataspan > 1 and (' COLSPAN="%s"' % dataspan) or ''))), standard_html_footer='</TD></TR>', )) del diff[-1] if not diff: output('</TABLE>\n') return data |
if indexes[index] is "TDdivider" or indexes[index] is THdivider: | if indexes[index] is "TDdivider" or indexes[index] is "THdivider": | def doc_table(self, paragraph, expr = re.compile(r'\s*\|[-]+\|').match): text = paragraph.getColorizableTexts()[0] m = expr(text) subs = paragraph.getSubparagraphs() if not (m): return None rows = [] spans = [] ROWS = [] COLS = [] indexes = [] ignore = [] TDdivider = re.compile("[\-]+").match THdivider = re.compile("[\=]+").match col = re.compile('\|').search innertable = re.compile('\|([-]+|[=]+)\|').search text = strip(text) rows = split(text,'\n') foo = "" for row in range(len(rows)): rows[row] = strip(rows[row]) # have indexes store if a row is a divider # or a cell part for index in range(len(rows)): tmpstr = rows[index][1:len(rows[index])-1] if TDdivider(tmpstr): indexes.append("TDdivider") elif THdivider(tmpstr): indexes.append("THdivider") else: indexes.append("cell") |
items.reverse() | items=self.reverse_items(items) | def tpRenderTABLE(self, id, root_url, url, state, substate, diff, data, colspan, section, md, treeData, level=0, args=None, simple_type={type(''):0, type(1):0, type(1.0):0}.has_key, ): "Render a tree as a table" have_arg=args.has_key exp=0 if level >= 0: urlattr=args['url'] if urlattr and hasattr(self, urlattr): tpUrl=getattr(self, urlattr) if not simple_type(type(tpUrl)): tpUrl=tpUrl() url = (url and ('%s/%s' % (url, tpUrl))) or tpUrl root_url = root_url or tpUrl treeData['tree-item-url']=url treeData['tree-level']=level treeData['tree-item-expanded']=0 idattr=args['id'] output=data.append items=None if (have_arg('assume_children') and args['assume_children'] and substate is not state): # We should not compute children unless we have to. # See if we've been asked to expand our children. for i in range(len(substate)): sub=substate[i] if sub[0]==id: exp=i+1 break if not exp: items=1 if items is None: validate=md.validate if have_arg('branches') and hasattr(self, args['branches']): if validate is None or not hasattr(self, 'aq_acquire'): items=getattr(self, args['branches']) else: items=self.aq_acquire(args['branches'],validate,md) items=items() elif have_arg('branches_expr'): items=args['branches_expr'](md) if not items and have_arg('leaves'): items=1 if items and items != 1: if validate is not None: unauth=[] index=0 for i in items: try: v=validate(items,items,index,i,md) except: v=0 if not v: unauth.append(index) index=index+1 if unauth: if have_arg('skip_unauthorized') and args['skip_unauthorized']: items=list(items) unauth.reverse() for i in unauth: del items[i] else: raise ValidationError, unauth if have_arg('sort'): # Faster/less mem in-place sort if type(items)==type(()): items=list(items) sort=args['sort'] size=range(len(items)) for i in size: v=items[i] k=getattr(v,sort) try: k=k() except: pass items[i]=(k,v) items.sort() for i in size: items[i]=items[i][1] if have_arg('reverse'): items.reverse() diff.append(id) sub=None if substate is state: output('<TABLE CELLSPACING="0">\n') sub=substate[0] exp=items else: # Add prefix output('<TR>\n') # Add +/- icon if items: if level: if level > 3: output( '<TD COLSPAN="%s"></TD>' % (level-1)) elif level > 1: output('<TD></TD>' * (level-1)) output('<TD WIDTH="16"></TD>\n') output('<TD WIDTH="16" VALIGN="TOP">') for i in range(len(substate)): sub=substate[i] if sub[0]==id: exp=i+1 break #################################### # Mostly inline encode_seq for speed s=compress(str(diff)) if len(s) > 57: s=encode_str(s) else: s=b2a_base64(s)[:-1] l=find(s,'=') if l >= 0: s=s[:l] s=translate(s, tplus) #################################### script=md['SCRIPT_NAME'] if exp: treeData['tree-item-expanded']=1 output('<A NAME="%s">' '<A HREF="%s?tree-c=%s#%s">' '<IMG SRC="%s/p_/mi" BORDER=0></A></A>' % (id, root_url, s, id, script)) else: output('<A NAME="%s">' '<A HREF="%s?tree-e=%s#%s">' '<IMG SRC="%s/p_/pl" BORDER=0></A></A>' % (id, root_url, s, id, script)) output('</TD>\n') else: if level > 2: output('<TD COLSPAN="%s"></TD>' % level) elif level > 0: output('<TD></TD>' * level) output('<TD WIDTH="16"></TD>\n') # add item text dataspan=colspan-level output('<TD%s%s VALIGN="TOP" ALIGN="LEFT">' % ((dataspan > 1 and (' COLSPAN="%s"' % dataspan) or ''), (have_arg('nowrap') and args['nowrap'] and ' NOWRAP' or '')) ) output(render_blocks(section, md)) output('</TD>\n</TR>\n') if exp: level=level+1 dataspan=colspan-level if level > 3: h='<TD COLSPAN="%s"></TD>' % (level-1) elif level > 1: h='<TD></TD>' * (level-1) 
else: h='' if have_arg('header'): doc=args['header'] if md.has_key(doc): doc=md.getitem(doc,0) else: doc=None if doc is not None: output(doc( None, md, standard_html_header=( '<TR>%s<TD WIDTH="16"></TD>' '<TD%s VALIGN="TOP">' % (h, (dataspan > 1 and (' COLSPAN="%s"' % dataspan) or ''))), standard_html_footer='</TD></TR>', )) if items==1: # leaves if have_arg('leaves'): doc=args['leaves'] if md.has_key(doc): doc=md.getitem(doc,0) else: doc=None if doc is not None: treeData['-tree-substate-']=sub treeData['tree-level']=level md._push(treeData) try: output(doc( None,md, standard_html_header=( '<TR>%s<TD WIDTH="16"></TD>' '<TD%s VALIGN="TOP">' % (h, (dataspan > 1 and (' COLSPAN="%s"' % dataspan) or ''))), standard_html_footer='</TD></TR>', )) finally: md._pop(1) elif have_arg('expand'): doc=args['expand'] if md.has_key(doc): doc=md.getitem(doc,0) else: doc=None if doc is not None: treeData['-tree-substate-']=sub treeData['tree-level']=level md._push(treeData) try: output(doc( None,md, standard_html_header=( '<TR>%s<TD WIDTH="16"></TD>' '<TD%s VALIGN="TOP">' % (h, (dataspan > 1 and (' COLSPAN="%s"' % dataspan) or ''))), standard_html_footer='</TD></TR>', )) finally: md._pop(1) else: __traceback_info__=sub, args, state, substate ids={} for item in items: if hasattr(item, idattr): id=getattr(item, idattr) if not simple_type(type(id)): id=id() elif hasattr(item, '_p_oid'): id=oid(item) else: id=pyid(item) if len(sub)==1: sub.append([]) substate=sub[1] ids[id]=1 md._push(InstanceDict(item,md)) try: data=tpRenderTABLE( item,id,root_url,url,state,substate,diff,data, colspan, section, md, treeData, level, args) finally: md._pop() if not sub[1]: del sub[1] ids=ids.has_key for i in range(len(substate)-1,-1): if not ids(substate[i][0]): del substate[i] if have_arg('footer'): doc=args['footer'] if md.has_key(doc): doc=md.getitem(doc,0) else: doc=None if doc is not None: output(doc( None, md, standard_html_header=( '<TR>%s<TD WIDTH="16"></TD>' '<TD%s VALIGN="TOP">' % (h, (dataspan > 1 and (' COLSPAN="%s"' % dataspan) or ''))), standard_html_footer='</TD></TR>', )) del diff[-1] if not diff: output('</TABLE>\n') return data |
if type(HTTP_PORT) is type(''): HTTP_PORT=((IP_ADDRESS, HTTP_PORT),) | if type(HTTP_PORT) is type(0): HTTP_PORT=((IP_ADDRESS, HTTP_PORT),) | def set_locale(val): try: import locale except: raise SystemExit, ( 'The locale module could not be imported.\n' \ 'To use localization options, you must ensure\n' \ 'that the locale module is compiled into your\n' \ 'Python installation.' ) try: locale.setlocale(locale.LC_ALL, val) except: raise SystemExit, ( 'The specified locale is not supported by your system.\n' \ 'See your operating system documentation for more\n' \ 'information on locale support.' ) |
if type(FTP_PORT) is type(''): FTP_PORT=((IP_ADDRESS, FTP_PORT),) | if type(FTP_PORT) is type(0): FTP_PORT=((IP_ADDRESS, FTP_PORT),) | def set_locale(val): try: import locale except: raise SystemExit, ( 'The locale module could not be imported.\n' \ 'To use localization options, you must ensure\n' \ 'that the locale module is compiled into your\n' \ 'Python installation.' ) try: locale.setlocale(locale.LC_ALL, val) except: raise SystemExit, ( 'The specified locale is not supported by your system.\n' \ 'See your operating system documentation for more\n' \ 'information on locale support.' ) |
if type(MONITOR_PORT) is type(''): | if type(MONITOR_PORT) is type(0): | def set_locale(val): try: import locale except: raise SystemExit, ( 'The locale module could not be imported.\n' \ 'To use localization options, you must ensure\n' \ 'that the locale module is compiled into your\n' \ 'Python installation.' ) try: locale.setlocale(locale.LC_ALL, val) except: raise SystemExit, ( 'The specified locale is not supported by your system.\n' \ 'See your operating system documentation for more\n' \ 'information on locale support.' ) |
local_roles = dict.get(userid, []) | local_roles = list(dict.get(userid, [])) | def manage_addLocalRoles(self, userid, roles, REQUEST=None): """Set local roles for a user.""" if not roles: raise ValueError, 'One or more roles must be given!' dict=self.__ac_local_roles__ or {} local_roles = dict.get(userid, []) for r in roles: if r not in local_roles: local_roles.append(r) dict[userid] = local_roles self.__ac_local_roles__=dict if REQUEST is not None: stat='Your changes have been saved.' return self.manage_listLocalRoles(self, REQUEST, stat=stat) |
def StructuredText(paragraphs, paragraph_delimiter=re.compile('\n\s*\n')): | def StructuredText(paragraphs, delimiter=re.compile(para_delim)): | def StructuredText(paragraphs, paragraph_delimiter=re.compile('\n\s*\n')): """ StructuredText accepts paragraphs, which is a list of lines to be parsed. StructuredText creates a structure which mimics the structure of the paragraphs. Structure => [paragraph,[sub-paragraphs]] """ currentlevel = 0 currentindent = 0 levels = {0:0} level = 0 # which header are we under struct = [] # the structure to be returned run = struct paragraphs = filter( strip, paragraph_delimiter.split(expandtabs('\n\n'+paragraphs+'\n\n')) ) if not paragraphs: return StructuredTextDocument() ind = [] # structure based on indention levels for paragraph in paragraphs: ind.append([indention(paragraph), paragraph]) currentindent = indention(paragraphs[0]) levels[0] = currentindent ############################################################# # updated # ############################################################# for indent,paragraph in ind : if indent == 0: level = level + 1 currentlevel = 0 currentindent = 0 levels = {0:0} struct.append(StructuredTextParagraph(paragraph, indent=indent, level=currentlevel)) elif indent > currentindent: currentlevel = currentlevel + 1 currentindent = indent levels[currentlevel] = indent run = insert(struct,level,currentlevel) run.append(StructuredTextParagraph(paragraph, indent=indent, level=currentlevel)) elif indent < currentindent: result = findlevel(levels,indent) if result > 0: currentlevel = result currentindent = indent if not level: struct.append(StructuredTextParagraph(paragraph, indent=indent, level=currentlevel)) else: run = insert(struct,level,currentlevel) run.append(StructuredTextParagraph(paragraph, indent=indent, level=currentlevel)) else: if insert(struct,level,currentlevel): run = insert(struct,level,currentlevel) else: run = struct currentindet = indent run.append(StructuredTextParagraph(paragraph, indent=indent, level=currentlevel)) return StructuredTextDocument(struct) |
paragraphs = filter( strip, paragraph_delimiter.split(expandtabs('\n\n'+paragraphs+'\n\n')) ) | paragraphs = expandtabs(paragraphs) paragraphs = '%s%s%s' % ('\n\n', paragraphs, '\n\n') paragraphs = delimiter.split(paragraphs) paragraphs = filter(strip, paragraphs) | def StructuredText(paragraphs, paragraph_delimiter=re.compile('\n\s*\n')): """ StructuredText accepts paragraphs, which is a list of lines to be parsed. StructuredText creates a structure which mimics the structure of the paragraphs. Structure => [paragraph,[sub-paragraphs]] """ currentlevel = 0 currentindent = 0 levels = {0:0} level = 0 # which header are we under struct = [] # the structure to be returned run = struct paragraphs = filter( strip, paragraph_delimiter.split(expandtabs('\n\n'+paragraphs+'\n\n')) ) if not paragraphs: return StructuredTextDocument() ind = [] # structure based on indention levels for paragraph in paragraphs: ind.append([indention(paragraph), paragraph]) currentindent = indention(paragraphs[0]) levels[0] = currentindent ############################################################# # updated # ############################################################# for indent,paragraph in ind : if indent == 0: level = level + 1 currentlevel = 0 currentindent = 0 levels = {0:0} struct.append(StructuredTextParagraph(paragraph, indent=indent, level=currentlevel)) elif indent > currentindent: currentlevel = currentlevel + 1 currentindent = indent levels[currentlevel] = indent run = insert(struct,level,currentlevel) run.append(StructuredTextParagraph(paragraph, indent=indent, level=currentlevel)) elif indent < currentindent: result = findlevel(levels,indent) if result > 0: currentlevel = result currentindent = indent if not level: struct.append(StructuredTextParagraph(paragraph, indent=indent, level=currentlevel)) else: run = insert(struct,level,currentlevel) run.append(StructuredTextParagraph(paragraph, indent=indent, level=currentlevel)) else: if insert(struct,level,currentlevel): run = insert(struct,level,currentlevel) else: run = struct currentindet = indent run.append(StructuredTextParagraph(paragraph, indent=indent, level=currentlevel)) return StructuredTextDocument(struct) |
s = regsub.gsub('[%s]+and[%s]*not[%s]+' % (ws * 3), ' andnot ', s) | s = ts_regex.gsub('[%s]+and[%s]*not[%s]+' % (ws * 3), ' andnot ', s) | def query(s, index, default_operator = Or, ws = (string.whitespace,)): # First replace any occurences of " and not " with " andnot " s = regsub.gsub('[%s]+and[%s]*not[%s]+' % (ws * 3), ' andnot ', s) q = parse(s) q = parse2(q, default_operator) return evaluate(q, index) |
def parens(s, parens_regex = regex.compile("(\|)")): '''Find the beginning and end of the first set of parentheses''' if (parens_regex.search(s) < 0): return None if (parens_regex.group(0) == ")"): raise QueryError, "Mismatched parentheses" open = parens_regex.regs[0][0] + 1 start = parens_regex.regs[0][1] p = 1 while (parens_regex.search(s, start) >= 0): if (parens_regex.group(0) == ")"): p = p - 1 else: p = p + 1 start = parens_regex.regs[0][1] if (p == 0): return (open, parens_regex.regs[0][0]) raise QueryError, "Mismatched parentheses" | def parens(s, parens_re = regex.compile('(\|)').search): index=open_index=paren_count = 0 while 1: index = parens_re(s, index) if index < 0 : break if s[index] == '(': paren_count = paren_count + 1 if open_index == 0 : open_index = index + 1 else: paren_count = paren_count - 1 if paren_count == 0: return open_index, index else: index = index + 1 if paren_count == 0: return None else: raise QueryError, "Mismatched parentheses" | def parens(s, parens_regex = regex.compile("(\|)")): '''Find the beginning and end of the first set of parentheses''' if (parens_regex.search(s) < 0): return None if (parens_regex.group(0) == ")"): raise QueryError, "Mismatched parentheses" open = parens_regex.regs[0][0] + 1 start = parens_regex.regs[0][1] p = 1 while (parens_regex.search(s, start) >= 0): if (parens_regex.group(0) == ")"): p = p - 1 else: p = p + 1 start = parens_regex.regs[0][1] if (p == 0): return (open, parens_regex.regs[0][0]) raise QueryError, "Mismatched parentheses" |
splitted = regsub.split(s, '[%s]*\"[%s]*' % (ws * 2)) | splitted = ts_regex.split(s, '[%s]*\"[%s]*' % (ws * 2)) | def quotes(s, ws = (string.whitespace,)): # split up quoted regions splitted = regsub.split(s, '[%s]*\"[%s]*' % (ws * 2)) split=string.split if (len(splitted) > 1): if ((len(splitted) % 2) == 0): raise QueryError, "Mismatched quotes" for i in range(1,len(splitted),2): # split the quoted region into words splitted[i] = filter(None, split(splitted[i])) # put the Proxmity operator in between quoted words for j in range(1, len(splitted[i])): splitted[i][j : j] = [ Near ] for i in range(len(splitted)-1,-1,-2): # split the non-quoted region into words splitted[i:i+1] = filter(None, split(splitted[i])) splitted = filter(None, splitted) else: # No quotes, so just split the string into words splitted = filter(None, split(s)) return splitted |
self.handleError() | self.handleError(record) | def emit(self, record): try: self.buffer.append(record) msg = self.format(record) self.stream.write("%s\n" % msg) self.flush() except: self.handleError() |
subobject=getattr(object.aq_base, entry_name) else: subobject=getattr(object,entry_name) | if hasattr(object.aq_base, entry_name): subobject=getattr(object, entry_name) else: raise AttributeError, entry_name else: subobject=getattr(object, entry_name) | def traverse(self, path, response=None): """Traverse the object space |
if debug_mode: | if entry_name=='.': subobject=object elif entry_name=='..' and parents: subobject=parents[-1] elif debug_mode: | def traverse(self, path, response=None): """Traverse the object space |
try: isheets_base_classes.append( z._zclass_.propertysheets.__class__) | try: psc=z._zclass_.propertysheets.__class__ if getattr(psc, '_implements_the_notional' '_subclassable_propertysheet' '_class_interface', 0): isheets_base_classes.append(psc) | def __init__(self, id, title, bases): """Build a Zope class |
def manage_workspace(self, URL2): "Emulate standard interface for use with navigation" raise 'Redirect', URL2+'/manage_workspace' | #def manage_workspace(self, URL1): |
|
def __init__(self,id,title,file, precondition='',content_type='application/octet-stream'): | def __init__(self,id,title,file,content_type='application/octet-stream', precondition=''): | def __init__(self,id,title,file, precondition='',content_type='application/octet-stream'): try: headers=file.headers except: headers=None if headers is None: if not content_type: raise 'BadValue', 'No content type specified' self.content_type=content_type self.data=Pdata(file) else: if headers.has_key('content-type'): self.content_type=headers['content-type'] else: if not content_type: raise 'BadValue', 'No content type specified' self.content_type=content_type self.data=Pdata(file.read()) self.__name__=id self.title=title if precondition: self.precondition=precondition self.size=len(self.data) |
If the 'start' argument is specified in the form 'DD/MM/YYYY HH:MM:SS' (UTC), | If the 'start' argument is specified in the form 'YYYY/MM/DD HH:MM:SS' (UTC), | def detailedusage(): details = usage(0) pname = sys.argv[0] details = details + """ |
If the 'end' argument is specified in the form 'DD/MM/YYYY HH:MM:SS' (UTC), | If the 'end' argument is specified in the form 'YYYY/MM/DD HH:MM:SS' (UTC), | def detailedusage(): details = usage(0) pname = sys.argv[0] details = details + """ |
try: return self._length() except AttributeError: l = len(self._unindex) self._length = Length(l) return l | self._migrate_length() return self._length def _migrate_length(self): """ migrate index to use new _length attribute """ if not hasattr(self, '_length'): self._length = Length(len(self._unindex)) | def numObjects(self): """ return the number of indexed objects""" try: return self._length() except AttributeError: # backward compatibility l = len(self._unindex) self._length = Length(l) return l |
class trigger (asyncore.dispatcher): address = ('127.9.9.9', 19999) | class trigger(asyncore.dispatcher): | def handle_read (self): self.recv (8192) try: self.lock.acquire() for thunk in self.thunks: try: thunk() except: (file, fun, line), t, v, tbinfo = asyncore.compact_traceback() print 'exception in trigger thunk: (%s:%s %s)' % (t, v, tbinfo) self.thunks = [] finally: self.lock.release() |
a = socket.socket (socket.AF_INET, socket.SOCK_STREAM) w = socket.socket (socket.AF_INET, socket.SOCK_STREAM) w.setsockopt(socket.IPPROTO_TCP, 1, 1) host='127.0.0.1' port=19999 | w = socket.socket() w.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) count = 0 | def __init__ (self): a = socket.socket (socket.AF_INET, socket.SOCK_STREAM) w = socket.socket (socket.AF_INET, socket.SOCK_STREAM) # set TCP_NODELAY to true to avoid buffering w.setsockopt(socket.IPPROTO_TCP, 1, 1) # tricky: get a pair of connected sockets host='127.0.0.1' port=19999 while 1: try: self.address=(host, port) a.bind(self.address) break except: if port <= 19950: raise BindError, 'Cannot bind trigger!' port=port - 1 a.listen (1) w.setblocking (0) try: w.connect (self.address) except: pass r, addr = a.accept() a.close() w.setblocking (1) self.trigger = w asyncore.dispatcher.__init__ (self, r) self.lock = thread.allocate_lock() self.thunks = [] self._trigger_connected = 0 |
try: self.address=(host, port) a.bind(self.address) break except: if port <= 19950: raise BindError, 'Cannot bind trigger!' port=port - 1 a.listen (1) w.setblocking (0) try: w.connect (self.address) except: pass r, addr = a.accept() | count += 1 a = socket.socket() a.bind(("127.0.0.1", 0)) connect_address = a.getsockname() a.listen(1) try: w.connect(connect_address) break except socket.error, detail: if detail[0] != errno.WSAEADDRINUSE: raise if count >= 10: a.close() w.close() raise BindError("Cannot bind trigger!") a.close() r, addr = a.accept() | def __init__ (self): a = socket.socket (socket.AF_INET, socket.SOCK_STREAM) w = socket.socket (socket.AF_INET, socket.SOCK_STREAM) # set TCP_NODELAY to true to avoid buffering w.setsockopt(socket.IPPROTO_TCP, 1, 1) # tricky: get a pair of connected sockets host='127.0.0.1' port=19999 while 1: try: self.address=(host, port) a.bind(self.address) break except: if port <= 19950: raise BindError, 'Cannot bind trigger!' port=port - 1 a.listen (1) w.setblocking (0) try: w.connect (self.address) except: pass r, addr = a.accept() a.close() w.setblocking (1) self.trigger = w asyncore.dispatcher.__init__ (self, r) self.lock = thread.allocate_lock() self.thunks = [] self._trigger_connected = 0 |
w.setblocking (1) | def __init__ (self): a = socket.socket (socket.AF_INET, socket.SOCK_STREAM) w = socket.socket (socket.AF_INET, socket.SOCK_STREAM) # set TCP_NODELAY to true to avoid buffering w.setsockopt(socket.IPPROTO_TCP, 1, 1) # tricky: get a pair of connected sockets host='127.0.0.1' port=19999 while 1: try: self.address=(host, port) a.bind(self.address) break except: if port <= 19950: raise BindError, 'Cannot bind trigger!' port=port - 1 a.listen (1) w.setblocking (0) try: w.connect (self.address) except: pass r, addr = a.accept() a.close() w.setblocking (1) self.trigger = w asyncore.dispatcher.__init__ (self, r) self.lock = thread.allocate_lock() self.thunks = [] self._trigger_connected = 0 |
|
reg = re.compile("Status: ([0-9]{1,4}) (.*)",re.I)\ | def _request(self,*args,**kw): |
|
mo = reg.search(outp) | mo = self._reg.search(outp) | def _request(self,*args,**kw): |
return '0'*(11-len(i))+i+' ' | v = '0'*(11-len(i))+i+' ' if len(v) > 12: left = v[:-12] for c in left: if c != '0': raise ValueError, 'value too large for oct12' return v[-12:] return v | def oct12(i): i=oct(i) return '0'*(11-len(i))+i+' ' |
if REQUEST is not None: return self.manage_main(self,REQUEST) | if REQUEST is not None: REQUEST['RESPONSE'].redirect(self.absolute_url()+'/manage_main') | def manage_addFile(self,id,file,title='',precondition='', content_type='', REQUEST=None): """Add a new File object. Creates a new File object 'id' with the contents of 'file'""" id, title = cookId(id, title, file) self=self.this() # First, we create the image without data: self._setObject(id, File(id,title,'',content_type, precondition)) # And commit to a sub-transaction: if Globals.DatabaseVersion=='3': get_transaction().commit(1) # Now we "upload" the data. By commiting the add first, the # object can use a database trick to make the upload more efficient. self._getOb(id).manage_upload(file) if REQUEST is not None: return self.manage_main(self,REQUEST) |
meth=None | def __init__(self, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr, environ=os.environ): self.environ=environ fp=None try: if environ['REQUEST_METHOD'] != 'GET': fp=stdin except: pass |
|
def getEntryForObject(self, documentId, default=None): | def getEntryForObject(self, documentId, default=MV): | def getEntryForObject(self, documentId, default=None): """Takes a document ID and returns all the information we have on that specific object.""" if default is None: return self._unindex.get(documentId, default) else: return self._unindex.get(documentId) |
if default is None: | if default is not MV: | def getEntryForObject(self, documentId, default=None): """Takes a document ID and returns all the information we have on that specific object.""" if default is None: return self._unindex.get(documentId, default) else: return self._unindex.get(documentId) |
cache[query]= now, r | if self.cache_time_ > 0: cache[query]= now, r | def _cached_result(self, DB__, query, compressed=0): |
cb_isMovable=cb_isCopyable | cb_isMoveable=cb_isCopyable | def cb_isCopyable(self): pass # for now, we don't allow ZClasses to be copied. |
$Id: Publish.py,v 1.31 1997/01/30 00:50:18 jim Exp $""" | $Id: Publish.py,v 1.32 1997/02/07 14:41:32 jim Exp $""" | def taste(spam): "a favorable reviewer" return spam,'yum yum, I like ' + spam |
__version__='$Revision: 1.31 $'[11:-2] | __version__='$Revision: 1.32 $'[11:-2] | def taste(spam): "a favorable reviewer" return spam,'yum yum, I like ' + spam |
import sys, os, string, types, newcgi, regex | import sys, os, string, types, newcgi, regex, regsub | def main(): # The "main" program for this module pass |
def field2lines(v): | def field2lines(v, crlf=regex.compile('\r\n\|\n\r')): | def field2lines(v): try: v=v.read() except: v=str(v) return string.split(v,'\n') |
"The database connection, <em>%s</em>, cannot be found.") | "The database connection <em>%s</em> cannot be found." % ( self.connection_id)) | def __call__(self, REQUEST=None, __ick__=None, src__=0, test__=0, **kw): """Call the database method |
print datefmt | def __init__(self,*args, **kw): """Return a new date-time object |
|
print "Bad syntax in z:attributes:", `part` | print "Bad syntax in attributes:", `part` | def parseAttributeReplacements(arg): dict = {} for part in splitParts(arg): m = _attr_re.match(part) if not m: print "Bad syntax in z:attributes:", `part` continue name, expr = m.group(1, 2) if dict.has_key(name): print "Duplicate attribute name in z:attributes:", `part` continue dict[name] = expr return dict |
print "Duplicate attribute name in z:attributes:", `part` | print "Duplicate attribute name in attributes:", `part` | def parseAttributeReplacements(arg): dict = {} for part in splitParts(arg): m = _attr_re.match(part) if not m: print "Bad syntax in z:attributes:", `part` continue name, expr = m.group(1, 2) if dict.has_key(name): print "Duplicate attribute name in z:attributes:", `part` continue dict[name] = expr return dict |
print "Bad syntax in z:insert/replace:", `arg` | print "Bad syntax in insert/replace:", `arg` | def parseSubstitution(arg): m = _subst_re.match(arg) if not m: print "Bad syntax in z:insert/replace:", `arg` return None, None key, expr = m.group(1, 2) if not key: key = "text" return key, expr |
parts = map(lambda s, repl=string.replace: repl(s, "\0", ";;"), parts) | parts = map(lambda s, repl=string.replace: repl(s, "\0", ";"), parts) | def splitParts(arg): # Break in pieces at undoubled semicolons and # change double semicolons to singles: import string arg = string.replace(arg, ";;", "\0") parts = string.split(arg, ';') parts = map(lambda s, repl=string.replace: repl(s, "\0", ";;"), parts) if len(parts) > 1 and not string.strip(parts[-1]): del parts[-1] # It ended in a semicolon return parts |
self.definingMacro = None | def __init__(self, program, macros, engine, stream=None, debug=0, wrap=60, metal=1, tal=1, showtal=-1, strictinsert=1, stackLimit=100): self.program = program self.macros = macros self.engine = engine self.TALESError = engine.getTALESError() self.Default = engine.getDefault() self.stream = stream or sys.stdout self.debug = debug self.wrap = wrap self.metal = metal self.tal = tal assert showtal in (-1, 0, 1) if showtal == -1: showtal = (not tal) self.showtal = showtal self.strictinsert = strictinsert self.stackLimit = stackLimit self.html = 0 self.endsep = "/>" self.macroStack = [] self.definingMacro = None self.position = None, None # (lineno, offset) self.col = 0 self.level = 0 self.scopeLevel = 0 |
|
elif action == 2 and self.macroStack: | elif action == 2 and self.metal: | def attrAction(self, item): name, value = item[:2] try: action = self.actionIndex[item[2]] except KeyError: raise TALError, ('Error in TAL program', self.position) if not self.showtal and action > 1: return 0, name, value ok = 1 if action <= 1 and self.tal: if self.html and string.lower(name) in BOOLEAN_HTML_ATTRS: evalue = self.engine.evaluateBoolean(item[3]) if evalue is self.Default: if action == 1: # Cancelled insert ok = 0 elif not evalue: ok = 0 else: value = None else: evalue = self.engine.evaluateText(item[3]) if evalue is self.Default: if action == 1: # Cancelled insert ok = 0 else: value = evalue if value is None: ok = 0 elif action == 2 and self.macroStack: i = string.rfind(name, ":") + 1 prefix, suffix = name[:i], name[i:] if suffix == "define-macro": if len(self.macroStack) == 1: macroName, slots = self.macroStack[-1] name = prefix + "use-macro" value = macroName else: ok = 0 if suffix == "fill-slot": macroName, slots = self.macroStack[0] if not slots.has_key(value): ok = 0 if suffix == "define-slot" and not self.definingMacro: name = prefix + "fill-slot" elif action == 1: # Unexecuted insert ok = 0 return ok, name, value |
if len(self.macroStack) == 1: macroName, slots = self.macroStack[-1] | if what == "use-macro": | def attrAction(self, item): name, value = item[:2] try: action = self.actionIndex[item[2]] except KeyError: raise TALError, ('Error in TAL program', self.position) if not self.showtal and action > 1: return 0, name, value ok = 1 if action <= 1 and self.tal: if self.html and string.lower(name) in BOOLEAN_HTML_ATTRS: evalue = self.engine.evaluateBoolean(item[3]) if evalue is self.Default: if action == 1: # Cancelled insert ok = 0 elif not evalue: ok = 0 else: value = None else: evalue = self.engine.evaluateText(item[3]) if evalue is self.Default: if action == 1: # Cancelled insert ok = 0 else: value = evalue if value is None: ok = 0 elif action == 2 and self.macroStack: i = string.rfind(name, ":") + 1 prefix, suffix = name[:i], name[i:] if suffix == "define-macro": if len(self.macroStack) == 1: macroName, slots = self.macroStack[-1] name = prefix + "use-macro" value = macroName else: ok = 0 if suffix == "fill-slot": macroName, slots = self.macroStack[0] if not slots.has_key(value): ok = 0 if suffix == "define-slot" and not self.definingMacro: name = prefix + "fill-slot" elif action == 1: # Unexecuted insert ok = 0 return ok, name, value |
ok = 0 if suffix == "fill-slot": macroName, slots = self.macroStack[0] if not slots.has_key(value): ok = 0 if suffix == "define-slot" and not self.definingMacro: name = prefix + "fill-slot" | assert what == "define-macro" i = self.macroContext("use-macro") if i >= 0: j = self.macroContext("define-slot") if j > i: name = prefix + "use-macro" else: ok = 0 elif suffix == "define-slot": assert what == "define-slot" if self.macroContext("use-macro") >= 0: name = prefix + "fill-slot" | def attrAction(self, item): name, value = item[:2] try: action = self.actionIndex[item[2]] except KeyError: raise TALError, ('Error in TAL program', self.position) if not self.showtal and action > 1: return 0, name, value ok = 1 if action <= 1 and self.tal: if self.html and string.lower(name) in BOOLEAN_HTML_ATTRS: evalue = self.engine.evaluateBoolean(item[3]) if evalue is self.Default: if action == 1: # Cancelled insert ok = 0 elif not evalue: ok = 0 else: value = None else: evalue = self.engine.evaluateText(item[3]) if evalue is self.Default: if action == 1: # Cancelled insert ok = 0 else: value = evalue if value is None: ok = 0 elif action == 2 and self.macroStack: i = string.rfind(name, ":") + 1 prefix, suffix = name[:i], name[i:] if suffix == "define-macro": if len(self.macroStack) == 1: macroName, slots = self.macroStack[-1] name = prefix + "use-macro" value = macroName else: ok = 0 if suffix == "fill-slot": macroName, slots = self.macroStack[0] if not slots.has_key(value): ok = 0 if suffix == "define-slot" and not self.definingMacro: name = prefix + "fill-slot" elif action == 1: # Unexecuted insert ok = 0 return ok, name, value |
def dumpMacroStack(self, prefix, suffix, value): sys.stderr.write("+---- %s%s = %s\n" % (prefix, suffix, value)) for i in range(len(self.macroStack)): what, macroName, slots = self.macroStack[i] sys.stderr.write("| %2d. %-12s %-12s %s\n" % (i, what, macroName, slots and slots.keys())) sys.stderr.write("+--------------------------------------\n") | ## def dumpMacroStack(self, prefix, suffix, value): |
|
save = self.definingMacro self.definingMacro = macroName | if not self.metal: self.interpret(macro) return self.pushMacro("define-macro", macroName, None) | def do_defineMacro(self, macroName, macro): save = self.definingMacro self.definingMacro = macroName self.interpret(macro) self.definingMacro = save |
self.definingMacro = save | self.popMacro() | def do_defineMacro(self, macroName, macro): save = self.definingMacro self.definingMacro = macroName self.interpret(macro) self.definingMacro = save |
if len(self.macroStack) >= self.stackLimit: raise METALError("macro nesting limit (%d) exceeded " "by macro %s" % (self.stackLimit, `macroName`)) self.macroStack.append((macroName, compiledSlots)) | self.pushMacro("use-macro", macroName, compiledSlots) | def do_useMacro(self, macroName, macroExpr, compiledSlots, block): if not self.metal: self.interpret(block) return macro = self.engine.evaluateMacro(macroExpr) if macro is self.Default: self.interpret(block) return if not isCurrentVersion(macro): raise METALError("macro %s has incompatible version %s" % (`macroName`, `getProgramVersion(macro)`), self.position) mode = getProgramMode(macro) if mode != (self.html and "html" or "xml"): raise METALError("macro %s has incompatible mode %s" % (`macroName`, `mode`), self.position) if len(self.macroStack) >= self.stackLimit: raise METALError("macro nesting limit (%d) exceeded " "by macro %s" % (self.stackLimit, `macroName`)) self.macroStack.append((macroName, compiledSlots)) self.interpret(macro) self.macroStack.pop() |
self.macroStack.pop() | self.popMacro() | def do_useMacro(self, macroName, macroExpr, compiledSlots, block): if not self.metal: self.interpret(block) return macro = self.engine.evaluateMacro(macroExpr) if macro is self.Default: self.interpret(block) return if not isCurrentVersion(macro): raise METALError("macro %s has incompatible version %s" % (`macroName`, `getProgramVersion(macro)`), self.position) mode = getProgramMode(macro) if mode != (self.html and "html" or "xml"): raise METALError("macro %s has incompatible mode %s" % (`macroName`, `mode`), self.position) if len(self.macroStack) >= self.stackLimit: raise METALError("macro nesting limit (%d) exceeded " "by macro %s" % (self.stackLimit, `macroName`)) self.macroStack.append((macroName, compiledSlots)) self.interpret(macro) self.macroStack.pop() |
for macroName, slots in self.macroStack: slot = slots.get(slotName) or slot | for what, macroName, slots in self.macroStack: if what == "use-macro" and slots is not None: slot = slots.get(slotName, slot) self.pushMacro("define-slot", slotName, None) | def do_defineSlot(self, slotName, block): slot = None for macroName, slots in self.macroStack: slot = slots.get(slotName) or slot if slot: self.interpret(slot) else: self.interpret(block) |
s=str(error_value) if tagSearch(s) >= 0: error_message=error_value | try: s=str(error_value) except: pass else: if tagSearch(s) >= 0: error_message=error_value | def raise_standardErrorMessage( self, client=None, REQUEST={}, error_type=None, error_value=None, tb=None, error_tb=None, error_message='', tagSearch=ts_regex.compile('[a-zA-Z]>').search): |
assert ec.evaluate('x | python:int') == int | assert ec.evaluate('x | python:int') == 0 | def testHybrid(self): '''Test hybrid path expressions''' ec = self.ec assert ec.evaluate('x | python:1+1') == 2 |
my_t = self._t + _tzoffset(self._tz, self._t) ob_t = other._t + _tzoffset(other._tz, other._t) return (my_t - ob_t) / 86400.0 | if 0: my_t = self._t + _tzoffset(self._tz, self._t) ob_t = other._t + _tzoffset(other._tz, other._t) return (my_t - ob_t) / 86400.0 return self._d - other._d | def __sub__(self,other): """Either a DateTime or a number may be subtracted from a DateTime, however, a DateTime may not be subtracted from a number.""" if hasattr(other, '_d'): my_t = self._t + _tzoffset(self._tz, self._t) ob_t = other._t + _tzoffset(other._tz, other._t) return (my_t - ob_t) / 86400.0 else: return self.__add__(-(other)) |
provided by default, and 'alt' comes from the 'title_or_id' method. | provided by default, and 'alt' comes from the 'title' property. | def tag(height=None, width=None, alt=None, scale=0, xscale=0, yscale=0, **args): """ This method returns a string which contains an HTML IMG tag reference to the image. |
toc = TransientObjectContainer( 'session_data', 'Session Data Container', addNotification = addnotify, delNotification = delnotify, limit=limit, period_secs=period_spec) if timeout_spec is not None: | if 1: | def install_tempfolder_and_sdc(self): app = self.getApp() from Products.ZODBMountPoint.MountedObject import manage_addMounts,\ MountedObject from Products.ZODBMountPoint.MountedObject import getConfiguration as \ getDBTabConfiguration |
unindex[i] = () | unindex[i] = [] | def index_object(self, i, obj, tupleType=type(()), dictType=type({}), callable=callable): """Recompute index data for data with ids >= start. if 'obj' is passed in, it is indexed instead of _data[i]""" |
def _updateProperty(self, id, value): | def _updateProperty(self, id, value, meta=None): | def _updateProperty(self, id, value): # Update the value of an existing property. If value is a string, # an attempt will be made to convert the value to the type of the # existing property. if not self.hasProperty(id): raise 'Bad Request', 'The property %s does not exist.' % id if type(value)==type(''): proptype=self.propertyInfo(id).get('type', 'string') if type_converters.has_key(proptype): value=type_converters[proptype](value) setattr(self.v_self(), id, value) |
if parent: | if parent is not None: | def getRolesInContext(self, object): """Return the list of roles assigned to the user, including local roles assigned in context of the passed in object.""" name=self.getUserName() roles=self.getRoles() local={} object=getattr(object, 'aq_inner', object) while 1: local_roles = getattr(object, '__ac_local_roles__', None) if local_roles: if callable(local_roles): local_roles=local_roles() dict=local_roles or {} for r in dict.get(name, []): local[r]=1 inner = getattr(object, 'aq_inner', object) parent = getattr(inner, 'aq_parent', None) if parent: object = parent continue if hasattr(object, 'im_self'): object=object.im_self object=getattr(object, 'aq_inner', object) continue break roles=list(roles) + local.keys() return roles |
exc_info=exc_info()) | exc_info=True) | def __setstate__(self, state): Globals.Persistent.__setstate__(self, state) if self.connection_string: try: self.connect(self.connection_string) except: logger.error('Error connecting to relational database.', exc_info=exc_info()) |
error=exc_info()) | exc_info=True) | def manage_close_connection(self, REQUEST=None): " " try: if hasattr(self,'_v_database_connection'): self._v_database_connection.close() except: logger.error('Error closing relational database connection.', error=exc_info()) self._v_connected='' if REQUEST is not None: return self.manage_main(self, REQUEST) |
klass=self.aq_inner.aq_parent.aq_parent._zclass_ | def manage_edit(self, meta_type='', icon='', file='', REQUEST=None): """Set basic item properties. """ klass=self.aq_inner.aq_parent.aq_parent._zclass_ if meta_type: self.setClassAttr('meta_type', meta_type) if file: # and hasattr(file, 'content_type'): if hasattr(klass, 'ziconImage'): klass.ziconImage.manage_upload(file) else: self.setClassAttr('ziconImage', OFS.Image.Image('ziconImage','',file)) if not icon: self.setClassAttr('icon', REQUEST['URL1']+'/ziconImage') |
|
if hasattr(klass, 'ziconImage'): klass.ziconImage.manage_upload(file) else: | __traceback_info__=file image=self.getClassAttr('ziconImage', None) if image is None: | def manage_edit(self, meta_type='', icon='', file='', REQUEST=None): """Set basic item properties. """ klass=self.aq_inner.aq_parent.aq_parent._zclass_ if meta_type: self.setClassAttr('meta_type', meta_type) if file: # and hasattr(file, 'content_type'): if hasattr(klass, 'ziconImage'): klass.ziconImage.manage_upload(file) else: self.setClassAttr('ziconImage', OFS.Image.Image('ziconImage','',file)) if not icon: self.setClassAttr('icon', REQUEST['URL1']+'/ziconImage') |
if not icon: self.setClassAttr('icon', REQUEST['URL1']+'/ziconImage') | else: image.manage_upload(file) if (not icon) and REQUEST: icon=(REQUEST['URL3'][len(REQUEST['BASE1'])+1:] +'/ziconImage') | def manage_edit(self, meta_type='', icon='', file='', REQUEST=None): """Set basic item properties. """ klass=self.aq_inner.aq_parent.aq_parent._zclass_ if meta_type: self.setClassAttr('meta_type', meta_type) if file: # and hasattr(file, 'content_type'): if hasattr(klass, 'ziconImage'): klass.ziconImage.manage_upload(file) else: self.setClassAttr('ziconImage', OFS.Image.Image('ziconImage','',file)) if not icon: self.setClassAttr('icon', REQUEST['URL1']+'/ziconImage') |
class ziconImage(ExtensionClass.Base): "Computed icon attribute" def __of__(self, parent): return _ziconImage() | def manage_edit(self, meta_type='', icon='', file='', REQUEST=None): """Set basic item properties. """ klass=self.aq_inner.aq_parent.aq_parent._zclass_ if meta_type: self.setClassAttr('meta_type', meta_type) if file: # and hasattr(file, 'content_type'): if hasattr(klass, 'ziconImage'): klass.ziconImage.manage_upload(file) else: self.setClassAttr('ziconImage', OFS.Image.Image('ziconImage','',file)) if not icon: self.setClassAttr('icon', REQUEST['URL1']+'/ziconImage') |
|
ziconImage=ziconImage() | def icon(self): return self.getClassAttr('icon','') | def __of__(self, parent): return _ziconImage() |
def icon_(self): icon=self.aq_inner.aq_parent.aq_parent._zclass_.icon class _ziconImage(ExtensionClass.Base): "The real Computed icon attribute" def __of__(self, ps): klass=ps.aq_inner.aq_parent.aq_parent._zclass_ return klass.ziconImage | def meta_type(self): return self.getClassAttr('meta_type','') | def icon_(self): icon=self.aq_inner.aq_parent.aq_parent._zclass_.icon |
$Id: Publish.py,v 1.53 1997/09/23 10:32:57 jim Exp $""" | $Id: Publish.py,v 1.54 1997/09/24 18:47:18 jim Exp $""" | def taste(spam): "a favorable reviewer" return spam,'yum yum, I like ' + spam |
__version__='$Revision: 1.53 $'[11:-2] | __version__='$Revision: 1.54 $'[11:-2] | def taste(spam): "a favorable reviewer" return spam,'yum yum, I like ' + spam |
if fslist is None: form={'BODY':fs} | if fslist is None: form={'BODY':fs.value} | def __init__(self, |
def manage_profile_stats(self, sort='time', limit=200): | def manage_profile_stats(self, sort='time', limit=200, stripDirs=1, mode='stats'): | def manage_profile_stats(self, sort='time', limit=200): """Return profile data if available""" stats=getattr(sys, '_ps_', None) if stats is None: return None output=StringIO() stdout=sys.stdout sys.stdout=output stats.strip_dirs().sort_stats(sort).print_stats(limit) sys.stdout.flush() sys.stdout=stdout return output.getvalue() |
stats.strip_dirs().sort_stats(sort).print_stats(limit) | getattr(stats,'print_%s' % mode)(limit) | def manage_profile_stats(self, sort='time', limit=200): """Return profile data if available""" stats=getattr(sys, '_ps_', None) if stats is None: return None output=StringIO() stdout=sys.stdout sys.stdout=output stats.strip_dirs().sort_stats(sort).print_stats(limit) sys.stdout.flush() sys.stdout=stdout return output.getvalue() |
container_class = 'OFS.Folder.Folder' | def __init__(self, section): self.container_class = section.container_class or 'OFS.Folder.Folder' ZODBDatabase.__init__(self, section) | def root_config(section): from ZConfig import ConfigurationError here = os.path.dirname(os.path.abspath(__file__)) swhome = os.path.dirname(os.path.dirname(here)) section.softwarehome = swhome section.zopehome = os.path.dirname(os.path.dirname(swhome)) if section.cgi_environment is None: section.cgi_environment = {} if section.clienthome is None: section.clienthome = os.path.join(section.instancehome, "var") # set up defaults for pid_filename and lock_filename if they're # not in the config if section.pid_filename is None: section.pid_filename = os.path.join(section.clienthome, 'Z2.pid') if section.lock_filename is None: section.lock_filename = os.path.join(section.clienthome, 'Z2.lock') if not section.databases: section.databases = getDefaultDatabaseFactories(section) mount_factories = {} # { name -> factory} mount_points = {} # { virtual path -> name } dup_err = ('Invalid configuration: ZODB databases named "%s" and "%s" are ' 'both configured to use the same mount point, named "%s"') for database in section.databases: points = database.getVirtualMountPaths() name = database.config.getSectionName() mount_factories[name] = database for point in points: if mount_points.has_key(point): raise ConfigurationError(dup_err % (mount_points[point], name, point)) mount_points[point] = name from DBTab.DBTab import DBTab section.dbtab = DBTab(mount_factories, mount_points) return section |
print DB, DB.klass | def open(self): DB = self.createDB() if self.config.connection_class: # set the connection class DB.klass = self.config.connection_class print DB, DB.klass if self.config.class_factory is not None: DB.setClassFactory(self.config.class_factory) from ZODB.ActivityMonitor import ActivityMonitor DB.setActivityMonitor(ActivityMonitor()) return DB |
|
class_factory=None))
| class_factory=None, container_class=None))
| def getSectionName(self):
    return self.name
class_factory=None))
temporary.container_class = ('Products.TemporaryFolder.TemporaryFolder.'
                             'SimpleTemporaryContainer')
| class_factory=None,
container_class=('Products.TemporaryFolder.'
                 'TemporaryFolder.'
                 'SimpleTemporaryContainer')
))
| def getSectionName(self):
    return self.name
{'id':'height', 'type':'int'},
{'id':'width', 'type':'int'},
| {'id':'height', 'type':'string'},
{'id':'width', 'type':'string'},
| def manage_addImage(self,id,file,title='',REQUEST=None):
    """
    Add a new Image object.
    Creates a new Image object 'id' with the contents of 'file'.
    """
    id, title = cookId(id, title, file)
    self._setObject(id, Image(id,title,file))
    if REQUEST is not None:
        return self.manage_main(self,REQUEST)
    return id
return '%s %s' % (url, join(out,' / '))
| return '%s%s' % (url, join(out,'/'))
| def tabs_path_default(self, REQUEST,
                       # Static var
                       unquote=urllib.unquote,
                       ):
    steps = REQUEST._steps[:-1]
    script = REQUEST['BASEPATH1']
    linkpat = '<a href="%s/manage_workspace">%s</a>'
    out = []
    url = linkpat % (script, ' /')
    if not steps:
        return url
    last = steps.pop()
    for step in steps:
        script = '%s/%s' % (script, step)
        out.append(linkpat % (script, unquote(step)))
    out.append(unquote(last))
    return '%s %s' % (url, join(out,' / '))
del self._index[entry]
| try:
    del self._index[entry]
except KeyError:
    pass
if isinstance(self.__len__, BTrees.Length.Length):
    self._length = self.__len__
    del self.__len__
| def removeForwardIndexEntry(self, entry, documentId):
    """Take the entry provided and remove any reference to documentId
    in its entry in the index.
    """
    indexRow = self._index.get(entry, _marker)
    if indexRow is not _marker:
        try:
            indexRow.remove(documentId)
            if not indexRow:
                del self._index[entry]
                self._length.change(-1)
self._length.change(1)
| try:
    self._length.change(1)
except AttributeError:
    if isinstance(self.__len__, BTrees.Length.Length):
        self._length = self.__len__
        del self.__len__
    self._length.change(1)
| def insertForwardIndexEntry(self, entry, documentId):
    """Take the entry provided and put it in the correct place
    in the forward index.
return getattr(self.aq_parent, '%s__roles__' % self.__name__)
| imp = getattr(self.aq_parent, '%s__roles__' % self.__name__)
return imp.__of__(self)
| def _get__roles__(self):
    return getattr(self.aq_parent, '%s__roles__' % self.__name__)
from cStringIO import cStringIO
self.ParseStream(cStringIO(s))
| from cStringIO import StringIO
self.parseStream(StringIO(s))
| def parseString(self, s):
    from cStringIO import cStringIO
    self.ParseStream(cStringIO(s))
return pt_render(extra_context=bound_names)
| return self.pt_render(extra_context=bound_names)
| def _exec(self, bound_names, args, kw):
    """Call a Page Template"""
    bound_names['options'] = kw
'<span tal:replace="structure foo" i18n:name="foo_name"/>' | '<span tal:replace="structure foo" i18n:name="foo_name"' ' i18n:translate=""/>' | def test_structure_replace_with_messageid_and_i18nname(self): program, macros = self._compile( '<div i18n:translate="" >' '<span tal:replace="structure foo" i18n:name="foo_name"/>' '</div>') self._check(program, '<div>FOOVALUE</div>\n') |
'<em i18n:name="foo_name">' | '<em i18n:name="foo_name" tal:omit-tag="">' | def test_complex_replace_with_messageid_and_i18nname(self): program, macros = self._compile( '<div i18n:translate="" >' '<em i18n:name="foo_name">' '<span tal:replace="foo"/>' '</em>' '</div>') self._check(program, '<div>FOOVALUE</div>\n') |
program = [('version', '1.5'),
| program = [('version', '1.6'),
| def test_translate_static_text_as_dynamic_from_bytecode(self):
    program = [('version', '1.5'),
def _getCollectingTranslationDomain(self):
    class CollectingTranslationService(DummyTranslationService):
        data = []
        def translate(self, domain, msgid, mapping=None,
                      context=None, target_language=None, default=None):
            self.data.append((msgid, mapping))
            return DummyTranslationService.translate(
                self, domain, msgid, mapping, context, target_language, default)
    xlatsvc = CollectingTranslationService()
    self.engine.translationService = xlatsvc
    return xlatsvc
| def _getCollectingTranslationDomain(self):
    class CollectingTranslationService(DummyTranslationService):
        data = []
|
xlatdmn = self._getCollectingTranslationDomain()
| self.engine.translationDomain.clearMsgids()
| def test_for_correct_msgids(self):
    xlatdmn = self._getCollectingTranslationDomain()
    result = StringIO()
    program, macros = self._compile(
        '<div i18n:translate="">This is text for '
        '<span i18n:translate="" tal:content="bar" '
        'i18n:name="bar_name"/>.</div>')
    self.interpreter = TALInterpreter(program, {}, self.engine, stream=result)
    self.interpreter()
    msgids = list(xlatdmn.data)
    msgids.sort()
    self.assertEqual(2, len(msgids))
    self.assertEqual('BaRvAlUe', msgids[0][0])
    self.assertEqual('This is text for ${bar_name}.', msgids[1][0])
    self.assertEqual({'bar_name': '<span>BARVALUE</span>'}, msgids[1][1])
    self.assertEqual(
        '<div>THIS IS TEXT FOR <span>BARVALUE</span>.</div>\n',
        result.getvalue())
msgids = list(xlatdmn.data)
| msgids = self.engine.translationDomain.getMsgids('default')
| def test_for_correct_msgids(self):
    xlatdmn = self._getCollectingTranslationDomain()
    result = StringIO()
    program, macros = self._compile(
        '<div i18n:translate="">This is text for '
        '<span i18n:translate="" tal:content="bar" '
        'i18n:name="bar_name"/>.</div>')
    self.interpreter = TALInterpreter(program, {}, self.engine, stream=result)
    self.interpreter()
    msgids = list(xlatdmn.data)
    msgids.sort()
    self.assertEqual(2, len(msgids))
    self.assertEqual('BaRvAlUe', msgids[0][0])
    self.assertEqual('This is text for ${bar_name}.', msgids[1][0])
    self.assertEqual({'bar_name': '<span>BARVALUE</span>'}, msgids[1][1])
    self.assertEqual(
        '<div>THIS IS TEXT FOR <span>BARVALUE</span>.</div>\n',
        result.getvalue())
xlatdmn = self._getCollectingTranslationDomain()
| self.engine.translationDomain.clearMsgids()
| def test_for_raw_msgids(self):
    # Test for Issue 314: i18n:translate removes line breaks from
    # <pre>...</pre> contents
    # HTML mode
    xlatdmn = self._getCollectingTranslationDomain()
    result = StringIO()
    program, macros = self._compile(
        '<div i18n:translate=""> This is text\n'
        ' \tfor\n div. </div>'
        '<pre i18n:translate=""> This is text\n'
        ' <b>\tfor</b>\n pre. </pre>')
    self.interpreter = TALInterpreter(program, {}, self.engine, stream=result)
    self.interpreter()
    msgids = list(xlatdmn.data)
    msgids.sort()
    self.assertEqual(2, len(msgids))
    self.assertEqual(' This is text\n <b>\tfor</b>\n pre. ', msgids[0][0])
    self.assertEqual('This is text for div.', msgids[1][0])
    self.assertEqual(
        '<div>THIS IS TEXT FOR DIV.</div>'
        '<pre> THIS IS TEXT\n <B>\tFOR</B>\n PRE. </pre>\n',
        result.getvalue())
msgids = list(xlatdmn.data)
| msgids = self.engine.translationDomain.getMsgids('default')
| def test_for_raw_msgids(self):
    # Test for Issue 314: i18n:translate removes line breaks from
    # <pre>...</pre> contents
    # HTML mode
    xlatdmn = self._getCollectingTranslationDomain()
    result = StringIO()
    program, macros = self._compile(
        '<div i18n:translate=""> This is text\n'
        ' \tfor\n div. </div>'
        '<pre i18n:translate=""> This is text\n'
        ' <b>\tfor</b>\n pre. </pre>')
    self.interpreter = TALInterpreter(program, {}, self.engine, stream=result)
    self.interpreter()
    msgids = list(xlatdmn.data)
    msgids.sort()
    self.assertEqual(2, len(msgids))
    self.assertEqual(' This is text\n <b>\tfor</b>\n pre. ', msgids[0][0])
    self.assertEqual('This is text for div.', msgids[1][0])
    self.assertEqual(
        '<div>THIS IS TEXT FOR DIV.</div>'
        '<pre> THIS IS TEXT\n <B>\tFOR</B>\n PRE. </pre>\n',
        result.getvalue())
xlatdmn = self._getCollectingTranslationDomain()
| self.engine.translationDomain.clearMsgids()
| def test_raw_msgids_and_i18ntranslate_i18nname(self):
    xlatdmn = self._getCollectingTranslationDomain()
    result = StringIO()
    program, macros = self._compile(
        '<div i18n:translate=""> This is text\n \tfor\n'
        '<pre tal:content="raw" i18n:name="raw"'
        ' i18n:translate=""></pre>.</div>')
    self.interpreter = TALInterpreter(program, {}, self.engine, stream=result)
    self.interpreter()
    msgids = list(xlatdmn.data)
    msgids.sort()
    self.assertEqual(2, len(msgids))
    self.assertEqual(' \tRaW\n ', msgids[0][0])
    self.assertEqual('This is text for ${raw}.', msgids[1][0])
    self.assertEqual({'raw': '<pre> \tRAW\n </pre>'}, msgids[1][1])
    self.assertEqual(
        u'<div>THIS IS TEXT FOR <pre> \tRAW\n </pre>.</div>\n',
        result.getvalue())
msgids = list(xlatdmn.data)
| msgids = self.engine.translationDomain.getMsgids('default')
| def test_raw_msgids_and_i18ntranslate_i18nname(self):
    xlatdmn = self._getCollectingTranslationDomain()
    result = StringIO()
    program, macros = self._compile(
        '<div i18n:translate=""> This is text\n \tfor\n'
        '<pre tal:content="raw" i18n:name="raw"'
        ' i18n:translate=""></pre>.</div>')
    self.interpreter = TALInterpreter(program, {}, self.engine, stream=result)
    self.interpreter()
    msgids = list(xlatdmn.data)
    msgids.sort()
    self.assertEqual(2, len(msgids))
    self.assertEqual(' \tRaW\n ', msgids[0][0])
    self.assertEqual('This is text for ${raw}.', msgids[1][0])
    self.assertEqual({'raw': '<pre> \tRAW\n </pre>'}, msgids[1][1])
    self.assertEqual(
        u'<div>THIS IS TEXT FOR <pre> \tRAW\n </pre>.</div>\n',
        result.getvalue())
def test_unicode_mixed_unknown_encoding(self):
    text = u"foo ${bar}"
    mapping = {u'bar': 'd\xe9j\xe0'}
    expected = u"foo d\\xe9j\\xe0"
    self.assertEqual(interpolate(text, mapping), expected)
| def test_unicode_mixed_unknown_encoding(self):
    # This test assumes that sys.getdefaultencoding is ascii...
    text = u"foo ${bar}"
    mapping = {u'bar': 'd\xe9j\xe0'}
    expected = u"foo d\\xe9j\\xe0"
    self.assertEqual(interpolate(text, mapping), expected)
|
def __init__(self, basepath):
| def __init__(self, basepath, verbosity=VERBOSE):
| def __init__(self, basepath):
    # initialize python path
    self.basepath=path=basepath
    pjoin=os.path.join
    if sys.platform == 'win32':
        sys.path.insert(0, pjoin(path, 'lib/python'))
        sys.path.insert(1, pjoin(path, 'bin/lib'))
        sys.path.insert(2, pjoin(path, 'bin/lib/plat-win'))
        sys.path.insert(3, pjoin(path, 'bin/lib/win32'))
        sys.path.insert(4, pjoin(path, 'bin/lib/win32/lib'))
        sys.path.insert(5, path)
    else:
        sys.path.insert(0, pjoin(path, 'lib/python'))
        sys.path.insert(1, path)
file = open(filepath, 'r')
text = file.read()
file.close()
return ((find(text, 'unittest') > -1) or
        (find(text, 'framework.py') > -1))
| path, name = os.path.split(filepath)
fname, ext = os.path.splitext(name)
if name[:4]=='test' and name[-3:]=='.py' and \
   name != 'testrunner.py':
    file=open(filepath, 'r')
    lines=file.readlines()
    file.close()
    for line in lines:
        if (find(line, 'def test_suite(') > -1) or \
           (find(line, 'framework(') > -1):
            return 1
return 0
| def smellsLikeATest(self, filepath, find=string.find):
    file = open(filepath, 'r')
    text = file.read()
    file.close()
    return ((find(text, 'unittest') > -1) or
            (find(text, 'framework.py') > -1))