rem (string, lengths 0-322k) | add (string, lengths 0-2.05M) | context (string, lengths 8-228k) |
---|---|---|
finished.append(base) | finished_dict[id(base)] = None | def fixupZClassDependencies(self, rebuild=0): # Note that callers should not catch exceptions from this method # to ensure that the transaction gets aborted if the registry # cannot be rebuilt for some reason. Returns true if any ZClasses # were registered as a result of the call or the registry was # rebuilt. jar=self._p_jar result=0 |
if hasattr(ob, '_register') and hasattr(ob, '_zclass_'): class_id=getattr(ob._zclass_, '__module__', None) | if hasattr(base,'_register') and hasattr(base,'_zclass_'): class_id=getattr(base._zclass_, '__module__', None) | def fixupZClassDependencies(self, rebuild=0): # Note that callers should not catch exceptions from this method # to ensure that the transaction gets aborted if the registry # cannot be rebuilt for some reason. Returns true if any ZClasses # were registered as a result of the call or the registry was # rebuilt. jar=self._p_jar result=0 |
if hasattr(ob, 'objectItems'): | if hasattr(base, 'objectItems'): | def fixupZClassDependencies(self, rebuild=0): # Note that callers should not catch exceptions from this method # to ensure that the transaction gets aborted if the registry # cannot be rebuilt for some reason. Returns true if any ZClasses # were registered as a result of the call or the registry was # rebuilt. jar=self._p_jar result=0 |
if hasattr(ob, 'propertysheets'): | if hasattr(base, 'propertysheets'): | def fixupZClassDependencies(self, rebuild=0): # Note that callers should not catch exceptions from this method # to ensure that the transaction gets aborted if the registry # cannot be rebuilt for some reason. Returns true if any ZClasses # were registered as a result of the call or the registry was # rebuilt. jar=self._p_jar result=0 |
'Broken objects exist in product %s.' % product.id) idx = idx + 1 | 'Broken objects exist in product %s.' % product.id, error=sys.exc_info()) | def fixupZClassDependencies(self, rebuild=0): # Note that callers should not catch exceptions from this method # to ensure that the transaction gets aborted if the registry # cannot be rebuilt for some reason. Returns true if any ZClasses # were registered as a result of the call or the registry was # rebuilt. jar=self._p_jar result=0 |
try: keys=list(self._p_jar.root()['ZGlobals'].keys()) except: return 1 | try: keys=list(self._p_jar.root()['ZGlobals'].keys()) except: LOG('Zope', ERROR, 'A problem was found when checking the global product '\ 'registry. This is probably due to a Product being '\ 'uninstalled or renamed. The traceback follows.', error=sys.exc_info()) return 1 | def checkGlobalRegistry(self): """Check the global (zclass) registry for problems, which can be caused by things like disk-based products being deleted. Return true if a problem is found""" try: keys=list(self._p_jar.root()['ZGlobals'].keys()) except: return 1 return 0 |
'A broken ZClass dependency was found in the global ' \ 'class registry. This is probably due to a product ' \ 'being uninstalled. The registry has successfully ' \ 'been rebuilt.') | 'The global ZClass registry has successfully been rebuilt.') | def initialize(app): # Initialize the application # Initialize the cache: app.Control_Panel.initialize_cache() # The following items marked b/c are backward compatibility hacks # which make sure that expected system objects are added to the # bobobase. This is required because the bobobase in use may pre- # date the introduction of certain system objects such as those # which provide Lever support. # b/c: Ensure that Control Panel exists. if not hasattr(app, 'Control_Panel'): cpl=ApplicationManager() cpl._init() app._setObject('Control_Panel', cpl) get_transaction().note('Added Control_Panel') get_transaction().commit() # b/c: Ensure that a ProductFolder exists. if not hasattr(app.Control_Panel.aq_base, 'Products'): app.Control_Panel.Products=App.Product.ProductFolder() get_transaction().note('Added Control_Panel.Products') get_transaction().commit() # b/c: Ensure that std err msg exists. if not hasattr(app, 'standard_error_message'): import Document Document.manage_addDocument( app, 'standard_error_message', 'Standard Error Message', _standard_error_msg) get_transaction().note('Added standard_error_message') get_transaction().commit() # b/c: Ensure that Owner role exists. if hasattr(app, '__ac_roles__') and not ('Owner' in app.__ac_roles__): app.__ac_roles__=app.__ac_roles__ + ('Owner',) get_transaction().note('Added Owner role') get_transaction().commit() # ensure the Authenticated role exists. if hasattr(app, '__ac_roles__'): if not 'Authenticated' in app.__ac_roles__: app.__ac_roles__=app.__ac_roles__ + ('Authenticated',) get_transaction().note('Added Authenticated role') get_transaction().commit() # Make sure we have Globals root=app._p_jar.root() if not root.has_key('ZGlobals'): import BTree app._p_jar.root()['ZGlobals']=BTree.BTree() get_transaction().note('Added Globals') get_transaction().commit() # Install the initial user. if hasattr(app, 'acl_users'): users = app.acl_users if hasattr(users, '_createInitialUser'): app.acl_users._createInitialUser() get_transaction().note('Created initial user') get_transaction().commit() install_products(app) # Note that the code from here on only runs if we are not a ZEO # client, or if we are a ZEO client and we've specified by way # of env variable that we want to force products to load. if (os.environ.get('ZEO_CLIENT') and not os.environ.get('FORCE_PRODUCT_LOAD')): return # Check for dangling pointers (broken zclass dependencies) in the # global class registry. If found, rebuild the registry. Note that # if the check finds problems but fails to successfully rebuild the # registry we abort the transaction so that we don't leave it in an # indeterminate state. did_fixups=0 bad_things=0 try: if app.checkGlobalRegistry(): app.fixupZClassDependencies(rebuild=1) did_fixups=1 LOG('Zope', INFO, 'A broken ZClass dependency was found in the global ' \ 'class registry. This is probably due to a product ' \ 'being uninstalled. The registry has successfully ' \ 'been rebuilt.') get_transaction().note('Rebuilt global product registry') get_transaction().commit() except: bad_things=1 LOG('Zope', ERROR, 'A problem was found in the global product registry but ' 'the attempt to rebuild the registry failed.', error=sys.exc_info()) get_transaction().abort() # Now we need to see if any (disk-based) products were installed # during intialization. If so (and the registry has no errors), # there may still be zclasses dependent on a base class in the # newly installed product that were previously broken and need to # be fixed up. If any really Bad Things happened (dangling pointers # were found in the registry but it couldn't be rebuilt), we don't # try to do anything to avoid making the problem worse. if (not did_fixups) and (not bad_things): # App.Product.initializeProduct will set this if a disk-based # product was added or updated and we are not a ZEO client. if getattr(Globals, '__disk_product_installed__', 0): try: if app.fixupZClassDependencies(): get_transaction().commit() except: LOG('Zope', ERROR, 'Attempt to fixup ZClass dependencies after detecting ' \ 'an updated disk-based product failed.', error=sys.exc_info()) get_transaction().abort() |
LOG('Zope', ERROR, 'A problem was found in the global product registry but ' 'the attempt to rebuild the registry failed.', | LOG('Zope', ERROR, 'The attempt to rebuild the registry failed.', | def initialize(app): # Initialize the application # Initialize the cache: app.Control_Panel.initialize_cache() # The following items marked b/c are backward compatibility hacks # which make sure that expected system objects are added to the # bobobase. This is required because the bobobase in use may pre- # date the introduction of certain system objects such as those # which provide Lever support. # b/c: Ensure that Control Panel exists. if not hasattr(app, 'Control_Panel'): cpl=ApplicationManager() cpl._init() app._setObject('Control_Panel', cpl) get_transaction().note('Added Control_Panel') get_transaction().commit() # b/c: Ensure that a ProductFolder exists. if not hasattr(app.Control_Panel.aq_base, 'Products'): app.Control_Panel.Products=App.Product.ProductFolder() get_transaction().note('Added Control_Panel.Products') get_transaction().commit() # b/c: Ensure that std err msg exists. if not hasattr(app, 'standard_error_message'): import Document Document.manage_addDocument( app, 'standard_error_message', 'Standard Error Message', _standard_error_msg) get_transaction().note('Added standard_error_message') get_transaction().commit() # b/c: Ensure that Owner role exists. if hasattr(app, '__ac_roles__') and not ('Owner' in app.__ac_roles__): app.__ac_roles__=app.__ac_roles__ + ('Owner',) get_transaction().note('Added Owner role') get_transaction().commit() # ensure the Authenticated role exists. if hasattr(app, '__ac_roles__'): if not 'Authenticated' in app.__ac_roles__: app.__ac_roles__=app.__ac_roles__ + ('Authenticated',) get_transaction().note('Added Authenticated role') get_transaction().commit() # Make sure we have Globals root=app._p_jar.root() if not root.has_key('ZGlobals'): import BTree app._p_jar.root()['ZGlobals']=BTree.BTree() get_transaction().note('Added Globals') get_transaction().commit() # Install the initial user. if hasattr(app, 'acl_users'): users = app.acl_users if hasattr(users, '_createInitialUser'): app.acl_users._createInitialUser() get_transaction().note('Created initial user') get_transaction().commit() install_products(app) # Note that the code from here on only runs if we are not a ZEO # client, or if we are a ZEO client and we've specified by way # of env variable that we want to force products to load. if (os.environ.get('ZEO_CLIENT') and not os.environ.get('FORCE_PRODUCT_LOAD')): return # Check for dangling pointers (broken zclass dependencies) in the # global class registry. If found, rebuild the registry. Note that # if the check finds problems but fails to successfully rebuild the # registry we abort the transaction so that we don't leave it in an # indeterminate state. did_fixups=0 bad_things=0 try: if app.checkGlobalRegistry(): app.fixupZClassDependencies(rebuild=1) did_fixups=1 LOG('Zope', INFO, 'A broken ZClass dependency was found in the global ' \ 'class registry. This is probably due to a product ' \ 'being uninstalled. The registry has successfully ' \ 'been rebuilt.') get_transaction().note('Rebuilt global product registry') get_transaction().commit() except: bad_things=1 LOG('Zope', ERROR, 'A problem was found in the global product registry but ' 'the attempt to rebuild the registry failed.', error=sys.exc_info()) get_transaction().abort() # Now we need to see if any (disk-based) products were installed # during intialization. If so (and the registry has no errors), # there may still be zclasses dependent on a base class in the # newly installed product that were previously broken and need to # be fixed up. If any really Bad Things happened (dangling pointers # were found in the registry but it couldn't be rebuilt), we don't # try to do anything to avoid making the problem worse. if (not did_fixups) and (not bad_things): # App.Product.initializeProduct will set this if a disk-based # product was added or updated and we are not a ZEO client. if getattr(Globals, '__disk_product_installed__', 0): try: if app.fixupZClassDependencies(): get_transaction().commit() except: LOG('Zope', ERROR, 'Attempt to fixup ZClass dependencies after detecting ' \ 'an updated disk-based product failed.', error=sys.exc_info()) get_transaction().abort() |
def __init__(self, args, fmt=''): | def __init__(self, args, fmt='s'): | def __init__(self, args, fmt=''): |
if len(args)==1: | if len(args)==1 and fmt=='s': | def __init__(self, args, fmt=''): |
val = ('%'+self.fmt) % val | fmt=self.fmt if fmt=='s': val=str(val) else: val = ('%'+self.fmt) % (val,) | def render(self, md): |
def dav__propstat(self, allprop, vals, join=string.join): | def dav__propstat(self, allprop, names, join=string.join): | def dav__propstat(self, allprop, vals, join=string.join): # The dav__propstat method returns a chunk of xml containing # one or more propstat elements indicating property names, # values, errors and status codes. This is called by some # of the WebDAV support machinery. If a property set does # not support WebDAV, just override this method to return # an empty string. propstat='<d:propstat%s>\n' \ ' <d:prop>\n' \ '%s\n' \ ' </d:prop>\n' \ ' <d:status>HTTP/1.1 %s</d:status>\n' \ '</d:propstat>\n' result=[] if not self.propertyMap(): return '' if not allprop and not vals: # propname request for name in self.propertyIds(): result.append(' <ns0:%s/>' % name) result=join(result, '\n') nsdef=' xmlns:ns0="%s"' % self.xml_namespace() return propstat % (nsdef, result, '200 OK') elif allprop: for name, value in self.propertyItems(): prop=' <ns0:%s>%s</ns0:%s>' % (name, value, name) result.append(prop) result=join(result, '\n') nsdef=' xmlns:ns0="%s"' % self.xml_namespace() return propstat % (nsdef, result, '200 OK') else: xml_ns=self.xml_namespace() propdict=self.propdict() nsdef=' xmlns:ns0="%s"' % self.xml_namespace() for name, ns in vals: if ns==xml_ns: if propdict.has_key(name): value=self.getProperty(name) prop=' <ns0:%s>%s</ns0:%s>' % (name, value, name) result.append(propstat % (nsdef, prop, '200 OK')) else: prop=' <ns0:%s/>' % name result.append(propstat % (nsdef, prop,'404 Not Found')) return join(result, '\n') |
propstat='<d:propstat%s>\n' \ | propstat='<d:propstat xmlns:ps="%s">\n' \ | def dav__propstat(self, allprop, vals, join=string.join): # The dav__propstat method returns a chunk of xml containing # one or more propstat elements indicating property names, # values, errors and status codes. This is called by some # of the WebDAV support machinery. If a property set does # not support WebDAV, just override this method to return # an empty string. propstat='<d:propstat%s>\n' \ ' <d:prop>\n' \ '%s\n' \ ' </d:prop>\n' \ ' <d:status>HTTP/1.1 %s</d:status>\n' \ '</d:propstat>\n' result=[] if not self.propertyMap(): return '' if not allprop and not vals: # propname request for name in self.propertyIds(): result.append(' <ns0:%s/>' % name) result=join(result, '\n') nsdef=' xmlns:ns0="%s"' % self.xml_namespace() return propstat % (nsdef, result, '200 OK') elif allprop: for name, value in self.propertyItems(): prop=' <ns0:%s>%s</ns0:%s>' % (name, value, name) result.append(prop) result=join(result, '\n') nsdef=' xmlns:ns0="%s"' % self.xml_namespace() return propstat % (nsdef, result, '200 OK') else: xml_ns=self.xml_namespace() propdict=self.propdict() nsdef=' xmlns:ns0="%s"' % self.xml_namespace() for name, ns in vals: if ns==xml_ns: if propdict.has_key(name): value=self.getProperty(name) prop=' <ns0:%s>%s</ns0:%s>' % (name, value, name) result.append(propstat % (nsdef, prop, '200 OK')) else: prop=' <ns0:%s/>' % name result.append(propstat % (nsdef, prop,'404 Not Found')) return join(result, '\n') |
'%s\n' \ | '%%s\n' \ | def dav__propstat(self, allprop, vals, join=string.join): # The dav__propstat method returns a chunk of xml containing # one or more propstat elements indicating property names, # values, errors and status codes. This is called by some # of the WebDAV support machinery. If a property set does # not support WebDAV, just override this method to return # an empty string. propstat='<d:propstat%s>\n' \ ' <d:prop>\n' \ '%s\n' \ ' </d:prop>\n' \ ' <d:status>HTTP/1.1 %s</d:status>\n' \ '</d:propstat>\n' result=[] if not self.propertyMap(): return '' if not allprop and not vals: # propname request for name in self.propertyIds(): result.append(' <ns0:%s/>' % name) result=join(result, '\n') nsdef=' xmlns:ns0="%s"' % self.xml_namespace() return propstat % (nsdef, result, '200 OK') elif allprop: for name, value in self.propertyItems(): prop=' <ns0:%s>%s</ns0:%s>' % (name, value, name) result.append(prop) result=join(result, '\n') nsdef=' xmlns:ns0="%s"' % self.xml_namespace() return propstat % (nsdef, result, '200 OK') else: xml_ns=self.xml_namespace() propdict=self.propdict() nsdef=' xmlns:ns0="%s"' % self.xml_namespace() for name, ns in vals: if ns==xml_ns: if propdict.has_key(name): value=self.getProperty(name) prop=' <ns0:%s>%s</ns0:%s>' % (name, value, name) result.append(propstat % (nsdef, prop, '200 OK')) else: prop=' <ns0:%s/>' % name result.append(propstat % (nsdef, prop,'404 Not Found')) return join(result, '\n') |
' <d:status>HTTP/1.1 %s</d:status>\n' \ '</d:propstat>\n' | ' <d:status>HTTP/1.1 %%s</d:status>\n%%s' \ '</d:propstat>\n' % self.xml_namespace() errormsg=' <d:responsedescription>%s</d:responsedescription>\n' | def dav__propstat(self, allprop, vals, join=string.join): # The dav__propstat method returns a chunk of xml containing # one or more propstat elements indicating property names, # values, errors and status codes. This is called by some # of the WebDAV support machinery. If a property set does # not support WebDAV, just override this method to return # an empty string. propstat='<d:propstat%s>\n' \ ' <d:prop>\n' \ '%s\n' \ ' </d:prop>\n' \ ' <d:status>HTTP/1.1 %s</d:status>\n' \ '</d:propstat>\n' result=[] if not self.propertyMap(): return '' if not allprop and not vals: # propname request for name in self.propertyIds(): result.append(' <ns0:%s/>' % name) result=join(result, '\n') nsdef=' xmlns:ns0="%s"' % self.xml_namespace() return propstat % (nsdef, result, '200 OK') elif allprop: for name, value in self.propertyItems(): prop=' <ns0:%s>%s</ns0:%s>' % (name, value, name) result.append(prop) result=join(result, '\n') nsdef=' xmlns:ns0="%s"' % self.xml_namespace() return propstat % (nsdef, result, '200 OK') else: xml_ns=self.xml_namespace() propdict=self.propdict() nsdef=' xmlns:ns0="%s"' % self.xml_namespace() for name, ns in vals: if ns==xml_ns: if propdict.has_key(name): value=self.getProperty(name) prop=' <ns0:%s>%s</ns0:%s>' % (name, value, name) result.append(propstat % (nsdef, prop, '200 OK')) else: prop=' <ns0:%s/>' % name result.append(propstat % (nsdef, prop,'404 Not Found')) return join(result, '\n') |
if not self.propertyMap(): return '' if not allprop and not vals: | if not allprop and not names: | def dav__propstat(self, allprop, vals, join=string.join): # The dav__propstat method returns a chunk of xml containing # one or more propstat elements indicating property names, # values, errors and status codes. This is called by some # of the WebDAV support machinery. If a property set does # not support WebDAV, just override this method to return # an empty string. propstat='<d:propstat%s>\n' \ ' <d:prop>\n' \ '%s\n' \ ' </d:prop>\n' \ ' <d:status>HTTP/1.1 %s</d:status>\n' \ '</d:propstat>\n' result=[] if not self.propertyMap(): return '' if not allprop and not vals: # propname request for name in self.propertyIds(): result.append(' <ns0:%s/>' % name) result=join(result, '\n') nsdef=' xmlns:ns0="%s"' % self.xml_namespace() return propstat % (nsdef, result, '200 OK') elif allprop: for name, value in self.propertyItems(): prop=' <ns0:%s>%s</ns0:%s>' % (name, value, name) result.append(prop) result=join(result, '\n') nsdef=' xmlns:ns0="%s"' % self.xml_namespace() return propstat % (nsdef, result, '200 OK') else: xml_ns=self.xml_namespace() propdict=self.propdict() nsdef=' xmlns:ns0="%s"' % self.xml_namespace() for name, ns in vals: if ns==xml_ns: if propdict.has_key(name): value=self.getProperty(name) prop=' <ns0:%s>%s</ns0:%s>' % (name, value, name) result.append(propstat % (nsdef, prop, '200 OK')) else: prop=' <ns0:%s/>' % name result.append(propstat % (nsdef, prop,'404 Not Found')) return join(result, '\n') |
result.append(' <ns0:%s/>' % name) | result.append(' <ps:%s/>' % name) if not result: return '' | def dav__propstat(self, allprop, vals, join=string.join): # The dav__propstat method returns a chunk of xml containing # one or more propstat elements indicating property names, # values, errors and status codes. This is called by some # of the WebDAV support machinery. If a property set does # not support WebDAV, just override this method to return # an empty string. propstat='<d:propstat%s>\n' \ ' <d:prop>\n' \ '%s\n' \ ' </d:prop>\n' \ ' <d:status>HTTP/1.1 %s</d:status>\n' \ '</d:propstat>\n' result=[] if not self.propertyMap(): return '' if not allprop and not vals: # propname request for name in self.propertyIds(): result.append(' <ns0:%s/>' % name) result=join(result, '\n') nsdef=' xmlns:ns0="%s"' % self.xml_namespace() return propstat % (nsdef, result, '200 OK') elif allprop: for name, value in self.propertyItems(): prop=' <ns0:%s>%s</ns0:%s>' % (name, value, name) result.append(prop) result=join(result, '\n') nsdef=' xmlns:ns0="%s"' % self.xml_namespace() return propstat % (nsdef, result, '200 OK') else: xml_ns=self.xml_namespace() propdict=self.propdict() nsdef=' xmlns:ns0="%s"' % self.xml_namespace() for name, ns in vals: if ns==xml_ns: if propdict.has_key(name): value=self.getProperty(name) prop=' <ns0:%s>%s</ns0:%s>' % (name, value, name) result.append(propstat % (nsdef, prop, '200 OK')) else: prop=' <ns0:%s/>' % name result.append(propstat % (nsdef, prop,'404 Not Found')) return join(result, '\n') |
nsdef=' xmlns:ns0="%s"' % self.xml_namespace() return propstat % (nsdef, result, '200 OK') | return propstat % (result, '200 OK', '') | def dav__propstat(self, allprop, vals, join=string.join): # The dav__propstat method returns a chunk of xml containing # one or more propstat elements indicating property names, # values, errors and status codes. This is called by some # of the WebDAV support machinery. If a property set does # not support WebDAV, just override this method to return # an empty string. propstat='<d:propstat%s>\n' \ ' <d:prop>\n' \ '%s\n' \ ' </d:prop>\n' \ ' <d:status>HTTP/1.1 %s</d:status>\n' \ '</d:propstat>\n' result=[] if not self.propertyMap(): return '' if not allprop and not vals: # propname request for name in self.propertyIds(): result.append(' <ns0:%s/>' % name) result=join(result, '\n') nsdef=' xmlns:ns0="%s"' % self.xml_namespace() return propstat % (nsdef, result, '200 OK') elif allprop: for name, value in self.propertyItems(): prop=' <ns0:%s>%s</ns0:%s>' % (name, value, name) result.append(prop) result=join(result, '\n') nsdef=' xmlns:ns0="%s"' % self.xml_namespace() return propstat % (nsdef, result, '200 OK') else: xml_ns=self.xml_namespace() propdict=self.propdict() nsdef=' xmlns:ns0="%s"' % self.xml_namespace() for name, ns in vals: if ns==xml_ns: if propdict.has_key(name): value=self.getProperty(name) prop=' <ns0:%s>%s</ns0:%s>' % (name, value, name) result.append(propstat % (nsdef, prop, '200 OK')) else: prop=' <ns0:%s/>' % name result.append(propstat % (nsdef, prop,'404 Not Found')) return join(result, '\n') |
for name, value in self.propertyItems(): prop=' <ns0:%s>%s</ns0:%s>' % (name, value, name) | for item in self.propertyMap(): name, type=item['id'], item.get('type','string') meta=item.get('meta', {}) value=self.getProperty(name) if type=='tokens': value=join(value, ' ') elif type=='lines': value=join(value, '\n') if meta.get('dav_xml', 0): prop=value else: prop=' <ps:%s>%s</ps:%s>' % (name, value, name) | def dav__propstat(self, allprop, vals, join=string.join): # The dav__propstat method returns a chunk of xml containing # one or more propstat elements indicating property names, # values, errors and status codes. This is called by some # of the WebDAV support machinery. If a property set does # not support WebDAV, just override this method to return # an empty string. propstat='<d:propstat%s>\n' \ ' <d:prop>\n' \ '%s\n' \ ' </d:prop>\n' \ ' <d:status>HTTP/1.1 %s</d:status>\n' \ '</d:propstat>\n' result=[] if not self.propertyMap(): return '' if not allprop and not vals: # propname request for name in self.propertyIds(): result.append(' <ns0:%s/>' % name) result=join(result, '\n') nsdef=' xmlns:ns0="%s"' % self.xml_namespace() return propstat % (nsdef, result, '200 OK') elif allprop: for name, value in self.propertyItems(): prop=' <ns0:%s>%s</ns0:%s>' % (name, value, name) result.append(prop) result=join(result, '\n') nsdef=' xmlns:ns0="%s"' % self.xml_namespace() return propstat % (nsdef, result, '200 OK') else: xml_ns=self.xml_namespace() propdict=self.propdict() nsdef=' xmlns:ns0="%s"' % self.xml_namespace() for name, ns in vals: if ns==xml_ns: if propdict.has_key(name): value=self.getProperty(name) prop=' <ns0:%s>%s</ns0:%s>' % (name, value, name) result.append(propstat % (nsdef, prop, '200 OK')) else: prop=' <ns0:%s/>' % name result.append(propstat % (nsdef, prop,'404 Not Found')) return join(result, '\n') |
xml_ns=self.xml_namespace() | | def dav__propstat(self, allprop, vals, join=string.join): # The dav__propstat method returns a chunk of xml containing # one or more propstat elements indicating property names, # values, errors and status codes. This is called by some # of the WebDAV support machinery. If a property set does # not support WebDAV, just override this method to return # an empty string. propstat='<d:propstat%s>\n' \ ' <d:prop>\n' \ '%s\n' \ ' </d:prop>\n' \ ' <d:status>HTTP/1.1 %s</d:status>\n' \ '</d:propstat>\n' result=[] if not self.propertyMap(): return '' if not allprop and not vals: # propname request for name in self.propertyIds(): result.append(' <ns0:%s/>' % name) result=join(result, '\n') nsdef=' xmlns:ns0="%s"' % self.xml_namespace() return propstat % (nsdef, result, '200 OK') elif allprop: for name, value in self.propertyItems(): prop=' <ns0:%s>%s</ns0:%s>' % (name, value, name) result.append(prop) result=join(result, '\n') nsdef=' xmlns:ns0="%s"' % self.xml_namespace() return propstat % (nsdef, result, '200 OK') else: xml_ns=self.xml_namespace() propdict=self.propdict() nsdef=' xmlns:ns0="%s"' % self.xml_namespace() for name, ns in vals: if ns==xml_ns: if propdict.has_key(name): value=self.getProperty(name) prop=' <ns0:%s>%s</ns0:%s>' % (name, value, name) result.append(propstat % (nsdef, prop, '200 OK')) else: prop=' <ns0:%s/>' % name result.append(propstat % (nsdef, prop,'404 Not Found')) return join(result, '\n') |
nsdef=' xmlns:ns0="%s"' % self.xml_namespace() for name, ns in vals: if ns==xml_ns: if propdict.has_key(name): | xml_id=self.xml_namespace() for name, ns in names: if ns==xml_id: if not propdict.has_key(name): prop=' <ps:%s/>' % name emsg=errormsg % 'No such property: %s' % name result.append(propstat % (prop, '404 Not Found', emsg)) else: item=propdict[name] name, type=item['id'], item.get('type','string') meta=item.get('meta', {}) | def dav__propstat(self, allprop, vals, join=string.join): # The dav__propstat method returns a chunk of xml containing # one or more propstat elements indicating property names, # values, errors and status codes. This is called by some # of the WebDAV support machinery. If a property set does # not support WebDAV, just override this method to return # an empty string. propstat='<d:propstat%s>\n' \ ' <d:prop>\n' \ '%s\n' \ ' </d:prop>\n' \ ' <d:status>HTTP/1.1 %s</d:status>\n' \ '</d:propstat>\n' result=[] if not self.propertyMap(): return '' if not allprop and not vals: # propname request for name in self.propertyIds(): result.append(' <ns0:%s/>' % name) result=join(result, '\n') nsdef=' xmlns:ns0="%s"' % self.xml_namespace() return propstat % (nsdef, result, '200 OK') elif allprop: for name, value in self.propertyItems(): prop=' <ns0:%s>%s</ns0:%s>' % (name, value, name) result.append(prop) result=join(result, '\n') nsdef=' xmlns:ns0="%s"' % self.xml_namespace() return propstat % (nsdef, result, '200 OK') else: xml_ns=self.xml_namespace() propdict=self.propdict() nsdef=' xmlns:ns0="%s"' % self.xml_namespace() for name, ns in vals: if ns==xml_ns: if propdict.has_key(name): value=self.getProperty(name) prop=' <ns0:%s>%s</ns0:%s>' % (name, value, name) result.append(propstat % (nsdef, prop, '200 OK')) else: prop=' <ns0:%s/>' % name result.append(propstat % (nsdef, prop,'404 Not Found')) return join(result, '\n') |
prop=' <ns0:%s>%s</ns0:%s>' % (name, value, name) result.append(propstat % (nsdef, prop, '200 OK')) else: prop=' <ns0:%s/>' % name result.append(propstat % (nsdef, prop,'404 Not Found')) return join(result, '\n') def odav__propstat(self, url, allprop, vals, iscol, join=string.join): result=[] propstat='<d:propstat>\n' \ '<d:prop%s>\n' \ '%s\n' \ '</d:prop>\n' \ '<d:status>HTTP/1.1 %s</d:status>\n' \ '</d:/propstat>' if not allprop and not vals: if hasattr(aq_base(self), 'propertyMap'): for md in self.propertyMap(): prop='<z:%s/>' % md['id'] result.append(propstat % ('', prop, '200 OK')) elif allprop: if hasattr(aq_base(self), 'propertyMap'): for md in self.propertyMap(): name, type=md['id'], md.get('type', 'string') value=getattr(self, name) if type=='tokens': value=join(value, ' ') elif type=='lines': value=join(value, '\n') else: value=str(value) prop='<z:%s>%s</z:%s>' % (name, value, name) result.append(propstat % ('', prop, '200 OK')) else: prop_mgr=hasattr(aq_base(self), 'propertyMap') for name, ns in vals: if ns==zpns: if not prop_mgr or not self.hasProperty(name): prop='<z:%s/>' % name result.append(propstat % ('',prop,'404 Not Found')) else: value=getattr(self, name) type=self.getPropertyType(name) | | def dav__propstat(self, allprop, vals, join=string.join): # The dav__propstat method returns a chunk of xml containing # one or more propstat elements indicating property names, # values, errors and status codes. This is called by some # of the WebDAV support machinery. If a property set does # not support WebDAV, just override this method to return # an empty string. propstat='<d:propstat%s>\n' \ ' <d:prop>\n' \ '%s\n' \ ' </d:prop>\n' \ ' <d:status>HTTP/1.1 %s</d:status>\n' \ '</d:propstat>\n' result=[] if not self.propertyMap(): return '' if not allprop and not vals: # propname request for name in self.propertyIds(): result.append(' <ns0:%s/>' % name) result=join(result, '\n') nsdef=' xmlns:ns0="%s"' % self.xml_namespace() return propstat % (nsdef, result, '200 OK') elif allprop: for name, value in self.propertyItems(): prop=' <ns0:%s>%s</ns0:%s>' % (name, value, name) result.append(prop) result=join(result, '\n') nsdef=' xmlns:ns0="%s"' % self.xml_namespace() return propstat % (nsdef, result, '200 OK') else: xml_ns=self.xml_namespace() propdict=self.propdict() nsdef=' xmlns:ns0="%s"' % self.xml_namespace() for name, ns in vals: if ns==xml_ns: if propdict.has_key(name): value=self.getProperty(name) prop=' <ns0:%s>%s</ns0:%s>' % (name, value, name) result.append(propstat % (nsdef, prop, '200 OK')) else: prop=' <ns0:%s/>' % name result.append(propstat % (nsdef, prop,'404 Not Found')) return join(result, '\n') |
else: value=str(value) prop='<z:%s>%s</z:%s>' % (name, value, name) result.append(propstat % ('', prop, '200 OK')) else: prop='<n:%s/>' % name ns=' xmlns:n="%s"' % ns result.append(propstat % (ns, prop, '404 Not Found')) result='<d:response>\n' \ '<d:href>%s</d:href>\n' \ '%s\n' \ '</d:response>' % (url, join(result, '\n')) return result | if meta.get('dav_xml', 0): prop=value else: prop=' <ps:%s>%s</ps:%s>' % (name, value, name) result.append(propstat % (prop, '200 OK', '')) if not result: return '' return join(result, '') | def odav__propstat(self, url, allprop, vals, iscol, join=string.join): # The dav__propstat method returns an xml response element # containing one or more propstats indicating property names, # values, errors and status codes. result=[] propstat='<d:propstat>\n' \ '<d:prop%s>\n' \ '%s\n' \ '</d:prop>\n' \ '<d:status>HTTP/1.1 %s</d:status>\n' \ '</d:/propstat>' if not allprop and not vals: if hasattr(aq_base(self), 'propertyMap'): for md in self.propertyMap(): prop='<z:%s/>' % md['id'] result.append(propstat % ('', prop, '200 OK')) |
Apply the test parameters. provided by the dictionary 'argvars'. | Apply the test parameters provided by the dictionary 'argvars'. | def ZScriptHTML_tryAction(REQUEST, argvars): """ |
self.size = len(dumps(index)) + len(dumps(data)) | sizer = _ByteCounter() pickler = Pickler(sizer, HIGHEST_PROTOCOL) pickler.dump(index) pickler.dump(data) self.size = sizer.getCount() | def __init__(self, index, data, view_name): try: # This is a protective barrier that hopefully prevents # us from caching something that might result in memory # leaks. It's also convenient for determining the # approximate memory usage of the cache entry. self.size = len(dumps(index)) + len(dumps(data)) except: raise CacheException('The data for the cache is not pickleable.') self.created = time.time() self.data = data self.view_name = view_name self.access_count = 0 |
start, end = ws.pos(position) text = text[:start] + before + text[start:end] + after + text[end:] | if lpos != position: lpos=position start, end = ws.pos(position) text = (text[:start] + before + text[start:end] + after + text[end:]) | def highlight(self, text, positions, before, after): ws = WordSequence(text, self.synstop) positions = map(None, positions) |
product=getattr(__import__("Products.%s" % product_name), product_name) | product=__import__("Products.%s" % product_name, global_dict, global_dict, silly) | def install_products(app): # Install a list of products into the basic folder class, so # that all folders know about top-level objects, aka products path_join=os.path.join product_dir=path_join(SOFTWARE_HOME,'Products') isdir=os.path.isdir exists=os.path.exists DictType=type({}) from Folder import Folder folder_permissions={} for p in Folder.__ac_permissions__: permission, names = p[:2] folder_permissions[permission]=names meta_types=list(Folder.dynamic_meta_types) product_names=os.listdir(product_dir) product_names.sort() for product_name in product_names: package_dir=path_join(product_dir, product_name) if not isdir(package_dir): continue if not exists(path_join(package_dir, '__init__.py')): if not exists(path_join(package_dir, '__init__.pyc')): continue product=getattr(__import__("Products.%s" % product_name), product_name) permissions={} new_permissions={} for permission, names in pgetattr(product, '__ac_permissions__', ()): if names: for name in names: permissions[name]=permission elif not folder_permissions.has_key(permission): new_permissions[permission]=() for meta_type in pgetattr(product, 'meta_types', ()): if product_name=='OFSP': meta_types.insert(0,meta_type) else: meta_types.append(meta_type) name=meta_type['name'] for name,method in pgetattr(product, 'methods', {}).items(): if not hasattr(Folder, name): setattr(Folder, name, method) if name[-9:]=='__roles__': continue # Just setting roles if (permissions.has_key(name) and not folder_permissions.has_key(permissions[name])): permission=permissions[name] if new_permissions.has_key(permission): new_permissions[permission].append(name) else: new_permissions[permission]=[name] if new_permissions: new_permissions=new_permissions.items() for permission, names in new_permissions: folder_permissions[permission]=names new_permissions.sort() Folder.__dict__['__ac_permissions__']=tuple( list(Folder.__ac_permissions__)+new_permissions) misc_=pgetattr(product, 'misc_', {}) if type(misc_) is DictType: misc_=Misc_(product_name, misc_) Application.misc_.__dict__[product_name]=misc_ # Set up dynamic project information. App.Product.initializeProduct(product_name, package_dir, app) Folder.dynamic_meta_types=tuple(meta_types) Globals.default__class_init__(Folder) |
self.emit("insertStructure", cexpr, attrDict, []) | self.emit("insertStructure", cexpr, {}, []) | def emitOnError(self, name, onError, position): block = self.popProgram() key, expr = parseSubstitution(onError, position) cexpr = self.compileExpression(expr) if key == "text": self.emit("insertText", cexpr, []) else: assert key == "structure" self.emit("insertStructure", cexpr, attrDict, []) self.emitEndTag(name) handler = self.popProgram() self.emit("onError", block, handler) |
mod_since=DateTime(header).timeTime() last_mod =self._p_mtime | mod_since=int(DateTime(header).timeTime()) last_mod =int(self._p_mtime) | def index_html(self, REQUEST, RESPONSE): """ The default view of the contents of a File or Image. |
if m.find('/'): raise 'Redirect', ( "%s/%s" % (REQUEST['URL1'], m)) | if m.find('/') >= 0: prefix= m.startswith('/') and REQUEST['BASE0'] or REQUEST['URL1'] raise 'Redirect', ( "%s/%s" % (prefix, m)) | def manage_workspace(self, REQUEST): """Dispatch to first interface in manage_options """ options=self.filtered_manage_options(REQUEST) try: m=options[0]['action'] if m=='manage_workspace': raise TypeError except: raise Unauthorized, ( 'You are not authorized to view this object.') |
def declareProtected(self, permission_name, *names): | def declareProtected(self, permission_name, name, *names): | def declareProtected(self, permission_name, *names): """Declare names to be associated with a permission.""" self._setaccess(names, permission_name) |
self._setaccess(names, permission_name) | self._setaccess((name,) + names, permission_name) | def declareProtected(self, permission_name, *names): """Declare names to be associated with a permission.""" self._setaccess(names, permission_name) |
if not force: self.appendHeader('Vary', 'Accept-Encoding') | | def enableHTTPCompression(self,REQUEST={},force=0,disable=0,query=0): """Enable HTTP Content Encoding with gzip compression if possible |
Append a value to a cookie | Append a value to a header. | def appendHeader(self, name, value, delimiter=","): '''\ Append a value to a cookie Sets an HTTP return header "name" with value "value", appending it following a comma if there was a previous value set for the header. ''' headers=self.headers if headers.has_key(name): h=self.header[name] h="%s%s\n\t%s" % (h,delimiter,value) else: h=value self.setHeader(name,h) |
h=self.header[name] | h=headers[name] | def appendHeader(self, name, value, delimiter=","): '''\ Append a value to a cookie Sets an HTTP return header "name" with value "value", appending it following a comma if there was a previous value set for the header. ''' headers=self.headers if headers.has_key(name): h=self.header[name] h="%s%s\n\t%s" % (h,delimiter,value) else: h=value self.setHeader(name,h) |
main() | unittest.main(defaultTest='test_suite') | def test_suite(): return unittest.makeSuite(TALESTests) |
self.setHeader('content-length', len(self.body)) | | def insertBase(self, base_re_search=regex.compile('\(<base[\0- ]+[^>]+>\)', regex.casefold).search ): if (self.headers.has_key('content-type') and self.headers['content-type'] != 'text/html'): return |
raise exepctions.RuntimeError,"operator not valid: %s" % operator | raise RuntimeError,"operator not valid: %s" % operator | def _apply_index(self, request, cid='', type=type, None=None): """Apply the index to query parameters given in the request arg. |
w, h = struct.unpack(">LL", data[16:24]) | w, h = struct.unpack(">LL", data[16:24]) self.width=str(int(w)) self.height=str(int(h)) | def update_data(self, data, content_type=None, size=None): if content_type is not None: self.content_type=content_type if size is None: size=len(data) |
ts_results = indent_tab.search_group(rest, (1,2)) | ts_results = indent_tab(rest, (1,2)) | def untabify(aString): '''\ Convert indentation tabs to spaces. ''' result='' rest=aString while 1: ts_results = indent_tab.search_group(rest, (1,2)) if ts_results: start, grps = ts_results lnl=len(grps[0]) indent=len(grps[1]) result=result+rest[:start] rest="\n%s%s" % (' ' * ((indent/8+1)*8), rest[start+indent+1+lnl:]) else: return result+rest |
def indent_level(aString): | def indent_level(aString, indent_space=ts_regex.compile('\n\( *\)').search_group, ): | def indent_level(aString): '''\ Find the minimum indentation for a string, not counting blank lines. ''' start=0 text='\n'+aString indent=l=len(text) while 1: ts_results = indent_space.search_group(text, (1,2), start) if ts_results: start, grps = ts_results i=len(grps[0]) start=start+i+1 if start < l and text[start] != '\n': # Skip blank lines if not i: return (0,aString) if i < indent: indent = i else: return (indent,aString) |
ts_results = indent_space.search_group(text, (1,2), start) | ts_results = indent_space(text, (1,2), start) | def indent_level(aString): '''\ Find the minimum indentation for a string, not counting blank lines. ''' start=0 text='\n'+aString indent=l=len(text) while 1: ts_results = indent_space.search_group(text, (1,2), start) if ts_results: start, grps = ts_results i=len(grps[0]) start=start+i+1 if start < l and text[start] != '\n': # Skip blank lines if not i: return (0,aString) if i < indent: indent = i else: return (indent,aString) |
bullet=ts_regex.compile('[ \t\n]*[o*-][ \t\n]+\([^\0]*\)') example=ts_regex.compile('[\0- ]examples?:[\0- ]*$').search dl=ts_regex.compile('\([^\n]+\)[ \t]+--[ \t\n]+\([^\0]*\)') nl=ts_regex.compile('\n').search ol=ts_regex.compile('[ \t]*\(\([0-9]+\|[a-zA-Z]+\)[.)]\)+[ \t\n]+\([^\0]*\|$\)') olp=ts_regex.compile('[ \t]*([0-9]+)[ \t\n]+\([^\0]*\|$\)') | | def structure(list): if not list: return [] i=0 l=len(list) r=[] while i < l: sublen=paragraphs(list,i) i=i+1 r.append((list[i-1][1],structure(list[i:i+sublen]))) i=i+sublen return r |
def __init__(self, aStructuredString, level=0): | def __init__(self, aStructuredString, level=0, paragraph_divider=regex.compile('\(\n *\)+\n'), ): | def __init__(self, aStructuredString, level=0): '''Convert a structured text string into a structured text object. |
em =ts_regex.compile(ctag_prefix+(ctag_middle % (("*",)*6) )+ctag_suffix) strong=ts_regex.compile(ctag_prefix+(ctag_middl2 % (("*",)*8))+ctag_suffix) under =ts_regex.compile(ctag_prefix+(ctag_middle % (("_",)*6) )+ctag_suffix) code =ts_regex.compile(ctag_prefix+(ctag_middle % (("\'",)*6))+ctag_suffix) | | def __str__(self): return str(self.structure) |
def ctag(s): | def ctag(s, em=regex.compile( ctag_prefix+(ctag_middle % (("*",)*6) )+ctag_suffix), strong=regex.compile( ctag_prefix+(ctag_middl2 % (("*",)*8))+ctag_suffix), under=regex.compile( ctag_prefix+(ctag_middle % (("_",)*6) )+ctag_suffix), code=regex.compile( ctag_prefix+(ctag_middle % (("\'",)*6))+ctag_suffix), ): | def ctag(s): if s is None: s='' s=gsub(strong,'\\1<strong>\\2</strong>\\3',s) s=gsub(under, '\\1<u>\\2</u>\\3',s) s=gsub(code, '\\1<code>\\2</code>\\3',s) s=gsub(em, '\\1<em>\\2</em>\\3',s) return s |
extra_dl=ts_regex.compile("</dl>\n<dl>"), extra_ul=ts_regex.compile("</ul>\n<ul>"), extra_ol=ts_regex.compile("</ol>\n<ol>"), | extra_dl=regex.compile("</dl>\n<dl>"), extra_ul=regex.compile("</ul>\n<ul>"), extra_ol=regex.compile("</ol>\n<ol>"), | def __str__(self, extra_dl=ts_regex.compile("</dl>\n<dl>"), extra_ul=ts_regex.compile("</ul>\n<ul>"), extra_ol=ts_regex.compile("</ol>\n<ol>"), ): '''\ Return an HTML string representation of the structured text data. |
def _str(self,structure,level): | def _str(self,structure,level, bullet=ts_regex.compile('[ \t\n]*[o*-][ \t\n]+\([^\0]*\)' ).match_group, example=ts_regex.compile('[\0- ]examples?:[\0- ]*$' ).search, dl=ts_regex.compile('\([^\n]+\)[ \t]+--[ \t\n]+\([^\0]*\)' ).match_group, nl=ts_regex.compile('\n').search, ol=ts_regex.compile( '[ \t]*\(\([0-9]+\|[a-zA-Z]+\)[.)]\)+[ \t\n]+\([^\0]*\|$\)' ).match_group, olp=ts_regex.compile('[ \t]*([0-9]+)[ \t\n]+\([^\0]*\|$\)' ).match_group, ): | def _str(self,structure,level): r='' for s in structure: # print s[0],'\n', len(s[1]), '\n\n' ts_results = bullet.match_group(s[0], (1,)) if ts_results: p = ts_results[1] r=self.ul(r,p,self._str(s[1],level)) else: ts_results = ol.match_group(s[0], (3,)) if ts_results: p = ts_results[1] r=self.ol(r,p,self._str(s[1],level)) else: ts_results = olp.match_group(s[0], (1,)) if ts_results: p = ts_results[1] r=self.ol(r,p,self._str(s[1],level)) else: ts_results = dl.match_group(s[0], (1,2)) if ts_results: t,d = ts_results[1] r=self.dl(r,t,d,self._str(s[1],level)) else: if example(s[0]) >= 0 and s[1]: # Introduce an example, using pre tags: r=self.normal(r,s[0],self.pre(s[1])) else: if s[0][-2:]=='::' and s[1]: # Introduce an example, using pre tags: r=self.normal(r,s[0][:-1],self.pre(s[1])) else: |
ts_results = bullet.match_group(s[0], (1,)) | ts_results = bullet(s[0], (1,)) | def _str(self,structure,level): r='' for s in structure: # print s[0],'\n', len(s[1]), '\n\n' ts_results = bullet.match_group(s[0], (1,)) if ts_results: p = ts_results[1] r=self.ul(r,p,self._str(s[1],level)) else: ts_results = ol.match_group(s[0], (3,)) if ts_results: p = ts_results[1] r=self.ol(r,p,self._str(s[1],level)) else: ts_results = olp.match_group(s[0], (1,)) if ts_results: p = ts_results[1] r=self.ol(r,p,self._str(s[1],level)) else: ts_results = dl.match_group(s[0], (1,2)) if ts_results: t,d = ts_results[1] r=self.dl(r,t,d,self._str(s[1],level)) else: if example(s[0]) >= 0 and s[1]: # Introduce an example, using pre tags: r=self.normal(r,s[0],self.pre(s[1])) else: if s[0][-2:]=='::' and s[1]: # Introduce an example, using pre tags: r=self.normal(r,s[0][:-1],self.pre(s[1])) else: |
ts_results = ol.match_group(s[0], (3,)) | ts_results = ol(s[0], (3,)) | def _str(self,structure,level): r='' for s in structure: # print s[0],'\n', len(s[1]), '\n\n' ts_results = bullet.match_group(s[0], (1,)) if ts_results: p = ts_results[1] r=self.ul(r,p,self._str(s[1],level)) else: ts_results = ol.match_group(s[0], (3,)) if ts_results: p = ts_results[1] r=self.ol(r,p,self._str(s[1],level)) else: ts_results = olp.match_group(s[0], (1,)) if ts_results: p = ts_results[1] r=self.ol(r,p,self._str(s[1],level)) else: ts_results = dl.match_group(s[0], (1,2)) if ts_results: t,d = ts_results[1] r=self.dl(r,t,d,self._str(s[1],level)) else: if example(s[0]) >= 0 and s[1]: # Introduce an example, using pre tags: r=self.normal(r,s[0],self.pre(s[1])) else: if s[0][-2:]=='::' and s[1]: # Introduce an example, using pre tags: r=self.normal(r,s[0][:-1],self.pre(s[1])) else: |
ts_results = olp.match_group(s[0], (1,)) | ts_results = olp(s[0], (1,)) | def _str(self,structure,level): r='' for s in structure: # print s[0],'\n', len(s[1]), '\n\n' ts_results = bullet.match_group(s[0], (1,)) if ts_results: p = ts_results[1] r=self.ul(r,p,self._str(s[1],level)) else: ts_results = ol.match_group(s[0], (3,)) if ts_results: p = ts_results[1] r=self.ol(r,p,self._str(s[1],level)) else: ts_results = olp.match_group(s[0], (1,)) if ts_results: p = ts_results[1] r=self.ol(r,p,self._str(s[1],level)) else: ts_results = dl.match_group(s[0], (1,2)) if ts_results: t,d = ts_results[1] r=self.dl(r,t,d,self._str(s[1],level)) else: if example(s[0]) >= 0 and s[1]: # Introduce an example, using pre tags: r=self.normal(r,s[0],self.pre(s[1])) else: if s[0][-2:]=='::' and s[1]: # Introduce an example, using pre tags: r=self.normal(r,s[0][:-1],self.pre(s[1])) else: |
ts_results = dl.match_group(s[0], (1,2)) | ts_results = dl(s[0], (1,2)) | def _str(self,structure,level): r='' for s in structure: # print s[0],'\n', len(s[1]), '\n\n' ts_results = bullet.match_group(s[0], (1,)) if ts_results: p = ts_results[1] r=self.ul(r,p,self._str(s[1],level)) else: ts_results = ol.match_group(s[0], (3,)) if ts_results: p = ts_results[1] r=self.ol(r,p,self._str(s[1],level)) else: ts_results = olp.match_group(s[0], (1,)) if ts_results: p = ts_results[1] r=self.ol(r,p,self._str(s[1],level)) else: ts_results = dl.match_group(s[0], (1,2)) if ts_results: t,d = ts_results[1] r=self.dl(r,t,d,self._str(s[1],level)) else: if example(s[0]) >= 0 and s[1]: # Introduce an example, using pre tags: r=self.normal(r,s[0],self.pre(s[1])) else: if s[0][-2:]=='::' and s[1]: # Introduce an example, using pre tags: r=self.normal(r,s[0][:-1],self.pre(s[1])) else: |
(ts_regex.compile('&'), '&'), (ts_regex.compile("<"), '<' ), (ts_regex.compile(">"), '>' ), (ts_regex.compile('"'), '"'))): | (regex.compile('&'), '&'), (regex.compile("<"), '<' ), (regex.compile(">"), '>' ), (regex.compile('"'), '"') )): | def html_quote(v, character_entities=( (ts_regex.compile('&'), '&'), (ts_regex.compile("<"), '<' ), (ts_regex.compile(">"), '>' ), (ts_regex.compile('"'), '"'))): #" text=str(v) for re,name in character_entities: text=gsub(re,name,text) return text |
if not self._unindex.has_key(id): self._unindex[id] = OOSet() self._unindex[id].insert( (comp,level) ) | | def insertEntry(self,comp,id,level): """ k is a path component (generated by splitPath() ) v is the documentId level is the level of the component inside the path """ |
return self.setBody( | tb=self.setBody( | def exception(self, fatal=0): |
if type(b) is not types.StringType or regex.search('[ \t\n]',b) < 0: return self.setBody( | elif type(b) is not types.StringType or regex.search('[a-zA-Z]>',b) < 0: tb=self.setBody( | def exception(self, fatal=0): |
if self.isHTML(b): return self.setBody(b+self._traceback(t,v,tb)) return self.setBody((str(t),b+self._traceback(t,v,tb))) | elif self.isHTML(b): tb=self.setBody(b+self._traceback(t,v,tb)) else: tb=self.setBody((str(t),b+self._traceback(t,v,tb))) return tb | def exception(self, fatal=0): |
'Invalid cookie attribute, %s' % name) | 'Invalid cookie attribute, <em>%s</em>' % name) | def _cookie_list(self): |
try: roles=self.__roles__ | try: roles=self.aq_self.__roles__ | def selectedRoles(self): |
try: del self.__roles__ except: pass | if hasattr(self,'aq_self'): try: del self.aq_self.__roles__ except: pass | def manage_editRoles(self,REQUEST,acl_type='A',acl_roles=[]): """ """ |
def oldmanage_editRoles(self,REQUEST,roles=[]): try: del self.__roles__ except: pass if not roles: return self.manage_rolesForm(self,REQUEST) if roles==['Public',]: self.__roles__=None return self.manage_rolesForm(self,REQUEST) if ('Acquire' in roles) or ('Public' in roles): raise 'Bad Request',('<EM>Acquired</EM> and <EM>Public</EM> ' \ 'cannot be combined with other roles!') self.__roles__=roles return self.manage_rolesForm(self,REQUEST) | | def oldmanage_editRoles(self,REQUEST,roles=[]): |
try: del self.__roles__ except: pass | if hasattr(self,'aq_self'): try: del self.aq_self.__roles__ except: pass | def _setRoles(self,acl_type,acl_roles): |
def publish(script,path_info,u=None,p=None,d=None,t=None,e={},s=None): | def publish(script,path_info,u=None,p=None,d=None,t=None,e={},s=None,pm=0): | def publish(script,path_info,u=None,p=None,d=None,t=None,e={},s=None): import sys, os, getopt, string profile=p debug=d timeit=t silent=s if not script: script='+Main' if script[0]=='+': script='../../lib/python/'+script[1:] env=e env['SERVER_NAME']='bobo.server' env['SERVER_PORT']='80' env['REQUEST_METHOD']='GET' env['REMOTE_ADDR']='204.183.226.81 ' env['REMOTE_HOST']='bobo.remote.host' env['HTTP_USER_AGENT']='Bobo/%s' % __version__ env['HTTP_HOST']='ninny.digicool.com:8081 ' env['SERVER_SOFTWARE']='Bobo/%s' % __version__ env['SERVER_PROTOCOL']='HTTP/1.0 ' env['HTTP_ACCEPT']='image/gif, image/x-xbitmap, image/jpeg, */* ' env['SERVER_HOSTNAME']='bobo.server.host' env['GATEWAY_INTERFACE']='CGI/1.1 ' env['SCRIPT_NAME']=script p=string.split(path_info,'?') if len(p)==1: env['PATH_INFO'] = p[0] elif len(p)==2: [env['PATH_INFO'], env['QUERY_STRING']]=p else: raise TypeError, '' if u: import base64 env['HTTP_AUTHORIZATION']="Basic %s" % base64.encodestring(u) dir,file=os.path.split(script) cdir=os.path.join(dir,'Components') sys.path[0:0]=[dir,cdir,os.path.join(cdir,sys.platform)] # We delay import to here, in case cgi_module_publisher is part of the # application distribution. from cgi_module_publisher import publish_module if profile: import __main__ __main__.publish_module=publish_module __main__.file=file __main__.env=env print profile publish_module(file, environ=env, stdout=open('/dev/null','w')) c=("for i in range(%s): " "publish_module(file, environ=env, stdout=open('/dev/null','w'))" % repeat_count ) if profile: run(c,profile) else: run(c) elif debug: import cgi_module_publisher from cgi_module_publisher import ModulePublisher import pdb class Pdb(pdb.Pdb): def do_pub(self,arg): if hasattr(self,'done_pub'): print 'pub already done.' else: self.do_s('') self.do_s('') self.do_c('') self.done_pub=1 def do_ob(self,arg): if hasattr(self,'done_ob'): print 'ob already done.' else: self.do_pub('') self.do_c('') self.done_ob=1 import codehack db=Pdb() def fbreak(db,meth,codehack=codehack): try: meth=meth.im_func except AttributeError: pass code=meth.func_code lineno = codehack.getlineno(code) filename = code.co_filename db.set_break(filename,lineno) fbreak(db,ModulePublisher.publish) fbreak(db,ModulePublisher.call_object) #fbreak(db,cgi_module_publisher.new_find_object) #fbreak(db,cgi_module_publisher.old_find_object) dbdata={'breakpoints':(), 'env':env} b='' try: b=open('.bobodb','r').read() except: pass if b: exec b in dbdata for b in dbdata['breakpoints']: if type(b) is type(()): apply(db.set_break,b) else: fbreak(db,b) db.prompt='pdb> ' # db.set_continue() print ( '* Type "s<cr>c<cr>" to jump to beginning of real publishing process.\n' '* Then type c<cr> to jump to the beginning of the URL traversal\n' ' algorithm.\n' '* Then type c<cr> to jump to published object call.' ) db.run('publish_module(file,environ=env,debug=1)', cgi_module_publisher.__dict__, {'file':file, 'env':env}) elif timeit: stdout=sys.stdout t= time(publish_module,file, stdout=open('/dev/null','w'), environ=env) stdout.write('%s milliseconds\n' % t) else: if silent: stdout=open('/dev/null','w') else: stdout=sys.stdout publish_module(file, environ=env, stdout=stdout) print '\n%s\n' % ('_'*60) |
if Globals.DatabaseVersion!='3': return | if not dbVersionEquals('3'): return | def changeClassId(self, newid=None): if Globals.DatabaseVersion!='3': return if newid is None: newid=self._new_class_id() self._unregister() if newid: if not newid[:1] == '*': newid='*'+newid self.setClassAttr('__module__', newid) self._register() |
if Globals.DatabaseVersion!='3': return | if not dbVersionEquals('3'): return | def _register(self): if Globals.DatabaseVersion!='3': return z=self._zclass_ class_id=z.__module__ if not class_id: return |
if Globals.DatabaseVersion!='3': return | if not dbVersionEquals('3'): return | def _unregister(self): if Globals.DatabaseVersion!='3': return class_id=self._zclass_.__module__ if not class_id: return globals=self._p_jar.root()['ZGlobals'] if globals.has_key(class_id): del globals[class_id] |
if Globals.DatabaseVersion!='3': return | if not dbVersionEquals('3'): return | def manage_afterAdd(self, item, container): if Globals.DatabaseVersion!='3': return if not self._zclass_.__module__: self.setClassAttr('__module__', self._new_class_id()) self._register() self.propertysheets.methods.manage_afterAdd(item, container) |
if Globals.DatabaseVersion!='3': return | if not dbVersionEquals('3'): return | def manage_beforeDelete(self, item, container): if Globals.DatabaseVersion!='3': return self._unregister() self.propertysheets.methods.manage_beforeDelete(item, container) |
if rawdata[j:j+2] == "/>": return j + 2 | if next == "/": s = rawdata[j:j+2] if s == "/>": return j + 2 if s == "/": return -1 self.updatepos(i, j + 1) raise HTMLParseError("malformed empty start tag", self.getpos()) | def check_for_whole_start_tag(self, i): rawdata = self.rawdata m = locatestarttagend.match(rawdata, i) if m: j = m.end() next = rawdata[j:j+1] if next == ">": return j + 1 if rawdata[j:j+2] == "/>": return j + 2 if next == "": # end of input return -1 if next in ("abcdefghijklmnopqrstuvwxyz=" "ABCDEFGHIJKLMNOPQRSTUVWXYZ"): # end of input in or before attribute value return -1 self.updatepos(i, j) raise HTMLParseError("malformed start tag", self.getpos()) raise AssertionError("we should not gt here!") |
if next in ("abcdefghijklmnopqrstuvwxyz=" | if next in ("abcdefghijklmnopqrstuvwxyz=/" | def check_for_whole_start_tag(self, i): rawdata = self.rawdata m = locatestarttagend.match(rawdata, i) if m: j = m.end() next = rawdata[j:j+1] if next == ">": return j + 1 if rawdata[j:j+2] == "/>": return j + 2 if next == "": # end of input return -1 if next in ("abcdefghijklmnopqrstuvwxyz=" "ABCDEFGHIJKLMNOPQRSTUVWXYZ"): # end of input in or before attribute value return -1 self.updatepos(i, j) raise HTMLParseError("malformed start tag", self.getpos()) raise AssertionError("we should not gt here!") |
Shared.DC.xml.xmllib.XMLParser.__init__(self) self.root=None self.node=None | pass | def __init__(self): Shared.DC.xml.xmllib.XMLParser.__init__(self) self.root=None self.node=None |
if not data.startswith("<?xml"): data = '<?xml version="1.0" ?>\n' + data self.feed(data) self.close() return self.root def add(self, node): self.node.addNode(node) def push(self, node): self.node.addNode(node) self.node=self.node.__nodes__[-1] def pop(self): self.node=self.node.aq_parent def unknown_starttag(self, name, attrs): node=Element(name, attrs) self.push(node) self.node.fixup() def unknown_endtag(self, name): self.pop() def handle_xml(self, encoding, stdalone): self.root=Document(encoding, stdalone) self.node=self.root def handle_doctype(self, tag, pubid, syslit, data): pass def handle_entity(self, name, strval, pubid, syslit, ndata): self.add(Entity(name, strval, pubid, syslit, ndata)) def handle_cdata(self, data): self.add(CData(data)) def handle_proc(self, name, data): self.add(ProcInst(name, data)) def handle_comment(self, data): self.add(Comment(data)) def handle_data(self, data): self.add(Text(data)) def unknown_entityref(self, data): self.add(EntityRef(data)) def escape(data, rmap={}): data=data.replace( "&", "&") data=data.replace( "<", "<") data=data.replace( ">", ">") for key, val in rmap.items(): data=data.replace( key, val) return data def remap(data, dict={'DAV:': 'd'}): root=XmlParser().parse(data) root.elements()[0].remap(dict, 0) return root.toxml() | self.dom=minidom.parseString(data) return Node(self.dom) | def parse(self, data): # prepending a XML preample to make xmllib happy # (Collector #863) if not data.startswith("<?xml"): data = '<?xml version="1.0" ?>\n' + data self.feed(data) self.close() return self.root |
manage_workspace__roles__=('Anonymous',) | manage_workspace__roles__=('Authenticated',) | def filtered_manage_options(self, REQUEST=None): |
from AccessControl.User import nobody if aq_base(getSecurityManager().getUser()) is aq_base(nobody): raise 'Unauthorized', ( 'You are not authorized to view this object.') | def manage_workspace(self, REQUEST): """Dispatch to first interface in manage_options """ from AccessControl.User import nobody if aq_base(getSecurityManager().getUser()) is aq_base(nobody): # No authentication credentials presented. raise 'Unauthorized', ( 'You are not authorized to view this object.') options=self.filtered_manage_options(REQUEST) try: m=options[0]['action'] if m=='manage_workspace': raise TypeError except: raise 'Unauthorized', ( 'You are not authorized to view this object.<p>') |
|
def manage_cutObjects(self, ids, REQUEST=None): | def manage_cutObjects(self, ids=None, REQUEST=None): | def manage_cutObjects(self, ids, REQUEST=None): """Put a reference to the objects named in ids in the clip board""" if type(ids) is type(''): ids=[ids] oblist=[] for id in ids: ob=self._getOb(id) if not ob.cb_isMoveable(): raise CopyError, eNotSupported % id m=Moniker.Moniker(ob) oblist.append(m.dump()) cp=(1, oblist) cp=_cb_encode(cp) if REQUEST is not None: resp=REQUEST['RESPONSE'] resp.setCookie('__cp', cp, path='%s' % REQUEST['SCRIPT_NAME']) return self.manage_main(self, REQUEST, cb_dataValid=1) return cp |
def manage_copyObjects(self, ids, REQUEST=None, RESPONSE=None): | def manage_copyObjects(self, ids=None, REQUEST=None, RESPONSE=None): | def manage_copyObjects(self, ids, REQUEST=None, RESPONSE=None): """Put a reference to the objects named in ids in the clip board""" if type(ids) is type(''): ids=[ids] oblist=[] for id in ids: ob=self._getOb(id) if not ob.cb_isCopyable(): raise CopyError, eNotSupported % id m=Moniker.Moniker(ob) oblist.append(m.dump()) cp=(0, oblist) cp=_cb_encode(cp) if REQUEST is not None: resp=REQUEST['RESPONSE'] resp.setCookie('__cp', cp, path='%s' % REQUEST['SCRIPT_NAME']) return self.manage_main(self, REQUEST, cb_dataValid=1) return cp |
return [] | return ['Shared'] | def getRoles(self): |
(path and hasattr(self.aq_explicit, path[-1])) ): | (path and hasattr(self.aq_base, path[-1])) ): | def __before_publishing_traverse__(self, self2, request): path = request['TraversalRequestNameStack'] names = self.getBindingAssignments() if (not names.isNameAssigned('name_subpath') or (path and hasattr(self.aq_explicit, path[-1])) ): return subpath = path[:] path[:] = [] subpath.reverse() request.set('traverse_subpath', subpath) |
def constructSessionDataManager(self, id, title='', path=None, automatic=None, | def constructSessionDataManager(self, id, title='', path=None, requestName=None, | def constructSessionDataManager(self, id, title='', path=None, automatic=None, REQUEST=None): """ """ ob = SessionDataManager(id, path, title, automatic) self._setObject(id, ob) if REQUEST is not None: return self.manage_main(self, REQUEST, update_menu=1) |
ob = SessionDataManager(id, path, title, automatic) | ob = SessionDataManager(id, path, title, requestName) | def constructSessionDataManager(self, id, title='', path=None, automatic=None, REQUEST=None): """ """ ob = SessionDataManager(id, path, title, automatic) self._setObject(id, ob) if REQUEST is not None: return self.manage_main(self, REQUEST, update_menu=1) |
def __init__(self, id, path=None, title='', automatic=None): | def __init__(self, id, path=None, title='', requestName=None): | def __init__(self, id, path=None, title='', automatic=None): self.id = id self.setContainerPath(path) self.setTitle(title) |
if automatic: self._requestSessionName='SESSION' | if requestName: self._requestSessionName=requestName | def __init__(self, id, path=None, title='', automatic=None): self.id = id self.setContainerPath(path) self.setTitle(title) |
def manage_changeSDM(self, title, path=None, automatic=None, REQUEST=None): | def manage_changeSDM(self, title, path=None, requestName=None, REQUEST=None): | def manage_changeSDM(self, title, path=None, automatic=None, REQUEST=None): """ """ self.setContainerPath(path) self.setTitle(title) if automatic: self.updateTraversalData('SESSION') else: self.updateTraversalData(None) if REQUEST is not None: return self.manage_sessiondatamgr(self, REQUEST) |
if automatic: self.updateTraversalData('SESSION') | if requestName: self.updateTraversalData(requestName) | def manage_changeSDM(self, title, path=None, automatic=None, REQUEST=None): """ """ self.setContainerPath(path) self.setTitle(title) if automatic: self.updateTraversalData('SESSION') else: self.updateTraversalData(None) if REQUEST is not None: return self.manage_sessiondatamgr(self, REQUEST) |
security.declareProtected(MGMT_SCREEN_PERM, 'getAutomatic') def getAutomatic(self): """ """ if hasattr(self,'_hasTraversalHook'): return 1 return 0 | security.declareProtected(MGMT_SCREEN_PERM, 'getrequestName') def getrequestName(self): """ """ return self._requestSessionName or '' | def _getSessionDataContainer(self): """ Do not cache the results of this call. Doing so breaks the transactions for mounted storages. """ if self.obpath is None: err = 'Session data container is unspecified in %s' % self.getId() if DEBUG: LOG('Session Tracking', 0, err) raise SessionIdManagerErr, err # return an external data container try: # This should arguably use restrictedTraverse, but it # currently fails for mounted storages. This might # be construed as a security hole, albeit a minor one. # unrestrictedTraverse is also much faster. if DEBUG and not hasattr(self, '_v_wrote_dc_type'): args = string.join(self.obpath, '/') LOG('Session Tracking', 0, 'External data container at %s in use' % args) self._v_wrote_dc_type = 1 return self.unrestrictedTraverse(self.obpath) except: raise SessionDataManagerErr, ( "External session data container '%s' not found." % string.join(self.obpath,'/') ) |
for path in self.path2docid.keys(f.name): | for path in list(self.path2docid.keys(f.name)): | def updatefolder(self, f, msgs): self.watchfolders[f.name] = self.getmtime(f.name) for n in msgs: path = "%s/%s" % (f.name, n) docid = self.path2docid.get(path, 0) if docid and self.getmtime(path) == self.doctimes.get(docid, 0): print "unchanged", docid, path continue docid = self.newdocid(path) try: m = f.openmessage(n) except IOError: print "disappeared", docid, path self.unindexpath(path) continue text = self.getmessagetext(m, f.name) if not text: self.unindexpath(path) continue print "indexing", docid, path self.index.index_text(docid, text) self.maycommit() # Remove messages from the folder that no longer exist for path in self.path2docid.keys(f.name): if not path.startswith(f.name + "/"): break if self.getmtime(path) == 0: self.unindexpath(path) print "done." |
(('&'), '&'), (("<"), '<' ), ((">"), '>' ), (('"'), '"'))): | (('&'), '&'), (('<'), '<' ), (('>'), '>' ), (('\213'), '<' ), (('\233'), '>' ), (('"'), '"'))): | def html_quote(v, name='(Unknown name)', md={}, character_entities=( (('&'), '&'), (("<"), '<' ), ((">"), '>' ), (('"'), '"'))): #" text=str(v) for re,name in character_entities: if find(text, re) >= 0: text=join(split(text,re),name) return text |
def _roles_debug(self,hier,expected_roles,got_roles): | def _roles_debug(self,hier,got_roles,expected_roles): | def _roles_debug(self,hier,expected_roles,got_roles): |
r=apply(HTML.__call__, (self, self, REQUEST), kw) | r=apply(HTML.__call__, (self, bself, REQUEST), kw) | def __call__(self, client=None, REQUEST={}, RESPONSE=None, **kw): """Render the document given a client object, REQUEST mapping, Response, and key word arguments.""" kw['document_id'] =self.id kw['document_title']=self.title if client is None: # Called as subtemplate, so don't need error propigation! r=apply(HTML.__call__, (self, self, REQUEST), kw) if RESPONSE is None: return r return decapitate(r, RESPONSE) try: r=apply(HTML.__call__, (self, (client, self), REQUEST), kw) except: if self.id()=='standard_error_message': raise sys.exc_type, sys.exc_value, sys.exc_traceback return self.raise_standardErrorMessage(client, REQUEST) if RESPONSE is None: return r RESPONSE.setHeader('Last-Modified', rfc1123_date(self._p_mtime)) return decapitate(r, RESPONSE) |
try: r=apply(HTML.__call__, (self, (client, self), REQUEST), kw) | try: r=apply(HTML.__call__, (self, (client, bself), REQUEST), kw) | def __call__(self, client=None, REQUEST={}, RESPONSE=None, **kw): """Render the document given a client object, REQUEST mapping, Response, and key word arguments.""" kw['document_id'] =self.id kw['document_title']=self.title if client is None: # Called as subtemplate, so don't need error propigation! r=apply(HTML.__call__, (self, self, REQUEST), kw) if RESPONSE is None: return r return decapitate(r, RESPONSE) try: r=apply(HTML.__call__, (self, (client, self), REQUEST), kw) except: if self.id()=='standard_error_message': raise sys.exc_type, sys.exc_value, sys.exc_traceback return self.raise_standardErrorMessage(client, REQUEST) if RESPONSE is None: return r RESPONSE.setHeader('Last-Modified', rfc1123_date(self._p_mtime)) return decapitate(r, RESPONSE) |
try: _localzone =_cache._zmap[lower(tzname[0])] except: _localzone='' | try: _localzone=_cache._zmap[lower(tzname[0])] except: try: t=time() localzone=mktime(gmtime(t))-t localzone=int(round(-localzone/(60*60))) if localzone >= 0: zn='+%d' % localzone else: lz=str(localzone) _localzone=_cache._zmap[lower('GMT%s' % lz)] except: _localzone='' | def __init__(self,*args): """Return a new date-time object |
security.declareProtected('Edit ReStructuredText', 'manage_editForm') | security.declareProtected('Edit ReStructuredText', 'manage_main') | def source_txt(self, REQUEST=None): ''' Getting the source text ''' REQUEST.RESPONSE.setHeader('content-type', 'text/plain; charset=%s' % self.input_encoding) return self.source |
if unicode and is8bit(data): | if unicode and encoding and is8bit(data): | def _decode(data, encoding, is8bit=re.compile("[\x80-\xff]").search): # decode non-ascii string (if possible) if unicode and is8bit(data): data = unicode(data, encoding) return data |
__version__ = "0.9.9" | def escape(s, replace=string.replace): s = replace(s, "&", "&") s = replace(s, "<", "<") return replace(s, ">", ">",) MAXINT = 2L**31-1 MININT = -2L**31 if unicode: def _stringify(string): try: return str(string) except UnicodeError: return string else: def _stringify(string): return string __version__ = "1.0.0" | def _decode(data, encoding, is8bit=re.compile("[\x80-\xff]").search): # decode non-ascii string (if possible) if unicode and is8bit(data): data = unicode(data, encoding) return data |
class Error: pass | class Error(Exception): """Base class for client errors.""" def __str__(self): return repr(self) | def _decode(data, encoding, is8bit=re.compile("[\x80-\xff]").search): # decode non-ascii string (if possible) if unicode and is8bit(data): data = unicode(data, encoding) return data |
def datetime(data): value = DateTime() value.decode(data) return value | def encode(self, out): out.write("<value><dateTime.iso8601>") out.write(self.value) out.write("</dateTime.iso8601></value>\n") |
|
if sgmlop: class FastParser: | try: import _xmlrpclib FastParser = _xmlrpclib.Parser FastUnmarshaller = _xmlrpclib.Unmarshaller except (AttributeError, ImportError): FastParser = FastUnmarshaller = None try: import sgmlop if not hasattr(sgmlop, "XMLParser"): raise ImportError except ImportError: SgmlopParser = None else: class SgmlopParser: | def encode(self, out): import base64, StringIO out.write("<value><base64>\n") base64.encode(StringIO.StringIO(self.data), out) out.write("</base64></value>\n") |
FastParser = None class SlowParser(xmllib.XMLParser): | class ExpatParser: def __init__(self, target): self._parser = parser = expat.ParserCreate(None, None) self._target = target parser.StartElementHandler = target.start parser.EndElementHandler = target.end parser.CharacterDataHandler = target.data encoding = None if not parser.returns_unicode: encoding = "utf-8" target.xml(encoding, None) def feed(self, data): self._parser.Parse(data, 0) def close(self): self._parser.Parse("", 1) del self._target, self._parser class SlowParser: """Default XML parser (based on xmllib.XMLParser).""" | def handle_entityref(self, entity): # <string> entity try: self.handle_data(self.entity[entity]) except KeyError: self.handle_data("&%s;" % entity) |
xmllib.XMLParser.__init__(self) | try: xmllib.XMLParser.__init__(self, accept_utf8=1) except TypeError: xmllib.XMLParser.__init__(self) | def __init__(self, target): self.handle_xml = target.xml self.unknown_starttag = target.start self.handle_data = target.data self.unknown_endtag = target.end xmllib.XMLParser.__init__(self) |
self.write("<value><double>%s</double></value>\n" % value) | self.write("<value><double>%s</double></value>\n" % repr(value)) | def dump_double(self, value): self.write("<value><double>%s</double></value>\n" % value) |