Columns: rem (string, 0 to 322k chars), add (string, 0 to 2.05M chars), context (string, 8 to 228k chars). Each row below gives the removed code (rem), the replacement code (add), and the surrounding function (context).
self._debug("comment %r", data)
debug(FILTER, "%s comment %r", self, data)
def comment (self, data):
    """a comment; accept only non-empty comments"""
    if not (self.comments and data):
        return
    self._debug("comment %r", data)
    item = [COMMENT, data]
    self.htmlparser.tagbuf.append(item)
self._debug("doctype %r", data)
debug(FILTER, "%s doctype %r", self, data)
def doctype (self, data):
    self._debug("doctype %r", data)
    return self._data("<!DOCTYPE%s>" % data)
self._debug("pi %r", data)
debug(FILTER, "%s pi %r", self, data)
def pi (self, data):
    self._debug("pi %r", data)
    return self._data("<?%s?>" % data)
self._debug("startElement %r", tag)
debug(FILTER, "%s startElement %r", self, tag) if self._is_waiting([STARTTAG, tag, attrs]): return
def startElement (self, tag, attrs):
    """We get a new start tag. New rules could be appended to the
    pending rules. No rules can be removed from the list."""
    # default data
    self._debug("startElement %r", tag)
    tag = check_spelling(tag, self.url)
    if self.stackcount:
        if self.stackcount[-1][0] == tag:
            self.stackcount[-1][1] += 1
    if tag == "meta":
        if attrs.get('http-equiv', '').lower() == 'content-rating':
            rating = resolve_html_entities(attrs.get('content', ''))
            url, rating = rating_import(url, rating)
            # note: always put this in the cache, since this overrides
            # any http header setting, and page content changes more
            # often
            rating_add(url, rating)
    elif tag == "body":
        if self.ratings:
            # headers finished, check rating data
            for rule in self.ratings:
                msg = rating_allow(self.url, rule)
                if msg:
                    raise FilterRating(msg)
            self.ratings = []
    elif tag == "base" and attrs.has_key('href'):
        self.base_url = attrs['href']
        # some base urls are just the host name, eg. www.imadoofus.com
        if not urllib.splittype(self.base_url)[0]:
            self.base_url = "%s://%s" % \
                (urllib.splittype(self.url)[0], self.base_url)
        self._debug("using base url %r", self.base_url)
    # search for and prevent known security flaws in HTML
    self.security.scan_start_tag(tag, attrs, self)
    # look for filter rules which apply
    self._filterStartElement(tag, attrs)
    # if rule stack is empty, write out the buffered data
    if not self.rulestack and not self.javascript:
        self.htmlparser.tagbuf2data()
self._debug("using base url %r", self.base_url)
debug(FILTER, "%s using base url %r", self, self.base_url)
self._filterStartElement(tag, attrs)
self.filterStartElement(tag, attrs)
def _filterStartElement (self, tag, attrs):
def filterStartElement (self, tag, attrs):
def _filterStartElement (self, tag, attrs):
    """filter the start element according to filter rules"""
    rulelist = []
    filtered = False
    item = [STARTTAG, tag, attrs]
    for rule in self.rules:
        if rule.match_tag(tag) and rule.match_attrs(attrs):
            self._debug("matched rule %r on tag %r", rule.title, tag)
            if rule.start_sufficient:
                item = rule.filter_tag(tag, attrs)
                filtered = True
                if item[0] == STARTTAG and item[1] == tag:
                    foo, tag, attrs = item
                    # give'em a chance to replace more than one attribute
                    continue
                else:
                    break
            else:
                self._debug("put on buffer")
                rulelist.append(rule)
    if rulelist:
        # remember buffer position for end tag matching
        pos = len(self.htmlparser.tagbuf)
        self.rulestack.append((pos, rulelist))
        self.stackcount.append([tag, 1])
    if filtered:
        # put filtered item on tag buffer
        self.htmlparser.tagbuf.append(item)
    elif self.javascript:
        # if it's not yet filtered, try filter javascript
        self._jsStartElement(tag, attrs)
    else:
        # put original item on tag buffer
        self.htmlparser.tagbuf.append(item)
self._debug("matched rule %r on tag %r", rule.title, tag)
debug(FILTER, "%s matched rule %r on tag %r", self, rule.title, tag)
self._debug("put on buffer")
debug(FILTER, "%s put rule %r on buffer", self, rule.title)
self._jsStartElement(tag, attrs)
self.jsStartElement(tag, attrs)
self._debug("endElement %r", tag)
debug(FILTER, "%s endElement %r", self, tag) if self._is_waiting([ENDTAG, tag]): return
def endElement (self, tag):
    """We know the following: if a rule matches, it must be the one
    on the top of the stack. So we look only at the top rule.
if not self._filterEndElement(tag):
if not self.filterEndElement(tag):
self._jsEndElement(item)
self.jsEndElement(item)
elif self.headers.get('Content-Type') != gm[0]:
elif self.headers.get('Content-Type') != gm[0] and \
        gm[0] in _fix_content_types:
def process_headers (self):
    # Headers are terminated by a blank line .. now in the regexp,
    # we want to say it's either a newline at the beginning of
    # the document, or it's a lot of headers followed by two newlines.
    # The cleaner alternative would be to read one line at a time
    # until we get to a blank line...
    m = re.match(r'^((?:[^\r\n]+\r?\n)*\r?\n)', self.recv_buffer)
    if not m:
        return
    # handle continue requests (XXX should be in process_response?)
    response = self.response.split()
    if response and response[1] == '100':
        # it's a Continue request, so go back to waiting for headers
        # XXX for HTTP/1.1 clients, forward this
        self.state = 'response'
        return
    # filter headers
    self.headers = applyfilter(FILTER_RESPONSE_HEADER,
        rfc822.Message(StringIO(self.read(m.end()))),
        attrs=self.nofilter)
    #debug(HURT_ME_PLENTY, "S/Headers", `self.headers.headers`)
    # check content-type against our own guess
    gm = mimetypes.guess_type(self.document, None)
    if gm[0]:
        # guessed an own content type
        if not self.headers.has_key('Content-Type'):
            self.headers['Content-Type'] = gm[0]
            print >>sys.stderr, _("Warning: %s guessed Content-Type (%s)") % \
                (self.url, gm[0])
        elif self.headers.get('Content-Type') != gm[0]:
            print >>sys.stderr, _("Warning: %s guessed Content-Type (%s) != server Content-Type (%s)") % \
                (self.url, gm[0], self.headers.get('Content-Type'))
            self.headers['Content-Type'] = gm[0]
    if gm[1]:
        # guessed an own encoding type
        if not self.headers.has_key('Content-Encoding'):
            self.headers['Content-Encoding'] = gm[1]
            print >>sys.stderr, _("Warning: %s guessed Content-Encoding (%s)") % \
                (self.url, gm[1])
        elif self.headers.get('Content-Encoding') != gm[1]:
            print >>sys.stderr, _("Warning: %s guessed Content-Encoding (%s) != server Content-Encoding (%s)") % \
                (self.url, gm[1], self.headers.get('Content-Encoding'))
            # only fix html content type
            if gm[1] in _fix_content_types:
                self.headers['Content-Encoding'] = gm[1]
    # will content be rewritten?
    rewrite = None
    for ro in config['mime_content_rewriting']:
        if ro.match(self.headers.get('Content-Type', '')):
            rewrite = "True"
            break
    # add client accept-encoding value
    self.headers['Accept-Encoding'] = self.client.compress
    if self.headers.has_key('Content-Length'):
        self.bytes_remaining = int(self.headers['Content-Length'])
        #debug(HURT_ME_PLENTY, "%d bytes remaining"%self.bytes_remaining)
        if rewrite:
            remove_headers(self.headers, ['Content-Length'])
    else:
        self.bytes_remaining = None
if gm[1] in _fix_content_types:
    self.headers['Content-Encoding'] = gm[1]
self.headers['Content-Encoding'] = gm[1]
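A note on the guess used in process_headers above: mimetypes.guess_type returns a (type, encoding) pair where either slot may be None, which is why gm[0] and gm[1] are checked separately. A minimal standard-library illustration, independent of the WebCleaner code:

import mimetypes

# (content type, encoding); a '.tar.gz' suffix yields an encoding of 'gzip'
print mimetypes.guess_type('archive.tar.gz')   # ('application/x-tar', 'gzip')
print mimetypes.guess_type('README')           # (None, None)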
f = file(self['configfile'], 'w')
f = file(self.configfile, 'w')
def write_proxyconf (self):
    """write proxy configuration"""
    f = file(self['configfile'], 'w')
    f.write("""<?xml version="1.0" encoding="%s"?>
debug(GUI, "Translator catalog %s", str(translator._catalog))
def __init__ (self, client, url, form, protocol, status=200,
              msg=i18n._('Ok'), context={},
              headers={'Content-Type': 'text/html'}):
    self.client = client
    # we pretend to be the server
    self.connected = True
    try:
        lang = i18n.get_headers_lang(headers)
        # get the template filename
        path, dirs, lang = get_template_url(url, lang)
        # do not rely on content-type header value
        if path.endswith('.html'):
            headers['Content-Type'] = 'text/html'
            f = file(path)
            # get TAL context
            context = get_context(dirs, form, context, lang)
            # get translator
            translator = gettext.translation(Name, LocaleDir, [lang],
                                             fallback=True)
            debug(GUI, "Using translator %s", str(translator.info()))
            debug(GUI, "Translator catalog %s", str(translator._catalog))
            # expand template
            data = expand_template(f, context, translator=translator)
        else:
            f = file(path, 'rb')
            data = f.read()
    except IOError, e:
        exception(GUI, "Wrong path `%s'", url)
        # XXX this can actually lead to a maximum recursion
        # error when client.error caused the exception
        return client.error(404, i18n._("Not Found"))
    except:
        # catch all other exceptions and report internal error
        exception(GUI, "Template error")
        return client.error(500, i18n._("Internal Error"))
    f.close()
    # write response
    self.put_response(data, protocol, status, msg, headers)
if compobj:
    header = compobj['header']
    if header:
        compobj['header'] = ''
        wc.log.debug(wc.LOG_FILTER, 'writing gzip header')
    compobj['size'] += len(data)
    compobj['crc'] = zlib.crc32(data, compobj['crc'])
    data = "%s%s" % (header, compobj['compressor'].compress(data))
header = compobj['header']
if header:
    compobj['header'] = ''
    wc.log.debug(wc.LOG_FILTER, 'writing gzip header')
compobj['size'] += len(data)
compobj['crc'] = zlib.crc32(data, compobj['crc'])
data = "%s%s" % (header, compobj['compressor'].compress(data))
def filter (self, data, **attrs):
    """Compress the string s. Note that compression state is saved
    outside of this function in the compression object.
    """
    if self.init_compressor:
        self.set_encoding_header(attrs)
        self.init_compressor = False
    if not attrs.has_key('compressobj'):
        return data
    compobj = attrs['compressobj']
    if compobj:
        header = compobj['header']
        if header:
            compobj['header'] = ''
            wc.log.debug(wc.LOG_FILTER, 'writing gzip header')
        compobj['size'] += len(data)
        compobj['crc'] = zlib.crc32(data, compobj['crc'])
        data = "%s%s" % (header, compobj['compressor'].compress(data))
    return data
if compobj:
    header = compobj['header']
    if header:
        wc.log.debug(wc.LOG_FILTER, 'final writing gzip header')
        pass
    if data:
        compobj['size'] += len(data)
        compobj['crc'] = zlib.crc32(data, compobj['crc'])
        data = "%s%s" % (header, compobj['compressor'].compress(data))
    else:
        data = header
    wc.log.debug(wc.LOG_FILTER, 'finishing compressor')
    data += "%s%s%s" % (compobj['compressor'].flush(zlib.Z_FINISH),
                        struct.pack('<l', compobj['crc']),
                        struct.pack('<l', compobj['size']))
header = compobj['header']
if header:
    wc.log.debug(wc.LOG_FILTER, 'final writing gzip header')
    pass
if data:
    compobj['size'] += len(data)
    compobj['crc'] = zlib.crc32(data, compobj['crc'])
    data = "%s%s" % (header, compobj['compressor'].compress(data))
else:
    data = header
wc.log.debug(wc.LOG_FILTER, 'finishing compressor')
data += "%s%s%s" % (compobj['compressor'].flush(zlib.Z_FINISH),
                    struct.pack('<l', compobj['crc']),
                    struct.pack('<l', compobj['size']))
def finish (self, data, **attrs):
    """final compression of data, flush gzip buffers"""
    if not attrs.has_key('compressobj'):
        return data
    compobj = attrs['compressobj']
    if compobj:
        header = compobj['header']
        if header:
            wc.log.debug(wc.LOG_FILTER, 'final writing gzip header')
            pass
        if data:
            compobj['size'] += len(data)
            compobj['crc'] = zlib.crc32(data, compobj['crc'])
            data = "%s%s" % (header, compobj['compressor'].compress(data))
        else:
            data = header
        wc.log.debug(wc.LOG_FILTER, 'finishing compressor')
        data += "%s%s%s" % (compobj['compressor'].flush(zlib.Z_FINISH),
                            struct.pack('<l', compobj['crc']),
                            struct.pack('<l', compobj['size']))
    return data
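For reference, the framing that filter() and finish() build incrementally can be sketched in one shot. This is a minimal sketch, not the filter's actual code: a gzip member is a fixed 10-byte header, a raw deflate stream (negative window bits), then CRC32 and uncompressed size as little-endian 32-bit values, matching the struct.pack('<l', ...) calls above.

import struct, zlib

def gzip_frame(data):
    # 10-byte header: magic bytes, deflate method, no flags, zero mtime,
    # max-compression flag, unknown OS
    header = '\037\213\010\0\0\0\0\0\002\377'
    # negative wbits selects a raw deflate stream without zlib framing
    compressor = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS)
    body = compressor.compress(data) + compressor.flush(zlib.Z_FINISH)
    trailer = struct.pack('<l', zlib.crc32(data)) \
            + struct.pack('<l', len(data))
    return header + body + trailer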
res.append(key[len(prefix):], get_item_value(form[key]))
res.append((key[len(prefix):], get_item_value(form[key])))
def get_prefix_vals (form, prefix):
    """return a list of (key, value) pairs where ``prefix+key''
    is a valid form field"""
    res = []
    for key in form:
        if key.startswith(prefix):
            res.append(key[len(prefix):], get_item_value(form[key]))
    return res
dstfinfo = dstfss.GetFInfo()
dstfinfo.Flags = dstfinfo.Flags | 0x8000
dstfss.SetFInfo(dstfinfo)

def main():
    dir, ok = macfs.GetDirectory()
    if not ok:
        sys.exit(0)
    os.chdir(dir.as_pathname())
if __name__ == '__main__':
    run(1)
def mkalias(src, dst):
    """Create a finder alias"""
    srcfss = macfs.FSSpec(src)
    dstfss = macfs.FSSpec(dst)
    alias = srcfss.NewAlias()
    srcfinfo = srcfss.GetFInfo()
    Res.FSpCreateResFile(dstfss, srcfinfo.Creator, srcfinfo.Type, -1)
    h = Res.FSpOpenResFile(dstfss, 3)
    resource = Res.Resource(alias.data)
    resource.AddResource('alis', 0, '')
    Res.CloseResFile(h)
    dstfinfo = dstfss.GetFInfo()
    dstfinfo.Flags = dstfinfo.Flags | 0x8000   # Alias flag
    dstfss.SetFInfo(dstfinfo)
if EasyDialogs.AskYesNoCancel('Proceed with removing old aliases?') <= 0:
    sys.exit(0)
LibFiles = []
allfiles = os.listdir(':')
for f in allfiles:
    if f[-4:] == '.slb':
        finfo = macfs.FSSpec(f).GetFInfo()
        if finfo.Flags & 0x8000:
            os.unlink(f)
        else:
            LibFiles.append(f)
def main():
    # Ask the user for the plugins directory
    dir, ok = macfs.GetDirectory()
    if not ok:
        sys.exit(0)
    os.chdir(dir.as_pathname())
    # Remove old .slb aliases and collect a list of .slb files
    if EasyDialogs.AskYesNoCancel('Proceed with removing old aliases?') <= 0:
        sys.exit(0)
    LibFiles = []
    allfiles = os.listdir(':')
    for f in allfiles:
        if f[-4:] == '.slb':
            finfo = macfs.FSSpec(f).GetFInfo()
            if finfo.Flags & 0x8000:
                os.unlink(f)
            else:
                LibFiles.append(f)
    print LibFiles
    # Create the new aliases.
    if EasyDialogs.AskYesNoCancel('Proceed with creating new ones?') <= 0:
        sys.exit(0)
    for dst, src in goals:
        if src in LibFiles:
            mkalias(src, dst)
        else:
            EasyDialogs.Message(dst + ' not created: ' + src + ' not found')
    EasyDialogs.Message('All done!')
print LibFiles
if EasyDialogs.AskYesNoCancel('Proceed with creating new ones?') <= 0:
    sys.exit(0)
for dst, src in goals:
    if src in LibFiles:
        mkalias(src, dst)
    else:
        EasyDialogs.Message(dst + ' not created: ' + src + ' not found')
EasyDialogs.Message('All done!')

if __name__ == '__main__':
    main()
def test( tests ):
    failed = []
    for t in tests:
        if get_arg_text(t) != t.__doc__:
            failed.append(t)
            print "%s - expected %s, but got %s" % (t, `t.__doc__`,
                                                    `get_arg_text(t)`)
    print "%d of %d tests failed" % (len(failed), len(tests))
class container:
    def __init__(self):
        root = Tk()
        text = self.text = Text(root)
        text.pack(side=LEFT, fill=BOTH, expand=1)
        text.insert("insert", "string.split")
        root.update()
        self.calltip = CallTip(text)
tc = TC()
tests = t1, t2, t3, t4, t5, t6, \
        tc.t1, tc.t2, tc.t3, tc.t4, tc.t5, tc.t6
text.event_add("<<calltip-show>>", "(") text.event_add("<<calltip-hide>>", ")") text.bind("<<calltip-show>>", self.calltip_show) text.bind("<<calltip-hide>>", self.calltip_hide) text.focus_set()
test(tests)
def calltip_show(self, event):
    self.calltip.showtip("Hello world")

def calltip_hide(self, event):
    self.calltip.hidetip()

def main():
    c = container()

if __name__ == '__main__':
    main()
/* For now we declare them forward here. They'll go to mactoolbox later */
staticforward PyObject *TXNObj_New(TXNObject);
staticforward int TXNObj_Convert(PyObject *, TXNObject *);
staticforward PyObject *TXNFontMenuObj_New(TXNFontMenuObject);
staticforward int TXNFontMenuObj_Convert(PyObject *, TXNFontMenuObject *);
def makegreylist(self): return []
#ifdef WITHOUT_FRAMEWORKS
// ADD declarations
//extern PyObject *_CFTypeRefObj_New(CFTypeRef);
//extern int _CFTypeRefObj_Convert(PyObject *, CFTypeRef *);
def makeblacklisttypes(self):
    return [
        "TXNTab",
        "TXNMargins",
        "TXNControlData",
        "TXNATSUIFeatures",
        "TXNATSUIVariations",
        "TXNAttributeData",
        "TXNTypeAttributes",
        "TXNMatchTextRecord",
        "TXNBackground",
        "UniChar",
        "TXNFindUPP",
    ]
#ifdef WITHOUT_FRAMEWORKS
//
//
/* ** Parse/generate ADD records */
"""

initstuff = initstuff + """
// PyMac_INIT_TOOLBOX_OBJECT_NEW(xxxx);
"""

TXNObject = OpaqueByValueType("TXNObject", "TXNObj")
TXNFontMenuObject = OpaqueByValueType("TXNFontMenuObject", "TXNFontMenuObj")
TXNFrameID = Type("TXNFrameID", "l")
TXNVersionValue = Type("TXNVersionValue", "l")
TXNFeatureBits = Type("TXNFeatureBits", "l")
TXNInitOptions = Type("TXNInitOptions", "l")
TXNFrameOptions = Type("TXNFrameOptions", "l")
TXNContinuousFlags = Type("TXNContinuousFlags", "l")
TXNMatchOptions = Type("TXNMatchOptions", "l")
TXNFileType = OSTypeType("TXNFileType")
TXNFrameType = Type("TXNFrameType", "l")
TXNDataType = OSTypeType("TXNDataType")
TXNControlTag = OSTypeType("TXNControlTag")
TXNActionKey = Type("TXNActionKey", "l")
TXNTabType = Type("TXNTabType", "b")
TXNScrollBarState = Type("TXNScrollBarState", "l")
TXNOffset = Type("TXNOffset", "l")
TXNObjectRefcon = FakeType("(TXNObjectRefcon)0")
TXNErrors = OSErrType("TXNErrors", "l")
TXNTypeRunAttributes = OSTypeType("TXNTypeRunAttributes")
TXNTypeRunAttributeSizes = Type("TXNTypeRunAttributeSizes", "l")
TXNPermanentTextEncodingType = Type("TXNPermanentTextEncodingType", "l")
TXTNTag = OSTypeType("TXTNTag")
TXNBackgroundType = Type("TXNBackgroundType", "l")
DragReference = OpaqueByValueType("DragReference", "DragObj")
DragTrackingMessage = Type("DragTrackingMessage", "h")
RgnHandle = OpaqueByValueType("RgnHandle", "ResObj")
GWorldPtr = OpaqueByValueType("GWorldPtr", "GWorldObj")
MlteInBuffer = VarInputBufferType('void *', 'ByteCount', 'l')

execfile("mltetypetest.py")

class TXNObjDefinition(GlobalObjectDefinition):
    def outputCheckNewArg(self):
        Output("if (itself == NULL) return PyMac_Error(resNotFound);")

class TXNFontMenuObjDefinition(GlobalObjectDefinition):
    def outputCheckNewArg(self):
        Output("if (itself == NULL) return PyMac_Error(resNotFound);")

module = MacModule(MODNAME, MODPREFIX, includestuff, finalstuff, initstuff)
TXNObject_object = TXNObjDefinition("TXNObject", "TXNObj", "TXNObject")
TXNFontMenuObject_object = TXNFontMenuObjDefinition("TXNFontMenuObject",
                                                    "TXNFontMenuObj",
                                                    "TXNFontMenuObject")
module.addobject(TXNObject_object)
module.addobject(TXNFontMenuObject_object)

Function = OSErrWeakLinkFunctionGenerator
Method = OSErrWeakLinkMethodGenerator

functions = []
TXNObject_methods = []
TXNFontMenuObject_methods = []
execfile(INPUTFILE)

for f in functions:
    module.add(f)
for f in TXNObject_methods:
    TXNObject_object.add(f)
for f in TXNFontMenuObject_methods:
    TXNFontMenuObject_object.add(f)

SetOutputFileName(OUTPUTFILE)
module.generate()
def makerepairinstructions(self):
    return [
        ([("void", "*", "OutMode"), ("ByteCount", "*", "InMode")],
         [("MlteInBuffer", "*", "InMode")]),
    ]

if __name__ == "__main__":
    main()
#ifdef NOTYET_USE_TOOLBOX_OBJECT_GLUE
def print_debug(msg):
    if debug:
        print msg

def _open(fullpath):
def needs_declaration(fullpath):
def print_debug(msg):
    if debug:
        print msg
    size = os.stat(fullpath).st_size
except OSError, err:
    print_debug("%s: permission denied: %s" % (fullpath, err))
    infile = open(fullpath, 'rU')
except IOError:
def _open(fullpath):
    try:
        size = os.stat(fullpath).st_size
    except OSError, err:
        # Permission denied - ignore the file
        print_debug("%s: permission denied: %s" % (fullpath, err))
        return None
    if size > 1024*1024:
        # too big
        print_debug("%s: the file is too big: %d bytes" % (fullpath, size))
        return None
    try:
        return open(fullpath, 'rU')
    except IOError, err:
        # Access denied, or a special file - ignore it
        print_debug("%s: access denied: %s" % (fullpath, err))
        return None
if size > 1024*1024:
    print_debug("%s: the file is too big: %d bytes" % (fullpath, size))
    return None
try:
    return open(fullpath, 'rU')
except IOError, err:
    print_debug("%s: access denied: %s" % (fullpath, err))
    return None

def has_python_ext(fullpath):
    return fullpath.endswith(".py") or fullpath.endswith(".pyw")

def looks_like_python(fullpath):
    infile = _open(fullpath)
    if infile is None:
line1 = infile.readline()
line2 = infile.readline()
if get_declaration(line1) or get_declaration(line2):
    infile.close()
line = infile.readline()
rest = infile.read()
def looks_like_python(fullpath):
    infile = _open(fullpath)
    if infile is None:
        return False
    line = infile.readline()
    infile.close()
    if binary_re.search(line):
        # file appears to be binary
        print_debug("%s: appears to be binary" % fullpath)
        return False
    if fullpath.endswith(".py") or fullpath.endswith(".pyw"):
        return True
    elif "python" in line:
        # disguised Python script (e.g. CGI)
        return True
    return False
if binary_re.search(line):
    print_debug("%s: appears to be binary" % fullpath)
if has_correct_encoding(line1+line2+rest, "ascii"):
if fullpath.endswith(".py") or fullpath.endswith(".pyw"): return True elif "python" in line: return True return False def can_be_compiled(fullpath): infile = _open(fullpath) if infile is None: return False code = infile.read() infile.close() try: compile(code, fullpath, "exec") except Exception, err: print_debug("%s: cannot compile: %s" % (fullpath, err)) return False
def walk_python_files(paths, is_python=looks_like_python,
                      exclude_dirs=None):
    """\
    Recursively yield all Python source files below the given paths.
usage = """Usage: %s [-cd] paths... -c: recognize Python source files trying to compile them -d: debug output""" % sys.argv[0]
def walk_python_files(paths, is_python=looks_like_python,
                      exclude_dirs=None):
    """\
    Recursively yield all Python source files below the given paths.

    paths: a list of files and/or directories to be checked.
    is_python: a function that takes a file name and checks whether it
               is a Python source file
    exclude_dirs: a list of directory base names that should be excluded
                  in the search
    """
    if exclude_dirs is None:
        exclude_dirs = []
    for path in paths:
        print_debug("testing: %s" % path)
        if os.path.isfile(path):
            if is_python(path):
                yield path
        elif os.path.isdir(path):
            print_debug("    it is a directory")
            for dirpath, dirnames, filenames in os.walk(path):
                for exclude in exclude_dirs:
                    if exclude in dirnames:
                        dirnames.remove(exclude)
                for filename in filenames:
                    fullpath = os.path.join(dirpath, filename)
                    print_debug("testing: %s" % fullpath)
                    if is_python(fullpath):
                        yield fullpath
        else:
            print_debug("    unknown type")
    paths: a list of files and/or directories to be checked.
    is_python: a function that takes a file name and checks whether it
               is a Python source file
    exclude_dirs: a list of directory base names that should be excluded
                  in the search
    """
    if exclude_dirs is None:
        exclude_dirs = []
    for path in paths:
        print_debug("testing: %s" % path)
        if os.path.isfile(path):
            if is_python(path):
                yield path
        elif os.path.isdir(path):
            print_debug("    it is a directory")
            for dirpath, dirnames, filenames in os.walk(path):
                for exclude in exclude_dirs:
                    if exclude in dirnames:
                        dirnames.remove(exclude)
                for filename in filenames:
                    fullpath = os.path.join(dirpath, filename)
                    print_debug("testing: %s" % fullpath)
                    if is_python(fullpath):
                        yield fullpath
        else:
            print_debug("    unknown type")
try:
    opts, args = getopt.getopt(sys.argv[1:], 'cd')
except getopt.error, msg:
    print >>sys.stderr, msg
    print >>sys.stderr, usage
    sys.exit(1)
is_python = pysource.looks_like_python
debug = False
for o, a in opts:
    if o == '-c':
        is_python = pysource.can_be_compiled
    elif o == '-d':
        debug = True
if not args:
    print >>sys.stderr, usage
    sys.exit(1)
for fullpath in pysource.walk_python_files(args, is_python):
    if debug:
        print "Testing for coding: %s" % fullpath
    result = needs_declaration(fullpath)
    if result:
        print fullpath
if __name__ == "__main__": for fullpath in walk_python_files(['.']): print fullpath print "----------" for fullpath in walk_python_files(['.'], is_python=can_be_compiled): print fullpath
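The exclude_dirs handling above relies on a documented property of os.walk: mutating the dirnames list in place controls which subtrees get visited. A standalone sketch (the '.svn' name is just an example):

import os

for dirpath, dirnames, filenames in os.walk('.'):
    if '.svn' in dirnames:
        dirnames.remove('.svn')   # os.walk will not descend into .svn
    for filename in filenames:
        print os.path.join(dirpath, filename)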
self.in_list_item = True
self.in_list_item = 1
def _indent_formatter(self, match, fullmatch):
    depth = int((len(fullmatch.group('idepth')) + 1) / 2)
    list_depth = len(self._list_stack)
    if list_depth > 0 and depth == list_depth + 1:
        self.in_list_item = True
    else:
        self.open_indentation(depth)
    return ''
    self.send_header('ETag', etag)
    return
self.send_response(304)
self.end_headers()
raise NotModifiedException()
    self._headers.append(('ETag', etag))
else:
    self.send_response(304)
    self.end_headers()
    raise NotModifiedException()
def check_modified(self, timesecs, extra=''):
    etag = 'W"%s/%d/%s"' % (self.authname, timesecs, extra)
    inm = self.get_header('If-None-Match')
    if (not inm or inm != etag):
        self.send_header('ETag', etag)
        return
    self.send_response(304)
    self.end_headers()
    raise NotModifiedException()
options = field.get('options', [])
if default and default not in field.get('options', []):
options = field.get('options')
if default and options and default not in options:
def _init_defaults(self, db=None):
    for field in self.fields:
        default = None
        if not field.get('custom'):
            default = self.env.config.get('ticket',
                                          'default_' + field['name'])
        else:
            default = field.get('value')
        options = field.get('options', [])
        if default and default not in field.get('options', []):
            try:
                default_idx = int(default)
                if default_idx > len(options):
                    raise ValueError
                default = options[default_idx]
            except ValueError:
                self.env.log.warning('Invalid default value for '
                                     'custom field "%s"' % field['name'])
        if default:
            self.values.setdefault(field['name'], default)
"""Check the request "If-None-Match" header against an entity tag generated from the specified last modified time in seconds (`timesecs`), optionally appending an `extra` string to indicate variants of the requested resource. That `extra` parameter can also be a list, in which case the MD5 sum of the list content will be used.
"""Check the request "If-None-Match" header against an entity tag. The entity tag is generated from the specified last modified time in seconds (`timesecs`), optionally appending an `extra` string to indicate variants of the requested resource. That `extra` parameter can also be a list, in which case the MD5 sum of the list content will be used.
def check_modified(self, timesecs, extra=''):
    """Check the request "If-None-Match" header against an entity tag
    generated from the specified last modified time in seconds
    (`timesecs`), optionally appending an `extra` string to indicate
    variants of the requested resource. That `extra` parameter can
    also be a list, in which case the MD5 sum of the list content will
    be used.
Otherwise, it adds the entity tag as as "ETag" header to the response so that consequetive requests can be cached.
Otherwise, it adds the entity tag as an "ETag" header to the response so that consecutive requests can be cached.
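The control flow these check_modified rows adjust amounts to a standard conditional-GET exchange. A hypothetical condensed sketch, with method names borrowed from the context above rather than Trac's exact API:

def check_modified_sketch(req, etag):
    # client already has the current variant: short-circuit with 304
    if req.get_header('If-None-Match') == etag:
        req.send_response(304)
        req.end_headers()
        return False           # no body follows
    # otherwise remember the tag so the response can be cached
    req.send_header('ETag', etag)
    return True                # proceed to send the resource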
'value': field['value']
'value': value
def insert_custom_fields(env, hdf, vals={}):
    for idx, field in util.enum(TicketSystem(env).get_custom_fields()):
        name = field['name']
        value = vals.get('custom_' + name, field['value'])
        prefix = 'ticket.custom.%d' % idx
        hdf[prefix] = {
            'name': field['name'],
            'type': field['type'],
            'label': field['label'] or field['name'],
            'value': field['value']
        }
        if field['type'] == 'select' or field['type'] == 'radio':
            for optidx, option in util.enum(field['options']):
                hdf['%s.option.%d' % (prefix, optidx)] = option
                if value and (option == value or str(optidx) == value):
                    hdf['%s.option.%d.selected' % (prefix, optidx)] = True
        elif field['type'] == 'checkbox':
            if value in util.TRUE:
                hdf['%s.selected' % prefix] = True
        elif field['type'] == 'textarea':
            hdf['%s.width' % prefix] = field['width']
            hdf['%s.height' % prefix] = field['height']
from optparse import OptionParser
from optparse import OptionParser, OptionValueError
def main():
    from optparse import OptionParser
    parser = OptionParser(usage='usage: %prog [options] [projenv] ...',
                          version='%%prog %s' % __version__)

    auths = {}
    def _auth_callback(option, opt_str, value, parser, auths, cls):
        info = value.split(',', 3)
        if len(info) != 3:
            usage()
        env_name, filename, realm = info
        if env_name in auths:
            print >>sys.stderr, 'Ignoring duplicate authentication option for ' \
                                'project: %s' % env_name
        else:
            auths[env_name] = cls(filename, realm)

    parser.add_option('-a', '--auth', action='callback', type='string',
                      metavar='DIGESTAUTH', callback=_auth_callback,
                      callback_args=(auths, DigestAuth),
                      help='[project],[htdigest_file],[realm]')
    parser.add_option('--basic-auth', action='callback', type='string',
                      metavar='BASICAUTH', callback=_auth_callback,
                      callback_args=(auths, BasicAuth),
                      help='[project],[htpasswd_file],[realm]')
    parser.add_option('-p', '--port', action='store', type='int',
                      dest='port', help='the port number to bind to')
    parser.add_option('-b', '--hostname', action='store', dest='hostname',
                      help='the host name or IP address to bind to')
    parser.add_option('-e', '--env-parent-dir', action='store',
                      dest='env_parent_dir', metavar='PARENTDIR',
                      help='parent directory of the project environments')
    if os.name == 'posix':
        parser.add_option('-d', '--daemonize', action='store_true',
                          dest='daemonize',
                          help='run in the background as a daemon')
    parser.set_defaults(port=80, hostname='', daemonize=False)
    options, args = parser.parse_args()

    if not args and not options.env_parent_dir:
        parser.error('either the --env_parent_dir option or at least one '
                     'environment must be specified')

    server_address = (options.hostname, options.port)
    httpd = TracHTTPServer(server_address, options.env_parent_dir, args,
                           auths)
    try:
        if options.daemonize:
            daemonize()
        httpd.serve_forever()
    except OSError:
        sys.exit(1)
    except KeyboardInterrupt:
        pass
usage()
raise OptionValueError("Incorrect number of parameters for %s"
                       % option)
usage()
def _auth_callback(option, opt_str, value, parser, auths, cls):
    info = value.split(',', 3)
    if len(info) != 3:
        usage()
    env_name, filename, realm = info
    if env_name in auths:
        print >>sys.stderr, 'Ignoring duplicate authentication option for ' \
                            'project: %s' % env_name
    else:
        auths[env_name] = cls(filename, realm)
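The --auth handling above uses optparse's callback action: optparse invokes the function with (option, opt_str, value, parser) plus whatever is passed in callback_args, so a single callback can serve both --auth and --basic-auth. A minimal self-contained sketch with hypothetical names:

from optparse import OptionParser

def collect(option, opt_str, value, parser, store):
    # extra positional args come from callback_args
    store.append((opt_str, value))

collected = []
parser = OptionParser()
parser.add_option('-a', '--auth', action='callback', type='string',
                  callback=collect, callback_args=(collected,))
options, args = parser.parse_args(['-a', 'proj,users.htdigest,realm'])
print collected   # [('-a', 'proj,users.htdigest,realm')]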
if os.name == 'posix' and options.daemonize:
    daemon.daemonize()
if os.name == 'posix':
    if options.pidfile:
        options.pidfile = os.path.abspath(options.pidfile)
        if os.path.exists(options.pidfile):
            pidfile = open(options.pidfile)
            try:
                pid = int(pidfile.read())
            finally:
                pidfile.close()
            try:
                os.kill(pid, 0)
            except OSError, e:
                if e.errno != errno.ESRCH:
                    raise
            else:
                sys.exit("tracd is already running with pid %s" % pid)
        realserve = serve
        def serve():
            try:
                pidfile = open(options.pidfile, 'w')
                try:
                    pidfile.write(str(os.getpid()))
                finally:
                    pidfile.close()
                realserve()
            finally:
                if os.path.exists(options.pidfile):
                    os.remove(options.pidfile)
    if options.daemonize:
        daemon.daemonize()
def serve():
    server_cls = __import__('flup.server.%s' % options.protocol,
                            None, None, ['']).WSGIServer
    ret = server_cls(wsgi_app, bindAddress=server_address).run()
    sys.exit(ret and 42 or 0)   # if SIGHUP exit with status 42
old = self.db_query("SELECT text FROM wiki WHERE name='%s' "
                    "ORDER BY version DESC LIMIT 1" % title, cursor)
rows = self.db_query("SELECT text FROM wiki WHERE name='%s' "
                     "ORDER BY version DESC LIMIT 1" % title, cursor)
old = list(rows)
def _do_wiki_import(self, filename, title, cursor=None):
    if not os.path.isfile(filename):
        print "%s is not a file" % filename
        return
    f = open(filename, 'r')
    data = util.to_utf8(f.read())
logfile = os.path.join(self.get_log_dir(), logfile)
if not os.path.isabs(logfile):
    logfile = os.path.join(self.get_log_dir(), logfile)
def setup_log(self):
    """Initialize the logging sub-system."""
    from trac.log import logger_factory
    logtype = self.config.get('logging', 'log_type')
    loglevel = self.config.get('logging', 'log_level')
    logfile = self.config.get('logging', 'log_file')
    logfile = os.path.join(self.get_log_dir(), logfile)
    logid = self.path   # Env-path provides process-unique ID
    self.log = logger_factory(logtype, logfile, loglevel, logid)
if milestone.has_key('date'):
if milestone.has_key('due'):
def write_utctime(name, value, params={}):
    write_prop(name, strftime('%Y%m%dT%H%M%SZ', value), params)
if milestone.has_key('date'):
    write_date('DTSTART', localtime(milestone['due']))
write_date('DTSTART', localtime(milestone['due']))
req.hdf['%s.real' % prefix] = col[0]
req.hdf['%s.real' % prefix] = col
def _render_view(self, req, db, id):
    """
    uses a user specified sql query to extract some information
    from the database and presents it as a html table.
    """
    actions = {'create': 'REPORT_CREATE', 'delete': 'REPORT_DELETE',
               'modify': 'REPORT_MODIFY'}
    for action in [k for k, v in actions.items()
                   if req.perm.has_permission(v)]:
        req.hdf['report.can_' + action] = True
    req.hdf['report.href'] = req.href.report(id)
while req.hdf.get('links.%s.%d.href' % (rel, idx)):
while req.hdf.get('chrome.links.%s.%d.href' % (rel, idx)):
def add_link(req, rel, href, title=None, type=None, class_name=None):
    link = {'href': escape(href)}
    if title:
        link['title'] = escape(title)
    if type:
        link['type'] = type
    if class_name:
        link['class'] = class_name
    idx = 0
    while req.hdf.get('links.%s.%d.href' % (rel, idx)):
        idx += 1
    req.hdf['links.%s.%d' % (rel, idx)] = link
req.hdf['links.%s.%d' % (rel, idx)] = link
req.hdf['chrome.links.%s.%d' % (rel, idx)] = link
if not logo_src[0] == '/' and not logo_src_abs:
if not logo_src.startswith('/') and not logo_src_abs:
def populate_hdf(self, req, handler):
    """
    Add chrome-related data to the HDF.
    """
req.hdf['header_logo'] = {
req.hdf['chrome.logo'] = {
req.hdf['chrome.%s.%s' % (category, name)] = text
req.hdf['chrome.nav.%s.%s' % (category, name)] = text
req.hdf['chrome.%s.%s.active' % (category, name)] = 1
req.hdf['chrome.nav.%s.%s.active' % (category, name)] = 1
module, id = req.hdf['HTTP.PathInfo'].split('/', 3)[1:]
module, id = 'wiki', 'WikiStart'
path_info = req.path_info.split('/', 2)
if len(path_info) > 1:
    module = path_info[1]
if len(path_info) > 2:
    id = path_info[2]
def render_macro(self, req, name, content):
    # args will be null if the macro is called without parenthesis.
    if not content:
        return ''
    # parse arguments
    # we expect the 1st argument to be a filename (filespec)
    args = content.split(',')
    if len(args) == 0:
        raise Exception("No argument.")
    filespec = args[0]
    size_re = re.compile('^[0-9]+%?$')
    align_re = re.compile('^(?:left|right|top|bottom)$')
    keyval_re = re.compile('^([-a-z0-9]+)([=:])(.*)')
    quoted_re = re.compile("^(?:&#34;|')(.*)(?:&#34;|')$")
    attr = {}
    style = {}
    for arg in args[1:]:
        arg = arg.strip()
        if size_re.search(arg):
            # 'width' keyword
            attr['width'] = arg
            continue
        if align_re.search(arg):
            # 'align' keyword
            attr['align'] = arg
            continue
        match = keyval_re.search(arg)
        if match:
            key = match.group(1)
            sep = match.group(2)
            val = match.group(3)
            m = quoted_re.search(val)   # unquote &#34; character "
            if m:
                val = m.group(1)
            if sep == '=':
                attr[key] = val
            elif sep == ':':
                style[key] = val
'order': 0, 'optional': False},
'order': 0},
def test_custom_field_select(self):
    self.env.config.set('ticket-custom', 'test', 'select')
    self.env.config.set('ticket-custom', 'test.label', 'Test')
    self.env.config.set('ticket-custom', 'test.value', '1')
    self.env.config.set('ticket-custom', 'test.options',
                        'option1|option2')
    fields = TicketSystem(self.env).get_custom_fields()
    self.assertEqual({'name': 'test', 'type': 'select', 'label': 'Test',
                      'value': '1', 'options': ['option1', 'option2'],
                      'order': 0, 'optional': False},
                     fields[0])
'value': '1', 'options': ['', 'option1', 'option2'],
'value': '1', 'options': ['option1', 'option2'],
def test_custom_field_optional_select(self):
    self.env.config.set('ticket-custom', 'test', 'select')
    self.env.config.set('ticket-custom', 'test.label', 'Test')
    self.env.config.set('ticket-custom', 'test.value', '1')
    self.env.config.set('ticket-custom', 'test.options',
                        '|option1|option2')
    fields = TicketSystem(self.env).get_custom_fields()
    self.assertEqual({'name': 'test', 'type': 'select', 'label': 'Test',
                      'value': '1', 'options': ['', 'option1', 'option2'],
                      'order': 0, 'optional': True},
                     fields[0])
Generator that produces a (path, kind, change, base_rev, base_path)
Generator that produces a (path, kind, change, base_path, base_rev)
def get_changes(self):
    """
    Generator that produces a (path, kind, change, base_rev, base_path)
    tuple for every change in the changeset, where change can be one of
    Changeset.ADD, Changeset.COPY, Changeset.DELETE, Changeset.EDIT or
    Changeset.MOVE, and kind is one of Node.FILE or Node.DIRECTORY.
    """
    raise NotImplementedError
milestone = Milestone(self.env, req.args.get('id'), db)
milestone = Milestone(self.env, milestone_id, db)
def process_request(self, req):
    req.perm.assert_permission('MILESTONE_VIEW')
xlist = ['summary', 'description', 'link', 'comment']
xlist = ['summary', 'description', 'link', 'comment', 'new']
def _validate_mimebody(self, mime, ticket, newtk):
    """Validate the body of a ticket notification message"""
    (mime_decoder, mime_name, mime_charset) = mime
    tn = TicketNotifyEmail(self.env)
    tn.notify(ticket, newticket=newtk)
    message = notifysuite.smtpd.get_message()
    (headers, body) = parse_smtp_message(message)
    self.failIf('MIME-Version' not in headers)
    self.failIf('Content-Type' not in headers)
    self.failIf('Content-Transfer-Encoding' not in headers)
    self.failIf(not re.compile(r"1.\d").match(headers['MIME-Version']))
    type_re = re.compile(r'^text/plain;\scharset="([\w\-\d]+)"$')
    charset = type_re.match(headers['Content-Type'])
    self.failIf(not charset)
    charset = charset.group(1)
    self.assertEqual(charset, mime_charset)
    self.assertEqual(headers['Content-Transfer-Encoding'], mime_name)
    # checks the width of each body line
    for line in body.splitlines():
        self.failIf(len(line) > MAXBODYWIDTH)
    # attempts to decode the body, following the specified MIME endoding
    # and charset
    try:
        if mime_decoder:
            body = mime_decoder.decodestring(body)
        body = unicode(body, charset)
    except Exception, e:
        raise AssertionError, e
    # now processes each line of the body
    bodylines = body.splitlines()
    # body starts with one of more summary lines, first line is prefixed
    # with the ticket number such as #<n>: summary
    # finds the banner after the summary
    banner_delim_re = re.compile(r'^\-+\+\-+$')
    bodyheader = []
    while (not banner_delim_re.match(bodylines[0])):
        bodyheader.append(bodylines.pop(0))
    # summary should be present
    self.failIf(not bodyheader)
    # banner should not be empty
    self.failIf(not bodylines)
    # extracts the ticket ID from the first line
    (tknum, bodyheader[0]) = bodyheader[0].split(' ', 1)
    self.assertEqual(tknum[0], '#')
    try:
        tkid = int(tknum[1:-1])
        self.assertEqual(tkid, 1)
    except ValueError:
        raise AssertionError, "invalid ticket number"
    self.assertEqual(tknum[-1], ':')
    summary = ' '.join(bodyheader)
    self.assertEqual(summary, ticket['summary'])
    # now checks the banner contents
    self.failIf(not banner_delim_re.match(bodylines[0]))
    banner = True
    footer = None
    props = {}
    for line in bodylines[1:]:
        # detect end of banner
        if banner_delim_re.match(line):
            banner = False
            continue
        if banner:
            # parse banner and fill in a property dict
            properties = line.split('|')
            self.assertEqual(len(properties), 2)
            for prop in properties:
                if prop.strip() == '':
                    continue
                (k, v) = prop.split(':')
                props[k.strip().lower()] = v.strip()
        # detect footer marker (weak detection)
        if not footer:
            if line.strip() == '--':
                footer = 0
                continue
        # check footer
        if footer != None:
            footer += 1
            # invalid footer detection
            self.failIf(footer > 3)
            # check ticket link
            if line[:11] == 'Ticket URL:':
                self.assertEqual(line[12:].strip(),
                                 "<%s>" % ticket['link'].strip())
    # note project title / URL are not validated yet
q.append('SELECT 2 as type, summary AS title, '
         ' description AS message, reporter AS author, keywords,'
         ' id AS data, time,0 AS ver'
         ' FROM ticket WHERE %s OR %s OR %s OR %s OR %s'
         % (self.query_to_sql(query, 'summary'),
q.append('SELECT DISTINCT 2 as type, a.summary AS title, '
         ' a.description AS message, a.reporter AS author,'
         ' a.keywords as keywords,'
         ' a.id AS data, a.time as time, 0 AS ver'
         ' FROM ticket a LEFT JOIN ticket_change b ON a.id = b.ticket'
         ' WHERE (b.field=\'comment\' AND %s ) OR'
         ' %s OR %s OR %s OR %s OR %s'
         % (self.query_to_sql(query, 'b.newvalue'),
            self.query_to_sql(query, 'summary'),
def perform_query (self, query, changeset, tickets, wiki, page=0):
    keywords = query.split(' ')
q.append('SELECT 2 as type, a.summary AS title, '
         ' b.newvalue AS message, a.reporter AS author,'
         ' a.keywords as keywords,'
         ' a.id AS data, a.time AS time,0 AS ver'
         ' FROM ticket a, ticket_change b'
         ' WHERE a.id = b.ticket AND b.field=\'comment\' AND %s'
         % (self.query_to_sql(query, 'b.newvalue')))
q_str += ' ORDER BY time DESC LIMIT %d OFFSET %d' % \
q_str += ' ORDER BY 7 DESC LIMIT %d OFFSET %d' % \
for f in os.listdir(trac.siteconfig.__default_macro_dir__):
for f in os.listdir(trac.siteconfig.__default_macros_dir__):
def do_initenv(self, line):
    if self.env_check():
        print "Initenv for '%s' failed.\nDoes an environment already exist?" % self.envname
        return
    arg = self.arg_tokenize(line)
    project_name = None
    repository_dir = None
    templates_dir = None
    if len(arg) == 1:
        returnvals = self.get_initenv_args()
        project_name = returnvals[0]
        repository_dir = returnvals[1]
        templates_dir = returnvals[2]
    elif len(arg) != 3:
        print 'Wrong number of arguments to initenv %d' % len(arg)
        return
    else:
        project_name = arg[0]
        repository_dir = arg[1]
        templates_dir = arg[2]
src = os.path.join(trac.siteconfig.__default_macro_dir__, f)
src = os.path.join(trac.siteconfig.__default_macros_dir__, f)
    default_idx = int(default)
    if default_idx > len(options):
        raise ValueError
    default = options[default_idx]
except ValueError:
    self.env.log.warning('Invalid default value for '
                         'custom field "%s"' % field['name'])
    default = options[int(default)]
except (ValueError, IndexError):
    self.env.log.warning('Invalid default value "%s" '
                         'for custom field "%s"'
                         % (default, field['name']))
def _init_defaults(self, db=None):
    for field in self.fields:
        default = None
        if not field.get('custom'):
            default = self.env.config.get('ticket',
                                          'default_' + field['name'])
        else:
            default = field.get('value')
        options = field.get('options')
        if default and options and default not in options:
            try:
                default_idx = int(default)
                if default_idx > len(options):
                    raise ValueError
                default = options[default_idx]
            except ValueError:
                self.env.log.warning('Invalid default value for '
                                     'custom field "%s"' % field['name'])
        if default:
            self.values.setdefault(field['name'], default)
break
try:
    cnx.cursor()
    break
except Exception:
    cnx.close()
def get_cnx(self, timeout=None):
    start = time.time()
    self._available.acquire()
    try:
        tid = threading._get_ident()
        if tid in self._active:
            self._active[tid][0] += 1
            return PooledConnection(self, self._active[tid][1])
        while True:
            if self._dormant:
                cnx = self._dormant.pop()
                break
            elif self._maxsize and self._cursize < self._maxsize:
                cnx = self._connector.get_connection(**self._kwargs)
                self._cursize += 1
                break
            else:
                if timeout:
                    self._available.wait(timeout)
                    if (time.time() - start) >= timeout:
                        raise TimeoutError, 'Unable to get database ' \
                                            'connection within %d seconds' \
                                            % timeout
                else:
                    self._available.wait()
        self._active[tid] = [1, cnx]
        return PooledConnection(self, cnx)
    finally:
        self._available.release()
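The change above wraps cnx.cursor() in a try block as a liveness probe: a dormant pooled connection may have been closed server-side since it was checked in, so opening a cursor is a cheap validity test before reuse. Sketched in isolation, with hypothetical names:

def checkout(dormant, connect):
    # try dormant connections first; a dead one raises on cursor()
    while dormant:
        cnx = dormant.pop()
        try:
            cnx.cursor()       # probe the link
            return cnx
        except Exception:
            cnx.close()        # discard the broken connection
    return connect()           # nothing reusable: open a fresh one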
for line in difflines:
    if line.startswith('--- '):
        words = line.split(None, 2)
        filename, fromrev = words[1], 'old'
        groups, blocks = None, None
lines = iter(difflines)
for line in lines:
    if not line.startswith('--- '):
def htmlify(match):
    div, mod = divmod(len(match.group(0)), 2)
    return div * '&nbsp; ' + mod * '&nbsp;'
if line.startswith('+++ '):
    words = line.split(None, 2)
    if len(words[1]) < len(filename):
        filename = words[1]
    groups = []
    output.append({'filename' : filename, 'oldrev' : fromrev,
                   'newrev' : 'new', 'diff' : groups})
    continue
if line.startswith('Index: ') or line.startswith('======') or line == '':
    continue
if groups == None:
words = line.split(None, 2)
filename, fromrev = words[1], 'old'
groups, blocks = None, None
line = lines.next()
if not line.startswith('+++ '):
def htmlify(match):
    div, mod = divmod(len(match.group(0)), 2)
    return div * '&nbsp; ' + mod * '&nbsp;'
if line.startswith('@@ '):
    r = re.match(r'@@ -(\d+),\d+ \+(\d+),\d+ @@', line)
words = line.split(None, 2)
if len(words[1]) < len(filename):
    filename = words[1]
groups = []
output.append({'filename' : filename, 'oldrev' : fromrev,
               'newrev' : 'new', 'diff' : groups})
for line in lines:
    r = re.match(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@', line)
def htmlify(match):
    div, mod = divmod(len(match.group(0)), 2)
    return div * '&nbsp; ' + mod * '&nbsp;'
return None
break
def htmlify(match):
    div, mod = divmod(len(match.group(0)), 2)
    return div * '&nbsp; ' + mod * '&nbsp;'
fromline, toline = map(int, r.groups())
fromline, fromend, toline, toend = map(int, r.groups())
def htmlify(match):
    div, mod = divmod(len(match.group(0)), 2)
    return div * '&nbsp; ' + mod * '&nbsp;'
continue
if blocks == None:
    return None
def htmlify(match):
    div, mod = divmod(len(match.group(0)), 2)
    return div * '&nbsp; ' + mod * '&nbsp;'
command, line = line[0], line[1:]
fromend += fromline
toend += toline
def htmlify(match):
    div, mod = divmod(len(match.group(0)), 2)
    return div * '&nbsp; ' + mod * '&nbsp;'
if (command == ' ') != last_type:
    last_type = command == ' '
    blocks.append({'type': last_type and 'unmod' or 'mod',
                   'base.offset': fromline - 1, 'base.lines': [],
                   'changed.offset': toline - 1,
                   'changed.lines': []})
if command == ' ':
    blocks[-1]['changed.lines'].append(line)
    blocks[-1]['base.lines'].append(line)
    fromline += 1
    toline += 1
elif command == '+':
    blocks[-1]['changed.lines'].append(line)
    toline += 1
elif command == '-':
    blocks[-1]['base.lines'].append(line)
    fromline += 1
else:
    return None
while fromline < fromend or toline < toend:
    line = lines.next()
    command, line = line[0], line[1:]
    if (command == ' ') != last_type:
        last_type = command == ' '
        blocks.append({'type': last_type and 'unmod' or 'mod',
                       'base.offset': fromline - 1, 'base.lines': [],
                       'changed.offset': toline - 1,
                       'changed.lines': []})
    if command == ' ':
        blocks[-1]['changed.lines'].append(line)
        blocks[-1]['base.lines'].append(line)
        fromline += 1
        toline += 1
    elif command == '+':
        blocks[-1]['changed.lines'].append(line)
        toline += 1
    elif command == '-':
        blocks[-1]['base.lines'].append(line)
        fromline += 1
    else:
        return None
def htmlify(match):
    div, mod = divmod(len(match.group(0)), 2)
    return div * '&nbsp; ' + mod * '&nbsp;'
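
# The diff-parser rows above switch to capturing all four numbers of the
# unified-diff hunk header and then counting consumed lines explicitly
# (fromend += fromline; while fromline < fromend or toline < toend),
# instead of scanning for the next marker.  The header arithmetic in
# isolation, as a sketch:
import re

HUNK = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@')

def hunk_bounds(header):
    # '@@ -start,length +start,length @@' becomes half-open line ranges,
    # using the same start-plus-length arithmetic as the parser above.
    fromline, fromlen, toline, tolen = map(int, HUNK.match(header).groups())
    return fromline, fromline + fromlen, toline, toline + tolen

# Old side: 7 lines starting at line 3; new side: 8 lines starting at 3.
assert hunk_bounds('@@ -3,7 +3,8 @@') == (3, 10, 3, 11)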
pass
version = None
def _insert_ticket_data(self, req, db, ticket, data, reporter_id):
    """Insert ticket data into the hdf"""
    replyto = req.args.get('replyto')
    version = req.args.get('version', None)
    data['replyto'] = replyto
    if version:
        try:
            version = int(version)
            data['version'] = version
        except ValueError:
            pass
if version is not None and cnum > version:
    for k, v in change['fields'].iteritems():
        if k not in values:
            values[k] = v['old']
    continue
changes.append(change)
def quote_original(author, original, link):
    if 'comment' not in req.args: # i.e. the comment was not yet edited
        data['comment'] = '\n'.join(
            ['Replying to [%s %s]:' % (link, author)] +
            ['> %s' % line for line in original.splitlines()] +
            [''])
if 'replyto' in change:
    replies.setdefault(change['replyto'], []).append(cnum)
comment = ''
if replyto == str(cnum):
    quote_original(change['author'], comment,
                   'comment:%s' % replyto)
if version:
if version is not None and cnum > version:
def quote_original(author, original, link):
    if 'comment' not in req.args: # i.e. the comment was not yet edited
        data['comment'] = '\n'.join(
            ['Replying to [%s %s]:' % (link, author)] +
            ['> %s' % line for line in original.splitlines()] +
            [''])
values[k] = v['new']
if 'description' in change['fields']:
    data['description_change'] = change
        if k not in values:
            values[k] = v['old']
    skip = True
else:
    if 'replyto' in change:
        replies.setdefault(change['replyto'], []).append(cnum)
    comment = ''
    if replyto == str(cnum):
        quote_original(change['author'], comment,
                       'comment:%s' % replyto)
    if version:
        for k, v in change['fields'].iteritems():
            values[k] = v['new']
    if 'description' in change['fields']:
        data['description_change'] = change
if not skip:
    changes.append(change)
def quote_original(author, original, link):
    if 'comment' not in req.args: # i.e. the comment was not yet edited
        data['comment'] = '\n'.join(
            ['Replying to [%s %s]:' % (link, author)] +
            ['> %s' % line for line in original.splitlines()] +
            [''])
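
# The rows above implement "view this ticket as of version N": changes
# newer than N are skipped, but each field they touched is rolled back to
# that change's 'old' value, and only the first rollback per field is
# kept, since it holds the value the field had at version N.  A compact,
# loosely adapted sketch with an invented change list:
def values_at_version(current, changes, version):
    values = dict(current)
    rolled = set()
    for cnum, fields in changes:          # changes iterate oldest-first
        if version is not None and cnum > version:
            for name, change in fields.items():
                if name not in rolled:    # first rollback wins
                    values[name] = change['old']
                    rolled.add(name)
    return values

changes = [(1, {'status': {'old': 'new', 'new': 'assigned'}}),
           (2, {'status': {'old': 'assigned', 'new': 'closed'}})]
assert values_at_version({'status': 'closed'}, changes, 1) == \
       {'status': 'assigned'}
assert values_at_version({'status': 'closed'}, changes, 0) == \
       {'status': 'new'}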
if not row:
    raise TracError('Report %d does not exist.' % id,
try:
    if not row:
        raise TracError('Report %d does not exist.' % id,
def get_info(self, id, args):
    cursor = self.db.cursor()
title = row[0]
try:
title = row[0]
def get_info(self, id, args):
    cursor = self.db.cursor()
if path.startswith(self.scope):
if rev > 0 and path.startswith(self.scope):
def get_history(self):
    history = _get_history(self.scope + self.path, self.authz,
                           self.fs_ptr, self.pool, self.rev)
    for path, rev in history:
        if path.startswith(self.scope):
            yield path[len(self.scope):], rev
title = Markup('Ticket <em title="%s">#%s</em> (%s) %s by %s',
               summary, id, type, verb, author)
if format == 'rss':
    title = 'Ticket #%s (%s %s): %s' % (id, type.lower(), verb, summary)
else:
    title = Markup('Ticket <em title="%s">#%s</em> (%s) %s by %s',
                   summary, id, type, verb, author)
def produce((id, t, author, type, summary), status, fields,
            comment, cid):
    if status == 'edit':
        if 'ticket_details' in filters:
            info = ''
            if len(fields) > 0:
                info = ', '.join(['<i>%s</i>' % f for f in \
                                  fields.keys()]) + ' changed<br />'
        else:
            return None
    elif 'ticket' in filters:
        if status == 'closed' and fields.has_key('resolution'):
            info = fields['resolution']
            if info and comment:
                info = '%s: ' % info
        else:
            info = ''
    else:
        return None
    kind, verb = status_map[status]
    title = Markup('Ticket <em title="%s">#%s</em> (%s) %s by %s',
                   summary, id, type, verb, author)
    href = format == 'rss' and req.abs_href.ticket(id) or \
           req.href.ticket(id)
    if cid:
        href += '#comment:' + cid
    if status == 'new':
        message = summary
    else:
        message = Markup(info)
        if comment:
            if format == 'rss':
                message += wiki_to_html(comment, self.env, req, db,
                                        absurls=True)
            else:
                message += wiki_to_oneliner(comment, self.env, db,
                                            shorten=True)
    return kind, href, title, t, author, message
defaults = {}
for section, options in self.env.get_default_config().iteritems():
    defaults[section] = default_options = {}
    for opt in options:
        default_options[opt.name] = opt.default
def _render_config(self, req):
    req.perm.assert_permission('CONFIG_VIEW')
    req.hdf['about.page'] = 'config'
    # Gather default values
    defaults = {}
    for section, options in self.env.get_default_config().iteritems():
        defaults[section] = default_options = {}
        for opt in options:
            default_options[opt.name] = opt.default
    # Export the config table to hdf
    sections = []
    for section in self.config.sections():
        options = []
        default_options = defaults.get(section)
        for name, value in self.config.options(section):
            default = default_options and default_options.get(name) or ''
            options.append({'name': name, 'value': value,
                            'valueclass': (value == default and \
                                           'defaultvalue' or 'value')})
        options.sort(lambda x, y: cmp(x['name'], y['name']))
        sections.append({'name': section, 'options': options})
    sections.sort(lambda x, y: cmp(x['name'], y['name']))
    req.hdf['about.config'] = sections
    # TODO:
    # We should probably export more info here like:
    # permissions, components...
default_options = defaults.get(section)
default_options = self.config.getdefaults().get(section)
def _render_config(self, req):
    req.perm.assert_permission('CONFIG_VIEW')
    req.hdf['about.page'] = 'config'
    # Gather default values
    defaults = {}
    for section, options in self.env.get_default_config().iteritems():
        defaults[section] = default_options = {}
        for opt in options:
            default_options[opt.name] = opt.default
    # Export the config table to hdf
    sections = []
    for section in self.config.sections():
        options = []
        default_options = defaults.get(section)
        for name, value in self.config.options(section):
            default = default_options and default_options.get(name) or ''
            options.append({'name': name, 'value': value,
                            'valueclass': (value == default and \
                                           'defaultvalue' or 'value')})
        options.sort(lambda x, y: cmp(x['name'], y['name']))
        sections.append({'name': section, 'options': options})
    sections.sort(lambda x, y: cmp(x['name'], y['name']))
    req.hdf['about.config'] = sections
    # TODO:
    # We should probably export more info here like:
    # permissions, components...
options.append({'name': name, 'value': value,
                'valueclass': (value == default and \
                               'defaultvalue' or 'value')})
options.append({
    'name': name, 'value': value,
    'valueclass': (unicode(value) == unicode(default)
                   and 'defaultvalue' or 'value')})
def _render_config(self, req):
    req.perm.assert_permission('CONFIG_VIEW')
    req.hdf['about.page'] = 'config'
    # Gather default values
    defaults = {}
    for section, options in self.env.get_default_config().iteritems():
        defaults[section] = default_options = {}
        for opt in options:
            default_options[opt.name] = opt.default
    # Export the config table to hdf
    sections = []
    for section in self.config.sections():
        options = []
        default_options = defaults.get(section)
        for name, value in self.config.options(section):
            default = default_options and default_options.get(name) or ''
            options.append({'name': name, 'value': value,
                            'valueclass': (value == default and \
                                           'defaultvalue' or 'value')})
        options.sort(lambda x, y: cmp(x['name'], y['name']))
        sections.append({'name': section, 'options': options})
    sections.sort(lambda x, y: cmp(x['name'], y['name']))
    req.hdf['about.config'] = sections
    # TODO:
    # We should probably export more info here like:
    # permissions, components...
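
# The valueclass change above coerces both sides before comparing, so a
# string read back from trac.ini still counts as "default" when the
# registered default is an int or bool.  A tiny sketch of the guard,
# keeping the surrounding code's and/or idiom; the option values are
# invented (unicode() in the Python 2 original, str() here):
def value_class(value, default):
    return str(value) == str(default) and 'defaultvalue' or 'value'

assert value_class('8080', 8080) == 'defaultvalue'  # equal after coercion
assert value_class('8000', 8080) == 'value'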
self._execute('severity change critical end-of-the-world')
self._execute('severity change critical "end-of-the-world"')
def test_severity_change_ok(self):
    """
    Tests the 'severity add' command in trac-admin.  This particular
    test passes valid arguments and checks for success.
    """
    test_name = sys._getframe().f_code.co_name
    self._execute('severity add critical')
    self._execute('severity change critical end-of-the-world')
    test_results = self._execute('severity list')
    self.assertEquals(self.expected_results[test_name], test_results)
return self.default
return self.default(component.env)
def implementation(self, component):
    cfgvalue = component.config.get(self.cfg_section, self.cfg_property)
    for impl in self.xtnpt.extensions(component):
        if impl.__class__.__name__ == cfgvalue:
            return impl
    if self.default is not None:
        return self.default
    raise AttributeError('Cannot find an implementation of the "%s" '
                         'interface named "%s". Please update your '
                         'trac.ini setting "%s.%s"'
                         % (self.xtnpt.interface.__name__, cfgvalue,
                            self.cfg_section, self.cfg_property))
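
# The change above (self.default becomes self.default(component.env))
# treats the fallback as a component class to instantiate against the
# environment, not a ready-made object.  A toy version of that
# lookup-with-default shape; every name here is made up:
def find_implementation(candidates, wanted_name, default_cls, env):
    for impl in candidates:
        if impl.__class__.__name__ == wanted_name:
            return impl                   # config named an extension
    if default_cls is not None:
        return default_cls(env)           # instantiate the fallback class
    raise AttributeError('no implementation named %r' % wanted_name)

class DefaultStore(object):
    def __init__(self, env):
        self.env = env

store = find_implementation([], 'MissingStore', DefaultStore, 'fake-env')
assert isinstance(store, DefaultStore)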
time = int(t or time.time())
self.date = datetime.fromtimestamp(time, utc)
timestamp = int(t or time.time())
self.date = datetime.fromtimestamp(timestamp, utc)
def insert(self, filename, fileobj, size, t=None, db=None):
    # FIXME: `t` should probably be switched to `datetime` too
    if not db:
        db = self.env.get_db_cnx()
        handle_ta = True
    else:
        handle_ta = False
self.size, time, self.description, self.author, self.ipnr))
self.size, timestamp, self.description, self.author, self.ipnr))
def insert(self, filename, fileobj, size, t=None, db=None):
    # FIXME: `t` should probably be switched to `datetime` too
    if not db:
        db = self.env.get_db_cnx()
        handle_ta = True
    else:
        handle_ta = False
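
# The rename above (time to timestamp) fixes a real scoping bug, not just
# style: assigning to `time` anywhere in the function makes it local for
# the whole scope, so the `time.time()` on the right-hand side raises
# UnboundLocalError whenever `t` is empty.  Reproduced in isolation:
import time

def broken(t=None):
    time = int(t or time.time())   # UnboundLocalError when t is falsy
    return time

def fixed(t=None):
    timestamp = int(t or time.time())
    return timestamp

try:
    broken()
except UnboundLocalError:
    pass                           # the module was shadowed by the local
assert broken(42) == 42            # short-circuit hides the bug if t is set
assert fixed(None) > 0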
def get_ticket (self, id):
def get_ticket (self, id, escape_values=1):
def get_ticket (self, id):
    global fields
    cnx = db.get_connection ()
    cursor = cnx.cursor ()
info[fields[i]] = escape(row[i])
info[fields[i]] = row[i]
def get_ticket (self, id):
    global fields
    cnx = db.get_connection ()
    cursor = cnx.cursor ()
hdf.setValue('ticket.changes.%d.author' % idx, author)
hdf.setValue('ticket.changes.%d.field' % idx, field)
hdf.setValue('ticket.changes.%d.time' % idx, str(date))
hdf.setValue('ticket.changes.%d.author' % idx, author)
hdf.setValue('ticket.changes.%d.field' % idx, field)
def insert_ticket_data(self, hdf, id):
    """Inserts ticket data into the hdf"""
    cnx = db.get_connection ()
    cursor = cnx.cursor()
    cursor.execute('SELECT time, author, field, oldvalue, newvalue '
                   'FROM ticket_change '
                   'WHERE ticket=%s ORDER BY time', id)
    curr_author = None
    curr_date = 0
    comment = None
    idx = 0
old = self.get_ticket(id)
old = self.get_ticket(id, 0)
def render (self):
    action = dict_get_with_default(self.args, 'action', 'view')
    if action == 'create':
        self.create_ticket ()
    try:
        id = int(self.args['id'])
    except:
        redirect (href.menu())
class BasicAuthentication(HTTPAuthentication):
class PasswordFileAuthentication(HTTPAuthentication):

    def __init__(self, filename):
        self.filename = filename
        self.mtime = os.stat(filename).st_mtime
        self.load(self.filename)
        self._lock = threading.Lock()

    def check_reload(self):
        self._lock.acquire()
        try:
            mtime = os.stat(self.filename).st_mtime
            if mtime > self.mtime:
                self.mtime = mtime
                self.load(self.filename)
        finally:
            self._lock.release()

class BasicAuthentication(PasswordFileAuthentication):
def do_auth(self, environ, start_response):
    raise NotImplementedError
self.hash = {}
def __init__(self, htpasswd, realm):
    self.hash = {}
    self.realm = realm
    try:
        import crypt
        self.crypt = crypt.crypt
    except ImportError:
        self.crypt = None
    self.load(htpasswd)
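
# The refactoring above extracts the shared file handling into a
# PasswordFileAuthentication base: stat the password file on each request
# and reload it under a lock only when the mtime advances.  A standalone
# sketch of that guard, with load() left abstract as in the original
# (the class name here is invented):
import os
import threading

class ReloadingPasswordFile(object):
    def __init__(self, filename):
        self.filename = filename
        self.mtime = os.stat(filename).st_mtime
        self._lock = threading.Lock()
        self.load(self.filename)

    def check_reload(self):
        # Cheap stat on every call; the expensive reload happens only
        # when the file actually changed on disk.
        with self._lock:
            mtime = os.stat(self.filename).st_mtime
            if mtime > self.mtime:
                self.mtime = mtime
                self.load(self.filename)

    def load(self, filename):
        # Subclasses parse htpasswd or htdigest entries here.
        raise NotImplementedError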