rem
stringlengths
0
322k
add
stringlengths
0
2.05M
context
stringlengths
8
228k
if self.replace[1]: newattrs[self.replace[1][0]] = self.replace[1][1] else:
for f in self.replace[1].split(): if '=' in self.replace[1]: k,v = f.split('=') newattrs[k] = v else: newattrs[self.replace[1]] = None elif part==ATTRVAL:
def filter_tag (self, tag, attrs): #debug(NIGHTMARE, "rule %s filter_tag" % self.title) part = self.replace[0] #debug(NIGHTMARE, "original tag", `tag`, "attrs", attrs) #debug(NIGHTMARE, "replace", num_part(part), "with", `self.replace[1]`) if part==TAGNAME: return (STARTTAG, self.replace[1], attrs) if part==TAG: return (DATA, self.replace[1]) if part==ENCLOSED: return (STARTTAG, tag, attrs) if part==COMPLETE: return [DATA, ""] newattrs = {} # look for matching tag attributes for attr,val in attrs.items(): ro = self.attrs.get(attr) if ro: mo = ro.search(val) if mo: if part==ATTR: # replace complete attr if self.replace[1]: newattrs[self.replace[1][0]] = self.replace[1][1] else: # part has to be ATTRVAL # Python has named submatches, and we can use them # the name 'replace' replaces the value, # all other names are given as format strings dict = mo.groupdict() if dict.has_key('replace'): newattrs[attr] = dict['replace'] else: newattrs[attr] = self.replace[1] % dict continue # nothing matched, just append the attribute as is newattrs[attr] = val #debug(NIGHTMARE, "filtered tag", tag, "attrs", newattrs) return (STARTTAG, tag, newattrs)
if self.replace[0]==ATTR: val = self.replace[0][0]+'="'+self.replace[0][1]+'"' else: val = self.replace[1] s += '>'+xmlify(val)+"</replace>\n"
s += '>'+xmlify(self.replace[1])+"</replace>\n"
def toxml (self): s = UrlRule.toxml(self) if self.tag!='a': s += '\n tag="%s"' % self.tag if not (self.attrs or self.replace or self.enclosed): return s+"/>\n" s += ">\n" for key,val in self.attrs.items(): s += "<attr" if key!='href': s += ' name="%s"' % key if val: s += ">"+xmlify(val)+"</attr>\n" else: s += "/>\n" if self.enclosed: s += "<enclosed>"+xmlify(self.enclosed)+"</enclosed>\n" if not self.replace[0]==COMPLETE or self.replace[1]: s += "<replace" if self.replace[0]!=COMPLETE: s += ' part="%s"' % num_part(self.replace[0]) if self.replace[1]: if self.replace[0]==ATTR: val = self.replace[0][0]+'="'+self.replace[0][1]+'"' else: val = self.replace[1] s += '>'+xmlify(val)+"</replace>\n" else: s += "/>\n" return s + "</rewrite>"
print "XXX", currule
def _form_selrule (index): """ Select a rule. """ try: index = int(index) global currule currule = [r for r in curfolder.rules if r.oid == index][0] print "XXX", currule # fill ruletype flags for rt in rulenames: ruletype[rt] = (currule.name == rt) # XXX this side effect is bad :( # fill part flags if currule.name == u"htmlrewrite": global curparts curparts = {} for i, part in enumerate(partvalnames): curparts[part] = (currule.part == i) elif currule.name == u"xmlrewrite": global curreplacetypes curreplacetypes = {} for name, num in replacetypenums.items(): curreplacetypes[name] = (currule.replacetypenum == num) elif currule.name == u"header": global curfilterstage, curheaderaction curfilterstage = { u'both': currule.filterstage == u'both', u'request': currule.filterstage == u'request', u'response': currule.filterstage == u'response', } curheaderaction = { u'add': currule.action == u'add', u'replace': currule.action == u'replace', u'remove': currule.action == u'remove', } except (ValueError, IndexError, OverflowError): error['ruleindex'] = True
self.request = "%s %s %s" % (self.method, self.url, self.protocol) wc.log.debug(wc.LOG_PROXY, "%s request %r", self, self.request)
request = "%s %s %s" % (self.method, self.url, self.protocol) wc.log.debug(wc.LOG_PROXY, "%s request %r", self, request)
def process_request (self): """read request, split it up and filter it""" # One newline ends request i = self.recv_buffer.find('\r\n') if i < 0: return # self.read(i) is not including the newline self.request = self.read(i) # basic request checking (more will be done below) try: self.method, self.url, protocol = self.request.split() except ValueError: self.error(400, _("Can't parse request")) return if not self.allow.method(self.method): self.error(405, _("Method Not Allowed")) return # fix broken url paths self.url = wc.url.url_norm(self.url)[0] if not self.url: self.error(400, _("Empty URL")) return self.protocol = wc.proxy.fix_http_version(protocol) self.http_ver = wc.proxy.get_http_version(self.protocol) # build request self.request = "%s %s %s" % (self.method, self.url, self.protocol) wc.log.debug(wc.LOG_PROXY, "%s request %r", self, self.request) # filter request attrs = wc.filter.get_filterattrs(self.url, wc.filter.STAGE_REQUEST) self.request = wc.filter.applyfilter(self.request, "finish", attrs) # final request checking if not self.fix_request(): return wc.log.info(wc.LOG_ACCESS, '%s - %s - %s', self.addr[0], time.ctime(time.time()), self.request) self.state = 'headers'
self.request = wc.filter.applyfilter(self.request, "finish", attrs)
self.request = wc.filter.applyfilter(request, "finish", attrs)
def process_request (self): """read request, split it up and filter it""" # One newline ends request i = self.recv_buffer.find('\r\n') if i < 0: return # self.read(i) is not including the newline self.request = self.read(i) # basic request checking (more will be done below) try: self.method, self.url, protocol = self.request.split() except ValueError: self.error(400, _("Can't parse request")) return if not self.allow.method(self.method): self.error(405, _("Method Not Allowed")) return # fix broken url paths self.url = wc.url.url_norm(self.url)[0] if not self.url: self.error(400, _("Empty URL")) return self.protocol = wc.proxy.fix_http_version(protocol) self.http_ver = wc.proxy.get_http_version(self.protocol) # build request self.request = "%s %s %s" % (self.method, self.url, self.protocol) wc.log.debug(wc.LOG_PROXY, "%s request %r", self, self.request) # filter request attrs = wc.filter.get_filterattrs(self.url, wc.filter.STAGE_REQUEST) self.request = wc.filter.applyfilter(self.request, "finish", attrs) # final request checking if not self.fix_request(): return wc.log.info(wc.LOG_ACCESS, '%s - %s - %s', self.addr[0], time.ctime(time.time()), self.request) self.state = 'headers'
data = wc.filter.applyfilter("", "finish", attrs)
data = wc.filter.applyfilter(data, "finish", attrs)
def process_content (self): """read and filter client request content""" data = self.read(self.bytes_remaining) if self.bytes_remaining is not None: # Just pass everything through to the server # NOTE: It's possible to have 'chunked' encoding here, # and then the current system of counting bytes remaining # won't work; we have to deal with chunks self.bytes_remaining -= len(data) is_closed = False for decoder in self.decoders: data = decoder.decode(data) if not is_closed: is_closed = decoder.closed for stage in FilterStages: attrs = wc.filter.get_filterattrs(self.url, stage, clientheaders=self.clientheaders, headers=self.headers) data = wc.filter.applyfilter(data, "filter", attrs) self.content += data underflow = self.bytes_remaining is not None and \ self.bytes_remaining < 0 if underflow: wc.log.warn(wc.LOG_PROXY, "client received %d bytes more than content-length", -self.bytes_remaining) if is_closed or self.bytes_remaining <= 0: for stage in FilterStages: attrs = wc.filter.get_filterattrs(self.url, stage, clientheaders=self.clientheaders, headers=self.headers) data = wc.filter.applyfilter("", "finish", attrs) self.content += data if self.content and not self.headers.has_key('Content-Length'): self.headers['Content-Length'] = "%d\r" % len(self.content) # We're done reading content self.state = 'receive' is_local = self.hostname in \ wc.proxy.dns_lookups.resolver.localhosts and \ self.port in (wc.configuration.config['port'], wc.configuration.config['sslport']) if is_local: is_public_doc = self.allow.public_document(self.document) if wc.configuration.config['adminuser'] and \ not wc.configuration.config['adminpass']: if is_local and is_public_doc: self.handle_local(is_public_doc=is_public_doc) else: # ignore request, must init admin password self.headers['Location'] = \ "http://%s:%d/adminpass.html\r" % \ (self.socket.getsockname()[0], wc.configuration.config['port']) self.error(302, _("Moved Temporarily")) elif is_local: # this is a direct proxy call 
self.handle_local(is_public_doc=is_public_doc) else: self.server_request()
return (2L<<n-1)-1
return (1L << (32 - n)) - 1
def suffix2mask (n): "return a mask of n bits as a long integer" return (2L<<n-1)-1
return int(math.log(mask+1, 2))
return 32 - int(math.log(mask+1, 2))
def mask2suffix (mask): """return suff for given bit mask""" return int(math.log(mask+1, 2))
hosts = ["192.168.1.1/16"] hostmap = hosts2map(hosts) print hostmap print map2hosts(hostmap)
hosts, nets = hosts2map([ "192.168.2.1", "192.168.2.1/32", "192.168.2.1/31", "192.168.2.1/30", "192.168.2.1/29", "192.168.2.1/28", "192.168.2.1/27", "192.168.2.1/26", "192.168.2.1/25", "192.168.2.1/24", "192.168.2.1/23", "192.168.2.1/22", "192.168.2.1/21", "192.168.2.1/20", "192.168.2.1/19", "192.168.2.1/18", "192.168.2.1/17", "192.168.2.1/16", "192.168.2.1/15", "192.168.2.1/14", "192.168.2.1/13", "192.168.2.1/12", "192.168.2.1/11", "192.168.2.1/10", "192.168.2.1/9", "192.168.2.1/8", "192.168.2.1/7", "192.168.2.1/6", "192.168.2.1/5", "192.168.2.1/4", "192.168.2.1/3", "192.168.2.1/2", "192.168.2.1/1", "127.0.0.1/8" ]) for host in hosts: print "host: %s" % (host) for net, mask in nets: print "net: %s %s => %s/%s" % (net, mask, num2dq(net), mask2suffix(mask)) maps = map2hosts([hosts, nets]) for map in maps: print "map: %s" % (map)
def _test (): hosts = ["192.168.1.1/16"] hostmap = hosts2map(hosts) print hostmap print map2hosts(hostmap)
if buf.len >= self.minimal_size_bytes:
if buf.tell() >= self.minimal_size_bytes:
def filter (self, data, **attrs): """feed data to recognizer""" if not attrs.has_key('mimerecognizer_buf'): return data buf = attrs['mimerecognizer_buf'] buf.write(data) if buf.len >= self.minimal_size_bytes: return self.recognize(buf, attrs) return ''
if not self.allow.scheme(self.scheme): wc.log.warn(wc.LOG_PROXY, "%s forbidden scheme %r encountered", self, self.scheme)
if not self.allow.is_allowed(self.method, self.scheme, self.port): wc.log.warn(wc.LOG_PROXY, "Unallowed request %s", self.url)
def fix_request (self): # refresh with filtered request data self.method, self.url, self.protocol = self.request.split() # enforce a maximum url length if len(self.url) > 2048: wc.log.error(wc.LOG_PROXY, "%s request url length %d chars is too long", self, len(self.url)) self.error(400, _("URL too long"), txt=_('URL length limit is %d bytes.') % 2048) return False if len(self.url) > 255: wc.log.warn(wc.LOG_PROXY, "%s request url length %d chars is very long", self, len(self.url)) # and unquote again self.url = wc.url.url_norm(self.url)[0] self.scheme, self.hostname, self.port, self.document = \ wc.url.url_split(self.url) # fix missing trailing / if not self.document: self.document = '/' # some clients send partial URI's without scheme, hostname # and port to clients, so we have to handle this if not self.scheme: self.scheme = "https" if not self.allow.scheme(self.scheme): wc.log.warn(wc.LOG_PROXY, "%s forbidden scheme %r encountered", self, self.scheme) self.error(403, _("Forbidden")) return False # request is ok return True
wc.log.error(wc.LOG_PROXY, "%s got %s status %d %r",
wc.log.debug(wc.LOG_PROXY, "%s got %s status %d %r",
def server_response (self, server, response, status, headers): """ Follow redirects, and finish on errors. For HTTP status 2xx continue. """ self.server = server assert self.server.connected wc.log.debug(wc.LOG_PROXY, '%s server_response %r', self, response) version, status, msg = \ wc.http.parse_http_response(response, self.args[0]) # XXX check version wc.log.debug(wc.LOG_PROXY, '%s response %s %d %s', self, version, status, msg) if status in (302, 301): self.isredirect = True elif not (200 <= status < 300): wc.log.error(wc.LOG_PROXY, "%s got %s status %d %r", self, version, status, msg) self.finish() if headers.has_key('Transfer-Encoding'): # XXX don't look at value, assume chunked encoding for now wc.log.debug(wc.LOG_PROXY, '%s Transfer-encoding %r', self, headers['Transfer-encoding']) unchunker = wc.proxy.decoder.UnchunkStream.UnchunkStream(self) self.decoders.append(unchunker)
if line.startswith("install_") or line.startswith("config_"):
if line.startswith("install_") or \ line.startswith("config_") or \ line.startswith("template_"):
def fix_configdata (): """fix install and config paths in the config file""" name = "_webcleaner2_configdata.py" conffile = os.path.join(sys.prefix, "Lib", "site-packages", name) lines = [] for line in file(conffile): if line.startswith("install_") or line.startswith("config_"): lines.append(fix_install_path(line)) else: lines.append(line) f = file(conffile, "w") f.write("".join(lines)) f.close()
curfolder.oid = len(config['folderrules'])
if not config['folderrules']: curfolder.oid = 0 else: curfolder.oid = config['folderrules'][-1].oid+1
def _form_newfolder (foldername): if not foldername: error['newfolder'] = True return fd, filename = tempfile.mkstemp(".zap", "local_", ConfigDir, text=True) # select the new folder global curfolder curfolder = _FolderRule(title=foldername, desc="", disable=0, filename=filename) _register_rule(curfolder) _generate_sids(prefix="lc") curfolder.oid = len(config['folderrules']) curfolder.write() config['folderrules'].append(curfolder) _recalc_up_down(config['folderrules']) info['newfolder'] = True
curfolder.rules.remove(rule)
rules = curfolder.rules rules.remove(rule) for i in range(rule.oid, len(rules)): rules[i].oid = i curfolder.write()
def _form_removerule (rule): # XXX error handling curfolder.rules.remove(rule) global currule currule = None curfolder.write() info['removerule'] = True
curfolder.write()
def _form_removerule (rule): # XXX error handling curfolder.rules.remove(rule) global currule currule = None curfolder.write() info['removerule'] = True
def create_tcp_socket (self, sockinfo):
def create_tcp_socket (self):
def create_tcp_socket (self, sockinfo): """create tcp socket, connect to it and return socket object""" host = self.get('TCPAddr', 'localhost') port = int(self['TCPSocket']) sockinfo = get_sockinfo(host, port=port) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: sock.connect(sockinfo[0][4]) except socket.error: sock.close() raise return sock
"charset": wc.ConfigCharset,
"charset": wc.configuration.ConfigCharset,
def write_filters (res, filename): if os.path.exists(filename): remove(filename) zapfile = file(filename, 'w') d = { "charset": wc.ConfigCharset, "title_en": wc.XmlUtils.xmlquote("AdZapper filters"), "title_de": wc.XmlUtils.xmlquote("AdZapper Filter"), "desc_en": wc.XmlUtils.xmlquote("Automatically generated by adzap2wc.py from %s on %s"%(ADZAPPER_URL, date)), "desc_de": wc.XmlUtils.xmlquote("Automatisch erzeugt von adzap2wc.py aus %s am %s"%(ADZAPPER_URL, date)), } zapfile.write("""<?xml version="1.0" encoding="%(charset)s"?>
p.feed("""<!------>""")
s = """< a>""" for c in s: p.feed(c)
def _broken (): p = HtmlPrinter() p.feed("""<!------>""") p.flush()
data_url = "& "C:\\foo.mht!${PATH}/"+ "EXPLOIT.CHM::"+
data_url = "& "C:\\foo.mht!${PATH}/"+ \ "EXPLOIT.CHM::"+ \
def testITSVuln (self): """Microsoft Internet Explorer ITS Protocol Zone Bypass Vulnerability""" # To avoid virus alarms we obfuscate the exploit URL. This # code is harmless. data_url = "&#109;s-its:mhtml:file://"+ "C:\\foo.mht!${PATH}/"+ "EXPLOIT.CHM::"+ "/exploit.htm" self.filt("""<object data="%s">""" % data_url, """<object data="ms-its:mhtml:file:/C:/foo.mht">""")
for ro in self.mimes: if ro.match(mime): self.mime_cache[mime] = True else: self.mime_cache[mime] = False
self.mime_cache[mime] = \ [ro for ro in self.mimes if ro.match(mime)]
def applies_to_mime (self, mime): """ Ask if this filter applies to a mime type. """ if mime not in self.mime_cache: if not self.mimes: self.mime_cache[mime] = True elif mime is None: self.mime_cache[mime] = False else: for ro in self.mimes: if ro.match(mime): self.mime_cache[mime] = True else: self.mime_cache[mime] = False return self.mime_cache[mime]
if not data: return debug(HURT_ME_PLENTY, 'Proxy: read', len(data), '<=', self)
def handle_read (self): if not self.connected: # It's been closed (presumably recently) return
raise wc.filter.FilterProxyError((406, _("Not acceptable"),
raise wc.filter.FilterProxyError(406, _("Not acceptable"),
def size_error (self): """ Raise an exceptionto cause a 406 HTTP return code. """ wc.log.warn(wc.LOG_FILTER, "Virus filter size exceeded.") raise wc.filter.FilterProxyError((406, _("Not acceptable"), _("Maximum data size (%s) exceeded") % \ wc.strformat.strsize(VirusFilter.MAX_FILE_BYTES)))
wc.strformat.strsize(VirusFilter.MAX_FILE_BYTES)))
wc.strformat.strsize(VirusFilter.MAX_FILE_BYTES))
def size_error (self): """ Raise an exceptionto cause a 406 HTTP return code. """ wc.log.warn(wc.LOG_FILTER, "Virus filter size exceeded.") raise wc.filter.FilterProxyError((406, _("Not acceptable"), _("Maximum data size (%s) exceeded") % \ wc.strformat.strsize(VirusFilter.MAX_FILE_BYTES)))
print "XXX new rule", rule
def _form_newrule (rtype, lang): if rtype not in rulenames: error['newrule'] = True return # add new rule rule = _GetRuleFromName(rtype) rule.parent = curfolder rule.titles[lang] = _("No title") # compile data and register rule.compile_data() if config['development']: prefix = u"wc" else: prefix = u"lc" _generate_sids(prefix) curfolder.append_rule(rule) _recalc_up_down(curfolder.rules) curfolder.write() _reinit_filters() # select new rule _form_selrule(rule.oid) info['newrule'] = True print "XXX new rule", rule
"unsupported content encoding in %r", encoding)
"unsupported content encoding in %r", cenc)
def server_set_encoding_headers (server, filename=None): """ Set encoding headers. """ rewrite = server.is_rewrite() bytes_remaining = get_content_length(server.headers) to_remove = sets.Set() if server.headers.has_key('Transfer-Encoding'): to_remove.add('Transfer-Encoding') tencs = server.headers['Transfer-Encoding'].lower() for tenc in tencs.split(","): tenc = tenc.strip() if ";" in tenc: tenc = tenc.split(";", 1)[0] if not tenc or tenc == 'identity': continue if tenc == 'chunked': server.decoders.append(UnchunkStream.UnchunkStream(server)) elif tenc in ('x-gzip', 'gzip'): server.decoders.append(GunzipStream.GunzipStream(server)) elif tenc == 'deflate': server.decoders.append(DeflateStream.DeflateStream(server)) else: wc.log.warn(wc.LOG_PROXY, "unsupported transfer encoding in %r", tencs) if server.headers.has_key("Content-Length"): wc.log.warn(wc.LOG_PROXY, 'Transfer-Encoding should not have Content-Length') to_remove.add("Content-Length") bytes_remaining = None if rewrite: to_remove.add('Content-Length') remove_headers(server.headers, to_remove) if not server.headers.has_key('Content-Length'): server.headers['Connection'] = 'close\r' if not rewrite: # only decompress on rewrite return bytes_remaining to_remove = sets.Set() #if server.protocol == "HTTP/1.1": # # To make pipelining possible, enable chunked encoding. 
# server.headers['Transfer-Encoding'] = "chunked\r" # server.encoders.append(ChunkStream.ChunkStream(server)) # Compressed content (uncompress only for rewriting modules) if server.headers.has_key('Content-Encoding'): to_remove.add('Content-Encoding') cencs = server.headers['Content-Encoding'].lower() for cenc in cencs.split(","): cenc = cenc.strip() if ";" in cenc: cenc = cenc.split(";", 1)[0] if not cenc or cenc == 'identity': continue if filename is not None and \ (filename.endswith(".gz") or filename.endswith(".tgz")): continue # note: do not gunzip .gz files if cenc in ('gzip', 'x-gzip'): server.decoders.append(GunzipStream.GunzipStream()) elif cenc == 'deflate': server.decoders.append(DeflateStream.DeflateStream()) else: wc.log.warn(wc.LOG_PROXY, "unsupported content encoding in %r", encoding) # remove no-transform cache control if server.headers.get('Cache-Control', '').lower() == 'no-transform': to_remove.add('Cache-Control') # add warning server.headers['Warning'] = "214 Transformation applied\r" remove_headers(server.headers, to_remove) return bytes_remaining
if data and self.statuscode != 407:
if data and self.statuscode != 407 and hasattr(self.client, "server_content"):
def flush (self): """ Flush data of decoders (if any) and filters and write it to the client. return True if flush was successful. """ assert None == wc.log.debug(wc.LOG_PROXY, "%s HttpServer.flush", self) if not self.statuscode and self.method != 'CONNECT': wc.log.warn(wc.LOG_PROXY, "%s flush without status", self) return True data = self.flush_coders(self.decoders) try: for stage in FilterStages: data = wc.filter.applyfilter(stage, data, "finish", self.attrs) except wc.filter.FilterWait, msg: assert None == wc.log.debug(wc.LOG_PROXY, "%s FilterWait %s", self, msg) # the filter still needs some data # to save CPU time make connection unreadable for a while self.set_unreadable(1.0) return False except wc.filter.FilterRating, msg: assert None == wc.log.debug(wc.LOG_PROXY, "%s FilterRating from content %s", self, msg) self._show_rating_deny(str(msg)) return True data = self.flush_coders(self.encoders, data=data) # the client might already have closed if not self.client: return if self.defer_data: self.defer_data = False self.client.server_response(self, self.response, self.statuscode, self.headers) if not self.client: return if data and self.statuscode != 407: self.client.server_content(data) return True
return p.flush()
return p.getoutput()
def filter (self, data, **attrs): if not attrs.has_key('rewriter_filter'): return data p = attrs['rewriter_filter'] p.feed(data) return p.flush()
return p.flush(finish=True)
p.flush() p.tagbuf2data() return p.getoutput()
def finish (self, data, **attrs): if not attrs.has_key('rewriter_filter'): return data p = attrs['rewriter_filter'] # note: feed even if data is empty p.feed(data) return p.flush(finish=True)
return unicode(htmlentitydefs.entitydefs.get(ent, s))
entdef = htmlentitydefs.entitydefs.get(ent) if entdef is None: return s return entdef.decode("iso8859-1")
def _resolve_html_entity (mo): """resolve html entity, helper function for resolve_html_entities""" ent = mo.group("entity") s = mo.group() return unicode(htmlentitydefs.entitydefs.get(ent, s))
body = random_chars(self.body_length) data = 'HTTP/1.1 200 OK\r\n'
self.server.log.write("server got request path %r\n"%self.path) if not jsfiles.has_key(self.path): data = "HTTP/1.1 404 Oops\r\n" body = "" else: data = 'HTTP/1.1 200 OK\r\n' body = jsfiles[self.path]
def do_GET (self): """send chunk data""" body = random_chars(self.body_length) data = 'HTTP/1.1 200 OK\r\n' data += "Date: %s\r\n" % self.date_time_string() data += "Transfer-Encoding: chunked\r\n" data += "Connection: close\r\n" data += "\r\n" data += "0000000000%s\r\n" % hex(self.body_length)[2:] data += "%s\r\n" % body data += "0\r\n\r\n" self.server.log.write("server will send %d bytes\n" % len(data)) self.print_lines(data) self.wfile.write(data)
data += "Transfer-Encoding: chunked\r\n"
def do_GET (self): """send chunk data""" body = random_chars(self.body_length) data = 'HTTP/1.1 200 OK\r\n' data += "Date: %s\r\n" % self.date_time_string() data += "Transfer-Encoding: chunked\r\n" data += "Connection: close\r\n" data += "\r\n" data += "0000000000%s\r\n" % hex(self.body_length)[2:] data += "%s\r\n" % body data += "0\r\n\r\n" self.server.log.write("server will send %d bytes\n" % len(data)) self.print_lines(data) self.wfile.write(data)
data += "0000000000%s\r\n" % hex(self.body_length)[2:] data += "%s\r\n" % body data += "0\r\n\r\n"
data += body
def do_GET (self): """send chunk data""" body = random_chars(self.body_length) data = 'HTTP/1.1 200 OK\r\n' data += "Date: %s\r\n" % self.date_time_string() data += "Transfer-Encoding: chunked\r\n" data += "Connection: close\r\n" data += "\r\n" data += "0000000000%s\r\n" % hex(self.body_length)[2:] data += "%s\r\n" % body data += "0\r\n\r\n" self.server.log.write("server will send %d bytes\n" % len(data)) self.print_lines(data) self.wfile.write(data)
class ChunkRequest (HttpRequest): def check_response (self, response): """check for 200 status and correct body data length""" if response.status!=200: return (self.VIOLATION, "Invalid HTTP status %r"%response.status) body = response.read() if len(body) != ChunkRequestHandler.body_length: return (self.VIOLATION, "Expected %d bytes in the body, but got %d bytes instead:\n%r" %\ (ChunkRequestHandler.body_length, len(body), body)) return (self.SUCCESS, "Ok")
class TestScriptSrc (StandardTest): """All these tests work with a _default_ filter configuration. If you change any of the *.zap filter configs, tests can fail..."""
def do_GET (self): """send chunk data""" body = random_chars(self.body_length) data = 'HTTP/1.1 200 OK\r\n' data += "Date: %s\r\n" % self.date_time_string() data += "Transfer-Encoding: chunked\r\n" data += "Connection: close\r\n" data += "\r\n" data += "0000000000%s\r\n" % hex(self.body_length)[2:] data += "%s\r\n" % body data += "0\r\n\r\n" self.server.log.write("server will send %d bytes\n" % len(data)) self.print_lines(data) self.wfile.write(data)
def name (self): return 'chunked-leading-zeros'
def init (self): wc.config = wc.Configuration() disable_rating_rules(wc.config) wc.config['filters'] = ['Rewriter',] wc.config.init_filter_modules() initlog(os.path.join("test", "logging.conf")) self.headers = WcMessage() self.headers['Content-Type'] = "text/html" self.log = sys.stdout self.serverthread = HttpServer.startServer(self.log, handler_class=JSRequestHandler) def shutdown (self): """Stop server, close log""" HttpServer.stopServer(self.log) def filt (self, data, result, name=""): attrs = get_filterattrs(name, [FILTER_RESPONSE_MODIFY], headers=self.headers) filtered = "" try: filtered += applyfilter(FILTER_RESPONSE_MODIFY, data, 'filter', attrs) except FilterException, msg: pass i = 1 while 1: try: filtered += applyfilter(FILTER_RESPONSE_MODIFY, "", 'finish', attrs) break except FilterException, msg: proxy_poll(timeout=max(0, run_timers())) i+=1 if i==100: raise FilterException("Slow") self.assertEqual(filtered, result)
def name (self): return 'chunked-leading-zeros'
class TestChunkedEncoding (ProxyTest): def __init__ (self, methodName='runTest'): ProxyTest.__init__(self, methodName=methodName) request = ChunkRequest() self.addTest(request, handler_class=ChunkRequestHandler)
def testScriptSrc1 (self): self.filt( """<script src="http://localhost:%d/1.js"></script> </html>""" % HttpServer.defaultconfig['port'], """<script type="text/javascript"> <!-- %s//--> </script> </html>""" % jsfiles['/1.js']) def testScriptSrc2 (self): self.filt( """<script src="http://localhost:%d/1.js"> </script> </html>""" % HttpServer.defaultconfig['port'], """<script type="text/javascript"> <!-- %s//--> </script> </html>""" % jsfiles['/1.js']) def testScriptSrc3 (self): """missing </script>""" self.filt( """<script src="http://localhost:%d/3.js"/> <script type="JavaScript"> <!-- a = 1 //--> </script> </html>""" % HttpServer.defaultconfig['port'], """ <script type="JavaScript"> <!-- a = 1 //--> </script> </html>""") def testRecursionSrc (self): self.filt( """<script language="JavaScript"> <!-- document.write('<SCR'+'IPT LANGUAGE="JavaScript1.1" ' ); document.write('SRC="http://localhost:%d/2.js">'); document.write('</SCR'+'IPT>'); //--> </script> </td> </tr> </table>""" % HttpServer.defaultconfig['port'], """ </td> </tr> </table>""")
def name (self): return 'chunked-leading-zeros'
unittest.main(defaultTest='TestChunkedEncoding')
unittest.main(defaultTest='TestScriptSrc')
def __init__ (self, methodName='runTest'): ProxyTest.__init__(self, methodName=methodName) request = ChunkRequest() self.addTest(request, handler_class=ChunkRequestHandler)
suite = unittest.makeSuite(TestChunkedEncoding, 'test')
suite = unittest.makeSuite(TestScriptSrc, 'test')
def __init__ (self, methodName='runTest'): ProxyTest.__init__(self, methodName=methodName) request = ChunkRequest() self.addTest(request, handler_class=ChunkRequestHandler)
return self.recognize(buf)
return self.recognize(buf, attrs)
def finish (self, data, **attrs): """feed data to recognizer""" if not attrs.has_key('mimerecognizer_buf'): return data buf = attrs['mimerecognizer_buf'] buf.write(data) return self.recognize(buf)
if mime != attrs['mime']:
if not attrs['mime'].startswith(mime):
def recognize (self, buf, attrs): # note: recognizing a mime type fixes exploits like # CVE-2002-0025 and CVE-2002-0024 try: mime = wc.magic.classify(buf) if mime != attrs['mime']: wc.log.warn(wc.LOG_FILTER, "Adjusting MIME %r -> %r", attrs['mime'], mime) attrs['headers']['data']['Content-Type'] = "%s\r" % mime except StandardError, msg: wc.log.exception(wc.LOG_FILTER, "Mime recognize error") data = buf.getvalue() buf.close() del attrs['mimerecognizer_buf'] return data
del attrs['mimerecognizer_buf']
def recognize (self, buf, attrs): # note: recognizing a mime type fixes exploits like # CVE-2002-0025 and CVE-2002-0024 try: mime = wc.magic.classify(buf) if mime != attrs['mime']: wc.log.warn(wc.LOG_FILTER, "Adjusting MIME %r -> %r", attrs['mime'], mime) attrs['headers']['data']['Content-Type'] = "%s\r" % mime except StandardError, msg: wc.log.exception(wc.LOG_FILTER, "Mime recognize error") data = buf.getvalue() buf.close() del attrs['mimerecognizer_buf'] return data
lib_dir = distutils.get_python_lib(plat_specific=1)
lib_dir = distutils.sysconfig.get_python_lib(plat_specific=1)
def install_shortcuts (): """create_shortcut(target, description, filename[, arguments[, \ workdir[, iconpath[, iconindex]]]]) file_created(path) - register 'path' so that the uninstaller removes it directory_created(path) - register 'path' so that the uninstaller removes it get_special_folder_location(csidl_string) """ try: prg = get_special_folder_path("CSIDL_COMMON_PROGRAMS") except OSError: try: prg = get_special_folder_path("CSIDL_PROGRAMS") except OSError, reason: # give up - cannot install shortcuts print "cannot install shortcuts: %s" % reason sys.exit() lib_dir = distutils.get_python_lib(plat_specific=1) dest_dir = os.path.join(prg, "WebCleaner") try: os.mkdir(dest_dir) directory_created(dest_dir) except OSError: pass target = os.path.join(sys.prefix, "RemoveWebCleaner.exe") path = os.path.join(dest_dir, "Uninstall WebCleaner.lnk") arguments = "-u " + os.path.join(sys.prefix, "WebCleaner-wininst.log") create_shortcut(target, "Uninstall WebCleaner", path, arguments) file_created(path)
script = os.path.join(script_dir, "webcleaner-certificates")
script = os.path.join(wc.ScriptDir, "webcleaner-certificates")
def install_certificates (): """generate SSL certificates for SSL gateway functionality""" pythonw = os.path.join(sys.prefix, "pythonw.exe") script = os.path.join(script_dir, "webcleaner-certificates") execute(pythonw, script, ["install"])
script = os.path.join(script_dir, "webcleaner-certificates")
script = os.path.join(wc.ScriptDir, "webcleaner-certificates")
def remove_certificates (): """generate SSL certificates for SSL gateway functionality""" pythonw = os.path.join(sys.prefix, "pythonw.exe") script = os.path.join(script_dir, "webcleaner-certificates") execute(pythonw, script, ["remove"])
wc.log.warn(wc.LOG_JS,
wc.log.debug(wc.LOG_JS,
def js_end_element (self, item): """ Parse generated html for scripts. """ wc.log.debug(wc.LOG_JS, "%s js_end_element buf %r", self, self.htmlparser.tagbuf) if len(self.htmlparser.tagbuf)<2: # syntax error, ignore wc.log.warn(wc.LOG_JS, "JS syntax error, self.tagbuf %r", self.htmlparser.tagbuf) return if self.js_src: wc.log.debug(wc.LOG_JS, "JS src, self.tagbuf %r", self.htmlparser.tagbuf) del self.htmlparser.tagbuf[-1] if len(self.htmlparser.tagbuf)<2: # syntax error, ignore wc.log.warn(wc.LOG_JS, "JS end, self.tagbuf %s", self.htmlparser.tagbuf) return if len(self.htmlparser.tagbuf) > 2 and \ self.htmlparser.tagbuf[-3][0] == \ wc.filter.html.STARTTAG and \ self.htmlparser.tagbuf[-3][1] == 'script': del self.htmlparser.tagbuf[-1] if len(self.htmlparser.tagbuf)<2 or \ self.htmlparser.tagbuf[-1][0] != \ wc.filter.html.DATA or \ self.htmlparser.tagbuf[-2][0] != \ wc.filter.html.STARTTAG or \ self.htmlparser.tagbuf[-2][1] != 'script': # syntax error, ignore return js_ok, js_lang = wc.js.get_js_data(self.htmlparser.tagbuf[-2][2]) if not js_ok: # no JavaScript, add end tag and ignore self.htmlparser.tagbuf.append(item) return ver = wc.js.get_js_ver(js_lang) # get script data script = self.htmlparser.tagbuf[-1][1].strip() # remove html comments script = wc.js.remove_html_comments(script) if not script: # again, ignore an empty script del self.htmlparser.tagbuf[-1] del self.htmlparser.tagbuf[-1] return # put correctly quoted script data into buffer script = wc.js.clean(script, jscomments=self.jscomments) self.htmlparser.tagbuf[-1][1] = script # execute script self.jsScript(script, ver, item)
res = {}
res = []
def parse_adzapper_file (filename): res = {} is_comment = re.compile('^\s*(#.*)?$').match content = False # skip content until __DATA__ marker for line in open(filename): if not content: content = line.startswith('__DATA__') elif not is_comment(line): parse_adzapper_line(line.strip(), res) return res
adclass, pattern = line.split(None, 1) res.setdefault(adclass.lower(), []).append(pattern) def write_filters (ads):
res.append(line.split(None, 1)) def write_filters (res):
def parse_adzapper_line (line, res): adclass, pattern = line.split(None, 1) res.setdefault(adclass.lower(), []).append(pattern)
for adclass, pattern in res.items(): pattern = convert_adzapper_pattern(pattern) if adclass=='pass': write_allow(pattern) elif adclass='print':
for adclass, pattern in res: if adclass=='NOZAP': continue elif adclass=='PASS': pattern = convert_adzapper_pattern(pattern) write_allow(zapfile, adclass, pattern) elif adclass=='PRINT':
def write_filters (ads): filename = os.path.join("config", "adzapper.zap") if os.path.exists(filename): remove(filename) zapfile = file(filename, 'w') d = {"title": xmlify("AdZapper filters"), "desc": xmlify("Automatically generated on %s" % date), } zapfile.write("""<?xml version="1.0"?>
pattern = re.sub(r"[^.]*[^?]", pattern, "[^/]*")
pattern = re.sub(r"([^.])\*([^?])", r"\1[^/]*\2", pattern)
def convert_adzapper_pattern (pattern): pattern = pattern.replace(".", "\\.") pattern = pattern.replace("?", "\\?") pattern = pattern.replace("**", ".*?") pattern = re.sub(r"[^.]*[^?]", pattern, "[^/]*") return pattern
replace = re.sub(r"$(\d)", replace, r"\\1")
replace = re.sub(r"\$(\d)", r"\\1", replace)
def convert_adzapper_replace (replace): # replace Perl back references with Python ones replace = re.sub(r"$(\d)", replace, r"\\1") return replace
def write_allow (zapfile, pattern): title = "AdZapper PASS filter" desc = "Automatically generated, you should not edit this filter." scheme, host, path, query, fragment = urlparse.urlsplit(pattern) d = locals() for key, value in d: d[key] = xmlify(value)
def write_allow (zapfile, adclass, pattern): d = get_rule_dict(adclass, pattern)
def write_allow (zapfile, pattern): title = "AdZapper PASS filter" desc = "Automatically generated, you should not edit this filter." scheme, host, path, query, fragment = urlparse.urlsplit(pattern) d = locals() for key, value in d: d[key] = xmlify(value) zapfile.write("""<allow title="%(title)s" desc="%(desc)s" scheme="%(scheme)s" host="%(host)s" path="%(path)s" query="%(query)s" fragment="%(fragment)s"/>
scheme="%(scheme)s" host="%(host)s" path="%(path)s" query="%(query)s" fragment="%(fragment)s"/>
url="%(url)s"
def write_allow (zapfile, pattern): title = "AdZapper PASS filter" desc = "Automatically generated, you should not edit this filter." scheme, host, path, query, fragment = urlparse.urlsplit(pattern) d = locals() for key, value in d: d[key] = xmlify(value) zapfile.write("""<allow title="%(title)s" desc="%(desc)s" scheme="%(scheme)s" host="%(host)s" path="%(path)s" query="%(query)s" fragment="%(fragment)s"/>
title = "AdZapper %s filter" % adclass desc = "Automatically generated, you should not edit this filter." scheme, host, path, query, fragment = urlparse.urlsplit(pattern) d = locals() for key, value in d: d[key] = xmlify(value)
d = get_rule_dict(adclass, pattern)
def write_block (zapfile, adclass, pattern, replacement=None): title = "AdZapper %s filter" % adclass desc = "Automatically generated, you should not edit this filter." scheme, host, path, query, fragment = urlparse.urlsplit(pattern) d = locals() for key, value in d: d[key] = xmlify(value) zapfile.write("""<block title="%(title)s" desc="%(desc)s" scheme="%(scheme)s" host="%(host)s" path="%(path)s" query="%(query)s" fragment="%(fragment)s" """ % d) if replacement: zapfile.write(">%(replacement)s</block>" % d) else: zapfile.write("/>") zapfile.write("\n")
scheme="%(scheme)s" host="%(host)s" path="%(path)s" query="%(query)s" fragment="%(fragment)s" """ % d) if replacement: zapfile.write(">%(replacement)s</block>" % d)
url="%(url)s" """ % d) if replacement is not None: zapfile.write(">%s</block>" % xmlify(replacement))
def write_block (zapfile, adclass, pattern, replacement=None): title = "AdZapper %s filter" % adclass desc = "Automatically generated, you should not edit this filter." scheme, host, path, query, fragment = urlparse.urlsplit(pattern) d = locals() for key, value in d: d[key] = xmlify(value) zapfile.write("""<block title="%(title)s" desc="%(desc)s" scheme="%(scheme)s" host="%(host)s" path="%(path)s" query="%(query)s" fragment="%(fragment)s" """ % d) if replacement: zapfile.write(">%(replacement)s</block>" % d) else: zapfile.write("/>") zapfile.write("\n")
f = file(proxyconf_file())
f = file(proxyconf_file(), 'w')
def write_proxyconf (self): """write proxy configuration""" f = file(proxyconf_file()) f.write("""<?xml version="1.0"?>
"""
""")
def write_proxyconf (self): """write proxy configuration""" f = file(proxyconf_file()) f.write("""<?xml version="1.0"?>
if self.allowedhosts:
if self['allowedhosts']:
def write_proxyconf (self): """write proxy configuration""" f = file(proxyconf_file()) f.write("""<?xml version="1.0"?>
from glob import glob
def read_filterconf (self): """read filter rules""" from glob import glob # filter configuration for f in filterconf_files(): ZapperParser().parse(f, self) for f in self['rules']: f.sort() self['rules'].sort() filter.rules.FolderRule.recalc_oids(self['rules'])
if self.scheme != 'https':
if scheme != 'https':
def is_allowed (self, method, scheme, port): if not self.method(method): wc.log.warn(wc.LOG_PROXY, "illegal method %s", method) return False if scheme not in self.schemes: wc.log.warn(wc.LOG_PROXY, "illegal scheme %s", scheme) return False if method == 'CONNECT': # CONNECT method sanity if port not in self.connect_ports: wc.log.warn(wc.LOG_PROXY, "illegal CONNECT port %d", port) return False if self.scheme != 'https': wc.log.warn(wc.LOG_PROXY, "illegal CONNECT scheme %d", scheme) return False else: # all other methods if port not in self.http_ports: wc.log.warn(wc.LOG_PROXY, "illegal port %d", port) return False return True
p.feed("<hTml>")
def _test(): p = HtmlPrinter() #p.feed("<hTml>") p.feed("<a href>") #p.feed("<a href=''>") #p.feed('<a href="">') #p.feed("<a href='a'>") #p.feed('<a href="a">') p.feed("<a href=a>") #p.feed("<a href='\"'>") #p.feed("<a href=\"'\">") #p.feed("<a href=' '>") #p.feed("<a href=a href=b>") #p.feed("<a/>") #p.feed("<a href/>") #p.feed("<a href=a />") #p.feed("</a>") #p.feed("<?bla foo?>") #p.feed("<?bla?>") #p.feed("<!-- - comment -->") #p.feed("<!---->") #p.feed("<!DOCTYPE \"vla foo>") p.flush()
p.feed("<a href=''>") p.feed('<a href="">') p.feed("<a href='a'>") p.feed('<a href="a">')
def _test(): p = HtmlPrinter() #p.feed("<hTml>") p.feed("<a href>") #p.feed("<a href=''>") #p.feed('<a href="">') #p.feed("<a href='a'>") #p.feed('<a href="a">') p.feed("<a href=a>") #p.feed("<a href='\"'>") #p.feed("<a href=\"'\">") #p.feed("<a href=' '>") #p.feed("<a href=a href=b>") #p.feed("<a/>") #p.feed("<a href/>") #p.feed("<a href=a />") #p.feed("</a>") #p.feed("<?bla foo?>") #p.feed("<?bla?>") #p.feed("<!-- - comment -->") #p.feed("<!---->") #p.feed("<!DOCTYPE \"vla foo>") p.flush()
p.feed("<a href='\"'>") p.feed("<a href=\"'\">") p.feed("<a href=' '>") p.feed("<a href=a href=b>") p.feed("<a/>") p.feed("<a href/>") p.feed("<a href=a />") p.feed("</a>") p.feed("<?bla foo?>") p.feed("<?bla?>") p.feed("<!-- - comment -->") p.feed("<!---->") p.feed("<!DOCTYPE \"vla foo>")
def _test(): p = HtmlPrinter() #p.feed("<hTml>") p.feed("<a href>") #p.feed("<a href=''>") #p.feed('<a href="">') #p.feed("<a href='a'>") #p.feed('<a href="a">') p.feed("<a href=a>") #p.feed("<a href='\"'>") #p.feed("<a href=\"'\">") #p.feed("<a href=' '>") #p.feed("<a href=a href=b>") #p.feed("<a/>") #p.feed("<a href/>") #p.feed("<a href=a />") #p.feed("</a>") #p.feed("<?bla foo?>") #p.feed("<?bla?>") #p.feed("<!-- - comment -->") #p.feed("<!---->") #p.feed("<!DOCTYPE \"vla foo>") p.flush()
p.feed("")
p.feed("<img bo\\\nrder=0>")
def _broken (): p = HtmlPrinter() p.feed("") p.flush()
_test()
_broken()
def _broken (): p = HtmlPrinter() p.feed("") p.flush()
if not challenge.startswith('NTLMSSP\x00'):
if "," in challenge: chal, remainder = challenge.split(",", 1) else: chal, remainder = challenge, "" chal = base64.decodestring(chal.strip()) if not chal.startswith('NTLMSSP\x00'): res['type'] = 0
def parse_ntlm_challenge (challenge): """parse both type0 and type2 challenges""" res = {} if not challenge.startswith('NTLMSSP\x00'): return res, challenge res['nonce'] = challenge[24:32] return res, challenge[40:]
res['nonce'] = challenge[24:32] return res, challenge[40:]
res['nonce'] = chal[24:32] res['type'] = 2 return res, remainder.strip()
def parse_ntlm_challenge (challenge): """parse both type0 and type2 challenges""" res = {} if not challenge.startswith('NTLMSSP\x00'): return res, challenge res['nonce'] = challenge[24:32] return res, challenge[40:]
pass
res = {} if "," in credentials: creds, remainder = credentials.split(",", 1) else: creds, remainder = credentials, "" creds = base64.decodestring(creds.strip()) if not creds.startswith('NTLMSSP\x00'): return res, remainder.strip() type = creds[8] if type==1: res['type'] = 1 domain_len = int(creds[16:18]) domain_off = int(creds[20:22]) host_len = int(creds[24:26]) host_off = int(creds[28:30]) res['host'] = creds[host_off:host_off+host_len] res['domain'] = creds[domain_off:domain_off+domain_len] elif type==3: res['type'] = 3 lm_res_len = int(creds[12:14]) else: return res, remainder.strip() return res, remainder.strip()
def parse_ntlm_credentials (credentials): """parse both type1 and type3 credentials""" # XXX pass
def create_message2 (flags="\x82\x01"):
def create_message2 ():
def create_message2 (flags="\x82\x01"): protocol = 'NTLMSSP\x00' #name type = '\x02' msglen = '\x28' nonce = "%08f" % (random.random()*10) assert nonce not in nonces nonces[nonce] = None zero2 = '\x00' * 2 zero7 = '\x00' * 7 zero8 = '\x00' * 8 return "%(protocol)s%(type)s%(zero7)s%(msglen)s%(zero2)s%(nonce)s%(zero8)s" % locals()
nonce = "%08f" % (random.random()*10)
zero2 = '\x00'*2 flags="\x82\x01" nonce = "%08d" % (random.random()*100000000)
def create_message2 (flags="\x82\x01"): protocol = 'NTLMSSP\x00' #name type = '\x02' msglen = '\x28' nonce = "%08f" % (random.random()*10) assert nonce not in nonces nonces[nonce] = None zero2 = '\x00' * 2 zero7 = '\x00' * 7 zero8 = '\x00' * 8 return "%(protocol)s%(type)s%(zero7)s%(msglen)s%(zero2)s%(nonce)s%(zero8)s" % locals()
zero2 = '\x00' * 2 zero7 = '\x00' * 7 zero8 = '\x00' * 8 return "%(protocol)s%(type)s%(zero7)s%(msglen)s%(zero2)s%(nonce)s%(zero8)s" % locals()
zero8 = '\x00'*8 return "%(protocol)s%(type)s%(zero7)s%(msglen)s%(zero2)s%(flags)s%(zero2)s%(nonce)s%(zero8)s" % locals()
def create_message2 (flags="\x82\x01"): protocol = 'NTLMSSP\x00' #name type = '\x02' msglen = '\x28' nonce = "%08f" % (random.random()*10) assert nonce not in nonces nonces[nonce] = None zero2 = '\x00' * 2 zero7 = '\x00' * 7 zero8 = '\x00' * 8 return "%(protocol)s%(type)s%(zero7)s%(msglen)s%(zero2)s%(nonce)s%(zero8)s" % locals()
protocol = 'NTLMSSP\000' type = '\003\000' head = protocol + type + '\000\000'
protocol = 'NTLMSSP\x00' type = '\x03' head = protocol + type + '\x00'*3
def create_message3 (nonce, domain, username, host, flags="\x82\x01", lm_hashed_pw=None, nt_hashed_pw=None, ntlm_mode=0): protocol = 'NTLMSSP\000' #name type = '\003\000' #type 3 head = protocol + type + '\000\000' domain_rec = record(domain) user_rec = record(username) host_rec = record(host) additional_rec = record('') if lm_hashed_pw: lm_rec = record(ntlm_procs.calc_resp(lm_hashed_pw, nonce)) else: lm_rec = record('') if nt_hashed_pw: nt_rec = record(ntlm_procs.calc_resp(nt_hashed_pw, nonce)) else: nt_rec = record('') # length of the head and five infos for LM, NT, Domain, User, Host domain_offset = len(head) + 5 * 8 # and unknown record info and flags' lenght if nltm_mode == 0: domain_offset = domain_offset + 8 + len(flags) # create info fields domain_rec.create_record_info(domain_offset) user_rec.create_record_info(domain_rec.next_offset) host_rec.create_record_info(user_rec.next_offset) lm_rec.create_record_info(host_rec.next_offset) nt_rec.create_record_info(lm_rec.next_offset) additional_rec.create_record_info(nt_rec.next_offset) # data part of the message 3 data_part = domain_rec.data + user_rec.data + host_rec.data + lm_rec.data + nt_rec.data # build message 3 m3 = head + lm_rec.record_info + nt_rec.record_info + \ domain_rec.record_info + user_rec.record_info + host_rec.record_info # Experimental feature !!! if ntlm_mode == 0: m3 += additional_rec.record_info + flags m3 += data_part # Experimental feature !!! if ntlm_mode == 0: m3 += additional_rec.data return m3
def parse_message2 (msg2): msg2 = base64.decodestring(msg2) nonce = msg2[24:32] return nonce
def parse_message2 (msg2): msg2 = base64.decodestring(msg2) # protocol = msg2[0:7] # msg_type = msg2[7:9] nonce = msg2[24:32] return nonce
def debug_message1 (msg): m_ = base64.decodestring(msg) m_hex = utils.str2hex(m_) res = '==============================================================\n' res += 'NTLM Message 1 report:\n' res += '---------------------------------\n' res += 'Base64: %s\n' % msg res += 'String: %s\n' % utils.str2prn_str(m_) res += 'Hex: %s\n' % m_hex cur = 0 res += '---------------------------------\n' cur_len = 12 res += 'Header %d/%d:\n%s\n\n' % (cur, cur_len, m_hex[0:24]) res += '%s\nmethod name 0/8\n%s res += '0x%s%s res += '%s cur += cur_len res += '---------------------------------\n' cur_len = 4 res += 'Flags %d/%d\n' % (cur, cur_len) res += flags(m_[cur: cur + cur_len]) cur += cur_len res += '---------------------------------\n' cur_len = len(m_) - cur res += 'Rest of the message %d/%d:\n' % (cur, cur_len) res += unknown_part(m_[cur: cur + cur_len]) res += '\nEnd of message 1 report.\n' return res def debug_message2 (msg): m_ = base64.decodestring(msg) m_hex = utils.str2hex(m_) res = '==============================================================\n' res += 'NTLM Message 2 report:\n' res += '---------------------------------\n' res += 'Base64: %s\n' % msg res += 'String: %s\n' % utils.str2prn_str(m_) res += 'Hex: %s\n' % m_hex cur = 0 res += '---------------------------------\n' cur_len = 12 res += 'Header %d/%d:\n%s\n\n' % (cur, cur_len, m_hex[0:24]) res += '%s\nmethod name 0/8\n%s res += '0x%s%s res += '%s cur += cur_len res += '---------------------------------\n' cur_len = 8 res += 'Lengths and Positions %d/%d\n%s\n\n' % (cur, cur_len, m_hex[cur * 2 :(cur + cur_len) * 2]) cur_len = 8 res += 'Domain ??? 
%d/%d\n' % (cur, cur_len) dom = item(m_[cur:cur+cur_len]) res += dom['string'] cur += cur_len res += '---------------------------------\n' cur_len = 4 res += 'Flags %d/%d\n' % (cur, cur_len) res += flags(m_[cur: cur + cur_len]) cur += cur_len res += '---------------------------------\n' cur_len = 8 res += 'NONCE %d/%d\n%s\n\n' % (cur, cur_len, m_hex[cur * 2 :(cur + cur_len) * 2]) cur += cur_len res += '---------------------------------\n' cur_len = dom['offset'] - cur res += 'Unknown data %d/%d:\n' % (cur, cur_len) res += unknown_part(m_[cur: cur + cur_len]) cur += cur_len res += '---------------------------------\n' cur_len = dom['len1'] res += 'Domain ??? %d/%d:\n' % (cur, cur_len) res += 'Hex: %s\n' % m_hex[cur * 2: (cur + cur_len) * 2] res += 'String: %s\n\n' % utils.str2prn_str(m_[cur : cur + cur_len]) cur += cur_len res += '---------------------------------\n' cur_len = len(m_) - cur res += 'Rest of the message %d/%d:\n' % (cur, cur_len) res += unknown_part(m_[cur: cur + cur_len]) res += '\nEnd of message 2 report.\n' return res def debug_message3 (msg): m_ = base64.decodestring(msg) m_hex = utils.str2hex(m_) res = '==============================================================\n' res += 'NTLM Message 3 report:\n' res += '---------------------------------\n' res += 'Base64: %s\n' % msg res += 'String: %s\n' % utils.str2prn_str(m_) res += 'Hex: %s\n' % m_hex cur = 0 res += '---------------------------------\n' cur_len = 12 res += 'Header %d/%d:\n%s\n\n' % (cur, cur_len, m_hex[0:24]) res += '%s\nmethod name 0/8\n%s res += '0x%s%s res += '%s cur += cur_len res += '---------------------------------\n' cur_len = 48 res += 'Lengths and Positions %d/%d\n%s\n\n' % (cur, cur_len, m_hex[cur * 2 :(cur + cur_len) * 2]) cur_len = 8 res += 'LAN Manager response %d/%d\n' % (cur, cur_len) lmr = item(m_[cur:cur+cur_len]) res += lmr['string'] cur += cur_len cur_len = 8 res += 'NT response %d/%d\n' % (cur, cur_len) ntr = item(m_[cur:cur+cur_len]) res += ntr['string'] cur += 
cur_len cur_len = 8 res += 'Domain string %d/%d\n' % (cur, cur_len) dom = item(m_[cur:cur+cur_len]) res += dom['string'] cur += cur_len cur_len = 8 res += 'User string %d/%d\n' % (cur, cur_len) username = item(m_[cur:cur+cur_len]) res += username['string'] cur += cur_len cur_len = 8 res += 'Host string %d/%d\n' % (cur, cur_len) host = item(m_[cur:cur+cur_len]) res += host['string'] cur += cur_len cur_len = 8 res += 'Unknow item record %d/%d\n' % (cur, cur_len) unknown = item(m_[cur:cur+cur_len]) res += unknown['string'] cur += cur_len res += '---------------------------------\n' cur_len = 4 res += 'Flags %d/%d\n' % (cur, cur_len) res += flags(m_[cur: cur + cur_len]) cur += cur_len res += '---------------------------------\n' cur_len = dom['len1'] + user['len1'] + host['len1'] res += 'Domain, User, Host strings %d/%d\n%s\n%s\n\n' % (cur, cur_len, m_hex[cur * 2 :(cur + cur_len) * 2], utils.str2prn_str(m_[cur:cur + cur_len])) cur_len = dom['len1'] res += '%s\n' % m_hex[cur * 2: (cur + cur_len) * 2] res += 'Domain name %d/%d:\n' % (cur, cur_len) res += '%s\n\n' % (utils.str2prn_str(m_[cur: (cur + cur_len)])) cur += cur_len cur_len = user['len1'] res += '%s\n' % m_hex[cur * 2: (cur + cur_len) * 2] res += 'User name %d/%d:\n' % (cur, cur_len) res += '%s\n\n' % (utils.str2prn_str(m_[cur: (cur + cur_len)])) cur += cur_len cur_len = host['len1'] res += '%s\n' % m_hex[cur * 2: (cur + cur_len) * 2] res += 'Host name %d/%d:\n' % (cur, cur_len) res += '%s\n\n' % (utils.str2prn_str(m_[cur: (cur + cur_len)])) cur += cur_len res += '---------------------------------\n' cur_len = lmr['len1'] res += 'LAN Manager response %d/%d\n%s\n\n' % (cur, cur_len, m_hex[cur * 2 :(cur + cur_len) * 2]) cur += cur_len res += '---------------------------------\n' cur_len = ntr['len1'] res += 'NT response %d/%d\n%s\n\n' % (cur, cur_len, m_hex[cur * 2 :(cur + cur_len) * 2]) cur += cur_len res += '---------------------------------\n' cur_len = len(m_) - cur res += 'Rest of the message %d/%d:\n' % 
(cur, cur_len) res += unknown_part(m_[cur: cur + cur_len]) res += '\nEnd of message 3 report.\n' return res
def unknown_part (bin_str): res = 'Hex : %s\n' % utils.str2hex(bin_str, ' ') res += 'String : %s\n' % utils.str2prn_str(bin_str, ' ') res += 'Decimal: %s\n' % utils.str2dec(bin_str, ' ') return res
return end
return end-size
def size_number (text): base = which_base(text) if base == 0: return 0 length = len(text) size = size_base(base) end = size+1 while end < length and text[end] in _hex[:base]: end += 1 return end
end = size_number(text)
end = start+size_number(text)
def convert (text): base = which_base(text) start = size_base(base) end = size_number(text) return base10(text[start:end], base)
if __name__ == '__main__': print "---" print "base10(\"FF\",16) = ", 255, "\tgot ", base10("FF",16) print "base10(\"77\", 8) = ", 63, "\tgot ", base10("77",8) print "---" print "convert(\"0xFF\" ) = ", 255, "\tgot ", convert("0xFF") print "convert(\"\\xFF\" ) = ", 255, "\tgot ", convert("\\xFF") print "convert(\"077\" ) = ", 63, "\tgot ", convert("077") print "convert(\"\\77\" ) = ", 63, "\tgot ", convert("\\77") print "convert(\"\\177E\" ) = ", 127, "\tgot ", convert("\\177E"), "The E is not used" print "---" print "size_number(\"100FFF\") = ", 3, "\tgot", size_number("100qwerty") print "size_number(\"\\7799\" ) = ", 3, "\tgot", size_number("\\77FF") print "size_number(\"\\XFFG\" ) = ", 3, "\tgot", size_number("\\XFFG") print "---" print "index_number(\"0XF\" ) = ", 0, "\tgot", index_number("0XF") print "index_number(\"\\XF\" ) = ", 0, "\tgot", index_number("\\XF") print "index_number(\"FF\\FFGG\" ) = ", -1, "\tgot", index_number("FF\\FFGG") print "index_number(\"FF\\7\" ) = ", 2, "\tgot", index_number("FF\\7") print "index_number(\"FFF\\XFFGG\" ) = ", 3, "\tgot", index_number("FFF\\XFFGG") print "index_number(\"\\\\\\XFFGG\" ) = ", 2, "\tgot", index_number("FF\\XFFGG") print "---" print "little2 ","1 ",little2(chr( 1)+chr(0)) print "little2 ","16 ",little2(chr(16)+chr(0)) print "---" print "big2","1 ",big2(chr(0)+chr(1)) print "big2","16 ",big2(chr(0)+chr(16)) print "---" print "little4","2147483649",little4(chr(1)+chr(0)+chr(0)+chr(128)) print "big4 ","2147483649",big4(chr(128)+chr(0)+chr(0)+chr(1))
def local4 (number): if sys.byteorder == 'big': return big4(number) return little4(number)
debug(PROXY, "%s closed, got empty data")
debug(PROXY, "%s closed, got empty data", self)
def handle_read (self): """read data from connection, put it into recv_buffer and call process_read""" assert self.connected
debug(FILTER, "blocked url %s", url)
debug(FILTER, "blocked url %s: %s", url, str(blocked))
def doit (self, data, **args): # note: data is the complete request method, url, httpver = data.split() debug(FILTER, "block filter working on url %s", `url`) if self.allowed(url): return data blocked = self.strict_whitelist or self.blocked(url) if blocked: debug(FILTER, "blocked url %s", url) if isinstance(blocked, basestring): doc = blocked # index 3, not 2! elif is_image(url): doc = self.block_image else: # XXX hmmm, what about CGI images? # make HTTP HEAD request? doc = self.block_url port = config['port'] if method=='CONNECT': return 'CONNECT https://localhost:%d%s HTTP/1.1'%(port, doc) return 'GET http://localhost:%d%s HTTP/1.1'%(port, doc) return data
self.log.debug("Translating %s" % `args`)
self.log.debug("Translating %s with %s", `args`, str(self.translator))
def cmdOutput (self, command, args): if self.translator is not None and self.translateContent: self.log.debug("Translating %s" % `args`) self.file.write(self.translator.gettext(args) % \ self.context.getVariableMap()) else: self.file.write (args) self.programCounter += 1
self.log.debug("Translating %s" % `result.value()`)
self.log.debug("Translating %s with %s", `result.value()`, str(self.translator))
def cmdI18nTranslate (self, command, args): """ args: translation string, translation args, endTagSymbol Translate tag content. If the translation string is an empty string, the translate message id is the tag content. Otherwise, the value of the tag content is the message id. """ # an empty string means use tag content as message id if args[0] == "": self.translateContent = 1 else: result = self.context.evaluate (args[0], self.originalAttributes) if not (result is None or result.isNothing() or result.isDefault()): if self.translator is not None: self.log.debug("Translating %s" % `result.value()`) self.tagContent = (0, self.translator.gettext(result.value()) % \ self.context.getVariableMap()) else: self.tagContent = (0, result.value()) self.movePCForward = self.symbolTable[args[1]] self.programCounter += 1
self.log.debug("Translating %s" % `attExpr`)
self.log.debug("Translating %s with %s", `attExpr`, str(self.translator))
def cmdI18nAttributes (self, command, args):
def _write_color_nt (text, color):
def _write_color_nt (fp, text, color):
def _write_color_nt (text, color): """ Assumes WConio has been imported at module level. """ oldcolor = WConio.gettextinfo()[4] oldtextcolor = oldcolor & 0x000F if ";" in color: control, color = color.split(";", 1) WConio.textcolor(WConioColor.get(color, oldtextcolor)) fp.write(text) WConio.textattr(oldcolor)
logging.getLogger(log).debug(msg, *args)
def debug (log, msg, *args): #logging.getLogger(log).debug("collected %d"%gc.collect()) #logging.getLogger(log).debug("objects %d"%len(gc.get_objects())) #logging.getLogger(log).debug("garbage %d"%len(gc.garbage)) #logging.getLogger(log).debug("Mem: %s"%usedmemory()) logging.getLogger(log).debug(msg, *args)
% tag)
)
def testImgWidthHeight (self): for tag in ("width", "height"): self.filt("""<img %s="9999">""" % tag, """<img %s="9999">""" % tag) self.filt("""<img %s="12345">""" % tag, """<img>""" % tag)
JSFilter(self.url, **opts)
return JSFilter(self.url, opts)
def new_instance (self, **opts): JSFilter(self.url, **opts)
super(SetList), self).__setitem__(key, value)
super(SetList, self).__setitem__(key, value)
def __setitem__ (self, key, value): if value not in self: super(SetList), self).__setitem__(key, value)
def do_install (): """ Install shortcuts and NT service. """ fix_configdata() import wc wc.init_i18n() install_shortcuts() install_certificates() install_service() restart_service() open_browser_config()
def fix_install_path (line): """ Replace placeholders written by bdist_wininst with those specified in win_path_scheme. """ key, eq, val = line.split() # unescape string (do not use eval()) val = val[1:-1].replace("\\\\", "\\") for d in win_path_scheme.keys(): # look for placeholders to replace oldpath, newpath = win_path_scheme[d] oldpath = "%s%s" % (os.sep, oldpath) if oldpath in val: val = val.replace(oldpath, newpath) val = os.path.join(sys.prefix, val) val = os.path.normpath(val) return "%s = %r%s" % (key, val, os.linesep)
def state_nt_service (name): """ Return status of NT service. """ try: return win32serviceutil.QueryServiceStatus(name)[1] except pywintypes.error, msg: print _("Service status error: %s") % str(msg) return None def install_service (): """ Install WebCleaner as NT service. """ import wc import wc.win32start oldargs = sys.argv print _("Installing %s service...") % wc.AppName sys.argv = ['webcleaner', 'install'] win32serviceutil.HandleCommandLine(wc.win32start.ProxyService) sys.argv = oldargs def remove_service (): import wc import wc.win32start oldargs = sys.argv print _("Removing %s service...") % wc.AppName sys.argv = ['webcleaner', 'remove'] win32serviceutil.HandleCommandLine(wc.win32start.ProxyService) sys.argv = oldargs def restart_service (): """ Restart WebCleaner NT service. """ stop_service() start_service() def stop_service (): """ Stop WebCleaner NT service (if it is running). """ import wc import wc.win32start print _("Stopping %s proxy...") % wc.AppName oldargs = sys.argv state = state_nt_service(wc.AppName) while state==win32service.SERVICE_START_PENDING: time.sleep(1) state = state_nt_service(wc.AppName) if state==win32service.SERVICE_RUNNING: sys.argv = ['webcleaner', 'stop'] win32serviceutil.HandleCommandLine(wc.win32start.ProxyService) state = state_nt_service(wc.AppName) while state==win32service.SERVICE_STOP_PENDING: time.sleep(1) state = state_nt_service(wc.AppName) sys.argv = oldargs def start_service (): """ Start WebCleaner NT service. """ import wc import wc.win32start print _("Starting %s proxy...") % wc.AppName oldargs = sys.argv sys.argv = ['webcleaner', 'start'] win32serviceutil.HandleCommandLine(wc.win32start.ProxyService) sys.argv = oldargs
def install_adminpassword (): """ Ask for admin password if not already set. """ if has_adminpassword(): return import wc import Tkinter as tk root = init_tk() import tkSimpleDialog class PasswordDialog (tkSimpleDialog.Dialog): """ Admin password dialog. """ def body(self, master): d = {"appname": wc.AppName} msg = _("""The administrator password protects the web configuration frontend of %s. The default username is "admin" (without the quotes). You have to enter a non-empty password. If you press cancel, the administrator password has to be entered manually (don't worry, the web interface will tell you how to do that).""") label = Label(master, text=msg % d, anchor=tk.W, justify=tk.LEFT) label.grid(row=0, columnspan=2, sticky=tk.W) label = Label(master, text=_("Password:")) label.grid(row=1, sticky=tk.W) self.pass_entry = Entry(master) self.pass_entry.grid(row=1, column=1) return self.pass_entry def apply(self): password = self.pass_entry.get() if password: save_adminpassword(password) else: print _("Not saving empty password.") title = _("%s administrator password") % wc.AppName PasswordDialog(root, title=title) def has_adminpassword (): """ Check if admin password is already set. """ return get_wc_config()["adminpass"] def save_adminpassword (password): """ Save new admin password to WebCleaner configuration. Also checks for invalid password format. """ import base64 import wc.strformat password = base64.b64encode(password) if not password or not wc.strformat.is_ascii(password): print _("Not saving binary password.") return config = get_wc_config() config["password"] = password config.write_proxyconf()
def state_nt_service (name): """ Return status of NT service. """ try: return win32serviceutil.QueryServiceStatus(name)[1] except pywintypes.error, msg: print _("Service status error: %s") % str(msg) return None
from Tkinter import tkMessageBox answer = tkMessageBox.askyesno(_("Purge local config"), _("""Do you want to remove your local filter rules? They can be re-used in other installations of %s, but are useless otherwise."""))
init_tk() import tkMessageBox answer = tkMessageBox.askyesno(_("%s config purge") % wc.AppName, _("""There are local filter rules in the configuration directory. Do you want to remove them? They can be re-used in other installations of %s, but are useless otherwise.""") % wc.AppName)
def purge_tempfiles (): """ Ask if user wants to purge local config files. """ files = glob.glob(os.path.join(wc.ConfigDir, "local_*.zap")) if not files: return from Tkinter import tkMessageBox answer = tkMessageBox.askyesno(_("Purge local config"), _("""Do you want to remove your local filter rules?
print _("Could not remove %r: %s") % (fname, str(msg))
print _("Could not remove file %r: %s") % (fname, str(msg)) def is_empty_dir (name): """ Check if given name is a non-empty directory. """ return os.path.isdir(name) and not os.listdir(name) def remove_empty_directories (dname): """ Remove empty directory structure. """ try: if is_empty_dir(dname): os.rmdir(dname) remove_empty_directories(os.path.dirname(dname)) except OSError, msg: print _("Could not remove directory %r: %s") % (dname, str(msg))
def remove_file (fname): """ Remove a single file if it exists. Errors are printed to stdout. """ if os.path.exists(fname): try: os.remove(fname) except OSError, msg: print _("Could not remove %r: %s") % (fname, str(msg))
se_offset_abs = "^\(([0\\\][xX][\dA-Fa-f]+|[0\\\][0-7]*|\d+)" \ "(\.[bslBSL])*\)" se_offset_add = "^\(([0\\\][xX][\dA-Fa-f]+|[0\\\][0-7]*|\d+)" \ "(\.[bslBSL])*([-+])([0\\\][xX][\dA-Fa-f]+|[0\\\][0-7]*|\d+)\)"
# Pre-compiled matchers for magic(5)-style offset specifications:
# se_offset_abs matches an absolute offset such as "(0x1c.l)",
# se_offset_add additionally allows an addend, e.g. "(0x1c.l+8)".
# Binding .match here hoists the compile out of the parsing loop.
# NOTE(review): the leading character class "[0\\\]" looks garbled --
# the escaped "]" keeps the class open so it also swallows "[xX]";
# verify against the original magic-file parser.
se_offset_abs = re.compile(
    r"^\(([0\\\][xX][\dA-Fa-f]+|[0\\\][0-7]*|\d+)" \
    r"(\.[bslBSL])*\)").match
se_offset_add = re.compile(
    r"^\(([0\\\][xX][\dA-Fa-f]+|[0\\\][0-7]*|\d+)" \
    r"(\.[bslBSL])*([-+])([0\\\][xX][\dA-Fa-f]+|[0\\\][0-7]*|\d+)\)").match
def dump (o, f):
    """Serialize object o into file f with the highest pickle protocol."""
    protocol = pickle.HIGHEST_PROTOCOL
    cPickle.dump(o, f, protocol)
match_abs = re.compile(self.se_offset_abs).match(text) match_add = re.compile(self.se_offset_add).match(text)
match_abs = self.se_offset_abs(text) match_add = self.se_offset_add(text)
def _offset (self, text): direct = self._direct_offset(text) offset_type = 'l' offset_delta = 0L offset_relatif = 0L
if line and not line.startswith(' part = self._split(line) while len(part) < 4: part.append('\b') level = self._level(part[0]) offset_string = self._strip_start('&', part[0][level:]) (direct, offset_type, offset_delta, offset_relatif) = \ self._offset(offset_string) (oper, mask, rest) = self._oper_mask(part[1]) full_type = self._strip_start('u', rest) endian = self._endian(full_type) kind = self._kind(full_type, endian) (test, result) = self._test_result(part[2]) data = self._data(kind, result) length = self._length(kind, data) mime = self._mime(part[3:]) self._leveldict[index] = level self._direct[index] = direct self._offset_type[index] = offset_type self._offset_delta[index] = offset_delta self._offset_relatif[index] = offset_relatif self._endiandict[index] = endian self._kinddict[index] = kind self._oper[index] = oper self._mask[index] = mask self._test[index] = test self._datadict[index] = data self._lengthdict[index] = length self._mimedict[index] = mime index += 1 self.entries = index
if not line or line.startswith(' continue part = self._split(line) while len(part) < 4: part.append(r'\b') level = self._level(part[0]) offset_string = self._strip_start('&', part[0][level:]) (direct, offset_type, offset_delta, offset_relatif) = \ self._offset(offset_string) (oper, mask, rest) = self._oper_mask(part[1]) full_type = self._strip_start('u', rest) endian = self._endian(full_type) kind = self._kind(full_type, endian) (test, result) = self._test_result(part[2]) data = self._data(kind, result) length = self._length(kind, data) mime = self._mime(part[3:]) self._leveldict[index] = level self._direct[index] = direct self._offset_type[index] = offset_type self._offset_delta[index] = offset_delta self._offset_relatif[index] = offset_relatif self._endiandict[index] = endian self._kinddict[index] = kind self._oper[index] = oper self._mask[index] = mask self._test[index] = test self._datadict[index] = data self._lengthdict[index] = length self._mimedict[index] = mime index += 1 self.entries = index
def read_magic (self, magic_file): self.magic = []
value = 0
value = data
def _convert (self, kind, endian, data): # Can raise StandardError and IOError value = 0
s = u'<Rule %s sid="%s"' % (self.get_name(),
s = u'<%s sid="%s"' % (self.get_name(),
def toxml (self):
    """
    Rule data as XML for storing, must be overridden in subclass.
    """
    # The element name is the rule's own name (get_name(), e.g.
    # "rewrite") -- subclasses close the element with that same name,
    # so a literal "Rule " prefix here would produce mismatched XML.
    s = u'<%s sid="%s"' % (self.get_name(),
                           wc.XmlUtils.xmlquoteattr(self.sid))
    if self.disable:
        s += u' disable="%d"' % self.disable
    return s
self.filt("""<frameset><frame src="aaa"><embed name="sp", style><applet hspace="file:\\">""", """<frameset><frame src="aaa"><embed name="sp", style><applet>""")
self.filt("""<frameset><frame src="aaa"><embed name="sp" style><applet hspace="file:\\\\">""", """<frameset><frame src="aaa"><embed name="sp" style><applet>""")
def testAppletHspace (self):
    """
    An applet hspace attribute holding a Windows file path is dropped
    by the filter (expected output has a bare <applet> tag).
    """
    # "file:\\\\" in this non-raw triple-quoted string yields the
    # literal "file:\\"; the previous "\\" collapsed to one backslash.
    # The stray comma after name="sp" was a typo in the fixture markup.
    self.filt("""<frameset><frame src="aaa"><embed name="sp" style><applet hspace="file:\\\\">""",
        """<frameset><frame src="aaa"><embed name="sp" style><applet>""")
self.jsScript(script, ver) return 1
return self.jsScript(script, ver)
def jsScriptSrc (self, url, language): if not url: return url = urlparse.urljoin(self.url, url) #debug(HURT_ME_PLENTY, "jsScriptSrc", url, language) try: script = urlutils.open_url(url).read() except: print >>sys.stderr, "exception fetching script url", `url` return if not script: return ver = 0.0 if language: mo = re.search(r'(?i)javascript(?P<num>\d\.\d)', language) if mo: ver = float(mo.group('num')) self.jsScript(script, ver) return 1
self.jsfilter.flush() self.data.append(self.jsfilter.flushbuf()) self.buffer += self.jsfilter.buffer self.rulestack += self.jsfilter.rulestack
if self.output_counter: self.jsfilter.flush() self.data.append(self.jsfilter.flushbuf()) self.buffer += self.jsfilter.buffer self.rulestack += self.jsfilter.rulestack
def jsScript (self, script, ver): """execute given script with javascript version ver""" #debug(HURT_ME_PLENTY, "jsScript", script, ver) self.jsEnv.attachListener(self) self.jsfilter = HtmlFilter(self.rules, self.url, comments=self.comments, javascript=self.javascript) self.jsEnv.executeScript(script, ver) self.jsEnv.detachListener(self) self.jsfilter.flush() self.data.append(self.jsfilter.flushbuf()) self.buffer += self.jsfilter.buffer self.rulestack += self.jsfilter.rulestack self.jsfilter = None
if not filtered and self.javascript and tag=='script': self.jsEndElement(tag)
if not filtered and self.javascript and tag=='script' and \ self.jsEndElement(tag): del self.buffer[-1] del self.buffer[-1]
def endElement (self, tag): """We know the following: if a rule matches, it must be the one on the top of the stack. So we look only at the top rule.
if not self.buffer: print >>sys.stderr, "empty buffer on </script>"
if len(self.buffer)<2:
def jsEndElement (self, tag):
    """parse generated html for scripts"""
    # Expected buffer tail for an inline script:
    # [..., (STARTTAG, 'script', ...), (DATA, script_text)].
    if not self.buffer:
        print >>sys.stderr, "empty buffer on </script>"
        return
    if self.buffer[-1][0]!=DATA:
        print >>sys.stderr, "missing data for </script>", self.buffer[-1:]
        return
    # The last DATA item is the script source; drop it from the buffer.
    script = self.buffer[-1][1].strip()
    del self.buffer[-1]
    if not (self.buffer and self.buffer[-1][0]==STARTTAG and \
            self.buffer[-1][1]=='script'):
        # there was a <script src="..."> already
        return
    # Remove the opening <script> tag too; the executed script's output
    # replaces the whole element.
    del self.buffer[-1]
    # Strip the HTML comment opener conventionally used to hide
    # scripts from very old browsers.
    if script.startswith("<!--"):
        script = script[4:].strip()
    if not script:
        return
    # Execute with default javascript version 0.0.
    self.jsScript(script, 0.0)