rem (string, 0–322k chars) | add (string, 0–2.05M chars) | context (string, 8–228k chars) |
---|---|---|
debug(BRING_IT_ON, "removing header", `h`)
|
def remove_headers (headers, to_remove): """utility function to remove entries from RFC822 headers""" for h in to_remove: if headers.has_key(h): debug(BRING_IT_ON, "removing header", `h`) del headers[h]
|
|
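This row touches `remove_headers`, a small utility that deletes a list of entries from an RFC 822 header mapping. A minimal sketch of the same idea in modern Python, assuming the headers object behaves like a mapping (the original relies on the Python 2-only `has_key`):

```python
def remove_headers(headers, to_remove):
    """Delete the named entries from an RFC 822 header mapping, if present."""
    for name in to_remove:
        if name in headers:   # replaces the Python 2 has_key() check
            del headers[name]
```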
HtmlParser.feed(self, data)
|
self.parser.feed(data)
|
def feed (self, data): """feed some data to the parser""" if self.state=='parse': # look if we must replay something if self.waited: self.waited = 0 waitbuf, self.waitbuf = self.waitbuf, [] self.replay(waitbuf) if self.state!='parse': return data = self.inbuf.getvalue() self.inbuf.close() self.inbuf = StringIO() if data: # only feed non-empty data #self._debug(NIGHTMARE, "feed", `data`) HtmlParser.feed(self, data) else: #self._debug(NIGHTMARE, "feed") pass else: # wait state --> put in input buffer #self._debug(NIGHTMARE, "wait") self.inbuf.write(data)
|
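The change in this row delegates to a wrapped parser (`self.parser.feed`) instead of calling the base class, while the surrounding `feed` method buffers incoming data during a `wait` state and replays it once parsing resumes. A simplified sketch of that buffer-and-replay pattern, assuming a `parser.feed()` method and a two-state machine; the names are illustrative, not the original API:

```python
import io

class BufferingFeeder:
    def __init__(self, parser):
        self.parser = parser
        self.state = "parse"
        self.inbuf = io.StringIO()

    def feed(self, data):
        if self.state != "parse":
            # waiting: stash the data for later
            self.inbuf.write(data)
            return
        # parsing: drain anything buffered while waiting, then feed
        buffered = self.inbuf.getvalue()
        if buffered:
            self.inbuf = io.StringIO()
        if buffered or data:
            self.parser.feed(buffered + data)
```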
HtmlParser.flush(self)
|
self.parser.flush()
|
def flush (self): #self._debug(HURT_ME_PLENTY, "flush") # flushing in wait state raises a filter exception if self.state=='wait': raise FilterException("HtmlFilter[%d]: still waiting for data"%self.level) HtmlParser.flush(self)
|
wc.set_debuglevel(wc.HURT_ME_PLENTY)
|
wc.set_debuglevel(wc.NIGHTMARE)
|
def _main(): fname = sys.argv[1] if fname=="-": f = sys.stdin else: f = file(fname) import wc, time wc.config = wc.Configuration() # set debug level wc.set_debuglevel(wc.HURT_ME_PLENTY) wc.config['filters'] = ['Replacer', 'Rewriter', 'BinaryCharFilter'] wc.config.init_filter_modules() from wc.proxy import proxy_poll, run_timers from wc.filter import FilterException attrs = wc.filter.initStateObjects(url=fname) filtered = "" data = f.read(1024) while data: try: filtered = wc.filter.applyfilter(wc.filter.FILTER_RESPONSE_MODIFY, data, 'filter', attrs) except FilterException, msg: pass data = f.read(1024) i = 1 while 1: try: filtered = wc.filter.applyfilter(wc.filter.FILTER_RESPONSE_MODIFY, "", 'finish', attrs) break except FilterException, msg: print "Test: finish: exception:", msg proxy_poll(timeout=max(0, run_timers())) i+=1 if i==99: print "Test: oooooops" break print filtered
|
filtered = wc.filter.applyfilter(wc.filter.FILTER_RESPONSE_MODIFY, data, 'filter', attrs)
|
filtered += wc.filter.applyfilter(wc.filter.FILTER_RESPONSE_MODIFY, data, 'filter', attrs)
|
def _main(): fname = sys.argv[1] if fname=="-": f = sys.stdin else: f = file(fname) import wc, time wc.config = wc.Configuration() # set debug level wc.set_debuglevel(wc.HURT_ME_PLENTY) wc.config['filters'] = ['Replacer', 'Rewriter', 'BinaryCharFilter'] wc.config.init_filter_modules() from wc.proxy import proxy_poll, run_timers from wc.filter import FilterException attrs = wc.filter.initStateObjects(url=fname) filtered = "" data = f.read(1024) while data: try: filtered = wc.filter.applyfilter(wc.filter.FILTER_RESPONSE_MODIFY, data, 'filter', attrs) except FilterException, msg: pass data = f.read(1024) i = 1 while 1: try: filtered = wc.filter.applyfilter(wc.filter.FILTER_RESPONSE_MODIFY, "", 'finish', attrs) break except FilterException, msg: print "Test: finish: exception:", msg proxy_poll(timeout=max(0, run_timers())) i+=1 if i==99: print "Test: oooooops" break print filtered
|
filtered = wc.filter.applyfilter(wc.filter.FILTER_RESPONSE_MODIFY,
|
filtered += wc.filter.applyfilter(wc.filter.FILTER_RESPONSE_MODIFY,
|
def _main(): fname = sys.argv[1] if fname=="-": f = sys.stdin else: f = file(fname) import wc, time wc.config = wc.Configuration() # set debug level wc.set_debuglevel(wc.HURT_ME_PLENTY) wc.config['filters'] = ['Replacer', 'Rewriter', 'BinaryCharFilter'] wc.config.init_filter_modules() from wc.proxy import proxy_poll, run_timers from wc.filter import FilterException attrs = wc.filter.initStateObjects(url=fname) filtered = "" data = f.read(1024) while data: try: filtered = wc.filter.applyfilter(wc.filter.FILTER_RESPONSE_MODIFY, data, 'filter', attrs) except FilterException, msg: pass data = f.read(1024) i = 1 while 1: try: filtered = wc.filter.applyfilter(wc.filter.FILTER_RESPONSE_MODIFY, "", 'finish', attrs) break except FilterException, msg: print "Test: finish: exception:", msg proxy_poll(timeout=max(0, run_timers())) i+=1 if i==99: print "Test: oooooops" break print filtered
|
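The two rows above capture the same fix: `filtered = ...` becomes `filtered += ...`, so the output of every 1 kB chunk is accumulated instead of keeping only the last chunk. A reduced sketch of the read-filter-accumulate loop, with `apply_filter` standing in for `wc.filter.applyfilter`:

```python
def filter_file(f, apply_filter, chunk_size=1024):
    """Read a file in chunks and concatenate the filtered output."""
    filtered = ""
    data = f.read(chunk_size)
    while data:
        filtered += apply_filter(data)   # accumulate, do not overwrite
        data = f.read(chunk_size)
    return filtered
```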
print "Test: finish: exception:", msg
|
print >>sys.stderr, "Test: finish: exception:", msg
|
def _main(): fname = sys.argv[1] if fname=="-": f = sys.stdin else: f = file(fname) import wc, time wc.config = wc.Configuration() # set debug level wc.set_debuglevel(wc.HURT_ME_PLENTY) wc.config['filters'] = ['Replacer', 'Rewriter', 'BinaryCharFilter'] wc.config.init_filter_modules() from wc.proxy import proxy_poll, run_timers from wc.filter import FilterException attrs = wc.filter.initStateObjects(url=fname) filtered = "" data = f.read(1024) while data: try: filtered = wc.filter.applyfilter(wc.filter.FILTER_RESPONSE_MODIFY, data, 'filter', attrs) except FilterException, msg: pass data = f.read(1024) i = 1 while 1: try: filtered = wc.filter.applyfilter(wc.filter.FILTER_RESPONSE_MODIFY, "", 'finish', attrs) break except FilterException, msg: print "Test: finish: exception:", msg proxy_poll(timeout=max(0, run_timers())) i+=1 if i==99: print "Test: oooooops" break print filtered
|
if server.headers.has_key('Transfer-Encoding'): to_remove.add('Transfer-Encoding') tencs = server.headers['Transfer-Encoding'].lower() for tenc in tencs.split(","): tenc = tenc.strip() if ";" in tenc: tenc = tenc.split(";", 1)[0] if not tenc or tenc == 'identity': continue if tenc == 'chunked': server.decoders.append(UnchunkStream.UnchunkStream(server)) elif tenc in ('x-gzip', 'gzip'): server.decoders.append(GunzipStream.GunzipStream(server)) elif tenc == 'deflate': server.decoders.append(DeflateStream.DeflateStream(server)) else: wc.log.warn(wc.LOG_PROXY, "unsupported transfer encoding in %r", tencs) if server.headers.has_key("Content-Length"): wc.log.warn(wc.LOG_PROXY, 'Transfer-Encoding should not have Content-Length') to_remove.add("Content-Length") bytes_remaining = None
|
def server_set_encoding_headers (server, filename=None): """ Set encoding headers. """ rewrite = server.is_rewrite() bytes_remaining = get_content_length(server.headers) to_remove = sets.Set() # remove content length if rewrite: to_remove.add('Content-Length') # add decoders if server.headers.has_key('Transfer-Encoding'): # chunked encoded tenc = server.headers['Transfer-Encoding'] if tenc != 'chunked': wc.log.warn(wc.LOG_PROXY, "unknown transfer encoding %r, assuming chunked encoding", tenc) server.decoders.append(UnchunkStream.UnchunkStream(server)) server.encoders.append(ChunkStream.ChunkStream(server)) if server.headers.has_key("Content-Length"): wc.log.warn(wc.LOG_PROXY, 'chunked encoding should not have Content-Length') to_remove.add("Content-Length") bytes_remaining = None elif rewrite: # To make pipelining possible, enable chunked encoding. server.headers['Transfer-Encoding'] = "chunked\r" if server.headers.has_key("Content-Length"): to_remove.add("Content-Length") server.encoders.append(ChunkStream.ChunkStream(server)) remove_headers(server.headers, to_remove) # only decompress on rewrite if not rewrite: return bytes_remaining # Compressed content (uncompress only for rewriting modules) encoding = server.headers.get('Content-Encoding', '').lower() # note: do not gunzip .gz files if encoding in ('gzip', 'x-gzip', 'deflate') and \ (filename is None or not filename.endswith(".gz")): if encoding == 'deflate': server.decoders.append(DeflateStream.DeflateStream()) else: server.decoders.append(GunzipStream.GunzipStream()) # remove encoding because we unzip the stream to_remove = ['Content-Encoding'] # remove no-transform cache control if server.headers.get('Cache-Control', '').lower() == 'no-transform': to_remove.append('Cache-Control') remove_headers(server.headers, to_remove) # add warning server.headers['Warning'] = "214 Transformation applied\r" elif encoding and encoding!='identity': wc.log.warn(wc.LOG_PROXY, _("unsupported encoding: %r"), encoding) # do not disable filtering for unknown content-encodings # this could result in a DoS attack (server sending garbage # as content-encoding) if not server.headers.has_key('Content-Length'): server.headers['Connection'] = 'close\r' return bytes_remaining
|
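The added block in this row walks a comma-separated `Transfer-Encoding` header, strips per-token parameters, skips `identity`, and picks a decoder for each remaining token. A hedged sketch of just the header parsing, with the decoder classes assumed to exist elsewhere:

```python
def parse_transfer_encodings(value):
    """Split a Transfer-Encoding header into lower-cased tokens,
    dropping parameters after ';' and the no-op 'identity' token."""
    tokens = []
    for tenc in value.lower().split(","):
        tenc = tenc.strip()
        if ";" in tenc:
            tenc = tenc.split(";", 1)[0]
        if tenc and tenc != "identity":
            tokens.append(tenc)
    return tokens

# parse_transfer_encodings("gzip, chunked") -> ['gzip', 'chunked']
```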
|
if server.headers.has_key('Transfer-Encoding'): tenc = server.headers['Transfer-Encoding'] if tenc != 'chunked': wc.log.warn(wc.LOG_PROXY, "unknown transfer encoding %r, assuming chunked encoding", tenc) server.decoders.append(UnchunkStream.UnchunkStream(server)) server.encoders.append(ChunkStream.ChunkStream(server)) if server.headers.has_key("Content-Length"): wc.log.warn(wc.LOG_PROXY, 'chunked encoding should not have Content-Length') to_remove.add("Content-Length") bytes_remaining = None elif rewrite: server.headers['Transfer-Encoding'] = "chunked\r" if server.headers.has_key("Content-Length"): to_remove.add("Content-Length") server.encoders.append(ChunkStream.ChunkStream(server))
|
def server_set_encoding_headers (server, filename=None): """ Set encoding headers. """ rewrite = server.is_rewrite() bytes_remaining = get_content_length(server.headers) to_remove = sets.Set() # remove content length if rewrite: to_remove.add('Content-Length') # add decoders if server.headers.has_key('Transfer-Encoding'): # chunked encoded tenc = server.headers['Transfer-Encoding'] if tenc != 'chunked': wc.log.warn(wc.LOG_PROXY, "unknown transfer encoding %r, assuming chunked encoding", tenc) server.decoders.append(UnchunkStream.UnchunkStream(server)) server.encoders.append(ChunkStream.ChunkStream(server)) if server.headers.has_key("Content-Length"): wc.log.warn(wc.LOG_PROXY, 'chunked encoding should not have Content-Length') to_remove.add("Content-Length") bytes_remaining = None elif rewrite: # To make pipelining possible, enable chunked encoding. server.headers['Transfer-Encoding'] = "chunked\r" if server.headers.has_key("Content-Length"): to_remove.add("Content-Length") server.encoders.append(ChunkStream.ChunkStream(server)) remove_headers(server.headers, to_remove) # only decompress on rewrite if not rewrite: return bytes_remaining # Compressed content (uncompress only for rewriting modules) encoding = server.headers.get('Content-Encoding', '').lower() # note: do not gunzip .gz files if encoding in ('gzip', 'x-gzip', 'deflate') and \ (filename is None or not filename.endswith(".gz")): if encoding == 'deflate': server.decoders.append(DeflateStream.DeflateStream()) else: server.decoders.append(GunzipStream.GunzipStream()) # remove encoding because we unzip the stream to_remove = ['Content-Encoding'] # remove no-transform cache control if server.headers.get('Cache-Control', '').lower() == 'no-transform': to_remove.append('Cache-Control') remove_headers(server.headers, to_remove) # add warning server.headers['Warning'] = "214 Transformation applied\r" elif encoding and encoding!='identity': wc.log.warn(wc.LOG_PROXY, _("unsupported encoding: %r"), encoding) # do not disable filtering for unknown content-encodings # this could result in a DoS attack (server sending garbage # as content-encoding) if not server.headers.has_key('Content-Length'): server.headers['Connection'] = 'close\r' return bytes_remaining
|
|
if not server.headers.has_key('Content-Length'): server.headers['Connection'] = 'close\r'
|
def server_set_encoding_headers (server, filename=None): """ Set encoding headers. """ rewrite = server.is_rewrite() bytes_remaining = get_content_length(server.headers) to_remove = sets.Set() # remove content length if rewrite: to_remove.add('Content-Length') # add decoders if server.headers.has_key('Transfer-Encoding'): # chunked encoded tenc = server.headers['Transfer-Encoding'] if tenc != 'chunked': wc.log.warn(wc.LOG_PROXY, "unknown transfer encoding %r, assuming chunked encoding", tenc) server.decoders.append(UnchunkStream.UnchunkStream(server)) server.encoders.append(ChunkStream.ChunkStream(server)) if server.headers.has_key("Content-Length"): wc.log.warn(wc.LOG_PROXY, 'chunked encoding should not have Content-Length') to_remove.add("Content-Length") bytes_remaining = None elif rewrite: # To make pipelining possible, enable chunked encoding. server.headers['Transfer-Encoding'] = "chunked\r" if server.headers.has_key("Content-Length"): to_remove.add("Content-Length") server.encoders.append(ChunkStream.ChunkStream(server)) remove_headers(server.headers, to_remove) # only decompress on rewrite if not rewrite: return bytes_remaining # Compressed content (uncompress only for rewriting modules) encoding = server.headers.get('Content-Encoding', '').lower() # note: do not gunzip .gz files if encoding in ('gzip', 'x-gzip', 'deflate') and \ (filename is None or not filename.endswith(".gz")): if encoding == 'deflate': server.decoders.append(DeflateStream.DeflateStream()) else: server.decoders.append(GunzipStream.GunzipStream()) # remove encoding because we unzip the stream to_remove = ['Content-Encoding'] # remove no-transform cache control if server.headers.get('Cache-Control', '').lower() == 'no-transform': to_remove.append('Cache-Control') remove_headers(server.headers, to_remove) # add warning server.headers['Warning'] = "214 Transformation applied\r" elif encoding and encoding!='identity': wc.log.warn(wc.LOG_PROXY, _("unsupported encoding: %r"), encoding) # do not disable filtering for unknown content-encodings # this could result in a DoS attack (server sending garbage # as content-encoding) if not server.headers.has_key('Content-Length'): server.headers['Connection'] = 'close\r' return bytes_remaining
|
|
encoding = server.headers.get('Content-Encoding', '').lower() if encoding in ('gzip', 'x-gzip', 'deflate') and \ (filename is None or not filename.endswith(".gz")): if encoding == 'deflate': server.decoders.append(DeflateStream.DeflateStream()) else: server.decoders.append(GunzipStream.GunzipStream()) to_remove = ['Content-Encoding']
|
if server.headers.has_key('Content-Encoding'): to_remove.add('Content-Encoding') cencs = server.headers['Content-Encoding'].lower() for cenc in cencs.split(","): cenc = cenc.strip() if ";" in cenc: cenc = cenc.split(";", 1)[0] if not cenc or cenc == 'identity': continue if filename is not None and \ (filename.endswith(".gz") or filename.endswith(".tgz")): continue if cenc in ('gzip', 'x-gzip'): server.decoders.append(GunzipStream.GunzipStream()) elif cenc == 'deflate': server.decoders.append(DeflateStream.DeflateStream()) else: wc.log.warn(wc.LOG_PROXY, "unsupported content encoding in %r", encoding)
|
def server_set_encoding_headers (server, filename=None): """ Set encoding headers. """ rewrite = server.is_rewrite() bytes_remaining = get_content_length(server.headers) to_remove = sets.Set() # remove content length if rewrite: to_remove.add('Content-Length') # add decoders if server.headers.has_key('Transfer-Encoding'): # chunked encoded tenc = server.headers['Transfer-Encoding'] if tenc != 'chunked': wc.log.warn(wc.LOG_PROXY, "unknown transfer encoding %r, assuming chunked encoding", tenc) server.decoders.append(UnchunkStream.UnchunkStream(server)) server.encoders.append(ChunkStream.ChunkStream(server)) if server.headers.has_key("Content-Length"): wc.log.warn(wc.LOG_PROXY, 'chunked encoding should not have Content-Length') to_remove.add("Content-Length") bytes_remaining = None elif rewrite: # To make pipelining possible, enable chunked encoding. server.headers['Transfer-Encoding'] = "chunked\r" if server.headers.has_key("Content-Length"): to_remove.add("Content-Length") server.encoders.append(ChunkStream.ChunkStream(server)) remove_headers(server.headers, to_remove) # only decompress on rewrite if not rewrite: return bytes_remaining # Compressed content (uncompress only for rewriting modules) encoding = server.headers.get('Content-Encoding', '').lower() # note: do not gunzip .gz files if encoding in ('gzip', 'x-gzip', 'deflate') and \ (filename is None or not filename.endswith(".gz")): if encoding == 'deflate': server.decoders.append(DeflateStream.DeflateStream()) else: server.decoders.append(GunzipStream.GunzipStream()) # remove encoding because we unzip the stream to_remove = ['Content-Encoding'] # remove no-transform cache control if server.headers.get('Cache-Control', '').lower() == 'no-transform': to_remove.append('Cache-Control') remove_headers(server.headers, to_remove) # add warning server.headers['Warning'] = "214 Transformation applied\r" elif encoding and encoding!='identity': wc.log.warn(wc.LOG_PROXY, _("unsupported encoding: %r"), encoding) # do not disable filtering for unknown content-encodings # this could result in a DoS attack (server sending garbage # as content-encoding) if not server.headers.has_key('Content-Length'): server.headers['Connection'] = 'close\r' return bytes_remaining
|
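These rows do the analogous thing for `Content-Encoding`: gzip/x-gzip and deflate each get their own stream decoder, and `.gz`/`.tgz` downloads are passed through untouched. As a rough illustration of the decoding itself (not the project's GunzipStream/DeflateStream classes), the standard `zlib` module can handle both; a sketch:

```python
import zlib

def make_decompressor(encoding):
    """Return a zlib decompress object for a Content-Encoding token,
    or None for identity/unknown encodings."""
    encoding = encoding.lower()
    if encoding in ("gzip", "x-gzip"):
        # 16 + MAX_WBITS tells zlib to expect a gzip header and trailer
        return zlib.decompressobj(16 + zlib.MAX_WBITS)
    if encoding == "deflate":
        # note: some servers send raw deflate streams; those would need
        # zlib.decompressobj(-zlib.MAX_WBITS) instead
        return zlib.decompressobj()
    return None

d = make_decompressor("gzip")
# feed body chunks with d.decompress(chunk); call d.flush() at the end
```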
to_remove.append('Cache-Control') remove_headers(server.headers, to_remove)
|
to_remove.add('Cache-Control')
|
def server_set_encoding_headers (server, filename=None): """ Set encoding headers. """ rewrite = server.is_rewrite() bytes_remaining = get_content_length(server.headers) to_remove = sets.Set() # remove content length if rewrite: to_remove.add('Content-Length') # add decoders if server.headers.has_key('Transfer-Encoding'): # chunked encoded tenc = server.headers['Transfer-Encoding'] if tenc != 'chunked': wc.log.warn(wc.LOG_PROXY, "unknown transfer encoding %r, assuming chunked encoding", tenc) server.decoders.append(UnchunkStream.UnchunkStream(server)) server.encoders.append(ChunkStream.ChunkStream(server)) if server.headers.has_key("Content-Length"): wc.log.warn(wc.LOG_PROXY, 'chunked encoding should not have Content-Length') to_remove.add("Content-Length") bytes_remaining = None elif rewrite: # To make pipelining possible, enable chunked encoding. server.headers['Transfer-Encoding'] = "chunked\r" if server.headers.has_key("Content-Length"): to_remove.add("Content-Length") server.encoders.append(ChunkStream.ChunkStream(server)) remove_headers(server.headers, to_remove) # only decompress on rewrite if not rewrite: return bytes_remaining # Compressed content (uncompress only for rewriting modules) encoding = server.headers.get('Content-Encoding', '').lower() # note: do not gunzip .gz files if encoding in ('gzip', 'x-gzip', 'deflate') and \ (filename is None or not filename.endswith(".gz")): if encoding == 'deflate': server.decoders.append(DeflateStream.DeflateStream()) else: server.decoders.append(GunzipStream.GunzipStream()) # remove encoding because we unzip the stream to_remove = ['Content-Encoding'] # remove no-transform cache control if server.headers.get('Cache-Control', '').lower() == 'no-transform': to_remove.append('Cache-Control') remove_headers(server.headers, to_remove) # add warning server.headers['Warning'] = "214 Transformation applied\r" elif encoding and encoding!='identity': wc.log.warn(wc.LOG_PROXY, _("unsupported encoding: %r"), encoding) # do not disable filtering for unknown content-encodings # this could result in a DoS attack (server sending garbage # as content-encoding) if not server.headers.has_key('Content-Length'): server.headers['Connection'] = 'close\r' return bytes_remaining
|
elif encoding and encoding!='identity': wc.log.warn(wc.LOG_PROXY, _("unsupported encoding: %r"), encoding) if not server.headers.has_key('Content-Length'): server.headers['Connection'] = 'close\r'
|
remove_headers(server.headers, to_remove)
|
def server_set_encoding_headers (server, filename=None): """ Set encoding headers. """ rewrite = server.is_rewrite() bytes_remaining = get_content_length(server.headers) to_remove = sets.Set() # remove content length if rewrite: to_remove.add('Content-Length') # add decoders if server.headers.has_key('Transfer-Encoding'): # chunked encoded tenc = server.headers['Transfer-Encoding'] if tenc != 'chunked': wc.log.warn(wc.LOG_PROXY, "unknown transfer encoding %r, assuming chunked encoding", tenc) server.decoders.append(UnchunkStream.UnchunkStream(server)) server.encoders.append(ChunkStream.ChunkStream(server)) if server.headers.has_key("Content-Length"): wc.log.warn(wc.LOG_PROXY, 'chunked encoding should not have Content-Length') to_remove.add("Content-Length") bytes_remaining = None elif rewrite: # To make pipelining possible, enable chunked encoding. server.headers['Transfer-Encoding'] = "chunked\r" if server.headers.has_key("Content-Length"): to_remove.add("Content-Length") server.encoders.append(ChunkStream.ChunkStream(server)) remove_headers(server.headers, to_remove) # only decompress on rewrite if not rewrite: return bytes_remaining # Compressed content (uncompress only for rewriting modules) encoding = server.headers.get('Content-Encoding', '').lower() # note: do not gunzip .gz files if encoding in ('gzip', 'x-gzip', 'deflate') and \ (filename is None or not filename.endswith(".gz")): if encoding == 'deflate': server.decoders.append(DeflateStream.DeflateStream()) else: server.decoders.append(GunzipStream.GunzipStream()) # remove encoding because we unzip the stream to_remove = ['Content-Encoding'] # remove no-transform cache control if server.headers.get('Cache-Control', '').lower() == 'no-transform': to_remove.append('Cache-Control') remove_headers(server.headers, to_remove) # add warning server.headers['Warning'] = "214 Transformation applied\r" elif encoding and encoding!='identity': wc.log.warn(wc.LOG_PROXY, _("unsupported encoding: %r"), encoding) # do not disable filtering for unknown content-encodings # this could result in a DoS attack (server sending garbage # as content-encoding) if not server.headers.has_key('Content-Length'): server.headers['Connection'] = 'close\r' return bytes_remaining
|
hostset.add(expand_ip(host))
|
hostset.add(expand_ip(host)[0])
|
def hosts2map (hosts): """return a set of named hosts, and a list of subnets (host/netmask adresses). Only IPv4 host/netmasks are supported. """ hostset = Set() nets = [] for host in hosts: if _host_bitmask_re.match(host): host, mask = host.split("/") mask = int(mask) if not is_valid_bitmask(mask): error(PROXY, "bitmask %d is not a valid network mask", mask) continue if not is_valid_ipv4(host): error(PROXY, "host %s is not a valid ip address", host) continue nets.append(dq2net(host, suffix2mask(mask))) elif _host_netmask_re.match(host): host, mask = host.split("/") if not is_valid_ipv4(host): error(PROXY, "host %s is not a valid ip address", host) continue if not is_valid_ipv4(mask): error(PROXY, "mask %s is not a valid ip network mask", mask) continue nets.append(dq2net(host, dq2mask(mask))) elif is_valid_ip(host): hostset.add(expand_ip(host)) else: try: ips = resolve_host(host) for i in ips: hostset.add(i) except socket.gaierror: pass return (hostset, nets)
|
ips = resolve_host(host) for i in ips: hostset.add(i)
|
hostset |= resolve_host(host)
|
def hosts2map (hosts): """return a set of named hosts, and a list of subnets (host/netmask adresses). Only IPv4 host/netmasks are supported. """ hostset = Set() nets = [] for host in hosts: if _host_bitmask_re.match(host): host, mask = host.split("/") mask = int(mask) if not is_valid_bitmask(mask): error(PROXY, "bitmask %d is not a valid network mask", mask) continue if not is_valid_ipv4(host): error(PROXY, "host %s is not a valid ip address", host) continue nets.append(dq2net(host, suffix2mask(mask))) elif _host_netmask_re.match(host): host, mask = host.split("/") if not is_valid_ipv4(host): error(PROXY, "host %s is not a valid ip address", host) continue if not is_valid_ipv4(mask): error(PROXY, "mask %s is not a valid ip network mask", mask) continue nets.append(dq2net(host, dq2mask(mask))) elif is_valid_ip(host): hostset.add(expand_ip(host)) else: try: ips = resolve_host(host) for i in ips: hostset.add(i) except socket.gaierror: pass return (hostset, nets)
|
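`hosts2map` classifies each entry as a CIDR/bitmask network, a dotted-quad netmask network, a literal IP, or a hostname to resolve; the fixes in these two rows take only the first element of `expand_ip()` and merge the whole set returned by `resolve_host()`. The standard `ipaddress` module can express the same classification; a sketch under that assumption, with hostname resolution via `socket` and lookup errors simply skipped:

```python
import ipaddress
import socket

def hosts_to_map(hosts):
    """Return (set of IP strings, list of ip_network objects)."""
    hostset, nets = set(), []
    for host in hosts:
        try:
            if "/" in host:
                # handles both bitmask ("10.0.0.0/8") and netmask notation
                nets.append(ipaddress.ip_network(host, strict=False))
            else:
                hostset.add(str(ipaddress.ip_address(host)))
        except ValueError:
            # not an address: try to resolve it as a hostname
            try:
                for info in socket.getaddrinfo(host, None):
                    hostset.add(info[4][0])
            except socket.gaierror:
                pass
    return hostset, nets
```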
item = form[key] if isinstance(item, list): item = item[0] elif hasattr(item, "value"): item = item.value return item.decode(charset)
|
return get_item_value(form[key])
|
def getval (form, key): """return a formfield value""" if not form.has_key(key): return u'' item = form[key] if isinstance(item, list): item = item[0] elif hasattr(item, "value"): item = item.value return item.decode(charset)
|
item = form[key] if isinstance(item, list): l = [x.value for x in item] elif hasattr(item, "value"): l = [item.value] else: l = [item] return [ x.decode(charset) for x in l ]
|
return get_item_list(form[key]) def get_prefix_vals (form, prefix): """return a list of (key, value) pairs where ``prefix+key'' is a valid form field""" res = [] for key, item in form.items(): if key.startswith(prefix): res.append(key[len(prefix):], get_item_value(item)) return res
|
def getlist (form, key): """return a list of formfield values""" if not form.has_key(key): return [] item = form[key] if isinstance(item, list): l = [x.value for x in item] elif hasattr(item, "value"): l = [item.value] else: l = [item] return [ x.decode(charset) for x in l ]
|
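These rows factor the value/list extraction out of `getval`/`getlist` into shared helpers. Note that the added `get_prefix_vals` calls `res.append(key[len(prefix):], get_item_value(item))` with two arguments, which would raise a `TypeError` at runtime; the pair presumably needs to be wrapped in a tuple. A sketch of the helpers with that fixed, assuming cgi-style field objects with a `.value` attribute and a module-level `charset`:

```python
charset = "iso-8859-1"  # assumed module-level default

def get_item_value(item):
    """Return a single decoded value from a cgi form item."""
    if isinstance(item, list):
        item = item[0]
    if hasattr(item, "value"):
        item = item.value
    return item.decode(charset) if isinstance(item, bytes) else item

def get_prefix_vals(form, prefix):
    """Return (suffix, value) pairs for every field starting with prefix."""
    return [(key[len(prefix):], get_item_value(item))
            for key, item in form.items() if key.startswith(prefix)]
```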
if ct != self.mime:
|
if ct is None:
|
def check_headers (self): """add missing content-type and/or encoding headers""" # 304 Not Modified does not send any type or encoding info, # because this info was cached if self.statuscode == '304': return # check content-type against our own guess i = self.document.find('?') if i>0: document = self.document[:i] else: document = self.document gm = mimetypes.guess_type(document, None) ct = self.headers.get('Content-Type', None) if self.mime: if ct != self.mime: warn(PROXY, i18n._("set Content-Type from %s to %s in %s"), `str(ct)`, `self.mime`, `self.url`) self.headers['Content-Type'] = "%s\r"%self.mime elif gm[0]: # guessed an own content type if ct is None: warn(PROXY, i18n._("add Content-Type %s to %s"), `gm[0]`, `self.url`) self.headers['Content-Type'] = "%s\r"%gm[0] # fix some content types elif not ct.startswith(gm[0]) and \ gm[0] in _fix_content_types: warn(PROXY, i18n._("change Content-Type from %s to %s in %s"), `ct`, `gm[0]`, `self.url`) self.headers['Content-Type'] = "%s\r"%gm[0] if gm[1] and gm[1] in _fix_content_encodings: ce = self.headers.get('Content-Encoding', None) # guessed an own encoding type if ce is None: self.headers['Content-Encoding'] = "%s\r"%gm[1] warn(PROXY, i18n._("add Content-Encoding %s to %s"), `gm[1]`, `self.url`) elif ce != gm[1]: warn(PROXY, i18n._("change Content-Encoding from %s to %s in %s"), `ce`, `gm[1]`, `self.url`) self.headers['Content-Encoding'] = "%s\r"%gm[1] # hmm, fix application/x-httpd-php* if self.headers.get('Content-Type', '').lower().startswith('application/x-httpd-php'): warn(PROXY, i18n._("fix x-httpd-php Content-Type")) self.headers['Content-Type'] = 'text/html\r'
|
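`check_headers` above compares the server-supplied `Content-Type`/`Content-Encoding` against a guess derived from the URL and patches obviously wrong values. The guessing relies on the standard `mimetypes` module; a small sketch of that piece, with the query string stripped first as in the original:

```python
import mimetypes

def guess_headers(url):
    """Guess (content_type, content_encoding) from a URL's path."""
    path = url.split("?", 1)[0]          # drop any query string
    return mimetypes.guess_type(path)

# guess_headers("http://example.org/a.tar.gz?x=1")
# -> ('application/x-tar', 'gzip')
```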
self.js_filter = opts['javascript'] and jslib
|
self.javascript = opts['javascript'] and jslib
|
def __init__ (self, opts): self.js_filter = opts['javascript'] and jslib self.js_html = None self.js_src = False self.js_script = '' if self.js_filter: self.js_env = jslib.new_jsenv() self.js_output = 0 self.js_popup = 0
|
if self.js_filter:
|
if self.javascript:
|
def __init__ (self, opts): self.js_filter = opts['javascript'] and jslib self.js_html = None self.js_src = False self.js_script = '' if self.js_filter: self.js_env = jslib.new_jsenv() self.js_output = 0 self.js_popup = 0
|
error(FILTER, "waited too long for %s"%self.state[1]) self.js_client.finish() self.js_html = None
|
def flush (self): self._debug("flush") if self.waited > 100: # waited too long; stop js background downloader and # switch back to parse error(FILTER, "waited too long for %s"%self.state[1]) if self.js_env.hasListener(self): self.js_env.detachListener(self) self.js_html = None self.state = ('parse',) self.feed("") # will replay() buffered data elif self.state[0]=='wait': # flushing in wait state raises a filter exception self.waited += 1 raise FilterWait("HtmlParser[%d,wait]: waited %d times for %s"%\ (self.level, self.waited, self.state[1])) self.parser.flush()
|
|
error(FILTER, "waited too long for %s"%self.state[1]) if self.js_env.hasListener(self): self.js_env.detachListener(self) self.js_html = None
|
def flush (self): self._debug("flush") if self.waited > 100: # waited too long; stop js background downloader and # switch back to parse error(FILTER, "waited too long for %s"%self.state[1]) if self.js_env.hasListener(self): self.js_env.detachListener(self) self.js_html = None self.state = ('parse',) self.feed("") # will replay() buffered data elif self.state[0]=='wait': # flushing in wait state raises a filter exception self.waited += 1 raise FilterWait("HtmlParser[%d,wait]: waited %d times for %s"%\ (self.level, self.waited, self.state[1])) self.parser.flush()
|
|
elif self.js_filter:
|
elif self.javascript:
|
def startElement (self, tag, attrs): """We get a new start tag. New rules could be appended to the pending rules. No rules can be removed from the list.""" # default data self._debug("startElement %s", `tag`) tag = check_spelling(tag, self.url) item = [STARTTAG, tag, attrs] if self.state[0]=='wait': self.waitbuf.append(item) return rulelist = [] filtered = False if tag=="meta" and \ attrs.get('http-equiv', '').lower() =='pics-label': labels = resolve_html_entities(attrs.get('content', '')) # note: if there are no pics rules, this loop is empty for rule in self.pics: msg = check_pics(rule, labels) if msg: raise FilterPics(msg) # first labels match counts self.pics = [] elif tag=="body": # headers finished if self.pics: # no pics data found self.pics = [] elif tag=="base" and attrs.has_key('href'): self.base_url = strip_quotes(attrs['href']) self._debug("using base url %s", `self.base_url`) # search for and prevent known security flaws in HTML self.security.scan_start_tag(tag, attrs, self)
|
if not self.rulestack and not self.js_filter:
|
if not self.rulestack and not self.javascript:
|
def startElement (self, tag, attrs): """We get a new start tag. New rules could be appended to the pending rules. No rules can be removed from the list.""" # default data self._debug("startElement %s", `tag`) tag = check_spelling(tag, self.url) item = [STARTTAG, tag, attrs] if self.state[0]=='wait': self.waitbuf.append(item) return rulelist = [] filtered = False if tag=="meta" and \ attrs.get('http-equiv', '').lower() =='pics-label': labels = resolve_html_entities(attrs.get('content', '')) # note: if there are no pics rules, this loop is empty for rule in self.pics: msg = check_pics(rule, labels) if msg: raise FilterPics(msg) # first labels match counts self.pics = [] elif tag=="body": # headers finished if self.pics: # no pics data found self.pics = [] elif tag=="base" and attrs.has_key('href'): self.base_url = strip_quotes(attrs['href']) self._debug("using base url %s", `self.base_url`) # search for and prevent known security flaws in HTML self.security.scan_start_tag(tag, attrs, self)
|
if self.js_filter and tag=='script':
|
if self.javascript and tag=='script':
|
def endElement (self, tag): """We know the following: if a rule matches, it must be the one on the top of the stack. So we look only at the top rule.
|
client = HttpProxyClient(self.jsScriptData, (url, ver)) ClientServerMatchmaker(client, "GET %s HTTP/1.1" % url, WcMessage(StringIO('')), '',
|
self.js_client = HttpProxyClient(self.jsScriptData, (url, ver)) ClientServerMatchmaker(self.js_client, "GET %s HTTP/1.1" % url, WcMessage(StringIO('')), '',
|
def jsScriptSrc (self, url, language): """Start a background download for <script src=""> tags""" assert self.state[0]=='parse', "non-parse state %s" % str(self.state) ver = get_js_ver(language) if self.base_url: url = urlparse.urljoin(self.base_url, url) else: url = urlparse.urljoin(self.url, url) url = norm_url(url) if _has_ws(url): warn(PARSER, "HtmlParser[%d]: broken JS url %s at %s", self.level, `url`, `self.url`) return self.state = ('wait', url) self.waited = 1 self.js_src = True client = HttpProxyClient(self.jsScriptData, (url, ver)) ClientServerMatchmaker(client, "GET %s HTTP/1.1" % url, #request WcMessage(StringIO('')), #headers '', #content {'nofilter': None}, # nofilter 'identity', # compress mime = "application/x-javascript", )
|
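The change in this row keeps a reference to the background download client in `self.js_client`; the surrounding `jsScriptSrc` first resolves the `<script src="...">` URL against either the page's `<base href>` or the page URL. That resolution step is plain `urllib.parse.urljoin`; a sketch:

```python
from urllib.parse import urljoin

def resolve_script_url(src, page_url, base_url=None):
    """Resolve a script src attribute against <base href> if present,
    otherwise against the page URL."""
    return urljoin(base_url or page_url, src)

# resolve_script_url("js/app.js", "http://example.org/dir/page.html")
# -> 'http://example.org/dir/js/app.js'
```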
comments=self.comments, javascript=self.js_filter, level=self.level+1)
|
comments=self.comments, javascript=self.javascript, level=self.level+1)
|
def jsScript (self, script, ver, item): """execute given script with javascript version ver""" self._debug("JS: jsScript %s %s", ver, `script`) assert self.state[0]=='parse', "non-parse state %s" % str(self.state) assert len(self.buf) >= 2, "too small buffer %s" % str(self.buf) self.js_output = 0 self.js_env.attachListener(self) # start recursive html filter (used by jsProcessData) self.js_html = FilterHtmlParser(self.rules, self.pics, self.url, comments=self.comments, javascript=self.js_filter, level=self.level+1) # execute self.js_env.executeScript(unescape_js(script), ver) self.js_env.detachListener(self) # wait for recursive filter to finish self.jsEndScript(item)
|
from urllib import splittype, splithost, splitport
|
from urllib import splittype, splithost, splitnport
|
def fileno(self): return self.socket.fileno()
|
if not host: hostname = "localhost" port = config['port'] else: hostname, port = splitport(host) if port is None: port = 80 else: port = int(port) return scheme, hostname.lower(), port, document
|
port = 80 if host: host = host.lower() host, port = splitnport(host, 80) return scheme, host, port, document
|
def spliturl (url): """split url in a tuple (scheme, hostname, port, document) where hostname is always lowercased""" # XXX this relies on scheme==http! scheme, netloc = splittype(url) host, document = splithost(netloc) if not host: hostname = "localhost" port = config['port'] else: hostname, port = splitport(host) if port is None: port = 80 else: port = int(port) return scheme, hostname.lower(), port, document
|
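`spliturl` switches here from `splitport` to `splitnport`, which returns a numeric port with a default in one call. Those helpers come from the Python 2 `urllib`; with the modern `urllib.parse` the same split looks roughly like the sketch below, with the default port 80 assumed as in the original:

```python
from urllib.parse import urlsplit

def spliturl(url, default_port=80):
    """Split url into (scheme, lowercased host, port, document)."""
    parts = urlsplit(url)
    host = (parts.hostname or "").lower()
    port = parts.port if parts.port is not None else default_port
    document = parts.path or "/"
    if parts.query:
        document += "?" + parts.query
    return parts.scheme, host, port, document

# spliturl("http://Example.ORG/index.html")
# -> ('http', 'example.org', 80, '/index.html')
```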
info.append("Rule removed")
|
info.append(i18n._("Rule removed"))
|
def _form_removerule (rule): curfolder.rules.remove(rule) global currule currule = None info.append("Rule removed")
|
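This row and the long run that follows wrap user-visible status messages such as "Rule removed" in `i18n._()` so they can be translated. The underlying mechanism is GNU gettext; a minimal sketch of how such a `_` function is typically obtained (the catalog name and directory here are assumptions, not the project's real values):

```python
import gettext

# install a translation for the (assumed) "webcleaner" catalog;
# fall back to the untranslated string if no catalog is found
t = gettext.translation("webcleaner", localedir="locale", fallback=True)
_ = t.gettext

info = []
info.append(_("Rule removed"))   # now marked for translation
```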
info.append("Attribute added")
|
info.append(i18n._("Rewrite attribute added"))
|
def _form_rewrite_addattr (form): name = getval(form, "attrname").strip() if not name: error.append(i18n._("Empty attribute name")) return value = getval(form, "attrval") currule.attrs[name] = value info.append("Attribute added")
|
info.append("Attributes removed")
|
info.append(i18n._("Rewrite attributes removed"))
|
def _form_rewrite_removeattrs (form): toremove = getlist(form, 'delattr') if toremove: for attr in toremove: del currule.attrs[attr] info.append("Attributes removed")
|
error.append("Empty rule title")
|
error.append(i18n._("Empty rule title"))
|
def _form_rule_titledesc (form): title = getval(form, 'rule_title') if not title: error.append("Empty rule title") return if title!=currule.title: currule.title = title info.append("Rule title changed") desc = getval(form, 'rule_description') if desc!=currule.desc: currule.desc = desc info.append("Rule description changed")
|
info.append("Rule title changed")
|
info.append(i18n._("Rule title changed"))
|
def _form_rule_titledesc (form): title = getval(form, 'rule_title') if not title: error.append("Empty rule title") return if title!=currule.title: currule.title = title info.append("Rule title changed") desc = getval(form, 'rule_description') if desc!=currule.desc: currule.desc = desc info.append("Rule description changed")
|
info.append("Rule description changed")
|
info.append(i18n._("Rule description changed"))
|
def _form_rule_titledesc (form): title = getval(form, 'rule_title') if not title: error.append("Empty rule title") return if title!=currule.title: currule.title = title info.append("Rule title changed") desc = getval(form, 'rule_description') if desc!=currule.desc: currule.desc = desc info.append("Rule description changed")
|
info.append("Rule match url changed")
|
info.append(i18n._("Rule match url changed"))
|
def _form_rule_matchurl (form): matchurl = getval(form, 'rule_matchurl').strip() if matchurl!=currule.matchurl: currule.matchurl = matchurl info.append("Rule match url changed") dontmatchurl = getval(form, 'rule_dontmatchurl').strip() if dontmatchurl!=currule.dontmatchurl: currule.dontmatchurl = dontmatchurl info.append("Rule dontmatch url changed")
|
info.append("Rule dontmatch url changed")
|
info.append(i18n._("Rule dontmatch url changed"))
|
def _form_rule_matchurl (form): matchurl = getval(form, 'rule_matchurl').strip() if matchurl!=currule.matchurl: currule.matchurl = matchurl info.append("Rule match url changed") dontmatchurl = getval(form, 'rule_dontmatchurl').strip() if dontmatchurl!=currule.dontmatchurl: currule.dontmatchurl = dontmatchurl info.append("Rule dontmatch url changed")
|
info.append("Rule url scheme changed")
|
info.append(i18n._("Rule url scheme changed"))
|
def _form_rule_urlparts (form): scheme = getval(form, 'rule_urlscheme').strip() if scheme!=currule.scheme: currule.scheme = scheme info.append("Rule url scheme changed") host = getval(form, 'rule_urlhost').strip() if host!=currule.host: currule.host = host info.append("Rule url host changed") port = getval(form, 'rule_urlport').strip() if port!=currule.port: currule.port = port info.append("Rule url port changed") path = getval(form, 'rule_urlpath').strip() if path!=currule.path: currule.path = path info.append("Rule url path changed") parameters = getval(form, 'rule_urlparameters').strip() if parameters!=currule.parameters: currule.parameters = parameters info.append("Rule url parameters changed") query = getval(form, 'rule_urlquery').strip() if query!=currule.query: currule.query = query info.append("Rule url query changed") fragment = getval(form, 'rule_urlfragment').strip() if fragment!=currule.fragment: currule.fragment = fragment info.append("Rule url fragment changed")
|
info.append("Rule url host changed")
|
info.append(i18n._("Rule url host changed"))
|
def _form_rule_urlparts (form): scheme = getval(form, 'rule_urlscheme').strip() if scheme!=currule.scheme: currule.scheme = scheme info.append("Rule url scheme changed") host = getval(form, 'rule_urlhost').strip() if host!=currule.host: currule.host = host info.append("Rule url host changed") port = getval(form, 'rule_urlport').strip() if port!=currule.port: currule.port = port info.append("Rule url port changed") path = getval(form, 'rule_urlpath').strip() if path!=currule.path: currule.path = path info.append("Rule url path changed") parameters = getval(form, 'rule_urlparameters').strip() if parameters!=currule.parameters: currule.parameters = parameters info.append("Rule url parameters changed") query = getval(form, 'rule_urlquery').strip() if query!=currule.query: currule.query = query info.append("Rule url query changed") fragment = getval(form, 'rule_urlfragment').strip() if fragment!=currule.fragment: currule.fragment = fragment info.append("Rule url fragment changed")
|
info.append("Rule url port changed")
|
info.append(i18n._("Rule url port changed"))
|
def _form_rule_urlparts (form): scheme = getval(form, 'rule_urlscheme').strip() if scheme!=currule.scheme: currule.scheme = scheme info.append("Rule url scheme changed") host = getval(form, 'rule_urlhost').strip() if host!=currule.host: currule.host = host info.append("Rule url host changed") port = getval(form, 'rule_urlport').strip() if port!=currule.port: currule.port = port info.append("Rule url port changed") path = getval(form, 'rule_urlpath').strip() if path!=currule.path: currule.path = path info.append("Rule url path changed") parameters = getval(form, 'rule_urlparameters').strip() if parameters!=currule.parameters: currule.parameters = parameters info.append("Rule url parameters changed") query = getval(form, 'rule_urlquery').strip() if query!=currule.query: currule.query = query info.append("Rule url query changed") fragment = getval(form, 'rule_urlfragment').strip() if fragment!=currule.fragment: currule.fragment = fragment info.append("Rule url fragment changed")
|
info.append("Rule url path changed")
|
info.append(i18n._("Rule url path changed"))
|
def _form_rule_urlparts (form): scheme = getval(form, 'rule_urlscheme').strip() if scheme!=currule.scheme: currule.scheme = scheme info.append("Rule url scheme changed") host = getval(form, 'rule_urlhost').strip() if host!=currule.host: currule.host = host info.append("Rule url host changed") port = getval(form, 'rule_urlport').strip() if port!=currule.port: currule.port = port info.append("Rule url port changed") path = getval(form, 'rule_urlpath').strip() if path!=currule.path: currule.path = path info.append("Rule url path changed") parameters = getval(form, 'rule_urlparameters').strip() if parameters!=currule.parameters: currule.parameters = parameters info.append("Rule url parameters changed") query = getval(form, 'rule_urlquery').strip() if query!=currule.query: currule.query = query info.append("Rule url query changed") fragment = getval(form, 'rule_urlfragment').strip() if fragment!=currule.fragment: currule.fragment = fragment info.append("Rule url fragment changed")
|
info.append("Rule url parameters changed")
|
info.append(i18n._("Rule url parameters changed"))
|
def _form_rule_urlparts (form): scheme = getval(form, 'rule_urlscheme').strip() if scheme!=currule.scheme: currule.scheme = scheme info.append("Rule url scheme changed") host = getval(form, 'rule_urlhost').strip() if host!=currule.host: currule.host = host info.append("Rule url host changed") port = getval(form, 'rule_urlport').strip() if port!=currule.port: currule.port = port info.append("Rule url port changed") path = getval(form, 'rule_urlpath').strip() if path!=currule.path: currule.path = path info.append("Rule url path changed") parameters = getval(form, 'rule_urlparameters').strip() if parameters!=currule.parameters: currule.parameters = parameters info.append("Rule url parameters changed") query = getval(form, 'rule_urlquery').strip() if query!=currule.query: currule.query = query info.append("Rule url query changed") fragment = getval(form, 'rule_urlfragment').strip() if fragment!=currule.fragment: currule.fragment = fragment info.append("Rule url fragment changed")
|
info.append("Rule url query changed")
|
info.append(i18n._("Rule url query changed"))
|
def _form_rule_urlparts (form): scheme = getval(form, 'rule_urlscheme').strip() if scheme!=currule.scheme: currule.scheme = scheme info.append("Rule url scheme changed") host = getval(form, 'rule_urlhost').strip() if host!=currule.host: currule.host = host info.append("Rule url host changed") port = getval(form, 'rule_urlport').strip() if port!=currule.port: currule.port = port info.append("Rule url port changed") path = getval(form, 'rule_urlpath').strip() if path!=currule.path: currule.path = path info.append("Rule url path changed") parameters = getval(form, 'rule_urlparameters').strip() if parameters!=currule.parameters: currule.parameters = parameters info.append("Rule url parameters changed") query = getval(form, 'rule_urlquery').strip() if query!=currule.query: currule.query = query info.append("Rule url query changed") fragment = getval(form, 'rule_urlfragment').strip() if fragment!=currule.fragment: currule.fragment = fragment info.append("Rule url fragment changed")
|
info.append("Rule url fragment changed")
|
info.append(i18n._("Rule url fragment changed"))
|
def _form_rule_urlparts (form): scheme = getval(form, 'rule_urlscheme').strip() if scheme!=currule.scheme: currule.scheme = scheme info.append("Rule url scheme changed") host = getval(form, 'rule_urlhost').strip() if host!=currule.host: currule.host = host info.append("Rule url host changed") port = getval(form, 'rule_urlport').strip() if port!=currule.port: currule.port = port info.append("Rule url port changed") path = getval(form, 'rule_urlpath').strip() if path!=currule.path: currule.path = path info.append("Rule url path changed") parameters = getval(form, 'rule_urlparameters').strip() if parameters!=currule.parameters: currule.parameters = parameters info.append("Rule url parameters changed") query = getval(form, 'rule_urlquery').strip() if query!=currule.query: currule.query = query info.append("Rule url query changed") fragment = getval(form, 'rule_urlfragment').strip() if fragment!=currule.fragment: currule.fragment = fragment info.append("Rule url fragment changed")
|
info.append("Rule blocked url changed")
|
info.append(i18n._("Rule blocked url changed"))
|
def _form_apply_block (form): _form_rule_urlparts(form) url = getval(form, 'rule_blockedurl').strip() if url!=currule.url: currule.url = url info.append("Rule blocked url changed")
|
error.append("Empty header rule name")
|
error.append(i18n._("Empty header rule name"))
|
def _form_apply_header (form): _form_rule_matchurl(form) name = getval(form, 'rule_headername').strip() if not name: error.append("Empty header rule name") elif name!=currule.name: currule.name = name info.append("Rule header name changed") value = getval(form, 'rule_headervalue').strip() if value!=currule.value: currule.value = value info.append("Rule header value changed")
|
info.append("Rule header name changed")
|
info.append(i18n._("Rule header name changed"))
|
def _form_apply_header (form): _form_rule_matchurl(form) name = getval(form, 'rule_headername').strip() if not name: error.append("Empty header rule name") elif name!=currule.name: currule.name = name info.append("Rule header name changed") value = getval(form, 'rule_headervalue').strip() if value!=currule.value: currule.value = value info.append("Rule header value changed")
|
info.append("Rule header value changed")
|
info.append(i18n._("Rule header value changed"))
|
def _form_apply_header (form): _form_rule_matchurl(form) name = getval(form, 'rule_headername').strip() if not name: error.append("Empty header rule name") elif name!=currule.name: currule.name = name info.append("Rule header name changed") value = getval(form, 'rule_headervalue').strip() if value!=currule.value: currule.value = value info.append("Rule header value changed")
|
error.append("Invalid image width value")
|
error.append(i18n._("Invalid image width value"))
|
def _form_apply_image (form): _form_rule_matchurl(form) width = getval(form, 'rule_imgwidth').strip() try: width = int(width) except ValueError: error.append("Invalid image width value") return if width!=currule.width: currule.width = width info.append("Rule image width changed") height = getval(form, 'rule_imgheight').strip() try: height = int(height) except ValueError: error.append("Invalid image height value") return if height!=currule.height: currule.height = height info.append("Rule image height changed") # XXX todo: image types
|
info.append("Rule image width changed")
|
info.append(i18n._("Rule image width changed"))
|
def _form_apply_image (form): _form_rule_matchurl(form) width = getval(form, 'rule_imgwidth').strip() try: width = int(width) except ValueError: error.append("Invalid image width value") return if width!=currule.width: currule.width = width info.append("Rule image width changed") height = getval(form, 'rule_imgheight').strip() try: height = int(height) except ValueError: error.append("Invalid image height value") return if height!=currule.height: currule.height = height info.append("Rule image height changed") # XXX todo: image types
|
error.append("Invalid image height value")
|
error.append(i18n._("Invalid image height value"))
|
def _form_apply_image (form): _form_rule_matchurl(form) width = getval(form, 'rule_imgwidth').strip() try: width = int(width) except ValueError: error.append("Invalid image width value") return if width!=currule.width: currule.width = width info.append("Rule image width changed") height = getval(form, 'rule_imgheight').strip() try: height = int(height) except ValueError: error.append("Invalid image height value") return if height!=currule.height: currule.height = height info.append("Rule image height changed") # XXX todo: image types
|
info.append("Rule image height changed")
|
info.append(i18n._("Rule image height changed"))
|
def _form_apply_image (form): _form_rule_matchurl(form) width = getval(form, 'rule_imgwidth').strip() try: width = int(width) except ValueError: error.append("Invalid image width value") return if width!=currule.width: currule.width = width info.append("Rule image width changed") height = getval(form, 'rule_imgheight').strip() try: height = int(height) except ValueError: error.append("Invalid image height value") return if height!=currule.height: currule.height = height info.append("Rule image height changed") # XXX todo: image types
|
print "XXX apply pics"
|
for service in pics_services: if form.has_key("service_%s"%service): if not currule.ratings.has_key(service): currule.ratings[service] = {} for category in pics_categories[service]: currule.ratings[service][category] = 0 info.append(i18n._("PICS service %s enabled") % \ pics_data[service]['name']) else: if currule.ratings.has_key(service): del currule.ratings[service] info.append(i18n._("PICS service %s disabled") % \ pics_data[service]['name']) if currule.ratings.has_key(service): for category in pics_categories[service]: if form.has_key("category_%s_%s" % (service, category)): if not currule.ratings[service][category]: currule.ratings[service][category] = 1 info.append(i18n._("PICS service %s, category %s enabled") %\ (pics_data[service]['name'], category)) else: if currule.ratings[service][category]: currule.ratings[service][category] = 0 info.append(i18n._("PICS service %s, category %s disabled") %\ (pics_data[service]['name'], category))
|
def _form_apply_pics (form): _form_rule_matchurl(form) print "XXX apply pics"
|
info.append("Rule replace search changed")
|
info.append(i18n._("Rule replace search changed"))
|
def _form_apply_replace (form): _form_rule_matchurl(form) # note: do not strip() the search and replace form values search = getval(form, 'rule_search') if search!=currule.search: currule.search = search info.append("Rule replace search changed") replace = getval(form, 'rule_replace') if replace!=currule.replace: currule.replace = replace info.append("Rule replacement changed")
|
info.append("Rule replacement changed")
|
info.append(i18n._("Rule replacement changed"))
|
def _form_apply_replace (form): _form_rule_matchurl(form) # note: do not strip() the search and replace form values search = getval(form, 'rule_search') if search!=currule.search: currule.search = search info.append("Rule replace search changed") replace = getval(form, 'rule_replace') if replace!=currule.replace: currule.replace = replace info.append("Rule replacement changed")
|
extra = self.persistent and "persistent " or ""
|
extra = "" if self.persistent: extra += "persistent " if self.server: extra += "server "
|
def __repr__ (self): """object representation""" extra = self.persistent and "persistent " or "" if self.request: try: extra += self.request.split()[1] except IndexError: extra += '???'+self.request else: extra += 'being read' return '<%s:%-8s %s>' % ('client', self.state, extra)
|
assert self.server, "%s server_content(%s) had no server" % \
|
assert self.server, "%s server_content(%r) had no server" % \
|
def server_content (self, data): """The server received some content. Write it to the client.""" assert self.server, "%s server_content(%s) had no server" % \ (self, data) if data: self.write(data)
|
super(HttpClient, self).handle_close()
|
def handle_close (self): """The client closed the connection, so cancel the server connection""" wc.log.debug(wc.LOG_PROXY, '%s handle_close', self) self.send_buffer = '' super(HttpClient, self).handle_close() if self.server: self.server.client_abort() self.server = None # If there isn't a server, then it's in the process of # doing DNS lookup or connecting. The matchmaker will # check to see if the client is still connected.
|
|
self.headers['Host'] += "%s:%d\r"%(hostname, port)
|
self.headers['Host'] = "%s:%d\r"%(hostname, port)
|
def __init__ (self, client, request, headers, content, nofilter, compress, mime=None): self.client = client self.server = None self.request = request self.headers = headers self.compress = compress self.content = content self.nofilter = nofilter self.mime = mime debug(PROXY, "ClientServer: %s", `self.request`) self.method, self.url, protocol = self.request.split() # strip leading zeros and other stuff protocol = fix_http_version(protocol) scheme, hostname, port, document = spliturl(self.url) # some clients send partial URI's without scheme, hostname # and port to clients, so we have to handle this if not scheme: # default scheme is http scheme = "http" elif scheme != 'http': warn(PROXY, "Forbidden scheme encountered at %s", self.url) client.error(403, i18n._("Forbidden")) return if not hostname and self.headers.has_key('Host'): host = self.headers['Host'] hostname, port = splitnport(host, 80) if not hostname or \ (hostname in config['localhosts'] and port==config['port']): # this is a direct proxy call, delegate it to local handler client.handle_local() return # fix missing trailing / if not document: document = '/' # add missing host headers for HTTP/1.1 if protocol=='HTTP/1.1' and not self.headers.has_key('Host'): if port!=80: self.headers['Host'] += "%s:%d\r"%(hostname, port) else: self.headers['Host'] = "%s\r"%hostname debug(PROXY, "ClientServer: splitted url %s %s %d %s", scheme, hostname, port, document) # prepare DNS lookup if config['parentproxy']: self.hostname = config['parentproxy'] self.port = config['parentproxyport'] self.document = self.url if config['parentproxycreds']: auth = config['parentproxycreds'] self.headers['Proxy-Authorization'] = "%s\r"%auth else: self.hostname = hostname self.port = port self.document = document # append information for wcheaders tool wc.proxy.HEADERS.append((self.url, 'client', self.headers.items())) # start DNS lookup self.state = 'dns' dns_lookups.background_lookup(self.hostname, self.handle_dns)
|
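The fix in this row replaces `+=` with `=` when adding a missing `Host` header to an HTTP/1.1 request: `+=` reads a header that the surrounding check just established does not exist, so it would fail or mis-concatenate. A small sketch of building the header value, omitting the port when it is the default 80 as the original does:

```python
def make_host_header(hostname, port):
    """Return the Host header value for an HTTP/1.1 request."""
    if port != 80:
        return "%s:%d" % (hostname, port)
    return hostname

headers = {}
if "Host" not in headers:
    headers["Host"] = make_host_header("example.org", 8080)  # 'example.org:8080'
```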
data = pickle.load(fp)
|
rating_cache = pickle.load(fp)
|
def rating_cache_load (): """load cached rating data from disk or return an empty cache if no cached data is found""" if os.path.isfile(rating_cachefile): fp = file(rating_cachefile) data = pickle.load(fp) fp.close() # remove invalid entries for url in data: if not is_valid_url(url): error(FILTER, "Invalid rating url %r", url) del data[url] return data return {}
|
for url in data:
|
toremove = [] for url in rating_cache:
|
def rating_cache_load (): """load cached rating data from disk or return an empty cache if no cached data is found""" if os.path.isfile(rating_cachefile): fp = file(rating_cachefile) data = pickle.load(fp) fp.close() # remove invalid entries for url in data: if not is_valid_url(url): error(FILTER, "Invalid rating url %r", url) del data[url] return data return {}
|
del data[url] return data return {}
|
toremove.append(url) if toremove: for url in toremove: del rating_cache[url] rating_cache_write()
|
def rating_cache_load (): """load cached rating data from disk or return an empty cache if no cached data is found""" if os.path.isfile(rating_cachefile): fp = file(rating_cachefile) data = pickle.load(fp) fp.close() # remove invalid entries for url in data: if not is_valid_url(url): error(FILTER, "Invalid rating url %r", url) del data[url] return data return {}
|
rating_cache = rating_cache_load()
|
rating_cache = {} rating_cache_load()
|
def rating_cache_load (): """load cached rating data from disk or return an empty cache if no cached data is found""" if os.path.isfile(rating_cachefile): fp = file(rating_cachefile) data = pickle.load(fp) fp.close() # remove invalid entries for url in data: if not is_valid_url(url): error(FILTER, "Invalid rating url %r", url) del data[url] return data return {}
|
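These rows change `rating_cache_load` from returning a fresh dict to filling a module-level `rating_cache` in place, and defer pruning of invalid URLs until after iteration (the original deleted entries from the dict while iterating over it, which raises a RuntimeError). A sketch of the load-validate-prune pattern, with `is_valid_url` assumed to exist:

```python
import os
import pickle

def load_cache(path, is_valid_url):
    """Load a pickled {url: rating} cache and drop invalid entries."""
    if not os.path.isfile(path):
        return {}
    with open(path, "rb") as fp:
        cache = pickle.load(fp)
    # collect first, delete afterwards: never mutate a dict while iterating it
    invalid = [url for url in cache if not is_valid_url(url)]
    for url in invalid:
        del cache[url]
    return cache
```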
_attrs = {}
|
_attrs = wc.containers.ListDict()
|
def dict_attrs (attrs): _attrs = {} for name in attrs.getQNames(): _attrs[name] = attrs.getValueByQName(name) return _attrs
|
self.prefixuri = {} self.uriprefix = {}
|
self.ns_current = [] self.ns_stack = []
|
def __init__ (self, xmlrules, htmlrules, url, localhost): """ Init rules and buffers. """ # filter rules self.xmlrules = xmlrules self.htmlrules = htmlrules self.url = url # XML namespaces {name -> uri} and {uri -> name} self.prefixuri = {} self.uriprefix = {} # already filtered XML data self.outbuf = StringIO() self.tagbuf = [] self.rulestack = [] self.stack = [] self.encoding = "UTF-8"
|
print "XXX setDocumentLocator", locator
|
print >>sys.stderr, "XXX setDocumentLocator", locator
|
def setDocumentLocator (self, locator): print "XXX setDocumentLocator", locator
|
self.prefixuri[prefix] = uri self.uriprefix[uri] = prefix
|
ns = (prefix, uri) self.ns_stack.append(ns) self.ns_current.append(ns)
|
def startPrefixMapping (self, prefix, uri): self.prefixuri[prefix] = uri self.uriprefix[uri] = prefix
|
if prefix in self.prefixuri: uri = self.prefixuri[prefix] del self.uriprefix[uri] del self.prefixuri[prefix] else: self.error("Removing unknown prefix mapping %r" % prefix)
|
if not self.ns_stack or self.ns_stack[-1][0] != prefix: self.error("Removing unknown prefix mapping (%r)" % prefix) del self.ns_stack[-1] def find_namespace (self, uri): for prefix, nsuri in reversed(self.ns_stack): if nsuri == uri: return (prefix, uri) return None
|
def endPrefixMapping (self, prefix):
    if prefix in self.prefixuri:
        uri = self.prefixuri[prefix]
        del self.uriprefix[uri]
        del self.prefixuri[prefix]
    else:
        self.error("Removing unknown prefix mapping %r" % prefix)
|
if not self.stack:
    for prefix, uri in self.prefixuri.items():
        if prefix:
            attrs[u"xmlns:%s" % prefix] = uri
        else:
            attrs[u"xmlns"] = uri
|
for prefix, uri in self.ns_current:
    if prefix:
        attrs["xmlns:%s" % prefix] = uri
    else:
        attrs["xmlns"] = uri
self.ns_current = []
|
def startElement (self, name, attrs):
    attrs = dict_attrs(attrs)
    if not self.stack:
        for prefix, uri in self.prefixuri.items():
            if prefix:
                attrs[u"xmlns:%s" % prefix] = uri
            else:
                attrs[u"xmlns"] = uri
    self.stack.append((wc.filter.xmlfilt.STARTTAG, name, attrs))
    item = [wc.filter.xmlfilt.STARTTAG, name, attrs]
    self.tagbuf.append(item)
    rulelist = [rule for rule in self.xmlrules \
                if rule.match_tag(self.stack)]
    if rulelist:
        pos = len(self.tagbuf)
        self.rulestack.append((pos, rulelist))
|
if name[0]:
    ns = self.uriprefix[name[0]]
    if ns:
        name = u"%s:%s" % (ns, name[1])
    else:
        name = name[1]
else:
    name = name[1]
self.startElement(name, attrs)
|
tag = name[1]
namespace = self.find_namespace(name[0])
if namespace and namespace[0]:
    tag = u"%s:%s" % (namespace[0], name[1])
self.startElement(tag, attrs)
|
def startElementNS (self, name, qname, attrs):
    if name[0]:
        ns = self.uriprefix[name[0]]
        if ns:
            name = u"%s:%s" % (ns, name[1])
        else:
            name = name[1]
    else:
        name = name[1]
    self.startElement(name, attrs)
|
if name[0]:
    ns = self.uriprefix[name[0]]
    if ns:
        name = u"%s:%s" % (ns, name[1])
    else:
        name = name[1]
else:
    name = name[1]
self.endElement(name)
|
tag = name[1]
namespace = self.find_namespace(name[0])
if namespace and namespace[0]:
    tag = u"%s:%s" % (namespace[0], name[1])
self.endElement(tag)
|
def endElementNS (self, name, qname):
    if name[0]:
        ns = self.uriprefix[name[0]]
        if ns:
            name = u"%s:%s" % (ns, name[1])
        else:
            name = name[1]
    else:
        name = name[1]
    self.endElement(name)
|
print "XXX skippedEntity", name
|
print >>sys.stderr, "XXX skippedEntity", name
|
def skippedEntity (self, name):
    print "XXX skippedEntity", name
|
print "XXX notationDecl", name, publicId, systemId
|
print >>sys.stderr, "XXX notationDecl", name, publicId, systemId
|
def notationDecl (self, name, publicId, systemId):
    print "XXX notationDecl", name, publicId, systemId
|
print "XXX unparsedEntityDecl", name, publicId, systemId, ndata
|
print >>sys.stderr, "XXX unparsedEntityDecl", name, publicId, systemId, ndata
|
def unparsedEntityDecl (self, name, publicId, systemId, ndata):
    print "XXX unparsedEntityDecl", name, publicId, systemId, ndata
|
if not path:
|
if not path or path=='/':
|
def norm_url (url):
    """replace empty paths with / and normalize them"""
    urlparts = list(urlparse.urlparse(url))
    path = urlparts[2]
    if not path:
        urlparts[2] = '/'
    else:
        # XXX only windows and posix??
        # collapse redundant path segments
        urlparts[2] = os.path.normpath(path).replace('\\', '/')
        if path.endswith('/'):
            urlparts[2] += '/'
    return urlparse.urlunparse(urlparts)
|
if not self.mimelist: return True
|
def applies_to_mime (self, mime):
    if mime is None:
        return False
    if not self.mimelist:
        return True
    for ro in self.mimelist:
        if ro.match(mime):
            return True
    return False
|
|
import profile, wc
|
import profile
|
def _main ():
    """USAGE: test/run.sh test/pconfig.py"""
    from test import initlog
    initlog("test/logging.conf")
    import profile, wc
    profile.run("config = wc.Configuration()", "filter.prof")
|
os.makedirs(os.path.dirname("downloads/"+target))
|
d = os.path.dirname("downloads/"+target)
if not os.path.isdir(d):
    os.makedirs(d)
|
def geturl (basedir, file, fun, saveas=None):
    if saveas is not None:
        target = saveas
    else:
        target = file
    if os.path.exists("downloads/"+target):
        print "downloads/%s already exists"%target
    else:
        print "downloading", basedir+file
        os.makedirs(os.path.dirname("downloads/"+target))
        urldata = urllib2.urlopen(basedir+file)
        f = open("downloads/"+target, 'w')
        f.write(urldata.read())
        f.close()
    fun(target)
|
print "remove old data..."
|
print "remove old extracted data..."
|
def remove_old_data ():
    print "remove old data..."
    for d in ("extracted", "config/blacklists_new"):
        if os.path.isdir(d):
            rm_rf(d)
|
print "read data..."
read_blacklists("config/blacklists")
|
def remove_old_data ():
    print "remove old data..."
    for d in ("extracted", "config/blacklists_new"):
        if os.path.isdir(d):
            rm_rf(d)
|
|
auth = ",".join(creds['NTLM'][0])
|
attrs = {
    'host': creds['NTLM'][0]['host'],
    'domain': creds['NTLM'][0]['domain'],
    'type': NTLMSSP_CHALLENGE,
}
auth = ",".join(get_challenges(**attrs))
|
def process_headers (self):
    """read and filter client request headers"""
    # Two newlines ends headers
    i = self.recv_buffer.find('\r\n\r\n')
    if i < 0:
        return
    i += 4 # Skip over newline terminator
    # the first 2 chars are the newline of request
    fp = StringIO(self.read(i)[2:])
    msg = WcMessage(fp)
    # put unparsed data (if any) back to the buffer
    msg.rewindbody()
    self.recv_buffer = fp.read() + self.recv_buffer
    fp.close()
    debug(PROXY, "%s client headers \n%s", self, msg)
    filters = [FILTER_REQUEST_HEADER,
               FILTER_REQUEST_DECODE,
               FILTER_REQUEST_MODIFY,
               FILTER_REQUEST_ENCODE,
              ]
    self.attrs['headers'] = msg
    self.set_persistent(msg, self.http_ver)
    self.mangle_request_headers(msg)
    self.compress = client_set_encoding_headers(msg)
    # filter headers
    self.headers = applyfilter(FILTER_REQUEST_HEADER, msg, "finish", self.attrs)
    # add decoders
    self.decoders = []
    self.bytes_remaining = get_content_length(self.headers)
    # chunked encoded
    if self.headers.has_key('Transfer-Encoding'):
        # XXX don't look at value, assume chunked encoding for now
        debug(PROXY, '%s Transfer-encoding %r', self,
              self.headers['Transfer-encoding'])
        self.decoders.append(UnchunkStream())
        client_remove_encoding_headers(self.headers)
        self.bytes_remaining = None
    if self.bytes_remaining is None:
        self.persistent = False
    if not self.hostname and self.headers.has_key('Host'):
        if self.method=='CONNECT':
            defaultport = 443
        else:
            defaultport = 80
        host = self.headers['Host']
        self.hostname, self.port = splitnport(host, defaultport)
    if not self.hostname:
        error(PROXY, "%s missing hostname in request", self)
        self.error(400, i18n._("Bad Request"))
    # local request?
    if self.hostname in config['localhosts'] and self.port==config['port']:
        # this is a direct proxy call, jump directly to content
        self.state = 'content'
        return
    # add missing host headers for HTTP/1.1
    if not self.headers.has_key('Host'):
        warn(PROXY, "%s request without Host header encountered", self)
        if self.port!=80:
            self.headers['Host'] = "%s:%d\r"%(self.hostname, self.port)
        else:
            self.headers['Host'] = "%s\r"%self.hostname
    if config["proxyuser"]:
        creds = get_header_credentials(self.headers, 'Proxy-Authorization')
        if not creds:
            auth = ", ".join(get_challenges())
            self.error(407, i18n._("Proxy Authentication Required"), auth=auth)
            return
        if 'NTLM' in creds:
            if creds['NTLM'][0]['type']==NTLMSSP_NEGOTIATE:
                auth = ",".join(creds['NTLM'][0])
                self.error(407, i18n._("Proxy Authentication Required"), auth=auth)
                return
        # XXX the data=None argument should hold POST data
        if not check_credentials(creds, username=config['proxyuser'],
                                 password_b64=config['proxypass'],
                                 uri=get_auth_uri(self.url),
                                 method=self.method, data=None):
            warn(AUTH, "Bad proxy authentication from %s", self.addr[0])
            auth = ", ".join(get_challenges())
            self.error(407, i18n._("Proxy Authentication Required"), auth=auth)
            return
    if self.method in ['OPTIONS', 'TRACE'] and \
       client_get_max_forwards(self.headers)==0:
        # XXX display options ?
        self.state = 'done'
        ServerHandleDirectly(self, '%s 200 OK'%self.protocol, 200,
            WcMessage(StringIO('Content-Type: text/plain\r\n\r\n')), '')
        return
    self.state = 'content'
|
raise RatingParseError("Invalid rating url %s." % repr(url))
|
raise wc.filter.rating.RatingParseError( "Invalid rating url %s." % repr(url))
|
def check_url (self, url):
    if not wc.url.is_safe_url(url):
        raise RatingParseError("Invalid rating url %s." % repr(url))
|
self.ratings = []
|
self.ratings = {}
for category in wc.filter.rating.categories:
    self.ratings[category.name] = None
|
def __init__ (self, sid=None, titles=None, descriptions=None,
              disable=0, matchurls=None, nomatchurls=None):
    super(RatingRule, self).__init__(sid=sid, titles=titles,
        descriptions=descriptions, disable=disable,
        matchurls=matchurls, nomatchurls=nomatchurls)
    # list of RuleRating objects
    self.ratings = []
    self.url = ""
|
self.ratings.append((self._category, self._data))
|
self.ratings[self._category] = self._data
|
def end_data (self, name):
    super(RatingRule, self).end_data(name)
    if name == 'category':
        assert self._category
        self.ratings.append((self._category, self._data))
        pass
    elif name == 'url':
        self.url = self._data
|
if mime is not None and not attrs['mime'].startswith(mime):
|
origmime = attrs['mime']
if mime is not None and origmime is not None and \
   not origmime.startswith(mime):
|
def recognize (self, buf, attrs):
    # note: recognizing a mime type fixes exploits like
    # CVE-2002-0025 and CVE-2002-0024
    wc.log.debug(wc.LOG_FILTER, "MIME recognize %d bytes of data", buf.tell())
    try:
        mime = wc.magic.classify(buf)
        wc.log.debug(wc.LOG_FILTER, "MIME recognized %r", mime)
        if mime is not None and not attrs['mime'].startswith(mime):
            wc.log.warn(wc.LOG_FILTER, "Adjusting MIME %r -> %r at %r",
                        attrs['mime'], mime, attrs['url'])
            attrs['mime'] = mime
            attrs['headers']['data']['Content-Type'] = "%s\r" % mime
    except StandardError, msg:
        wc.log.exception(wc.LOG_FILTER, "Mime recognize error")
    data = buf.getvalue()
    buf.close()
    return data
|
attrs['mime'], mime, attrs['url'])
|
origmime, mime, attrs['url'])
|
def recognize (self, buf, attrs):
    # note: recognizing a mime type fixes exploits like
    # CVE-2002-0025 and CVE-2002-0024
    wc.log.debug(wc.LOG_FILTER, "MIME recognize %d bytes of data", buf.tell())
    try:
        mime = wc.magic.classify(buf)
        wc.log.debug(wc.LOG_FILTER, "MIME recognized %r", mime)
        if mime is not None and not attrs['mime'].startswith(mime):
            wc.log.warn(wc.LOG_FILTER, "Adjusting MIME %r -> %r at %r",
                        attrs['mime'], mime, attrs['url'])
            attrs['mime'] = mime
            attrs['headers']['data']['Content-Type'] = "%s\r" % mime
    except StandardError, msg:
        wc.log.exception(wc.LOG_FILTER, "Mime recognize error")
    data = buf.getvalue()
    buf.close()
    return data
|
self['noproxyfor'] = [{}, [], {}]
self['allowedhosts'] = [{}, [], {}]
|
self['noproxyfor'] = None
self['allowedhosts'] = None
|
def reset (self):
    """Reset to default values"""
    self['port'] = 8080
    self['proxyuser'] = ""
    self['proxypass'] = ""
    self['parentproxy'] = ""
    self['parentproxyport'] = 3128
    self['parentproxyuser'] = ""
    self['parentproxypass'] = ""
    self['logfile'] = ""
    self['strict_whitelist'] = 0
    self['debuglevel'] = 0
    self['rules'] = []
    self['filters'] = []
    self['filterlist'] = [[],[],[],[],[],[],[],[],[],[]]
    self['colorize'] = 0
    self['noproxyfor'] = [{}, [], {}]
    self['allowedhosts'] = [{}, [], {}]
    self['starttime'] = time.time()
    self['requests'] = {'valid':0, 'error':0, 'blocked':0}
    self['local_sockets_only'] = 0
    self['localip'] = socket.gethostbyname(socket.gethostname())
    self['mime_content_rewriting'] = []
    self['headersave'] = 100
    self['showerrors'] = None
|
if self.config['noproxyfor']:
|
if self.config['noproxyfor'] is not None:
|
def start_element (self, name, attrs):
    if name=='webcleaner':
        for key,val in attrs.items():
            self.config[str(key)] = unxmlify(val)
        for key in ('port','parentproxyport', 'debuglevel','colorize',
                    'showerrors', 'strict_whitelist'):
            self.config[key] = int(self.config[key])
        for key in ('version', 'parentproxy', 'logfile', 'proxyuser',
                    'proxypass', 'parentproxyuser', 'parentproxypass', ):
            if self.config[key] is not None:
                self.config[key] = str(self.config[key])
        if self.config['noproxyfor']:
            strhosts = str(self.config['noproxyfor'])
            self.config['noproxyfor'] = host_set(strhosts)
        if self.config['allowedhosts']:
            strhosts = str(self.config['allowedhosts'])
            self.config['allowedhosts'] = host_set(strhosts)
        if self.config['logfile'] == '<stdout>':
            self.config['logfile'] = sys.stdout
        elif self.config['logfile']:
            self.config['logfile'] = open(self.config['logfile'], 'a')
    elif name=='filter':
        debug(BRING_IT_ON, "enable filter module %s" % attrs['name'])
        self.config['filters'].append(attrs['name'])
|
if self.config['allowedhosts']:
|
else:
    self.config['noproxyfor'] = [{}, [], {}]
if self.config['allowedhosts'] is not None:
|
def start_element (self, name, attrs):
    if name=='webcleaner':
        for key,val in attrs.items():
            self.config[str(key)] = unxmlify(val)
        for key in ('port','parentproxyport', 'debuglevel','colorize',
                    'showerrors', 'strict_whitelist'):
            self.config[key] = int(self.config[key])
        for key in ('version', 'parentproxy', 'logfile', 'proxyuser',
                    'proxypass', 'parentproxyuser', 'parentproxypass', ):
            if self.config[key] is not None:
                self.config[key] = str(self.config[key])
        if self.config['noproxyfor']:
            strhosts = str(self.config['noproxyfor'])
            self.config['noproxyfor'] = host_set(strhosts)
        if self.config['allowedhosts']:
            strhosts = str(self.config['allowedhosts'])
            self.config['allowedhosts'] = host_set(strhosts)
        if self.config['logfile'] == '<stdout>':
            self.config['logfile'] = sys.stdout
        elif self.config['logfile']:
            self.config['logfile'] = open(self.config['logfile'], 'a')
    elif name=='filter':
        debug(BRING_IT_ON, "enable filter module %s" % attrs['name'])
        self.config['filters'].append(attrs['name'])
|
if not attrs.has_key('buffer') or attrs['buffer'].closed:
|
if not attrs.has_key('buffer'):
    return data
if attrs['blocked']:
    return ''
if attrs['buffer'].closed:
|
def filter (self, data, **attrs):
    if not attrs.has_key('buffer') or attrs['buffer'].closed:
        # we do not block this image
        # or we do not have enough buffer data yet
        return data
    buf = attrs['buffer']
    buf.write(data)
    if buf.tell() > self.min_bufsize:
        if self.check_sizes(buf, attrs['sizes']):
            # size is ok
            data = buf.getvalue()
            buf.close()
            return data
    return ''
|
if self.check_sizes(buf, attrs['sizes']):
    data = buf.getvalue()
    buf.close()
    return data
|
attrs['blocked'] = not self.check_sizes(buf, attrs['sizes'],
                                        attrs['url'])
data = buf.getvalue()
buf.close()
if attrs['blocked']:
    return self.blockdata
return data
|
def filter (self, data, **attrs):
    if not attrs.has_key('buffer') or attrs['buffer'].closed:
        # we do not block this image
        # or we do not have enough buffer data yet
        return data
    buf = attrs['buffer']
    buf.write(data)
    if buf.tell() > self.min_bufsize:
        if self.check_sizes(buf, attrs['sizes']):
            # size is ok
            data = buf.getvalue()
            buf.close()
            return data
    return ''
|
if not attrs.has_key('buffer') or attrs['buffer'].closed:
|
if not attrs.has_key('buffer'):
    return data
if attrs['blocked']:
    return ''
if attrs['buffer'].closed:
|
def finish (self, data, **attrs):
    if not attrs.has_key('buffer') or attrs['buffer'].closed:
        # we do not block this image
        return data
    buf = attrs['buffer']
    buf.write(data)
    if self.check_sizes(buf, attrs['sizes']):
        # size is ok
        data = buf.getvalue()
        buf.close()
        return data
    return ''
|
if self.check_sizes(buf, attrs['sizes']):
    data = buf.getvalue()
    buf.close()
    return data
return ''
|
attrs['blocked'] = not self.check_sizes(buf, attrs['sizes'],
                                        attrs['url'])
data = buf.getvalue()
buf.close()
if attrs['blocked']:
    return self.blockdata
return data
|
def finish (self, data, **attrs):
    if not attrs.has_key('buffer') or attrs['buffer'].closed:
        # we do not block this image
        return data
    buf = attrs['buffer']
    buf.write(data)
    if self.check_sizes(buf, attrs['sizes']):
        # size is ok
        data = buf.getvalue()
        buf.close()
        return data
    return ''
|
def check_sizes (self, buf, sizes):
|
def check_sizes (self, buf, sizes, url):
|
def check_sizes (self, buf, sizes):
    try:
        buf.seek(0)
        img = Image.open(buf, 'r')
        for size, formats in sizes:
            if size==img.size:
                # size matches, look for format restriction
                if not formats:
                    return False
                elif img.format.lower() in formats:
                    return False
    except IOError:
        exception(FILTER, "Could not get image size")
    return True
|
exception(FILTER, "Could not get image size")
|
exception(FILTER, "Could not get image size from %s", url)
|
def check_sizes (self, buf, sizes):
    try:
        buf.seek(0)
        img = Image.open(buf, 'r')
        for size, formats in sizes:
            if size==img.size:
                # size matches, look for format restriction
                if not formats:
                    return False
                elif img.format.lower() in formats:
                    return False
    except IOError:
        exception(FILTER, "Could not get image size")
    return True
|
replace = re.sub(r"\$(\d)", r"\\1", replace)
|
replace = re.sub(r"\${?(\d)}?", r"\\\1", replace)
|
def convert_adzapper_replace (replace):
    # replace Perl back references with Python ones
    replace = re.sub(r"\$(\d)", r"\\1", replace)
    return replace
|
wc.log.info(wc.LOG_PROXY, '%s connect timed out', self)
|
wc.log.debug(wc.LOG_PROXY, '%s connect timed out', self)
|
def check_connect (self, addr):
    """
    Check if the connection is etablished.
    See also http://cr.yp.to/docs/connect.html and connect(2) manpage.
    """
    wc.log.debug(wc.LOG_PROXY, '%s check connect', self)
    self.connect_checks += 1
    if self.connect_checks >= 50:
        wc.log.info(wc.LOG_PROXY, '%s connect timed out', self)
        self.handle_close()
        return
    try:
        (r, w, e) = select.select([], [self.fileno()], [], 0.2)
    except select.error, why:
        # not yet ready
        wc.log.debug(wc.LOG_PROXY, '%s connect error %s', self, str(why))
        wc.proxy.make_timer(0.2, lambda a=addr: self.check_connect(addr))
        return
    if self.fileno() not in w:
        # not yet ready
        wc.log.debug(wc.LOG_PROXY, '%s not writable', self)
        wc.proxy.make_timer(0.2, lambda a=addr: self.check_connect(addr))
        return
    err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
    if err == 0:
        self.addr = addr
        self.connected = True
        wc.log.debug(wc.LOG_PROXY, '%s connected', self)
        self.handle_connect()
    elif err in (errno.EINPROGRESS, errno.EWOULDBLOCK):
        wc.log.debug(wc.LOG_PROXY,
                     '%s connect status in progress/would block', self)
        wc.proxy.make_timer(0.2, lambda a=addr: self.check_connect(addr))
    else:
        strerr = errno.errorcode[err]
        wc.log.info(wc.LOG_PROXY, '%s connect error %s', self, strerr)
        self.handle_close()
|
os.system('invoke-rc.d webcleaner start')
|
from wc import daemon
daemon.start(parent_exit=0)
|
def onCmdProxyStart (self, sender, sel, ptr):
    os.system('invoke-rc.d webcleaner start')
    debug(GUI, "webcleaner start")
    return 1
|
os.system('invoke-rc.d webcleaner stop')
|
from wc import daemon
daemon.stop()
|
def onCmdProxyStop (self, sender, sel, ptr):
    os.system('invoke-rc.d webcleaner stop')
    debug(GUI, "webcleaner stop")
    return 1
|
os.system('invoke-rc.d webcleaner restart')
|
from wc import daemon
daemon.restart(parent_exit=0)
|
def onCmdProxyRestart (self, sender, sel, ptr):
    os.system('invoke-rc.d webcleaner restart')
    debug(GUI, "webcleaner restart")
    return 1
|