rem | add | context
stringlengths 0-322k | stringlengths 0-2.05M | stringlengths 8-228k
---|---|---|
for (k, v) in output_formats.items():
    if data.get("GENERATE_" + k, v[0]) == "YES":
|
for (k, v) in output_formats.iteritems():
    if data.get("GENERATE_" + k, v[0]).upper() == "YES":
|
def DoxyEmitter(source, target, env):
    """Doxygen Doxyfile emitter"""
    # possible output formats and their default values and output locations
    output_formats = {
        "HTML": ("YES", "html"),
        "LATEX": ("YES", "latex"),
        "RTF": ("NO", "rtf"),
        "MAN": ("YES", "man"),
        "XML": ("NO", "xml"),
    }
    data = DoxyfileParse(source[0].get_contents(), str(source[0].dir))
    targets = []
    out_dir = data.get("OUTPUT_DIRECTORY", ".")
    # add our output locations
    for (k, v) in output_formats.items():
        if data.get("GENERATE_" + k, v[0]) == "YES":
            # Grmpf ... need to use a File object here. The problem is, that
            # Dir.scan() is implemented to just return the directory entries
            # and does *not* invoke the source-file scanners .. ARGH !!
            dir = env.Dir( os.path.join(str(source[0].dir), out_dir, data.get(k + "_OUTPUT", v[1])) )
            node = env.File( os.path.join(str(dir), ".stamp" ) )
            env.Clean(node, dir)
            targets.append( node )
    if data.has_key("GENERATE_TAGFILE"):
        targets.append(env.File( os.path.join(str(source[0].dir), data["GENERATE_TAGFILE"]) ))
    # don't clobber targets
    for node in targets:
        env.Precious(node)
    return (targets, source)
|
data = DoxyfileParse(node.sources[0].get_contents(), str(node.sources[0].dir))
if data.get("GENERATE_HTML",'YES') != 'YES' : return None
return os.path.normpath(os.path.join( str(node.sources[0].dir),
|
data = DoxyfileParse(node.sources[0].abspath)
if data.get("GENERATE_HTML",'YES').upper() != 'YES' : return None
return os.path.normpath(os.path.join( node.sources[0].abspath,
|
def doxyNodeHtmlDir(node):
    if not node.sources : return None
    data = DoxyfileParse(node.sources[0].get_contents(), str(node.sources[0].dir))
    if data.get("GENERATE_HTML",'YES') != 'YES' : return None
    return os.path.normpath(os.path.join( str(node.sources[0].dir),
                                          data.get("OUTPUT_DIRECTORY","."),
                                          data.get("HTML_OUTPUT","html") ))
|
data = DoxyfileParse(source[0].get_contents(), str(source[0].dir))
|
data = DoxyfileParse(source[0].abspath)
|
def DoxyGenerator(source, target, env, for_signature):
    data = DoxyfileParse(source[0].get_contents(), str(source[0].dir))
    actions = [ env.Action("cd ${SOURCE.dir} && ${DOXYGEN} ${SOURCE.file}") ]
    # This will add automatic 'installdox' calls.
    #
    # For every referenced tagfile, the generator first checks for the
    # existence of a construction variable '<name>_DOXY_URL' where
    # '<name>' is the uppercased name of the tagfile sans extension
    # (e.g. 'Utils.tag' -> 'UTILS_DOXY_URL'). If this variable exists,
    # it must contain the url or path to the installed documentation
    # corresponding to the tag file.
    #
    # If the variable is not found and a referenced tag file is a
    # target within this same build, the generator will parse the
    # 'Doxyfile' from which the tag file is built. It will
    # automatically create the html directory from the information in
    # that 'Doxyfile'.
    #
    # If for any referenced tagfile no url can be found, 'installdox'
    # will *not* be called and a warning about the missing url is
    # generated.
    if data.get('GENERATE_HTML','YES') == "YES":
        output_dir = os.path.normpath(os.path.join( str(source[0].dir),
                                                    data.get("OUTPUT_DIRECTORY","."),
                                                    data.get("HTML_OUTPUT","html") ))
        args = []
        for tagfile in data.get('TAGFILES',[]):
            url = env.get(os.path.splitext(os.path.basename(tagfile))[0].upper()+"_DOXY_URL", None)
            if not url:
                url = doxyNodeHtmlDir(
                    env.File(os.path.normpath(os.path.join( str(source[0].dir), tagfile ))))
                if url : url = relpath(output_dir, url)
            if not url:
                print "WARNING:",str(node.sources[0]),": missing tagfile url for",tagfile
                args = None
            if args is not None and url:
                args.append("-l %s@%s" % ( os.path.basename(tagfile), url ))
        if args:
            actions.append(env.Action('cd %s && ./installdox %s' % (output_dir, " ".join(args))))
    actions.append(env.Action([ "touch $TARGETS" ]))
    return actions
|
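As an aside, the '<name>_DOXY_URL' convention described in the comments above is easy to check in isolation. A minimal sketch of the name derivation the generator performs (standalone illustration; the tagfile path is made up):

    import os

    tagfile = "doc/Utils.tag"
    var = os.path.splitext(os.path.basename(tagfile))[0].upper() + "_DOXY_URL"
    print var # prints: UTILS_DOXY_URL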
if data.get('GENERATE_HTML','YES') == "YES":
    output_dir = os.path.normpath(os.path.join( str(source[0].dir),
|
if data.get('GENERATE_HTML','YES').upper() == "YES":
    output_dir = os.path.normpath(os.path.join( source[0].dir.abspath,
|
|
print "WARNING:",str(node.sources[0]),": missing tagfile url for",tagfile
|
print "WARNING:",source[0].abspath, ": missing tagfile url for", tagfile
|
|
return env.Command(target, testRunner, [ './$SOURCE $BOOSTTESTARGS | tee $TARGET' ])
|
return env.Command([ target, stamp ], testRunner, [ '( $SOURCE $BOOSTTESTARGS 2>&1 && touch ${TARGETS[1]} ) | tee ${TARGETS[0]}' ])
|
def BoostUnitTests(env, target, source, test_source=None, LIBS = [], DEPENDS = [], **kw):
    path, name = os.path.split(target)
    if test_source:
        if type(test_source) is not type([]): test_source = [ test_source ]
    else:
        test_source = []
    testEnv = env.Copy(**kw)
    testEnv.Append(LIBS = '$BOOSTTESTLIB')
    testEnv.Append(LIBS = LIBS)
    sources = []
    if source: sources = sources + env.Object(source)
    sources = sources + test_source
    binName = os.path.join(path,'.' + os.path.splitext(name)[0]+'.bin')
    testRunner = testEnv.Program(binName, sources)
    if DEPENDS: env.Depends(testRunner, DEPENDS)
    return env.Command(target, testRunner, [ './$SOURCE $BOOSTTESTARGS | tee $TARGET' ])
|
env.Append(CXXFLAGS = [ '-O3', '-g' ], LINKFLAGS = [ '-g' ])
|
env.Append(CXXFLAGS = [ '-O3' ], CPPDEFINES = [ 'NDEBUG' ])
|
def MakeEnvironment():
    global opts, finalizers
    InitOpts()
    env = SCons.Environment.Environment(options=opts)
    if SCons.Script.SConscript.Arguments.get('final'):
        env['final'] = 1
    env.Help(opts.GenerateHelpText(env))
    #conf = env.Configure()
    #env = conf.env
    if os.environ.has_key('SSH_AUTH_SOCK'):
        env.Append( ENV = { 'SSH_AUTH_SOCK': os.environ['SSH_AUTH_SOCK'] } )
    for finalizer in finalizers:
        finalizer(env)
    env.Append(CXXFLAGS = [ '-Wall', '-Woverloaded-virtual', '-Wno-long-long', '-pedantic', '-ansi' ],
               LOCALLIBDIR = [ '#' ],
               LIBPATH = [ '$LOCALLIBDIR' ])
    if env['final']:
        env.Append(CXXFLAGS = [ '-O3', '-g' ],
                   LINKFLAGS = [ '-g' ])
    else:
        env.Append(CXXFLAGS = [ '-O0', '-g', '-fno-inline' ],
                   LINKFLAGS = [ '-g' ])
    #return conf.Finish()
    return env
|
testEnv.Append(LIBS = '$BOOSTTESTLIB')
testEnv.Append(LIBS = LIBS)
|
testEnv.Prepend(LIBS = '$BOOSTTESTLIB')
testEnv.Prepend(LIBS = LIBS)
|
def BoostUnitTests(env, target, source, test_source=None, LIBS = [], DEPENDS = [], **kw):
    path, name = os.path.split(target)
    if test_source:
        if type(test_source) is not type([]): test_source = [ test_source ]
    else:
        test_source = []
    testEnv = env.Copy(**kw)
    testEnv.Append(LIBS = '$BOOSTTESTLIB')
    testEnv.Append(LIBS = LIBS)
    sources = []
    if source: sources = sources + env.Object(source)
    sources = sources + test_source
    binName = os.path.join(path,'.' + os.path.splitext(name)[0]+'.bin')
    testRunner = testEnv.Program(binName, sources)
    stamp = os.path.join(path,'.' + os.path.splitext(name)[0]+'.stamp')
    if DEPENDS: env.Depends(testRunner, DEPENDS)
    return env.Command([ target, stamp ], testRunner,
                       [ '( $SOURCE $BOOSTTESTARGS 2>&1 && touch ${TARGETS[1]} ) | tee ${TARGETS[0]}' ])
|
if not env['final'] : runtime += "gd"
|
if env['final'] : runtime += env.get('BOOST_RUNTIME','')
else : runtime += env.get('BOOST_DEBUG_RUNTIME','gd')
|
def FinalizeBoost(env):
    env.Tool('BoostUnitTests', [os.path.split(__file__)[0]])
    if env['BOOST_TOOLSET']:
        runtime = ""
        if not env['final'] : runtime += "gd"
        if env['STLPORT_LIB'] : runtime += "p"
        if runtime: runtime = "-" + runtime
        env['BOOST_VARIANT'] = "-" + env['BOOST_TOOLSET'] + runtime
        env['BOOSTTESTLIB'] = 'libboost_unit_test_framework' + env['BOOST_VARIANT']
    env.Append(LIBPATH = [ '$BOOST_LIBDIR' ],
               CPPPATH = [ '$BOOST_INCLUDES' ])
|
env['STLPORT_DEBUGLIB'] = ''
|
def FinalizeSTLPort(env):
    env['STLPORT_DEBUGLIB'] = ''
    if env['STLPORT_LIB']:
        env['STLPORT_DEBUGLIB'] = env['STLPORT_LIB'] + '_stldebug'
        env.Append(LIBPATH = [ '$STLPORT_LIBDIR' ],
                   CPPPATH = [ '$STLPORT_INCLUDES' ])
        if env['final']:
            env.Append(LIBS = [ '$STLPORT_LIB' ])
        else:
            env.Append(LIBS = [ '$STLPORT_DEBUGLIB' ],
                       CPPDEFINES = [ '_STLP_DEBUG' ])
|
|
env['STLPORT_DEBUGLIB'] = env['STLPORT_LIB'] + '_stldebug'
|
if not env['STLPORT_DEBUGLIB']: env['STLPORT_DEBUGLIB'] = env['STLPORT_LIB'] + '_stldebug'
|
|
env.Append(CXXFLAGS = [ '-Wall', '-Woverloaded-virtual', '-Wno-long-long', '-pedantic', '-ansi' ],
|
env.Append(CXXFLAGS = [ '-Wall', '-Woverloaded-virtual', '-Wno-long-long' ],
|
def MakeEnvironment():
    global opts, finalizers
    InitOpts()
    env = SCons.Environment.Environment(options=opts)
    if SCons.Script.SConscript.Arguments.get('final'):
        env['final'] = 1
    env.Help(opts.GenerateHelpText(env))
    #conf = env.Configure()
    #env = conf.env
    if os.environ.has_key('SSH_AUTH_SOCK'):
        env.Append( ENV = { 'SSH_AUTH_SOCK': os.environ['SSH_AUTH_SOCK'] } )
    for finalizer in finalizers:
        finalizer(env)
    env.Append(CXXFLAGS = [ '-Wall', '-Woverloaded-virtual', '-Wno-long-long', '-pedantic', '-ansi' ],
               LOCALLIBDIR = [ '#' ],
               LIBPATH = [ '$LOCALLIBDIR' ])
    if env['final']:
        env.Append(CXXFLAGS = [ '-O3' ],
                   CPPDEFINES = [ 'NDEBUG' ])
    else:
        env.Append(CXXFLAGS = [ '-O0', '-g', '-fno-inline' ],
                   LINKFLAGS = [ '-g' ])
    #return conf.Finish()
    return env
|
LINKFLAGS = [ '-g' ])
|
LINKFLAGS = [ '-g' ])
env.Append(CPPDEFINES = [ '$EXTRA_DEFINES' ],
           LIBS = [ '$EXTRA_LIBS' ])
|
|
[ '( $SOURCE $BOOSTTESTARGS 2>&1 && touch ${TARGETS[1]} ) | tee ${TARGETS[0]}' ])
|
[ '( $SOURCE $BOOSTTESTARGS 2>&1 && touch ${TARGETS[1]} ) | tee ${TARGETS[0]}; exit $$PIPESTATUS[0]' ])
|
def BoostUnitTests(env, target, source, test_source=None, LIBS = [], DEPENDS = [], **kw):
    path, name = os.path.split(target)
    if test_source:
        if type(test_source) is not type([]): test_source = [ test_source ]
    else:
        test_source = []
    testEnv = env.Copy(**kw)
    testEnv.Prepend(LIBS = '$BOOSTTESTLIB')
    testEnv.Prepend(LIBS = LIBS)
    sources = []
    if source: sources = sources + env.Object(source)
    sources = sources + test_source
    binName = os.path.join(path,'.' + os.path.splitext(name)[0]+'.bin')
    testRunner = testEnv.Program(binName, sources)
    stamp = os.path.join(path,'.' + os.path.splitext(name)[0]+'.stamp')
    if DEPENDS: env.Depends(testRunner, DEPENDS)
    return env.Command([ target, stamp ], testRunner,
                       [ '( $SOURCE $BOOSTTESTARGS 2>&1 && touch ${TARGETS[1]} ) | tee ${TARGETS[0]}' ])
|
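The '( ... ) | tee ...' action above pipes test output through tee, which masks the test binary's exit status; the 'exit $$PIPESTATUS[0]' suffix in the revision above restores it. A minimal standalone check of the underlying shell behavior, assuming a bash shell is available:

    import subprocess

    # The pipeline's status is that of its last command (tee), so a
    # failing "test binary" (false) still reports success:
    print subprocess.call(["bash", "-c", "false | tee /dev/null"]) # 0
    # Propagating the first command's status via bash's PIPESTATUS
    # array reports the real failure:
    print subprocess.call(["bash", "-c",
                           'false | tee /dev/null; exit "${PIPESTATUS[0]}"']) # 1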
if callback: _sqlitecache.log_handler_add(callback.log)
|
self.callback = callback
|
def __init__(self, storedir, repoid, callback=None):
|
return self.open_database(_sqlitecache.update_primary(location, checksum))
|
return self.open_database(_sqlitecache.update_primary(location, checksum, self.callback))
|
def getPrimary(self, location, checksum):
    """Load primary.xml.gz from an sqlite cache and update it if required"""
    return self.open_database(_sqlitecache.update_primary(location, checksum))
|
return self.open_database(_sqlitecache.update_filelist(location, checksum))
|
return self.open_database(_sqlitecache.update_filelist(location, checksum, self.callback))
|
def getFilelists(self, location, checksum):
    """Load filelist.xml.gz from an sqlite cache and update it if required"""
    return self.open_database(_sqlitecache.update_filelist(location, checksum))
|
host, port = urllib.splitnport(host)
|
newhost, port = urllib.splitnport(host)
if port is not None: host = newhost
|
def url_fix_host (urlparts):
    """
    Unquote and fix hostname. Returns is_idn.
    """
    urlparts[1], is_idn = idna_encode(urllib.unquote(urlparts[1]).lower())
    # a leading backslash in path causes urlsplit() to add the
    # path components up to the first slash to host
    # try to find this case...
    i = urlparts[1].find("\\")
    if i != -1:
        # ...and fix it by prepending the misplaced components to the path
        comps = urlparts[1][i:] # note: still has leading backslash
        if not urlparts[2] or urlparts[2] == '/':
            urlparts[2] = comps
        else:
            urlparts[2] = "%s%s" % (comps, urllib.unquote(urlparts[2]))
        urlparts[1] = urlparts[1][:i]
    else:
        # a leading ? in path causes urlsplit() to add the query to the
        # host name
        i = urlparts[1].find("?")
        if i != -1:
            urlparts[1], urlparts[3] = urlparts[1].split('?', 1)
    # path
    urlparts[2] = urllib.unquote(urlparts[2])
    if urlparts[1]:
        userpass, host = urllib.splituser(urlparts[1])
        if userpass:
            # append AT for easy concatenation
            userpass += "@"
        else:
            userpass = ""
        host, port = urllib.splitnport(host)
        # remove trailing dot
        if host.endswith("."):
            host = host[:-1]
        # remove a default (or invalid) port
        if port in [-1, None, default_ports.get(urlparts[0])]:
            urlparts[1] = userpass+host
        else:
            urlparts[1] = "%s%s:%d" % (userpass, host, port)
|
if not urlparts[2]:
    if urlparts[0] and \
       urlparts[0] not in urlparse.non_hierarchical and \
       (urlparts[3] or urlparts[4]):
        urlparts[2] = '/'
else:
    urlparts[2] = collapse_segments(urlparts[2])
|
is_hierarchical = urlparts[0] not in urlparse.non_hierarchical
if is_hierarchical:
    if not urlparts[2]:
        if urlparts[0] and (urlparts[3] or urlparts[4]):
            urlparts[2] = '/'
    else:
        urlparts[2] = collapse_segments(urlparts[2])
|
def url_norm (url):
    """
    Normalize the given URL which must be quoted. Supports unicode
    hostnames (IDNA encoding) according to RFC 3490.

    @return: (normed url, idna flag)
    @rtype: tuple of length two
    """
    urlparts = list(urlparse.urlsplit(url))
    # scheme
    urlparts[0] = urllib.unquote(urlparts[0]).lower()
    # mailto: urlsplit is broken
    if urlparts[0] == 'mailto':
        url_fix_mailto_urlsplit(urlparts)
    # host (with path or query side effects)
    is_idn = url_fix_host(urlparts)
    # query
    urlparts[3] = url_parse_query(urlparts[3])
    if not urlparts[2]:
        # empty path is allowed if url is non-hierarchical, or if both
        # query and fragment are also empty
        # note that in relative links, urlparts[0] might be empty
        # in this case, do not make any assumptions
        if urlparts[0] and \
           urlparts[0] not in urlparse.non_hierarchical and \
           (urlparts[3] or urlparts[4]):
            urlparts[2] = '/'
    else:
        # fix redundant path parts
        urlparts[2] = collapse_segments(urlparts[2])
    # quote parts again
    urlparts[0] = urllib.quote(urlparts[0]) # scheme
    urlparts[1] = urllib.quote(urlparts[1], '@:') # host
    urlparts[2] = urllib.quote(urlparts[2], _nopathquote_chars) # path
    res = urlparse.urlunsplit(urlparts)
    if url.endswith('#') and not urlparts[4]:
        # re-append trailing empty fragment
        res += '#'
    return (res, is_idn)
|
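For context on the backslash branch in url_fix_host above, the stdlib quirk it repairs can be reproduced directly (a standalone illustration using Python 2's urlparse; nothing here comes from the diff):

    import urlparse

    # A backslash is not a netloc delimiter, so everything up to the
    # first forward slash lands in the host part -- the case that
    # url_fix_host detects and moves back into the path:
    parts = urlparse.urlsplit("http://example.com\\evil.example/path")
    print parts[1] # host: example.com\evil.example
    print parts[2] # path: /path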
data.append("has_crypto = %s" % str(has_crypto))
data.append("has_pil = %s" % str(has_pil))
data.append("has_ssl = %s" % str(has_ssl))
|
def create_conf_file (self, data, directory=None):
    """create local config file from given data (list of lines) in
       the directory (or current directory if not given)
    """
    data.insert(0, "# this file is automatically created by setup.py")
    data.insert(0, "# -*- coding: iso-8859-1 -*-")
    if directory is None:
        directory = os.getcwd()
    filename = self.get_conf_filename(directory)
    # add metadata
    metanames = ("name", "version", "author", "author_email",
                 "maintainer", "maintainer_email", "url", "license",
                 "description", "long_description", "keywords",
                 "platforms", "fullname", "contact", "contact_email",
                 "fullname")
    for name in metanames:
        method = "get_" + name
        cmd = "%s = %r" % (name, getattr(self.metadata, method)())
        data.append(cmd)
    data.append('appname = "WebCleaner"')
    data.append("has_crypto = %s" % str(has_crypto))
    data.append("has_pil = %s" % str(has_pil))
    data.append("has_ssl = %s" % str(has_ssl))
    util.execute(write_file, (filename, data),
                 "creating %s" % filename, self.verbose>=1, self.dry_run)
|
|
scripts = [ 'webcleaner', 'webcleaner-certificates', ]
if os.name=='nt' or win_cross_compiling:
    scripts.append('install-webcleaner.py')
|
|
|
scripts = ['webcleaner', 'webcleaner-certificates'],
|
scripts = scripts,
|
|
filters = [FILTER_RESPONSE_HEADER, FILTER_RESPONSE_DECODE,
           FILTER_RESPONSE_MODIFY, FILTER_RESPONSE_ENCODE, ]
self.attrs = get_filterattrs(self.url, filters, headers=msg)
|
self.attrs = get_filterattrs(self.url, [FILTER_RESPONSE_HEADER], headers=msg)
|
def process_headers (self):
    # Headers are terminated by a blank line .. now in the regexp,
    # we want to say it's either a newline at the beginning of
    # the document, or it's a lot of headers followed by two newlines.
    # The cleaner alternative would be to read one line at a time
    # until we get to a blank line...
    m = re.match(r'^((?:[^\r\n]+\r?\n)*\r?\n)', self.recv_buffer)
    if not m: return
    # get headers
    fp = StringIO(self.read(m.end()))
    msg = WcMessage(fp)
    # put unparsed data (if any) back to the buffer
    msg.rewindbody()
    self.recv_buffer = fp.read() + self.recv_buffer
    debug(PROXY, "%s server headers\n%s", str(self), str(msg))
    if self.statuscode==100:
        # it's a Continue request, so go back to waiting for headers
        # XXX for HTTP/1.1 clients, forward this
        self.state = 'response'
        return
    http_ver = serverpool.http_versions[self.addr]
    if http_ver >= (1,1):
        self.persistent = not has_header_value(msg, 'Connection', 'Close')
    elif http_ver >= (1,0):
        self.persistent = has_header_value(msg, 'Connection', 'Keep-Alive')
    else:
        self.persistent = False
    filters = [FILTER_RESPONSE_HEADER, FILTER_RESPONSE_DECODE,
               FILTER_RESPONSE_MODIFY, FILTER_RESPONSE_ENCODE, ]
    self.attrs = get_filterattrs(self.url, filters, headers=msg)
    try:
        self.headers = applyfilter(FILTER_RESPONSE_HEADER, msg, "finish", self.attrs)
    except FilterPics, msg:
        self.statuscode = 403
        debug(PROXY, "%s FilterPics %s", str(self), `msg`)
        # XXX get version
        response = "HTTP/1.1 403 Forbidden"
        headers = WcMessage(StringIO('Content-type: text/plain\r\n'
                                     'Content-Length: %d\r\n\r\n' % len(msg)))
        self.client.server_response(response, self.statuscode, headers)
        self.client.server_content(msg)
        self.client.server_close()
        self.state = 'recycle'
        self.reuse()
        return
    server_set_headers(self.headers)
    self.bytes_remaining = server_set_encoding_headers(self.headers,
        self.is_rewrite(), self.decoders, self.client.compress,
        self.bytes_remaining)
    # 304 Not Modified does not send any type info, because it was cached
    if self.statuscode!=304:
        server_set_content_headers(self.headers, self.document,
                                   self.mime, self.url)
    # XXX <doh>
    #if not self.headers.has_key('Content-Length'):
    #    self.headers['Connection'] = 'close\r'
    #remove_headers(self.headers, ['Keep-Alive'])
    # XXX </doh>
    if self.statuscode!=407:
        self.client.server_response(self.response, self.statuscode,
                                    self.headers)
    if self.statuscode in (204, 304) or self.method == 'HEAD':
        # These response codes indicate no content
        self.state = 'recycle'
    else:
        self.state = 'content'
|
is_closed = decoder.closed or is_closed
|
debug(PROXY, "%s have run decoder %s", str(self), str(decoder))
if not is_closed and decoder.closed:
    is_closed = True
|
def process_content (self):
    data = self.read(self.bytes_remaining)
    if self.bytes_remaining is not None:
        # If we do know how many bytes we're dealing with,
        # we'll close the connection when we're done
        self.bytes_remaining -= len(data)
        debug(PROXY, "%s %d bytes remaining", str(self), self.bytes_remaining)
    is_closed = False
    for decoder in self.decoders:
        data = decoder.decode(data)
        is_closed = decoder.closed or is_closed
    try:
        for i in [FILTER_RESPONSE_DECODE, FILTER_RESPONSE_MODIFY,
                  FILTER_RESPONSE_ENCODE]:
            data = applyfilter(i, data, "filter", self.attrs)
        if data:
            if self.statuscode!=407:
                self.client.server_content(data)
            self.data_written = True
    except FilterWait, msg:
        debug(PROXY, "%s FilterWait %s", str(self), `msg`)
    except FilterPics, msg:
        debug(PROXY, "%s FilterPics %s", str(self), `msg`)
        assert not self.data_written
        # XXX interactive options here
        self.client.server_content(str(msg))
        self.client.server_close()
        self.state = 'recycle'
        self.reuse()
        return
    underflow = self.bytes_remaining is not None and \
                self.bytes_remaining < 0
    if underflow:
        warn(PROXY, i18n._("server received %d bytes more than content-length"),
             (-self.bytes_remaining))
    if is_closed or self.bytes_remaining==0:
        # Either we ran out of bytes, or the decoder says we're done
        self.state = 'recycle'
|
for i in [FILTER_RESPONSE_DECODE, FILTER_RESPONSE_MODIFY, FILTER_RESPONSE_ENCODE]:
|
for i in _response_filters:
|
|
for i in [FILTER_RESPONSE_DECODE, FILTER_RESPONSE_MODIFY, FILTER_RESPONSE_ENCODE]:
|
for i in _response_filters:
|
def flush (self):
    """flush data of decoders (if any) and filters"""
    debug(PROXY, "%s flushing", str(self))
    self.flushing = True
    data = ""
    while self.decoders:
        data = self.decoders[0].flush()
        del self.decoders[0]
        for decoder in self.decoders:
            data = decoder.decode(data)
    try:
        for i in [FILTER_RESPONSE_DECODE, FILTER_RESPONSE_MODIFY,
                  FILTER_RESPONSE_ENCODE]:
            data = applyfilter(i, data, "finish", self.attrs)
    except FilterWait, msg:
        debug(PROXY, "%s FilterWait %s", str(self), `msg`)
        # the filter still needs some data so try flushing again
        # after a while
        make_timer(0.2, lambda : self.flush())
        return
    # the client might already have closed
    if self.client and self.statuscode!=407:
        if data:
            self.client.server_content(data)
        self.client.server_close()
    self.attrs = {}
    if self.statuscode!=407:
        self.reuse()
|
version = "3.0",
|
version = "2.40",
|
def get_file_list (self):
    super(MySdist, self).get_file_list()
    self.filelist.append("MANIFEST")
|
BUF_SIZE=512
|
def getAttrs (self, headers, url):
    # weed out the rules that dont apply to this url
    rules = filter(lambda r, u=url: r.appliesTo(u), self.rules)
    if not rules: return {}
    return {'buf': Buf(rules)}
|
|
self.rules = rules
|
def __init__ (self, rules):
    self.buf = ""
    self.rules = rules
|
|
data = self.buf + data
|
self.buf += data
if len(self.buf) > 512:
    self._replace()
if len(self.buf) > 256:
    data = self.buf
    self.buf = self.buf[-256:]
    return data[:-256]
return ""

def _replace (self):
|
def replace (self, data):
    data = self.buf + data
    for rule in self.rules:
        data = rule.search.sub(rule.replace, data)
    self.buf = data[-BUF_SIZE:]
    return data[:-BUF_SIZE]
|
data = rule.search.sub(rule.replace, data)
self.buf = data[-BUF_SIZE:]
return data[:-BUF_SIZE]
|
self.buf = rule.search.sub(rule.replace, self.buf)
|
|
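Both revisions of replace above keep a tail of the processed data in self.buf so that a match straddling two network chunks is not lost. A self-contained sketch of that carry-over pattern (hypothetical rules list of (regex, replacement) pairs, not WebCleaner's rule objects):

    import re

    BUF_SIZE = 512 # carry-over window, should exceed the longest match

    class StreamReplacer:
        def __init__ (self, rules):
            self.rules = rules # list of (compiled regex, replacement)
            self.buf = ""

        def replace (self, data):
            data = self.buf + data
            for search, repl in self.rules:
                data = search.sub(repl, data)
            # keep the tail so a pattern split across chunks still matches
            self.buf = data[-BUF_SIZE:]
            return data[:-BUF_SIZE]

        def flush (self):
            data, self.buf = self.buf, ""
            return data

    r = StreamReplacer([(re.compile(r"foo+"), "bar")])
    print r.replace("xfo") + r.replace("oox") + r.flush() # xbarx

Note that the retained tail is re-scanned on the next call, so a replacement that itself matches a rule can be rewritten twice; the original code shares this property.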
fv = FXVerticalFrame(g, LAYOUT_FILL_X|LAYOUT_LEFT|LAYOUT_TOP, 0,0,0,0, 0,0,0,0, 0,0)
|
fv = FXVerticalFrame(g, LAYOUT_FILL_X|LAYOUT_FILL_Y|LAYOUT_LEFT|LAYOUT_TOP, 0,0,0,0, 0,0,0,0, 0,0)
|
def __init__ (self, parent, rule, index):
    """initialize pics rule display frame"""
    FXRuleFrame.__init__(self, parent, rule, index)
    FXMAPFUNC(self,SEL_COMMAND,FXPicsRuleFrame.ID_URL,FXPicsRuleFrame.onCmdUrl)
    FXMAPFUNC(self,SEL_COMMAND,FXPicsRuleFrame.ID_SERVICE,FXPicsRuleFrame.onCmdService)
    FXMAPFUNC(self,SEL_COMMAND,FXPicsRuleFrame.ID_CATEGORY,FXPicsRuleFrame.onCmdCategory)
|
FXTextField(fh, 25, self, FXPicsRuleFrame.ID_URL).setText(self.rule.url)
scroll = FXScrollWindow(self, LAYOUT_FILL_X|LAYOUT_FILL_Y|LAYOUT_LEFT|LAYOUT_TOP|SCROLLERS_TRACK, 0,0,0,0)
fv = FXVerticalFrame(scroll, LAYOUT_FILL_X|LAYOUT_LEFT|LAYOUT_TOP, 0,0,0,0, 0,0,0,0, 0,0)
|
FXTextField(fh, 27, self, FXPicsRuleFrame.ID_URL).setText(self.rule.url)
scroll = FXScrollWindow(fv, LAYOUT_FILL_X|LAYOUT_FILL_Y|LAYOUT_LEFT|LAYOUT_TOP|SCROLLERS_TRACK, 0,0,0,0)
fv = FXVerticalFrame(scroll, LAYOUT_FILL_X|LAYOUT_FILL_Y|LAYOUT_LEFT|LAYOUT_TOP, 0,0,0,0, 0,0,0,0, 0,0)
|
|
for service, sdata in services.items():
|
_services = services.keys()
_services.sort()
for service in _services:
    sdata = services[service]
|
|
for category in sdata['categories'].keys():
|
_categories = sdata['categories'].keys()
_categories.sort()
for category in _categories:
|
|
def create_message1 (flags=0xb203):
    ""
|
def create_message1 (flags="\xb2\x03"):
|
def create_message1 (flags=0xb203):
    ""
    # overall length = 48 bytes
    protocol = 'NTLMSSP\000' #name
    type = '\001\000' #type 1
    zeros1 = '\000\000'
    flags = utils.hex2str(flags)
    zeros2 = '\000\000\000\000\000\000\000\000\000'
    zeros3 = '\000\000\000\000\000\000\000\000\000\000\000'
    smthg1 = '0\000\000\000\000\000\000\000' # something with chr(48) length?
    smthg2 = '0\000\000\000' # something with chr(48) length?
    msg1 = protocol + type + zeros1 + flags + zeros2 + zeros3 + smthg1 + smthg2
    msg1 = base64.encodestring(msg1)
    msg1 = msg1.replace('\012', '')
    return msg1
|
protocol = 'NTLMSSP\000'
type = '\001\000'
zeros1 = '\000\000'
flags = utils.hex2str(flags)
zeros2 = '\000\000\000\000\000\000\000\000\000'
zeros3 = '\000\000\000\000\000\000\000\000\000\000\000'
smthg1 = '0\000\000\000\000\000\000\000'
smthg2 = '0\000\000\000'
msg1 = protocol + type + zeros1 + flags + zeros2 + zeros3 + smthg1 + smthg2
msg1 = base64.encodestring(msg1)
msg1 = msg1.replace('\012', '')
return msg1

def create_message2 (flags=0x8201):
    ""
|
protocol = 'NTLMSSP\x00'
type = '\x01'
zero3 = '\x00'*3
zero2 = '\x00'*2
domain = "WORKGROUP"
dom_len = len(domain)
host = "UNKNOWN"
host_len = len(host)
host_off = 32
dom_off = host_off + len(host)
msg = "%(protocol)s%(type)s%(zero3)s%(flags)s%(zero2)s%(dom_len)02d%(dom_len)02d%(dom_off)02d00%(host_len)02d%(host_len)02d%(host_off)02d00%(host)s%(domain)s" % locals()
return base64.encodestring(msg).strip()

def create_message2 (flags="\x82\x01"):
|
|
flags = utils.hex2str(flags)
|
def create_message2 (flags=0x8201):
    ""
    protocol = 'NTLMSSP\x00' #name
    type = '\x02'
    msglen = '\x28'
    flags = utils.hex2str(flags)
    nonce = "%08f" % (random.random()*10)
    assert nonce not in nonces
    nonces[nonce] = None
    zero2 = '\x00' * 2
    zero7 = '\x00' * 7
    zero8 = '\x00' * 8
    return "%(protocol)s%(type)s%(zero7)s%(msglen)s%(zero2)s%(nonce)s%(zero8)s" % locals()
|
|
return "%(protocol)s%(type)s%(zero7)s%(msglen)s%(zero2)s%(nonce)s%(zero8)s" % locals()

def create_message3 (nonce, domain, username, host, flags=0x8201,
|
msg = "%(protocol)s%(type)s%(zero7)s%(msglen)s%(zero2)s%(nonce)s%(zero8)s" % locals()
return base64.encodestring(msg).strip()

def create_message3 (nonce, domain, username, host, flags="\x82\x01",
|
|
"" flags = utils.hex2str(flags)
|
def create_message3 (nonce, domain, username, host, flags=0x8201,
                     lm_hashed_pw=None, nt_hashed_pw=None, ntlm_mode=0):
    ""
    flags = utils.hex2str(flags)
    protocol = 'NTLMSSP\000' #name
    type = '\003\000' #type 3
    head = protocol + type + '\000\000'
    domain_rec = record(domain)
    user_rec = record(username)
    host_rec = record(host)
    additional_rec = record('')
    if lm_hashed_pw:
        lm_rec = record(ntlm_procs.calc_resp(lm_hashed_pw, nonce))
    else:
        lm_rec = record('')
    if nt_hashed_pw:
        nt_rec = record(ntlm_procs.calc_resp(nt_hashed_pw, nonce))
    else:
        nt_rec = record('')
    # length of the head and five infos for LM, NT, Domain, User, Host
    domain_offset = len(head) + 5 * 8
    # and unknown record info and flags' length
    if ntlm_mode == 0:
        domain_offset = domain_offset + 8 + len(flags)
    # create info fields
    domain_rec.create_record_info(domain_offset)
    user_rec.create_record_info(domain_rec.next_offset)
    host_rec.create_record_info(user_rec.next_offset)
    lm_rec.create_record_info(host_rec.next_offset)
    nt_rec.create_record_info(lm_rec.next_offset)
    additional_rec.create_record_info(nt_rec.next_offset)
    # data part of the message 3
    data_part = domain_rec.data + user_rec.data + host_rec.data + lm_rec.data + nt_rec.data
    # build message 3
    m3 = head + lm_rec.record_info + nt_rec.record_info + \
         domain_rec.record_info + user_rec.record_info + host_rec.record_info
    # Experimental feature !!!
    if ntlm_mode == 0:
        m3 += additional_rec.record_info + flags
    m3 += data_part
    # Experimental feature !!!
    if ntlm_mode == 0:
        m3 += additional_rec.data
    # base64 encode
    m3 = base64.encodestring(m3)
    m3 = m3.replace('\012', '')
    return m3
|
|
m3 = base64.encodestring(m3)
m3 = m3.replace('\012', '')
return m3
|
return base64.encodestring(m3).strip()
|
|
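The switch from replace('\012', '') to .strip() above works because base64.encodestring appends a trailing newline to its result; for short tokens the two are equivalent. A quick standalone check (the input string is just an example):

    import base64

    print repr(base64.encodestring('NTLMSSP\x00')) # 'TlRMTVNTUAA=\n'
    print repr(base64.encodestring('NTLMSSP\x00').strip()) # 'TlRMTVNTUAA='

    # Caveat: encodestring also inserts a newline every 76 output
    # characters, so for long messages strip() only removes the final one.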
def error(self, code, msg, txt=""):
|
def error(self, code, msg, txt=''):
|
def error(self, code, msg, txt=""):
    content = wc.proxy.HTML_TEMPLATE % \
        {'title': "WebCleaner Proxy Error %d %s" % (code, msg),
         'header': "Bummer!",
         'content': "WebCleaner Proxy Error %d %s<br>%s<br>" % \
             (code, msg, txt),
        }
    if config['proxyuser']:
        auth = 'Proxy-Authenticate: Basic realm="WebCleaner"\r\n'
    else:
        auth = ""
    ServerHandleDirectly(self.client,
        'HTTP/1.0 %d %s\r\n',
        'Server: WebCleaner Proxy\r\n' +\
        'Content-type: text/html\r\n' +\
        '%s'%auth +\
        '\r\n',
        content)
|
{'title': "WebCleaner Proxy Error %d %s" % (code, msg),
 'header': "Bummer!",
 'content': "WebCleaner Proxy Error %d %s<br>%s<br>" % \
|
{'title': 'WebCleaner Proxy Error %d %s' % (code, msg),
 'header': 'Bummer!',
 'content': 'WebCleaner Proxy Error %d %s<br>%s<br>' % \
|
|
else: auth = ""
|
    http_ver = '1.0'
else:
    auth = ''
    http_ver = '1.1'
|
|
'HTTP/1.0 %d %s\r\n',
|
'HTTP/%s %d %s\r\n' % (http_ver, code, msg),
|
|
if not self.headers.has_key("Proxy-Authorization"):
|
if not self.check_proxy_auth():
|
def __init__(self, client, request, headers, content, nofilter):
    self.client = client
    self.request = request
    self.headers = headers
    if config["proxyuser"]:
        if not self.headers.has_key("Proxy-Authorization"):
            self.error(407, _("Proxy Authentication Required"))
            return
        auth = self.headers['Proxy-Authorization']
        # XXX more
    self.content = content
    self.nofilter = nofilter
    self.url = ""
    try:
        self.method, self.url, protocol = request.split()
    except:
        config['requests']['error'] += 1
        self.error(400, _("Can't parse request"))
        return
    if not self.url:
        config['requests']['error'] += 1
        self.error(400, _("Empty URL"))
        return
    scheme, hostname, port, document = wc.proxy.spliturl(self.url)
    #debug(HURT_ME_PLENTY, "splitted url", scheme, hostname, port, document)
    if scheme=='file':
        # a blocked url is a local file:// link
        # this means we should _not_ use this proxy for local
        # file links :)
        mtype = mimetypes.guess_type(self.url)[0]
        config['requests']['valid'] += 1
        config['requests']['blocked'] += 1
        ServerHandleDirectly(self.client,
            'HTTP/1.0 200 OK\r\n',
            'Content-Type: %s\r\n\r\n' % (mtype or 'application/octet-stream'),
            open(document, 'rb').read())
        return
|
auth = self.headers['Proxy-Authorization']
|
|
|
version = "3.0",
|
version = "2.37.1",
|
def run (self):
    if self.all:
        # remove share directory
        directory = os.path.join("build", "share")
        if os.path.exists(directory):
            remove_tree(directory, dry_run=self.dry_run)
        else:
            distutils.log.warn("'%s' does not exist -- can't clean it",
                               directory)
    clean.run(self)
|
return email.parseaddr(address)[1]
|
cleaned = parseaddr(address)
if not cleaned[0]:
    return cleaned[1]
return '%s <%s>'%cleaned
|
def valid_mail (address):
    """return cleaned up mail, or an empty string on errors"""
    return email.parseaddr(address)[1]
|
new_url = client.scheme+"://"+answer.data
|
new_url = self.client.scheme+"://"+answer.data
|
def handle_dns (self, hostname, answer):
    assert self.state == 'dns'
    debug(PROXY, "%s handle dns", self)
    if not self.client.connected:
        warn(PROXY, "%s client closed after DNS", self)
        # The browser has already closed this connection, so abort
        return
    if answer.isFound():
        self.ipaddr = answer.data[0]
        self.state = 'server'
        self.find_server()
    elif answer.isRedirect():
        # Let's use a different hostname
        new_url = client.scheme+"://"+answer.data
        if self.port != 80:
            new_url += ':%d' % self.port
        # XXX does not work with parent proxy
        new_url += self.document
        info(PROXY, "%s redirecting %r", self, new_url)
        self.state = 'done'
        # XXX find http version!
        ServerHandleDirectly(
            self.client,
            '%s 301 Moved Permanently' % self.protocol, 301,
            WcMessage(StringIO('Content-type: text/plain\r\n'
                               'Location: %s\r\n\r\n' % new_url)),
            i18n._('Host %s is an abbreviation for %s')%(hostname, answer.data))
    else:
        # Couldn't look up the host,
        # close this connection
        self.state = 'done'
        self.client.error(504, i18n._("Host not found"),
            i18n._('Host %s not found .. %s')%(hostname, answer.data))
|
handler = WcRotatingFileHandler(logfile, mode, maxBytes, backupCount)
|
handler = RotatingFileHandler(logfile, mode, maxBytes, backupCount)
|
def get_root_handler ():
    """return a handler for basic logging"""
    if os.name=="nt":
        from logging.handlers import NTEventLogHandler
        return set_format(NTEventLogHandler(Name))
    logfile = get_log_file("%s.err"%Name)
    mode = 'a'
    maxBytes = 1024*1024*2 # 2 MB
    backupCount = 5 # number of files to generate
    handler = WcRotatingFileHandler(logfile, mode, maxBytes, backupCount)
    return set_format(handler)
|
handler = WcRotatingFileHandler(logfile, mode, maxBytes, backupCount)
|
handler = RotatingFileHandler(logfile, mode, maxBytes, backupCount)
|
def get_wc_handler ():
    """return a handler for webcleaner logging"""
    if os.name=="nt":
        from logging.handlers import NTEventLogHandler
        return set_format(NTEventLogHandler(Name))
    logfile = get_log_file("%s.log"%Name)
    mode = 'a'
    maxBytes = 1024*1024*2 # 2 MB
    backupCount = 5 # number of files to generate
    handler = WcRotatingFileHandler(logfile, mode, maxBytes, backupCount)
    return set_format(handler)
|
handler = WcRotatingFileHandler(logfile, mode, maxBytes, backupCount)
|
handler = RotatingFileHandler(logfile, mode, maxBytes, backupCount)
|
def get_access_handler ():
    """return a handler for access logging"""
    logfile = get_log_file("%s-access.log"%Name)
    mode = 'a'
    maxBytes = 1024*1024*2 # 2 MB
    backupCount = 5 # number of files to generate
    handler = WcRotatingFileHandler(logfile, mode, maxBytes, backupCount)
    # log only the message
    handler.setFormatter(logging.Formatter("%(message)s"))
    return handler
|
class WcRotatingFileHandler (RotatingFileHandler):

    def emit (self, record):
        """
        A little more verbose emit function.
        """
        try:
            msg = self.format(record)
            self.stream.write("%s\n" % msg)
            self.flush()
        except:
            print >>sys.stderr, "Could not format record", record
            self.handleError(record)
|
def get_last_word_boundary (s, width):
    """Get maximal index i of a whitespace char in s with 0 < i < width.
       Note: if s contains no whitespace this returns width-1"""
    match = re.compile(".*\s").match(s[0:width])
    if match:
        return match.end()
    return width-1
|
|
ret.add("%s/%d" % (net, mask2suffix(mask)))
|
ret.add("%s/%d" % (num2dq(net), mask2suffix(mask)))
|
def map2hosts (hostmap):
    ret = hostmap[0].copy()
    for net, mask in hostmap[1]:
        ret.add("%s/%d" % (net, mask2suffix(mask)))
    return ret
|
hosts, nets = hosts2map(["192.168.1.1/16"])
for net, mask in nets:
    print num2dq(net), mask2suffix(mask)
|
hosts = ["192.168.1.1/16"]
hostmap = hosts2map(hosts)
print hostmap
print map2hosts(hostmap)
|
def _test ():
    hosts, nets = hosts2map(["192.168.1.1/16"])
    for net, mask in nets:
        print num2dq(net), mask2suffix(mask)
|
result = result.lstrip('\x08').strip().replace(' \x08', '')
|
result = result.lstrip('\x08').replace(' \x08', '').strip()
|
def classify (self, f):
    if not self.entries:
        raise StandardError("Not initialised properly")
    # Are we still looking for the ruleset to apply or are we in a rule
    found_rule = False
    # If we failed part of the rule there is no point looking for a
    # higher level subrule
    allow_next = 0
    # String provided by the successful rule
    result = ""
|
return '<Listener:%s>' % self.addr
|
return '<Listener:%s>' % str(self.addr)
|
def __repr__ (self):
    """return listener class and address"""
    return '<Listener:%s>' % self.addr
|
self.ratings[service] = {}
|
self.ratings[self.service] = {}
|
def fill_attrs (self, attrs, name):
    if name=='pics':
        UrlRule.fill_attrs(self, attrs, name)
    elif name=='service':
        self.service = unxmlify(attrs.get('name')).encode('iso8859-1')
        self.ratings[service] = {}
    elif name=='category':
        assert self.service
        self.category = unxmlify(attrs.get('name')).encode('iso8859-1')
    else:
        raise ValueError(i18n._("Invalid pics rule tag name `%s',"+\
                                " check your configuration")%name)
|
pid = ing(file(pidfile).read())
|
pid = int(file(pidfile).read())
|
def status (pidfile):
    if os.path.exists(pidfile):
        pid = ing(file(pidfile).read())
        return i18n._("WebCleaner is running (PID %d)")%pid, 0
    else:
        return i18n._("WebCleaner is not running (no lock file found)"), 3
|
from wc import BaseUrl
|
doreload = False
from wc import update, BaseUrl, Configuration
|
def onCmdConfUpdate (self, sender, sel, ptr):
    """download files from http://webcleaner.sourceforge.net/zapper/
       and copy them over the existing config"""
    # base url for all files
    from wc import BaseUrl
    dialog = FXMessageBox(self,i18n._("Update Help"),UpdateHelp % BaseUrl,None,MBOX_OK_CANCEL)
    if self.getApp().doShow(dialog) != MBOX_CLICKED_OK:
        return 1
    try:
        # XXX log into window
        wc.update.update(wc.config, BaseUrl)
        wc.config.write_filterconf()
    except IOError, msg:
        self.getApp().error(i18n._("Update Error"),
                            "%s: %s" % (i18n._("Update Error"), msg))
    else:
        if doreload:
            self.handle(self, MKUINT(ConfWindow.ID_PROXYRELOAD,SEL_COMMAND), None)
    return 1
|
wc.update.update(wc.config, BaseUrl)
wc.config.write_filterconf()
|
doreload = update.update(config, BaseUrl, dryrun=True)
config.write_filterconf()
|
|
wc.wstartfunc(handle=self.hWaitStop, confdir=self.configdir,
|
wc.start.wstartfunc(handle=self.hWaitStop, confdir=self.configdir,
|
def SvcDoRun (self):
    """start this service"""
    import servicemanager
    # Log a "started" message to the event log.
    servicemanager.LogMsg(
        servicemanager.EVENTLOG_INFORMATION_TYPE,
        servicemanager.PYS_SERVICE_STARTED,
        (self._svc_name_, ''))
    wc.wstartfunc(handle=self.hWaitStop, confdir=self.configdir,
                  filelogs=self.filelogs)
    # Now log a "service stopped" message
    servicemanager.LogMsg(
        servicemanager.EVENTLOG_INFORMATION_TYPE,
        servicemanager.PYS_SERVICE_STOPPED,
        (self._svc_name_,''))
|
if self.rulestack and self.rulestack[-1][1][0].match_tag(tag):
|
if self.rulestack and self.rulestack[-1][1][0].match_tag(tag) and \
   self.stackcount[-1][0]==tag and self.stackcount[-1][1]<=0:
    del self.stackcount[-1]
|
def filterEndElement (self, tag):
    # remember: self.rulestack[-1][1] is the rulelist that
    # matched for a start tag. and if the first one ([0])
    # matches, all other match too
    if self.rulestack and self.rulestack[-1][1][0].match_tag(tag):
        pos, rulelist = self.rulestack.pop()
        for rule in rulelist:
            if rule.match_complete(pos, self.buf):
                rule.filter_complete(pos, self.buf)
                return True
    return False
|
if image_re.match(urlTuple[3][-4:]):
    return '%s %s %s' % (method, blocked or self.block_image, 'image/gif')
|
if blocked:
    doc = blocked
elif image_re.match(urlTuple[3][-4:]):
    doc = self.block_image
|
def doit (self, data, **args):
    debug(FILTER, "block filter working on %s", `data`)
    splitted = data.split()
    if len(splitted)!=3:
        error(FILTER, "invalid request: %s", `data`)
        return data
    method,url,protocol = splitted
    urlTuple = list(urlparse.urlparse(url))
    netloc = urlTuple[1]
    s = netloc.split(":")
    if len(s)==2:
        urlTuple[1:2] = s
    else:
        urlTuple[1:2] = [netloc,80]
    if self.allowed(urlTuple):
        return data
    blocked = self.strict_whitelist or self.blocked(urlTuple)
    if blocked is not None:
        debug(FILTER, "blocked url %s", url)
        # index 3, not 2!
        if image_re.match(urlTuple[3][-4:]):
            return '%s %s %s' % (method, blocked or self.block_image, 'image/gif')
        else:
            # XXX hmmm, what about CGI images?
            # make HTTP HEAD request?
            return '%s %s %s' % (method, blocked or self.block_url, 'text/html')
    return data
|
return '%s %s %s' % (method, blocked or self.block_url, 'text/html')
|
    doc = self.block_url
return '%s %s HTTP/1.1' % (method, doc)
|
|
if not self.rulestack:
|
if not self.rulestack and \
   (not self.javascript or tag!='script'):
|
def startElement (self, tag, attrs):
    """We get a new start tag. New rules could be appended to the
       pending rules. No rules can be removed from the list."""
    rulelist = []
    filtered = 0
    # default data
    tobuffer = (STARTTAG, tag, attrs)
    # look for filter rules which apply
    for rule in self.rules:
        if rule.match_tag(tag) and rule.match_attrs(attrs):
            #debug(NIGHTMARE, "matched rule %s on tag %s" % (`rule.title`, `tag`))
            if rule.start_sufficient:
                tobuffer = rule.filter_tag(tag, attrs)
                filtered = "True"
                # give'em a chance to replace more than one attribute
                if tobuffer[0]==STARTTAG and tobuffer[1]==tag:
                    foo,tag,attrs = tobuffer
                    continue
                else:
                    break
            else:
                #debug(NIGHTMARE, "put on buffer")
                rulelist.append(rule)
    if rulelist:
        # remember buffer position for end tag matching
        pos = len(self.buffer)
        self.rulestack.append((pos, rulelist))
    # if its not yet filtered, try filter javascript
    if filtered:
        self.buffer_append_data(tobuffer)
    elif self.javascript:
        self.jsStartElement(tag, attrs)
    else:
        self.buffer.append(tobuffer)
    # if rule stack is empty, write out the buffered data
    if not self.rulestack:
        self.buffer2data()
|
self.jsEnv.executeScriptAsFunction(val)
|
self.jsEnv.executeScriptAsFunction(val, 0.0)
|
def jsPopup (self, attrs, name):
    """check if attrs[name] javascript opens a popup window"""
    val = attrs[name]
    if not val:
        return
    self.jsEnv.attachListener(self)
    self.jsEnv.executeScriptAsFunction(val)
    self.jsEnv.detachListener(self)
    return self.popup_counter
|
pass
|
print >>sys.stderr, "JS:", data
|
def processData (self, data):
    # XXX
    pass
|
if not self.buffer:
    print >>sys.stderr, "empty buffer on </script>"
    return
last = self.buffer[-1]
if last[0]!=DATA:
    print >>sys.stderr, "missing body for </script>", last
    return
script = last[1].strip()
if script.startswith("<!--"):
    script = script[4:].strip()
self.jsEnv.attachListener(self)
self.jsEnv.executeScriptAsFunction(val, 0.0)
self.jsEnv.detachListener(self)
|
def jsEndElement (self, tag):
    """parse generated html for scripts"""
    if tag!='script':
        return
    # XXX
|
|
return "\n".join(HEADERS.getall()) or "-"
|
return "\n".join(wc.proxy.HEADERS.getall()) or "-"
|
def text_headers ():
    return "\n".join(HEADERS.getall()) or "-"
|
debug(PROXY, '%s <= read %d', str(self), len(data))
|
def handle_read (self):
    assert self.connected
|
|
klass = wc.filter.rating.storage.pickle.PickleStorage
rating_store = wc.filter.rating.storage.get_rating_store(klass)
|
rating_store = wc.filter.rating.get_ratings()
|
def rating_allow (self, url):
    """
    Asks cache if the rule allows the rating data for given url
    Looks up cache to find rating data, if not returns a MISSING message.
    """
    klass = wc.filter.rating.storage.pickle.PickleStorage
    rating_store = wc.filter.rating.storage.get_rating_store(klass)
    # sanitize url
    url = wc.filter.rating.make_safe_url(url)
    if url in rating_store:
        return self.check_against(rating_store[url])
    return MISSING
|
if os.name=='nt':
    macros = [('YY_NO_UNISTD_H', None)]
else:
    macros = []
|
def create_batch_file(self, directory, data, filename):
    filename = os.path.join(directory, filename)
    # write the batch file
    util.execute(write_file, (filename, data), "creating %s" % filename, self.verbose>=1, self.dry_run)
|
|
import wc
|
import wc.configuration
|
def get_wc_config ():
    """
    Get WebCleaner configuration object.
    """
    global _wc_config
    if _wc_config is None:
        import wc
        _wc_config = wc.configuration.init()
    return _wc_config
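The accessor above is the classic lazy-singleton idiom: a module global starts as None and is initialized on first use, so importing the module stays cheap. A self-contained sketch of the pattern (the dict stands in for the real configuration object):

_config = None

def get_config():
    """Initialize the shared config on first access, then reuse it."""
    global _config
    if _config is None:
        _config = {"port": 8080}  # placeholder for an expensive init call
    return _config

assert get_config() is get_config()  # same object on every call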
|
debug(PROXY, 'Proxy: reserve_server %s %s', str(addr), str(server))
|
debug(PROXY, 'pool reserve %s %s', str(addr), str(server))
|
def reserve_server (self, addr):
    for server,status in self.map.get(addr, {}).items():
        if status[0] == 'available':
            # Let's reuse this one
            self.map[addr][server] = ('busy', )
            debug(PROXY, 'Proxy: reserve_server %s %s', str(addr), str(server))
            return server
    return None
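The pool keeps, per address, a dict mapping each open server connection to a status tuple; reserving flips the first 'available' entry to 'busy'. A hedged, standalone sketch of that bookkeeping (class and method names are illustrative):

class ServerPool:
    def __init__(self):
        self.map = {}  # addr -> {server: ('available',) or ('busy',)}

    def register(self, addr, server):
        self.map.setdefault(addr, {})[server] = ('available',)

    def reserve(self, addr):
        for server, status in self.map.get(addr, {}).items():
            if status[0] == 'available':
                self.map[addr][server] = ('busy',)
                return server
        return None

pool = ServerPool()
pool.register(('example.com', 80), 'conn-1')
assert pool.reserve(('example.com', 80)) == 'conn-1'
assert pool.reserve(('example.com', 80)) is None  # already busy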
|
callback(self.hostname, DnsResponse('found', ip_addrs))
|
if self.hostname[-4:] in ('.com','.net') and \
   '64.94.110.11' in ip_addrs:
    callback(self.hostname, DnsResponse('error', 'not found'))
else:
    callback(self.hostname, DnsResponse('found', ip_addrs))
|
def process_read (self):
    # Assume that the entire answer comes in one packet
    if self.conntype == 'tcp':
        if len(self.recv_buffer) < 2:
            return
        header = self.recv_buffer[:2]
        count = dnslib.Lib.unpack16bit(header)
        if len(self.recv_buffer) < 2+count:
            return
        self.read(2) # header
        data = self.read(count)
        self.socket.shutdown(1)
    else:
        data = self.read(1024)
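The TCP branch works because DNS over TCP (RFC 1035, section 4.2.2) prefixes every message with a two-byte big-endian length; unpack16bit decodes exactly that. A standalone sketch of the framing with the struct module:

import struct

def extract_dns_message(buf):
    """Return (message, rest) once a full length-prefixed message arrived."""
    if len(buf) < 2:
        return None, buf                     # length prefix incomplete
    (count,) = struct.unpack(">H", buf[:2])  # big-endian 16-bit length
    if len(buf) < 2 + count:
        return None, buf                     # body incomplete
    return buf[2:2 + count], buf[2 + count:]

msg, rest = extract_dns_message(b"\x00\x03abcXY")
assert msg == b"abc" and rest == b"XY"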
|
import wc
|
import wc.configuration
|
def _main ():
    """USAGE: test/run.sh test/getssl.py <https url>"""
    if len(sys.argv)!=2:
        print _main.__doc__
        sys.exit(1)
    import wc
    wc.configuration.config = wc.configuration.init()
    port = config['port']
    sslport = config['sslport']
    request(sys.argv[1], sslport)
    #rawrequest(sys.argv[1], sslport)
    #rawrequest2(sys.argv[1], sslport)
    rawrequest3(sys.argv[1], port)
|
port = config['port']
sslport = config['sslport']
|
port = wc.configuration.config['port']
sslport = wc.configuration.config['sslport']
|
def _main ():
    """USAGE: test/run.sh test/getssl.py <https url>"""
    if len(sys.argv)!=2:
        print _main.__doc__
        sys.exit(1)
    import wc
    wc.configuration.config = wc.configuration.init()
    port = config['port']
    sslport = config['sslport']
    request(sys.argv[1], sslport)
    #rawrequest(sys.argv[1], sslport)
    #rawrequest2(sys.argv[1], sslport)
    rawrequest3(sys.argv[1], port)
|
if ctype not in (0, 2):
    raise IOError("Invalid NTLM challenge type")
|
def get_ntlm_challenge (**attrs):
    """return initial challenge token for ntlm authentication"""
    ctype = attrs.get('type', 0)
    if ctype not in (0, 2):
        raise IOError("Invalid NTLM challenge type")
    if ctype==0:
        # initial challenge
        return "NTLM"
    if ctype==2:
        # after getting first credentials
        return "NTLM %s" % base64.encodestring(create_message2()).strip()
|
|
if ctype==2:
|
elif ctype==2:
|
def get_ntlm_challenge (**attrs):
    """return initial challenge token for ntlm authentication"""
    ctype = attrs.get('type', 0)
    if ctype not in (0, 2):
        raise IOError("Invalid NTLM challenge type")
    if ctype==0:
        # initial challenge
        return "NTLM"
    if ctype==2:
        # after getting first credentials
        return "NTLM %s" % base64.encodestring(create_message2()).strip()
|
if ctype not in (1, 3):
    raise IOError("Invalid NTLM credentials type")
|
def get_ntlm_credentials (challenge, **attrs):
    ctype = attrs.get('type', 1)
    if ctype not in (1, 3):
        raise IOError("Invalid NTLM credentials type")
    if ctype==1:
        msg = create_message1()
    elif ctype==3:
        nonce = attrs['nonce']
        domain = attrs['domain']
        username = attrs['username']
        host = attrs['host']
        msg = create_message3(nonce, domain, username, host)
    return "NTLM %s" % base64.encodestring(msg).strip()
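Both NTLM helpers end the same way: the binary message is base64-encoded and prefixed with "NTLM " to form the HTTP authentication header value. A Python 3 sketch of just that framing step (the message bytes are a placeholder, not a real type 1 message):

import base64

def ntlm_header_value(message_bytes):
    # HTTP NTLM scheme: "NTLM <base64 of the raw message>"
    return "NTLM %s" % base64.b64encode(message_bytes).decode("ascii")

assert ntlm_header_value(b"NTLMSSP\x00") == "NTLM TlRMTVNTUAA="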
|
|
def get_ntlm_type3_message (**attrs):
|
def get_ntlm_type3_message (**attrs):
    # extract the required attributes
|
|
lineno = frame.f_lineno
filename = frame.f_globals["__file__"]
if filename.endswith(".pyc") or filename.endswith(".pyo"):
    filename = filename[:-1]
|
def _traceit (frame, event, arg):
    """
    Print current executed line.
    """
    if event == "line":
        lineno = frame.f_lineno
        filename = frame.f_globals["__file__"]
        if filename.endswith(".pyc") or filename.endswith(".pyo"):
            filename = filename[:-1]
        name = frame.f_globals["__name__"]
        line = linecache.getline(filename, lineno)
        info(tracelog, "%s:%s: %s", name, lineno, line.rstrip())
    return _traceit
|
|
line = linecache.getline(filename, lineno)
info(tracelog, "%s:%s: %s", name, lineno, line.rstrip())
|
if name not in _trace_ignore:
    lineno = frame.f_lineno
    filename = frame.f_globals["__file__"]
    if filename.endswith(".pyc") or filename.endswith(".pyo"):
        filename = filename[:-1]
    line = linecache.getline(filename, lineno)
    print "THREAD(%d) %s:%d: %s" % \
          (_thread.get_ident(), name, lineno, line.rstrip())
|
def _traceit (frame, event, arg):
    """
    Print current executed line.
    """
    if event == "line":
        lineno = frame.f_lineno
        filename = frame.f_globals["__file__"]
        if filename.endswith(".pyc") or filename.endswith(".pyo"):
            filename = filename[:-1]
        name = frame.f_globals["__name__"]
        line = linecache.getline(filename, lineno)
        info(tracelog, "%s:%s: %s", name, lineno, line.rstrip())
    return _traceit
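_traceit is meant to be installed with sys.settrace; returning itself keeps line tracing active in nested frames. A minimal runnable sketch of the same hook, printing instead of logging:

import linecache
import sys

def traceit(frame, event, arg):
    if event == "line":
        filename = frame.f_code.co_filename
        line = linecache.getline(filename, frame.f_lineno)
        print("%s:%d: %s" % (filename, frame.f_lineno, line.rstrip()))
    return traceit  # keep tracing inside called functions

def demo():
    x = 1
    return x + 1

sys.settrace(traceit)
demo()
sys.settrace(None)  # always uninstall the hook afterwards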
|
parts = wc.filter.rating.split_url(url)
|
parts = split_url(url)
|
def make_safe_url (url):
    """Remove unsafe parts of url for rating cache check."""
    parts = wc.filter.rating.split_url(url)
    pathparts = [make_safe_part(x) for x in parts[2:]]
    pathparts[0:2] = parts[0:2]
    return "".join(pathparts)
|
if not wc.url.is_safe_url(url):
    raise ValueError("Invalid rating url %r." % url)
|
if wc.url.is_safe_url(url):
    return url
return make_safe_url(url)
|
def check_url (self, url):
    """If url is not safe raise a ValueError."""
    if not wc.url.is_safe_url(url):
        raise ValueError("Invalid rating url %r." % url)
|
self.check_url(url)
|
url = self.check_url(url)
|
def __setitem__ (self, url, rating):
    """Add rating for given url."""
    self.check_url(url)
    self.cache[url] = rating
|
self.check_url(url)
|
url = self.check_url(url)
|
def __getitem__ (self, url):
    """Get rating for given url."""
    self.check_url(url)
    # use a specialized form of longest prefix matching:
    # split the url in parts and the longest matching part wins
    parts = split_url(url)
    # the range selects from all parts (full url) down to the first two parts
    for i in range(len(parts), 1, -1):
        url = "".join(parts[:i])
        if url in self.cache:
            return self.cache[url]
    raise KeyError(url)
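The lookup above is longest-prefix matching over URL pieces: the full URL is tried first, then ever shorter prefixes, so the most specific stored rating wins. A self-contained sketch with a toy splitter (split_parts is invented for illustration; the real split_url differs):

def split_parts(url):
    """Split 'http://host/a/b' into pieces that rebuild by joining."""
    head, _, path = url.partition("://")
    pieces = [head + "://"] + [seg + "/" for seg in path.split("/")]
    pieces[-1] = pieces[-1].rstrip("/")  # no trailing slash on last piece
    return [p for p in pieces if p]

def longest_prefix_lookup(url, cache):
    parts = split_parts(url)
    for i in range(len(parts), 1, -1):  # full url down to scheme+host
        key = "".join(parts[:i])
        if key in cache:
            return cache[key]
    raise KeyError(url)

cache = {"http://example.com/": "blocked"}
assert longest_prefix_lookup("http://example.com/news/today", cache) == "blocked"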
|
def url_norm (url):
    """Normalize the given URL which must be quoted.
    Supports unicode hostnames (IDNA encoding) according to RFC 3490.
    @return (normed url, idna flag)
    """
    urlparts = list(urlparse.urlsplit(url))
    urlparts[0] = urllib.unquote(urlparts[0]).lower()
    is_idn = url_fix_host(urlparts)
|
def url_fix_mailto_urlsplit (urlparts):
    """Split query part of mailto url if found."""
    if "?" in urlparts[2]:
        urlparts[2], urlparts[3] = urlparts[2].split('?', 1)

def url_parse_query (query):
    """Parse and re-join the given CGI query."""
|
def url_norm (url):
    """Normalize the given URL which must be quoted.
    Supports unicode hostnames (IDNA encoding) according to RFC 3490.
    @return (normed url, idna flag)
    """
    urlparts = list(urlparse.urlsplit(url))
    # scheme
    urlparts[0] = urllib.unquote(urlparts[0]).lower()
    # host (with path or query side effects)
    is_idn = url_fix_host(urlparts)
    # query
    l = []
    for k, v in parse_qsl(urlparts[3], True):
        k = urllib.quote(k, '/-:,')
        if v:
            v = urllib.quote(v, '/-:,')
            l.append("%s=%s" % (k, v))
        elif v is None:
            l.append(k)
        else:
            # some sites do not work when the equal sign is missing
            l.append("%s=" % k)
    urlparts[3] = '&'.join(l)
    if not urlparts[2]:
        # empty path should be a slash, but not in certain schemes
        # note that in relative links, urlparts[0] might be empty
        # in this case, do not make any assumptions
        if urlparts[0] and urlparts[0] not in urlparse.non_hierarchical:
            urlparts[2] = '/'
    else:
        # fix redundant path parts
        urlparts[2] = collapse_segments(urlparts[2])
    # quote parts again
    urlparts[0] = urllib.quote(urlparts[0]) # scheme
    urlparts[1] = urllib.quote(urlparts[1], '@:') # host
    urlparts[2] = urllib.quote(urlparts[2], _nopathquote_chars) # path
    res = urlparse.urlunsplit(urlparts)
    if url.endswith('#') and not urlparts[4]:
        # re-append trailing empty fragment
        res += '#'
    return (res, is_idn)
|
for k, v in parse_qsl(urlparts[3], True):
|
for k, v in parse_qsl(query, True):
|
def url_norm (url):
    """Normalize the given URL which must be quoted.
    Supports unicode hostnames (IDNA encoding) according to RFC 3490.
    @return (normed url, idna flag)
    """
    urlparts = list(urlparse.urlsplit(url))
    # scheme
    urlparts[0] = urllib.unquote(urlparts[0]).lower()
    # host (with path or query side effects)
    is_idn = url_fix_host(urlparts)
    # query
    l = []
    for k, v in parse_qsl(urlparts[3], True):
        k = urllib.quote(k, '/-:,')
        if v:
            v = urllib.quote(v, '/-:,')
            l.append("%s=%s" % (k, v))
        elif v is None:
            l.append(k)
        else:
            # some sites do not work when the equal sign is missing
            l.append("%s=" % k)
    urlparts[3] = '&'.join(l)
    if not urlparts[2]:
        # empty path should be a slash, but not in certain schemes
        # note that in relative links, urlparts[0] might be empty
        # in this case, do not make any assumptions
        if urlparts[0] and urlparts[0] not in urlparse.non_hierarchical:
            urlparts[2] = '/'
    else:
        # fix redundant path parts
        urlparts[2] = collapse_segments(urlparts[2])
    # quote parts again
    urlparts[0] = urllib.quote(urlparts[0]) # scheme
    urlparts[1] = urllib.quote(urlparts[1], '@:') # host
    urlparts[2] = urllib.quote(urlparts[2], _nopathquote_chars) # path
    res = urlparse.urlunsplit(urlparts)
    if url.endswith('#') and not urlparts[4]:
        # re-append trailing empty fragment
        res += '#'
    return (res, is_idn)
|
urlparts[3] = '&'.join(l)
|
return '&'.join(l)

def url_norm (url):
    """Normalize the given URL which must be quoted.
    Supports unicode hostnames (IDNA encoding) according to RFC 3490.
    @return (normed url, idna flag)
    """
    urlparts = list(urlparse.urlsplit(url))
    urlparts[0] = urllib.unquote(urlparts[0]).lower()
    if urlparts[0] == 'mailto':
        url_fix_mailto_urlsplit(urlparts)
    is_idn = url_fix_host(urlparts)
    urlparts[3] = url_parse_query(urlparts[3])
|
def url_norm (url):
    """Normalize the given URL which must be quoted.
    Supports unicode hostnames (IDNA encoding) according to RFC 3490.
    @return (normed url, idna flag)
    """
    urlparts = list(urlparse.urlsplit(url))
    # scheme
    urlparts[0] = urllib.unquote(urlparts[0]).lower()
    # host (with path or query side effects)
    is_idn = url_fix_host(urlparts)
    # query
    l = []
    for k, v in parse_qsl(urlparts[3], True):
        k = urllib.quote(k, '/-:,')
        if v:
            v = urllib.quote(v, '/-:,')
            l.append("%s=%s" % (k, v))
        elif v is None:
            l.append(k)
        else:
            # some sites do not work when the equal sign is missing
            l.append("%s=" % k)
    urlparts[3] = '&'.join(l)
    if not urlparts[2]:
        # empty path should be a slash, but not in certain schemes
        # note that in relative links, urlparts[0] might be empty
        # in this case, do not make any assumptions
        if urlparts[0] and urlparts[0] not in urlparse.non_hierarchical:
            urlparts[2] = '/'
    else:
        # fix redundant path parts
        urlparts[2] = collapse_segments(urlparts[2])
    # quote parts again
    urlparts[0] = urllib.quote(urlparts[0]) # scheme
    urlparts[1] = urllib.quote(urlparts[1], '@:') # host
    urlparts[2] = urllib.quote(urlparts[2], _nopathquote_chars) # path
    res = urlparse.urlunsplit(urlparts)
    if url.endswith('#') and not urlparts[4]:
        # re-append trailing empty fragment
        res += '#'
    return (res, is_idn)
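The query loop is the subtle part of url_norm: parse_qsl with blank values kept decodes each pair, and re-quoting with a fixed safe-character set canonicalizes the encoding while preserving empty values. A Python 3 sketch of the same round-trip (urllib.parse replaces the Python 2 urllib/urlparse split, and the None-valued branch of the original parser is omitted):

from urllib.parse import parse_qsl, quote

def normalize_query(query):
    pairs = []
    for k, v in parse_qsl(query, keep_blank_values=True):
        k = quote(k, safe="/-:,")
        if v:
            pairs.append("%s=%s" % (k, quote(v, safe="/-:,")))
        else:
            pairs.append("%s=" % k)  # keep '=' for picky servers
    return "&".join(pairs)

assert normalize_query("a=1&b=&c=x y") == "a=1&b=&c=x%20y"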
|
return match_host(spliturl(url)[1], domainlist)
|
return match_host(url_split(url)[1], domainlist)
|
def match_url (url, domainlist):
    """return True if host part of url matches an entry in given domain list"""
    if not url:
        return False
    return match_host(spliturl(url)[1], domainlist)
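match_host-style predicates usually test whether the host equals an entry in the domain list or is a subdomain of one. A hedged sketch of such a check (the real match_host may differ in details):

def match_host(host, domainlist):
    host = host.lower()
    for domain in domainlist:
        if host == domain or host.endswith("." + domain):
            return True
    return False

assert match_host("ads.example.com", ["example.com"])
assert not match_host("example.com.evil.net", ["example.com"])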
|
s2 = "11 22%(sep)s33 44%(sep)s55" % {'sep': os.linesep}
|
l = len(os.linesep)
gap = " "*l
s2 = "11%(gap)s22%(sep)s33%(gap)s44%(sep)s55" % \
     {'sep': os.linesep, 'gap': gap}
|
def test_wrap (self):
    """test line wrapping"""
    s = "11%(sep)s22%(sep)s33%(sep)s44%(sep)s55" % {'sep': os.linesep}
    # testing width <= 0
    self.assertEquals(wc.strformat.wrap(s, -1), s)
    self.assertEquals(wc.strformat.wrap(s, 0), s)
    s2 = "11 22%(sep)s33 44%(sep)s55" % {'sep': os.linesep}
    # splitting lines
    self.assertEquals(wc.strformat.wrap(s2, 2), s)
    # combining lines
    self.assertEquals(wc.strformat.wrap(s, 5), s2)
|
self.assertEquals(wc.strformat.wrap(s, 5), s2)
|
self.assertEquals(wc.strformat.wrap(s, 4+l), s2)
|
def test_wrap (self):
    """test line wrapping"""
    s = "11%(sep)s22%(sep)s33%(sep)s44%(sep)s55" % {'sep': os.linesep}
    # testing width <= 0
    self.assertEquals(wc.strformat.wrap(s, -1), s)
    self.assertEquals(wc.strformat.wrap(s, 0), s)
    s2 = "11 22%(sep)s33 44%(sep)s55" % {'sep': os.linesep}
    # splitting lines
    self.assertEquals(wc.strformat.wrap(s2, 2), s)
    # combining lines
    self.assertEquals(wc.strformat.wrap(s, 5), s2)
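The fixed assertions account for os.linesep being one character on POSIX ("\n") but two on Windows ("\r\n"): when wrap() joins two lines, the separator is replaced by a same-width gap, so the target width must budget len(os.linesep). A quick illustration:

import os

l = len(os.linesep)    # 1 on POSIX ("\n"), 2 on Windows ("\r\n")
gap = " " * l          # joining two lines replaces the separator
line = "11%s22" % gap  # "11 22" or "11  22"
assert len(line) == 4 + l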
|
def p (path):
|
def normpath (path):
|
def p (path):
    """norm a path name to platform specific notation"""
    return os.path.normpath(path)
|
os.path.normcase(os.path.join(base, 'config')))
|
cnormpath(os.path.join(base, 'config')))
|
def run (self):
    super(MyInstall, self).run()
    # we have to write a configuration file because we need the
    # <install_data> directory (and other stuff like author, url, ...)
    data = []
    for d in ['purelib', 'platlib', 'lib', 'headers', 'scripts', 'data']:
        attr = 'install_%s'%d
        if self.root:
            # cut off root path prefix
            cutoff = len(self.root)
            # don't strip the path separator
            if self.root.endswith(os.sep):
                cutoff -= 1
            val = getattr(self, attr)[cutoff:]
        else:
            val = getattr(self, attr)
        if attr=="install_data":
            base = os.path.join(val, 'share', 'webcleaner')
            data.append('config_dir = %r' % \
                os.path.normcase(os.path.join(base, 'config')))
            data.append('template_dir = %r' % \
                os.path.normcase(os.path.join(base, 'templates')))
        data.append("%s = %r" % (attr, val))
    self.distribution.create_conf_file(data, directory=self.install_lib)
|
os.path.normcase(os.path.join(base, 'templates')))
|
cnormpath(os.path.join(base, 'templates')))
val = cnormpath(val)
|
def run (self):
    super(MyInstall, self).run()
    # we have to write a configuration file because we need the
    # <install_data> directory (and other stuff like author, url, ...)
    data = []
    for d in ['purelib', 'platlib', 'lib', 'headers', 'scripts', 'data']:
        attr = 'install_%s'%d
        if self.root:
            # cut off root path prefix
            cutoff = len(self.root)
            # don't strip the path separator
            if self.root.endswith(os.sep):
                cutoff -= 1
            val = getattr(self, attr)[cutoff:]
        else:
            val = getattr(self, attr)
        if attr=="install_data":
            base = os.path.join(val, 'share', 'webcleaner')
            data.append('config_dir = %r' % \
                os.path.normcase(os.path.join(base, 'config')))
            data.append('template_dir = %r' % \
                os.path.normcase(os.path.join(base, 'templates')))
        data.append("%s = %r" % (attr, val))
    self.distribution.create_conf_file(data, directory=self.install_lib)
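The root-prefix surgery in run() matters for staged installs (e.g. --root=/tmp/stage): the recorded paths must describe the final filesystem, so the stage prefix is cut off while the leading separator is kept. A tiny sketch of the cutoff logic (using "/" where the original checks os.sep):

def strip_root(root, path):
    """Drop a staging root prefix but keep the leading separator."""
    cutoff = len(root)
    if root.endswith("/"):  # the original checks os.sep
        cutoff -= 1         # don't strip the separator itself
    return path[cutoff:]

assert strip_root("/tmp/stage", "/tmp/stage/usr/lib") == "/usr/lib"
assert strip_root("/tmp/stage/", "/tmp/stage/usr/lib") == "/usr/lib"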
|
bv = "7.1"
|
def get_exe_bytes (self):
    if win_cross_compiling:
        bv = "7.1"
        # wininst-x.y.exe is in the same directory as bdist_wininst
        directory = os.path.dirname(distutils.command.__file__)
        # we must use a wininst-x.y.exe built with the same C compiler
        # used for python.
        filename = os.path.join(directory, "wininst-%s.exe" % bv)
        return open(filename, "rb").read()
    return super(MyBdistWininst, self).get_exe_bytes()
|
|
filename = os.path.join(directory, "wininst-%s.exe" % bv)
|
filename = os.path.join(directory, "wininst.exe")
|
def get_exe_bytes (self):
    if win_cross_compiling:
        bv = "7.1"
        # wininst-x.y.exe is in the same directory as bdist_wininst
        directory = os.path.dirname(distutils.command.__file__)
        # we must use a wininst-x.y.exe built with the same C compiler
        # used for python.
        filename = os.path.join(directory, "wininst-%s.exe" % bv)
        return open(filename, "rb").read()
    return super(MyBdistWininst, self).get_exe_bytes()
|