rem (stringlengths 0–322k) | add (stringlengths 0–2.05M) | context (stringlengths 8–228k) |
---|---|---|
data.append(v) | dataappend(v) | def _mk_bitmap(bits): data = [] if _sre.CODESIZE == 2: start = (1, 0) else: start = (1L, 0L) m, v = start for c in bits: if c: v = v + m m = m << 1 if m > MAXCODE: data.append(v) m, v = start return data |
for i in range(fixup(av[0]), fixup(av[1])+1): | for i in xrange(fixup(av[0]), fixup(av[1])+1): | def _optimize_unicode(charset, fixup): try: import array except ImportError: return charset charmap = [False]*65536 negate = 0 try: for op, av in charset: if op is NEGATE: negate = 1 elif op is LITERAL: charmap[fixup(av)] = True elif op is RANGE: for i in range(fixup(av[0]), fixup(av[1])+1): charmap[i] = True elif op is CATEGORY: # XXX: could expand category return charset # cannot compress except IndexError: # non-BMP characters return charset if negate: if sys.maxunicode != 65535: # XXX: negation does not work with big charsets return charset for i in range(65536): charmap[i] = not charmap[i] comps = {} mapping = [0]*256 block = 0 data = [] for i in range(256): chunk = tuple(charmap[i*256:(i+1)*256]) new = comps.setdefault(chunk, block) mapping[i] = new if new == block: block = block + 1 data = data + _mk_bitmap(chunk) header = [block] if MAXCODE == 65535: code = 'H' else: code = 'L' # Convert block indices to byte array of 256 bytes mapping = array.array('b', mapping).tostring() # Convert byte array to word array header = header + array.array(code, mapping).tolist() data[0:0] = header return [(BIGCHARSET, data)] |
for i in range(65536): | for i in xrange(65536): | def _optimize_unicode(charset, fixup): try: import array except ImportError: return charset charmap = [False]*65536 negate = 0 try: for op, av in charset: if op is NEGATE: negate = 1 elif op is LITERAL: charmap[fixup(av)] = True elif op is RANGE: for i in range(fixup(av[0]), fixup(av[1])+1): charmap[i] = True elif op is CATEGORY: # XXX: could expand category return charset # cannot compress except IndexError: # non-BMP characters return charset if negate: if sys.maxunicode != 65535: # XXX: negation does not work with big charsets return charset for i in range(65536): charmap[i] = not charmap[i] comps = {} mapping = [0]*256 block = 0 data = [] for i in range(256): chunk = tuple(charmap[i*256:(i+1)*256]) new = comps.setdefault(chunk, block) mapping[i] = new if new == block: block = block + 1 data = data + _mk_bitmap(chunk) header = [block] if MAXCODE == 65535: code = 'H' else: code = 'L' # Convert block indices to byte array of 256 bytes mapping = array.array('b', mapping).tostring() # Convert byte array to word array header = header + array.array(code, mapping).tolist() data[0:0] = header return [(BIGCHARSET, data)] |
for i in range(256): | for i in xrange(256): | def _optimize_unicode(charset, fixup): try: import array except ImportError: return charset charmap = [False]*65536 negate = 0 try: for op, av in charset: if op is NEGATE: negate = 1 elif op is LITERAL: charmap[fixup(av)] = True elif op is RANGE: for i in range(fixup(av[0]), fixup(av[1])+1): charmap[i] = True elif op is CATEGORY: # XXX: could expand category return charset # cannot compress except IndexError: # non-BMP characters return charset if negate: if sys.maxunicode != 65535: # XXX: negation does not work with big charsets return charset for i in range(65536): charmap[i] = not charmap[i] comps = {} mapping = [0]*256 block = 0 data = [] for i in range(256): chunk = tuple(charmap[i*256:(i+1)*256]) new = comps.setdefault(chunk, block) mapping[i] = new if new == block: block = block + 1 data = data + _mk_bitmap(chunk) header = [block] if MAXCODE == 65535: code = 'H' else: code = 'L' # Convert block indices to byte array of 256 bytes mapping = array.array('b', mapping).tostring() # Convert byte array to word array header = header + array.array(code, mapping).tolist() data[0:0] = header return [(BIGCHARSET, data)] |
prefix.append(av) | prefixappend(av) | def _compile_info(code, pattern, flags): # internal: compile an info block. in the current version, # this contains min/max pattern width, and an optional literal # prefix or a character map lo, hi = pattern.getwidth() if lo == 0: return # not worth it # look for a literal prefix prefix = [] prefix_skip = 0 charset = [] # not used if not (flags & SRE_FLAG_IGNORECASE): # look for literal prefix for op, av in pattern.data: if op is LITERAL: if len(prefix) == prefix_skip: prefix_skip = prefix_skip + 1 prefix.append(av) elif op is SUBPATTERN and len(av[1]) == 1: op, av = av[1][0] if op is LITERAL: prefix.append(av) else: break else: break # if no prefix, look for charset prefix if not prefix and pattern.data: op, av = pattern.data[0] if op is SUBPATTERN and av[1]: op, av = av[1][0] if op is LITERAL: charset.append((op, av)) elif op is BRANCH: c = [] for p in av[1]: if not p: break op, av = p[0] if op is LITERAL: c.append((op, av)) else: break else: charset = c elif op is BRANCH: c = [] for p in av[1]: if not p: break op, av = p[0] if op is LITERAL: c.append((op, av)) else: break else: charset = c elif op is IN: charset = av |
charset.append((op, av)) | charsetappend((op, av)) | def _compile_info(code, pattern, flags): # internal: compile an info block. in the current version, # this contains min/max pattern width, and an optional literal # prefix or a character map lo, hi = pattern.getwidth() if lo == 0: return # not worth it # look for a literal prefix prefix = [] prefix_skip = 0 charset = [] # not used if not (flags & SRE_FLAG_IGNORECASE): # look for literal prefix for op, av in pattern.data: if op is LITERAL: if len(prefix) == prefix_skip: prefix_skip = prefix_skip + 1 prefix.append(av) elif op is SUBPATTERN and len(av[1]) == 1: op, av = av[1][0] if op is LITERAL: prefix.append(av) else: break else: break # if no prefix, look for charset prefix if not prefix and pattern.data: op, av = pattern.data[0] if op is SUBPATTERN and av[1]: op, av = av[1][0] if op is LITERAL: charset.append((op, av)) elif op is BRANCH: c = [] for p in av[1]: if not p: break op, av = p[0] if op is LITERAL: c.append((op, av)) else: break else: charset = c elif op is BRANCH: c = [] for p in av[1]: if not p: break op, av = p[0] if op is LITERAL: c.append((op, av)) else: break else: charset = c elif op is IN: charset = av |
c.append((op, av)) | cappend((op, av)) | def _compile_info(code, pattern, flags): # internal: compile an info block. in the current version, # this contains min/max pattern width, and an optional literal # prefix or a character map lo, hi = pattern.getwidth() if lo == 0: return # not worth it # look for a literal prefix prefix = [] prefix_skip = 0 charset = [] # not used if not (flags & SRE_FLAG_IGNORECASE): # look for literal prefix for op, av in pattern.data: if op is LITERAL: if len(prefix) == prefix_skip: prefix_skip = prefix_skip + 1 prefix.append(av) elif op is SUBPATTERN and len(av[1]) == 1: op, av = av[1][0] if op is LITERAL: prefix.append(av) else: break else: break # if no prefix, look for charset prefix if not prefix and pattern.data: op, av = pattern.data[0] if op is SUBPATTERN and av[1]: op, av = av[1][0] if op is LITERAL: charset.append((op, av)) elif op is BRANCH: c = [] for p in av[1]: if not p: break op, av = p[0] if op is LITERAL: c.append((op, av)) else: break else: charset = c elif op is BRANCH: c = [] for p in av[1]: if not p: break op, av = p[0] if op is LITERAL: c.append((op, av)) else: break else: charset = c elif op is IN: charset = av |
for i in range(len(prefix)): | for i in xrange(len(prefix)): | def _compile_info(code, pattern, flags): # internal: compile an info block. in the current version, # this contains min/max pattern width, and an optional literal # prefix or a character map lo, hi = pattern.getwidth() if lo == 0: return # not worth it # look for a literal prefix prefix = [] prefix_skip = 0 charset = [] # not used if not (flags & SRE_FLAG_IGNORECASE): # look for literal prefix for op, av in pattern.data: if op is LITERAL: if len(prefix) == prefix_skip: prefix_skip = prefix_skip + 1 prefix.append(av) elif op is SUBPATTERN and len(av[1]) == 1: op, av = av[1][0] if op is LITERAL: prefix.append(av) else: break else: break # if no prefix, look for charset prefix if not prefix and pattern.data: op, av = pattern.data[0] if op is SUBPATTERN and av[1]: op, av = av[1][0] if op is LITERAL: charset.append((op, av)) elif op is BRANCH: c = [] for p in av[1]: if not p: break op, av = p[0] if op is LITERAL: c.append((op, av)) else: break else: charset = c elif op is BRANCH: c = [] for p in av[1]: if not p: break op, av = p[0] if op is LITERAL: c.append((op, av)) else: break else: charset = c elif op is IN: charset = av |
exit_status = not main() | exit_status = int(not main()) | def main(): """Script main program.""" import getopt try: opts, args = getopt.getopt(sys.argv[1:], 'lfqd:x:') except getopt.error, msg: print msg print "usage: python compileall.py [-l] [-f] [-q] [-d destdir] " \ "[-x regexp] [directory ...]" print "-l: don't recurse down" print "-f: force rebuild even if timestamps are up-to-date" print "-q: quiet operation" print "-d destdir: purported directory name for error messages" print " if no directory arguments, -l sys.path is assumed" print "-x regexp: skip files matching the regular expression regexp" print " the regexp is search for in the full path of the file" sys.exit(2) maxlevels = 10 ddir = None force = 0 quiet = 0 rx = None for o, a in opts: if o == '-l': maxlevels = 0 if o == '-d': ddir = a if o == '-f': force = 1 if o == '-q': quiet = 1 if o == '-x': import re rx = re.compile(a) if ddir: if len(args) != 1: print "-d destdir require exactly one directory argument" sys.exit(2) success = 1 try: if args: for dir in args: if not compile_dir(dir, maxlevels, ddir, force, rx, quiet): success = 0 else: success = compile_path() except KeyboardInterrupt: print "\n[interrupt]" success = 0 return success |
initial_slash = (path[0] == '/') | initial_slashes = path.startswith('/') if (initial_slashes and path.startswith('//') and not path.startswith('///')): initial_slashes = 2 | def normpath(path): """Normalize path, eliminating double slashes, etc.""" if path == '': return '.' initial_slash = (path[0] == '/') comps = path.split('/') new_comps = [] for comp in comps: if comp in ('', '.'): continue if (comp != '..' or (not initial_slash and not new_comps) or (new_comps and new_comps[-1] == '..')): new_comps.append(comp) elif new_comps: new_comps.pop() comps = new_comps path = '/'.join(comps) if initial_slash: path = '/' + path return path or '.' |
if (comp != '..' or (not initial_slash and not new_comps) or | if (comp != '..' or (not initial_slashes and not new_comps) or | def normpath(path): """Normalize path, eliminating double slashes, etc.""" if path == '': return '.' initial_slash = (path[0] == '/') comps = path.split('/') new_comps = [] for comp in comps: if comp in ('', '.'): continue if (comp != '..' or (not initial_slash and not new_comps) or (new_comps and new_comps[-1] == '..')): new_comps.append(comp) elif new_comps: new_comps.pop() comps = new_comps path = '/'.join(comps) if initial_slash: path = '/' + path return path or '.' |
if initial_slash: path = '/' + path | if initial_slashes: path = '/'*initial_slashes + path | def normpath(path): """Normalize path, eliminating double slashes, etc.""" if path == '': return '.' initial_slash = (path[0] == '/') comps = path.split('/') new_comps = [] for comp in comps: if comp in ('', '.'): continue if (comp != '..' or (not initial_slash and not new_comps) or (new_comps and new_comps[-1] == '..')): new_comps.append(comp) elif new_comps: new_comps.pop() comps = new_comps path = '/'.join(comps) if initial_slash: path = '/' + path return path or '.' |
'db4': {'libs': ('db-4.1', 'db-4.0',), | 'db4': {'libs': ('db-4.1', 'db41', 'db-4.0', 'db4',), | def detect_modules(self): # Ensure that /usr/local is always used add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib') add_dir_to_list(self.compiler.include_dirs, '/usr/local/include') |
'db3': {'libs': ('db-3.3', 'db-3.2', 'db-3.1'), | 'db3': {'libs': ('db-3.3', 'db-3.2', 'db-3.1', 'db3',), | def detect_modules(self): # Ensure that /usr/local is always used add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib') add_dir_to_list(self.compiler.include_dirs, '/usr/local/include') |
def _cmp(a, b): | def _cmp(a, b, sh, st): | def _cmp(a, b): try: return not abs(cmp(a, b)) except os.error: return 2 |
return not abs(cmp(a, b)) | return not abs(cmp(a, b, sh, st)) | def _cmp(a, b): try: return not abs(cmp(a, b)) except os.error: return 2 |
import tempfile try: execv(tempfile.mktemp(), ('blah',)) except error, _notfound: pass | if sys.platform[:4] == 'beos': try: unlink('/_ except error, _notfound: pass else: import tempfile t = tempfile.mktemp() try: execv(t, ('blah',)) except error, _notfound: pass | def _execvpe(file, args, env=None): if env is not None: func = execve argrest = (args, env) else: func = execv argrest = (args,) env = environ global _notfound head, tail = path.split(file) if head: apply(func, (file,) + argrest) return if env.has_key('PATH'): envpath = env['PATH'] else: envpath = defpath PATH = envpath.split(pathsep) if not _notfound: import tempfile # Exec a file that is guaranteed not to exist try: execv(tempfile.mktemp(), ('blah',)) except error, _notfound: pass exc, arg = error, _notfound for dir in PATH: fullname = path.join(dir, file) try: apply(func, (fullname,) + argrest) except error, (errno, msg): if errno != arg[0]: exc, arg = error, (errno, msg) raise exc, arg |
flags = _fcntl.fcntl(fd, _fcntl.F_GETFD, 0) if flags >= 0: | try: flags = _fcntl.fcntl(fd, _fcntl.F_GETFD, 0) except IOError: pass else: | def _set_cloexec(fd): flags = _fcntl.fcntl(fd, _fcntl.F_GETFD, 0) if flags >= 0: # flags read successfully, modify flags |= _fcntl.FD_CLOEXEC _fcntl.fcntl(fd, _fcntl.F_SETFD, flags) |
def extended_linecache_checkcache(orig_checkcache=linecache.checkcache): | def extended_linecache_checkcache(filename=None, orig_checkcache=linecache.checkcache): | def extended_linecache_checkcache(orig_checkcache=linecache.checkcache): """Extend linecache.checkcache to preserve the <pyshell#...> entries Rather than repeating the linecache code, patch it to save the pyshell# entries, call the original linecache.checkcache(), and then restore the saved entries. Assigning the orig_checkcache keyword arg freezes its value at definition time to the (original) method linecache.checkcache(), i.e. makes orig_checkcache lexical. """ cache = linecache.cache save = {} for filename in cache.keys(): if filename[:1] + filename[-1:] == '<>': save[filename] = cache[filename] orig_checkcache() cache.update(save) |
Rather than repeating the linecache code, patch it to save the pyshell# entries, call the original linecache.checkcache(), and then restore the saved entries. Assigning the orig_checkcache keyword arg freezes its value at definition time to the (original) method linecache.checkcache(), i.e. makes orig_checkcache lexical. | Rather than repeating the linecache code, patch it to save the <pyshell#...> entries, call the original linecache.checkcache() (which destroys them), and then restore the saved entries. orig_checkcache is bound at definition time to the original method, allowing it to be patched. | def extended_linecache_checkcache(orig_checkcache=linecache.checkcache): """Extend linecache.checkcache to preserve the <pyshell#...> entries Rather than repeating the linecache code, patch it to save the pyshell# entries, call the original linecache.checkcache(), and then restore the saved entries. Assigning the orig_checkcache keyword arg freezes its value at definition time to the (original) method linecache.checkcache(), i.e. makes orig_checkcache lexical. """ cache = linecache.cache save = {} for filename in cache.keys(): if filename[:1] + filename[-1:] == '<>': save[filename] = cache[filename] orig_checkcache() cache.update(save) |
inc_dir = os.curdir | inc_dir = argv0_path | def get_config_h_filename(): """Return full pathname of installed pyconfig.h file.""" if python_build: inc_dir = os.curdir else: inc_dir = get_python_inc(plat_specific=1) if get_python_version() < '2.2': config_h = 'config.h' else: # The name of the config.h file changed in 2.2 config_h = 'pyconfig.h' return os.path.join(inc_dir, config_h) |
files=Open(**options).show() return files.split() | return Open(**options).show() | def askopenfilenames(**options): """Ask for multiple filenames to open Returns a list of filenames or empty list if cancel button selected """ options["multiple"]=1 files=Open(**options).show() return files.split() |
import traceback print "Exception in Tkinter callback" | import traceback, sys sys.stderr.write("Exception in Tkinter callback\n") | def report_callback_exception(self, exc, val, tb): import traceback print "Exception in Tkinter callback" traceback.print_exception(exc, val, tb) |
if not _default_root: _default_root = master | | def _setup(self, master, cnf): if _support_default_root: global _default_root if not master: if not _default_root: _default_root = Tk() master = _default_root if not _default_root: _default_root = master self.master = master self.tk = master.tk name = None if cnf.has_key('name'): name = cnf['name'] del cnf['name'] if not name: name = `id(self)` self._name = name if master._w=='.': self._w = '.' + name else: self._w = master._w + '.' + name self.children = {} if self.master.children.has_key(self._name): self.master.children[self._name].destroy() self.master.children[self._name] = self |
self._addkey(key, (pos, siz)) | | def __setitem__(self, key, val): if not type(key) == type('') == type(val): raise TypeError, "keys and values must be strings" if not self._index.has_key(key): (pos, siz) = self._addval(val) self._addkey(key, (pos, siz)) else: pos, siz = self._index[key] oldblocks = (siz + _BLOCKSIZE - 1) / _BLOCKSIZE newblocks = (len(val) + _BLOCKSIZE - 1) / _BLOCKSIZE if newblocks <= oldblocks: pos, siz = self._setval(pos, val) self._index[key] = pos, siz else: pos, siz = self._addval(val) self._index[key] = pos, siz self._addkey(key, (pos, siz)) |
if object < 0xff: | if object <= 0xff: | def save_int(self, object, pack=struct.pack): if self.bin: # If the int is small enough to fit in a signed 4-byte 2's-comp # format, we can store it more efficiently than the general # case. # First one- and two-byte unsigned ints: if object >= 0: if object < 0xff: self.write(BININT1 + chr(object)) return if object < 0xffff: self.write(BININT2 + chr(object&0xff) + chr(object>>8)) return # Next check for 4-byte signed ints: high_bits = object >> 31 # note that Python shift sign-extends if high_bits == 0 or high_bits == -1: # All high bits are copies of bit 2**31, so the value # fits in a 4-byte signed int. self.write(BININT + pack("<i", object)) return # Text pickle, or int too big to fit in signed 4-byte format. self.write(INT + `object` + '\n') |
if object < 0xffff: | if object <= 0xffff: | def save_int(self, object, pack=struct.pack): if self.bin: # If the int is small enough to fit in a signed 4-byte 2's-comp # format, we can store it more efficiently than the general # case. # First one- and two-byte unsigned ints: if object >= 0: if object < 0xff: self.write(BININT1 + chr(object)) return if object < 0xffff: self.write(BININT2 + chr(object&0xff) + chr(object>>8)) return # Next check for 4-byte signed ints: high_bits = object >> 31 # note that Python shift sign-extends if high_bits == 0 or high_bits == -1: # All high bits are copies of bit 2**31, so the value # fits in a 4-byte signed int. self.write(BININT + pack("<i", object)) return # Text pickle, or int too big to fit in signed 4-byte format. self.write(INT + `object` + '\n') |
assert flags == 0 | | def search(pattern, string, flags=0): assert flags == 0 return compile(pattern, _fixflags(flags)).search(string) |
if isabs(b): path = b elif path == '' or path[-1:] in '/\\:': path = path + b else: path = path + "\\" + b | if len(path) == 2 and path[-1] == ":" and splitdrive(b)[0] == "": pass elif isabs(b) or path == "": path = "" elif path[-1:] not in "/\\": b = "\\" + b path += b | def join(a, *p): """Join two or more pathname components, inserting "\\" as needed""" path = a for b in p: if isabs(b): path = b elif path == '' or path[-1:] in '/\\:': path = path + b else: path = path + "\\" + b return path |
def triplet_to_pmwrgb(rgbtuple): | def triplet_to_fractional_rgb(rgbtuple): | def triplet_to_pmwrgb(rgbtuple): return map(operator.__div__, rgbtuple, _maxtuple) |
target = 'snow' red, green, blue = colordb.find_byname(target) print target, ':', red, green, blue, hex(rrggbb) name, aliases = colordb.find_byrgb((red, green, blue)) | red, green, blue = rgbtuple = colordb.find_byname(target) print target, ':', red, green, blue, triplet_to_rrggbb(rgbtuple) name, aliases = colordb.find_byrgb(rgbtuple) | def triplet_to_pmwrgb(rgbtuple): return map(operator.__div__, rgbtuple, _maxtuple) |
nearest = apply(colordb.nearest, target) | nearest = colordb.nearest(target) | def triplet_to_pmwrgb(rgbtuple): return map(operator.__div__, rgbtuple, _maxtuple) |
methodname = 'repr_' + join(split(type(x).__name__), '_') if hasattr(self, methodname): return getattr(self, methodname)(x, level) else: return self.escape(cram(stripid(repr(x)), self.maxother)) | if hasattr(type(x), '__name__'): methodname = 'repr_' + join(split(type(x).__name__), '_') if hasattr(self, methodname): return getattr(self, methodname)(x, level) return self.escape(cram(stripid(repr(x)), self.maxother)) | def repr1(self, x, level): methodname = 'repr_' + join(split(type(x).__name__), '_') if hasattr(self, methodname): return getattr(self, methodname)(x, level) else: return self.escape(cram(stripid(repr(x)), self.maxother)) |
methodname = 'repr_' + join(split(type(x).__name__), '_') if hasattr(self, methodname): return getattr(self, methodname)(x, level) else: return cram(stripid(repr(x)), self.maxother) | if hasattr(type(x), '__name__'): methodname = 'repr_' + join(split(type(x).__name__), '_') if hasattr(self, methodname): return getattr(self, methodname)(x, level) return cram(stripid(repr(x)), self.maxother) | def repr1(self, x, level): methodname = 'repr_' + join(split(type(x).__name__), '_') if hasattr(self, methodname): return getattr(self, methodname)(x, level) else: return cram(stripid(repr(x)), self.maxother) |
errors.append((srcname, dstname, why)) | errors.append((srcname, dstname, str(why))) | def copytree(src, dst, symlinks=False): """Recursively copy a directory tree using copy2(). The destination directory must not already exist. If exception(s) occur, an Error is raised with a list of reasons. If the optional symlinks flag is true, symbolic links in the source tree result in symbolic links in the destination tree; if it is false, the contents of the files pointed to by symbolic links are copied. XXX Consider this example code rather than the ultimate tool. """ names = os.listdir(src) os.makedirs(dst) errors = [] for name in names: srcname = os.path.join(src, name) dstname = os.path.join(dst, name) try: if symlinks and os.path.islink(srcname): linkto = os.readlink(srcname) os.symlink(linkto, dstname) elif os.path.isdir(srcname): copytree(srcname, dstname, symlinks) else: copy2(srcname, dstname) # XXX What about devices, sockets etc.? except (IOError, os.error), why: errors.append((srcname, dstname, why)) # catch the Error from the recursive copytree so that we can # continue with other files except Error, err: errors.extend(err.args[0]) copystat(src, dst) if errors: raise Error, errors |
(self.__class__, self.testsRun, len(self.errors), | (_strclass(self.__class__), self.testsRun, len(self.errors), | def __repr__(self): return "<%s run=%i errors=%i failures=%i>" % \ (self.__class__, self.testsRun, len(self.errors), len(self.failures)) |
return "%s.%s" % (self.__class__, self.__testMethodName) | return "%s.%s" % (_strclass(self.__class__), self.__testMethodName) | def id(self): return "%s.%s" % (self.__class__, self.__testMethodName) |
(self.__class__, self.__testMethodName) | (_strclass(self.__class__), self.__testMethodName) | def __repr__(self): return "<%s testMethod=%s>" % \ (self.__class__, self.__testMethodName) |
return "<%s tests=%s>" % (self.__class__, self._tests) | return "<%s tests=%s>" % (_strclass(self.__class__), self._tests) | def __repr__(self): return "<%s tests=%s>" % (self.__class__, self._tests) |
return "%s (%s)" % (self.__class__, self.__testFunc.__name__) | return "%s (%s)" % (_strclass(self.__class__), self.__testFunc.__name__) | def __str__(self): return "%s (%s)" % (self.__class__, self.__testFunc.__name__) |
return "<%s testFunc=%s>" % (self.__class__, self.__testFunc) | return "<%s testFunc=%s>" % (_strclass(self.__class__), self.__testFunc) | def __repr__(self): return "<%s testFunc=%s>" % (self.__class__, self.__testFunc) |
"hooktestpackage.sub.subber": (False, test_co), | "hooktestpackage.sub.subber": (True, test_co), | def find_module(self, fullname, path=None): self.imports.append(fullname) return None |
inst.poll() | if inst.poll(_deadstate=sys.maxint) >= 0: try: _active.remove(inst) except ValueError: pass | def _cleanup(): for inst in _active[:]: inst.poll() |
_active.append(self) | | def __init__(self, args, bufsize=0, executable=None, stdin=None, stdout=None, stderr=None, preexec_fn=None, close_fds=False, shell=False, cwd=None, env=None, universal_newlines=False, startupinfo=None, creationflags=0): """Create new Popen instance.""" _cleanup() |
def poll(self): | def poll(self, _deadstate=None): | def poll(self): """Check if child process has terminated. Returns returncode attribute.""" if self.returncode is None: if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0: self.returncode = GetExitCodeProcess(self._handle) _active.remove(self) return self.returncode |
_active.remove(self) | | def poll(self): """Check if child process has terminated. Returns returncode attribute.""" if self.returncode is None: if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0: self.returncode = GetExitCodeProcess(self._handle) _active.remove(self) return self.returncode |
_active.remove(self) | | def wait(self): """Wait for child process to terminate. Returns returncode attribute.""" if self.returncode is None: obj = WaitForSingleObject(self._handle, INFINITE) self.returncode = GetExitCodeProcess(self._handle) _active.remove(self) return self.returncode |
_active.remove(self) def poll(self): | def poll(self, _deadstate=None): | def _handle_exitstatus(self, sts): if os.WIFSIGNALED(sts): self.returncode = -os.WTERMSIG(sts) elif os.WIFEXITED(sts): self.returncode = os.WEXITSTATUS(sts) else: # Should never happen raise RuntimeError("Unknown child exit status!") |
pass | if _deadstate is not None: self.returncode = _deadstate | def poll(self): """Check if child process has terminated. Returns returncode attribute.""" if self.returncode is None: try: pid, sts = os.waitpid(self.pid, os.WNOHANG) if pid == self.pid: self._handle_exitstatus(sts) except os.error: pass return self.returncode |
interp = interp[:-5] = interp[-4:] | interp = interp[:-5] + interp[-4:] | def run_cgi(self): """Execute a CGI script.""" dir, rest = self.cgi_info i = rest.rfind('?') if i >= 0: rest, query = rest[:i], rest[i+1:] else: query = '' i = rest.find('/') if i >= 0: script, rest = rest[:i], rest[i:] else: script, rest = rest, '' scriptname = dir + '/' + script scriptfile = self.translate_path(scriptname) if not os.path.exists(scriptfile): self.send_error(404, "No such CGI script (%s)" % `scriptname`) return if not os.path.isfile(scriptfile): self.send_error(403, "CGI script is not a plain file (%s)" % `scriptname`) return ispy = self.is_python(scriptname) if not ispy: if not (self.have_fork or self.have_popen2): self.send_error(403, "CGI script is not a Python script (%s)" % `scriptname`) return if not self.is_executable(scriptfile): self.send_error(403, "CGI script is not executable (%s)" % `scriptname`) return |
self.scriptsfolder = fss.NewAlias() | def makeusermenus(self): m = Wapplication.Menu(self.menubar, "File") newitem = FrameWork.MenuItem(m, "New", "N", 'new') openitem = FrameWork.MenuItem(m, "Open", "O", 'open') FrameWork.Separator(m) closeitem = FrameWork.MenuItem(m, "Close", "W", 'close') saveitem = FrameWork.MenuItem(m, "Save", "S", 'save') saveasitem = FrameWork.MenuItem(m, "Save as", None, 'save_as') FrameWork.Separator(m) saveasappletitem = FrameWork.MenuItem(m, "Save as Applet", None, 'save_as_applet') FrameWork.Separator(m) quititem = FrameWork.MenuItem(m, "Quit", "Q", 'quit') m = Wapplication.Menu(self.menubar, "Edit") undoitem = FrameWork.MenuItem(m, "Undo", 'Z', "undo") FrameWork.Separator(m) cutitem = FrameWork.MenuItem(m, "Cut", 'X', "cut") copyitem = FrameWork.MenuItem(m, "Copy", "C", "copy") pasteitem = FrameWork.MenuItem(m, "Paste", "V", "paste") FrameWork.MenuItem(m, "Clear", None, "clear") FrameWork.Separator(m) selallitem = FrameWork.MenuItem(m, "Select all", "A", "selectall") sellineitem = FrameWork.MenuItem(m, "Select line", "L", "selectline") FrameWork.Separator(m) finditem = FrameWork.MenuItem(m, "Find", "F", "find") findagainitem = FrameWork.MenuItem(m, "Find again", 'G', "findnext") enterselitem = FrameWork.MenuItem(m, "Enter search string", "E", "entersearchstring") replaceitem = FrameWork.MenuItem(m, "Replace", None, "replace") replacefinditem = FrameWork.MenuItem(m, "Replace & find again", 'T', "replacefind") FrameWork.Separator(m) shiftleftitem = FrameWork.MenuItem(m, "Shift left", "[", "shiftleft") shiftrightitem = FrameWork.MenuItem(m, "Shift right", "]", "shiftright") m = Wapplication.Menu(self.menubar, "Python") runitem = FrameWork.MenuItem(m, "Run window", "R", 'run') runselitem = FrameWork.MenuItem(m, "Run selection", None, 'runselection') FrameWork.Separator(m) moditem = FrameWork.MenuItem(m, "Module browser", "M", self.domenu_modulebrowser) FrameWork.Separator(m) mm = FrameWork.SubMenu(m, "Preferences") FrameWork.MenuItem(mm, "Set Scripts folder", None, self.do_setscriptsfolder) FrameWork.MenuItem(mm, "Editor default settings", None, self.do_editorprefs) self.openwindowsmenu = Wapplication.Menu(self.menubar, 'Windows') self.makeopenwindowsmenu() self._menustocheck = [closeitem, saveitem, saveasitem, saveasappletitem, undoitem, cutitem, copyitem, pasteitem, selallitem, sellineitem, finditem, findagainitem, enterselitem, replaceitem, replacefinditem, shiftleftitem, shiftrightitem, runitem, runselitem] prefs = self.getprefs() try: fss, fss_changed = macfs.RawAlias(prefs.scriptsfolder).Resolve() except: path = os.path.join(os.getcwd(), 'Scripts') if not os.path.exists(path): os.mkdir(path) fss = macfs.FSSpec(path) self.scriptsfolder = fss.NewAlias() self.scriptsfoldermodtime = fss.GetDates()[1] else: self.scriptsfolder = fss.NewAlias() self.scriptsfoldermodtime = fss.GetDates()[1] prefs.scriptsfolder = self.scriptsfolder.data self._scripts = {} self.scriptsmenu = None self.makescriptsmenu() |
if verbose: if output: print "%s %% %s =? %s ..." %\ (repr(formatstr), repr(args), repr(output)), else: print "%s %% %s works? ..." % (repr(formatstr), repr(args)), try: result = formatstr % args except OverflowError: if verbose: print 'overflow (this is fine)' else: if output and result != output: if verbose: print 'no' print "%s %% %s == %s != %s" %\ (repr(formatstr), repr(args), repr(result), repr(output)) else: if verbose: print 'yes' | if verbose: if output: print "%s %% %s =? %s ..." %\ (repr(formatstr), repr(args), repr(output)), else: print "%s %% %s works? ..." % (repr(formatstr), repr(args)), try: result = formatstr % args except OverflowError: if not overflowok: raise if verbose: print 'overflow (this is fine)' else: if output and result != output: if verbose: print 'no' print "%s %% %s == %s != %s" %\ (repr(formatstr), repr(args), repr(result), repr(output)) else: if verbose: print 'yes' | def testformat(formatstr, args, output=None): if verbose: if output: print "%s %% %s =? %s ..." %\ (repr(formatstr), repr(args), repr(output)), else: print "%s %% %s works? ..." % (repr(formatstr), repr(args)), try: result = formatstr % args except OverflowError: if verbose: print 'overflow (this is fine)' else: if output and result != output: if verbose: print 'no' print "%s %% %s == %s != %s" %\ (repr(formatstr), repr(args), repr(result), repr(output)) else: if verbose: print 'yes' |
if line[:2] == '|-': break | | def emparse_cts(fp): while 1: line = fp.readline() if not line: raise Unparseable line = line[:-1] # Check that we're not in the returned message yet if string.lower(line)[:5] == 'from:': raise Unparseable line = string.split(line) if len(line) > 3 and line[0][:2] == '|-' and line[1] == 'Failed' \ and line[2] == 'addresses': # Yes, found it! break errors = [] while 1: line = fp.readline() if not line: break line = line[:-1] if not line: continue errors.append(line) if line[:2] == '|-': break return errors |
ret | return [None, None] | def _synthesize(browser): """Attempt to synthesize a controller base on existing controllers. This is useful to create a controller when a user specifies a path to an entry in the BROWSER environment variable -- we can copy a general controller to operate using a specific installation of the desired browser in this way. If we can't create a controller in this way, or if there is no executable for the requested browser, return [None, None]. """ if not os.path.exists(browser): return [None, None] name = os.path.basename(browser) try: command = _browsers[name.lower()] except KeyError: return [None, None] # now attempt to clone to fit the new name: controller = command[1] if controller and name.lower() == controller.basename: import copy controller = copy.copy(controller) controller.name = browser controller.basename = os.path.basename(browser) register(browser, None, controller) return [None, controller] ret |
for name in ('lib', 'purelib', 'platlib', | for name in ('libbase', 'lib', 'purelib', 'platlib', | def finalize_options (self): |
k, v = item.split(':', 1) k = k.strip().lower() v = v.strip() self._info[k] = v | if ':' in item: k, v = item.split(':', 1) k = k.strip().lower() v = v.strip() self._info[k] = v lastk = k elif lastk: self._info[lastk] += '\n' + item | def _parse(self, fp): """Override this method to support alternative .mo formats.""" unpack = struct.unpack filename = getattr(fp, 'name', '') # Parse the .mo file header, which consists of 5 little endian 32 # bit words. self._catalog = catalog = {} self.plural = lambda n: int(n != 1) # germanic plural by default buf = fp.read() buflen = len(buf) # Are we big endian or little endian? magic = unpack('<I', buf[:4])[0] if magic == self.LE_MAGIC: version, msgcount, masteridx, transidx = unpack('<4I', buf[4:20]) ii = '<II' elif magic == self.BE_MAGIC: version, msgcount, masteridx, transidx = unpack('>4I', buf[4:20]) ii = '>II' else: raise IOError(0, 'Bad magic number', filename) # Now put all messages from the .mo file buffer into the catalog # dictionary. for i in xrange(0, msgcount): mlen, moff = unpack(ii, buf[masteridx:masteridx+8]) mend = moff + mlen tlen, toff = unpack(ii, buf[transidx:transidx+8]) tend = toff + tlen if mend < buflen and tend < buflen: msg = buf[moff:mend] tmsg = buf[toff:tend] else: raise IOError(0, 'File is corrupt', filename) # See if we're looking at GNU .mo conventions for metadata if mlen == 0: # Catalog description for item in tmsg.splitlines(): item = item.strip() if not item: continue k, v = item.split(':', 1) k = k.strip().lower() v = v.strip() self._info[k] = v if k == 'content-type': self._charset = v.split('charset=')[1] elif k == 'plural-forms': v = v.split(';') plural = v[1].split('plural=')[1] self.plural = c2py(plural) # Note: we unconditionally convert both msgids and msgstrs to # Unicode using the character encoding specified in the charset # parameter of the Content-Type header. The gettext documentation # strongly encourages msgids to be us-ascii, but some appliations # require alternative encodings (e.g. Zope's ZCML and ZPT). For # traditional gettext applications, the msgid conversion will # cause no problems since us-ascii should always be a subset of # the charset encoding. We may want to fall back to 8-bit msgids # if the Unicode conversion fails. if msg.find('\x00') >= 0: # Plural forms msgid1, msgid2 = msg.split('\x00') tmsg = tmsg.split('\x00') if self._charset: msgid1 = unicode(msgid1, self._charset) tmsg = [unicode(x, self._charset) for x in tmsg] for i in range(len(tmsg)): catalog[(msgid1, i)] = tmsg[i] else: if self._charset: msg = unicode(msg, self._charset) tmsg = unicode(tmsg, self._charset) catalog[msg] = tmsg # advance to next entry in the seek tables masteridx += 8 transidx += 8 |
util.mkpath (name, mode, self.verbose, self.dry_run) | dir_util.mkpath(name, mode, self.verbose, self.dry_run) | def mkpath (self, name, mode=0777): util.mkpath (name, mode, self.verbose, self.dry_run) |
return util.copy_file (infile, outfile, preserve_mode, preserve_times, not self.force, link, self.verbose >= level, self.dry_run) | return file_util.copy_file( infile, outfile, preserve_mode, preserve_times, not self.force, link, self.verbose >= level, self.dry_run) | def copy_file (self, infile, outfile, preserve_mode=1, preserve_times=1, link=None, level=1): """Copy a file respecting verbose, dry-run and force flags. (The former two default to whatever is in the Distribution object, and the latter defaults to false for commands that don't define it.)""" |
return util.copy_tree (infile, outfile, preserve_mode,preserve_times,preserve_symlinks, not self.force, self.verbose >= level, self.dry_run) | return dir_util.copy_tree( infile, outfile, preserve_mode,preserve_times,preserve_symlinks, not self.force, self.verbose >= level, self.dry_run) | def copy_tree (self, infile, outfile, preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1): """Copy an entire directory tree respecting verbose, dry-run, and force flags. """ return util.copy_tree (infile, outfile, preserve_mode,preserve_times,preserve_symlinks, not self.force, self.verbose >= level, self.dry_run) |
return util.move_file (src, dst, self.verbose >= level, self.dry_run) | return file_util.move_file (src, dst, self.verbose >= level, self.dry_run) | def move_file (self, src, dst, level=1): """Move a file respecting verbose and dry-run flags.""" return util.move_file (src, dst, self.verbose >= level, self.dry_run) |
return util.make_archive (base_name, format, root_dir, base_dir, self.verbose, self.dry_run) | return archive_util.make_archive( base_name, format, root_dir, base_dir, self.verbose, self.dry_run) | def make_archive (self, base_name, format, root_dir=None, base_dir=None): return util.make_archive (base_name, format, root_dir, base_dir, self.verbose, self.dry_run) |
if self.force or util.newer_group (infiles, outfile): | if self.force or dep_util.newer_group (infiles, outfile): | def make_file (self, infiles, outfile, func, args, exec_msg=None, skip_msg=None, level=1): """Special case of 'execute()' for operations that process one or more input files and generate one output file. Works just like 'execute()', except the operation is skipped and a different message printed if 'outfile' already exists and is newer than all files listed in 'infiles'. If the command defined 'self.force', and it is true, then the command is unconditionally run -- does no timestamp checks. """ if exec_msg is None: exec_msg = "generating %s from %s" % \ (outfile, string.join (infiles, ', ')) if skip_msg is None: skip_msg = "skipping %s (inputs unchanged)" % outfile |
self.prefix = sys.prefix | self.prefix = os.path.normpath (sys.prefix) | def set_final_options (self): |
self.exec_prefix = sys.exec_prefix | self.exec_prefix = os.path.normpath (sys.exec_prefix) | def set_final_options (self): |
sys_prefix = sys.exec_prefix | sys_prefix = os.path.normpath (sys.exec_prefix) | def replace_sys_prefix (self, config_attr, fallback_postfix, use_exec=0): """Attempts to glean a simple pattern from an installation directory available as a 'sysconfig' attribute: if the directory name starts with the "system prefix" (the one hard-coded in the Makefile and compiled into Python), then replace it with the current installation prefix and return the "relocated" installation directory.""" |
sys_prefix = sys.prefix | sys_prefix = os.path.normpath (sys.prefix) | def replace_sys_prefix (self, config_attr, fallback_postfix, use_exec=0): """Attempts to glean a simple pattern from an installation directory available as a 'sysconfig' attribute: if the directory name starts with the "system prefix" (the one hard-coded in the Makefile and compiled into Python), then replace it with the current installation prefix and return the "relocated" installation directory.""" |
def classlink(self, object, modname, *dicts): | def classlink(self, object, modname): | def classlink(self, object, modname, *dicts): """Make a link for a class.""" name = classname(object, modname) for dict in dicts: if dict.has_key(object): return '<a href="%s">%s</a>' % (dict[object], name) return name |
for dict in dicts: if dict.has_key(object): return '<a href="%s">%s</a>' % (dict[object], name) | if sys.modules.has_key(object.__module__) and \ getattr(sys.modules[object.__module__], object.__name__) is object: return '<a href="%s.html#%s">%s</a>' % ( object.__module__, object.__name__, name) | def classlink(self, object, modname, *dicts): """Make a link for a class.""" name = classname(object, modname) for dict in dicts: if dict.has_key(object): return '<a href="%s">%s</a>' % (dict[object], name) return name |
def formattree(self, tree, modname, classes={}, parent=None): | def formattree(self, tree, modname, parent=None): | def formattree(self, tree, modname, classes={}, parent=None): """Produce HTML for a class tree as given by inspect.getclasstree().""" result = '' for entry in tree: if type(entry) is type(()): c, bases = entry result = result + '<dt><font face="helvetica, arial"><small>' result = result + self.classlink(c, modname, classes) if bases and bases != (parent,): parents = [] for base in bases: parents.append(self.classlink(base, modname, classes)) result = result + '(' + join(parents, ', ') + ')' result = result + '\n</small></font></dt>' elif type(entry) is type([]): result = result + '<dd>\n%s</dd>\n' % self.formattree( entry, modname, classes, c) return '<dl>\n%s</dl>\n' % result |
result = result + self.classlink(c, modname, classes) | result = result + self.classlink(c, modname) | def formattree(self, tree, modname, classes={}, parent=None): """Produce HTML for a class tree as given by inspect.getclasstree().""" result = '' for entry in tree: if type(entry) is type(()): c, bases = entry result = result + '<dt><font face="helvetica, arial"><small>' result = result + self.classlink(c, modname, classes) if bases and bases != (parent,): parents = [] for base in bases: parents.append(self.classlink(base, modname, classes)) result = result + '(' + join(parents, ', ') + ')' result = result + '\n</small></font></dt>' elif type(entry) is type([]): result = result + '<dd>\n%s</dd>\n' % self.formattree( entry, modname, classes, c) return '<dl>\n%s</dl>\n' % result |
parents.append(self.classlink(base, modname, classes)) | parents.append(self.classlink(base, modname)) | def formattree(self, tree, modname, classes={}, parent=None): """Produce HTML for a class tree as given by inspect.getclasstree().""" result = '' for entry in tree: if type(entry) is type(()): c, bases = entry result = result + '<dt><font face="helvetica, arial"><small>' result = result + self.classlink(c, modname, classes) if bases and bases != (parent,): parents = [] for base in bases: parents.append(self.classlink(base, modname, classes)) result = result + '(' + join(parents, ', ') + ')' result = result + '\n</small></font></dt>' elif type(entry) is type([]): result = result + '<dd>\n%s</dd>\n' % self.formattree( entry, modname, classes, c) return '<dl>\n%s</dl>\n' % result |
entry, modname, classes, c) | entry, modname, c) | def formattree(self, tree, modname, classes={}, parent=None): """Produce HTML for a class tree as given by inspect.getclasstree().""" result = '' for entry in tree: if type(entry) is type(()): c, bases = entry result = result + '<dt><font face="helvetica, arial"><small>' result = result + self.classlink(c, modname, classes) if bases and bases != (parent,): parents = [] for base in bases: parents.append(self.classlink(base, modname, classes)) result = result + '(' + join(parents, ', ') + ')' result = result + '\n</small></font></dt>' elif type(entry) is type([]): result = result + '<dd>\n%s</dd>\n' % self.formattree( entry, modname, classes, c) return '<dl>\n%s</dl>\n' % result |
contents = [self.formattree( inspect.getclasstree(classlist, 1), name, cdict)] | contents = [ self.formattree(inspect.getclasstree(classlist, 1), name)] | def docmodule(self, object, name=None, mod=None): """Produce HTML documentation for a module object.""" name = object.__name__ # ignore the passed-in name parts = split(name, '.') links = [] for i in range(len(parts)-1): links.append( '<a href="%s.html"><font color="#ffffff">%s</font></a>' % (join(parts[:i+1], '.'), parts[i])) linkedname = join(links + parts[-1:], '.') head = '<big><big><strong>%s</strong></big></big>' % linkedname try: path = inspect.getabsfile(object) filelink = '<a href="file:%s">%s</a>' % (path, path) except TypeError: filelink = '(built-in)' info = [] if hasattr(object, '__version__'): version = str(object.__version__) if version[:11] == '$' + 'Revision: ' and version[-1:] == '$': version = strip(version[11:-1]) info.append('version %s' % self.escape(version)) if hasattr(object, '__date__'): info.append(self.escape(str(object.__date__))) if info: head = head + ' (%s)' % join(info, ', ') result = self.heading( head, '#ffffff', '#7799ee', '<a href=".">index</a><br>' + filelink) |
parents.append( self.classlink(base, object.__module__, classes)) | parents.append(self.classlink(base, object.__module__)) | def docclass(self, object, name=None, mod=None, funcs={}, classes={}): """Produce HTML documentation for a class object.""" realname = object.__name__ name = name or realname bases = object.__bases__ contents = '' |
url = '%s.html#%s-%s' % ( imclass.__module__, imclass.__name__, name) note = ' from <a href="%s">%s</a>' % ( url, classname(imclass, mod)) | note = ' from ' + self.classlink(imclass, mod) | def docroutine(self, object, name=None, mod=None, funcs={}, classes={}, methods={}, cl=None): """Produce HTML documentation for a function or method object.""" realname = object.__name__ name = name or realname anchor = (cl and cl.__name__ or '') + '-' + name note = '' skipdocs = 0 if inspect.ismethod(object): imclass = object.im_class if cl: if imclass is not cl: url = '%s.html#%s-%s' % ( imclass.__module__, imclass.__name__, name) note = ' from <a href="%s">%s</a>' % ( url, classname(imclass, mod)) skipdocs = 1 else: inst = object.im_self note = (inst and ' method of %s instance' % classname(inst.__class__, mod) or ' unbound %s method' % classname(imclass, mod)) object = object.im_func |
inst = object.im_self note = (inst and ' method of %s instance' % classname(inst.__class__, mod) or ' unbound %s method' % classname(imclass, mod)) | if object.im_self: note = ' method of %s instance' % self.classlink( object.im_self.__class__, mod) else: note = ' unbound %s method' % self.classlink(imclass,mod) | def docroutine(self, object, name=None, mod=None, funcs={}, classes={}, methods={}, cl=None): """Produce HTML documentation for a function or method object.""" realname = object.__name__ name = name or realname anchor = (cl and cl.__name__ or '') + '-' + name note = '' skipdocs = 0 if inspect.ismethod(object): imclass = object.im_class if cl: if imclass is not cl: url = '%s.html#%s-%s' % ( imclass.__module__, imclass.__name__, name) note = ' from <a href="%s">%s</a>' % ( url, classname(imclass, mod)) skipdocs = 1 else: inst = object.im_self note = (inst and ' method of %s instance' % classname(inst.__class__, mod) or ' unbound %s method' % classname(imclass, mod)) object = object.im_func |
inst = object.im_self note = (inst and ' method of %s instance' % classname(inst.__class__, mod) or ' unbound %s method' % classname(imclass, mod)) | if object.im_self: note = ' method of %s instance' % classname( object.im_self.__class__, mod) else: note = ' unbound %s method' % classname(imclass,mod) | def docroutine(self, object, name=None, mod=None, cl=None): """Produce text documentation for a function or method object.""" realname = object.__name__ name = name or realname note = '' skipdocs = 0 if inspect.ismethod(object): imclass = object.im_class if cl: if imclass is not cl: note = ' from ' + classname(imclass, mod) skipdocs = 1 else: inst = object.im_self note = (inst and ' method of %s instance' % classname(inst.__class__, mod) or ' unbound %s method' % classname(imclass, mod)) object = object.im_func |
print 'new.code()' | | def break_yolks(self): self.yolks = self.yolks - 2 |
class CustomProxy: def __init__(self, proto, func=None, proxy_addr=None): self.proto = proto self.func = func self.addr = proxy_addr def handle(self, req): if self.func and self.func(req): return 1 def get_proxy(self): return self.addr class CustomProxyHandler(BaseHandler): handler_order = 100 def __init__(self, *proxies): self.proxies = {} def proxy_open(self, req): proto = req.get_type() try: proxies = self.proxies[proto] except KeyError: return None for p in proxies: if p.handle(req): req.set_proxy(p.get_proxy()) return self.parent.open(req) return None def do_proxy(self, p, req): return self.parent.open(req) def add_proxy(self, cpo): if cpo.proto in self.proxies: self.proxies[cpo.proto].append(cpo) else: self.proxies[cpo.proto] = [cpo] | def proxy_open(self, req, proxy, type): orig_type = req.get_type() proxy_type, user, password, hostport = _parse_proxy(proxy) if proxy_type is None: proxy_type = orig_type if user and password: user_pass = '%s:%s' % (unquote(user), unquote(password)) creds = base64.encodestring(user_pass).strip() req.add_header('Proxy-authorization', 'Basic ' + creds) hostport = unquote(hostport) req.set_proxy(hostport, proxy_type) if orig_type == proxy_type: # let other handlers take care of it return None else: # need to start over, because the other handlers don't # grok the proxy's URL type # e.g. if we have a constructor arg proxies like so: # {'http': 'ftp://proxy.example.com'}, we may end up turning # a request for http://acme.example.com/a into one for # ftp://proxy.example.com/a return self.parent.open(req) |
class OpenerFactory: default_handlers = [UnknownHandler, HTTPHandler, HTTPDefaultErrorHandler, HTTPRedirectHandler, FTPHandler, FileHandler] handlers = [] replacement_handlers = [] def add_handler(self, h): self.handlers = self.handlers + [h] def replace_handler(self, h): pass def build_opener(self): opener = OpenerDirector() for ph in self.default_handlers: if inspect.isclass(ph): ph = ph() opener.add_handler(ph) | def gopher_open(self, req): import gopherlib # this raises DeprecationWarning in 2.5 host = req.get_host() if not host: raise GopherError('no host given') host = unquote(host) selector = req.get_selector() type, selector = splitgophertype(selector) selector, query = splitquery(selector) selector = unquote(selector) if query: query = unquote(query) fp = gopherlib.send_query(selector, query, host) else: fp = gopherlib.send_selector(selector, host) return addinfourl(fp, noheaders(), req.get_full_url()) |
else: _tryorder = filter(lambda x: _browsers.has_key(x.lower()) or x.find("%s") > -1, _tryorder) | for cmd in _tryorder: if not _browsers.has_key(cmd.lower()): if _iscommand(cmd.lower()): register(cmd.lower(), None, GenericBrowser("%s %%s" % cmd.lower())) _tryorder = filter(lambda x: _browsers.has_key(x.lower()) or x.find("%s") > -1, _tryorder) | def open_new(self, url): # Deprecated. May be removed in 2.1. self.open(url) |
0x00b4: 0x0403, | 0x00b4: 0x0404, | def getregentry(): return (Codec().encode,Codec().decode,StreamReader,StreamWriter) |
print self.skip, self.stack, | print '!'*self.debugging, 'process:', self.skip, self.stack, | def process(self, accu): if self.debugging > 1: print self.skip, self.stack, if accu: print accu[0][:30], if accu[0][30:] or accu[1:]: print '...', print if self.stack and self.stack[-1] == 'menu': # XXX should be done differently for line in accu: mo = miprog.match(line) if not mo: line = string.strip(line) + '\n' self.expand(line) continue bgn, end = mo.span(0) a, b = mo.span(1) c, d = mo.span(2) e, f = mo.span(3) g, h = mo.span(4) label = line[a:b] nodename = line[c:d] if nodename[0] == ':': nodename = label else: nodename = line[e:f] punct = line[g:h] self.write(' <LI><A HREF="', makefile(nodename), '">', nodename, '</A>', punct, '\n') self.expand(line[end:]) else: text = string.joinfields(accu, '') self.expand(text) |
if self.stack and self.stack[-1] == 'menu': | if self.inmenu(): | def process(self, accu): if self.debugging > 1: print self.skip, self.stack, if accu: print accu[0][:30], if accu[0][30:] or accu[1:]: print '...', print if self.stack and self.stack[-1] == 'menu': # XXX should be done differently for line in accu: mo = miprog.match(line) if not mo: line = string.strip(line) + '\n' self.expand(line) continue bgn, end = mo.span(0) a, b = mo.span(1) c, d = mo.span(2) e, f = mo.span(3) g, h = mo.span(4) label = line[a:b] nodename = line[c:d] if nodename[0] == ':': nodename = label else: nodename = line[e:f] punct = line[g:h] self.write(' <LI><A HREF="', makefile(nodename), '">', nodename, '</A>', punct, '\n') self.expand(line[end:]) else: text = string.joinfields(accu, '') self.expand(text) |
if self.debugging: print '--> file', `file` | print '!'*self.debugging, '--> file', `file` | def do_include(self, args): file = args file = os.path.join(self.includedir, file) try: fp = open(file, 'r') except IOError, msg: print '*** Can\'t open include file', `file` return if self.debugging: print '--> file', `file` save_done = self.done save_skip = self.skip save_stack = self.stack self.includedepth = self.includedepth + 1 self.parserest(fp, 0) self.includedepth = self.includedepth - 1 fp.close() self.done = save_done self.skip = save_skip self.stack = save_stack if self.debugging: print '<-- file', `file` |
if self.debugging: print '<-- file', `file` | print '!'*self.debugging, '<-- file', `file` | def do_include(self, args): file = args file = os.path.join(self.includedir, file) try: fp = open(file, 'r') except IOError, msg: print '*** Can\'t open include file', `file` return if self.debugging: print '--> file', `file` save_done = self.done save_skip = self.skip save_stack = self.stack self.includedepth = self.includedepth + 1 self.parserest(fp, 0) self.includedepth = self.includedepth - 1 fp.close() self.done = save_done self.skip = save_skip self.stack = save_stack if self.debugging: print '<-- file', `file` |
print self.skip, self.stack, '@' + cmd, args | print '!'*self.debugging, 'command:', self.skip, self.stack, \ '@' + cmd, args | def command(self, line, mo): a, b = mo.span(1) cmd = line[a:b] args = string.strip(line[b:]) if self.debugging > 1: print self.skip, self.stack, '@' + cmd, args try: func = getattr(self, 'do_' + cmd) except AttributeError: try: func = getattr(self, 'bgn_' + cmd) except AttributeError: # don't complain if we are skipping anyway if not self.skip: self.unknown_cmd(cmd, args) return self.stack.append(cmd) func(args) return if not self.skip or cmd == 'end': func(args) |
print self.values | | def do_set(self, args): fields = string.splitfields(args, ' ') key = fields[0] if len(fields) == 1: value = 1 else: value = string.joinfields(fields[1:], ' ') self.values[key] = value print self.values |
print self.stack print self.stackinfo if self.stackinfo[len(self.stack) + 1]: self.skip = self.skip - 1 del self.stackinfo[len(self.stack) + 1] | try: if self.stackinfo[len(self.stack) + 1]: self.skip = self.skip - 1 del self.stackinfo[len(self.stack) + 1] except KeyError: print '*** end_ifset: KeyError :', len(self.stack) + 1 | def end_ifset(self): print self.stack print self.stackinfo if self.stackinfo[len(self.stack) + 1]: self.skip = self.skip - 1 del self.stackinfo[len(self.stack) + 1] |
end_ifclear = end_ifset | def end_ifclear(self): try: if self.stackinfo[len(self.stack) + 1]: self.skip = self.skip - 1 del self.stackinfo[len(self.stack) + 1] except KeyError: print '*** end_ifclear: KeyError :', len(self.stack) + 1 | def bgn_ifclear(self, args): if args in self.values.keys() \ and self.values[args] is not None: self.skip = self.skip + 1 self.stackinfo[len(self.stack)] = 1 else: self.stackinfo[len(self.stack)] = 0 |
print args | | def do_settitle(self, args): print args self.startsaving() self.expand(args) self.title = self.collectsavings() print self.title |
print self.title | | def do_settitle(self, args): print args self.startsaving() self.expand(args) self.title = self.collectsavings() print self.title |
if self.debugging: print '--- writing', file | if self.debugging: print '!'*self.debugging, '--- writing', file | def do_node(self, args): self.endnode() self.nodelineno = 0 parts = string.splitfields(args, ',') while len(parts) < 4: parts.append('') for i in range(4): parts[i] = string.strip(parts[i]) self.nodelinks = parts [name, next, prev, up] = parts[:4] file = self.dirname + '/' + makefile(name) if self.filenames.has_key(file): print '*** Filename already in use: ', file else: if self.debugging: print '--- writing', file self.filenames[file] = 1 # self.nodefp = open(file, 'w') self.nodename = name if self.cont and self.nodestack: self.nodestack[-1].cont = self.nodename if not self.topname: self.topname = name title = name if self.title: title = title + ' -- ' + self.title self.node = self.Node(self.dirname, self.nodename, self.topname, title, next, prev, up) |
if self.itemarg[0] == '@' and self.itemarg[1:2] and \ | if self.itemarg[0] == '@' and self.itemarg[1] and \ | def do_item(self, args): if self.itemindex: self.index(self.itemindex, args) if self.itemarg: if self.itemarg[0] == '@' and self.itemarg[1:2] and \ self.itemarg[1] in string.ascii_letters: args = self.itemarg + '{' + args + '}' else: # some other character, e.g. '-' args = self.itemarg + ' ' + args if self.itemnumber <> None: args = self.itemnumber + '. ' + args self.itemnumber = increment(self.itemnumber) if self.stack and self.stack[-1] == 'table': self.write('<DT>') self.expand(args) self.write('\n<DD>') else: self.write('<LI>') self.expand(args) self.write(' ') |
print '--- Generating', self.indextitle[name], 'index' | print '!'*self.debugging, '--- Generating', \ self.indextitle[name], 'index' | def prindex(self, name): iscodeindex = (name not in self.noncodeindices) index = self.whichindex[name] if not index: return if self.debugging: print '--- Generating', self.indextitle[name], 'index' # The node already provides a title index1 = [] junkprog = re.compile('^(@[a-z]+)?{') for key, node in index: sortkey = string.lower(key) # Remove leading `@cmd{' from sort key # -- don't bother about the matching `}' oldsortkey = sortkey while 1: mo = junkprog.match(sortkey) if not mo: break i = mo.end() sortkey = sortkey[i:] index1.append((sortkey, key, node)) del index[:] index1.sort() self.write('<DL COMPACT>\n') prevkey = prevnode = None for sortkey, key, node in index1: if (key, node) == (prevkey, prevnode): continue if self.debugging > 1: print key, ':', node self.write('<DT>') if iscodeindex: key = '@code{' + key + '}' if key != prevkey: self.expand(key) self.write('\n<DD><A HREF="%s">%s</A>\n' % (makefile(node), node)) prevkey, prevnode = key, node self.write('</DL>\n') |
if self.debugging > 1: print key, ':', node | if self.debugging > 1: print '!'*self.debugging, key, ':', node | def prindex(self, name): iscodeindex = (name not in self.noncodeindices) index = self.whichindex[name] if not index: return if self.debugging: print '--- Generating', self.indextitle[name], 'index' # The node already provides a title index1 = [] junkprog = re.compile('^(@[a-z]+)?{') for key, node in index: sortkey = string.lower(key) # Remove leading `@cmd{' from sort key # -- don't bother about the matching `}' oldsortkey = sortkey while 1: mo = junkprog.match(sortkey) if not mo: break i = mo.end() sortkey = sortkey[i:] index1.append((sortkey, key, node)) del index[:] index1.sort() self.write('<DL COMPACT>\n') prevkey = prevnode = None for sortkey, key, node in index1: if (key, node) == (prevkey, prevnode): continue if self.debugging > 1: print key, ':', node self.write('<DT>') if iscodeindex: key = '@code{' + key + '}' if key != prevkey: self.expand(key) self.write('\n<DD><A HREF="%s">%s</A>\n' % (makefile(node), node)) prevkey, prevnode = key, node self.write('</DL>\n') |
while sys.argv[1:2] == ['-d']: | htmlhelp = '' while sys.argv[1] == ['-d']: | def test(): import sys debugging = 0 print_headers = 0 cont = 0 html3 = 0 while sys.argv[1:2] == ['-d']: debugging = debugging + 1 del sys.argv[1:2] if sys.argv[1] == '-p': print_headers = 1 del sys.argv[1] if sys.argv[1] == '-c': cont = 1 del sys.argv[1] if sys.argv[1] == '-3': html3 = 1 del sys.argv[1] if len(sys.argv) <> 3: print 'usage: texi2html [-d [-d]] [-p] [-c] inputfile outputdirectory' sys.exit(2) if html3: parser = TexinfoParserHTML3() else: parser = TexinfoParser() parser.cont = cont parser.debugging = debugging parser.print_headers = print_headers file = sys.argv[1] parser.setdirname(sys.argv[2]) if file == '-': fp = sys.stdin else: parser.setincludedir(os.path.dirname(file)) try: fp = open(file, 'r') except IOError, msg: print file, ':', msg sys.exit(1) parser.parse(fp) fp.close() parser.report() |
del sys.argv[1:2] | del sys.argv[1] | def test(): import sys debugging = 0 print_headers = 0 cont = 0 html3 = 0 while sys.argv[1:2] == ['-d']: debugging = debugging + 1 del sys.argv[1:2] if sys.argv[1] == '-p': print_headers = 1 del sys.argv[1] if sys.argv[1] == '-c': cont = 1 del sys.argv[1] if sys.argv[1] == '-3': html3 = 1 del sys.argv[1] if len(sys.argv) <> 3: print 'usage: texi2html [-d [-d]] [-p] [-c] inputfile outputdirectory' sys.exit(2) if html3: parser = TexinfoParserHTML3() else: parser = TexinfoParser() parser.cont = cont parser.debugging = debugging parser.print_headers = print_headers file = sys.argv[1] parser.setdirname(sys.argv[2]) if file == '-': fp = sys.stdin else: parser.setincludedir(os.path.dirname(file)) try: fp = open(file, 'r') except IOError, msg: print file, ':', msg sys.exit(1) parser.parse(fp) fp.close() parser.report() |
print 'usage: texi2html [-d [-d]] [-p] [-c] inputfile outputdirectory' | print 'usage: texi2hh [-d [-d]] [-p] [-c] [-3] [-H htmlhelp]', \ 'inputfile outputdirectory' | def test(): import sys debugging = 0 print_headers = 0 cont = 0 html3 = 0 while sys.argv[1:2] == ['-d']: debugging = debugging + 1 del sys.argv[1:2] if sys.argv[1] == '-p': print_headers = 1 del sys.argv[1] if sys.argv[1] == '-c': cont = 1 del sys.argv[1] if sys.argv[1] == '-3': html3 = 1 del sys.argv[1] if len(sys.argv) <> 3: print 'usage: texi2html [-d [-d]] [-p] [-c] inputfile outputdirectory' sys.exit(2) if html3: parser = TexinfoParserHTML3() else: parser = TexinfoParser() parser.cont = cont parser.debugging = debugging parser.print_headers = print_headers file = sys.argv[1] parser.setdirname(sys.argv[2]) if file == '-': fp = sys.stdin else: parser.setincludedir(os.path.dirname(file)) try: fp = open(file, 'r') except IOError, msg: print file, ':', msg sys.exit(1) parser.parse(fp) fp.close() parser.report() |
parser.setdirname(sys.argv[2]) if file == '-': fp = sys.stdin else: | dirname = sys.argv[2] parser.setdirname(dirname) | def test(): import sys debugging = 0 print_headers = 0 cont = 0 html3 = 0 while sys.argv[1:2] == ['-d']: debugging = debugging + 1 del sys.argv[1:2] if sys.argv[1] == '-p': print_headers = 1 del sys.argv[1] if sys.argv[1] == '-c': cont = 1 del sys.argv[1] if sys.argv[1] == '-3': html3 = 1 del sys.argv[1] if len(sys.argv) <> 3: print 'usage: texi2html [-d [-d]] [-p] [-c] inputfile outputdirectory' sys.exit(2) if html3: parser = TexinfoParserHTML3() else: parser = TexinfoParser() parser.cont = cont parser.debugging = debugging parser.print_headers = print_headers file = sys.argv[1] parser.setdirname(sys.argv[2]) if file == '-': fp = sys.stdin else: parser.setincludedir(os.path.dirname(file)) try: fp = open(file, 'r') except IOError, msg: print file, ':', msg sys.exit(1) parser.parse(fp) fp.close() parser.report() |