rem | add | context
---|---|---
if hasattr(self.fp, "fileno"): self.fileno = self.fp.fileno
|
if hasattr(self.fp, "fileno"):
    self.fileno = self.fp.fileno
else:
    self.fileno = lambda: None
|
def __init__(self, fp):
    self.fp = fp
    self.read = self.fp.read
    self.readline = self.fp.readline
    if hasattr(self.fp, "readlines"):
        self.readlines = self.fp.readlines
    if hasattr(self.fp, "fileno"):
        self.fileno = self.fp.fileno
    if hasattr(self.fp, "__iter__"):
        self.__iter__ = self.fp.__iter__
    if hasattr(self.fp, "next"):
        self.next = self.fp.next
|
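The point of this diff: the wrapper delegates to the underlying file object, so a caller probing fileno() on a wrapped object with no file descriptor used to get an AttributeError; the added else-branch returns None instead. A minimal standalone sketch (Wrapper is an illustrative stand-in, not the class above):

from StringIO import StringIO      # StringIO objects expose no fileno()

class Wrapper:
    def __init__(self, fp):
        self.fp = fp
        self.read = self.fp.read
        if hasattr(self.fp, "fileno"):
            self.fileno = self.fp.fileno
        else:
            self.fileno = lambda: None   # callers get None, not AttributeError

w = Wrapper(StringIO("data"))
print w.fileno()                   # -> None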
outputs[-1:] = \
|
outputs.extend (
|
def copy_tree (src, dst,
               preserve_mode=1,
               preserve_times=1,
               preserve_symlinks=0,
               update=0,
               verbose=0,
               dry_run=0):
    """Copy an entire directory tree 'src' to a new location 'dst'.  Both
       'src' and 'dst' must be directory names.  If 'src' is not a
       directory, raise DistutilsFileError.  If 'dst' does not exist, it
       is created with 'mkpath()'.  The end result of the copy is that
       every file in 'src' is copied to 'dst', and directories under
       'src' are recursively copied to 'dst'.  Return the list of files
       copied (under their output names) -- note that if 'update' is
       true, this might be less than the list of files considered.
       Return value is not affected by 'dry_run'.

       'preserve_mode' and 'preserve_times' are the same as for
       'copy_file'; note that they only apply to regular files, not to
       directories.  If 'preserve_symlinks' is true, symlinks will be
       copied as symlinks (on platforms that support them!); otherwise
       (the default), the destination of the symlink will be copied.
       'update' and 'verbose' are the same as for 'copy_file'."""

    if not dry_run and not os.path.isdir (src):
        raise DistutilsFileError, \
              "cannot copy tree %s: not a directory" % src
    try:
        names = os.listdir (src)
    except os.error, (errno, errstr):
        if dry_run:
            names = []
        else:
            raise DistutilsFileError, \
                  "error listing files in %s: %s" % (src, errstr)

    if not dry_run:
        mkpath (dst, verbose=verbose)

    outputs = []

    for n in names:
        src_name = os.path.join (src, n)
        dst_name = os.path.join (dst, n)

        if preserve_symlinks and os.path.islink (src_name):
            link_dest = os.readlink (src_name)
            if verbose:
                print "linking %s -> %s" % (dst_name, link_dest)
            if not dry_run:
                os.symlink (link_dest, dst_name)
            outputs.append (dst_name)

        elif os.path.isdir (src_name):
            outputs[-1:] = \
                copy_tree (src_name, dst_name,
                           preserve_mode, preserve_times,
                           preserve_symlinks, update,
                           verbose, dry_run)
        else:
            if (copy_file (src_name, dst_name,
                           preserve_mode, preserve_times,
                           update, verbose, dry_run)):
                outputs.append (dst_name)

    return outputs
|
update, verbose, dry_run)
|
update, verbose, dry_run))
|
def copy_tree (src, dst,
               preserve_mode=1,
               preserve_times=1,
               preserve_symlinks=0,
               update=0,
               verbose=0,
               dry_run=0):
    """Copy an entire directory tree 'src' to a new location 'dst'.  Both
       'src' and 'dst' must be directory names.  If 'src' is not a
       directory, raise DistutilsFileError.  If 'dst' does not exist, it
       is created with 'mkpath()'.  The end result of the copy is that
       every file in 'src' is copied to 'dst', and directories under
       'src' are recursively copied to 'dst'.  Return the list of files
       copied (under their output names) -- note that if 'update' is
       true, this might be less than the list of files considered.
       Return value is not affected by 'dry_run'.

       'preserve_mode' and 'preserve_times' are the same as for
       'copy_file'; note that they only apply to regular files, not to
       directories.  If 'preserve_symlinks' is true, symlinks will be
       copied as symlinks (on platforms that support them!); otherwise
       (the default), the destination of the symlink will be copied.
       'update' and 'verbose' are the same as for 'copy_file'."""

    if not dry_run and not os.path.isdir (src):
        raise DistutilsFileError, \
              "cannot copy tree %s: not a directory" % src
    try:
        names = os.listdir (src)
    except os.error, (errno, errstr):
        if dry_run:
            names = []
        else:
            raise DistutilsFileError, \
                  "error listing files in %s: %s" % (src, errstr)

    if not dry_run:
        mkpath (dst, verbose=verbose)

    outputs = []

    for n in names:
        src_name = os.path.join (src, n)
        dst_name = os.path.join (dst, n)

        if preserve_symlinks and os.path.islink (src_name):
            link_dest = os.readlink (src_name)
            if verbose:
                print "linking %s -> %s" % (dst_name, link_dest)
            if not dry_run:
                os.symlink (link_dest, dst_name)
            outputs.append (dst_name)

        elif os.path.isdir (src_name):
            outputs[-1:] = \
                copy_tree (src_name, dst_name,
                           preserve_mode, preserve_times,
                           preserve_symlinks, update,
                           verbose, dry_run)
        else:
            if (copy_file (src_name, dst_name,
                           preserve_mode, preserve_times,
                           update, verbose, dry_run)):
                outputs.append (dst_name)

    return outputs
|
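The rem/add pair in these two rows is the actual bug fix: `outputs[-1:] = ...` is a slice assignment that replaces the last element already collected, silently dropping one copied file per subdirectory, whereas extend() only appends. A minimal sketch of the difference:

outputs = ['README', 'docs']
outputs[-1:] = ['docs/a.txt', 'docs/b.txt']   # clobbers 'docs'
print outputs   # ['README', 'docs/a.txt', 'docs/b.txt']

outputs = ['README', 'docs']
outputs.extend(['docs/a.txt', 'docs/b.txt'])  # keeps every entry
print outputs   # ['README', 'docs', 'docs/a.txt', 'docs/b.txt']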
s.save_views()
|
def run(app, s):
    try:
        app.start()
    except KeyboardInterrupt:
        pass
    # save the option database
    s.save_views()
|
|
run()
|
app, sb = build(initialcolor=initialcolor,
                initfile=initfile,
                ignore=ignore)
run(app, sb)
sb.save_views()
|
def main():
    try:
        opts, args = getopt.getopt(
            sys.argv[1:],
            'hd:i:X',
            ['database=', 'initfile=', 'ignore', 'help'])
    except getopt.error, msg:
        usage(1, msg)

    if len(args) == 0:
        initialcolor = None
    elif len(args) == 1:
        initialcolor = args[0]
    else:
        usage(1)

    ignore = 0
    initfile = os.path.expanduser('~/.pynche')
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-d', '--database'):
            RGB_TXT.insert(0, arg)
        elif opt in ('-X', '--ignore'):
            ignore = 1
        elif opt in ('-i', '--initfile'):
            initfile = arg
    run()
|
linker_so=('%s -mcygwin -mdll -static' %
|
linker_so=('%s -mcygwin -mdll' %
|
def __init__ (self, verbose=0, dry_run=0, force=0):
|
linker_so='%s -mno-cygwin -mdll -static %s'
|
linker_so='%s -mno-cygwin -mdll %s'
|
def __init__ (self, verbose=0, dry_run=0, force=0):
|
Return a list of all keywords, built-in functions and names currently defines in __main__ that match.
|
Return a list of all keywords, built-in functions and names currently defined in self.namespace that match.
|
def global_matches(self, text):
    """Compute matches when text is a simple name.
|
__main__.__dict__.keys()]:
|
self.namespace.keys()]:
|
def global_matches(self, text):
    """Compute matches when text is a simple name.
|
evaluatable in the globals of __main__, it will be evaluated and its attributes (as revealed by dir()) are used as possible completions. (For class instances, class members are also considered.)
|
evaluatable in self.namespace, it will be evaluated and its attributes (as revealed by dir()) are used as possible completions. (For class instances, class members are also considered.)
|
def attr_matches(self, text):
    """Compute matches when text contains a dot.
|
object = eval(expr, __main__.__dict__)
|
object = eval(expr, self.namespace)
|
def attr_matches(self, text):
    """Compute matches when text contains a dot.
|
s = ''
|
s = []
|
def _safe_read(self, amt):
    """Read the number of bytes requested, compensating for partial reads.
|
chunk = self.fp.read(amt)
|
chunk = self.fp.read(min(amt, MAXAMOUNT))
|
def _safe_read(self, amt):
    """Read the number of bytes requested, compensating for partial reads.
|
s += chunk
|
s.append(chunk)
|
def _safe_read(self, amt):
    """Read the number of bytes requested, compensating for partial reads.
|
return s
|
return ''.join(s)
|
def _safe_read(self, amt):
    """Read the number of bytes requested, compensating for partial reads.
|
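The pattern behind these _safe_read rows: repeated `s += chunk` on strings is quadratic in CPython, since each concatenation copies the whole buffer; collecting chunks in a list and joining once at the end is linear. The idiom in isolation (fp stands for any file-like object):

chunks = []
while 1:
    chunk = fp.read(8192)
    if not chunk:
        break
    chunks.append(chunk)    # O(1) amortized per chunk
data = ''.join(chunks)      # one final copy instead of one per chunk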
assert sre.split("(b)|(:+)", ":a:b::c") == \
    ['', None, ':', 'a', None, ':', '', 'b', None, '', None, '::', 'c']
|
def bump_num(matchobj):
    int_value = int(matchobj.group(0))
    return str(int_value + 1)
|
|
>>> max(tupleids) - min(tupleids)
|
>>> int(max(tupleids) - min(tupleids))
|
>>> def f(n):
|
test(r"""sre.match(r"\%03o" % i, chr(i)) != None""", 1) test(r"""sre.match(r"\%03o0" % i, chr(i)+"0") != None""", 1) test(r"""sre.match(r"\%03o8" % i, chr(i)+"8") != None""", 1) test(r"""sre.match(r"\x%02x" % i, chr(i)) != None""", 1) test(r"""sre.match(r"\x%02x0" % i, chr(i)+"0") != None""", 1) test(r"""sre.match(r"\x%02xz" % i, chr(i)+"z") != None""", 1)
|
test(r"""sre.match(r"\%03o" % i, chr(i)) is not None""", 1) test(r"""sre.match(r"\%03o0" % i, chr(i)+"0") is not None""", 1) test(r"""sre.match(r"\%03o8" % i, chr(i)+"8") is not None""", 1) test(r"""sre.match(r"\x%02x" % i, chr(i)) is not None""", 1) test(r"""sre.match(r"\x%02x0" % i, chr(i)+"0") is not None""", 1) test(r"""sre.match(r"\x%02xz" % i, chr(i)+"z") is not None""", 1)
|
def test(expression, result, exception=None):
    try:
        r = eval(expression)
    except:
        if exception:
            if not isinstance(sys.exc_value, exception):
                print expression, "FAILED"
                # display name, not actual value
                if exception is sre.error:
                    print "expected", "sre.error"
                else:
                    print "expected", exception.__name__
                print "got", sys.exc_type.__name__, str(sys.exc_value)
        else:
            print expression, "FAILED"
            traceback.print_exc(file=sys.stdout)
    else:
        if exception:
            print expression, "FAILED"
            if exception is sre.error:
                print "expected", "sre.error"
            else:
                print "expected", exception.__name__
            print "got result", repr(r)
        else:
            if r != result:
                print expression, "FAILED"
                print "expected", repr(result)
                print "got result", repr(r)
|
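Why these diffs prefer `is not None` over `!= None`: equality can be intercepted by a custom __eq__/__ne__ on the left operand, while identity cannot, and identity is also faster; for the match-object-or-None convention it is the correct test. An illustrative fragment (using re rather than the old sre name):

import re
m = re.match(r"(\d+)", "123abc")
if m is not None:        # identity test; never calls __eq__
    print m.group(1)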
test(r"""sre.match(sre.escape(chr(i)), chr(i)) != None""", 1)
|
test(r"""sre.match(sre.escape(chr(i)), chr(i)) is not None""", 1)
|
def bump_num(matchobj):
    int_value = int(matchobj.group(0))
    return str(int_value + 1)
|
test(r"""pat.match(p) != None""", 1)
|
test(r"""pat.match(p) is not None""", 1)
|
def bump_num(matchobj):
    int_value = int(matchobj.group(0))
    return str(int_value + 1)
|
class _Verbose:
|
class _Verbose(object):
|
def _note(self, format, *args):
    if self.__verbose:
        format = format % args
        format = "%s: %s\n" % (
            currentThread().getName(), format)
        _sys.stderr.write(format)
|
text = text[:75] + ' ...'
|
textlines = text.splitlines()
for i, line in enumerate(textlines):
    if len(line) > 79:
        textlines[i] = line[:75] + ' ...'
text = '\n'.join(textlines)
|
def showtip(self, text, parenleft, parenright):
    """Show the calltip, bind events which will close it and reposition it.
    """
    # truncate overly long calltip
    if len(text) >= 79:
        text = text[:75] + ' ...'
    self.text = text
    if self.tipwindow or not self.text:
        return
|
def test_compresscopy(self):
    data0 = HAMLET_SCENE
    data1 = HAMLET_SCENE.swapcase()
    c0 = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
    bufs0 = []
    bufs0.append(c0.compress(data0))
    c1 = c0.copy()
    bufs1 = bufs0[:]
    bufs0.append(c0.compress(data0))
    bufs0.append(c0.flush())
    s0 = ''.join(bufs0)
    bufs1.append(c1.compress(data1))
    bufs1.append(c1.flush())
    s1 = ''.join(bufs1)
    self.assertEqual(zlib.decompress(s0),data0+data0)
    self.assertEqual(zlib.decompress(s1),data0+data1)

def test_badcompresscopy(self):
    c = zlib.compressobj()
    c.compress(HAMLET_SCENE)
    c.flush()
    self.assertRaises(ValueError, c.copy)

def test_decompresscopy(self):
    data = HAMLET_SCENE
    comp = zlib.compress(data)
    d0 = zlib.decompressobj()
    bufs0 = []
    bufs0.append(d0.decompress(comp[:32]))
    d1 = d0.copy()
    bufs1 = bufs0[:]
    bufs0.append(d0.decompress(comp[32:]))
    s0 = ''.join(bufs0)
    bufs1.append(d1.decompress(comp[32:]))
    s1 = ''.join(bufs1)
    self.assertEqual(s0,s1)
    self.assertEqual(s0,data)

def test_baddecompresscopy(self):
    data = zlib.compress(HAMLET_SCENE)
    d = zlib.decompressobj()
    d.decompress(data)
    d.flush()
    self.assertRaises(ValueError, d.copy)
|
if hasattr(zlib.compressobj(), "copy"): def test_compresscopy(self): data0 = HAMLET_SCENE data1 = HAMLET_SCENE.swapcase() c0 = zlib.compressobj(zlib.Z_BEST_COMPRESSION) bufs0 = [] bufs0.append(c0.compress(data0)) c1 = c0.copy() bufs1 = bufs0[:] bufs0.append(c0.compress(data0)) bufs0.append(c0.flush()) s0 = ''.join(bufs0) bufs1.append(c1.compress(data1)) bufs1.append(c1.flush()) s1 = ''.join(bufs1) self.assertEqual(zlib.decompress(s0),data0+data0) self.assertEqual(zlib.decompress(s1),data0+data1) def test_badcompresscopy(self): c = zlib.compressobj() c.compress(HAMLET_SCENE) c.flush() self.assertRaises(ValueError, c.copy) if hasattr(zlib.decompressobj(), "copy"): def test_decompresscopy(self): data = HAMLET_SCENE comp = zlib.compress(data) d0 = zlib.decompressobj() bufs0 = [] bufs0.append(d0.decompress(comp[:32])) d1 = d0.copy() bufs1 = bufs0[:] bufs0.append(d0.decompress(comp[32:])) s0 = ''.join(bufs0) bufs1.append(d1.decompress(comp[32:])) s1 = ''.join(bufs1) self.assertEqual(s0,s1) self.assertEqual(s0,data) def test_baddecompresscopy(self): data = zlib.compress(HAMLET_SCENE) d = zlib.decompressobj() d.decompress(data) d.flush() self.assertRaises(ValueError, d.copy)
|
def test_compresscopy(self):
    # Test copying a compression object
    data0 = HAMLET_SCENE
    data1 = HAMLET_SCENE.swapcase()
    c0 = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
    bufs0 = []
    bufs0.append(c0.compress(data0))
|
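The add cell wraps the copy tests in hasattr() guards because compress/decompress objects only have a copy() method when the zlib Python was built against supports it. Guarded usage looks like this (a minimal sketch, not the test suite's code):

import zlib

c = zlib.compressobj()
part1 = c.compress("spam " * 100)
if hasattr(c, "copy"):
    fork = c.copy()     # independent stream sharing the history so far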
alltests.addTest(module.suite())
|
alltests.addTest(module.test_suite())
|
def suite():
    test_modules = [
        'test_associate',
        'test_basics',
        'test_compat',
        'test_dbobj',
        'test_dbshelve',
        'test_dbtables',
        'test_env_close',
        'test_get_none',
        'test_join',
        'test_lock',
        'test_misc',
        'test_queue',
        'test_recno',
        'test_thread',
        ]

    alltests = unittest.TestSuite()
    for name in test_modules:
        module = __import__(name)
        alltests.addTest(module.suite())
    return alltests
|
def _fix_link_args (self, objects, output_dir,
                    takes_libs=0, libraries=None, library_dirs=None):
    """Typecheck and fix up some of the arguments supplied to the
       'link_*' methods and return the fixed values.  Specifically:
       ensure that 'objects' is a list; if output_dir is None, use
       self.output_dir; ensure that 'libraries' and 'library_dirs' are
       both lists, and augment them with 'self.libraries' and
       'self.library_dirs'.  If 'takes_libs' is true, return a tuple
       (objects, output_dir, libraries, library_dirs; else return
       (objects, output_dir)."""
|
def _fix_object_args (self, objects, output_dir):
    """Typecheck and fix up some arguments supplied to various methods.
       Specifically: ensure that 'objects' is a list; if output_dir is
       None, replace with self.output_dir.  Return fixed versions of
       'objects' and 'output_dir'."""
|
def _fix_link_args (self, objects, output_dir,
                    takes_libs=0, libraries=None, library_dirs=None):
    """Typecheck and fix up some of the arguments supplied to the
       'link_*' methods and return the fixed values.  Specifically:
       ensure that 'objects' is a list; if output_dir is None, use
       self.output_dir; ensure that 'libraries' and 'library_dirs' are
       both lists, and augment them with 'self.libraries' and
       'self.library_dirs'.  If 'takes_libs' is true, return a tuple
       (objects, output_dir, libraries, library_dirs; else return
       (objects, output_dir)."""
|
if takes_libs:
    if libraries is None:
        libraries = self.libraries
    elif type (libraries) in (ListType, TupleType):
        libraries = list (libraries) + (self.libraries or [])
    else:
        raise TypeError, \
              "'libraries' (if supplied) must be a list of strings"
    if library_dirs is None:
        library_dirs = self.library_dirs
    elif type (library_dirs) in (ListType, TupleType):
        library_dirs = list (library_dirs) + (self.library_dirs or [])
    else:
        raise TypeError, \
              "'library_dirs' (if supplied) must be a list of strings"

    return (objects, output_dir, libraries, library_dirs)
|
return (objects, output_dir)

def _fix_lib_args (self, libraries, library_dirs, runtime_library_dirs):
    """Typecheck and fix up some of the arguments supplied to the
       'link_*' methods.  Specifically: ensure that all arguments are
       lists, and augment them with their permanent versions
       (eg. 'self.libraries' augments 'libraries').  Return a tuple
       with fixed versions of all arguments."""
    if libraries is None:
        libraries = self.libraries
    elif type (libraries) in (ListType, TupleType):
        libraries = list (libraries) + (self.libraries or [])
|
def _fix_link_args (self, objects, output_dir,
                    takes_libs=0, libraries=None, library_dirs=None):
    """Typecheck and fix up some of the arguments supplied to the
       'link_*' methods and return the fixed values.  Specifically:
       ensure that 'objects' is a list; if output_dir is None, use
       self.output_dir; ensure that 'libraries' and 'library_dirs' are
       both lists, and augment them with 'self.libraries' and
       'self.library_dirs'.  If 'takes_libs' is true, return a tuple
       (objects, output_dir, libraries, library_dirs; else return
       (objects, output_dir)."""
|
return (objects, output_dir)
|
    raise TypeError, \
          "'libraries' (if supplied) must be a list of strings"

if library_dirs is None:
    library_dirs = self.library_dirs
elif type (library_dirs) in (ListType, TupleType):
    library_dirs = list (library_dirs) + (self.library_dirs or [])
else:
    raise TypeError, \
          "'library_dirs' (if supplied) must be a list of strings"

if runtime_library_dirs is None:
    runtime_library_dirs = self.runtime_library_dirs
elif type (runtime_library_dirs) in (ListType, TupleType):
    runtime_library_dirs = (list (runtime_library_dirs) +
                            (self.runtime_library_dirs or []))
else:
    raise TypeError, \
          "'runtime_library_dirs' (if supplied) " + \
          "must be a list of strings"

return (libraries, library_dirs, runtime_library_dirs)
|
def _fix_link_args (self, objects, output_dir,
                    takes_libs=0, libraries=None, library_dirs=None):
    """Typecheck and fix up some of the arguments supplied to the
       'link_*' methods and return the fixed values.  Specifically:
       ensure that 'objects' is a list; if output_dir is None, use
       self.output_dir; ensure that 'libraries' and 'library_dirs' are
       both lists, and augment them with 'self.libraries' and
       'self.library_dirs'.  If 'takes_libs' is true, return a tuple
       (objects, output_dir, libraries, library_dirs; else return
       (objects, output_dir)."""
|
once and write the results to sys.stdout after the the
|
once and write the results to sys.stdout after the
|
def usage(outfile):
    outfile.write("""Usage: %s [OPTIONS] <file> [ARGS]
|
try:
    value = SyntaxError(msg, (filename, lineno, offset, line))
except:
    value = msg, (filename, lineno, offset, line)
|
value = SyntaxError(msg, (filename, lineno, offset, line))
|
def showsyntaxerror(self, filename=None):
    """Display the syntax error that just occurred.
|
except:
|
except ImportError:
|
def interact(banner=None, readfunc=None, local=None):
    """Closely emulate the interactive Python interpreter.

    This is a backwards compatible interface to the InteractiveConsole
    class.  When readfunc is not specified, it attempts to import the
    readline module to enable GNU readline if it is available.

    Arguments (all optional, all default to None):

    banner -- passed to InteractiveConsole.interact()
    readfunc -- if not None, replaces InteractiveConsole.raw_input()
    local -- passed to InteractiveInterpreter.__init__()

    """
    console = InteractiveConsole(local)
    if readfunc is not None:
        console.raw_input = readfunc
    else:
        try:
            import readline
        except:
            pass
    console.interact(banner)
|
import thread
|
import threading
|
def __init__(self, maxsize=0):
    """Initialize a queue object with a given maximum size.
|
import dummy_thread as thread
|
import dummy_threading as threading
|
def __init__(self, maxsize=0):
    """Initialize a queue object with a given maximum size.
|
self.mutex = thread.allocate_lock()
self.esema = thread.allocate_lock()
self.esema.acquire()
self.fsema = thread.allocate_lock()
|
self.mutex = threading.Lock()
self.not_empty = threading.Condition(self.mutex)
self.not_full = threading.Condition(self.mutex)
|
def __init__(self, maxsize=0):
    """Initialize a queue object with a given maximum size.
|
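This row is the heart of the Queue rewrite: the old three-lock dance (mutex/esema/fsema) is replaced by two Conditions sharing one mutex, so every state change happens under a single lock and waiters are woken via notify(). A minimal, self-contained sketch of that pattern (TinyQueue is illustrative, not the stdlib class):

import threading
from collections import deque

class TinyQueue:
    def __init__(self, maxsize):
        self.maxsize = maxsize
        self.queue = deque()
        self.mutex = threading.Lock()
        # Both conditions share the same underlying lock.
        self.not_empty = threading.Condition(self.mutex)
        self.not_full = threading.Condition(self.mutex)

    def put(self, item):
        self.not_full.acquire()
        try:
            while len(self.queue) >= self.maxsize:
                self.not_full.wait()        # releases mutex while waiting
            self.queue.append(item)
            self.not_empty.notify()
        finally:
            self.not_full.release()

    def get(self):
        self.not_empty.acquire()
        try:
            while not self.queue:
                self.not_empty.wait()
            item = self.queue.popleft()
            self.not_full.notify()
            return item
        finally:
            self.not_empty.release()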
if block:
|
if not block:
    return self.put_nowait(item)
self.not_full.acquire()
try:
|
def put(self, item, block=True, timeout=None):
    """Put an item into the queue.
|
    self.fsema.acquire()
elif timeout >= 0:
    delay = 0.0005
|
    while self._full():
        self.not_full.wait()
else:
    if timeout < 0:
        raise ValueError("'timeout' must be a positive number")
|
def put(self, item, block=True, timeout=None):
    """Put an item into the queue.
|
while True:
    if self.fsema.acquire(0):
        break
|
while self._full():
|
def put(self, item, block=True, timeout=None):
    """Put an item into the queue.
|
if remaining <= 0:
|
if remaining < 0.0:
|
def put(self, item, block=True, timeout=None):
    """Put an item into the queue.
|
            delay = min(delay * 2, remaining, .05)
            _sleep(delay)
    else:
        raise ValueError("'timeout' must be a positive number")
elif not self.fsema.acquire(0):
    raise Full
self.mutex.acquire()
release_fsema = True
try:
    was_empty = self._empty()
|
self.not_full.wait(remaining)
|
def put(self, item, block=True, timeout=None):
    """Put an item into the queue.
|
if was_empty:
    self.esema.release()
release_fsema = not self._full()
|
self.not_empty.notify()
|
def put(self, item, block=True, timeout=None):
    """Put an item into the queue.
|
if release_fsema:
    self.fsema.release()
self.mutex.release()
|
self.not_full.release()
|
def put(self, item, block=True, timeout=None):
    """Put an item into the queue.
|
return self.put(item, False)
|
self.not_full.acquire()
try:
    if self._full():
        raise Full
    else:
        self._put(item)
        self.not_empty.notify()
finally:
    self.not_full.release()
|
def put_nowait(self, item):
    """Put an item into the queue without blocking.
|
if block:
|
if not block:
    return self.get_nowait()
self.not_empty.acquire()
try:
|
def get(self, block=True, timeout=None):
    """Remove and return an item from the queue.
|
    self.esema.acquire()
elif timeout >= 0:
    delay = 0.0005
|
    while self._empty():
        self.not_empty.wait()
else:
    if timeout < 0:
        raise ValueError("'timeout' must be a positive number")
|
def get(self, block=True, timeout=None):
    """Remove and return an item from the queue.
|
while 1:
    if self.esema.acquire(0):
        break
|
while self._empty():
|
def get(self, block=True, timeout=None):
    """Remove and return an item from the queue.
|
if remaining <= 0:
|
if remaining < 0.0:
|
def get(self, block=True, timeout=None):
    """Remove and return an item from the queue.
|
            delay = min(delay * 2, remaining, .05)
            _sleep(delay)
    else:
        raise ValueError("'timeout' must be a positive number")
elif not self.esema.acquire(0):
    raise Empty
self.mutex.acquire()
release_esema = True
try:
    was_full = self._full()
|
self.not_empty.wait(remaining)
|
def get(self, block=True, timeout=None):
    """Remove and return an item from the queue.
|
if was_full:
    self.fsema.release()
release_esema = not self._empty()
|
self.not_full.notify()
return item
|
def get(self, block=True, timeout=None):
    """Remove and return an item from the queue.
|
if release_esema:
    self.esema.release()
self.mutex.release()
return item
|
self.not_empty.release()
|
def get(self, block=True, timeout=None):
    """Remove and return an item from the queue.
|
return self.get(False)
|
self.not_empty.acquire()
try:
    if self._empty():
        raise Empty
    else:
        item = self._get()
        self.not_full.notify()
        return item
finally:
    self.not_empty.release()
|
def get_nowait(self):
    """Remove and return an item from the queue without blocking.
|
def remap_element_names(root, name_map):
    queue = []
    for child in root.childNodes:
        if child.nodeType == ELEMENT:
            queue.append(child)
    while queue:
        node = queue.pop()
        tagName = node.tagName
        if name_map.has_key(tagName):
            name, attrs = name_map[tagName]
            node._node.name = name
            for attr, value in attrs.items():
                node.setAttribute(attr, value)
        for child in node.childNodes:
            if child.nodeType == ELEMENT:
                queue.append(child)
|
def remap_element_names(root, name_map):
    queue = []
    for child in root.childNodes:
        if child.nodeType == ELEMENT:
            queue.append(child)
    while queue:
        node = queue.pop()
        tagName = node.tagName
        if name_map.has_key(tagName):
            name, attrs = name_map[tagName]
            node._node.name = name
            for attr, value in attrs.items():
                node.setAttribute(attr, value)
        for child in node.childNodes:
            if child.nodeType == ELEMENT:
                queue.append(child)
|
|
"sectionauthor", "seealso",
|
"sectionauthor", "seealso", "itemize",
|
def move_elements_by_name(doc, source, dest, name, sep=None):
    nodes = []
    for child in source.childNodes:
        if child.nodeType == ELEMENT and child.tagName == name:
            nodes.append(child)
    for node in nodes:
        source.removeChild(node)
        dest.appendChild(node)
        if sep:
            dest.appendChild(doc.createTextNode(sep))
|
"index", "indexii", "indexiii", "indexiv", "setindexsubitem",
|
"setindexsubitem",
|
def move_elements_by_name(doc, source, dest, name, sep=None):
    nodes = []
    for child in source.childNodes:
        if child.nodeType == ELEMENT and child.tagName == name:
            nodes.append(child)
    for node in nodes:
        source.removeChild(node)
        dest.appendChild(node)
        if sep:
            dest.appendChild(doc.createTextNode(sep))
|
"moduleauthor", "indexterm",
|
"moduleauthor", "indexterm", "leader",
|
def move_elements_by_name(doc, source, dest, name, sep=None):
    nodes = []
    for child in source.childNodes:
        if child.nodeType == ELEMENT and child.tagName == name:
            nodes.append(child)
    for node in nodes:
        source.removeChild(node)
        dest.appendChild(node)
        if sep:
            dest.appendChild(doc.createTextNode(sep))
|
module_name = entry.getAttribute("name")
|
module_name = entry.getAttribute("module")
|
def fixup_refmodindexes_chunk(container):
    # node is probably a <para>; let's see how often it isn't:
    if container.tagName != PARA_ELEMENT:
        bwrite("--- fixup_refmodindexes_chunk(%s)\n" % container)
    module_entries = find_all_elements(container, "module")
    if not module_entries:
        return
    index_entries = find_all_elements_from_set(container, REFMODINDEX_ELEMENTS)
    removes = []
    for entry in index_entries:
        children = entry.childNodes
        if len(children) != 0:
            bwrite("--- unexpected number of children for %s node:\n"
                   % entry.tagName)
            ewrite(entry.toxml() + "\n")
            continue
        found = 0
        module_name = entry.getAttribute("name")
        for node in module_entries:
            if len(node.childNodes) != 1:
                continue
            this_name = node.childNodes[0].data
            if this_name == module_name:
                found = 1
                node.setAttribute("index", "yes")
        if found:
            removes.append(entry)
    for node in removes:
        container.removeChild(node)
|
remap_element_names(fragment, {
    "tableii": ("table", {"cols": "2"}),
    "tableiii": ("table", {"cols": "3"}),
    "tableiv": ("table", {"cols": "4"}),
    "lineii": ("row", {}),
    "lineiii": ("row", {}),
    "lineiv": ("row", {}),
    "refmodule": ("module", {"link": "link"}),
    })
|
def convert(ifp, ofp):
    p = esistools.ExtendedEsisBuilder()
    p.feed(ifp.read())
    doc = p.document
    fragment = p.fragment
    normalize(fragment)
    simplify(doc, fragment)
    handle_labels(doc, fragment)
    handle_appendix(doc, fragment)
    fixup_trailing_whitespace(doc, {
        "abstract": "\n",
        "title": "",
        "chapter": "\n\n",
        "section": "\n\n",
        "subsection": "\n\n",
        "subsubsection": "\n\n",
        "paragraph": "\n\n",
        "subparagraph": "\n\n",
        })
    cleanup_root_text(doc)
    cleanup_trailing_parens(fragment, ["function", "method", "cfunction"])
    cleanup_synopses(doc, fragment)
    fixup_descriptors(doc, fragment)
    fixup_verbatims(fragment)
    normalize(fragment)
    fixup_paras(doc, fragment)
    fixup_sectionauthors(doc, fragment)
    remap_element_names(fragment, {
        "tableii": ("table", {"cols": "2"}),
        "tableiii": ("table", {"cols": "3"}),
        "tableiv": ("table", {"cols": "4"}),
        "lineii": ("row", {}),
        "lineiii": ("row", {}),
        "lineiv": ("row", {}),
        "refmodule": ("module", {"link": "link"}),
        })
    fixup_table_structures(doc, fragment)
    fixup_rfc_references(doc, fragment)
    fixup_signatures(doc, fragment)
    add_node_ids(fragment)
    fixup_refmodindexes(fragment)
    fixup_bifuncindexes(fragment)
    #
    d = {}
    for gi in p.get_empties():
        d[gi] = gi
    if d.has_key("rfc"):
        del d["rfc"]
    knownempty = d.has_key
    #
    try:
        write_esis(fragment, ofp, knownempty)
    except IOError, (err, msg):
        # Ignore EPIPE; it just means that whoever we're writing to stopped
        # reading.  The rest of the output would be ignored.  All other
        # errors should still be reported,
        if err != errno.EPIPE:
            raise
|
|
makevars = parsesetup.getmakevars(makefile_in)
|
def main():
    # overridable context
    prefix = None                       # settable with -p option
    exec_prefix = None                  # settable with -P option
    extensions = []
    path = sys.path
    odir = ''
    win = sys.platform[:3] == 'win'

    # output files
    frozen_c = 'frozen.c'
    config_c = 'config.c'
    target = 'a.out'                    # normally derived from script name
    makefile = 'Makefile'
    subsystem = 'console'

    # parse command line
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'he:o:p:P:s:w')
    except getopt.error, msg:
        usage('getopt error: ' + str(msg))

    # proces option arguments
    for o, a in opts:
        if o == '-h':
            print __doc__
            return
        if o == '-e':
            extensions.append(a)
        if o == '-o':
            odir = a
        if o == '-p':
            prefix = a
        if o == '-P':
            exec_prefix = a
        if o == '-w':
            win = not win
        if o == '-s':
            if not win:
                usage("-s subsystem option only on Windows")
            subsystem = a

    # default prefix and exec_prefix
    if not exec_prefix:
        if prefix:
            exec_prefix = prefix
        else:
            exec_prefix = sys.exec_prefix
    if not prefix:
        prefix = sys.prefix

    # determine whether -p points to the Python source tree
    ishome = os.path.exists(os.path.join(prefix, 'Include', 'pythonrun.h'))

    # locations derived from options
    version = sys.version[:3]
    if ishome:
        print "(Using Python source directory)"
        binlib = exec_prefix
        incldir = os.path.join(prefix, 'Include')
        config_c_in = os.path.join(prefix, 'Modules', 'config.c.in')
        frozenmain_c = os.path.join(prefix, 'Python', 'frozenmain.c')
        makefile_in = os.path.join(exec_prefix, 'Modules', 'Makefile')
    else:
        binlib = os.path.join(exec_prefix, 'lib', 'python%s' % version, 'config')
        incldir = os.path.join(prefix, 'include', 'python%s' % version)
        config_c_in = os.path.join(binlib, 'config.c.in')
        frozenmain_c = os.path.join(binlib, 'frozenmain.c')
        makefile_in = os.path.join(binlib, 'Makefile')
    supp_sources = []
    defines = []
    includes = ['-I' + incldir, '-I' + binlib]

    # sanity check of directories and files
    for dir in [prefix, exec_prefix, binlib, incldir] + extensions:
        if not os.path.exists(dir):
            usage('needed directory %s not found' % dir)
        if not os.path.isdir(dir):
            usage('%s: not a directory' % dir)
    if win:
        files = supp_sources
    else:
        files = [config_c_in, makefile_in] + supp_sources
    for file in supp_sources:
        if not os.path.exists(file):
            usage('needed file %s not found' % file)
        if not os.path.isfile(file):
            usage('%s: not a plain file' % file)
    if not win:
        for dir in extensions:
            setup = os.path.join(dir, 'Setup')
            if not os.path.exists(setup):
                usage('needed file %s not found' % setup)
            if not os.path.isfile(setup):
                usage('%s: not a plain file' % setup)

    # check that enough arguments are passed
    if not args:
        usage('at least one filename argument required')

    # check that the script name ends in ".py"
    if args[0][-3:] != ".py":
        usage('the script name must have a .py suffix')

    # check that file arguments exist
    for arg in args:
        if not os.path.exists(arg):
            usage('argument %s not found' % arg)
        if not os.path.isfile(arg):
            usage('%s: not a plain file' % arg)

    # process non-option arguments
    scriptfile = args[0]
    modules = args[1:]

    # derive target name from script name
    base = os.path.basename(scriptfile)
    base, ext = os.path.splitext(base)
    if base:
        if base != scriptfile:
            target = base
        else:
            target = base + '.bin'

    # handle -o option
    base_frozen_c = frozen_c
    base_config_c = config_c
    base_target = target
    if odir and not os.path.isdir(odir):
        try:
            os.mkdir(odir)
            print "Created output directory", odir
        except os.error, msg:
            usage('%s: mkdir failed (%s)' % (odir, str(msg)))
    if odir:
        frozen_c = os.path.join(odir, frozen_c)
        config_c = os.path.join(odir, config_c)
        target = os.path.join(odir, target)
        makefile = os.path.join(odir, makefile)

    # Actual work starts here...
    dict = findmodules.findmodules(scriptfile, modules, path)
    names = dict.keys()
    names.sort()
    print "Modules being frozen:"
    for name in names:
        print '\t', name

    backup = frozen_c + '~'
    try:
        os.rename(frozen_c, backup)
    except os.error:
        backup = None
    outfp = open(frozen_c, 'w')
    try:
        makefreeze.makefreeze(outfp, dict)
    finally:
        outfp.close()
    if backup:
        if cmp.cmp(backup, frozen_c):
            sys.stderr.write('%s not changed, not written\n' % frozen_c)
            os.rename(backup, frozen_c)

    if win:
        # Taking a shortcut here...
        import winmakemakefile
        outfp = open(makefile, 'w')
        try:
            winmakemakefile.makemakefile(outfp, locals(),
                                         [frozenmain_c, frozen_c], target)
        finally:
            outfp.close()
        return

    builtins = []
    unknown = []
    mods = dict.keys()
    mods.sort()
    for mod in mods:
        if dict[mod] == '<builtin>':
            builtins.append(mod)
        elif dict[mod] == '<unknown>':
            unknown.append(mod)

    addfiles = []
    if unknown:
        addfiles, addmods = \
                  checkextensions.checkextensions(unknown, extensions)
        for mod in addmods:
            unknown.remove(mod)
        builtins = builtins + addmods
    if unknown:
        sys.stderr.write('Warning: unknown modules remain: %s\n' %
                         string.join(unknown))

    builtins.sort()
    infp = open(config_c_in)
    backup = config_c + '~'
    try:
        os.rename(config_c, backup)
    except os.error:
        backup = None
    outfp = open(config_c, 'w')
    try:
        makeconfig.makeconfig(infp, outfp, builtins)
    finally:
        outfp.close()
    infp.close()
    if backup:
        if cmp.cmp(backup, config_c):
            sys.stderr.write('%s not changed, not written\n' % config_c)
            os.rename(backup, config_c)

    cflags = defines + includes + ['$(OPT)']
    libs = [os.path.join(binlib, 'libpython$(VERSION).a')]

    makevars = parsesetup.getmakevars(makefile_in)
    somevars = {}
    for key in makevars.keys():
        somevars[key] = makevars[key]

    somevars['CFLAGS'] = string.join(cflags) # override
    files = ['$(OPT)', '$(LDFLAGS)', base_config_c, base_frozen_c] + \
            supp_sources + addfiles + libs + \
            ['$(MODLIBS)', '$(LIBS)', '$(SYSLIBS)']

    backup = makefile + '~'
    try:
        os.rename(makefile, backup)
    except os.error:
        backup = None
    outfp = open(makefile, 'w')
    try:
        makemakefile.makemakefile(outfp, somevars, files, base_target)
    finally:
        outfp.close()
    if backup:
        if not cmp.cmp(backup, makefile):
            print 'previous Makefile saved as', backup
        else:
            sys.stderr.write('%s not changed, not written\n' % makefile)
            os.rename(backup, makefile)

    # Done!
    if odir:
        print 'Now run "make" in', odir,
        print 'to build the target:', base_target
    else:
        print 'Now run "make" to build the target:', base_target
|
|
def _utc2time(utc):
    return utc[1]
|
def _utc2time(utc):
    t = utc[1]
    if t < 0:
        t = t + 0x100000000L
    return t
|
def _utc2time(utc):
    return utc[1]
|
del os
|
fp = None
try:
    fp = open(TESTFN, 'w+')
except IOError:
    TMP_TESTFN = os.path.join('/tmp', TESTFN)
    try:
        fp = open(TMP_TESTFN, 'w+')
        TESTFN = TMP_TESTFN
        del TMP_TESTFN
    except IOError:
        print ('WARNING: tests will fail, unable to write to: %s or %s' %
               (TESTFN, TMP_TESTFN))
if fp is not None:
    fp.close()
    try:
        os.unlink(TESTFN)
    except:
        pass
del os, fp
|
def fcmp(x, y): # fuzzy comparison function
    if type(x) == type(0.0) or type(y) == type(0.0):
        try:
            x, y = coerce(x, y)
            fuzz = (abs(x) + abs(y)) * FUZZ
            if abs(x-y) <= fuzz:
                return 0
        except:
            pass
    elif type(x) == type(y) and type(x) in (type(()), type([])):
        for i in range(min(len(x), len(y))):
            outcome = fcmp(x[i], y[i])
            if outcome != 0:
                return outcome
        return cmp(len(x), len(y))
    return cmp(x, y)
|
return st[stat.ST_MTIME]
|
return st[stat.ST_ATIME]
|
def getatime(filename):
    """Return the last access time of a file, reported by os.stat()"""
    st = os.stat(filename)
    return st[stat.ST_MTIME]
|
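The fix above corrects a copy-paste bug: getatime() was returning the modification time. For reference, the stat tuple distinguishes the two fields (the path here is just an illustrative existing file):

import os, stat
st = os.stat("setup.py")
print st[stat.ST_ATIME]   # last access time -- what getatime should return
print st[stat.ST_MTIME]   # last modification time -- what getmtime returns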
self.lines = []
|
def __init__(self, fp=None, headers=None, outerboundary="",
             environ=os.environ, keep_blank_values=0, strict_parsing=0):
    """Constructor.  Read multipart/* until last part.
|
|
self.lines.append(line)
|
def read_lines_to_eof(self):
    """Internal: read lines until EOF."""
    while 1:
        line = self.fp.readline()
        if not line:
            self.done = -1
            break
        self.lines.append(line)
        self.file.write(line)
|
|
self.lines.append(line)
|
def read_lines_to_outerboundary(self):
    """Internal: read lines until outerboundary."""
    next = "--" + self.outerboundary
    last = next + "--"
    delim = ""
    while 1:
        line = self.fp.readline()
        if not line:
            self.done = -1
            break
        self.lines.append(line)
        if line[:2] == "--":
            strippedline = string.strip(line)
            if strippedline == next:
                break
            if strippedline == last:
                self.done = 1
                break
        odelim = delim
        if line[-2:] == "\r\n":
            delim = "\r\n"
            line = line[:-2]
        elif line[-1] == "\n":
            delim = "\n"
            line = line[:-1]
        else:
            delim = ""
        self.file.write(odelim + line)
|
|
self.lines.append(line)
|
def skip_lines(self):
    """Internal: skip lines until outer boundary if defined."""
    if not self.outerboundary or self.done:
        return
    next = "--" + self.outerboundary
    last = next + "--"
    while 1:
        line = self.fp.readline()
        if not line:
            self.done = -1
            break
        self.lines.append(line)
        if line[:2] == "--":
            strippedline = string.strip(line)
            if strippedline == next:
                break
            if strippedline == last:
                self.done = 1
                break
|
|
home = os.environ['HOME']
|
home = os.environ.get('HOME')
|
def addpackage(sitedir, name):
    global _dirs_in_sys_path
    if _dirs_in_sys_path is None:
        _init_pathinfo()
        reset = 1
    else:
        reset = 0
    fullname = os.path.join(sitedir, name)
    try:
        f = open(fullname)
    except IOError:
        return
    while 1:
        dir = f.readline()
        if not dir:
            break
        if dir[0] == '#':
            continue
        if dir.startswith("import"):
            exec dir
            continue
        if dir[-1] == '\n':
            dir = dir[:-1]
        dir, dircase = makepath(sitedir, dir)
        if not dircase in _dirs_in_sys_path and os.path.exists(dir):
            sys.path.append(dir)
            _dirs_in_sys_path[dircase] = 1
    if reset:
        _dirs_in_sys_path = None
|
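The change from os.environ['HOME'] to os.environ.get('HOME') swaps a KeyError for a None result when the variable is unset, letting the caller fall back gracefully. In isolation (the fallback shown is illustrative, not the module's):

import os
home = os.environ.get('HOME')   # None when HOME is unset, no exception
if home is None:
    home = os.curdir            # illustrative fallback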
new_sources.append(base + ".c")
|
new_sources.append(base + target_ext)
|
def swig_sources (self, sources):
|
swig_cmd = [swig, "-python", "-dnone", "-ISWIG"]
|
swig_cmd = [swig, "-python", "-dnone", "-ISWIG"]
if self.swig_cpp:
    swig_cmd.append ("-c++")
|
def swig_sources (self, sources):
|
self.announce ("swigging %s to %s" % (src, obj))
self.spawn(swig_cmd + ["-o", swig_targets[source], source])
|
target = swig_targets[source]
self.announce ("swigging %s to %s" % (source, target))
self.spawn(swig_cmd + ["-o", target, source])
|
def swig_sources (self, sources):
|
startofline = tell()
|
try:
    startofline = tell()
except IOError:
    startofline = tell = None
    self.seekable = 0
|
def readheaders(self):
    """Read header lines.

    Read header lines up to the entirely blank line that terminates them.
    The (normally blank) line that ends the headers is skipped, but not
    included in the returned list.  If a non-header line ends the headers,
    (which is an error), an attempt is made to backspace over it; it is
    never included in the returned list.

    The variable self.status is set to the empty string if all went well,
    otherwise it is an error message.  The variable self.headers is a
    completely uninterpreted list of lines contained in the header (so
    printing them will reproduce the header exactly as it appears in the
    file).
    """
    self.dict = {}
    self.unixfrom = ''
    self.headers = list = []
    self.status = ''
    headerseen = ""
    firstline = 1
    startofline = unread = tell = None
    if hasattr(self.fp, 'unread'):
        unread = self.fp.unread
    elif self.seekable:
        tell = self.fp.tell
    while 1:
        if tell:
            startofline = tell()
        line = self.fp.readline()
        if not line:
            self.status = 'EOF in headers'
            break
        # Skip unix From name time lines
        if firstline and line[:5] == 'From ':
            self.unixfrom = self.unixfrom + line
            continue
        firstline = 0
        if headerseen and line[0] in ' \t':
            # It's a continuation line.
            list.append(line)
            x = (self.dict[headerseen] + "\n " + string.strip(line))
            self.dict[headerseen] = string.strip(x)
            continue
        elif self.iscomment(line):
            # It's a comment.  Ignore it.
            continue
        elif self.islast(line):
            # Note! No pushback here!  The delimiter line gets eaten.
            break
        headerseen = self.isheader(line)
        if headerseen:
            # It's a legal header line, save it.
            list.append(line)
            self.dict[headerseen] = string.strip(line[len(headerseen)+1:])
            continue
        else:
            # It's not a header line; throw it back and stop here.
            if not self.dict:
                self.status = 'No headers'
            else:
                self.status = 'Non-header line where header expected'
            # Try to undo the read.
            if unread:
                unread(line)
            elif tell:
                self.fp.seek(startofline)
            else:
                self.status = self.status + '; bad seek'
            break
|
if auth: h.putheader('Authorization', 'Basic %s' % auth)
|
if proxy_auth:
    h.putheader('Proxy-Authorization: Basic %s' % proxy_auth)
if auth:
    h.putheader('Authorization: Basic %s' % auth)
|
def open_https(self, url, data=None):
    """Use HTTPS protocol."""
    import httplib
    user_passwd = None
    if isinstance(url, str):
        host, selector = splithost(url)
        if host:
            user_passwd, host = splituser(host)
            host = unquote(host)
        realhost = host
    else:
        host, selector = url
        urltype, rest = splittype(selector)
        url = rest
        user_passwd = None
        if urltype.lower() != 'https':
            realhost = None
        else:
            realhost, rest = splithost(rest)
            if realhost:
                user_passwd, realhost = splituser(realhost)
            if user_passwd:
                selector = "%s://%s%s" % (urltype, realhost, rest)
        #print "proxy via https:", host, selector
    if not host: raise IOError, ('https error', 'no host given')
    if user_passwd:
        import base64
        auth = base64.encodestring(user_passwd).strip()
    else:
        auth = None
    h = httplib.HTTPS(host, 0,
                      key_file=self.key_file,
                      cert_file=self.cert_file)
    if data is not None:
        h.putrequest('POST', selector)
        h.putheader('Content-type',
                    'application/x-www-form-urlencoded')
        h.putheader('Content-length', '%d' % len(data))
    else:
        h.putrequest('GET', selector)
    if auth: h.putheader('Authorization', 'Basic %s' % auth)
    if realhost: h.putheader('Host', realhost)
    for args in self.addheaders: h.putheader(*args)
    h.endheaders()
    if data is not None:
        h.send(data)
    errcode, errmsg, headers = h.getreply()
    fp = h.getfile()
    if errcode == 200:
        return addinfourl(fp, headers, "https:" + url)
    else:
        if data is None:
            return self.http_error(url, fp, errcode, errmsg, headers)
        else:
            return self.http_error(url, fp, errcode, errmsg, headers, data)
|
See this URL for a description of the basic authentication scheme: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt"""
|
This function supports Basic authentication only."""
|
def http_error_401(self, url, fp, errcode, errmsg, headers, data=None):
    """Error 401 -- authentication required.
    See this URL for a description of the basic authentication scheme:
    http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt"""
    if not 'www-authenticate' in headers:
        URLopener.http_error_default(self, url, fp,
                                     errcode, errmsg, headers)
    stuff = headers['www-authenticate']
    import re
    match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff)
    if not match:
        URLopener.http_error_default(self, url, fp,
                                     errcode, errmsg, headers)
    scheme, realm = match.groups()
    if scheme.lower() != 'basic':
        URLopener.http_error_default(self, url, fp,
                                     errcode, errmsg, headers)
    name = 'retry_' + self.type + '_basic_auth'
    if data is None:
        return getattr(self,name)(url, realm)
    else:
        return getattr(self,name)(url, realm, data)
|
def http_error_407(self, url, fp, errcode, errmsg, headers, data=None):
    """Error 407 -- proxy authentication required.
    This function supports Basic authentication only."""
    if not 'proxy-authenticate' in headers:
        URLopener.http_error_default(self, url, fp,
                                     errcode, errmsg, headers)
    stuff = headers['proxy-authenticate']
    import re
    match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff)
    if not match:
        URLopener.http_error_default(self, url, fp,
                                     errcode, errmsg, headers)
    scheme, realm = match.groups()
    if scheme.lower() != 'basic':
        URLopener.http_error_default(self, url, fp,
                                     errcode, errmsg, headers)
    name = 'retry_proxy_' + self.type + '_basic_auth'
    if data is None:
        return getattr(self,name)(url, realm)
    else:
        return getattr(self,name)(url, realm, data)

def retry_proxy_http_basic_auth(self, url, realm, data=None):
    host, selector = splithost(url)
    newurl = 'http://' + host + selector
    proxy = self.proxies['http']
    urltype, proxyhost = splittype(proxy)
    proxyhost, proxyselector = splithost(proxyhost)
    i = proxyhost.find('@') + 1
    proxyhost = proxyhost[i:]
    user, passwd = self.get_user_passwd(proxyhost, realm, i)
    if not (user or passwd):
        return None
    proxyhost = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + proxyhost
    self.proxies['http'] = 'http://' + proxyhost + proxyselector
    if data is None:
        return self.open(newurl)
    else:
        return self.open(newurl, data)

def retry_proxy_https_basic_auth(self, url, realm, data=None):
    host, selector = splithost(url)
    newurl = 'https://' + host + selector
    proxy = self.proxies['https']
    urltype, proxyhost = splittype(proxy)
    proxyhost, proxyselector = splithost(proxyhost)
    i = proxyhost.find('@') + 1
    proxyhost = proxyhost[i:]
    user, passwd = self.get_user_passwd(proxyhost, realm, i)
    if not (user or passwd):
        return None
    proxyhost = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + proxyhost
    self.proxies['https'] = 'https://' + proxyhost + proxyselector
    if data is None:
        return self.open(newurl)
    else:
        return self.open(newurl, data)
|
def http_error_401(self, url, fp, errcode, errmsg, headers, data=None):
    """Error 401 -- authentication required.
    See this URL for a description of the basic authentication scheme:
    http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt"""
    if not 'www-authenticate' in headers:
        URLopener.http_error_default(self, url, fp,
                                     errcode, errmsg, headers)
    stuff = headers['www-authenticate']
    import re
    match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff)
    if not match:
        URLopener.http_error_default(self, url, fp,
                                     errcode, errmsg, headers)
    scheme, realm = match.groups()
    if scheme.lower() != 'basic':
        URLopener.http_error_default(self, url, fp,
                                     errcode, errmsg, headers)
    name = 'retry_' + self.type + '_basic_auth'
    if data is None:
        return getattr(self,name)(url, realm)
    else:
        return getattr(self,name)(url, realm, data)
|
|
newurl = '//' + host + selector
return self.open_https(newurl, data)
|
newurl = 'https://' + host + selector
if data is None:
    return self.open(newurl)
else:
    return self.open(newurl, data)
|
def retry_https_basic_auth(self, url, realm, data=None):
    host, selector = splithost(url)
    i = host.find('@') + 1
    host = host[i:]
    user, passwd = self.get_user_passwd(host, realm, i)
    if not (user or passwd):
        return None
    host = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + host
    newurl = '//' + host + selector
    return self.open_https(newurl, data)
|
self.announce ("making hard links in %s..." % base_dir)
|
try:
    link = os.link
    msg = "making hard links in %s..." % base_dir
except AttributeError:
    link = 0
    msg = "copying files to %s..." % base_dir
self.announce (msg)
|
def make_release_tree (self, base_dir, files):
|
if not os.path.exists (dest):
    self.execute (os.link, (file, dest),
                  "linking %s -> %s" % (file, dest))
|
if link:
    if not os.path.exists (dest):
        self.execute (os.link, (file, dest),
                      "linking %s -> %s" % (file, dest))
else:
    self.copy_file (file, dest)
|
def make_release_tree (self, base_dir, files):
|
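The guarded pattern above, hard-link when the platform has os.link and copy otherwise, in a standalone form (copy_or_link and the shutil fallback are illustrative, not distutils APIs):

import os, shutil

def copy_or_link(src, dst):
    if hasattr(os, "link"):       # hard links unsupported on some platforms
        if not os.path.exists(dst):
            os.link(src, dst)     # same inode, no extra disk space
    else:
        shutil.copy2(src, dst)    # metadata-preserving copy as a fallback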
class Konquerer:
    """Controller for the KDE File Manager (kfm, or Konquerer).
|
class Konqueror:
    """Controller for the KDE File Manager (kfm, or Konqueror).
|
def open_new(self, url):
    self._remote("openURL(%s, new-window)" % url)
|
for more information on the Konquerer remote-control interface.
|
for more information on the Konqueror remote-control interface.
|
def open_new(self, url):
    self._remote("openURL(%s, new-window)" % url)
|
register("kfm", Konquerer)
|
register("kfm", Konqueror)
|
def open_new(self, url):
    self._remote("openURL %s" % url)
|
self.unixsocket = 1
|
def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
             facility=LOG_USER):
    """
    Initialize a handler.
|
|
self.unixsocket = 0
|
def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
             facility=LOG_USER):
    """
    Initialize a handler.
|
|
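Context for the unixsocket flag in these rows: SysLogHandler can talk to a local syslog daemon over a Unix domain socket or to a remote one over UDP, and the flag records which mode was chosen from the address argument. Typical usage with the conventional defaults:

import logging
from logging.handlers import SysLogHandler

local = SysLogHandler(address='/dev/log')            # Unix socket -> unixsocket = 1
remote = SysLogHandler(address=('localhost', 514))   # UDP         -> unixsocket = 0
logging.getLogger().addHandler(local)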
if not os.isatty(slave_fd): raise TestFailed, "slave_fd is not a tty"
|
def debug(msg): pass
|
|
args['compiler_so'] = compiler
|
(ccshared,) = sysconfig.get_config_vars('CCSHARED')
args['compiler_so'] = compiler + ' ' + ccshared
|
def build_extensions(self):
|
self.temp_files.append(prog)
|
def _link (self, body,
           headers, include_dirs,
           libraries, library_dirs, lang):
    (src, obj) = self._compile(body, headers, include_dirs, lang)
    prog = os.path.splitext(os.path.basename(src))[0]
    self.temp_files.append(prog) # XXX should be prog + exe_ext
    self.compiler.link_executable([obj], prog,
                                  libraries=libraries,
                                  library_dirs=library_dirs)
    return (src, obj, prog)
|
|
self.openedurl = '%s:%s' % (type, url)
|
def open(self, fullurl, data=None):
    fullurl = unwrap(fullurl)
    if self.tempcache and self.tempcache.has_key(fullurl):
        filename, headers = self.tempcache[fullurl]
        fp = open(filename, 'rb')
        return addinfourl(fp, headers, fullurl)
    type, url = splittype(fullurl)
    if not type: type = 'file'
    self.openedurl = '%s:%s' % (type, url)
    if self.proxies.has_key(type):
        proxy = self.proxies[type]
        type, proxy = splittype(proxy)
        host, selector = splithost(proxy)
        url = (host, fullurl) # Signal special case to open_*()
    name = 'open_' + type
    if '-' in name:
        # replace - with _
        name = string.join(string.split(name, '-'), '_')
    if not hasattr(self, name):
        if data is None:
            return self.open_unknown(fullurl)
        else:
            return self.open_unknown(fullurl, data)
    try:
        if data is None:
            return getattr(self, name)(url)
        else:
            return getattr(self, name)(url, data)
    except socket.error, msg:
        raise IOError, ('socket error', msg), sys.exc_info()[2]
|
|
self.openedurl = url
|
def retrieve(self, url, filename=None):
    url = unwrap(url)
    self.openedurl = url
    if self.tempcache and self.tempcache.has_key(url):
        return self.tempcache[url]
    type, url1 = splittype(url)
    if not filename and (not type or type == 'file'):
        try:
            fp = self.open_local_file(url1)
            del fp
            return url2pathname(splithost(url1)[1]), None
        except IOError, msg:
            pass
    fp = self.open(url)
    headers = fp.info()
    if not filename:
        import tempfile
        filename = tempfile.mktemp()
        self.__tempfiles.append(filename)
    result = filename, headers
    if self.tempcache is not None:
        self.tempcache[url] = result
    tfp = open(filename, 'wb')
    bs = 1024*8
    block = fp.read(bs)
    while block:
        tfp.write(block)
        block = fp.read(bs)
    fp.close()
    tfp.close()
    del fp
    del tfp
    return result
|
|
return addinfourl(fp, headers, self.openedurl)
|
return addinfourl(fp, headers, "http:" + url)
|
def open_http(self, url, data=None):
    import httplib
    if type(url) is type(""):
        host, selector = splithost(url)
        user_passwd, host = splituser(host)
        realhost = host
    else:
        host, selector = url
        urltype, rest = splittype(selector)
        user_passwd = None
        if string.lower(urltype) != 'http':
            realhost = None
        else:
            realhost, rest = splithost(rest)
            user_passwd, realhost = splituser(realhost)
            if user_passwd:
                selector = "%s://%s%s" % (urltype, realhost, rest)
        #print "proxy via http:", host, selector
    if not host: raise IOError, ('http error', 'no host given')
    if user_passwd:
        import base64
        auth = string.strip(base64.encodestring(user_passwd))
    else:
        auth = None
    h = httplib.HTTP(host)
    if data is not None:
        h.putrequest('POST', selector)
        h.putheader('Content-type', 'application/x-www-form-urlencoded')
        h.putheader('Content-length', '%d' % len(data))
    else:
        h.putrequest('GET', selector)
    if auth: h.putheader('Authorization', 'Basic %s' % auth)
    if realhost: h.putheader('Host', realhost)
    for args in self.addheaders: apply(h.putheader, args)
    h.endheaders()
    if data is not None:
        h.send(data + '\r\n')
    errcode, errmsg, headers = h.getreply()
    fp = h.getfile()
    if errcode == 200:
        return addinfourl(fp, headers, self.openedurl)
    else:
        return self.http_error(url, fp, errcode, errmsg, headers)
|
return addinfourl(fp, noheaders(), self.openedurl)
|
return addinfourl(fp, noheaders(), "gopher:" + url)
|
def open_gopher(self, url):
    import gopherlib
    host, selector = splithost(url)
    if not host: raise IOError, ('gopher error', 'no host given')
    type, selector = splitgophertype(selector)
    selector, query = splitquery(selector)
    selector = unquote(selector)
    if query:
        query = unquote(query)
        fp = gopherlib.send_query(selector, query, host)
    else:
        fp = gopherlib.send_selector(selector, host)
    return addinfourl(fp, noheaders(), self.openedurl)
|
noheaders(), self.openedurl)
|
noheaders(), "ftp:" + url)
|
def open_ftp(self, url):
    host, path = splithost(url)
    if not host: raise IOError, ('ftp error', 'no host given')
    host, port = splitport(host)
    user, host = splituser(host)
    if user: user, passwd = splitpasswd(user)
    else: passwd = None
    host = socket.gethostbyname(host)
    if not port:
        import ftplib
        port = ftplib.FTP_PORT
    else:
        port = int(port)
    path, attrs = splitattr(path)
    dirs = string.splitfields(path, '/')
    dirs, file = dirs[:-1], dirs[-1]
    if dirs and not dirs[0]: dirs = dirs[1:]
    key = (user, host, port, string.joinfields(dirs, '/'))
    if len(self.ftpcache) > MAXFTPCACHE:
        # Prune the cache, rather arbitrarily
        for k in self.ftpcache.keys():
            if k != key:
                v = self.ftpcache[k]
                del self.ftpcache[k]
                v.close()
    try:
        if not self.ftpcache.has_key(key):
            self.ftpcache[key] = \
                ftpwrapper(user, passwd, host, port, dirs)
        if not file: type = 'D'
        else: type = 'I'
        for attr in attrs:
            attr, value = splitvalue(attr)
            if string.lower(attr) == 'type' and \
               value in ('a', 'A', 'i', 'I', 'd', 'D'):
                type = string.upper(value)
        return addinfourl(
            self.ftpcache[key].retrfile(file, type),
            noheaders(), self.openedurl)
    except ftperrors(), msg:
        raise IOError, ('ftp error', msg), sys.exc_info()[2]
|
return addinfourl(fp, headers, self.openedurl)
|
return addinfourl(fp, headers, "http:" + url)
|
def http_error_default(self, url, fp, errcode, errmsg, headers):
    return addinfourl(fp, headers, self.openedurl)
|
log.warn(("warngin: no files found matching '%s' " +
|
log.warn(("warning: no files found matching '%s' " +
|
def process_template_line (self, line):
|
self.lineno = self.lineno + string.count(rawdata[i:i], '\n')
|
self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
|
def goahead(self, end):
    rawdata = self.rawdata
    i = 0
    n = len(rawdata)
    while i < n:
        if i > 0:
            self.__at_start = 0
        if self.nomoretags:
            data = rawdata[i:n]
            self.handle_data(data)
            self.lineno = self.lineno + string.count(data, '\n')
            i = n
            break
        res = interesting.search(rawdata, i)
        if res:
            j = res.start(0)
        else:
            j = n
        if i < j:
            data = rawdata[i:j]
            if self.__at_start and space.match(data) is None:
                self.syntax_error('illegal data at start of file')
            self.__at_start = 0
            if not self.stack and space.match(data) is None:
                self.syntax_error('data not in content')
            if not self.__accept_utf8 and illegal.search(data):
                self.syntax_error('illegal character in content')
            self.handle_data(data)
            self.lineno = self.lineno + string.count(data, '\n')
        i = j
        if i == n:
            break
        if rawdata[i] == '<':
            if starttagopen.match(rawdata, i):
                if self.literal:
                    data = rawdata[i]
                    self.handle_data(data)
                    self.lineno = self.lineno + string.count(data, '\n')
                    i = i+1
                    continue
                k = self.parse_starttag(i)
                if k < 0:
                    break
                self.__seen_starttag = 1
                self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
                i = k
                continue
            if endtagopen.match(rawdata, i):
                k = self.parse_endtag(i)
                if k < 0:
                    break
                self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
                i = k
                continue
            if commentopen.match(rawdata, i):
                if self.literal:
                    data = rawdata[i]
                    self.handle_data(data)
                    self.lineno = self.lineno + string.count(data, '\n')
                    i = i+1
                    continue
                k = self.parse_comment(i)
                if k < 0:
                    break
                self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
                i = k
                continue
            if cdataopen.match(rawdata, i):
                k = self.parse_cdata(i)
                if k < 0:
                    break
                self.lineno = self.lineno + string.count(rawdata[i:i], '\n')
                i = k
                continue
            res = xmldecl.match(rawdata, i)
            if res:
                if not self.__at_start:
                    self.syntax_error("<?xml?> declaration not at start of document")
                version, encoding, standalone = res.group('version',
                                                          'encoding',
                                                          'standalone')
                if version[1:-1] != '1.0':
                    raise RuntimeError, 'only XML version 1.0 supported'
                if encoding: encoding = encoding[1:-1]
                if standalone: standalone = standalone[1:-1]
                self.handle_xml(encoding, standalone)
                i = res.end(0)
                continue
            res = procopen.match(rawdata, i)
            if res:
                k = self.parse_proc(i)
                if k < 0:
                    break
                self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
                i = k
                continue
            res = doctype.match(rawdata, i)
            if res:
                if self.literal:
                    data = rawdata[i]
                    self.handle_data(data)
                    self.lineno = self.lineno + string.count(data, '\n')
                    i = i+1
                    continue
                if self.__seen_doctype:
                    self.syntax_error('multiple DOCTYPE elements')
                if self.__seen_starttag:
                    self.syntax_error('DOCTYPE not at beginning of document')
                k = self.parse_doctype(res)
                if k < 0:
                    break
                self.__seen_doctype = res.group('name')
                if self.__map_case:
                    self.__seen_doctype = string.lower(self.__seen_doctype)
                self.lineno = self.lineno + string.count(rawdata[i:k], '\n')
                i = k
                continue
        elif rawdata[i] == '&':
            if self.literal:
                data = rawdata[i]
                self.handle_data(data)
                i = i+1
                continue
            res = charref.match(rawdata, i)
            if res is not None:
                i = res.end(0)
                if rawdata[i-1] != ';':
                    self.syntax_error("`;' missing in charref")
                    i = i-1
                if not self.stack:
                    self.syntax_error('data not in content')
                self.handle_charref(res.group('char')[:-1])
                self.lineno = self.lineno + string.count(res.group(0), '\n')
                continue
            res = entityref.match(rawdata, i)
            if res is not None:
                i = res.end(0)
                if rawdata[i-1] != ';':
                    self.syntax_error("`;' missing in entityref")
                    i = i-1
                name = res.group('name')
                if self.__map_case:
                    name = string.lower(name)
                if self.entitydefs.has_key(name):
                    self.rawdata = rawdata = \
                        rawdata[:res.start(0)] + self.entitydefs[name] + rawdata[i:]
                    n = len(rawdata)
                    i = res.start(0)
                else:
                    self.unknown_entityref(name)
                self.lineno = self.lineno + string.count(res.group(0), '\n')
                continue
        elif rawdata[i] == ']':
            if self.literal:
                data = rawdata[i]
                self.handle_data(data)
                i = i+1
                continue
            if n-i < 3:
                break
            if cdataclose.match(rawdata, i):
                self.syntax_error("bogus `]]>'")
            self.handle_data(rawdata[i])
            i = i+1
            continue
        else:
            raise RuntimeError, 'neither < nor & ??'
        # We get here only if incomplete matches but
        # nothing else
        break
    # end while
    if i > 0:
        self.__at_start = 0
    if end and i < n:
        data = rawdata[i]
        self.syntax_error("bogus `%s'" % data)
        if not self.__accept_utf8 and illegal.search(data):
            self.syntax_error('illegal character in content')
        self.handle_data(data)
        self.lineno = self.lineno + string.count(data, '\n')
        self.rawdata = rawdata[i+1:]
        return self.goahead(end)
    self.rawdata = rawdata[i:]
    if end:
        if not self.__seen_starttag:
            self.syntax_error('no elements in file')
        if self.stack:
            self.syntax_error('missing end tags')
            while self.stack:
                self.finish_endtag(self.stack[-1][0])
|
method = self.elements.get(tag, (None, None))[1] if method is not None: self.handle_endtag(tag, method) else: self.unknown_endtag(tag)
|
def finish_endtag(self, tag): if not tag: self.syntax_error('name-less end tag') found = len(self.stack) - 1 if found < 0: self.unknown_endtag(tag) return else: found = -1 for i in range(len(self.stack)): if tag == self.stack[i][0]: found = i if found == -1: self.syntax_error('unopened end tag') method = self.elements.get(tag, (None, None))[1] if method is not None: self.handle_endtag(tag, method) else: self.unknown_endtag(tag) return while len(self.stack) > found: if found < len(self.stack) - 1: self.syntax_error('missing close tag for %s' % self.stack[-1][2]) nstag = self.stack[-1][2] method = self.elements.get(nstag, (None, None))[1] if method is not None: self.handle_endtag(nstag, method) else: self.unknown_endtag(nstag) if self.__use_namespaces == len(self.stack): self.__use_namespaces = 0 del self.stack[-1]
|
|
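finish_endtag above searches the open-element stack for the innermost occurrence of the tag and closes everything opened after it, reporting each intervening element as a missing close tag. A toy version of that unwinding loop, with a plain list of names and prints in place of the handler dispatch (an illustration, not the class API):

def unwind(stack, tag):
    found = -1
    for i in range(len(stack)):          # last hit = innermost occurrence
        if stack[i] == tag:
            found = i
    if found == -1:
        print 'unopened end tag:', tag
        return
    while len(stack) > found:
        if len(stack) - 1 > found:
            print 'missing close tag for', stack[-1]
        print 'end', stack[-1]
        del stack[-1]

unwind(['html', 'body', 'p', 'b'], 'p')
# missing close tag for b / end b / end p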
def maketables():
|
def maketables(trace=0):
|
def maketables(): unicode = UnicodeData(UNICODE_DATA) # extract unicode properties dummy = (0, 0, 0, 0) table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) # 1) database properties for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = CATEGORY_NAMES.index(record[2]) combining = int(record[3]) bidirectional = BIDIRECTIONAL_NAMES.index(record[4]) mirrored = record[9] == "Y" item = ( category, combining, bidirectional, mirrored ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i # 2) decomposition data # FIXME: <fl> using the encoding stuff from unidb would save # another 50k or so, but I'll leave that for 2.1... decomp_data = [""] decomp_index = [0] * len(unicode.chars) for char in unicode.chars: record = unicode.table[char] if record: if record[5]: try: i = decomp_data.index(record[5]) except ValueError: i = len(decomp_data) decomp_data.append(record[5]) else: i = 0 decomp_index[char] = i FILE = "Modules/unicodedata_db.h" fp = open(FILE, "w") print >>fp, "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print >>fp print >>fp, "/* a list of unique database records */" print >>fp, \ "const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {" for item in table: print >>fp, " {%d, %d, %d, %d}," % item print >>fp, "};" print >>fp # FIXME: the following tables should be made static, and # the support code moved into unicodedatabase.c print >>fp, "/* string literals */" print >>fp, "const char *_PyUnicode_CategoryNames[] = {" for name in CATEGORY_NAMES: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" print >>fp, "const char *_PyUnicode_BidirectionalNames[] = {" for name in BIDIRECTIONAL_NAMES: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" print >>fp, "static const char *decomp_data[] = {" for name in decomp_data: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" # split record index table index1, index2, shift = splitbins(index) print >>fp, "/* index tables for the database records */" print >>fp, "#define SHIFT", shift Array("index1", index1).dump(fp) Array("index2", index2).dump(fp) # split decomposition index table index1, index2, shift = splitbins(decomp_index) print >>fp, "/* index tables for the decomposition data */" print >>fp, "#define DECOMP_SHIFT", shift Array("decomp_index1", index1).dump(fp) Array("decomp_index2", index2).dump(fp) # # 3) unicode type data # extract unicode types dummy = (0, 0, 0, 0, 0, 0) table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = record[2] bidirectional = record[4] flags = 0 if category in ["Lm", "Lt", "Lu", "Ll", "Lo"]: flags |= ALPHA_MASK if category == "Ll": flags |= LOWER_MASK if category == "Zl" or bidirectional == "B": flags |= LINEBREAK_MASK if category == "Zs" or bidirectional in ("WS", "B", "S"): flags |= SPACE_MASK if category == "Lt": flags |= TITLE_MASK if category == "Lu": flags |= UPPER_MASK # use delta predictor for upper/lower/title if record[12]: upper = (int(record[12], 16) - char) & 0xffff else: upper = 0 if record[13]: lower = (int(record[13], 16) - char) & 0xffff else: lower = 0 if record[14]: title = (int(record[14], 16) - char) & 0xffff else: title = 0 # decimal digit, integer digit decimal = 0 if record[6]: flags |= DECIMAL_MASK decimal = int(record[6]) digit = 0 if record[7]: flags |= DIGIT_MASK digit = int(record[7]) item = ( flags, upper, lower, title, decimal, digit ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i print len(table), "ctype entries" FILE = "Objects/unicodetype_db.h" fp = open(FILE, "w") print >>fp, "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print >>fp print >>fp, "/* a list of unique character type descriptors */" print >>fp, "const _PyUnicode_TypeRecord _PyUnicode_TypeRecords[] = {" for item in table: print >>fp, " {%d, %d, %d, %d, %d, %d}," % item print >>fp, "};" print >>fp # split decomposition index table index1, index2, shift = splitbins(index) print >>fp, "/* type indexes */" print >>fp, "#define SHIFT", shift Array("index1", index1).dump(fp) Array("index2", index2).dump(fp)
|
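The type-record pass in the record above stores case mappings as deltas from the character itself, masked to 16 bits, rather than as absolute code points; most letters in a script share the same delta, so the record cache collapses them into a handful of unique entries. A quick round-trip check of that encoding (toy values, not the generated table):

char = 0x61                              # 'a'
upper_code = 0x41                        # 'A'
delta = (upper_code - char) & 0xffff     # encode: wrapped 16-bit delta
assert delta == 0xffe0                   # -0x20 wrapped to 16 bits
decoded = (char + delta) & 0xffff        # decode, as the C lookup side would
assert decoded == upper_code
print hex(delta), hex(decoded)           # 0xffe0 0x41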
index1, index2, shift = splitbins(index)
|
index1, index2, shift = splitbins(index, trace)
|
def maketables(): unicode = UnicodeData(UNICODE_DATA) # extract unicode properties dummy = (0, 0, 0, 0) table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) # 1) database properties for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = CATEGORY_NAMES.index(record[2]) combining = int(record[3]) bidirectional = BIDIRECTIONAL_NAMES.index(record[4]) mirrored = record[9] == "Y" item = ( category, combining, bidirectional, mirrored ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i # 2) decomposition data # FIXME: <fl> using the encoding stuff from unidb would save # another 50k or so, but I'll leave that for 2.1... decomp_data = [""] decomp_index = [0] * len(unicode.chars) for char in unicode.chars: record = unicode.table[char] if record: if record[5]: try: i = decomp_data.index(record[5]) except ValueError: i = len(decomp_data) decomp_data.append(record[5]) else: i = 0 decomp_index[char] = i FILE = "Modules/unicodedata_db.h" fp = open(FILE, "w") print >>fp, "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print >>fp print >>fp, "/* a list of unique database records */" print >>fp, \ "const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {" for item in table: print >>fp, " {%d, %d, %d, %d}," % item print >>fp, "};" print >>fp # FIXME: the following tables should be made static, and # the support code moved into unicodedatabase.c print >>fp, "/* string literals */" print >>fp, "const char *_PyUnicode_CategoryNames[] = {" for name in CATEGORY_NAMES: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" print >>fp, "const char *_PyUnicode_BidirectionalNames[] = {" for name in BIDIRECTIONAL_NAMES: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" print >>fp, "static const char *decomp_data[] = {" for name in decomp_data: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" # split record index table index1, index2, shift = splitbins(index) print >>fp, "/* index tables for the database records */" print >>fp, "#define SHIFT", shift Array("index1", index1).dump(fp) Array("index2", index2).dump(fp) # split decomposition index table index1, index2, shift = splitbins(decomp_index) print >>fp, "/* index tables for the decomposition data */" print >>fp, "#define DECOMP_SHIFT", shift Array("decomp_index1", index1).dump(fp) Array("decomp_index2", index2).dump(fp) # # 3) unicode type data # extract unicode types dummy = (0, 0, 0, 0, 0, 0) table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = record[2] bidirectional = record[4] flags = 0 if category in ["Lm", "Lt", "Lu", "Ll", "Lo"]: flags |= ALPHA_MASK if category == "Ll": flags |= LOWER_MASK if category == "Zl" or bidirectional == "B": flags |= LINEBREAK_MASK if category == "Zs" or bidirectional in ("WS", "B", "S"): flags |= SPACE_MASK if category == "Lt": flags |= TITLE_MASK if category == "Lu": flags |= UPPER_MASK # use delta predictor for upper/lower/title if record[12]: upper = (int(record[12], 16) - char) & 0xffff else: upper = 0 if record[13]: lower = (int(record[13], 16) - char) & 0xffff else: lower = 0 if record[14]: title = (int(record[14], 16) - char) & 0xffff else: title = 0 # decimal digit, integer digit decimal = 0 if record[6]: flags |= DECIMAL_MASK decimal = int(record[6]) digit = 0 if record[7]: flags |= DIGIT_MASK digit = int(record[7]) item = ( flags, upper, lower, title, decimal, digit ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i print len(table), "ctype entries" FILE = "Objects/unicodetype_db.h" fp = open(FILE, "w") print >>fp, "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print >>fp print >>fp, "/* a list of unique character type descriptors */" print >>fp, "const _PyUnicode_TypeRecord _PyUnicode_TypeRecords[] = {" for item in table: print >>fp, " {%d, %d, %d, %d, %d, %d}," % item print >>fp, "};" print >>fp # split decomposition index table index1, index2, shift = splitbins(index) print >>fp, "/* type indexes */" print >>fp, "#define SHIFT", shift Array("index1", index1).dump(fp) Array("index2", index2).dump(fp)
|
index1, index2, shift = splitbins(decomp_index)
|
index1, index2, shift = splitbins(decomp_index, trace)
|
def maketables(): unicode = UnicodeData(UNICODE_DATA) # extract unicode properties dummy = (0, 0, 0, 0) table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) # 1) database properties for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = CATEGORY_NAMES.index(record[2]) combining = int(record[3]) bidirectional = BIDIRECTIONAL_NAMES.index(record[4]) mirrored = record[9] == "Y" item = ( category, combining, bidirectional, mirrored ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i # 2) decomposition data # FIXME: <fl> using the encoding stuff from unidb would save # another 50k or so, but I'll leave that for 2.1... decomp_data = [""] decomp_index = [0] * len(unicode.chars) for char in unicode.chars: record = unicode.table[char] if record: if record[5]: try: i = decomp_data.index(record[5]) except ValueError: i = len(decomp_data) decomp_data.append(record[5]) else: i = 0 decomp_index[char] = i FILE = "Modules/unicodedata_db.h" fp = open(FILE, "w") print >>fp, "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print >>fp print >>fp, "/* a list of unique database records */" print >>fp, \ "const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {" for item in table: print >>fp, " {%d, %d, %d, %d}," % item print >>fp, "};" print >>fp # FIXME: the following tables should be made static, and # the support code moved into unicodedatabase.c print >>fp, "/* string literals */" print >>fp, "const char *_PyUnicode_CategoryNames[] = {" for name in CATEGORY_NAMES: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" print >>fp, "const char *_PyUnicode_BidirectionalNames[] = {" for name in BIDIRECTIONAL_NAMES: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" print >>fp, "static const char *decomp_data[] = {" for name in decomp_data: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" # split record index table index1, index2, shift = splitbins(index) print >>fp, "/* index tables for the database records */" print >>fp, "#define SHIFT", shift Array("index1", index1).dump(fp) Array("index2", index2).dump(fp) # split decomposition index table index1, index2, shift = splitbins(decomp_index) print >>fp, "/* index tables for the decomposition data */" print >>fp, "#define DECOMP_SHIFT", shift Array("decomp_index1", index1).dump(fp) Array("decomp_index2", index2).dump(fp) # # 3) unicode type data # extract unicode types dummy = (0, 0, 0, 0, 0, 0) table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = record[2] bidirectional = record[4] flags = 0 if category in ["Lm", "Lt", "Lu", "Ll", "Lo"]: flags |= ALPHA_MASK if category == "Ll": flags |= LOWER_MASK if category == "Zl" or bidirectional == "B": flags |= LINEBREAK_MASK if category == "Zs" or bidirectional in ("WS", "B", "S"): flags |= SPACE_MASK if category == "Lt": flags |= TITLE_MASK if category == "Lu": flags |= UPPER_MASK # use delta predictor for upper/lower/title if record[12]: upper = (int(record[12], 16) - char) & 0xffff else: upper = 0 if record[13]: lower = (int(record[13], 16) - char) & 0xffff else: lower = 0 if record[14]: title = (int(record[14], 16) - char) & 0xffff else: title = 0 # decimal digit, integer digit decimal = 0 if record[6]: flags |= DECIMAL_MASK decimal = int(record[6]) digit = 0 if record[7]: flags |= DIGIT_MASK digit = int(record[7]) item = ( flags, upper, lower, title, decimal, digit ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i print len(table), "ctype entries" FILE = "Objects/unicodetype_db.h" fp = open(FILE, "w") print >>fp, "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print >>fp print >>fp, "/* a list of unique character type descriptors */" print >>fp, "const _PyUnicode_TypeRecord _PyUnicode_TypeRecords[] = {" for item in table: print >>fp, " {%d, %d, %d, %d, %d, %d}," % item print >>fp, "};" print >>fp # split decomposition index table index1, index2, shift = splitbins(index) print >>fp, "/* type indexes */" print >>fp, "#define SHIFT", shift Array("index1", index1).dump(fp) Array("index2", index2).dump(fp)
|
print len(table), "ctype entries"
|
def maketables(): unicode = UnicodeData(UNICODE_DATA) # extract unicode properties dummy = (0, 0, 0, 0) table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) # 1) database properties for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = CATEGORY_NAMES.index(record[2]) combining = int(record[3]) bidirectional = BIDIRECTIONAL_NAMES.index(record[4]) mirrored = record[9] == "Y" item = ( category, combining, bidirectional, mirrored ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i # 2) decomposition data # FIXME: <fl> using the encoding stuff from unidb would save # another 50k or so, but I'll leave that for 2.1... decomp_data = [""] decomp_index = [0] * len(unicode.chars) for char in unicode.chars: record = unicode.table[char] if record: if record[5]: try: i = decomp_data.index(record[5]) except ValueError: i = len(decomp_data) decomp_data.append(record[5]) else: i = 0 decomp_index[char] = i FILE = "Modules/unicodedata_db.h" fp = open(FILE, "w") print >>fp, "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print >>fp print >>fp, "/* a list of unique database records */" print >>fp, \ "const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {" for item in table: print >>fp, " {%d, %d, %d, %d}," % item print >>fp, "};" print >>fp # FIXME: the following tables should be made static, and # the support code moved into unicodedatabase.c print >>fp, "/* string literals */" print >>fp, "const char *_PyUnicode_CategoryNames[] = {" for name in CATEGORY_NAMES: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" print >>fp, "const char *_PyUnicode_BidirectionalNames[] = {" for name in BIDIRECTIONAL_NAMES: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" print >>fp, "static const char *decomp_data[] = {" for name in decomp_data: print >>fp, " \"%s\"," % name print >>fp, " NULL" print >>fp, "};" # split record index table index1, index2, shift = splitbins(index) print >>fp, "/* index tables for the database records */" print >>fp, "#define SHIFT", shift Array("index1", index1).dump(fp) Array("index2", index2).dump(fp) # split decomposition index table index1, index2, shift = splitbins(decomp_index) print >>fp, "/* index tables for the decomposition data */" print >>fp, "#define DECOMP_SHIFT", shift Array("decomp_index1", index1).dump(fp) Array("decomp_index2", index2).dump(fp) # # 3) unicode type data # extract unicode types dummy = (0, 0, 0, 0, 0, 0) table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = record[2] bidirectional = record[4] flags = 0 if category in ["Lm", "Lt", "Lu", "Ll", "Lo"]: flags |= ALPHA_MASK if category == "Ll": flags |= LOWER_MASK if category == "Zl" or bidirectional == "B": flags |= LINEBREAK_MASK if category == "Zs" or bidirectional in ("WS", "B", "S"): flags |= SPACE_MASK if category == "Lt": flags |= TITLE_MASK if category == "Lu": flags |= UPPER_MASK # use delta predictor for upper/lower/title if record[12]: upper = (int(record[12], 16) - char) & 0xffff else: upper = 0 if record[13]: lower = (int(record[13], 16) - char) & 0xffff else: lower = 0 if record[14]: title = (int(record[14], 16) - char) & 0xffff else: title = 0 # decimal digit, integer digit decimal = 0 if record[6]: flags |= DECIMAL_MASK decimal = int(record[6]) digit = 0 if record[7]: flags |= DIGIT_MASK digit = int(record[7]) item = ( flags, upper, lower, title, decimal, digit ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i print len(table), "ctype entries" FILE = "Objects/unicodetype_db.h" fp = open(FILE, "w") print >>fp, "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print >>fp print >>fp, "/* a list of unique character type descriptors */" print >>fp, "const _PyUnicode_TypeRecord _PyUnicode_TypeRecords[] = {" for item in table: print >>fp, " {%d, %d, %d, %d, %d, %d}," % item print >>fp, "};" print >>fp # split decomposition index table index1, index2, shift = splitbins(index) print >>fp, "/* type indexes */" print >>fp, "#define SHIFT", shift Array("index1", index1).dump(fp) Array("index2", index2).dump(fp)
|
|
def __init__(self, filename):
|
def __init__(self, filename, expand=1):
|
def __init__(self, filename): file = open(filename) table = [None] * 65536 while 1: s = file.readline() if not s: break s = string.split(string.strip(s), ";") char = string.atoi(s[0], 16) table[char] = s
|
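For reference, the loader in the record above indexes a 65536-slot list by code point, each slot holding the semicolon-split fields of one UnicodeData.txt line; field 2 is the general category, field 5 the decomposition, and fields 12-14 the upper/lower/title mappings that maketables consumes. A sample line (believed to match the UCD entry for U+0041, but shown here only as an illustration):

import string

line = "0041;LATIN CAPITAL LETTER A;Lu;0;L;;;;;N;;;;0061;"
s = string.split(string.strip(line), ";")
char = string.atoi(s[0], 16)
print char       # 65
print s[2]       # Lu    (general category)
print s[13]      # 0061  (lowercase mapping, i.e. record[13] above)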
If optional arg trace is true (default false), progress info is printed to sys.stderr.
|
If optional arg trace is non-zero (default zero), progress info is printed to sys.stderr. The higher the value, the more info you'll get.
|
def splitbins(t, trace=0): """t, trace=0 -> (t1, t2, shift). Split a table to save space. t is a sequence of ints. This function can be useful to save space if many of the ints are the same. t1 and t2 are lists of ints, and shift is an int, chosen to minimize the combined size of t1 and t2 (in C code), and where for each i in range(len(t)), t[i] == t2[(t1[i >> shift] << shift) + (i & mask)] where mask is a bitmask isolating the last "shift" bits. If optional arg trace is true (default false), progress info is printed to sys.stderr. """ import sys if trace: def dump(t1, t2, shift, bytes): print >>sys.stderr, "%d+%d bins at shift %d; %d bytes" % ( len(t1), len(t2), shift, bytes) print >>sys.stderr, "Size of original table:", len(t)*getsize(t), \ "bytes" n = len(t)-1 # last valid index maxshift = 0 # the most we can shift n and still have something left if n > 0: while n >> 1: n >>= 1 maxshift += 1 del n bytes = sys.maxint # smallest total size so far t = tuple(t) # so slices can be dict keys for shift in range(maxshift + 1): t1 = [] t2 = [] size = 2**shift bincache = {} for i in range(0, len(t), size): bin = t[i:i+size] index = bincache.get(bin) if index is None: index = len(t2) bincache[bin] = index t2.extend(bin) t1.append(index >> shift) # determine memory size b = len(t1)*getsize(t1) + len(t2)*getsize(t2) if trace: dump(t1, t2, shift, b) if b < bytes: best = t1, t2, shift bytes = b t1, t2, shift = best if trace: print >>sys.stderr, "Best:", dump(t1, t2, shift, bytes) if __debug__: # exhaustively verify that the decomposition is correct mask = ~((~0) << shift) # i.e., low-bit mask of shift bits for i in xrange(len(t)): assert t[i] == t2[(t1[i >> shift] << shift) + (i & mask)] return best
|
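The invariant in splitbins' docstring above, t[i] == t2[(t1[i >> shift] << shift) + (i & mask)], is easiest to see with a fixed shift; the real function additionally searches every feasible shift for the smallest combined size. A toy split at shift 2, over assumed sample data:

def split_fixed(t, shift):
    size = 2 ** shift
    t1, t2, bincache = [], [], {}
    for i in range(0, len(t), size):
        bin = tuple(t[i:i+size])
        index = bincache.get(bin)
        if index is None:                # first time we see this chunk
            bincache[bin] = index = len(t2)
            t2.extend(bin)
        t1.append(index >> shift)        # chunk number, not raw offset
    return t1, t2

t = [7, 7, 7, 7, 0, 0, 0, 1, 7, 7, 7, 7, 0, 0, 0, 2]
shift = 2
t1, t2 = split_fixed(t, shift)
mask = (1 << shift) - 1
for i in range(len(t)):
    assert t[i] == t2[(t1[i >> shift] << shift) + (i & mask)]
print t1, t2     # [0, 1, 0, 2] [7, 7, 7, 7, 0, 0, 0, 1, 0, 0, 0, 2]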
if trace:
|
if trace > 1:
|
def dump(t1, t2, shift, bytes): print >>sys.stderr, "%d+%d bins at shift %d; %d bytes" % ( len(t1), len(t2), shift, bytes)
|
maketables()
|
maketables(1)
|
def dump(t1, t2, shift, bytes): print >>sys.stderr, "%d+%d bins at shift %d; %d bytes" % ( len(t1), len(t2), shift, bytes)
|
gen.startElementNS((ns_uri, "doc"), "ns:doc", {}) gen.endElementNS((ns_uri, "doc"), "ns:doc")
|
gen.startElementNS((ns_uri, "doc"), "ns1:doc", {}) gen.endElementNS((ns_uri, "doc"), "ns1:doc")
|
def test_xmlgen_ns(): result = StringIO() gen = XMLGenerator(result) gen.startDocument() gen.startPrefixMapping("ns1", ns_uri) gen.startElementNS((ns_uri, "doc"), "ns:doc", {}) gen.endElementNS((ns_uri, "doc"), "ns:doc") gen.endPrefixMapping("ns1") gen.endDocument() return result.getvalue() == start + ('<ns1:doc xmlns:ns1="%s"></ns1:doc>' % ns_uri)
|