rem (stringlengths 0-322k)
| add (stringlengths 0-2.05M)
| context (stringlengths 8-228k)
|
---|---|---|
test(r"""sre.match('(a)|(b)', 'b').start(1)""", -1) test(r"""sre.match('(a)|(b)', 'b').end(1)""", -1) test(r"""sre.match('(a)|(b)', 'b').span(1)""", (-1, -1))
|
test(r"""sre.match(r'(a)|(b)', 'b').start(1)""", -1) test(r"""sre.match(r'(a)|(b)', 'b').end(1)""", -1) test(r"""sre.match(r'(a)|(b)', 'b').span(1)""", (-1, -1))
|
def test(expression, result, exception=None): try: r = eval(expression) except: if exception: if not isinstance(sys.exc_value, exception): print expression, "FAILED" # display name, not actual value if exception is sre.error: print "expected", "sre.error" else: print "expected", exception.__name__ print "got", sys.exc_type.__name__, str(sys.exc_value) else: print expression, "FAILED" traceback.print_exc(file=sys.stdout) else: if exception: print expression, "FAILED" if exception is sre.error: print "expected", "sre.error" else: print "expected", exception.__name__ print "got result", repr(r) else: if r != result: print expression, "FAILED" print "expected", repr(result) print "got result", repr(r)
|
test(r"""sre.sub("(?i)b+", "x", "bbbb BBBB")""", 'x x')
|
test(r"""sre.sub(r"(?i)b+", "x", "bbbb BBBB")""", 'x x')
|
def test(expression, result, exception=None): try: r = eval(expression) except: if exception: if not isinstance(sys.exc_value, exception): print expression, "FAILED" # display name, not actual value if exception is sre.error: print "expected", "sre.error" else: print "expected", exception.__name__ print "got", sys.exc_type.__name__, str(sys.exc_value) else: print expression, "FAILED" traceback.print_exc(file=sys.stdout) else: if exception: print expression, "FAILED" if exception is sre.error: print "expected", "sre.error" else: print "expected", exception.__name__ print "got result", repr(r) else: if r != result: print expression, "FAILED" print "expected", repr(result) print "got result", repr(r)
|
test(r"""sre.sub('.', lambda m: r"\n", 'x')""", '\\n') test(r"""sre.sub('.', r"\n", 'x')""", '\n')
|
test(r"""sre.sub(r'.', lambda m: r"\n", 'x')""", '\\n') test(r"""sre.sub(r'.', r"\n", 'x')""", '\n')
|
def bump_num(matchobj): int_value = int(matchobj.group(0)) return str(int_value + 1)
|
test(r"""sre.sub('(.)', s, 'x')""", 'xx') test(r"""sre.sub('(.)', sre.escape(s), 'x')""", s) test(r"""sre.sub('(.)', lambda m: s, 'x')""", s) test(r"""sre.sub('(?P<a>x)', '\g<a>\g<a>', 'xx')""", 'xxxx') test(r"""sre.sub('(?P<a>x)', '\g<a>\g<1>', 'xx')""", 'xxxx') test(r"""sre.sub('(?P<unk>x)', '\g<unk>\g<unk>', 'xx')""", 'xxxx') test(r"""sre.sub('(?P<unk>x)', '\g<1>\g<1>', 'xx')""", 'xxxx') test(r"""sre.sub('a', r'\t\n\v\r\f\a\b\B\Z\a\A\w\W\s\S\d\D', 'a')""", '\t\n\v\r\f\a\b\\B\\Z\a\\A\\w\\W\\s\\S\\d\\D') test(r"""sre.sub('a', '\t\n\v\r\f\a', 'a')""", '\t\n\v\r\f\a') test(r"""sre.sub('a', '\t\n\v\r\f\a', 'a')""", (chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7))) test(r"""sre.sub('^\s*', 'X', 'test')""", 'Xtest')
|
test(r"""sre.sub(r'(.)', s, 'x')""", 'xx') test(r"""sre.sub(r'(.)', sre.escape(s), 'x')""", s) test(r"""sre.sub(r'(.)', lambda m: s, 'x')""", s) test(r"""sre.sub(r'(?P<a>x)', '\g<a>\g<a>', 'xx')""", 'xxxx') test(r"""sre.sub(r'(?P<a>x)', '\g<a>\g<1>', 'xx')""", 'xxxx') test(r"""sre.sub(r'(?P<unk>x)', '\g<unk>\g<unk>', 'xx')""", 'xxxx') test(r"""sre.sub(r'(?P<unk>x)', '\g<1>\g<1>', 'xx')""", 'xxxx') test(r"""sre.sub(r'a', r'\t\n\v\r\f\a\b\B\Z\a\A\w\W\s\S\d\D', 'a')""", '\t\n\v\r\f\a\b\\B\\Z\a\\A\\w\\W\\s\\S\\d\\D') test(r"""sre.sub(r'a', '\t\n\v\r\f\a', 'a')""", '\t\n\v\r\f\a') test(r"""sre.sub(r'a', '\t\n\v\r\f\a', 'a')""", (chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7))) test(r"""sre.sub(r'^\s*', 'X', 'test')""", 'Xtest')
|
def bump_num(matchobj): int_value = int(matchobj.group(0)) return str(int_value + 1)
|
test(r"""sre.sub('a', 'b', 'aaaaa')""", 'bbbbb') test(r"""sre.sub('a', 'b', 'aaaaa', 1)""", 'baaaa')
|
test(r"""sre.sub(r'a', 'b', 'aaaaa')""", 'bbbbb') test(r"""sre.sub(r'a', 'b', 'aaaaa', 1)""", 'baaaa')
|
def bump_num(matchobj): int_value = int(matchobj.group(0)) return str(int_value + 1)
|
test(r"""sre.sub('(?P<a>x)', '\g<a', 'xx')""", None, sre.error) test(r"""sre.sub('(?P<a>x)', '\g<', 'xx')""", None, sre.error) test(r"""sre.sub('(?P<a>x)', '\g', 'xx')""", None, sre.error) test(r"""sre.sub('(?P<a>x)', '\g<a a>', 'xx')""", None, sre.error) test(r"""sre.sub('(?P<a>x)', '\g<1a1>', 'xx')""", None, sre.error) test(r"""sre.sub('(?P<a>x)', '\g<ab>', 'xx')""", None, IndexError) test(r"""sre.sub('(?P<a>x)|(?P<b>y)', '\g<b>', 'xx')""", None, sre.error) test(r"""sre.sub('(?P<a>x)|(?P<b>y)', '\\2', 'xx')""", None, sre.error)
|
test(r"""sre.sub(r'(?P<a>x)', '\g<a', 'xx')""", None, sre.error) test(r"""sre.sub(r'(?P<a>x)', '\g<', 'xx')""", None, sre.error) test(r"""sre.sub(r'(?P<a>x)', '\g', 'xx')""", None, sre.error) test(r"""sre.sub(r'(?P<a>x)', '\g<a a>', 'xx')""", None, sre.error) test(r"""sre.sub(r'(?P<a>x)', '\g<1a1>', 'xx')""", None, sre.error) test(r"""sre.sub(r'(?P<a>x)', '\g<ab>', 'xx')""", None, IndexError) test(r"""sre.sub(r'(?P<a>x)|(?P<b>y)', '\g<b>', 'xx')""", None, sre.error) test(r"""sre.sub(r'(?P<a>x)|(?P<b>y)', '\\2', 'xx')""", None, sre.error)
|
def bump_num(matchobj): int_value = int(matchobj.group(0)) return str(int_value + 1)
|
test(r"""sre.subn("(?i)b+", "x", "bbbb BBBB")""", ('x x', 2)) test(r"""sre.subn("b+", "x", "bbbb BBBB")""", ('x BBBB', 1)) test(r"""sre.subn("b+", "x", "xyz")""", ('xyz', 0)) test(r"""sre.subn("b*", "x", "xyz")""", ('xxxyxzx', 4)) test(r"""sre.subn("b*", "x", "xyz", 2)""", ('xxxyz', 2))
|
test(r"""sre.subn(r"(?i)b+", "x", "bbbb BBBB")""", ('x x', 2)) test(r"""sre.subn(r"b+", "x", "bbbb BBBB")""", ('x BBBB', 1)) test(r"""sre.subn(r"b+", "x", "xyz")""", ('xyz', 0)) test(r"""sre.subn(r"b*", "x", "xyz")""", ('xxxyxzx', 4)) test(r"""sre.subn(r"b*", "x", "xyz", 2)""", ('xxxyz', 2))
|
def bump_num(matchobj): int_value = int(matchobj.group(0)) return str(int_value + 1)
|
test(r"""sre.split(":", ":a:b::c")""", ['', 'a', 'b', '', 'c']) test(r"""sre.split(":*", ":a:b::c")""", ['', 'a', 'b', 'c']) test(r"""sre.split("(:*)", ":a:b::c")""", ['', ':', 'a', ':', 'b', '::', 'c']) test(r"""sre.split("(?::*)", ":a:b::c")""", ['', 'a', 'b', 'c']) test(r"""sre.split("(:)*", ":a:b::c")""", ['', ':', 'a', ':', 'b', ':', 'c']) test(r"""sre.split("([b:]+)", ":a:b::c")""", ['', ':', 'a', ':b::', 'c']) test(r"""sre.split("(b)|(:+)", ":a:b::c")""",
|
test(r"""sre.split(r":", ":a:b::c")""", ['', 'a', 'b', '', 'c']) test(r"""sre.split(r":*", ":a:b::c")""", ['', 'a', 'b', 'c']) test(r"""sre.split(r"(:*)", ":a:b::c")""", ['', ':', 'a', ':', 'b', '::', 'c']) test(r"""sre.split(r"(?::*)", ":a:b::c")""", ['', 'a', 'b', 'c']) test(r"""sre.split(r"(:)*", ":a:b::c")""", ['', ':', 'a', ':', 'b', ':', 'c']) test(r"""sre.split(r"([b:]+)", ":a:b::c")""", ['', ':', 'a', ':b::', 'c']) test(r"""sre.split(r"(b)|(:+)", ":a:b::c")""",
|
def bump_num(matchobj): int_value = int(matchobj.group(0)) return str(int_value + 1)
|
test(r"""sre.split("(?:b)|(?::+)", ":a:b::c")""", ['', 'a', '', '', 'c']) test(r"""sre.split(":", ":a:b::c", 2)""", ['', 'a', 'b::c']) test(r"""sre.split(':', 'a:b:c:d', 2)""", ['a', 'b', 'c:d']) test(r"""sre.split("(:)", ":a:b::c", 2)""", ['', ':', 'a', ':', 'b::c']) test(r"""sre.split("(:*)", ":a:b::c", 2)""", ['', ':', 'a', ':', 'b::c'])
|
test(r"""sre.split(r"(?:b)|(?::+)", ":a:b::c")""", ['', 'a', '', '', 'c']) test(r"""sre.split(r":", ":a:b::c", 2)""", ['', 'a', 'b::c']) test(r"""sre.split(r':', 'a:b:c:d', 2)""", ['a', 'b', 'c:d']) test(r"""sre.split(r"(:)", ":a:b::c", 2)""", ['', ':', 'a', ':', 'b::c']) test(r"""sre.split(r"(:*)", ":a:b::c", 2)""", ['', ':', 'a', ':', 'b::c'])
|
def bump_num(matchobj): int_value = int(matchobj.group(0)) return str(int_value + 1)
|
test(r"""sre.findall(":+", "abc")""", []) test(r"""sre.findall(":+", "a:b::c:::d")""", [":", "::", ":::"]) test(r"""sre.findall("(:+)", "a:b::c:::d")""", [":", "::", ":::"]) test(r"""sre.findall("(:)(:*)", "a:b::c:::d")""",
|
test(r"""sre.findall(r":+", "abc")""", []) test(r"""sre.findall(r":+", "a:b::c:::d")""", [":", "::", ":::"]) test(r"""sre.findall(r"(:+)", "a:b::c:::d")""", [":", "::", ":::"]) test(r"""sre.findall(r"(:)(:*)", "a:b::c:::d")""",
|
def bump_num(matchobj): int_value = int(matchobj.group(0)) return str(int_value + 1)
|
test(r"""sre.findall("(a)|(b)", "abc")""", [("a", ""), ("", "b")])
|
test(r"""sre.findall(r"(a)|(b)", "abc")""", [("a", ""), ("", "b")])
|
def bump_num(matchobj): int_value = int(matchobj.group(0)) return str(int_value + 1)
|
test(r"""sre.match('a', 'a').groups()""", ()) test(r"""sre.match('(a)', 'a').groups()""", ('a',)) test(r"""sre.match('(a)', 'a').group(0)""", 'a') test(r"""sre.match('(a)', 'a').group(1)""", 'a') test(r"""sre.match('(a)', 'a').group(1, 1)""", ('a', 'a')) pat = sre.compile('((a)|(b))(c)?')
|
test(r"""sre.match(r'a', 'a').groups()""", ()) test(r"""sre.match(r'(a)', 'a').groups()""", ('a',)) test(r"""sre.match(r'(a)', 'a').group(0)""", 'a') test(r"""sre.match(r'(a)', 'a').group(1)""", 'a') test(r"""sre.match(r'(a)', 'a').group(1, 1)""", ('a', 'a')) pat = sre.compile(r'((a)|(b))(c)?')
|
def bump_num(matchobj): int_value = int(matchobj.group(0)) return str(int_value + 1)
|
pat = sre.compile('(?:(?P<a1>a)|(?P<b2>b))(?P<c3>c)?')
|
pat = sre.compile(r'(?:(?P<a1>a)|(?P<b2>b))(?P<c3>c)?')
|
def bump_num(matchobj): int_value = int(matchobj.group(0)) return str(int_value + 1)
|
pat = sre.compile('a(?:b|(c|e){1,2}?|d)+?(.)')
|
pat = sre.compile(r'a(?:b|(c|e){1,2}?|d)+?(.)')
|
def bump_num(matchobj): int_value = int(matchobj.group(0)) return str(int_value + 1)
|
test(r"""sre.match('(x)*', 50000*'x').span()""", (0, 50000), RuntimeError) test(r"""sre.match('(x)*y', 50000*'x'+'y').span()""", (0, 50001), RuntimeError) test(r"""sre.match('(x)*?y', 50000*'x'+'y').span()""", (0, 50001), RuntimeError)
|
test(r"""sre.match(r'(x)*', 50000*'x').span()""", (0, 50000), RuntimeError) test(r"""sre.match(r'(x)*y', 50000*'x'+'y').span()""", (0, 50001), RuntimeError) test(r"""sre.match(r'(x)*?y', 50000*'x'+'y').span()""", (0, 50001), RuntimeError)
|
def bump_num(matchobj): int_value = int(matchobj.group(0)) return str(int_value + 1)
|
pass
|
print '=== Compiled incorrectly', t
|
def bump_num(matchobj): int_value = int(matchobj.group(0)) return str(int_value + 1)
|
try: r, w, e = select.select(r, w, e, timeout) except select.error, err: if err[0] != EINTR: raise
|
if [] == r == w == e: time.sleep(timeout) else: try: r, w, e = select.select(r, w, e, timeout) except select.error, err: if err[0] not in (EINTR, ENOENT): raise
|
def poll(timeout=0.0, map=None): if map is None: map = socket_map if map: r = []; w = []; e = [] for fd, obj in map.items(): if obj.readable(): r.append(fd) if obj.writable(): w.append(fd) try: r, w, e = select.select(r, w, e, timeout) except select.error, err: if err[0] != EINTR: raise for fd in r: obj = map.get(fd) if obj is None: continue read(obj) for fd in w: obj = map.get(fd) if obj is None: continue write(obj)
|
('%w', '%d' % ((1+now[6]) % 7), 'weekday as a number (Sun 1st)'),
|
('%w', '0?%d' % ((1+now[6]) % 7), 'weekday as a number (Sun 1st)'),
|
def strftest(now): if verbose: print "strftime test for", time.ctime(now) nowsecs = str(long(now))[:-1] gmt = time.gmtime(now) now = time.localtime(now) if now[3] < 12: ampm='AM' else: ampm='PM' jan1 = time.localtime(time.mktime((now[0], 1, 1) + (0,)*6)) try: if now[8]: tz = time.tzname[1] else: tz = time.tzname[0] except AttributeError: tz = '' if now[3] > 12: clock12 = now[3] - 12 elif now[3] > 0: clock12 = now[3] else: clock12 = 12 expectations = ( ('%a', calendar.day_abbr[now[6]], 'abbreviated weekday name'), ('%A', calendar.day_name[now[6]], 'full weekday name'), ('%b', calendar.month_abbr[now[1]], 'abbreviated month name'), ('%B', calendar.month_name[now[1]], 'full month name'), # %c see below ('%d', '%02d' % now[2], 'day of month as number (00-31)'), ('%H', '%02d' % now[3], 'hour (00-23)'), ('%I', '%02d' % clock12, 'hour (01-12)'), ('%j', '%03d' % now[7], 'julian day (001-366)'), ('%m', '%02d' % now[1], 'month as number (01-12)'), ('%M', '%02d' % now[4], 'minute, (00-59)'), ('%p', ampm, 'AM or PM as appropriate'), ('%S', '%02d' % now[5], 'seconds of current time (00-60)'), ('%U', '%02d' % ((now[7] + jan1[6])/7), 'week number of the year (Sun 1st)'), ('%w', '%d' % ((1+now[6]) % 7), 'weekday as a number (Sun 1st)'), ('%W', '%02d' % ((now[7] + (jan1[6] - 1)%7)/7), 'week number of the year (Mon 1st)'), # %x see below ('%X', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'), ('%y', '%02d' % (now[0]%100), 'year without century'), ('%Y', '%d' % now[0], 'year with century'), # %Z see below ('%%', '%', 'single percent sign'), ) nonstandard_expectations = ( # These are standard but don't have predictable output ('%c', fixasctime(time.asctime(now)), 'near-asctime() format'), ('%x', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)), '%m/%d/%y %H:%M:%S'), ('(%Z)', '(%s)' % tz, 'time zone name'), # These are some platform specific extensions ('%D', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)), 'mm/dd/yy'), ('%e', '%2d' % now[2], 'day of month as number, blank padded ( 0-31)'), ('%h', calendar.month_abbr[now[1]], 'abbreviated month name'), ('%k', '%2d' % now[3], 'hour, blank padded ( 0-23)'), ('%n', '\n', 'newline character'), ('%r', '%02d:%02d:%02d %s' % (clock12, now[4], now[5], ampm), '%I:%M:%S %p'), ('%R', '%02d:%02d' % (now[3], now[4]), '%H:%M'), ('%s', nowsecs, 'seconds since the Epoch in UCT'), ('%t', '\t', 'tab character'), ('%T', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'), ('%3y', '%03d' % (now[0]%100), 'year without century rendered using fieldwidth'), ) if verbose: print "Strftime test, platform: %s, Python version: %s" % \ (sys.platform, string.split(sys.version)[0]) for e in expectations: try: result = time.strftime(e[0], now) except ValueError, error: print "Standard '%s' format gave error:" % e[0], error continue if result == e[1]: continue if result[0] == '%': print "Does not support standard '%s' format (%s)" % (e[0], e[2]) else: print "Conflict for %s (%s):" % (e[0], e[2]) print " Expected %s, but got %s" % (e[1], result) for e in nonstandard_expectations: try: result = time.strftime(e[0], now) except ValueError, result: if verbose: print "Error for nonstandard '%s' format (%s): %s" % \ (e[0], e[2], str(result)) continue if result == e[1]: if verbose: print "Supports nonstandard '%s' format (%s)" % (e[0], e[2]) elif result[0] == '%': if verbose: print "Does not appear to support '%s' format (%s)" % (e[0], e[2]) else: if verbose: print "Conflict for nonstandard '%s' format (%s):" % (e[0], e[2]) print " Expected %s, but got %s" % (e[1], result)
|
('(%Z)', '(%s)' % tz, 'time zone name'),
|
('%Z', '%s' % tz, 'time zone name'),
|
def strftest(now): if verbose: print "strftime test for", time.ctime(now) nowsecs = str(long(now))[:-1] gmt = time.gmtime(now) now = time.localtime(now) if now[3] < 12: ampm='AM' else: ampm='PM' jan1 = time.localtime(time.mktime((now[0], 1, 1) + (0,)*6)) try: if now[8]: tz = time.tzname[1] else: tz = time.tzname[0] except AttributeError: tz = '' if now[3] > 12: clock12 = now[3] - 12 elif now[3] > 0: clock12 = now[3] else: clock12 = 12 expectations = ( ('%a', calendar.day_abbr[now[6]], 'abbreviated weekday name'), ('%A', calendar.day_name[now[6]], 'full weekday name'), ('%b', calendar.month_abbr[now[1]], 'abbreviated month name'), ('%B', calendar.month_name[now[1]], 'full month name'), # %c see below ('%d', '%02d' % now[2], 'day of month as number (00-31)'), ('%H', '%02d' % now[3], 'hour (00-23)'), ('%I', '%02d' % clock12, 'hour (01-12)'), ('%j', '%03d' % now[7], 'julian day (001-366)'), ('%m', '%02d' % now[1], 'month as number (01-12)'), ('%M', '%02d' % now[4], 'minute, (00-59)'), ('%p', ampm, 'AM or PM as appropriate'), ('%S', '%02d' % now[5], 'seconds of current time (00-60)'), ('%U', '%02d' % ((now[7] + jan1[6])/7), 'week number of the year (Sun 1st)'), ('%w', '%d' % ((1+now[6]) % 7), 'weekday as a number (Sun 1st)'), ('%W', '%02d' % ((now[7] + (jan1[6] - 1)%7)/7), 'week number of the year (Mon 1st)'), # %x see below ('%X', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'), ('%y', '%02d' % (now[0]%100), 'year without century'), ('%Y', '%d' % now[0], 'year with century'), # %Z see below ('%%', '%', 'single percent sign'), ) nonstandard_expectations = ( # These are standard but don't have predictable output ('%c', fixasctime(time.asctime(now)), 'near-asctime() format'), ('%x', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)), '%m/%d/%y %H:%M:%S'), ('(%Z)', '(%s)' % tz, 'time zone name'), # These are some platform specific extensions ('%D', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)), 'mm/dd/yy'), ('%e', '%2d' % now[2], 'day of month as number, blank padded ( 0-31)'), ('%h', calendar.month_abbr[now[1]], 'abbreviated month name'), ('%k', '%2d' % now[3], 'hour, blank padded ( 0-23)'), ('%n', '\n', 'newline character'), ('%r', '%02d:%02d:%02d %s' % (clock12, now[4], now[5], ampm), '%I:%M:%S %p'), ('%R', '%02d:%02d' % (now[3], now[4]), '%H:%M'), ('%s', nowsecs, 'seconds since the Epoch in UCT'), ('%t', '\t', 'tab character'), ('%T', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'), ('%3y', '%03d' % (now[0]%100), 'year without century rendered using fieldwidth'), ) if verbose: print "Strftime test, platform: %s, Python version: %s" % \ (sys.platform, string.split(sys.version)[0]) for e in expectations: try: result = time.strftime(e[0], now) except ValueError, error: print "Standard '%s' format gave error:" % e[0], error continue if result == e[1]: continue if result[0] == '%': print "Does not support standard '%s' format (%s)" % (e[0], e[2]) else: print "Conflict for %s (%s):" % (e[0], e[2]) print " Expected %s, but got %s" % (e[1], result) for e in nonstandard_expectations: try: result = time.strftime(e[0], now) except ValueError, result: if verbose: print "Error for nonstandard '%s' format (%s): %s" % \ (e[0], e[2], str(result)) continue if result == e[1]: if verbose: print "Supports nonstandard '%s' format (%s)" % (e[0], e[2]) elif result[0] == '%': if verbose: print "Does not appear to support '%s' format (%s)" % (e[0], e[2]) else: if verbose: print "Conflict for nonstandard '%s' format (%s):" % (e[0], e[2]) print " Expected %s, but got %s" % (e[1], result)
|
if result == e[1]: continue
|
if re.match(e[1], result): continue
|
def strftest(now): if verbose: print "strftime test for", time.ctime(now) nowsecs = str(long(now))[:-1] gmt = time.gmtime(now) now = time.localtime(now) if now[3] < 12: ampm='AM' else: ampm='PM' jan1 = time.localtime(time.mktime((now[0], 1, 1) + (0,)*6)) try: if now[8]: tz = time.tzname[1] else: tz = time.tzname[0] except AttributeError: tz = '' if now[3] > 12: clock12 = now[3] - 12 elif now[3] > 0: clock12 = now[3] else: clock12 = 12 expectations = ( ('%a', calendar.day_abbr[now[6]], 'abbreviated weekday name'), ('%A', calendar.day_name[now[6]], 'full weekday name'), ('%b', calendar.month_abbr[now[1]], 'abbreviated month name'), ('%B', calendar.month_name[now[1]], 'full month name'), # %c see below ('%d', '%02d' % now[2], 'day of month as number (00-31)'), ('%H', '%02d' % now[3], 'hour (00-23)'), ('%I', '%02d' % clock12, 'hour (01-12)'), ('%j', '%03d' % now[7], 'julian day (001-366)'), ('%m', '%02d' % now[1], 'month as number (01-12)'), ('%M', '%02d' % now[4], 'minute, (00-59)'), ('%p', ampm, 'AM or PM as appropriate'), ('%S', '%02d' % now[5], 'seconds of current time (00-60)'), ('%U', '%02d' % ((now[7] + jan1[6])/7), 'week number of the year (Sun 1st)'), ('%w', '%d' % ((1+now[6]) % 7), 'weekday as a number (Sun 1st)'), ('%W', '%02d' % ((now[7] + (jan1[6] - 1)%7)/7), 'week number of the year (Mon 1st)'), # %x see below ('%X', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'), ('%y', '%02d' % (now[0]%100), 'year without century'), ('%Y', '%d' % now[0], 'year with century'), # %Z see below ('%%', '%', 'single percent sign'), ) nonstandard_expectations = ( # These are standard but don't have predictable output ('%c', fixasctime(time.asctime(now)), 'near-asctime() format'), ('%x', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)), '%m/%d/%y %H:%M:%S'), ('(%Z)', '(%s)' % tz, 'time zone name'), # These are some platform specific extensions ('%D', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)), 'mm/dd/yy'), ('%e', '%2d' % now[2], 'day of month as number, blank padded ( 0-31)'), ('%h', calendar.month_abbr[now[1]], 'abbreviated month name'), ('%k', '%2d' % now[3], 'hour, blank padded ( 0-23)'), ('%n', '\n', 'newline character'), ('%r', '%02d:%02d:%02d %s' % (clock12, now[4], now[5], ampm), '%I:%M:%S %p'), ('%R', '%02d:%02d' % (now[3], now[4]), '%H:%M'), ('%s', nowsecs, 'seconds since the Epoch in UCT'), ('%t', '\t', 'tab character'), ('%T', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'), ('%3y', '%03d' % (now[0]%100), 'year without century rendered using fieldwidth'), ) if verbose: print "Strftime test, platform: %s, Python version: %s" % \ (sys.platform, string.split(sys.version)[0]) for e in expectations: try: result = time.strftime(e[0], now) except ValueError, error: print "Standard '%s' format gave error:" % e[0], error continue if result == e[1]: continue if result[0] == '%': print "Does not support standard '%s' format (%s)" % (e[0], e[2]) else: print "Conflict for %s (%s):" % (e[0], e[2]) print " Expected %s, but got %s" % (e[1], result) for e in nonstandard_expectations: try: result = time.strftime(e[0], now) except ValueError, result: if verbose: print "Error for nonstandard '%s' format (%s): %s" % \ (e[0], e[2], str(result)) continue if result == e[1]: if verbose: print "Supports nonstandard '%s' format (%s)" % (e[0], e[2]) elif result[0] == '%': if verbose: print "Does not appear to support '%s' format (%s)" % (e[0], e[2]) else: if verbose: print "Conflict for nonstandard '%s' format (%s):" % (e[0], e[2]) print " Expected %s, but got %s" % (e[1], result)
|
if result == e[1]:
|
if re.match(e[1], result):
|
def strftest(now): if verbose: print "strftime test for", time.ctime(now) nowsecs = str(long(now))[:-1] gmt = time.gmtime(now) now = time.localtime(now) if now[3] < 12: ampm='AM' else: ampm='PM' jan1 = time.localtime(time.mktime((now[0], 1, 1) + (0,)*6)) try: if now[8]: tz = time.tzname[1] else: tz = time.tzname[0] except AttributeError: tz = '' if now[3] > 12: clock12 = now[3] - 12 elif now[3] > 0: clock12 = now[3] else: clock12 = 12 expectations = ( ('%a', calendar.day_abbr[now[6]], 'abbreviated weekday name'), ('%A', calendar.day_name[now[6]], 'full weekday name'), ('%b', calendar.month_abbr[now[1]], 'abbreviated month name'), ('%B', calendar.month_name[now[1]], 'full month name'), # %c see below ('%d', '%02d' % now[2], 'day of month as number (00-31)'), ('%H', '%02d' % now[3], 'hour (00-23)'), ('%I', '%02d' % clock12, 'hour (01-12)'), ('%j', '%03d' % now[7], 'julian day (001-366)'), ('%m', '%02d' % now[1], 'month as number (01-12)'), ('%M', '%02d' % now[4], 'minute, (00-59)'), ('%p', ampm, 'AM or PM as appropriate'), ('%S', '%02d' % now[5], 'seconds of current time (00-60)'), ('%U', '%02d' % ((now[7] + jan1[6])/7), 'week number of the year (Sun 1st)'), ('%w', '%d' % ((1+now[6]) % 7), 'weekday as a number (Sun 1st)'), ('%W', '%02d' % ((now[7] + (jan1[6] - 1)%7)/7), 'week number of the year (Mon 1st)'), # %x see below ('%X', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'), ('%y', '%02d' % (now[0]%100), 'year without century'), ('%Y', '%d' % now[0], 'year with century'), # %Z see below ('%%', '%', 'single percent sign'), ) nonstandard_expectations = ( # These are standard but don't have predictable output ('%c', fixasctime(time.asctime(now)), 'near-asctime() format'), ('%x', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)), '%m/%d/%y %H:%M:%S'), ('(%Z)', '(%s)' % tz, 'time zone name'), # These are some platform specific extensions ('%D', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)), 'mm/dd/yy'), ('%e', '%2d' % now[2], 'day of month as number, blank padded ( 0-31)'), ('%h', calendar.month_abbr[now[1]], 'abbreviated month name'), ('%k', '%2d' % now[3], 'hour, blank padded ( 0-23)'), ('%n', '\n', 'newline character'), ('%r', '%02d:%02d:%02d %s' % (clock12, now[4], now[5], ampm), '%I:%M:%S %p'), ('%R', '%02d:%02d' % (now[3], now[4]), '%H:%M'), ('%s', nowsecs, 'seconds since the Epoch in UCT'), ('%t', '\t', 'tab character'), ('%T', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'), ('%3y', '%03d' % (now[0]%100), 'year without century rendered using fieldwidth'), ) if verbose: print "Strftime test, platform: %s, Python version: %s" % \ (sys.platform, string.split(sys.version)[0]) for e in expectations: try: result = time.strftime(e[0], now) except ValueError, error: print "Standard '%s' format gave error:" % e[0], error continue if result == e[1]: continue if result[0] == '%': print "Does not support standard '%s' format (%s)" % (e[0], e[2]) else: print "Conflict for %s (%s):" % (e[0], e[2]) print " Expected %s, but got %s" % (e[1], result) for e in nonstandard_expectations: try: result = time.strftime(e[0], now) except ValueError, result: if verbose: print "Error for nonstandard '%s' format (%s): %s" % \ (e[0], e[2], str(result)) continue if result == e[1]: if verbose: print "Supports nonstandard '%s' format (%s)" % (e[0], e[2]) elif result[0] == '%': if verbose: print "Does not appear to support '%s' format (%s)" % (e[0], e[2]) else: if verbose: print "Conflict for nonstandard '%s' format (%s):" % (e[0], e[2]) print " Expected %s, but got %s" % (e[1], result)
|
text.bind('<<close-window', self.close_event)
|
text.bind('<<close-window>>', self.close_event)
|
def __init__(self, flist=None, filename=None, key=None, root=None): if EditorWindow.help_url is None: dochome = os.path.join(sys.prefix, 'Doc', 'index.html') if sys.platform.count('linux'): # look for html docs in a couple of standard places pyver = 'python-docs-' + '%s.%s.%s' % sys.version_info[:3] if os.path.isdir('/var/www/html/python/'): # "python2" rpm dochome = '/var/www/html/python/index.html' else: basepath = '/usr/share/doc/' # standard location dochome = os.path.join(basepath, pyver, 'Doc', 'index.html') elif sys.platform[:3] == 'win': chmfile = os.path.join(sys.prefix, 'Doc', 'Python%d%d.chm' % sys.version_info[:2]) if os.path.isfile(chmfile): dochome = chmfile
|
netloc = params = query = fragment = ''
|
netloc = query = fragment = ''
|
def urlparse(url, scheme = '', allow_fragments = 1): """Parse a URL into 6 components: <scheme>://<netloc>/<path>;<params>?<query>#<fragment> Return a 6-tuple: (scheme, netloc, path, params, query, fragment). Note that we don't break the components up in smaller bits (e.g. netloc is a single string) and we don't expand % escapes.""" key = url, scheme, allow_fragments cached = _parse_cache.get(key, None) if cached: return cached if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth clear_cache() netloc = params = query = fragment = '' i = url.find(':') if i > 0: if url[:i] == 'http': # optimize the common case scheme = url[:i].lower() url = url[i+1:] if url[:2] == '//': i = url.find('/', 2) if i < 0: i = len(url) netloc = url[2:i] url = url[i:] if allow_fragments: i = url.rfind('#') if i >= 0: fragment = url[i+1:] url = url[:i] i = url.find('?') if i >= 0: query = url[i+1:] url = url[:i] i = url.find(';') if i >= 0: params = url[i+1:] url = url[:i] tuple = scheme, netloc, url, params, query, fragment _parse_cache[key] = tuple return tuple for c in url[:i]: if c not in scheme_chars: break else: scheme, url = url[:i].lower(), url[i+1:] if scheme in uses_netloc: if url[:2] == '//': i = url.find('/', 2) if i < 0: i = len(url) netloc, url = url[2:i], url[i:] if allow_fragments and scheme in uses_fragment: i = url.rfind('#') if i >= 0: url, fragment = url[:i], url[i+1:] if scheme in uses_query: i = url.find('?') if i >= 0: url, query = url[:i], url[i+1:] if scheme in uses_params: i = url.find(';') if i >= 0: url, params = url[:i], url[i+1:] tuple = scheme, netloc, url, params, query, fragment _parse_cache[key] = tuple return tuple
|
if allow_fragments: i = url.rfind('#') if i >= 0: fragment = url[i+1:] url = url[:i] i = url.find('?') if i >= 0: query = url[i+1:] url = url[:i] i = url.find(';') if i >= 0: params = url[i+1:] url = url[:i] tuple = scheme, netloc, url, params, query, fragment
|
if allow_fragments and '#' in url: url, fragment = url.split('#', 1) if '?' in url: url, query = url.split('?', 1) tuple = scheme, netloc, url, query, fragment
|
def urlparse(url, scheme = '', allow_fragments = 1): """Parse a URL into 6 components: <scheme>://<netloc>/<path>;<params>?<query>#<fragment> Return a 6-tuple: (scheme, netloc, path, params, query, fragment). Note that we don't break the components up in smaller bits (e.g. netloc is a single string) and we don't expand % escapes.""" key = url, scheme, allow_fragments cached = _parse_cache.get(key, None) if cached: return cached if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth clear_cache() netloc = params = query = fragment = '' i = url.find(':') if i > 0: if url[:i] == 'http': # optimize the common case scheme = url[:i].lower() url = url[i+1:] if url[:2] == '//': i = url.find('/', 2) if i < 0: i = len(url) netloc = url[2:i] url = url[i:] if allow_fragments: i = url.rfind('#') if i >= 0: fragment = url[i+1:] url = url[:i] i = url.find('?') if i >= 0: query = url[i+1:] url = url[:i] i = url.find(';') if i >= 0: params = url[i+1:] url = url[:i] tuple = scheme, netloc, url, params, query, fragment _parse_cache[key] = tuple return tuple for c in url[:i]: if c not in scheme_chars: break else: scheme, url = url[:i].lower(), url[i+1:] if scheme in uses_netloc: if url[:2] == '//': i = url.find('/', 2) if i < 0: i = len(url) netloc, url = url[2:i], url[i:] if allow_fragments and scheme in uses_fragment: i = url.rfind('#') if i >= 0: url, fragment = url[:i], url[i+1:] if scheme in uses_query: i = url.find('?') if i >= 0: url, query = url[:i], url[i+1:] if scheme in uses_params: i = url.find(';') if i >= 0: url, params = url[:i], url[i+1:] tuple = scheme, netloc, url, params, query, fragment _parse_cache[key] = tuple return tuple
|
if allow_fragments and scheme in uses_fragment: i = url.rfind('#') if i >= 0: url, fragment = url[:i], url[i+1:] if scheme in uses_query: i = url.find('?') if i >= 0: url, query = url[:i], url[i+1:] if scheme in uses_params: i = url.find(';') if i >= 0: url, params = url[:i], url[i+1:] tuple = scheme, netloc, url, params, query, fragment
|
if allow_fragments and scheme in uses_fragment and '#' in url: url, fragment = url.split('#', 1) if scheme in uses_query and '?' in url: url, query = url.split('?', 1) tuple = scheme, netloc, url, query, fragment
|
def urlparse(url, scheme = '', allow_fragments = 1): """Parse a URL into 6 components: <scheme>://<netloc>/<path>;<params>?<query>#<fragment> Return a 6-tuple: (scheme, netloc, path, params, query, fragment). Note that we don't break the components up in smaller bits (e.g. netloc is a single string) and we don't expand % escapes.""" key = url, scheme, allow_fragments cached = _parse_cache.get(key, None) if cached: return cached if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth clear_cache() netloc = params = query = fragment = '' i = url.find(':') if i > 0: if url[:i] == 'http': # optimize the common case scheme = url[:i].lower() url = url[i+1:] if url[:2] == '//': i = url.find('/', 2) if i < 0: i = len(url) netloc = url[2:i] url = url[i:] if allow_fragments: i = url.rfind('#') if i >= 0: fragment = url[i+1:] url = url[:i] i = url.find('?') if i >= 0: query = url[i+1:] url = url[:i] i = url.find(';') if i >= 0: params = url[i+1:] url = url[:i] tuple = scheme, netloc, url, params, query, fragment _parse_cache[key] = tuple return tuple for c in url[:i]: if c not in scheme_chars: break else: scheme, url = url[:i].lower(), url[i+1:] if scheme in uses_netloc: if url[:2] == '//': i = url.find('/', 2) if i < 0: i = len(url) netloc, url = url[2:i], url[i:] if allow_fragments and scheme in uses_fragment: i = url.rfind('#') if i >= 0: url, fragment = url[:i], url[i+1:] if scheme in uses_query: i = url.find('?') if i >= 0: url, query = url[:i], url[i+1:] if scheme in uses_params: i = url.find(';') if i >= 0: url, params = url[:i], url[i+1:] tuple = scheme, netloc, url, params, query, fragment _parse_cache[key] = tuple return tuple
|
if params: url = url + ';' + params
|
def urlunparse((scheme, netloc, url, params, query, fragment)): """Put a parsed URL back together again. This may result in a slightly different, but equivalent URL, if the URL that was parsed originally had redundant delimiters, e.g. a ? with an empty query (the draft states that these are equivalent).""" if netloc or (scheme in uses_netloc and url[:2] == '//'): if url and url[:1] != '/': url = '/' + url url = '//' + (netloc or '') + url if scheme: url = scheme + ':' + url if params: url = url + ';' + params if query: url = url + '?' + query if fragment: url = url + '#' + fragment return url
|
|
s, n, p, a, q, frag = urlparse(url) defrag = urlunparse((s, n, p, a, q, '')) return defrag, frag
|
if '#' in url: s, n, p, a, q, frag = urlparse(url) defrag = urlunparse((s, n, p, a, q, '')) return defrag, frag else: return url, ''
|
def urldefrag(url): """Removes any existing fragment from URL. Returns a tuple of the defragmented URL and the fragment. If the URL contained no fragments, the second element is the empty string. """ s, n, p, a, q, frag = urlparse(url) defrag = urlunparse((s, n, p, a, q, '')) return defrag, frag
|
verify(f4.func_code.co_varnames == ('two', '.2', 'compound', 'argument', 'list')) verify(f5.func_code.co_varnames == ('.0', 'two', 'compound', 'first'))
|
if sys.platform.startswith('java'): verify(f4.func_code.co_varnames == ('two', '(compound, (argument, list))',)) verify(f5.func_code.co_varnames == ('(compound, first)', 'two', 'compound', 'first')) else: verify(f4.func_code.co_varnames == ('two', '.2', 'compound', 'argument', 'list')) verify(f5.func_code.co_varnames == ('.0', 'two', 'compound', 'first'))
|
def f5((compound, first), two): pass
|
verify(v3.func_code.co_varnames == ('a', '.2', 'rest', 'b', 'c'))
|
if sys.platform.startswith('java'): verify(v3.func_code.co_varnames == ('a', '(b, c)', 'rest', 'b', 'c')) else: verify(v3.func_code.co_varnames == ('a', '.2', 'rest', 'b', 'c'))
|
def v3(a, (b, c), *rest): return a, b, c, rest
|
def seek(self, pos, mode = 0):
|
def seek(self, pos, whence = 0):
|
def seek(self, pos, mode = 0): """Seek to specified position into the chunk. Default position is 0 (start of chunk). If the file is not seekable, this will result in an error.
|
if mode == 1:
|
if whence == 1:
|
def seek(self, pos, mode = 0): """Seek to specified position into the chunk. Default position is 0 (start of chunk). If the file is not seekable, this will result in an error.
|
elif mode == 2:
|
elif whence == 2:
|
def seek(self, pos, mode = 0): """Seek to specified position into the chunk. Default position is 0 (start of chunk). If the file is not seekable, this will result in an error.
|
def read(self, n = -1): """Read at most n bytes from the chunk. If n is omitted or negative, read until the end
|
def read(self, size = -1): """Read at most size bytes from the chunk. If size is omitted or negative, read until the end
|
def read(self, n = -1): """Read at most n bytes from the chunk. If n is omitted or negative, read until the end of the chunk.
|
if n < 0: n = self.chunksize - self.size_read if n > self.chunksize - self.size_read: n = self.chunksize - self.size_read data = self.file.read(n)
|
if size < 0: size = self.chunksize - self.size_read if size > self.chunksize - self.size_read: size = self.chunksize - self.size_read data = self.file.read(size)
|
def read(self, n = -1): """Read at most n bytes from the chunk. If n is omitted or negative, read until the end of the chunk.
|
'/usr/include/db3',
|
'/usr/local/include/db3',
|
def detect_modules(self): # Ensure that /usr/local is always used add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib') add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
|
'/usr/local/include/db3',
|
'/usr/include/db3',
|
def detect_modules(self): # Ensure that /usr/local is always used add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib') add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
|
'/usr/lib',
|
'/usr/local/lib',
|
def detect_modules(self): # Ensure that /usr/local is always used add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib') add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
|
'libdirs': ('/usr/lib', '/sw/lib', '/lib'), 'incdirs': ('/usr/include/db2', '/usr/local/include/db2', '/sw/include/db2'),
|
'libdirs': ('/usr/local/lib', '/sw/lib', '/usr/lib', '/lib'), 'incdirs': ('/usr/local/include/db2', '/sw/include/db2', '/usr/include/db2'),
|
def detect_modules(self): # Ensure that /usr/local is always used add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib') add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
|
try: port = int(host[i+1:]) except ValueError, msg: raise socket.error, str(msg)
|
port = int(host[i+1:])
|
def _set_hostport(self, host, port): if port is None: i = host.find(':') if i >= 0: try: port = int(host[i+1:]) except ValueError, msg: raise socket.error, str(msg) host = host[:i] else: port = self.default_port self.host = host self.port = port
|
add_dir_to_list(self.compiler.library_dirs, sysconfig.get_config_var("LIBDIR")) add_dir_to_list(self.compiler.include_dirs, sysconfig.get_config_var("INCLUDEDIR"))
|
if os.path.normpath(sys.prefix) != '/usr': add_dir_to_list(self.compiler.library_dirs, sysconfig.get_config_var("LIBDIR")) add_dir_to_list(self.compiler.include_dirs, sysconfig.get_config_var("INCLUDEDIR"))
|
def detect_modules(self): # Ensure that /usr/local is always used add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib') add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
|
def check_split(self, wrapper, text, expect): result = wrapper._split(text)
|
def check_split(self, text, expect): result = self.wrapper._split(text)
|
def check_split(self, wrapper, text, expect): result = wrapper._split(text) self.assertEquals(result, expect, "\nexpected %r\n" "but got %r" % (expect, result))
|
self.check_split(self.wrapper, text, expect)
|
self.check_split(text, expect)
|
def test_em_dash(self): # Test text with em-dashes text = "Em-dashes should be written -- thus." self.check_wrap(text, 25, ["Em-dashes should be", "written -- thus."])
|
self.check_split(self.wrapper, text, expect)
|
self.check_split(text, expect) def test_funky_hyphens (self): self.check_split("what the--hey!", ["what", " ", "the", "--", "hey!"]) self.check_split("what the--", ["what", " ", "the--"]) self.check_split("what the--.", ["what", " ", "the--."]) self.check_split("--text--.", ["--text--."]) self.check_split("--option", ["--option"]) self.check_split("--option-opt", ["--option-", "opt"])
|
def test_unix_options (self): # Test that Unix-style command-line options are wrapped correctly. # Both Optik (OptionParser) and Docutils rely on this behaviour!
|
l.grid(row=self.row, col=0, sticky="w")
|
l.grid(row=self.row, column=0, sticky="w")
|
def make_entry(self, label, var): l = Label(self.top, text=label) l.grid(row=self.row, col=0, sticky="w") e = Entry(self.top, textvariable=var, exportselection=0) e.grid(row=self.row, col=1, sticky="we") self.row = self.row + 1 return e
|
e.grid(row=self.row, col=1, sticky="we")
|
e.grid(row=self.row, column=1, sticky="we")
|
def make_entry(self, label, var): l = Label(self.top, text=label) l.grid(row=self.row, col=0, sticky="w") e = Entry(self.top, textvariable=var, exportselection=0) e.grid(row=self.row, col=1, sticky="we") self.row = self.row + 1 return e
|
f.grid(row=self.row, col=0, columnspan=2, sticky="we")
|
f.grid(row=self.row, column=0, columnspan=2, sticky="we")
|
def make_frame(self): f = Frame(self.top) f.grid(row=self.row, col=0, columnspan=2, sticky="we") self.row = self.row + 1 return f
|
s = "\\pdfoutline goto name{page.%dx}" % pageno
|
s = "\\pdfoutline goto name{page%03d}" % pageno
|
def write_toc_entry(entry, fp, layer): stype, snum, title, pageno, toc = entry s = "\\pdfoutline goto name{page.%dx}" % pageno if toc: s = "%s count -%d" % (s, len(toc)) if snum: title = "%s %s" % (snum, title) s = "%s {%s}\n" % (s, title) fp.write(s) for entry in toc: write_toc_entry(entry, fp, layer + 1)
|
self.installaehandler('aevt', '****', self.other)
|
self.installaehandler('****', '****', self.other)
|
def __init__(self): MiniApplication.__init__(self) AEServer.__init__(self) self.installaehandler('aevt', 'oapp', self.open_app) self.installaehandler('aevt', 'quit', self.quit) self.installaehandler('aevt', '****', self.other) self.mainloop()
|
user_passwd, host = splituser(host)
|
if host: user_passwd, host = splituser(host) host = unquote(host) realhost = host
|
def open_https(self, url, data=None): """Use HTTPS protocol.""" import httplib if type(url) is type(""): host, selector = splithost(url) user_passwd, host = splituser(host) else: host, selector = url urltype, rest = splittype(selector) if string.lower(urltype) == 'https': realhost, rest = splithost(rest) user_passwd, realhost = splituser(realhost) if user_passwd: selector = "%s://%s%s" % (urltype, realhost, rest) #print "proxy via https:", host, selector if not host: raise IOError, ('https error', 'no host given') if user_passwd: import base64 auth = string.strip(base64.encodestring(user_passwd)) else: auth = None h = httplib.HTTPS(host, 0, key_file=self.key_file, cert_file=self.cert_file) if data is not None: h.putrequest('POST', selector) h.putheader('Content-type', 'application/x-www-form-urlencoded') h.putheader('Content-length', '%d' % len(data)) else: h.putrequest('GET', selector) if auth: h.putheader('Authorization: Basic %s' % auth) for args in self.addheaders: apply(h.putheader, args) h.endheaders() if data is not None: h.send(data + '\r\n') errcode, errmsg, headers = h.getreply() fp = h.getfile() if errcode == 200: return addinfourl(fp, headers, url) else: return self.http_error(url, fp, errcode, errmsg, headers)
|
if string.lower(urltype) == 'https':
|
url = rest user_passwd = None if string.lower(urltype) != 'https': realhost = None else:
|
def open_https(self, url, data=None): """Use HTTPS protocol.""" import httplib if type(url) is type(""): host, selector = splithost(url) user_passwd, host = splituser(host) else: host, selector = url urltype, rest = splittype(selector) if string.lower(urltype) == 'https': realhost, rest = splithost(rest) user_passwd, realhost = splituser(realhost) if user_passwd: selector = "%s://%s%s" % (urltype, realhost, rest) #print "proxy via https:", host, selector if not host: raise IOError, ('https error', 'no host given') if user_passwd: import base64 auth = string.strip(base64.encodestring(user_passwd)) else: auth = None h = httplib.HTTPS(host, 0, key_file=self.key_file, cert_file=self.cert_file) if data is not None: h.putrequest('POST', selector) h.putheader('Content-type', 'application/x-www-form-urlencoded') h.putheader('Content-length', '%d' % len(data)) else: h.putrequest('GET', selector) if auth: h.putheader('Authorization: Basic %s' % auth) for args in self.addheaders: apply(h.putheader, args) h.endheaders() if data is not None: h.send(data + '\r\n') errcode, errmsg, headers = h.getreply() fp = h.getfile() if errcode == 200: return addinfourl(fp, headers, url) else: return self.http_error(url, fp, errcode, errmsg, headers)
|
user_passwd, realhost = splituser(realhost)
|
if realhost: user_passwd, realhost = splituser(realhost)
|
def open_https(self, url, data=None): """Use HTTPS protocol.""" import httplib if type(url) is type(""): host, selector = splithost(url) user_passwd, host = splituser(host) else: host, selector = url urltype, rest = splittype(selector) if string.lower(urltype) == 'https': realhost, rest = splithost(rest) user_passwd, realhost = splituser(realhost) if user_passwd: selector = "%s://%s%s" % (urltype, realhost, rest) #print "proxy via https:", host, selector if not host: raise IOError, ('https error', 'no host given') if user_passwd: import base64 auth = string.strip(base64.encodestring(user_passwd)) else: auth = None h = httplib.HTTPS(host, 0, key_file=self.key_file, cert_file=self.cert_file) if data is not None: h.putrequest('POST', selector) h.putheader('Content-type', 'application/x-www-form-urlencoded') h.putheader('Content-length', '%d' % len(data)) else: h.putrequest('GET', selector) if auth: h.putheader('Authorization: Basic %s' % auth) for args in self.addheaders: apply(h.putheader, args) h.endheaders() if data is not None: h.send(data + '\r\n') errcode, errmsg, headers = h.getreply() fp = h.getfile() if errcode == 200: return addinfourl(fp, headers, url) else: return self.http_error(url, fp, errcode, errmsg, headers)
|
return self.http_error(url, fp, errcode, errmsg, headers)
|
if data is None: return self.http_error(url, fp, errcode, errmsg, headers) else: return self.http_error(url, fp, errcode, errmsg, headers, data)
|
def open_https(self, url, data=None): """Use HTTPS protocol.""" import httplib if type(url) is type(""): host, selector = splithost(url) user_passwd, host = splituser(host) else: host, selector = url urltype, rest = splittype(selector) if string.lower(urltype) == 'https': realhost, rest = splithost(rest) user_passwd, realhost = splituser(realhost) if user_passwd: selector = "%s://%s%s" % (urltype, realhost, rest) #print "proxy via https:", host, selector if not host: raise IOError, ('https error', 'no host given') if user_passwd: import base64 auth = string.strip(base64.encodestring(user_passwd)) else: auth = None h = httplib.HTTPS(host, 0, key_file=self.key_file, cert_file=self.cert_file) if data is not None: h.putrequest('POST', selector) h.putheader('Content-type', 'application/x-www-form-urlencoded') h.putheader('Content-length', '%d' % len(data)) else: h.putrequest('GET', selector) if auth: h.putheader('Authorization: Basic %s' % auth) for args in self.addheaders: apply(h.putheader, args) h.endheaders() if data is not None: h.send(data + '\r\n') errcode, errmsg, headers = h.getreply() fp = h.getfile() if errcode == 200: return addinfourl(fp, headers, url) else: return self.http_error(url, fp, errcode, errmsg, headers)
|
if match: return match.group(1, 2)
|
if match: return map(unquote, match.group(1, 2))
|
def splituser(host): """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" global _userprog if _userprog is None: import re _userprog = re.compile('^([^@]*)@(.*)$') match = _userprog.match(host) if match: return match.group(1, 2) return None, host
|
u2 = random()
|
u2 = 1.0 - random()
|
def normalvariate(self, mu, sigma): """Normal distribution.
|
u2 = random()
|
if not 1e-7 < u1 < .9999999: continue u2 = 1.0 - random()
|
def gammavariate(self, alpha, beta): """Gamma distribution. Not the gamma function!
|
u = self.random()
|
u = 1.0 - self.random()
|
def paretovariate(self, alpha): """Pareto distribution. alpha is the shape parameter.""" # Jain, pg. 495
|
u = self.random()
|
u = 1.0 - self.random()
|
def weibullvariate(self, alpha, beta): """Weibull distribution.
|
if self.runtime_library_dirs:
|
if runtime_library_dirs:
|
def link_shared_object (self, objects, output_filename, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, debug=0, extra_preargs=None, extra_postargs=None):
|
if type (output_dir) not in (StringType, NoneType): raise TypeError, "'output_dir' must be a string or None"
|
def link_shared_object (self, objects, output_filename, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, debug=0, extra_preargs=None, extra_postargs=None):
|
|
log.warn("'%s' does not exist -- can't clean it", self.build_temp)
|
log.debug("'%s' does not exist -- can't clean it", self.build_temp)
|
def run(self): # remove the build/temp.<plat> directory (unless it's already # gone) if os.path.exists(self.build_temp): remove_tree(self.build_temp, dry_run=self.dry_run) else: log.warn("'%s' does not exist -- can't clean it", self.build_temp)
|
self.create_system = 0
|
if sys.platform == 'win32': self.create_system = 0 else: self.create_system = 3
|
def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)): self.orig_filename = filename # Original file name in archive
|
assert sCursor.set('red')
|
tmp = sCursor.set('red') assert tmp
|
def test01_join(self): if verbose: print '\n', '-=' * 30 print "Running %s.test01_join..." % \ self.__class__.__name__
|
def hexrepr(t):
|
def hexrepr(t, precision=4):
|
def hexrepr(t): if t is None: return 'None' try: len(t) except: return '0x%04x' % t try: return '(' + ', '.join(map(lambda t: '0x%04x' % t, t)) + ')' except TypeError, why: print '* failed to convert %r: %s' % (t, why) raise
|
return '0x%04x' % t
|
return '0x%0*X' % (precision, t)
|
def hexrepr(t): if t is None: return 'None' try: len(t) except: return '0x%04x' % t try: return '(' + ', '.join(map(lambda t: '0x%04x' % t, t)) + ')' except TypeError, why: print '* failed to convert %r: %s' % (t, why) raise
|
return '(' + ', '.join(map(lambda t: '0x%04x' % t, t)) + ')'
|
return '(' + ', '.join(['0x%0*X' % (precision, item) for item in t]) + ')'
|
def hexrepr(t): if t is None: return 'None' try: len(t) except: return '0x%04x' % t try: return '(' + ', '.join(map(lambda t: '0x%04x' % t, t)) + ')' except TypeError, why: print '* failed to convert %r: %s' % (t, why) raise
|
def python_mapdef_code(varname, map, comments=1):
|
def python_mapdef_code(varname, map, comments=1, precisions=(2, 4)):
|
def python_mapdef_code(varname, map, comments=1): l = [] append = l.append if map.has_key("IDENTITY"): append("%s = codecs.make_identity_dict(range(%d))" % (varname, map["IDENTITY"])) append("%s.update({" % varname) splits = 1 del map["IDENTITY"] identity = 1 else: append("%s = {" % varname) splits = 0 identity = 0 mappings = map.items() mappings.sort() i = 0 for mapkey, mapvalue in mappings: mapcomment = '' if isinstance(mapkey, tuple): (mapkey, mapcomment) = mapkey if isinstance(mapvalue, tuple): (mapvalue, mapcomment) = mapvalue if mapkey is None: continue if (identity and mapkey == mapvalue and mapkey < 256): # No need to include identity mappings, since these # are already set for the first 256 code points. continue key = hexrepr(mapkey) value = hexrepr(mapvalue) if mapcomment and comments: append(' %s: %s,\t# %s' % (key, value, mapcomment)) else: append(' %s: %s,' % (key, value)) i += 1 if i == 4096: # Split the definition into parts to that the Python # parser doesn't dump core if splits == 0: append('}') else: append('})') append('%s.update({' % varname) i = 0 splits = splits + 1 if splits == 0: append('}') else: append('})') return l
|
key = hexrepr(mapkey) value = hexrepr(mapvalue)
|
key = hexrepr(mapkey, key_precision) value = hexrepr(mapvalue, value_precision)
|
def python_mapdef_code(varname, map, comments=1): l = [] append = l.append if map.has_key("IDENTITY"): append("%s = codecs.make_identity_dict(range(%d))" % (varname, map["IDENTITY"])) append("%s.update({" % varname) splits = 1 del map["IDENTITY"] identity = 1 else: append("%s = {" % varname) splits = 0 identity = 0 mappings = map.items() mappings.sort() i = 0 for mapkey, mapvalue in mappings: mapcomment = '' if isinstance(mapkey, tuple): (mapkey, mapcomment) = mapkey if isinstance(mapvalue, tuple): (mapvalue, mapcomment) = mapvalue if mapkey is None: continue if (identity and mapkey == mapvalue and mapkey < 256): # No need to include identity mappings, since these # are already set for the first 256 code points. continue key = hexrepr(mapkey) value = hexrepr(mapvalue) if mapcomment and comments: append(' %s: %s,\t# %s' % (key, value, mapcomment)) else: append(' %s: %s,' % (key, value)) i += 1 if i == 4096: # Split the definition into parts to that the Python # parser doesn't dump core if splits == 0: append('}') else: append('})') append('%s.update({' % varname) i = 0 splits = splits + 1 if splits == 0: append('}') else: append('})') return l
|
def python_tabledef_code(varname, map, comments=1):
|
def python_tabledef_code(varname, map, comments=1, key_precision=2):
|
def python_tabledef_code(varname, map, comments=1): l = [] append = l.append append('%s = (' % varname) # Analyze map and create table dict mappings = map.items() mappings.sort() table = {} maxkey = 0 if map.has_key('IDENTITY'): for key in range(256): table[key] = (key, '') maxkey = 255 del map['IDENTITY'] for mapkey, mapvalue in mappings: mapcomment = '' if isinstance(mapkey, tuple): (mapkey, mapcomment) = mapkey if isinstance(mapvalue, tuple): (mapvalue, mapcomment) = mapvalue if mapkey is None: continue table[mapkey] = (mapvalue, mapcomment) if mapkey > maxkey: maxkey = mapkey if maxkey > MAX_TABLE_SIZE: # Table too large return None # Create table code for key in range(maxkey + 1): if key not in table: mapvalue = None mapcomment = 'UNDEFINED' else: mapvalue, mapcomment = table[key] if mapvalue is None: mapchar = UNI_UNDEFINED else: if isinstance(mapvalue, tuple): # 1-n mappings not supported return None else: mapchar = unichr(mapvalue) if mapcomment and comments: append(' %r\t# %s -> %s' % (mapchar, hexrepr(key), mapcomment)) else: append(' %r' % mapchar) append(')') return l
|
hexrepr(key),
|
hexrepr(key, key_precision),
|
def python_tabledef_code(varname, map, comments=1): l = [] append = l.append append('%s = (' % varname) # Analyze map and create table dict mappings = map.items() mappings.sort() table = {} maxkey = 0 if map.has_key('IDENTITY'): for key in range(256): table[key] = (key, '') maxkey = 255 del map['IDENTITY'] for mapkey, mapvalue in mappings: mapcomment = '' if isinstance(mapkey, tuple): (mapkey, mapcomment) = mapkey if isinstance(mapvalue, tuple): (mapvalue, mapcomment) = mapvalue if mapkey is None: continue table[mapkey] = (mapvalue, mapcomment) if mapkey > maxkey: maxkey = mapkey if maxkey > MAX_TABLE_SIZE: # Table too large return None # Create table code for key in range(maxkey + 1): if key not in table: mapvalue = None mapcomment = 'UNDEFINED' else: mapvalue, mapcomment = table[key] if mapvalue is None: mapchar = UNI_UNDEFINED else: if isinstance(mapvalue, tuple): # 1-n mappings not supported return None else: mapchar = unichr(mapvalue) if mapcomment and comments: append(' %r\t# %s -> %s' % (mapchar, hexrepr(key), mapcomment)) else: append(' %r' % mapchar) append(')') return l
|
comments=comments)
|
comments=comments, precisions=(4, 2))
|
def codegen(name, map, comments=1): """ Returns Python source for the given map. Comments are included in the source, if comments is true (default). """ # Generate code decoding_map_code = python_mapdef_code( 'decoding_map', map, comments=comments) decoding_table_code = python_tabledef_code( 'decoding_table', map, comments=comments) encoding_map_code = python_mapdef_code( 'encoding_map', codecs.make_encoding_map(map), comments=comments) l = [ '''\
|
l.extend(decoding_map_code) if decoding_table_code:
|
l.extend(decoding_map_code) else:
|
def getregentry(): return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
|
path, lastcomp = os.path.split(path) if not path: break removed[0:0] = [lastcomp] path = patharg while 1:
|
def match(self, patharg): removed = [] # First check the include directory path = patharg while 1: if self.idict.has_key(path): # We know of this path (or initial piece of path) dstpath = self.idict[path] # We do want it distributed. Tack on the tail. while removed: dstpath = os.path.join(dstpath, removed[0]) removed = removed[1:] # Finally, if the resultant string ends in a separator # tack on our input filename if dstpath[-1] == os.sep: dir, file = os.path.split(path) dstpath = os.path.join(dstpath, file) if DEBUG: print 'include', patharg, dstpath return dstpath path, lastcomp = os.path.split(path) if not path: break removed[0:0] = [lastcomp] # Next check the exclude directory path = patharg while 1: if self.edict.has_key(path): if DEBUG: print 'exclude', patharg, path return '' path, lastcomp = os.path.split(path) if not path: break removed[0:0] = [lastcomp] if DEBUG: print 'nomatch', patharg return None
|
|
class Maildir: def __init__(self, dirname): import string self.dirname = dirname self.boxes = [] newdir = os.path.join(self.dirname, 'new') for file in os.listdir(newdir): if len(string.split(file, '.')) > 2: self.boxes.append(os.path.join(newdir, file)) curdir = os.path.join(self.dirname, 'cur') for file in os.listdir(curdir): if len(string.split(file, '.')) > 2: self.boxes.append(os.path.join(curdir, file)) def next(self): if not self.boxes: return None fn = self.boxes[0] del self.boxes[0] fp = open(os.path.join(self.dirname, fn)) return rfc822.Message(fp)
|
def next(self): if not self.boxes: return None fn = self.boxes[0] del self.boxes[0] fp = open(os.path.join(self.dirname, fn)) return rfc822.Message(fp)
|
|
for key in 'MAIL', 'LOGNAME', 'USER':
|
for key in 'MAILDIR', 'MAIL', 'LOGNAME', 'USER':
|
def _test():
    import time
    import sys
    import string
    import os

    args = sys.argv[1:]
    if not args:
        for key in 'MAIL', 'LOGNAME', 'USER':
            if os.environ.has_key(key):
                mbox = os.environ[key]
                break
        else:
            print "$MAIL, $LOGNAME nor $USER set -- who are you?"
            return
    else:
        mbox = args[0]
    if mbox[:1] == '+':
        mbox = os.environ['HOME'] + '/Mail/' + mbox[1:]
    elif not '/' in mbox:
        mbox = '/usr/mail/' + mbox
    if os.path.isdir(mbox):
        mb = MHMailbox(mbox)
    else:
        fp = open(mbox, 'r')
        mb = UnixMailbox(fp)

    msgs = []
    while 1:
        msg = mb.next()
        if msg is None:
            break
        msgs.append(msg)
        msg.fp = None
    if len(args) > 1:
        num = string.atoi(args[1])
        print 'Message %d body:'%num
        msg = msgs[num-1]
        msg.rewindbody()
        sys.stdout.write(msg.fp.read())
    else:
        print 'Mailbox',mbox,'has',len(msgs),'messages:'
        for msg in msgs:
            f = msg.getheader('from') or ""
            s = msg.getheader('subject') or ""
            d = msg.getheader('date') or ""
            print '%20.20s %18.18s %-30.30s'%(f, d[5:], s)
|
mb = MHMailbox(mbox)
|
if os.path.isdir(os.path.join(mbox, 'cur')):
    mb = Maildir(mbox)
else:
    mb = MHMailbox(mbox)
|
def _test():
    import time
    import sys
    import string
    import os

    args = sys.argv[1:]
    if not args:
        for key in 'MAIL', 'LOGNAME', 'USER':
            if os.environ.has_key(key):
                mbox = os.environ[key]
                break
        else:
            print "$MAIL, $LOGNAME nor $USER set -- who are you?"
            return
    else:
        mbox = args[0]
    if mbox[:1] == '+':
        mbox = os.environ['HOME'] + '/Mail/' + mbox[1:]
    elif not '/' in mbox:
        mbox = '/usr/mail/' + mbox
    if os.path.isdir(mbox):
        mb = MHMailbox(mbox)
    else:
        fp = open(mbox, 'r')
        mb = UnixMailbox(fp)

    msgs = []
    while 1:
        msg = mb.next()
        if msg is None:
            break
        msgs.append(msg)
        msg.fp = None
    if len(args) > 1:
        num = string.atoi(args[1])
        print 'Message %d body:'%num
        msg = msgs[num-1]
        msg.rewindbody()
        sys.stdout.write(msg.fp.read())
    else:
        print 'Mailbox',mbox,'has',len(msgs),'messages:'
        for msg in msgs:
            f = msg.getheader('from') or ""
            s = msg.getheader('subject') or ""
            d = msg.getheader('date') or ""
            print '%20.20s %18.18s %-30.30s'%(f, d[5:], s)
|
new = re.pcre_expand(m, repl)
|
new = self._expand(m, repl)
|
    def replace_all(self, event=None):
        prog = self.engine.getprog()
        if not prog:
            return
        repl = self.replvar.get()
        text = self.text
        res = self.engine.search_text(text, prog)
        if not res:
            text.bell()
            return
        text.tag_remove("sel", "1.0", "end")
        text.tag_remove("hit", "1.0", "end")
        line = res[0]
        col = res[1].start()
        if self.engine.iswrap():
            line = 1
            col = 0
        ok = 1
        first = last = None
        # XXX ought to replace circular instead of top-to-bottom when wrapping
        text.undo_block_start()
        while 1:
            res = self.engine.search_forward(text, prog, line, col, 0, ok)
            if not res:
                break
            line, m = res
            chars = text.get("%d.0" % line, "%d.0" % (line+1))
            orig = m.group()
            new = re.pcre_expand(m, repl)
            i, j = m.span()
            first = "%d.%d" % (line, i)
            last = "%d.%d" % (line, j)
            if new == orig:
                text.mark_set("insert", last)
            else:
                text.mark_set("insert", first)
                if first != last:
                    text.delete(first, last)
                if new:
                    text.insert(first, new)
            col = i + len(new)
            ok = 0
        text.undo_block_stop()
        if first and last:
            self.show_hit(first, last)
        self.close()
|
new = re.pcre_expand(m, self.replvar.get())
|
new = self._expand(m, self.replvar.get())
|
    def do_replace(self):
        prog = self.engine.getprog()
        if not prog:
            return 0
        text = self.text
        try:
            first = pos = text.index("sel.first")
            last = text.index("sel.last")
        except TclError:
            pos = None
        if not pos:
            first = last = pos = text.index("insert")
        line, col = SearchEngine.get_line_col(pos)
        chars = text.get("%d.0" % line, "%d.0" % (line+1))
        m = prog.match(chars, col)
        if not prog:
            return 0
        new = re.pcre_expand(m, self.replvar.get())
        text.mark_set("insert", first)
        text.undo_block_start()
        if m.group():
            text.delete(first, last)
        if new:
            text.insert(first, new)
        text.undo_block_stop()
        self.show_hit(first, text.index("insert"))
        self.ok = 0
        return 1
|
self.enter()
self.dispatch(t.body)
self.leave()
|
        self.write(")")
        self.enter()
        self.dispatch(t.body)
        self.leave()

    def _For(self, t):
        self.fill("for ")
        self.dispatch(t.target)
        self.write(" in ")
        self.dispatch(t.iter)
        self.enter()
        self.dispatch(t.body)
        self.leave()
        if t.orelse:
            self.fill("else")
            self.enter()
            self.dispatch(t.orelse)
            self.leave
|
self.fill("def "+t.name + "(")
|
    def _For(self, t):
        self.fill("for ")
        self.dispatch(t.target)
        self.write(" in ")
        self.dispatch(t.iter)
|
    def _While(self, t):
        self.fill("while ")
        self.dispatch(t.test)
|
    def _For(self, t):
        self.fill("for ")
        self.dispatch(t.target)
        self.write(" in ")
        self.dispatch(t.iter)
        self.enter()
        self.dispatch(t.body)
        self.leave()
        if t.orelse:
            self.fill("else")
            self.enter()
            self.dispatch(t.orelse)
            self.leave
|
"RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&", "FloorDiv":"//"}
|
"LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&", "FloorDiv":"//", "Pow": "**"}
|
    def _UnaryOp(self, t):
        self.write(self.unop[t.op.__class__.__name__])
        self.write("(")
        self.dispatch(t.operand)
        self.write(")")
|
self.dispatch(t.stararg)
|
self.dispatch(t.starargs)
|
    def _Call(self, t):
        self.dispatch(t.func)
        self.write("(")
        comma = False
        for e in t.args:
            if comma: self.write(", ")
            else: comma = True
            self.dispatch(e)
        for e in t.keywords:
            if comma: self.write(", ")
            else: comma = True
            self.dispatch(e)
        if t.starargs:
            if comma: self.write(", ")
            else: comma = True
            self.write("*")
            self.dispatch(t.stararg)
        if t.kwargs:
            if comma: self.write(", ")
            else: comma = True
            self.write("**")
            self.dispatch(t.stararg)
        self.write(")")
|
self.dispatch(t.stararg)
|
self.dispatch(t.kwargs)
|
    def _Call(self, t):
        self.dispatch(t.func)
        self.write("(")
        comma = False
        for e in t.args:
            if comma: self.write(", ")
            else: comma = True
            self.dispatch(e)
        for e in t.keywords:
            if comma: self.write(", ")
            else: comma = True
            self.dispatch(e)
        if t.starargs:
            if comma: self.write(", ")
            else: comma = True
            self.write("*")
            self.dispatch(t.stararg)
        if t.kwargs:
            if comma: self.write(", ")
            else: comma = True
            self.write("**")
            self.dispatch(t.stararg)
        self.write(")")
|
            self.write("**"+self.kwarg)
        self.write(")")

def roundtrip(filename):
|
            self.write("**"+t.kwarg)

    def _keyword(self, t):
        self.write(t.arg)
        self.write("=")
        self.dispatch(t.value)

    def _Lambda(self, t):
        self.write("lambda ")
        self.dispatch(t.args)
        self.write(": ")
        self.dispatch(t.body)

def roundtrip(filename, output=sys.stdout):
|
    def _arguments(self, t):
        first = True
        nonDef = len(t.args)-len(t.defaults)
        for a in t.args[0:nonDef]:
            if first:first = False
            else: self.write(", ")
            self.dispatch(a)
        for a,d in zip(t.args[nonDef:], t.defaults):
            if first:first = False
            else: self.write(", ")
            self.dispatch(a),
            self.write("=")
            self.dispatch(d)
        if t.vararg:
            if first:first = False
            else: self.write(", ")
            self.write("*"+t.vararg)
        if t.kwarg:
            if first:first = False
            else: self.write(", ")
            self.write("**"+self.kwarg)
        self.write(")")
|
Unparser(tree)
|
    Unparser(tree, output)

def testdir(a):
    try:
        names = [n for n in os.listdir(a) if n.endswith('.py')]
    except OSError:
        print >> sys.stderr, "Directory not readable: %s" % a
    else:
        for n in names:
            fullname = os.path.join(a, n)
            if os.path.isfile(fullname):
                output = cStringIO.StringIO()
                print 'Testing %s' % fullname
                try:
                    roundtrip(fullname, output)
                except Exception, e:
                    print ' Failed to compile, exception is %s' % repr(e)
            elif os.path.isdir(fullname):
                testdir(fullname)

def main(args):
    if args[0] == '--testdir':
        for a in args[1:]:
            testdir(a)
    else:
        for a in args:
            roundtrip(a)
|
def roundtrip(filename):
    source = open(filename).read()
    tree = compile(source, filename, "exec", 0x400)
    Unparser(tree)
|
roundtrip(sys.argv[1])
|
main(sys.argv[1:])
|
def roundtrip(filename):
    source = open(filename).read()
    tree = compile(source, filename, "exec", 0x400)
    Unparser(tree)
|
def __init__(self):
|
    _locator = None
    document = None

    def __init__(self, documentFactory=None):
        self.documentFactory = documentFactory
|
    def __init__(self):
        self.firstEvent = [None, None]
        self.lastEvent = self.firstEvent
        self._ns_contexts = [{}] # contains uri -> prefix dicts
        self._current_context = self._ns_contexts[-1]
|
    def setDocumentLocator(self, locator):
        pass
|
    def setDocumentLocator(self, locator):
        self._locator = locator
|
    def setDocumentLocator(self, locator):
        pass
|
self._current_context[uri] = prefix
|
self._current_context[uri] = prefix or ''
|
    def startPrefixMapping(self, prefix, uri):
        self._ns_contexts.append(self._current_context.copy())
        self._current_context[uri] = prefix
|
del self._ns_contexts[-1]
|
self._current_context = self._ns_contexts.pop()
|
    def endPrefixMapping(self, prefix):
        del self._ns_contexts[-1]
|
parent = self.curNode
node.parentNode = parent
|
node.parentNode = self.curNode
|
    def startElementNS(self, name, tagName , attrs):
        uri,localname = name
        if uri:
            # When using namespaces, the reader may or may not
            # provide us with the original name. If not, create
            # *a* valid tagName from the current context.
            if tagName is None:
                tagName = self._current_context[uri] + ":" + localname
            node = self.document.createElementNS(uri, tagName)
        else:
            # When the tagname is not prefixed, it just appears as
            # localname
            node = self.document.createElement(localname)
|
self.curNode = node.parentNode
|
self.curNode = self.curNode.parentNode
|
    def endElementNS(self, name, tagName):
        node = self.curNode
        self.lastEvent[1] = [(END_ELEMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        #self.events.append((END_ELEMENT, node))
        self.curNode = node.parentNode
|
parent = self.curNode
node.parentNode = parent
|
node.parentNode = self.curNode
|
    def startElement(self, name, attrs):
        node = self.document.createElement(name)
|
node = self.document.createTextNode(chars[start:start + length])
|
node = self.document.createTextNode(chars)
|
    def ignorableWhitespace(self, chars):
        node = self.document.createTextNode(chars[start:start + length])
        parent = self.curNode
        node.parentNode = parent
        self.lastEvent[1] = [(IGNORABLE_WHITESPACE, node), None]
        self.lastEvent = self.lastEvent[1]
        #self.events.append((IGNORABLE_WHITESPACE, node))
|
node = self.curNode = self.document = minidom.Document()
node.parentNode = None
|
publicId = systemId = None
if self._locator:
    publicId = self._locator.getPublicId()
    systemId = self._locator.getSystemId()
if self.documentFactory is None:
    import xml.dom.minidom
    self.documentFactory = xml.dom.minidom.Document.implementation
node = self.documentFactory.createDocument(None, publicId, systemId)
self.curNode = self.document = node
|
    def startDocument(self):
        node = self.curNode = self.document = minidom.Document()
        node.parentNode = None
        self.lastEvent[1] = [(START_DOCUMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        #self.events.append((START_DOCUMENT, node))
|
assert not self.curNode.parentNode
for node in self.curNode.childNodes:
    if node.nodeType == node.ELEMENT_NODE:
        self.document.documentElement = node
|
assert self.curNode.parentNode is None, \
       "not all elements have been properly closed"
assert self.curNode.documentElement is not None, \
       "document does not contain a root element"
node = self.curNode.documentElement
|
    def endDocument(self):
        assert not self.curNode.parentNode
        for node in self.curNode.childNodes:
            if node.nodeType == node.ELEMENT_NODE:
                self.document.documentElement = node
        #if not self.document.documentElement:
        #    raise Error, "No document element"
|
def parse(stream_or_string, parser=None, bufsize=default_bufsize):
    if type(stream_or_string) is type(""):
|
def parse(stream_or_string, parser=None, bufsize=None):
    if bufsize is None:
        bufsize = default_bufsize
    if type(stream_or_string) in [type(""), type(u"")]:
|
def parse(stream_or_string, parser=None, bufsize=default_bufsize):
    if type(stream_or_string) is type(""):
        stream = open(stream_or_string)
    else:
        stream = stream_or_string
    if not parser:
        parser = xml.sax.make_parser()
    return DOMEventStream(stream, parser, bufsize)
|
if headers.has_key('www-authenticate'):
    stuff = headers['www-authenticate']
    import re
    match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff)
    if match:
        scheme, realm = match.groups()
        if scheme.lower() == 'basic':
            name = 'retry_' + self.type + '_basic_auth'
            if data is None:
                return getattr(self,name)(url, realm)
            else:
                return getattr(self,name)(url, realm, data)
|
if not headers.has_key('www-authenticate'):
    URLopener.http_error_default(self, url, fp, errmsg, headers)
stuff = headers['www-authenticate']
import re
match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff)
if not match:
    URLopener.http_error_default(self, url, fp, errcode, errmsg, headers)
scheme, realm = match.groups()
if scheme.lower() != 'basic':
    URLopener.http_error_default(self, url, fp, errcode, errmsg, headers)
name = 'retry_' + self.type + '_basic_auth'
if data is None:
    return getattr(self,name)(url, realm)
else:
    return getattr(self,name)(url, realm, data)
|
    def http_error_401(self, url, fp, errcode, errmsg, headers, data=None):
        """Error 401 -- authentication required.
        See this URL for a description of the basic authentication scheme:
        http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt"""
        if headers.has_key('www-authenticate'):
            stuff = headers['www-authenticate']
            import re
            match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff)
            if match:
                scheme, realm = match.groups()
                if scheme.lower() == 'basic':
                    name = 'retry_' + self.type + '_basic_auth'
                    if data is None:
                        return getattr(self,name)(url, realm)
                    else:
                        return getattr(self,name)(url, realm, data)
|
preprocessor=cc + " -E",
|
preprocessor=cpp,
|
def customize_compiler(compiler):
    """Do any platform-specific customization of a CCompiler instance.

    Mainly needed on Unix, so we can plug in the information that
    varies across Unices and is stored in Python's Makefile.
    """
    if compiler.compiler_type == "unix":
        (cc, opt, ccshared, ldshared, so_ext) = \
            get_config_vars('CC', 'OPT', 'CCSHARED', 'LDSHARED', 'SO')

        cc_cmd = cc + ' ' + opt
        compiler.set_executables(
            preprocessor=cc + " -E",    # not always!
            compiler=cc_cmd,
            compiler_so=cc_cmd + ' ' + ccshared,
            linker_so=ldshared,
            linker_exe=cc)

        compiler.shared_lib_extension = so_ext
|
if self.tkconsole.closing:
    return
|
    def poll_subprocess(self):
        clt = self.rpcclt
        if clt is None:
            return
        try:
            response = clt.pollresponse(self.active_seq, wait=0.05)
        except (EOFError, IOError, KeyboardInterrupt):
            # lost connection or subprocess terminated itself, restart
            # [the KBI is from rpc.SocketIO.handle_EOF()]
            if self.tkconsole.closing:
                return
            response = None
            self.restart_subprocess()
            self.tkconsole.endexecuting()
        if response:
            self.tkconsole.resetoutput()
            self.active_seq = None
            how, what = response
            console = self.tkconsole.console
            if how == "OK":
                if what is not None:
                    print >>console, `what`
            elif how == "EXCEPTION":
                if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
                    self.remote_stack_viewer()
            elif how == "ERROR":
                errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
                print >>sys.__stderr__, errmsg, what
                print >>console, errmsg, what
            # we received a response to the currently active seq number:
            self.tkconsole.endexecuting()
        # Reschedule myself in 50 ms
        self.tkconsole.text.after(50, self.poll_subprocess)
|
|
self.tkconsole.text.after(50, self.poll_subprocess)
|
if not self.tkconsole.closing:
    self.tkconsole.text.after(self.tkconsole.pollinterval, self.poll_subprocess)
|
    def poll_subprocess(self):
        clt = self.rpcclt
        if clt is None:
            return
        try:
            response = clt.pollresponse(self.active_seq, wait=0.05)
        except (EOFError, IOError, KeyboardInterrupt):
            # lost connection or subprocess terminated itself, restart
            # [the KBI is from rpc.SocketIO.handle_EOF()]
            if self.tkconsole.closing:
                return
            response = None
            self.restart_subprocess()
            self.tkconsole.endexecuting()
        if response:
            self.tkconsole.resetoutput()
            self.active_seq = None
            how, what = response
            console = self.tkconsole.console
            if how == "OK":
                if what is not None:
                    print >>console, `what`
            elif how == "EXCEPTION":
                if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
                    self.remote_stack_viewer()
            elif how == "ERROR":
                errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
                print >>sys.__stderr__, errmsg, what
                print >>console, errmsg, what
            # we received a response to the currently active seq number:
            self.tkconsole.endexecuting()
        # Reschedule myself in 50 ms
        self.tkconsole.text.after(50, self.poll_subprocess)
|