rem (stringlengths 0–322k) | add (stringlengths 0–2.05M) | context (stringlengths 8–228k)
---|---|---|
assert 'a%b' == 'a%%b' % {}
|
d = {} assert 'a%b' == 'a%%b' % d
|
def test_format_percent(self): assert 'a%b' == 'a%%b' % {}
|
assert '' % {} == '' raises(TypeError, "'' % 5")
|
d = {} assert '' % d == '' n = 5 raises(TypeError, "'' % n")
|
def test_format_error(self): assert '' % {} == '' raises(TypeError, "'' % 5") class MyMapping(object): def __getitem__(self, key): py.test.fail('should not be here') assert '' % MyMapping() == '' class MyMapping2(object): def __getitem__(self, key): return key assert '%(key)s'%MyMapping2() == 'key' assert u'%(key)s'%MyMapping2() == u'key'
|
assert 'a23b' == 'a%sb' % 23 assert '23b' == '%sb' % 23 assert 'a23' == 'a%s' % 23 assert '23' == '%s' % 23
|
n = 23 assert 'a23b' == 'a%sb' % n assert '23b' == '%sb' % n assert 'a23' == 'a%s' % n assert '23' == '%s' % n
|
def test_format_item(self): assert 'a23b' == 'a%sb' % 23 assert '23b' == '%sb' % 23 assert 'a23' == 'a%s' % 23 assert '23' == '%s' % 23
|
assert 'a%b' == 'a%%b' % () assert '%b' == '%%b' % () assert 'a%' == 'a%%' % () assert '%' == '%%' % ()
|
t = () assert 'a%b' == 'a%%b' % t assert '%b' == '%%b' % t assert 'a%' == 'a%%' % t assert '%' == '%%' % t
|
def test_format_percent(self): assert 'a%b' == 'a%%b' % () assert '%b' == '%%b' % () assert 'a%' == 'a%%' % () assert '%' == '%%' % ()
|
assert '23' == '%s' % '23' assert "'23'" == '%r' % '23' raises(TypeError, '%d'.__mod__, "23")
|
s = '23' assert '23' == '%s' % s assert "'23'" == '%r' % s raises(TypeError, '%d'.__mod__, s)
|
def test_format_string(self): assert '23' == '%s' % '23' assert "'23'" == '%r' % '23' raises(TypeError, '%d'.__mod__, "23")
|
assert '23' == '%d' % 23.456 assert '17' == '%x' % 23.456 assert '23.456' == '%s' % 23.456
|
f = 23.456 assert '23' == '%d' % f assert '17' == '%x' % f assert '23.456' == '%s' % f
|
def test_format_float(self): assert '23' == '%d' % 23.456 assert '17' == '%x' % 23.456 assert '23.456' == '%s' % 23.456 # for 'r' use a float that has an exact decimal rep: assert '23.125' == '%r' % 23.125 assert '0.028' == '%.3f' % 0.0276 # should work on most platforms... assert ' inf' == '%6g' % (1E200 * 1E200)
|
assert '23.125' == '%r' % 23.125 assert '0.028' == '%.3f' % 0.0276 assert ' inf' == '%6g' % (1E200 * 1E200)
|
g = 23.125 assert '23.125' == '%r' % g h = 0.0276 assert '0.028' == '%.3f' % h big = 1E200 assert ' inf' == '%6g' % (big * big)
|
def test_format_float(self): assert '23' == '%d' % 23.456 assert '17' == '%x' % 23.456 assert '23.456' == '%s' % 23.456 # for 'r' use a float that has an exact decimal rep: assert '23.125' == '%r' % 23.125 assert '0.028' == '%.3f' % 0.0276 # should work on most platforms... assert ' inf' == '%6g' % (1E200 * 1E200)
|
assert '23' == '%d' % 23 assert '17' == '%x' % 23 assert '0x17' == '%#x' % 23 assert '0x0' == '%#x' % 0 assert '23' == '%s' % 23 assert '23' == '%r' % 23
|
n = 23 z = 0 assert '23' == '%d' % n assert '17' == '%x' % n assert '0x17' == '%#x' % n assert '0x0' == '%#x' % z assert '23' == '%s' % n assert '23' == '%r' % n
|
def test_format_int(self): import sys assert '23' == '%d' % 23 assert '17' == '%x' % 23 assert '0x17' == '%#x' % 23 assert '0x0' == '%#x' % 0 assert '23' == '%s' % 23 assert '23' == '%r' % 23 assert ('%d' % (-sys.maxint-1,) == '-' + str(sys.maxint+1) == '-%d' % (sys.maxint+1,)) assert '1C' == '%X' % 28 assert '0X1C' == '%#X' % 28 assert '10' == '%o' % 8 assert '010' == '%#o' % 8 assert '-010' == '%#o' % -8 assert '0' == '%o' % 0 assert '0' == '%#o' % 0
|
assert '1C' == '%X' % 28 assert '0X1C' == '%#X' % 28 assert '10' == '%o' % 8 assert '010' == '%#o' % 8 assert '-010' == '%#o' % -8 assert '0' == '%o' % 0 assert '0' == '%#o' % 0 assert '-0x017' == '% assert '0' == '%
|
n = 28 m = 8 assert '1C' == '%X' % n assert '0X1C' == '%#X' % n assert '10' == '%o' % m assert '010' == '%#o' % m assert '-010' == '%#o' % -m assert '0' == '%o' % z assert '0' == '%#o' % z n = 23 f = 5 assert '-0x017' == '% assert '0' == '%
|
def test_format_int(self): import sys assert '23' == '%d' % 23 assert '17' == '%x' % 23 assert '0x17' == '%#x' % 23 assert '0x0' == '%#x' % 0 assert '23' == '%s' % 23 assert '23' == '%r' % 23 assert ('%d' % (-sys.maxint-1,) == '-' + str(sys.maxint+1) == '-%d' % (sys.maxint+1,)) assert '1C' == '%X' % 28 assert '0X1C' == '%#X' % 28 assert '10' == '%o' % 8 assert '010' == '%#o' % 8 assert '-010' == '%#o' % -8 assert '0' == '%o' % 0 assert '0' == '%#o' % 0
|
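The integer rows above exercise the alternate-form (`#`) flag of %-formatting. As a quick standalone illustration of the hex conversions being asserted (not part of the dataset; plain %-formatting, same result on Python 2 and 3):

```python
# '#' adds the radix prefix for hex conversions; the case of the prefix
# follows the conversion letter.
assert '%x' % 23 == '17'
assert '%#x' % 23 == '0x17'
assert '%#X' % 28 == '0X1C'
```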
assert '<[1, 2]>' == '<%s>' % [1,2] assert '<[1, 2]-[3, 4]>' == '<%s-%s>' % ([1,2], [3,4])
|
l = [1,2] assert '<[1, 2]>' == '<%s>' % l assert '<[1, 2]-[3, 4]>' == '<%s-%s>' % (l, [3,4])
|
def test_format_list(self): assert '<[1, 2]>' == '<%s>' % [1,2] assert '<[1, 2]-[3, 4]>' == '<%s-%s>' % ([1,2], [3,4])
|
assert '<(1, 2)>' == '<%s>' % ((1,2),) assert '<(1, 2)-(3, 4)>' == '<%s-%s>' % ((1,2), (3,4))
|
t = (1,2) assert '<(1, 2)>' == '<%s>' % (t,) assert '<(1, 2)-(3, 4)>' == '<%s-%s>' % (t, (3,4))
|
def test_format_tuple(self): assert '<(1, 2)>' == '<%s>' % ((1,2),) assert '<(1, 2)-(3, 4)>' == '<%s-%s>' % ((1,2), (3,4))
|
assert '<{1: 2}>' == '<%s>' % {1:2} assert '<{1: 2}-{3: 4}>' == '<%s-%s>' % ({1:2}, {3:4})
|
d = {1:2} assert '<{1: 2}>' == '<%s>' % d assert '<{1: 2}-{3: 4}>' == '<%s-%s>' % (d, {3:4})
|
def test_format_dict(self):
|
assert '%c' % 65 == 'A' assert '%c' % 'e' == 'e'
|
A = 65 e = 'e' assert '%c' % A == 'A' assert '%c' % e == 'e'
|
def test_format_char(self): import sys assert '%c' % 65 == 'A' assert '%c' % 'e' == 'e' raises(OverflowError, '%c'.__mod__, (256,)) raises(OverflowError, '%c'.__mod__, (-1,)) raises(OverflowError, u'%c'.__mod__, (sys.maxunicode+1,)) raises(TypeError, '%c'.__mod__, ("bla",)) raises(TypeError, '%c'.__mod__, ("",)) raises(TypeError, '%c'.__mod__, (['c'],))
|
assert "%3s" %'a' == ' a' assert "%-3s"%'a' == 'a '
|
a = 'a' assert "%3s" % a == ' a' assert "%-3s"% a == 'a '
|
def test_width(self): assert "%3s" %'a' == ' a' assert "%-3s"%'a' == 'a '
|
assert "%.0x" % 0 == '' assert "%.x" % 0 == '' assert "%.0d" % 0 == '' assert "%.i" % 0 == '' assert "%.0o" % 0 == '' assert "%.o" % 0 == ''
|
z = 0 assert "%.0x" % z == '' assert "%.x" % z == '' assert "%.0d" % z == '' assert "%.i" % z == '' assert "%.0o" % z == '' assert "%.o" % z == ''
|
def test_prec_cornercase(self): assert "%.0x" % 0 == '' assert "%.x" % 0 == '' assert "%.0d" % 0 == '' assert "%.i" % 0 == '' assert "%.0o" % 0 == '' assert "%.o" % 0 == ''
|
assert "%.3s"%'a' == 'a' assert "%.3s"%'abcde' == 'abc'
|
a = 'a' abcde = 'abcde' assert "%.3s"% a == 'a' assert "%.3s"% abcde == 'abc'
|
def test_prec_string(self): assert "%.3s"%'a' == 'a' assert "%.3s"%'abcde' == 'abc'
|
assert "%5.3s" %'a' == ' a' assert "%5.3s" %'abcde' == ' abc' assert "%-5.3s"%'a' == 'a ' assert "%-5.3s"%'abcde' == 'abc '
|
a = 'a' abcde = 'abcde' assert "%5.3s" % a == ' a' assert "%5.3s" % abcde == ' abc' assert "%-5.3s"% a == 'a ' assert "%-5.3s"% abcde == 'abc '
|
def test_prec_width_string(self): assert "%5.3s" %'a' == ' a' assert "%5.3s" %'abcde' == ' abc' assert "%-5.3s"%'a' == 'a ' assert "%-5.3s"%'abcde' == 'abc '
|
assert "%02d"%1 == "01" assert "%05d"%1 == "00001" assert "%-05d"%1 == "1 " assert "%04f"%2.25 == "2.250000" assert "%05g"%2.25 == "02.25" assert "%-05g"%2.25 =="2.25 " assert "%05s"%2.25 == " 2.25"
|
one = 1 ttf = 2.25 assert "%02d" % one == "01" assert "%05d" % one == "00001" assert "%-05d" % one == "1 " assert "%04f" % ttf == "2.250000" assert "%05g" % ttf == "02.25" assert "%-05g" % ttf =="2.25 " assert "%05s" % ttf == " 2.25"
|
def test_zero_pad(self): assert "%02d"%1 == "01" assert "%05d"%1 == "00001" assert "%-05d"%1 == "1 " assert "%04f"%2.25 == "2.250000" assert "%05g"%2.25 == "02.25" assert "%-05g"%2.25 =="2.25 " assert "%05s"%2.25 == " 2.25"
|
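A standalone restatement of the zero-padding cases asserted in the row above; the `%s` case simply right-aligns with spaces, since the `0` flag only affects numeric conversions:

```python
# Zero padding applies to numeric conversions; for %s the '0' flag is
# ignored and the value is padded with spaces to the requested width.
assert "%05d" % 1 == "00001"
assert "%05g" % 2.25 == "02.25"
assert "%05s" % 2.25 == " 2.25"
```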
assert "%*s" %( 5, 'abc') == ' abc' assert "%*s" %(-5, 'abc') == 'abc ' assert "%-*s"%( 5, 'abc') == 'abc ' assert "%-*s"%(-5, 'abc') == 'abc '
|
f = 5 assert "%*s" %( f, 'abc') == ' abc' assert "%*s" %(-f, 'abc') == 'abc ' assert "%-*s"%( f, 'abc') == 'abc ' assert "%-*s"%(-f, 'abc') == 'abc '
|
def test_star_width(self): assert "%*s" %( 5, 'abc') == ' abc' assert "%*s" %(-5, 'abc') == 'abc ' assert "%-*s"%( 5, 'abc') == 'abc ' assert "%-*s"%(-5, 'abc') == 'abc '
|
assert "%.*s"%( 3, 'abc') == 'abc' assert "%.*s"%( 3, 'abcde') == 'abc' assert "%.*s"%(-3, 'abc') == ''
|
t = 3 assert "%.*s"%( t, 'abc') == 'abc' assert "%.*s"%( t, 'abcde') == 'abc' assert "%.*s"%(-t, 'abc') == ''
|
def test_star_prec(self): assert "%.*s"%( 3, 'abc') == 'abc' assert "%.*s"%( 3, 'abcde') == 'abc' assert "%.*s"%(-3, 'abc') == ''
|
assert "%*.*s"%( 5, 3, 'abc') == ' abc' assert "%*.*s"%( 5, 3, 'abcde') == ' abc' assert "%*.*s"%(-5, 3, 'abcde') == 'abc '
|
f = 5 assert "%*.*s"%( f, 3, 'abc') == ' abc' assert "%*.*s"%( f, 3, 'abcde') == ' abc' assert "%*.*s"%(-f, 3, 'abcde') == 'abc '
|
def test_star_width_prec(self): assert "%*.*s"%( 5, 3, 'abc') == ' abc' assert "%*.*s"%( 5, 3, 'abcde') == ' abc' assert "%*.*s"%(-5, 3, 'abcde') == 'abc '
|
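The `*` rows above take width and precision from the argument tuple; a small standalone check of that behaviour (plain %-formatting, mirroring the assertions in the row):

```python
# Width and precision supplied via '*' arguments; a negative width flips
# the alignment to left-justified, exactly as the rows above assert.
assert "%*.*s" % (5, 3, 'abcde') == '  abc'
assert "%*.*s" % (-5, 3, 'abcde') == 'abc  '
```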
assert isinstance("%s" % (u"x"), unicode)
|
u = u"x" assert isinstance("%s" % u, unicode)
|
def test_unicode_convert(self): assert isinstance("%s" % (u"x"), unicode)
|
result = "%s" % u'\x80'
|
u = u'\x80' result = "%s" % u
|
def test_unicode_nonascii(self): """ Interpolating a unicode string with non-ascii characters in it into a string format should decode the format string as ascii and return unicode. """ result = "%s" % u'\x80' assert isinstance(result, unicode) assert result == u'\x80'
|
assert result == u'\x80'
|
assert result == u
|
def test_unicode_nonascii(self): """ Interpolating a unicode string with non-ascii characters in it into a string format should decode the format string as ascii and return unicode. """ result = "%s" % u'\x80' assert isinstance(result, unicode) assert result == u'\x80'
|
assert u"%.1d" % 3 == '3'
|
t = 3 assert u"%.1d" % t == '3'
|
def test_unicode_d(self): assert u"%.1d" % 3 == '3'
|
assert u'%x' % 10L == 'a'
|
ten = 10L assert u'%x' % ten == 'a'
|
def test_unicode_format_a(self): assert u'%x' % 10L == 'a'
|
assert "%x" % 100000000000L == "174876e800"
|
big = 100000000000L assert "%x" % big == "174876e800"
|
def test_long_no_overflow(self): assert "%x" % 100000000000L == "174876e800"
|
print '%032d' % -123456789012345678901234567890L assert '%032d' % -123456789012345678901234567890L == '-0123456789012345678901234567890'
|
big = -123456789012345678901234567890L print '%032d' % big assert '%032d' % big == '-0123456789012345678901234567890'
|
def test_missing_cases(self): print '%032d' % -123456789012345678901234567890L assert '%032d' % -123456789012345678901234567890L == '-0123456789012345678901234567890'
|
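The two rows above format long integers with `%x` and `%032d`. The same conversions hold for arbitrary-size ints in any current Python; a minimal standalone check of the values asserted above:

```python
# '%x' and '%032d' work unchanged for arbitrarily large integers;
# zero padding is inserted after the sign.
assert "%x" % 100000000000 == "174876e800"
assert '%032d' % -123456789012345678901234567890 == '-0123456789012345678901234567890'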
assert self.space.getattr(w_obj, self.space.wrap('x')) is None
|
assert self.space.getattr(w_obj, self.space.wrap('x')) is self.space.w_None
|
W_SomeType.typedef = typedef.TypeDef( 'some_type', x=prop)
|
def __init__(self, cpu=None, jitdrivers_sd=[]):
|
def __init__(self, cpu=None, jitdrivers_sd=[], debug=False):
|
def __init__(self, cpu=None, jitdrivers_sd=[]): self.cpu = cpu self.assembler = Assembler() self.callcontrol = CallControl(cpu, jitdrivers_sd) self._seen_files = set()
|
self.print_ssa_repr(ssarepr, portal_jd, verbose)
|
if self.debug: self.print_ssa_repr(ssarepr, portal_jd, verbose)
|
def transform_graph_to_jitcode(self, graph, jitcode, verbose): """Transform a graph into a JitCode containing the same bytecode in a different format. """ portal_jd = self.callcontrol.jitdriver_sd_from_portal_graph(graph) graph = copygraph(graph, shallowvars=True) # # step 1: mangle the graph so that it contains the final instructions # that we want in the JitCode, but still as a control flow graph transform_graph(graph, self.cpu, self.callcontrol, portal_jd) # # step 2: perform register allocation on it regallocs = {} for kind in KINDS: regallocs[kind] = perform_register_allocation(graph, kind) # # step 3: flatten the graph to produce human-readable "assembler", # which means mostly producing a linear list of operations and # inserting jumps or conditional jumps. This is a list of tuples # of the shape ("opname", arg1, ..., argN) or (Label(...),). ssarepr = flatten_graph(graph, regallocs) # # step 3b: compute the liveness around certain operations compute_liveness(ssarepr) # # step 4: "assemble" it into a JitCode, which contains a sequence # of bytes and lists of constants. It's during this step that # constants are cast to their normalized type (Signed, GCREF or # Float). self.assembler.assemble(ssarepr, jitcode) # # print the resulting assembler self.print_ssa_repr(ssarepr, portal_jd, verbose)
|
return space.wrap(compile_ast(space, ast, info))
|
try: result = compile_ast(space, ast, info) except error.IndentationError, e: raise OperationError(space.w_IndentationError, e.wrap_info(space)) except error.SyntaxError, e: raise OperationError(space.w_SyntaxError, e.wrap_info(space)) return space.wrap(result)
|
def descr_compile(self, space, filename="<syntax-tree>"): info = pyparse.CompileInfo(filename, self.mode) ast = ast_from_node(space, self.tree, info) return space.wrap(compile_ast(space, ast, info))
|
def compile_module(modname, **kwds):
|
def compile_module(space, modname, **kwds):
|
def compile_module(modname, **kwds): """ Build an extension module and return the filename of the resulting native code file. modname is the name of the module, possibly including dots if it is a module inside a package. Any extra keyword arguments are passed on to ExternalCompilationInfo to build the module (so specify your source with one of those). """ modname = modname.split('.')[-1] eci = ExternalCompilationInfo( export_symbols=['init%s' % (modname,)], include_dirs=api.include_dirs, **kwds ) eci = eci.convert_sources_to_files() dirname = (udir/uniquemodulename('module')).ensure(dir=1) soname = platform.platform.compile( [], eci, outputfilename=str(dirname/modname), standalone=False) if sys.platform == 'win32': pydname = soname.new(purebasename=modname, ext='.pyd') else: pydname = soname.new(purebasename=modname, ext='.so') soname.rename(pydname) return str(pydname)
|
if sys.platform == 'win32': pydname = soname.new(purebasename=modname, ext='.pyd') else: pydname = soname.new(purebasename=modname, ext='.so')
|
from pypy.module.imp.importing import get_so_extension pydname = soname.new(purebasename=modname, ext=get_so_extension(space))
|
def compile_module(modname, **kwds): """ Build an extension module and return the filename of the resulting native code file. modname is the name of the module, possibly including dots if it is a module inside a package. Any extra keyword arguments are passed on to ExternalCompilationInfo to build the module (so specify your source with one of those). """ modname = modname.split('.')[-1] eci = ExternalCompilationInfo( export_symbols=['init%s' % (modname,)], include_dirs=api.include_dirs, **kwds ) eci = eci.convert_sources_to_files() dirname = (udir/uniquemodulename('module')).ensure(dir=1) soname = platform.platform.compile( [], eci, outputfilename=str(dirname/modname), standalone=False) if sys.platform == 'win32': pydname = soname.new(purebasename=modname, ext='.pyd') else: pydname = soname.new(purebasename=modname, ext='.so') soname.rename(pydname) return str(pydname)
|
return compile_module(name, **kwds)
|
return compile_module(self.space, name, **kwds)
|
def compile_module(self, name, **kwds): """ Build an extension module linked against the cpyext api library. """ state = self.space.fromcache(State) api_library = state.api_lib if sys.platform == 'win32': kwds["libraries"] = [api_library] # '%s' undefined; assuming extern returning int kwds["compile_extra"] = ["/we4013"] else: kwds["link_files"] = [str(api_library + '.so')] if sys.platform == 'linux2': kwds["compile_extra"]=["-Werror=implicit-function-declaration"] return compile_module(name, **kwds)
|
self.optimize_loop(ops, 'Not, Not, Not', ops)
|
self.optimize_loop(ops, 'Not, Not, Not', expected, preamble)
|
def test_p123_array(self): ops = """ [i1, p2, p3] i3 = getarrayitem_gc(p3, 0, descr=arraydescr) escape(i3) p1 = new_array(1, descr=arraydescr) setarrayitem_gc(p1, 0, i1, descr=arraydescr) jump(i1, p1, p2) """ # We cannot track virtuals that survive for more than two iterations. self.optimize_loop(ops, 'Not, Not, Not', ops)
|
expected = """ [i1, i2]
|
preamble = """ [i1, p2] i2 = getfield_gc(p2, descr=adescr)
|
def test_vstruct_1(self): ops = """ [i1, p2] i2 = getfield_gc(p2, descr=adescr) escape(i2) p3 = new(descr=ssize) setfield_gc(p3, i1, descr=adescr) jump(i1, p3) """ expected = """ [i1, i2] escape(i2) jump(i1, i1) """ self.optimize_loop(ops, 'Not, VStruct(ssize, adescr=Not)', expected)
|
jump(i1, i1) """ self.optimize_loop(ops, 'Not, VStruct(ssize, adescr=Not)', expected)
|
jump(i1) """ expected = """ [i1] escape(i1) jump(i1) """ self.optimize_loop(ops, 'Not, Not', expected, preamble)
|
def test_vstruct_1(self): ops = """ [i1, p2] i2 = getfield_gc(p2, descr=adescr) escape(i2) p3 = new(descr=ssize) setfield_gc(p3, i1, descr=adescr) jump(i1, p3) """ expected = """ [i1, i2] escape(i2) jump(i1, i1) """ self.optimize_loop(ops, 'Not, VStruct(ssize, adescr=Not)', expected)
|
self.optimize_loop(ops, 'Not, Not, Not', ops)
|
self.optimize_loop(ops, 'Not, Not, Not', expected, preamble)
|
def test_p123_vstruct(self): ops = """ [i1, p2, p3] i3 = getfield_gc(p3, descr=adescr) escape(i3) p1 = new(descr=ssize) setfield_gc(p1, i1, descr=adescr) jump(i1, p1, p2) """ # We cannot track virtuals that survive for more than two iterations. self.optimize_loop(ops, 'Not, Not, Not', ops)
|
assert not self.borrowed_objects and not self.borrow_mapping
|
assert not self.borrowed_objects assert self.borrow_mapping == {None: {}}
|
def _freeze_(self): assert not self.borrowed_objects and not self.borrow_mapping self.py_objects_r2w.clear() # is not valid anymore after translation return False
|
for fn in [self.posix.unlink, self.posix.remove, self.posix.chdir, self.posix.mkdir, self.posix.rmdir, self.posix.listdir, self.posix.readlink, self.posix.chroot]: try: fn('qowieuqw/oeiu') except OSError, e: assert e.filename == 'qowieuqw/oeiu' else: assert 0
|
for fname in ['unlink', 'remove', 'chdir', 'mkdir', 'rmdir', 'listdir', 'readlink', 'chroot']: if hasattr(self.posix, fname): func = getattr(self.posix, fname) try: func('qowieuqw/oeiu') except OSError, e: assert e.filename == 'qowieuqw/oeiu' else: assert 0
|
def test_filename_exception(self): for fn in [self.posix.unlink, self.posix.remove, self.posix.chdir, self.posix.mkdir, self.posix.rmdir, self.posix.listdir, self.posix.readlink, self.posix.chroot]: try: fn('qowieuqw/oeiu') except OSError, e: assert e.filename == 'qowieuqw/oeiu' else: assert 0
|
try: self.posix.chown('qowieuqw/oeiu', 0, 0) except OSError, e: assert e.filename == 'qowieuqw/oeiu' else: assert 0
|
if hasattr(self.posix, 'chown'): try: self.posix.chown('qowieuqw/oeiu', 0, 0) except OSError, e: assert e.filename == 'qowieuqw/oeiu' else: assert 0
|
def test_chown_exception(self): try: self.posix.chown('qowieuqw/oeiu', 0, 0) except OSError, e: assert e.filename == 'qowieuqw/oeiu' else: assert 0
|
return cls.ll_search(s1, s2, start, end, FAST_COUNT)
|
res = cls.ll_search(s1, s2, start, end, FAST_COUNT) if res < 0: res = 0 return res
|
def ll_count(cls, s1, s2, start, end): if start < 0: start = 0 if end > len(s1.chars): end = len(s1.chars) if end - start < 0: return 0
|
x = r_longlong(maxint32+1) y = r_longlong(maxint32+2) zero = longlong2float(r_longlong(0))
|
if IS_32_BIT: x = r_longlong(maxint32+1) y = r_longlong(maxint32+2) zero = longlong2float(r_longlong(0)) else: x = maxint32+1 y = maxint32+2 zero = 0
|
def test_slonglong_args(self): """ long long sum_xy_longlong(long long x, long long y) { return x+y; } """ maxint32 = 2147483647 # we cannot really go above maxint on 64 bits # (and we would not test anything, as there long # is the same as long long) libfoo = self.get_libfoo() func = (libfoo, 'sum_xy_longlong', [types.slonglong, types.slonglong], types.slonglong) x = r_longlong(maxint32+1) y = r_longlong(maxint32+2) zero = longlong2float(r_longlong(0)) res = self.call(func, [x, y], rffi.LONGLONG, init_result=zero) if IS_32_BIT: # obscure, on 32bit it's really a long long, so it returns a # DOUBLE because of the JIT hack res = float2longlong(res) expected = maxint32*2 + 3 assert res == expected
|
sp_patch_location = self.mc.curraddr() for _ in range((self.mc.size_of_gen_load_int+WORD)//WORD): self.mc.MOV_rr(r.r0.value, r.r0.value)
|
sp_patch_location = self._prepare_sp_patch_location()
|
def assemble_loop(self, inputargs, operations, looptoken): longevity = compute_vars_longevity(inputargs, operations) regalloc = ARMRegisterManager(longevity, assembler=self, frame_manager=ARMFrameManager()) self.align() loop_start=self.mc.curraddr() self.gen_func_prolog() # Generate NOP as placeholder to patch the instruction(s) to update the # sp according to the number of spilled variables sp_patch_location = self.mc.curraddr() for _ in range((self.mc.size_of_gen_load_int+WORD)//WORD): self.mc.MOV_rr(r.r0.value, r.r0.value) # END self.gen_bootstrap_code(inputargs, regalloc, looptoken) loop_head=self.mc.curraddr() looptoken._arm_bootstrap_code = loop_start looptoken._arm_loop_code = loop_head fcond=c.AL print inputargs, operations for op in operations: # XXX consider merging ops with next one if it is an adecuate guard opnum = op.getopnum() fcond = self.operations[opnum](self, op, regalloc, fcond)
|
return w_result
|
def descr_new_zipimporter(space, w_type, name): w = space.wrap w_ZipImportError = space.getattr(space.getbuiltinmodule('zipimport'), w('ZipImportError')) ok = False parts_ends = [i for i in range(0, len(name)) if name[i] == os.path.sep or name[i] == ZIPSEP] parts_ends.append(len(name)) filename = "" # make annotator happy for i in parts_ends: filename = name[:i] if not filename: filename = os.path.sep try: s = os.stat(filename) except OSError: raise operationerrfmt(w_ZipImportError, "Cannot find name %s", filename) if not stat.S_ISDIR(s.st_mode): ok = True break if not ok: raise operationerrfmt(w_ZipImportError, "Did not find %s to be a valid zippath", name) try: w_result = zip_cache.get(filename) if w_result is None: raise operationerrfmt(w_ZipImportError, "Cannot import %s from zipfile, recursion detected or" "already tried and failed", name) return w_result except KeyError: zip_cache.cache[filename] = None try: zip_file = RZipFile(filename, 'r') except (BadZipfile, OSError): raise operationerrfmt(w_ZipImportError, "%s seems not to be a zipfile", filename) zip_file.close() prefix = name[len(filename):] if prefix.startswith(os.path.sep) or prefix.startswith(ZIPSEP): prefix = prefix[1:] w_result = space.wrap(W_ZipImporter(space, name, filename, zip_file.NameToInfo, prefix)) zip_cache.set(filename, w_result) return w_result
|
|
exename = static_platform.compile( [cfile], ExternalCompilationInfo(), outputfilename = "dosmaperr", standalone=True) output = os.popen(str(exename)) errors = dict(map(int, line.split()) for line in output)
|
try: exename = static_platform.compile( [cfile], ExternalCompilationInfo(), outputfilename = "dosmaperr", standalone=True) except WindowsError: errors = { 2: 2, 3: 2, 4: 24, 5: 13, 6: 9, 7: 12, 8: 12, 9: 12, 10: 7, 11: 8, 15: 2, 16: 13, 17: 18, 18: 2, 19: 13, 20: 13, 21: 13, 22: 13, 23: 13, 24: 13, 25: 13, 26: 13, 27: 13, 28: 13, 29: 13, 30: 13, 31: 13, 32: 13, 33: 13, 34: 13, 35: 13, 36: 13, 53: 2, 65: 13, 67: 2, 80: 17, 82: 13, 83: 13, 89: 11, 108: 13, 109: 32, 112: 28, 114: 9, 128: 10, 129: 10, 130: 9, 132: 13, 145: 41, 158: 13, 161: 2, 164: 11, 167: 13, 183: 17, 188: 8, 189: 8, 190: 8, 191: 8, 192: 8, 193: 8, 194: 8, 195: 8, 196: 8, 197: 8, 198: 8, 199: 8, 200: 8, 201: 8, 202: 8, 206: 2, 215: 11, 1816: 12, } else: output = os.popen(str(exename)) errors = dict(map(int, line.split()) for line in output)
|
def build_winerror_to_errno(): """Build a dictionary mapping windows error numbers to POSIX errno. The function returns the dict, and the default value for codes not in the dict.""" # Prior to Visual Studio 8, the MSVCRT dll doesn't export the # _dosmaperr() function, which is available only when compiled # against the static CRT library. from pypy.translator.platform import platform, Windows static_platform = Windows() if static_platform.name == 'msvc': static_platform.cflags = ['/MT'] # static CRT static_platform.version = 0 # no manifest cfile = udir.join('dosmaperr.c') cfile.write(r''' #include <errno.h> int main() { int i; for(i=1; i < 65000; i++) { _dosmaperr(i); if (errno == EINVAL) continue; printf("%d\t%d\n", i, errno); } return 0; }''') exename = static_platform.compile( [cfile], ExternalCompilationInfo(), outputfilename = "dosmaperr", standalone=True) output = os.popen(str(exename)) errors = dict(map(int, line.split()) for line in output) return errors, errno.EINVAL
|
pass
|
continue
|
def finite(x): return not isinf(x) and not isnan(x)
|
guard_class(p2, ConstClass(node_vtable)) [] guard_class(p3, ConstClass(node_vtable)) []
|
def test_bug_3bis(self): ops = """ [p1] guard_nonnull(p1) [] guard_class(p1, ConstClass(node_vtable2)) [] p2 = getfield_gc(p1, descr=nextdescr) guard_nonnull(12) [] guard_class(p2, ConstClass(node_vtable)) [] p3 = getfield_gc(p1, descr=otherdescr) guard_nonnull(12) [] guard_class(p3, ConstClass(node_vtable)) [] p1a = new_with_vtable(ConstClass(node_vtable2)) p2a = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p3, p2a, descr=otherdescr) p3a = new_with_vtable(ConstClass(node_vtable)) escape(p3a) setfield_gc(p1a, p2a, descr=nextdescr) setfield_gc(p1a, p3a, descr=otherdescr) jump(p1a) """ expected = """ [p2, p3] guard_class(p2, ConstClass(node_vtable)) [] guard_class(p3, ConstClass(node_vtable)) [] p2a = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p3, p2a, descr=otherdescr) p3a = new_with_vtable(ConstClass(node_vtable)) escape(p3a) jump(p2a, p3a) """ self.optimize_loop(ops, expected) # XXX was Virtual(node_vtable2, nextdescr=Not, otherdescr=Not)
|
|
self.optimize_loop(ops, expected)
|
self.optimize_loop(ops, expected, preamble)
|
def test_bug_3bis(self): ops = """ [p1] guard_nonnull(p1) [] guard_class(p1, ConstClass(node_vtable2)) [] p2 = getfield_gc(p1, descr=nextdescr) guard_nonnull(12) [] guard_class(p2, ConstClass(node_vtable)) [] p3 = getfield_gc(p1, descr=otherdescr) guard_nonnull(12) [] guard_class(p3, ConstClass(node_vtable)) [] p1a = new_with_vtable(ConstClass(node_vtable2)) p2a = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p3, p2a, descr=otherdescr) p3a = new_with_vtable(ConstClass(node_vtable)) escape(p3a) setfield_gc(p1a, p2a, descr=nextdescr) setfield_gc(p1a, p3a, descr=otherdescr) jump(p1a) """ expected = """ [p2, p3] guard_class(p2, ConstClass(node_vtable)) [] guard_class(p3, ConstClass(node_vtable)) [] p2a = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p3, p2a, descr=otherdescr) p3a = new_with_vtable(ConstClass(node_vtable)) escape(p3a) jump(p2a, p3a) """ self.optimize_loop(ops, expected) # XXX was Virtual(node_vtable2, nextdescr=Not, otherdescr=Not)
|
except InvalidLoop as e:
|
except InvalidLoop:
|
def compile_new_loop(metainterp, old_loop_tokens, greenkey, start, full_preamble_needed=True): """Try to compile a new loop by closing the current history back to the first operation. """ history = metainterp.history loop = create_empty_loop(metainterp) loop.inputargs = history.inputargs for box in loop.inputargs: assert isinstance(box, Box) # make a copy, because optimize_loop can mutate the ops and descrs h_ops = history.operations loop.operations = [h_ops[i].clone() for i in range(start, len(h_ops))] metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd loop_token = make_loop_token(len(loop.inputargs), jitdriver_sd) loop.token = loop_token loop.operations[-1].setdescr(loop_token) # patch the target of the JUMP loop.preamble = create_empty_loop(metainterp, 'Preamble ') loop.preamble.inputargs = loop.inputargs loop.preamble.token = make_loop_token(len(loop.inputargs), jitdriver_sd) try: old_loop_token = jitdriver_sd.warmstate.optimize_loop( metainterp_sd, old_loop_tokens, loop) except InvalidLoop as e: #import traceback; import pdb; pdb.set_trace() return None if old_loop_token is not None: metainterp.staticdata.log("reusing old loop") return old_loop_token if loop.preamble.operations is not None: send_loop_to_backend(metainterp_sd, loop, "loop") record_loop_or_bridge(metainterp_sd, loop) token = loop.preamble.token if full_preamble_needed or not loop.preamble.token.short_preamble: send_loop_to_backend(metainterp_sd, loop.preamble, "entry bridge") insert_loop_token(old_loop_tokens, loop.preamble.token) jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( greenkey, loop.preamble.token) record_loop_or_bridge(metainterp_sd, loop.preamble) return token else: send_loop_to_backend(metainterp_sd, loop, "loop") insert_loop_token(old_loop_tokens, loop_token) jitdriver_sd.warmstate.attach_unoptimized_bridge_from_interp( greenkey, loop.token) record_loop_or_bridge(metainterp_sd, loop) return loop_token
|
self.optimize_loop(ops, 'Not, Not, Not, Not', ops)
|
preamble = """ [p1, i1, i2, i3] setfield_gc(p1, i1, descr=valuedescr) guard_true(i3) [] i4 = int_neg(i2) setfield_gc(p1, i2, descr=valuedescr) jump(p1, i1, i2, i4) """ expected = """ [p1, i1, i2, i4] setfield_gc(p1, i1, descr=valuedescr) guard_true(i4) [] setfield_gc(p1, i2, descr=valuedescr) jump(p1, i1, i2, 1) """ self.optimize_loop(ops, 'Not, Not, Not, Not', expected, preamble)
|
def test_duplicate_setfield_residual_guard_1(self): ops = """ [p1, i1, i2, i3] setfield_gc(p1, i1, descr=valuedescr) guard_true(i3) [] i4 = int_neg(i2) setfield_gc(p1, i2, descr=valuedescr) jump(p1, i1, i2, i4) """ self.optimize_loop(ops, 'Not, Not, Not, Not', ops)
|
expected = """
|
preamble = """
|
def test_duplicate_setfield_residual_guard_2(self): # the difference with the previous test is that the field value is # a virtual, which we try hard to keep virtual ops = """ [p1, i2, i3] p2 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p1, p2, descr=nextdescr) guard_true(i3) [] i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) jump(p1, i2, i4) """ expected = """ [p1, i2, i3] guard_true(i3) [p1] i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) jump(p1, i2, i4) """ self.optimize_loop(ops, 'Not, Not, Not', expected)
|
self.optimize_loop(ops, 'Not, Not, Not', expected)
|
expected = """ [p1, i2, i4] guard_true(i4) [p1] setfield_gc(p1, NULL, descr=nextdescr) jump(p1, i2, 1) """ self.optimize_loop(ops, 'Not, Not, Not', expected, preamble)
|
def test_duplicate_setfield_residual_guard_2(self): # the difference with the previous test is that the field value is # a virtual, which we try hard to keep virtual ops = """ [p1, i2, i3] p2 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p1, p2, descr=nextdescr) guard_true(i3) [] i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) jump(p1, i2, i4) """ expected = """ [p1, i2, i3] guard_true(i3) [p1] i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) jump(p1, i2, i4) """ self.optimize_loop(ops, 'Not, Not, Not', expected)
|
expected = """
|
preamble = """
|
def test_duplicate_setfield_residual_guard_3(self): ops = """ [p1, i2, i3] p2 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, i2, descr=valuedescr) setfield_gc(p1, p2, descr=nextdescr) guard_true(i3) [] i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) jump(p1, i2, i4) """ expected = """ [p1, i2, i3] guard_true(i3) [i2, p1] i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) jump(p1, i2, i4) """ self.optimize_loop(ops, 'Not, Not, Not', expected)
|
self.optimize_loop(ops, 'Not, Not, Not, Not', ops)
|
preamble = ops expected = """ [p1, i1, i2, i4] setfield_gc(p1, i1, descr=valuedescr) i5 = int_eq(i4, 5) guard_true(i5) [] setfield_gc(p1, i2, descr=valuedescr) jump(p1, i1, i2, 5) """ self.optimize_loop(ops, 'Not, Not, Not, Not', expected, preamble)
|
def test_duplicate_setfield_residual_guard_4(self): # test that the setfield_gc does not end up between int_eq and # the following guard_true ops = """ [p1, i1, i2, i3] setfield_gc(p1, i1, descr=valuedescr) i5 = int_eq(i3, 5) guard_true(i5) [] i4 = int_neg(i2) setfield_gc(p1, i2, descr=valuedescr) jump(p1, i1, i2, i4) """ self.optimize_loop(ops, 'Not, Not, Not, Not', ops)
|
PyObjectFields, Py_ssize_t, Py_TPFLAGS_READYING, Py_TPFLAGS_READY
|
PyVarObjectFields, Py_ssize_t, Py_TPFLAGS_READYING, Py_TPFLAGS_READY
|
from pypy.interpreter.typedef import TypeDef
|
if index.step is not None: raise TypeError("3 arg slices not supported (for no reason)") check_bounds = hasattr(self, '_length_') if index.start is not None:
|
if hasattr(self, '_length_'): start, stop, step = index.indices(self._length_) else: step = index.step if step is None: step = 1
|
def array_get_slice_params(self, index): if index.step is not None: raise TypeError("3 arg slices not supported (for no reason)") check_bounds = hasattr(self, '_length_') if index.start is not None: start = index.start if check_bounds and start < 0: start = 0 else: start = 0 if index.stop is not None: stop = index.stop if check_bounds and stop > self._length_: stop = self._length_ else: stop = self._length_ return start, stop
|
if check_bounds and start < 0: start = 0
|
stop = index.stop if start is None: if step > 0: start = 0 else: raise ValueError("slice start is required for step < 0") if stop is None: raise ValueError("slice stop is required") return start, stop, step def array_slice_setitem(self, index, value): start, stop, step = self._get_slice_params(index) if ((step < 0 and stop >= start) or (step > 0 and start >= stop)): slicelength = 0 elif step < 0: slicelength = (stop - start + 1) / step + 1
|
def array_get_slice_params(self, index): if index.step is not None: raise TypeError("3 arg slices not supported (for no reason)") check_bounds = hasattr(self, '_length_') if index.start is not None: start = index.start if check_bounds and start < 0: start = 0 else: start = 0 if index.stop is not None: stop = index.stop if check_bounds and stop > self._length_: stop = self._length_ else: stop = self._length_ return start, stop
|
start = 0 if index.stop is not None: stop = index.stop if check_bounds and stop > self._length_: stop = self._length_ else: stop = self._length_ return start, stop def array_slice_setitem(self, index, value): start, stop = self._get_slice_params(index) if stop - start != len(value):
|
slicelength = (stop - start - 1) / step + 1; if slicelength != len(value):
|
def array_get_slice_params(self, index): if index.step is not None: raise TypeError("3 arg slices not supported (for no reason)") check_bounds = hasattr(self, '_length_') if index.start is not None: start = index.start if check_bounds and start < 0: start = 0 else: start = 0 if index.stop is not None: stop = index.stop if check_bounds and stop > self._length_: stop = self._length_ else: stop = self._length_ return start, stop
|
for i in range(start, stop): self[i] = value[i - start]
|
for i, j in enumerate(range(start, stop, step)): self[j] = value[j]
|
def array_slice_setitem(self, index, value): start, stop = self._get_slice_params(index) if stop - start != len(value): raise ValueError("Can only assign slices of the same length") for i in range(start, stop): self[i] = value[i - start]
|
start, stop = self._get_slice_params(index) l = [self[i] for i in range(start, stop)]
|
start, stop, step = self._get_slice_params(index) l = [self[i] for i in range(start, stop, step)]
|
def array_slice_getitem(self, index): start, stop = self._get_slice_params(index) l = [self[i] for i in range(start, stop)] letter = getattr(self._type_, '_type_', None) if letter == 'c': return "".join(l) if letter == 'u': return u"".join(l) return l
|
return ctypes.cast(self._storage, ctypes.c_void_p).value
|
return intmask(ctypes.cast(self._storage, ctypes.c_void_p).value)
|
def _addressof_storage(self): "Returns the storage address as an int" if self._storage is None or self._storage is True: raise ValueError("Not a ctypes allocated structure") return ctypes.cast(self._storage, ctypes.c_void_p).value
|
assert err == 0 or err == 10056
|
assert (err == 0 or err == 10056 or err == getattr(errno, 'EISCONN', '???'))
|
def test_simple(): serv = RSocket(AF_INET, SOCK_STREAM) serv.bind(INETAddress('127.0.0.1', INADDR_ANY)) serv.listen(1) servaddr = serv.getsockname() events = poll({serv.fileno(): POLLIN}, timeout=100) assert len(events) == 0 cli = RSocket(AF_INET, SOCK_STREAM) cli.setblocking(False) err = cli.connect_ex(servaddr) assert err != 0 events = poll({serv.fileno(): POLLIN}, timeout=500) assert len(events) == 1 assert events[0][0] == serv.fileno() assert events[0][1] & POLLIN servconn, cliaddr = serv.accept() events = poll({serv.fileno(): POLLIN, cli.fileno(): POLLOUT}, timeout=500) assert len(events) == 1 assert events[0][0] == cli.fileno() assert events[0][1] & POLLOUT err = cli.connect_ex(servaddr) # win32 oddity: returns WSAEISCONN when the connection finally succeed. assert err == 0 or err == 10056 events = poll({servconn.fileno(): POLLIN, cli.fileno(): POLLIN}, timeout=100) assert len(events) == 0 events = poll({servconn.fileno(): POLLOUT, cli.fileno(): POLLOUT}, timeout=100) assert len(events) >= 1 cli.close() servconn.close() serv.close()
|
items = [] if not f: return space.newtuple([])
|
def w_descr__framestack(space, self): assert isinstance(self, AppCoroutine) f = self.subctx.topframe items = [] if not f: return space.newtuple([]) while f is not None: items.append(space.wrap(f)) f = f.f_backref() items.reverse() return space.newtuple(items)
|
|
items.append(space.wrap(f))
|
counter += 1
|
def w_descr__framestack(space, self): assert isinstance(self, AppCoroutine) f = self.subctx.topframe items = [] if not f: return space.newtuple([]) while f is not None: items.append(space.wrap(f)) f = f.f_backref() items.reverse() return space.newtuple(items)
|
items.reverse()
|
items = [None] * counter f = self.subctx.topframe while f is not None: counter -= 1 assert counter >= 0 items[counter] = space.wrap(f) f = f.f_backref() assert counter == 0
|
def w_descr__framestack(space, self): assert isinstance(self, AppCoroutine) f = self.subctx.topframe items = [] if not f: return space.newtuple([]) while f is not None: items.append(space.wrap(f)) f = f.f_backref() items.reverse() return space.newtuple(items)
|
return None
|
return (None, 0)
|
def callinfo_for_oopspec(oopspecindex): """A function that returns the calldescr and the function address (as an int) of one of the OS_XYZ functions defined above. Don't use this if there might be several implementations of the same OS_XYZ specialized by type, e.g. OS_ARRAYCOPY.""" try: return _callinfo_for_oopspec[oopspecindex] except KeyError: return None
|
_, func_as_int = _callinfo_for_oopspec.get(oopspecindex, (None, 0))
|
_, func_as_int = callinfo_for_oopspec(oopspecindex)
|
def _funcptr_for_oopspec_memo(oopspecindex): from pypy.jit.codewriter import heaptracker _, func_as_int = _callinfo_for_oopspec.get(oopspecindex, (None, 0)) funcadr = heaptracker.int2adr(func_as_int) return funcadr.ptr
|
return r_class.BITS < LONG_BIT
|
return r_class.BITS < LONG_BIT or ( r_class.BITS == LONG_BIT and r_class.SIGNED)
|
def _should_widen_type(tp): from pypy.rpython.lltypesystem import lltype, rffi if tp is lltype.Bool: return True if tp is lltype.Signed: return False r_class = rffi.platform.numbertype_to_rclass[tp] assert issubclass(r_class, base_int) return r_class.BITS < LONG_BIT
|
isinf.unwrap_spec = [ObjSpace, float, float]
|
isinf.unwrap_spec = [ObjSpace, float]
|
def isinf(space, x): """Return True if x is infinity.""" return space.wrap(rarithmetic.isinf(x))
|
isnan.unwrap_spec = [ObjSpace, float, float]
|
isnan.unwrap_spec = [ObjSpace, float]
|
def isnan(space, x): """Return True if x is not a number.""" return space.wrap(rarithmetic.isnan(x))
|
assert descr2.repr_of_descr() == '<GcPtrFieldDescr %d>' % o
|
assert descr2.repr_of_descr() == '<GcPtrFieldDescr S.y %d>' % o
|
def test_repr_of_descr(): c0 = GcCache(False) T = lltype.GcStruct('T') S = lltype.GcStruct('S', ('x', lltype.Char), ('y', lltype.Ptr(T)), ('z', lltype.Ptr(T))) descr1 = get_size_descr(c0, S) s = symbolic.get_size(S, False) assert descr1.repr_of_descr() == '<SizeDescr %d>' % s # descr2 = get_field_descr(c0, S, 'y') o, _ = symbolic.get_field_token(S, 'y', False) assert descr2.repr_of_descr() == '<GcPtrFieldDescr %d>' % o # descr2i = get_field_descr(c0, S, 'x') o, _ = symbolic.get_field_token(S, 'x', False) assert descr2i.repr_of_descr() == '<CharFieldDescr %d>' % o # descr3 = get_array_descr(c0, lltype.GcArray(lltype.Ptr(S))) assert descr3.repr_of_descr() == '<GcPtrArrayDescr>' # descr3i = get_array_descr(c0, lltype.GcArray(lltype.Char)) assert descr3i.repr_of_descr() == '<CharArrayDescr>' # cache = {} descr4 = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Ptr(S)) assert 'GcPtrCallDescr' in descr4.repr_of_descr() # descr4i = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Char) assert 'CharCallDescr' in descr4i.repr_of_descr() # descr4f = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Float) assert 'FloatCallDescr' in descr4f.repr_of_descr()
|
assert descr2i.repr_of_descr() == '<CharFieldDescr %d>' % o
|
assert descr2i.repr_of_descr() == '<CharFieldDescr S.x %d>' % o
|
def test_repr_of_descr(): c0 = GcCache(False) T = lltype.GcStruct('T') S = lltype.GcStruct('S', ('x', lltype.Char), ('y', lltype.Ptr(T)), ('z', lltype.Ptr(T))) descr1 = get_size_descr(c0, S) s = symbolic.get_size(S, False) assert descr1.repr_of_descr() == '<SizeDescr %d>' % s # descr2 = get_field_descr(c0, S, 'y') o, _ = symbolic.get_field_token(S, 'y', False) assert descr2.repr_of_descr() == '<GcPtrFieldDescr %d>' % o # descr2i = get_field_descr(c0, S, 'x') o, _ = symbolic.get_field_token(S, 'x', False) assert descr2i.repr_of_descr() == '<CharFieldDescr %d>' % o # descr3 = get_array_descr(c0, lltype.GcArray(lltype.Ptr(S))) assert descr3.repr_of_descr() == '<GcPtrArrayDescr>' # descr3i = get_array_descr(c0, lltype.GcArray(lltype.Char)) assert descr3i.repr_of_descr() == '<CharArrayDescr>' # cache = {} descr4 = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Ptr(S)) assert 'GcPtrCallDescr' in descr4.repr_of_descr() # descr4i = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Char) assert 'CharCallDescr' in descr4i.repr_of_descr() # descr4f = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Float) assert 'FloatCallDescr' in descr4f.repr_of_descr()
|
"""Return the largest item in a sequence. If more than one argument is passed, return the maximum of them.
|
"""max(iterable[, key=func]) -> value max(a, b, c, ...[, key=func]) -> value With a single iterable argument, return its largest item. With two or more arguments, return the largest argument.
|
def max(space, __args__): """Return the largest item in a sequence. If more than one argument is passed, return the maximum of them. """ return min_max(space, __args__, "max")
|
x=typedef.GetSetProperty(fget, use_closure=True))
|
x=prop)
|
W_SomeType.typedef = typedef.TypeDef( 'some_type', x=typedef.GetSetProperty(fget, use_closure=True))
|
if not self.annotate_hooks(**kwds_s): return None
|
self.annotate_hooks(**kwds_s)
|
def compute_result_annotation(self, **kwds_s): from pypy.annotation import model as annmodel
|
return (h(driver.get_jitcell_at, driver.greens, **kwds_s) and h(driver.set_jitcell_at, driver.greens, [s_jitcell], **kwds_s) and h(driver.get_printable_location, driver.greens, **kwds_s))
|
h(driver.get_jitcell_at, driver.greens, **kwds_s) h(driver.set_jitcell_at, driver.greens, [s_jitcell], **kwds_s) h(driver.get_printable_location, driver.greens, **kwds_s)
|
def annotate_hooks(self, **kwds_s): driver = self.instance.im_self s_jitcell = self.bookkeeper.valueoftype(BaseJitCell) h = self.annotate_hook return (h(driver.get_jitcell_at, driver.greens, **kwds_s) and h(driver.set_jitcell_at, driver.greens, [s_jitcell], **kwds_s) and h(driver.get_printable_location, driver.greens, **kwds_s))
|
return True
|
return
|
def annotate_hook(self, func, variables, args_s=[], **kwds_s): if func is None: return True bk = self.bookkeeper s_func = bk.immutablevalue(func) uniquekey = 'jitdriver.%s' % func.func_name args_s = args_s[:] for name in variables: if '.' not in name: s_arg = kwds_s['s_' + name] else: objname, fieldname = name.split('.') s_instance = kwds_s['s_' + objname] s_arg = s_instance.classdef.about_attribute(fieldname) if s_arg is None: return False # wrong order, try again later args_s.append(s_arg) bk.emulate_pbc_call(uniquekey, s_func, args_s) return True
|
s_arg = s_instance.classdef.about_attribute(fieldname) if s_arg is None: return False
|
attrdef = s_instance.classdef.find_attribute(fieldname) position = self.bookkeeper.position_key attrdef.read_locations[position] = True s_arg = attrdef.getvalue() assert s_arg is not None
|
def annotate_hook(self, func, variables, args_s=[], **kwds_s): if func is None: return True bk = self.bookkeeper s_func = bk.immutablevalue(func) uniquekey = 'jitdriver.%s' % func.func_name args_s = args_s[:] for name in variables: if '.' not in name: s_arg = kwds_s['s_' + name] else: objname, fieldname = name.split('.') s_instance = kwds_s['s_' + objname] s_arg = s_instance.classdef.about_attribute(fieldname) if s_arg is None: return False # wrong order, try again later args_s.append(s_arg) bk.emulate_pbc_call(uniquekey, s_func, args_s) return True
|
return True
|
def annotate_hook(self, func, variables, args_s=[], **kwds_s): if func is None: return True bk = self.bookkeeper s_func = bk.immutablevalue(func) uniquekey = 'jitdriver.%s' % func.func_name args_s = args_s[:] for name in variables: if '.' not in name: s_arg = kwds_s['s_' + name] else: objname, fieldname = name.split('.') s_instance = kwds_s['s_' + objname] s_arg = s_instance.classdef.about_attribute(fieldname) if s_arg is None: return False # wrong order, try again later args_s.append(s_arg) bk.emulate_pbc_call(uniquekey, s_func, args_s) return True
|
|
assert liveboxes == [b2s, b4s]
|
assert liveboxes == [b2s, b4s] or liveboxes == [b4s, b2s]
|
def test_virtual_adder_pending_fields(): b2s, b4s = [BoxPtr(), BoxPtr()] storage = Storage() memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) modifier = ResumeDataVirtualAdder(storage, memo) modifier.liveboxes_from_env = {} modifier.liveboxes = {} modifier.vfieldboxes = {} v2 = OptValue(b2s) v4 = OptValue(b4s) modifier.register_box(b2s) modifier.register_box(b4s) values = {b4s: v4, b2s: v2} liveboxes = [] modifier._number_virtuals(liveboxes, values, 0) assert liveboxes == [b2s, b4s] modifier._add_pending_fields([(LLtypeMixin.nextdescr, b2s, b4s)]) storage.rd_consts = memo.consts[:] storage.rd_numb = None # resume demo55.next = lltype.nullptr(LLtypeMixin.NODE) b2t = BoxPtr(demo55o) b4t = BoxPtr(demo66o) newboxes = _resume_remap(liveboxes, [b2s, b4s], b2t, b4t) metainterp = MyMetaInterp() reader = ResumeDataReader(storage, newboxes, metainterp) assert reader.virtuals is None trace = metainterp.trace b2set = (rop.SETFIELD_GC, [b2t, b4t], None, LLtypeMixin.nextdescr) expected = [b2set] for x, y in zip(expected, trace): assert x == y assert demo55.next == demo66
|
cmd = "%s %s %d %s -S %s %s -v %s" %(
|
cmd = "%s %s %d %s -S %s %s %s -v" %(
|
def getinvocation(self, regrtest): fspath = regrtest.getfspath() python = sys.executable pypy_script = pypydir.join('bin', 'py.py') alarm_script = pypydir.join('tool', 'alarm.py') if sys.platform == 'win32': watchdog_name = 'watchdog_nt.py' else: watchdog_name = 'watchdog.py' watchdog_script = pypydir.join('tool', watchdog_name)
|
f.last_instr = intmask(jumpto) ec.bytecode_trace(f) jumpto = r_uint(f.last_instr) pypyjitdriver.can_enter_jit(frame=f, ec=ec, next_instr=jumpto, pycode=f.getcode())
|
self.last_instr = intmask(jumpto) ec.bytecode_trace(self) jumpto = r_uint(self.last_instr) pypyjitdriver.can_enter_jit(frame=self, ec=ec, next_instr=jumpto, pycode=self.getcode())
|
def jump_absolute(self, jumpto, _, ec=None): if we_are_jitted(): f.last_instr = intmask(jumpto) ec.bytecode_trace(f) jumpto = r_uint(f.last_instr) pypyjitdriver.can_enter_jit(frame=f, ec=ec, next_instr=jumpto, pycode=f.getcode()) return jumpto
|
i = fullname.rfind('.') subname = fullname[i+1:]
|
startpos = fullname.rfind('.') + 1 assert startpos >= 0 subname = fullname[startpos:]
|
def make_filename(self, fullname): i = fullname.rfind('.') subname = fullname[i+1:] return self.prefix + subname
|
lltype.nullptr(PyObject.TO)
|
return lltype.nullptr(PyObject.TO)
|
def get_from_lifeline(self, w_obj): lifeline = self.lifeline_dict.get(w_obj) if lifeline is not None: # make old PyObject ready for use in C code py_obj = lifeline.pyo assert py_obj.c_ob_refcnt == 0 return py_obj else: lltype.nullptr(PyObject.TO)
|
res.append_multiple_char('\x00', length - res.getlength())
|
remaining = length - res.getlength() if remaining > 0: res.append_multiple_char('\x00', remaining)
|
def a2b_uu(space, ascii): "Decode a line of uuencoded data." if len(ascii) == 0: # obscure case, for compability with CPython length = (-0x20) & 0x3f else: length = (ord(ascii[0]) - 0x20) & 0x3f res = StringBuilder(length) for i in range(1, len(ascii), 4): A = _a2b_read(space, ascii, i) B = _a2b_read(space, ascii, i+1) C = _a2b_read(space, ascii, i+2) D = _a2b_read(space, ascii, i+3) # _a2b_write(space, res, length, A << 2 | B >> 4) _a2b_write(space, res, length, (B & 0xf) << 4 | C >> 2) _a2b_write(space, res, length, (C & 0x3) << 6 | D) res.append_multiple_char('\x00', length - res.getlength()) return space.wrap(res.build())
|
l0 = regalloc.make_sure_var_in_reg(arg0, [arg1], imm_fine=False) l1 = regalloc.make_sure_var_in_reg(arg1, [arg0])
|
l0 = regalloc.make_sure_var_in_reg(arg0, args, imm_fine=False) l1 = regalloc.make_sure_var_in_reg(arg1, args)
|
def f(self, op, regalloc, fcond): assert fcond is not None if not inverse: arg0 = op.getarg(0) arg1 = op.getarg(1) else: arg0 = op.getarg(1) arg1 = op.getarg(0) # XXX consider swapping argumentes if arg0 is const imm_a0 = self._check_imm_arg(arg0) imm_a1 = self._check_imm_arg(arg1) if imm_a1 and not imm_a0: l0 = regalloc.make_sure_var_in_reg(arg0, [arg1], imm_fine=False) l1 = regalloc.make_sure_var_in_reg(arg1, [arg0]) res = regalloc.force_allocate_reg(op.result) self.mc.CMP_ri(l0.value, imm=l1.getint(), cond=fcond) else: l0 = regalloc.make_sure_var_in_reg(arg0, [arg1], imm_fine=False) l1 = regalloc.make_sure_var_in_reg(arg1, [arg0], imm_fine=False) res = regalloc.force_allocate_reg(op.result) self.mc.CMP_rr(l0.value, l1.value, cond=fcond)
|
l0 = regalloc.make_sure_var_in_reg(arg0, [arg1], imm_fine=False) l1 = regalloc.make_sure_var_in_reg(arg1, [arg0], imm_fine=False)
|
l0 = regalloc.make_sure_var_in_reg(arg0, args, imm_fine=False) l1 = regalloc.make_sure_var_in_reg(arg1, args, imm_fine=False)
|
def f(self, op, regalloc, fcond): assert fcond is not None if not inverse: arg0 = op.getarg(0) arg1 = op.getarg(1) else: arg0 = op.getarg(1) arg1 = op.getarg(0) # XXX consider swapping argumentes if arg0 is const imm_a0 = self._check_imm_arg(arg0) imm_a1 = self._check_imm_arg(arg1) if imm_a1 and not imm_a0: l0 = regalloc.make_sure_var_in_reg(arg0, [arg1], imm_fine=False) l1 = regalloc.make_sure_var_in_reg(arg1, [arg0]) res = regalloc.force_allocate_reg(op.result) self.mc.CMP_ri(l0.value, imm=l1.getint(), cond=fcond) else: l0 = regalloc.make_sure_var_in_reg(arg0, [arg1], imm_fine=False) l1 = regalloc.make_sure_var_in_reg(arg1, [arg0], imm_fine=False) res = regalloc.force_allocate_reg(op.result) self.mc.CMP_rr(l0.value, l1.value, cond=fcond)
|
lst = rgc._get_referents(rgc.cast_instance_to_gcref(x1))
|
lst = rgc.get_rpy_referents(rgc.cast_instance_to_gcref(x1))
|
def test_get_referents(): class X(object): __slots__ = ['stuff'] x1 = X() x1.stuff = X() x2 = X() lst = rgc._get_referents(rgc.cast_instance_to_gcref(x1)) lst2 = [rgc.try_cast_gcref_to_instance(X, x) for x in lst] assert x1.stuff in lst2 assert x2 not in lst2
|
expected = """
|
preamble = """
|
def test_remove_duplicate_pure_op(self): ops = """ [p1, p2] i1 = ptr_eq(p1, p2) i2 = ptr_eq(p1, p2) i3 = int_add(i1, 1) i3b = int_is_true(i3) guard_true(i3b) [] i4 = int_add(i2, 1) i4b = int_is_true(i4) guard_true(i4b) [] escape(i3) escape(i4) guard_true(i1) [] guard_true(i2) [] jump(p1, p2) """ expected = """ [p1, p2] i1 = ptr_eq(p1, p2) i3 = int_add(i1, 1) i3b = int_is_true(i3) guard_true(i3b) [] escape(i3) escape(i3) guard_true(i1) [] jump(p1, p2) """ self.optimize_loop(ops, expected)
|
self.optimize_loop(ops, expected)
|
expected = """ [p1, p2] escape(2) escape(2) jump(p1, p2) """ self.optimize_loop(ops, expected, preamble)
|
def test_remove_duplicate_pure_op(self): ops = """ [p1, p2] i1 = ptr_eq(p1, p2) i2 = ptr_eq(p1, p2) i3 = int_add(i1, 1) i3b = int_is_true(i3) guard_true(i3b) [] i4 = int_add(i2, 1) i4b = int_is_true(i4) guard_true(i4b) [] escape(i3) escape(i4) guard_true(i1) [] guard_true(i2) [] jump(p1, p2) """ expected = """ [p1, p2] i1 = ptr_eq(p1, p2) i3 = int_add(i1, 1) i3b = int_is_true(i3) guard_true(i3b) [] escape(i3) escape(i3) guard_true(i1) [] jump(p1, p2) """ self.optimize_loop(ops, expected)
|
clsdef = s_type.descriptions.keys()[0].getuniqueclassdef()
|
clsdef = s_type.any_description().getuniqueclassdef()
|
clsdef = s_type.descriptions.keys()[0].getuniqueclassdef()
|
raise OperationError(space.w_ValueError, space.wrap("too many values to unpack"))
|
raise OperationError(self.w_ValueError, self.wrap("too many values to unpack"))
|
def unpackiterable(self, w_iterable, expected_length=-1): """Unpack an iterable object into a real (interpreter-level) list. Raise a real (subclass of) ValueError if the length is wrong.""" w_iterator = self.iter(w_iterable) items = [] while True: try: w_item = self.next(w_iterator) except OperationError, e: if not e.match(self, self.w_StopIteration): raise break # done if expected_length != -1 and len(items) == expected_length: raise OperationError(space.w_ValueError, space.wrap("too many values to unpack")) items.append(w_item) if expected_length != -1 and len(items) < expected_length: i = len(items) if i == 1: plural = "" else: plural = "s" raise OperationError(space.w_ValueError, space.wrap("need more than %d value%s to unpack" % (i, plural))) return items
|
raise OperationError(space.w_ValueError, space.wrap("need more than %d value%s to unpack" %
|
raise OperationError(self.w_ValueError, self.wrap("need more than %d value%s to unpack" %
|
def unpackiterable(self, w_iterable, expected_length=-1): """Unpack an iterable object into a real (interpreter-level) list. Raise a real (subclass of) ValueError if the length is wrong.""" w_iterator = self.iter(w_iterable) items = [] while True: try: w_item = self.next(w_iterator) except OperationError, e: if not e.match(self, self.w_StopIteration): raise break # done if expected_length != -1 and len(items) == expected_length: raise OperationError(space.w_ValueError, space.wrap("too many values to unpack")) items.append(w_item) if expected_length != -1 and len(items) < expected_length: i = len(items) if i == 1: plural = "" else: plural = "s" raise OperationError(space.w_ValueError, space.wrap("need more than %d value%s to unpack" % (i, plural))) return items
|
pkg_file.write("__path__ = ['%s']\n" % sub_dir)
|
pkg_file.write("__path__ = [%r]\n" % sub_dir)
|
def _make_pkg(self, source, depth): pkg_name = "__runpy_pkg__" init_fname = "__init__"+os.extsep+"py" test_fname = "runpy_test"+os.extsep+"py" pkg_dir = sub_dir = tempfile.mkdtemp() if verbose: print " Package tree in:", sub_dir sys.path.insert(0, pkg_dir) if verbose: print " Updated sys.path:", sys.path[0] for i in range(depth): sub_dir = os.path.join(sub_dir, pkg_name) os.mkdir(sub_dir) if verbose: print " Next level in:", sub_dir pkg_fname = os.path.join(sub_dir, init_fname) pkg_file = open(pkg_fname, "w") pkg_file.write("__path__ = ['%s']\n" % sub_dir) pkg_file.close() if verbose: print " Created:", pkg_fname mod_fname = os.path.join(sub_dir, test_fname) mod_file = open(mod_fname, "w") mod_file.write(source) mod_file.close() if verbose: print " Created:", mod_fname mod_name = (pkg_name+".")*depth + "runpy_test" return pkg_dir, mod_fname, mod_name
|
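The diff above replaces the hand-quoted `'%s'` with `%r` when writing `__path__`, presumably so the generated literal stays valid when the temporary directory contains backslashes or quotes. A small illustration with a hypothetical Windows-style path (not taken from the dataset):

```python
# %r emits a proper Python string literal, escaping backslashes, so the
# generated __init__.py stays importable for Windows-style temp paths.
sub_dir = 'C:\\tmp\\__runpy_pkg__'   # hypothetical path, for illustration
assert "__path__ = [%r]\n" % sub_dir == "__path__ = ['C:\\\\tmp\\\\__runpy_pkg__']\n"
```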
elif restype is libffi.types.pointer or restype.c_type == libffi.FFI_TYPE_STRUCT:
|
elif restype is libffi.types.pointer or libffi.types.is_struct(restype):
|
def _call_uint(self, space, argchain): # the same comment as above apply. Moreover, we need to be careful # when the return type is ULONG, because the value might not fit into # a signed LONG: this is the only case in which we cast the result to # something different than LONG; as a result, the applevel value will # be a <long>. # # Note that we check for ULONG before UINT: this is needed on 32bit # machines, where they are they same: if we checked for UINT before # ULONG, we would cast to the wrong type. Note that this also means # that on 32bit the UINT case will never be entered (because it is # handled by the ULONG case). restype = self.func.restype call = self.func.call if restype is libffi.types.ulong: # special case uintres = call(argchain, rffi.ULONG) return space.wrap(uintres) elif restype is libffi.types.pointer or restype.c_type == libffi.FFI_TYPE_STRUCT: uintres = rffi.cast(rffi.ULONG, call(argchain, rffi.VOIDP)) return space.wrap(uintres) elif restype is libffi.types.uint: intres = rffi.cast(rffi.LONG, call(argchain, rffi.UINT)) elif restype is libffi.types.ushort: intres = rffi.cast(rffi.LONG, call(argchain, rffi.USHORT)) elif restype is libffi.types.uchar: intres = rffi.cast(rffi.LONG, call(argchain, rffi.UCHAR)) else: raise OperationError(space.w_ValueError, space.wrap('Unsupported restype')) return space.wrap(intres)
|
name += str(self.knownclsbox.value.ptr).rpartition("_vtable")[0].rpartition('.')[2]
|
name += str(self.knownclsbox.value.adr.ptr).rpartition("_vtable")[0].rpartition('.')[2]
|
def _dot(self, seen): if self in seen: return seen.add(self) if self.knownclsbox: name = "Virtual " if isinstance(self.knownclsbox.value, int): name += str(self.knownclsbox.value) else: name += str(self.knownclsbox.value.ptr).rpartition("_vtable")[0].rpartition('.')[2] elif self.structdescr: name = "Struct " + str(self.structdescr) elif self.arraydescr: name = "Array" else: name = "Not" if self.escaped: name = "ESC " + name if self.fromstart: name = "START " + name if self.unique == optimizefindnode.UNIQUE_NO: color = "blue" else: color = "black"
|
x = complex(x, 0)
|
x = _to_complex(x)
|
def _prodi(x): x = complex(x, 0) real = -x.imag imag = x.real return complex(real, imag)
|
x = complex(x)
|
x = _to_complex(x)
|
def phase(x): x = complex(x) return math.atan2(x.imag, x.real)
|
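The last row defines `phase()` as `atan2(x.imag, x.real)` after coercing to `complex`. As a quick sanity check, this matches the standard library's `cmath.phase` (assuming CPython's `cmath` and `math` are available; they are not imported by the row itself):

```python
import cmath
import math

# phase(z) as defined above is atan2(z.imag, z.real); cmath.phase agrees.
z = complex(-1.0, 1.0)
assert math.atan2(z.imag, z.real) == cmath.phase(z)
assert cmath.phase(complex(-1.0, 0.0)) == math.pi
```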