rem (stringlengths 0–322k) | add (stringlengths 0–2.05M) | context (stringlengths 8–228k) |
---|---|---|
elif member_type == structmemberdefs.T_CHAR:
|
elif member_type == T_CHAR:
|
def PyMember_GetOne(space, obj, w_member): addr = rffi.cast(ADDR, obj) addr += w_member.c_offset member_type = rffi.cast(lltype.Signed, w_member.c_type) for converter in integer_converters: typ, lltype, _ = converter if typ == member_type result = rffi.cast(rffi.CArrayPtr(lltype), addr) w_result = space.wrap(result[0]) return w_result if member_type == structmemberdefs.T_STRING: result = rffi.cast(rffi.CCHARPP, addr) if result[0]: w_result = PyString_FromString(space, result[0]) else: w_result = space.w_None elif member_type == structmemberdefs.T_STRING_INPLACE: result = rffi.cast(rffi.CCHARP, addr) w_result = PyString_FromString(space, result) elif member_type == structmemberdefs.T_CHAR: result = rffi.cast(rffi.CCHARP, addr) w_result = space.wrap(result[0]) elif member_type == structmemberdefs.T_OBJECT: obj_ptr = rffi.cast(PyObjectP, addr) if obj_ptr[0]: w_result = from_ref(space, obj_ptr[0]) else: w_result = space.w_None elif member_type == T_OBJECT_EX: obj_ptr = rffi.cast(PyObjectP, addr) if obj_ptr[0]: w_result = from_ref(space, obj_ptr[0]) else: w_name = space.wrap(rffi.charp2str(w_member.c_name)) raise OperationError(space.w_AttributeError, w_name) else: raise OperationError(space.w_SystemError, space.wrap("bad memberdescr type")) return w_result
|
elif member_type == structmemberdefs.T_OBJECT:
|
elif member_type == T_OBJECT:
|
def PyMember_GetOne(space, obj, w_member): addr = rffi.cast(ADDR, obj) addr += w_member.c_offset member_type = rffi.cast(lltype.Signed, w_member.c_type) for converter in integer_converters: typ, lltype, _ = converter if typ == member_type result = rffi.cast(rffi.CArrayPtr(lltype), addr) w_result = space.wrap(result[0]) return w_result if member_type == structmemberdefs.T_STRING: result = rffi.cast(rffi.CCHARPP, addr) if result[0]: w_result = PyString_FromString(space, result[0]) else: w_result = space.w_None elif member_type == structmemberdefs.T_STRING_INPLACE: result = rffi.cast(rffi.CCHARP, addr) w_result = PyString_FromString(space, result) elif member_type == structmemberdefs.T_CHAR: result = rffi.cast(rffi.CCHARP, addr) w_result = space.wrap(result[0]) elif member_type == structmemberdefs.T_OBJECT: obj_ptr = rffi.cast(PyObjectP, addr) if obj_ptr[0]: w_result = from_ref(space, obj_ptr[0]) else: w_result = space.w_None elif member_type == T_OBJECT_EX: obj_ptr = rffi.cast(PyObjectP, addr) if obj_ptr[0]: w_result = from_ref(space, obj_ptr[0]) else: w_name = space.wrap(rffi.charp2str(w_member.c_name)) raise OperationError(space.w_AttributeError, w_name) else: raise OperationError(space.w_SystemError, space.wrap("bad memberdescr type")) return w_result
|
a = runicode.UNICHR(0x10000) if sys.maxunicode < 0x10000: assert len(a) == 2
|
if runicode.MAXUNICODE > 0xffff: a = runicode.UNICHR(0x10000) if sys.maxunicode < 0x10000: assert len(a) == 2 else: assert len(a) == 1
|
def test_unichr(): a = runicode.UNICHR(0xffff) assert a == u'\uffff' a = runicode.UNICHR(0x10000) if sys.maxunicode < 0x10000: assert len(a) == 2 # surrogates else: assert len(a) == 1
|
assert len(a) == 1
|
py.test.raises(ValueError, runicode.UNICHR, 0x10000)
|
def test_unichr(): a = runicode.UNICHR(0xffff) assert a == u'\uffff' a = runicode.UNICHR(0x10000) if sys.maxunicode < 0x10000: assert len(a) == 2 # surrogates else: assert len(a) == 1
|
if len(runicode.UNICHR(0x10000)) == 2:
|
if runicode.MAXUNICODE != sys.maxunicode:
|
def setup_class(cls): if len(runicode.UNICHR(0x10000)) == 2: py.test.skip("these tests cannot run on the llinterp")
|
ptr = rffi.ptradd(output_ptr, decpt_ptr[0])
|
ptr = rffi.ptradd(output_ptr, rffi.cast(lltype.Signed, decpt_ptr[0]))
|
def dtoa(value): mode = 2 precision = 3 builder = StringBuilder(20) with lltype.scoped_alloc(rffi.INTP.TO, 1) as decpt_ptr: with lltype.scoped_alloc(rffi.INTP.TO, 1) as sign_ptr: with lltype.scoped_alloc(rffi.CCHARPP.TO, 1) as end_ptr: output_ptr = dg_dtoa(value, mode, precision, decpt_ptr, sign_ptr, end_ptr) buflen = (rffi.cast(rffi.LONG, end_ptr[0]) - rffi.cast(rffi.LONG, output_ptr)) builder.append(rffi.charpsize2str(output_ptr, decpt_ptr[0])) builder.append('.') ptr = rffi.ptradd(output_ptr, decpt_ptr[0]) buflen -= decpt_ptr[0] builder.append(rffi.charpsize2str(ptr, buflen)) dg_freedtoa(output_ptr) return builder.build()
|
if ffi_type is types.void: return 'v' elif ffi_type is types.double: return 'f' elif ffi_type is types.pointer: return 'i' elif ffi_type is types.uchar: return 'i' elif ffi_type is types.uint8: return 'i' elif ffi_type is types.schar: return 'i' elif ffi_type is types.sint8: return 'i' elif ffi_type is types.uint16: return 'i' elif ffi_type is types.ushort: return 'i' elif ffi_type is types.sint16: return 'i' elif ffi_type is types.sshort: return 'i' elif ffi_type is types.uint: return 'i' elif ffi_type is types.uint32: return 'i' elif ffi_type is types.sint: return 'i' elif ffi_type is types.sint32: return 'i'
|
if ffi_type is types.void: return 'v' elif ffi_type is types.double: return 'f' elif ffi_type is types.pointer: return 'i' elif ffi_type is types.schar: return 'i' elif ffi_type is types.uchar: return 'i' elif ffi_type is types.sshort: return 'i' elif ffi_type is types.ushort: return 'i' elif ffi_type is types.sint: return 'i' elif ffi_type is types.uint: return 'i' elif ffi_type is types.slong: return 'i' elif ffi_type is types.ulong: return 'i' elif ffi_type is types.sint8: return 'i' elif ffi_type is types.uint8: return 'i' elif ffi_type is types.sint16: return 'i' elif ffi_type is types.uint16: return 'i' elif ffi_type is types.sint32: return 'i' elif ffi_type is types.uint32: return 'i'
|
def getkind(ffi_type): if ffi_type is types.void: return 'v' elif ffi_type is types.double: return 'f' elif ffi_type is types.pointer: return 'i' elif ffi_type is types.uchar: return 'i' elif ffi_type is types.uint8: return 'i' elif ffi_type is types.schar: return 'i' elif ffi_type is types.sint8: return 'i' elif ffi_type is types.uint16: return 'i' elif ffi_type is types.ushort: return 'i' elif ffi_type is types.sint16: return 'i' elif ffi_type is types.sshort: return 'i' elif ffi_type is types.uint: return 'i' elif ffi_type is types.uint32: return 'i' elif ffi_type is types.sint: return 'i' elif ffi_type is types.sint32: return 'i' ## elif ffi_type is types.uint64: ## return 'i' ## elif ffi_type is types.sint64: ## return 'i' raise KeyError
|
w_func = self.getattr(space, space.wrap('__int__'), False)
|
w_func = self.getattr(space, '__int__', False)
|
def descr_int(self, space): w_func = self.getattr(space, space.wrap('__int__'), False) if w_func is not None: return space.call_function(w_func)
|
w_func = self.getattr(space, space.wrap('__long__'), False)
|
w_func = self.getattr(space, '__long__', False)
|
def descr_long(self, space): w_func = self.getattr(space, space.wrap('__long__'), False) if w_func is not None: return space.call_function(w_func) return self.descr_int(space)
|
direct_bootstrap_code_size=100*WORD
|
def gen_bootstrap_code(self, inputargs, regalloc, looptoken): for i in range(len(inputargs)): loc = inputargs[i] reg = regalloc.force_allocate_reg(loc) if loc.type == REF: addr = self.fail_boxes_ptr.get_addr_for_num(i) elif loc.type == INT: addr = self.fail_boxes_int.get_addr_for_num(i) else: raise ValueError self.mc.gen_load_int(reg.value, addr) self.mc.LDR_ri(reg.value, reg.value) regalloc.possibly_free_var(loc) arglocs = [regalloc.loc(arg) for arg in inputargs] looptoken._arm_arglocs = arglocs return arglocs
|
|
if log:
|
if log and not we_are_translated():
|
def assemble_loop(self, inputargs, operations, looptoken, log): self.setup() longevity = compute_vars_longevity(inputargs, operations) regalloc = ARMRegisterManager(longevity, assembler=self, frame_manager=ARMFrameManager())
|
if log:
|
if log and not we_are_translated():
|
def assemble_bridge(self, faildescr, inputargs, operations, original_loop_token, log): self.setup() assert isinstance(faildescr, AbstractFailDescr) code = faildescr._failure_recovery_code enc = rffi.cast(rffi.CCHARP, code) longevity = compute_vars_longevity(inputargs, operations) regalloc = ARMRegisterManager(longevity, assembler=self, frame_manager=ARMFrameManager())
|
self.getvalue(arg).enum_forced_boxes(args, {})
|
args.append(self.getvalue(arg).force_box())
|
def inline(self, loop_operations, loop_args, jump_args): self.argmap = argmap = {} assert len(loop_args) == len(jump_args) for i in range(len(loop_args)): argmap[loop_args[i]] = jump_args[i]
|
boxes = jmp.getarglist()[:] newval.enum_forced_boxes(boxes, {})
|
boxes = jmp.getarglist() boxes.append(newval.force_box())
|
def inline(self, loop_operations, loop_args, jump_args): self.argmap = argmap = {} assert len(loop_args) == len(jump_args) for i in range(len(loop_args)): argmap[loop_args[i]] = jump_args[i]
|
ops = self.get_by_bytecode("LOOKUP_METHOD")
|
ops = self.get_by_bytecode("LOOKUP_METHOD", True)
|
def main(n): i = 0 a = A(1) while i < n: x = a.f(i) i = a.f(x) return i
|
assert len(ops[0].get_opnames("guard")) <= 2
|
assert len(ops[0].get_opnames("guard")) <= 3
|
def main(n): i = 0 a = A(1) while i < n: x = a.f(i) i = a.f(x) return i
|
ops = self.get_by_bytecode("CALL_METHOD")
|
ops = self.get_by_bytecode("LOOKUP_METHOD") assert not ops[0] assert not ops[1] ops = self.get_by_bytecode("CALL_METHOD", True)
|
def main(n): i = 0 a = A(1) while i < n: x = a.f(i) i = a.f(x) return i
|
ops = self.get_by_bytecode("LOAD_ATTR")
|
ops = self.get_by_bytecode("CALL_METHOD") assert len(ops) == 2 assert len(ops[0]) <= 1 assert len(ops[1]) <= 1 ops = self.get_by_bytecode("LOAD_ATTR", True)
|
def main(n): i = 0 a = A(1) while i < n: x = a.f(i) i = a.f(x) return i
|
def test_expand_fail_v_all_1(self): ops = """ [i1, p1a, i2] p6s = getarrayitem_gc(p1a, 0, descr=arraydescr2) p7v = getfield_gc(p6s, descr=bdescr) p5s = new(descr=ssize) setfield_gc(p5s, i2, descr=adescr) setfield_gc(p5s, p7v, descr=bdescr) setarrayitem_gc(p1a, 1, p5s, descr=arraydescr2) guard_true(i1, descr=fdescr) [p1a] p2s = new(descr=ssize) p3v = new_with_vtable(ConstClass(node_vtable)) p4a = new_array(2, descr=arraydescr2) setfield_gc(p2s, i1, descr=adescr) setfield_gc(p2s, p3v, descr=bdescr) setfield_gc(p3v, i2, descr=valuedescr) setarrayitem_gc(p4a, 0, p2s, descr=arraydescr2) jump(i1, p4a, i2) """ expected = """ [i1, ia, iv, pnull, i2] guard_true(i1, descr=fdescr) [ia, iv, i2] jump(1, 1, i2, NULL, i2) """ xxx self.optimize_loop(ops, ''' Not, VArray(arraydescr2, VStruct(ssize, adescr=Not, bdescr=Virtual(node_vtable, valuedescr=Not)), Not), Not''', expected) self.check_expanded_fail_descr('''p1a where p1a is a varray arraydescr2: p6s, p5s where p6s is a vstruct ssize, adescr=ia, bdescr=p7v where p5s is a vstruct ssize, adescr=i2, bdescr=p7v where p7v is a node_vtable, valuedescr=iv ''', rop.GUARD_TRUE) def test_expand_fail_lazy_setfield_1(self): ops = """ [p1, i2, i3] p2 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, i2, descr=valuedescr) setfield_gc(p1, p2, descr=nextdescr) guard_true(i3, descr=fdescr) [] i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) jump(p1, i2, i4) """ expected = """ [p1, i2, i3] guard_true(i3, descr=fdescr) [p1, i2] i4 = int_neg(i2) setfield_gc(p1, NULL, descr=nextdescr) jump(p1, i2, i4) """ self.optimize_loop(ops, expected) self.loop.inputargs[0].value = self.nodebox.value self.check_expanded_fail_descr(''' p1.nextdescr = p2 where p2 is a node_vtable, valuedescr=i2 ''', rop.GUARD_TRUE) def test_expand_fail_lazy_setfield_2(self): ops = """ [i2, i3] p2 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, i2, descr=valuedescr) setfield_gc(ConstPtr(myptr), p2, descr=nextdescr) guard_true(i3, descr=fdescr) [] i4 = int_neg(i2) setfield_gc(ConstPtr(myptr), NULL, descr=nextdescr) jump(i2, i4) """ expected = """ [i2, i3] guard_true(i3, descr=fdescr) [i2] i4 = int_neg(i2) setfield_gc(ConstPtr(myptr), NULL, descr=nextdescr) jump(i2, i4) """ self.optimize_loop(ops, expected) self.check_expanded_fail_descr(''' ConstPtr(myptr).nextdescr = p2 where p2 is a node_vtable, valuedescr=i2 ''', rop.GUARD_TRUE)
|
def test_expand_fail_v_all_1(self): ops = """ [i1, p1a, i2] p6s = getarrayitem_gc(p1a, 0, descr=arraydescr2) p7v = getfield_gc(p6s, descr=bdescr) p5s = new(descr=ssize) setfield_gc(p5s, i2, descr=adescr) setfield_gc(p5s, p7v, descr=bdescr) setarrayitem_gc(p1a, 1, p5s, descr=arraydescr2) guard_true(i1, descr=fdescr) [p1a] p2s = new(descr=ssize) p3v = new_with_vtable(ConstClass(node_vtable)) p4a = new_array(2, descr=arraydescr2) setfield_gc(p2s, i1, descr=adescr) setfield_gc(p2s, p3v, descr=bdescr) setfield_gc(p3v, i2, descr=valuedescr) setarrayitem_gc(p4a, 0, p2s, descr=arraydescr2) jump(i1, p4a, i2) """ expected = """ [i1, ia, iv, pnull, i2] guard_true(i1, descr=fdescr) [ia, iv, i2] jump(1, 1, i2, NULL, i2) """ xxx self.optimize_loop(ops, ''' Not, VArray(arraydescr2, VStruct(ssize, adescr=Not, bdescr=Virtual(node_vtable, valuedescr=Not)), Not), Not''', expected) self.check_expanded_fail_descr('''p1a where p1a is a varray arraydescr2: p6s, p5s where p6s is a vstruct ssize, adescr=ia, bdescr=p7v where p5s is a vstruct ssize, adescr=i2, bdescr=p7v where p7v is a node_vtable, valuedescr=iv ''', rop.GUARD_TRUE)
|
|
rffi.CArray(HCRYPTPROV), 1, zero=True, flavor='raw')
|
rffi.CArray(HCRYPTPROV), 1, zero=True, flavor='raw', immortal=True)
|
def __init__(self, space): self.space = space self.w_environ = space.newdict() if _WIN: self.cryptProviderPtr = lltype.malloc( rffi.CArray(HCRYPTPROV), 1, zero=True, flavor='raw')
|
def frame_pos(self, i, size): return i
|
def frame_pos(self, i, box_type): return FakeFramePos(i, box_type)
|
def frame_pos(self, i, size): return i
|
sp = fm.loc(b0, 1) assert sp == 0
|
sp = fm.loc(b0) assert sp.pos == 0
|
def test_make_sure_var_in_reg(self): boxes, longevity = boxes_and_longevity(5) fm = TFrameManager() rm = RegisterManager(longevity, frame_manager=fm, assembler=MockAsm()) rm.next_instruction() # allocate a stack position b0, b1, b2, b3, b4 = boxes sp = fm.loc(b0, 1) assert sp == 0 loc = rm.make_sure_var_in_reg(b0) assert isinstance(loc, FakeReg) rm._check_invariants()
|
fm.loc(b0, 1)
|
fm.loc(b0)
|
def test_force_result_in_reg_4(self): b0, b1 = newboxes(0, 0) longevity = {b0: (0, 1), b1: (0, 1)} fm = TFrameManager() asm = MockAsm() rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) rm.next_instruction() fm.loc(b0, 1) rm.force_result_in_reg(b1, b0) rm._check_invariants() loc = rm.loc(b1) assert isinstance(loc, FakeReg) loc = rm.loc(b0) assert isinstance(loc, int) assert len(asm.moves) == 1
|
assert isinstance(loc, int)
|
assert isinstance(loc, FakeFramePos)
|
def test_force_result_in_reg_4(self): b0, b1 = newboxes(0, 0) longevity = {b0: (0, 1), b1: (0, 1)} fm = TFrameManager() asm = MockAsm() rm = RegisterManager(longevity, frame_manager=fm, assembler=asm) rm.next_instruction() fm.loc(b0, 1) rm.force_result_in_reg(b1, b0) rm._check_invariants() loc = rm.loc(b1) assert isinstance(loc, FakeReg) loc = rm.loc(b0) assert isinstance(loc, int) assert len(asm.moves) == 1
|
reg_width = 2
|
pass
|
def test_different_frame_width(self): class XRegisterManager(RegisterManager): reg_width = 2
|
s.append_multiple_char('d', 4)
|
s.append_multiple_char(u'd', 4)
|
def test_unicode_builder(): s = UnicodeBuilder() s.append(u'a') s.append(u'abc') s.append_slice(u'abcdef', 1, 2) s.append_multiple_char('d', 4) assert s.build() == 'aabcbdddd' assert isinstance(s.build(), unicode)
|
self.buffer = None
|
self.buffer = lltype.nullptr(rffi.CCHARP.TO)
|
def __init__(self, space): W_IOBase.__init__(self, space) self.buffer = None self.lock = None
|
state.init_r2w_from_w2r()
|
refcountstate = space.fromcache(RefcountState) refcountstate.init_r2w_from_w2r()
|
def startup(self, space): state = space.fromcache(State) from pypy.module.cpyext.typeobject import setup_new_method_def setup_new_method_def(space) if not we_are_translated(): space.setattr(space.wrap(self), space.wrap('api_lib'), space.wrap(state.api_lib)) else: state.init_r2w_from_w2r()
|
if bom == 0x0000FEFF:
|
if bom == BOM32_DIRECT:
|
def str_decode_utf_32_helper(s, size, errors, final=True, errorhandler=None, byteorder="native"): if errorhandler is None: errorhandler = raise_unicode_exception_decode bo = 0 if BYTEORDER == 'little': iorder = [0, 1, 2, 3] else: iorder = [3, 2, 1, 0] # Check for BOM marks (U+FEFF) in the input and adjust current # byte order setting accordingly. In native mode, the leading BOM # mark is skipped, in all other modes, it is copied to the output # stream as-is (giving a ZWNBSP character). pos = 0 if byteorder == 'native': if size >= 4: bom = ((ord(s[iorder[3]]) << 24) | (ord(s[iorder[2]]) << 16) | (ord(s[iorder[1]]) << 8) | ord(s[iorder[0]])) if BYTEORDER == 'little': if bom == 0x0000FEFF: pos += 4 bo = -1 elif bom == 0xFFFE0000: pos += 4 bo = 1 else: if bom == 0x0000FEFF: pos += 2 bo = 1 elif bom == 0xFFFE0000: pos += 2 bo = -1 elif byteorder == 'little': bo = -1 else: bo = 1 if size == 0: return u'', 0, bo if bo == -1: # force little endian iorder = [0, 1, 2, 3] elif bo == 1: # force big endian iorder = [3, 2, 1, 0] result = UnicodeBuilder(size // 4) while pos < size: # remaining bytes at the end? (size should be divisible by 4) if len(s) - pos < 4: if not final: break r, pos = errorhandler(errors, 'utf-32', "truncated data", s, pos, len(s)) result.append(r) if len(s) - pos < 4: break continue ch = ((ord(s[pos + iorder[3]]) << 24) | (ord(s[pos + iorder[2]]) << 16) | (ord(s[pos + iorder[1]]) << 8) | ord(s[pos + iorder[0]])) if ch >= 0x110000: r, pos = errorhandler(errors, 'utf-32', "codepoint not in range(0x110000)", s, pos, len(s)) result.append(r) continue if MAXUNICODE < 65536 and ch >= 0x10000: ch -= 0x10000L result.append(unichr(0xD800 + (ch >> 10))) result.append(unichr(0xDC00 + (ch & 0x03FF))) else: result.append(UNICHR(ch)) pos += 4 return result.build(), pos, bo
|
elif bom == 0xFFFE0000:
|
elif bom == BOM32_REVERSE:
|
def str_decode_utf_32_helper(s, size, errors, final=True, errorhandler=None, byteorder="native"): if errorhandler is None: errorhandler = raise_unicode_exception_decode bo = 0 if BYTEORDER == 'little': iorder = [0, 1, 2, 3] else: iorder = [3, 2, 1, 0] # Check for BOM marks (U+FEFF) in the input and adjust current # byte order setting accordingly. In native mode, the leading BOM # mark is skipped, in all other modes, it is copied to the output # stream as-is (giving a ZWNBSP character). pos = 0 if byteorder == 'native': if size >= 4: bom = ((ord(s[iorder[3]]) << 24) | (ord(s[iorder[2]]) << 16) | (ord(s[iorder[1]]) << 8) | ord(s[iorder[0]])) if BYTEORDER == 'little': if bom == 0x0000FEFF: pos += 4 bo = -1 elif bom == 0xFFFE0000: pos += 4 bo = 1 else: if bom == 0x0000FEFF: pos += 2 bo = 1 elif bom == 0xFFFE0000: pos += 2 bo = -1 elif byteorder == 'little': bo = -1 else: bo = 1 if size == 0: return u'', 0, bo if bo == -1: # force little endian iorder = [0, 1, 2, 3] elif bo == 1: # force big endian iorder = [3, 2, 1, 0] result = UnicodeBuilder(size // 4) while pos < size: # remaining bytes at the end? (size should be divisible by 4) if len(s) - pos < 4: if not final: break r, pos = errorhandler(errors, 'utf-32', "truncated data", s, pos, len(s)) result.append(r) if len(s) - pos < 4: break continue ch = ((ord(s[pos + iorder[3]]) << 24) | (ord(s[pos + iorder[2]]) << 16) | (ord(s[pos + iorder[1]]) << 8) | ord(s[pos + iorder[0]])) if ch >= 0x110000: r, pos = errorhandler(errors, 'utf-32', "codepoint not in range(0x110000)", s, pos, len(s)) result.append(r) continue if MAXUNICODE < 65536 and ch >= 0x10000: ch -= 0x10000L result.append(unichr(0xD800 + (ch >> 10))) result.append(unichr(0xDC00 + (ch & 0x03FF))) else: result.append(UNICHR(ch)) pos += 4 return result.build(), pos, bo
|
if bom == 0x0000FEFF: pos += 2
|
if bom == BOM32_DIRECT: pos += 4
|
def str_decode_utf_32_helper(s, size, errors, final=True, errorhandler=None, byteorder="native"): if errorhandler is None: errorhandler = raise_unicode_exception_decode bo = 0 if BYTEORDER == 'little': iorder = [0, 1, 2, 3] else: iorder = [3, 2, 1, 0] # Check for BOM marks (U+FEFF) in the input and adjust current # byte order setting accordingly. In native mode, the leading BOM # mark is skipped, in all other modes, it is copied to the output # stream as-is (giving a ZWNBSP character). pos = 0 if byteorder == 'native': if size >= 4: bom = ((ord(s[iorder[3]]) << 24) | (ord(s[iorder[2]]) << 16) | (ord(s[iorder[1]]) << 8) | ord(s[iorder[0]])) if BYTEORDER == 'little': if bom == 0x0000FEFF: pos += 4 bo = -1 elif bom == 0xFFFE0000: pos += 4 bo = 1 else: if bom == 0x0000FEFF: pos += 2 bo = 1 elif bom == 0xFFFE0000: pos += 2 bo = -1 elif byteorder == 'little': bo = -1 else: bo = 1 if size == 0: return u'', 0, bo if bo == -1: # force little endian iorder = [0, 1, 2, 3] elif bo == 1: # force big endian iorder = [3, 2, 1, 0] result = UnicodeBuilder(size // 4) while pos < size: # remaining bytes at the end? (size should be divisible by 4) if len(s) - pos < 4: if not final: break r, pos = errorhandler(errors, 'utf-32', "truncated data", s, pos, len(s)) result.append(r) if len(s) - pos < 4: break continue ch = ((ord(s[pos + iorder[3]]) << 24) | (ord(s[pos + iorder[2]]) << 16) | (ord(s[pos + iorder[1]]) << 8) | ord(s[pos + iorder[0]])) if ch >= 0x110000: r, pos = errorhandler(errors, 'utf-32', "codepoint not in range(0x110000)", s, pos, len(s)) result.append(r) continue if MAXUNICODE < 65536 and ch >= 0x10000: ch -= 0x10000L result.append(unichr(0xD800 + (ch >> 10))) result.append(unichr(0xDC00 + (ch & 0x03FF))) else: result.append(UNICHR(ch)) pos += 4 return result.build(), pos, bo
|
elif bom == 0xFFFE0000: pos += 2
|
elif bom == BOM32_REVERSE: pos += 4
|
def str_decode_utf_32_helper(s, size, errors, final=True, errorhandler=None, byteorder="native"): if errorhandler is None: errorhandler = raise_unicode_exception_decode bo = 0 if BYTEORDER == 'little': iorder = [0, 1, 2, 3] else: iorder = [3, 2, 1, 0] # Check for BOM marks (U+FEFF) in the input and adjust current # byte order setting accordingly. In native mode, the leading BOM # mark is skipped, in all other modes, it is copied to the output # stream as-is (giving a ZWNBSP character). pos = 0 if byteorder == 'native': if size >= 4: bom = ((ord(s[iorder[3]]) << 24) | (ord(s[iorder[2]]) << 16) | (ord(s[iorder[1]]) << 8) | ord(s[iorder[0]])) if BYTEORDER == 'little': if bom == 0x0000FEFF: pos += 4 bo = -1 elif bom == 0xFFFE0000: pos += 4 bo = 1 else: if bom == 0x0000FEFF: pos += 2 bo = 1 elif bom == 0xFFFE0000: pos += 2 bo = -1 elif byteorder == 'little': bo = -1 else: bo = 1 if size == 0: return u'', 0, bo if bo == -1: # force little endian iorder = [0, 1, 2, 3] elif bo == 1: # force big endian iorder = [3, 2, 1, 0] result = UnicodeBuilder(size // 4) while pos < size: # remaining bytes at the end? (size should be divisible by 4) if len(s) - pos < 4: if not final: break r, pos = errorhandler(errors, 'utf-32', "truncated data", s, pos, len(s)) result.append(r) if len(s) - pos < 4: break continue ch = ((ord(s[pos + iorder[3]]) << 24) | (ord(s[pos + iorder[2]]) << 16) | (ord(s[pos + iorder[1]]) << 8) | ord(s[pos + iorder[0]])) if ch >= 0x110000: r, pos = errorhandler(errors, 'utf-32', "codepoint not in range(0x110000)", s, pos, len(s)) result.append(r) continue if MAXUNICODE < 65536 and ch >= 0x10000: ch -= 0x10000L result.append(unichr(0xD800 + (ch >> 10))) result.append(unichr(0xDC00 + (ch & 0x03FF))) else: result.append(UNICHR(ch)) pos += 4 return result.build(), pos, bo
|
a.build_types(func, argtypes)
|
a.build_types(func, argtypes, main_entry_point=True)
|
def annotate(func, values, inline=None, backendoptimize=True, type_system="lltype"): # build the normal ll graphs for ll_function t = TranslationContext() annpolicy = AnnotatorPolicy() annpolicy.allow_someobjects = False a = t.buildannotator(policy=annpolicy) argtypes = getargtypes(a, values) a.build_types(func, argtypes) rtyper = t.buildrtyper(type_system = type_system) rtyper.specialize() if inline: auto_inlining(t, threshold=inline) if backendoptimize: from pypy.translator.backendopt.all import backend_optimizations backend_optimizations(t, inline_threshold=inline or 0, remove_asserts=True, really_remove_asserts=True) #if conftest.option.view: # t.view() return rtyper
|
w_exc = space.get_w_value(space)
|
w_exc = e.get_w_value(space)
|
def write_w(self, space, w_data): self._check_init(space) self._check_closed(space, "write to closed file") data = space.str_w(w_data) size = len(data)
|
retValueSize[0] = 256
|
retValueSize[0] = r_uint(256)
|
def EnumKey(space, w_hkey, index): """string = EnumKey(key, index) - Enumerates subkeys of an open registry key.
|
self.indicies = [(0, len(self.gears_w[x].wrappeditems)) for x in range(0, self.num_gears)]
|
self.indicies = [(0, space.int_w(space.len(w_gear))) for w_gear in self.gears_w]
|
def __init__(self, space, args_w, repeat_w): self.space = space self.gears_w = [x for x in args_w] * repeat_w.intval self.num_gears = len(self.gears_w) # initialization of indicies to loop over self.indicies = [(0, len(self.gears_w[x].wrappeditems)) for x in range(0, self.num_gears)] self.cont = True
|
l.append(self.gears_w[x].wrappeditems[index])
|
l.append(self.space.getitem(self.gears_w[x], self.space.wrap(index)))
|
def next_w(self): if not self.cont: raise OperationError(self.space.w_StopIteration, self.space.w_None) l = [] for x in range(0, self.num_gears): index, limit = self.indicies[x] l.append(self.gears_w[x].wrappeditems[index]) self.roll_gears() return self.space.newtuple(l)
|
def __init__(self, space, args_w, repeat_w): self.space = space self.gears_w = [x for x in args_w] * repeat_w.intval
|
def __init__(self, space, args_w, w_repeat): self.space = space self.gears_w = [x for x in args_w] * space.int_w(w_repeat)
|
def __init__(self, space, args_w, repeat_w): self.space = space self.gears_w = [x for x in args_w] * repeat_w.intval self.num_gears = len(self.gears_w) # initialization of indicies to loop over self.indicies = [(0, space.int_w(space.len(w_gear))) for w_gear in self.gears_w] self.cont = True
|
repeat = kw_args_w.get('repeat', space.wrap(1)) return space.wrap(W_Product(space, star_args_w[1:], repeat))
|
w_repeat = kw_args_w.get('repeat', space.wrap(1)) return space.wrap(W_Product(space, star_args_w[1:], w_repeat))
|
def W_Product__new__(space, args_w): star_args_w, kw_args_w = args_w.unpack() if len(kw_args_w) > 1: raise OperationError(space.w_TypeError, space.wrap("product() takes at most 1 argument (%d given)" % len(kw_args_w))) repeat = kw_args_w.get('repeat', space.wrap(1)) return space.wrap(W_Product(space, star_args_w[1:], repeat))
|
from pypy.rlib.rarithmetic import r_uint
|
from pypy.interpreter.typedef import TypeDef, GetSetProperty
|
|
rffi.cast(rffi.UINTP, message)[0] = r_uint(size)
|
rffi.cast(rffi.UINTP, message)[0] = rffi.r_uint(size)
|
def do_send_string(self, space, buffer, offset, size): # Since str2charp copies the buffer anyway, always combine the # "header" and the "body" of the message and send them at once. message = lltype.malloc(rffi.CCHARP.TO, size + 4, flavor='raw') try: rffi.cast(rffi.UINTP, message)[0] = r_uint(size) # XXX htonl! i = size - 1 while i >= 0: message[4 + i] = buffer[offset + i] i -= 1 self._sendall(space, message, size + 4) finally: lltype.free(message, flavor='raw')
|
spilled = False
|
def f(self, op, fcond): assert fcond is not None a0 = op.getarg(0) a1 = op.getarg(1) arg1 = self.make_sure_var_in_reg(a0, selected_reg=r.r0) arg2 = self.make_sure_var_in_reg(a1, selected_reg=r.r1) assert arg1 == r.r0 assert arg2 == r.r1 spilled = False if isinstance(a0, Box) and self.stays_alive(a0): spilled = True self.force_spill_var(a0) self.after_call(op.result) if spilled: self.possibly_free_var(a0) self.possibly_free_var(a1) self.possibly_free_var(op.result) return []
|
|
spilled = True
|
def f(self, op, fcond): assert fcond is not None a0 = op.getarg(0) a1 = op.getarg(1) arg1 = self.make_sure_var_in_reg(a0, selected_reg=r.r0) arg2 = self.make_sure_var_in_reg(a1, selected_reg=r.r1) assert arg1 == r.r0 assert arg2 == r.r1 spilled = False if isinstance(a0, Box) and self.stays_alive(a0): spilled = True self.force_spill_var(a0) self.after_call(op.result) if spilled: self.possibly_free_var(a0) self.possibly_free_var(a1) self.possibly_free_var(op.result) return []
|
|
if spilled: self.possibly_free_var(a0)
|
def f(self, op, fcond): assert fcond is not None a0 = op.getarg(0) a1 = op.getarg(1) arg1 = self.make_sure_var_in_reg(a0, selected_reg=r.r0) arg2 = self.make_sure_var_in_reg(a1, selected_reg=r.r1) assert arg1 == r.r0 assert arg2 == r.r1 spilled = False if isinstance(a0, Box) and self.stays_alive(a0): spilled = True self.force_spill_var(a0) self.after_call(op.result) if spilled: self.possibly_free_var(a0) self.possibly_free_var(a1) self.possibly_free_var(op.result) return []
|
|
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[0]>, line 3)
|
File "<doctest test.test_generators.__test__.syntax[0]>", line 3 SyntaxError: 'return' with argument inside generator
|
>>> def f():
|
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[1]>, line 3)
|
File "<doctest test.test_generators.__test__.syntax[1]>", line 3 SyntaxError: 'return' with argument inside generator
|
>>> def f():
|
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[2]>, line 3)
|
File "<doctest test.test_generators.__test__.syntax[2]>", line 3 SyntaxError: 'return' with argument inside generator
|
>>> def f():
|
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[24]>, line 10)
|
... File "<doctest test.test_generators.__test__.syntax[24]>", line 10 SyntaxError: 'return' with argument inside generator
|
... def f(i):
|
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.coroutine[22]>, line 1)
|
File "<doctest test.test_generators.__test__.coroutine[22]>", line 1 SyntaxError: 'return' with argument inside generator
|
>>> def f(): return lambda x=(yield): 1
|
SyntaxError: assignment to yield expression not possible
|
SyntaxError: can't assign to yield expression
|
>>> def f(): x = yield = y
|
TypeError: exceptions must be classes, or instances, not str
|
TypeError: exceptions must be old-style classes or derived from BaseException, not str
|
>>> def throw(g,exc):
|
assert exc.msg == "EOL while scanning single-quoted string"
|
assert exc.msg == "EOL while scanning string literal"
|
def test_syntax_error(self): parse = self.parse exc = py.test.raises(SyntaxError, parse, "name another for").value assert exc.msg == "invalid syntax" assert exc.lineno == 1 assert exc.offset == 5 assert exc.text.startswith("name another for") exc = py.test.raises(SyntaxError, parse, "\"blah").value assert exc.msg == "EOL while scanning single-quoted string" exc = py.test.raises(SyntaxError, parse, "'''\n").value assert exc.msg == "EOF while scanning triple-quoted string" for input in ("())", "(()", "((", "))"): py.test.raises(SyntaxError, parse, input)
|
assert exc.msg == "EOF while scanning triple-quoted string"
|
assert exc.msg == "EOF while scanning triple-quoted string literal"
|
def test_syntax_error(self): parse = self.parse exc = py.test.raises(SyntaxError, parse, "name another for").value assert exc.msg == "invalid syntax" assert exc.lineno == 1 assert exc.offset == 5 assert exc.text.startswith("name another for") exc = py.test.raises(SyntaxError, parse, "\"blah").value assert exc.msg == "EOL while scanning single-quoted string" exc = py.test.raises(SyntaxError, parse, "'''\n").value assert exc.msg == "EOF while scanning triple-quoted string" for input in ("())", "(()", "((", "))"): py.test.raises(SyntaxError, parse, input)
|
if result is None: retval = lltype.nullptr(PyObject.TO) elif isinstance(result, BorrowedPair):
|
if isinstance(result, BorrowedPair):
|
def wrapper(*args): from pypy.module.cpyext.pyobject import make_ref, from_ref from pypy.module.cpyext.pyobject import BorrowedPair from pypy.module.cpyext.pyobject import NullPointerException # we hope that malloc removal removes the newtuple() that is # inserted exactly here by the varargs specializer llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py rffi.stackcounter.stacks_counter += 1 retval = fatal_value boxed_args = () try: if not we_are_translated() and DEBUG_WRAPPER: print >>sys.stderr, callable, assert len(args) == len(callable.api_func.argtypes) for i, (typ, is_wrapped) in argtypes_enum_ui: arg = args[i] if typ is PyObject and is_wrapped: if arg: arg_conv = from_ref(space, arg) else: arg_conv = None else: arg_conv = arg boxed_args += (arg_conv, ) state = space.fromcache(State) try: result = callable(space, *boxed_args) if not we_are_translated() and DEBUG_WRAPPER: print >>sys.stderr, " DONE" except OperationError, e: failed = True state.set_exception(e) except BaseException, e: failed = True state.set_exception(OperationError(space.w_SystemError, space.wrap(str(e)))) if not we_are_translated(): import traceback traceback.print_exc() else: failed = False
|
assert isinstance(result, W_Root)
|
def wrapper(*args): from pypy.module.cpyext.pyobject import make_ref, from_ref from pypy.module.cpyext.pyobject import BorrowedPair from pypy.module.cpyext.pyobject import NullPointerException # we hope that malloc removal removes the newtuple() that is # inserted exactly here by the varargs specializer llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py rffi.stackcounter.stacks_counter += 1 retval = fatal_value boxed_args = () try: if not we_are_translated() and DEBUG_WRAPPER: print >>sys.stderr, callable, assert len(args) == len(callable.api_func.argtypes) for i, (typ, is_wrapped) in argtypes_enum_ui: arg = args[i] if typ is PyObject and is_wrapped: if arg: arg_conv = from_ref(space, arg) else: arg_conv = None else: arg_conv = arg boxed_args += (arg_conv, ) state = space.fromcache(State) try: result = callable(space, *boxed_args) if not we_are_translated() and DEBUG_WRAPPER: print >>sys.stderr, " DONE" except OperationError, e: failed = True state.set_exception(e) except BaseException, e: failed = True state.set_exception(OperationError(space.w_SystemError, space.wrap(str(e)))) if not we_are_translated(): import traceback traceback.print_exc() else: failed = False
|
|
A = POINTER(c_int) * 24
|
A = POINTER(c_long) * 24
|
def test_array_of_pointers(self): # tests array item assignements & pointer.contents = ... A = POINTER(c_int) * 24 a = A() l = c_long(2) p = pointer(l) a[3] = p assert l._objects is None assert p._objects == {'1':l} assert a._objects == {'3':{'1':l}}
|
w_modules = space.sys.get('modules') try: return space.getitem(w_modules, w(fullname)) except OperationError, e: pass
|
def load_module(self, space, fullname): w = space.wrap w_modules = space.sys.get('modules') try: return space.getitem(w_modules, w(fullname)) except OperationError, e: pass filename = self.mangle(fullname) last_exc = None for compiled, is_package, ext in ENUMERATE_EXTS: fname = filename + ext try: zip_file = RZipFile(self.filename, 'r') try: buf = zip_file.read(fname) finally: zip_file.close() except (KeyError, OSError): pass else: if is_package: pkgpath = self.name + os.path.sep + filename else: pkgpath = None try: if compiled: return self.import_pyc_file(space, fullname, fname, buf, pkgpath) else: return self.import_py_file(space, fullname, fname, buf, pkgpath) except OperationError, e: last_exc = e w_mods = space.sys.get('modules') space.call_method(w_mods, 'pop', w(fullname), space.w_None) if last_exc: raise OperationError(self.w_ZipImportError, last_exc.get_w_value(space)) # should never happen I think return space.w_None
|
|
def PyRun_String(space, str, start, w_globals, w_locals):
|
def PyRun_String(space, source, start, w_globals, w_locals):
|
def PyRun_String(space, str, start, w_globals, w_locals): """This is a simplified interface to PyRun_StringFlags() below, leaving flags set to NULL.""" source = rffi.charp2str(str) filename = "<string>" return run_string(space, source, filename, start, w_globals, w_locals)
|
source = rffi.charp2str(str)
|
source = rffi.charp2str(source)
|
def PyRun_String(space, str, start, w_globals, w_locals): """This is a simplified interface to PyRun_StringFlags() below, leaving flags set to NULL.""" source = rffi.charp2str(str) filename = "<string>" return run_string(space, source, filename, start, w_globals, w_locals)
|
srcaddr_loc = regalloc.force_allocate_reg(srcaddr_box, forbidden_vars)
|
srcaddr_loc = regalloc.force_allocate_reg(srcaddr_box, forbidden_vars, selected_reg=r.r1)
|
def _emit_copystrcontent(self, op, regalloc, fcond, is_unicode): # compute the source address args = list(op.getarglist()) base_loc, box = self._ensure_value_is_boxed(args[0], regalloc, args) args.append(box) ofs_loc, box = self._ensure_value_is_boxed(args[2], regalloc, args) args.append(box) assert args[0] is not args[1] # forbidden case of aliasing regalloc.possibly_free_var(args[0]) if args[3] is not args[2] is not args[4]: # MESS MESS MESS: don't free regalloc.possibly_free_var(args[2]) # it if ==args[3] or args[4] srcaddr_box = TempBox() forbidden_vars = [args[1], args[3], args[4], srcaddr_box] srcaddr_loc = regalloc.force_allocate_reg(srcaddr_box, forbidden_vars) self._gen_address_inside_string(base_loc, ofs_loc, srcaddr_loc, is_unicode=is_unicode)
|
forbidden_vars = [args[4], srcaddr_box] dstaddr_box = TempBox() dstaddr_loc = regalloc.force_allocate_reg(dstaddr_box, forbidden_vars)
|
def _emit_copystrcontent(self, op, regalloc, fcond, is_unicode): # compute the source address args = list(op.getarglist()) base_loc, box = self._ensure_value_is_boxed(args[0], regalloc, args) args.append(box) ofs_loc, box = self._ensure_value_is_boxed(args[2], regalloc, args) args.append(box) assert args[0] is not args[1] # forbidden case of aliasing regalloc.possibly_free_var(args[0]) if args[3] is not args[2] is not args[4]: # MESS MESS MESS: don't free regalloc.possibly_free_var(args[2]) # it if ==args[3] or args[4] srcaddr_box = TempBox() forbidden_vars = [args[1], args[3], args[4], srcaddr_box] srcaddr_loc = regalloc.force_allocate_reg(srcaddr_box, forbidden_vars) self._gen_address_inside_string(base_loc, ofs_loc, srcaddr_loc, is_unicode=is_unicode)
|
|
return "", endingpos
|
return u"", endingpos
|
def errorhandler(errors, enc, msg, t, startingpos, endingpos): called[0] += 1 if called[0] == 1: assert errors == "foo!" assert enc == encoding assert t is s assert start == startingpos assert stop == endingpos return u"42424242", stop return "", endingpos
|
func._annspecialcase_ = 'specialize:memo()'
|
func._annspecialcase_ = 'specialize:memo'
|
def decorated_func(func): func._annspecialcase_ = 'specialize:memo()' return func
|
func._annspecialcase_ = 'specialize:ll()'
|
func._annspecialcase_ = 'specialize:ll'
|
def decorated_func(func): func._annspecialcase_ = 'specialize:ll()' return func
|
print line
|
def compute(self, graph): self.links = {} dotgen = DotGen(str(graph.no)) # split over debug_merge_points counter = 0 lines = graph.content.split("\n") lines_so_far = [] for line in lines: line = re.sub('.\[.*\]', '', line) boxes = re.findall('([pif]\d+)', line) for box in boxes: self.links[box] = box if 'debug_merge_point' in line: dotgen.emit_node('node%d' % counter, shape="box", label="\n".join(lines_so_far)) if counter != 0: dotgen.emit_edge('node%d' % (counter - 1), 'node%d' % counter) counter += 1 lines_so_far = [] print line lines_so_far.append(line) dotgen.emit_node('node%d' % counter, shape="box", label="\n".join(lines_so_far)) dotgen.emit_edge('node%d' % (counter - 1), 'node%d' % counter) self.source = dotgen.generate(target=None)
|
|
def test_TypeCheck(self, space, api): assert api.PyObject_TypeCheck(space.wrap(1), space.w_int) assert api.PyObject_TypeCheck(space.wrap(1), api.PyInt_Type) assert api.PyObject_TypeCheck(space.wrap('foo'), space.w_str) assert api.PyObject_TypeCheck(space.wrap('foo'), api.PyString_Type) assert api.PyObject_TypeCheck(space.wrap('foo'), space.w_object) assert api.PyObject_TypeCheck(space.wrap(1L), api.PyLong_Type) assert api.PyObject_TypeCheck(space.wrap(True), api.PyBool_Type) assert api.PyObject_TypeCheck(space.wrap(1.2), api.PyFloat_Type) assert api.PyObject_TypeCheck(space.w_int, api.PyType_Type)
|
def test_TypeCheck(self, space, api): assert api.PyObject_TypeCheck(space.wrap(1), space.w_int) assert api.PyObject_TypeCheck(space.wrap(1), api.PyInt_Type) assert api.PyObject_TypeCheck(space.wrap('foo'), space.w_str) assert api.PyObject_TypeCheck(space.wrap('foo'), api.PyString_Type) assert api.PyObject_TypeCheck(space.wrap('foo'), space.w_object) assert api.PyObject_TypeCheck(space.wrap(1L), api.PyLong_Type) assert api.PyObject_TypeCheck(space.wrap(True), api.PyBool_Type) assert api.PyObject_TypeCheck(space.wrap(1.2), api.PyFloat_Type) assert api.PyObject_TypeCheck(space.w_int, api.PyType_Type)
|
|
class AppTestObjectPrint(AppTestCpythonExtensionBase):
|
class AppTestObject(AppTestCpythonExtensionBase):
|
def test_file_fromstring(self, space, api): filename = rffi.str2charp(str(udir / "_test_file")) mode = rffi.str2charp("wb") w_file = api.PyFile_FromString(filename, mode) rffi.free_charp(filename) rffi.free_charp(mode)
|
result = float(int(t.c_time)) + float(int(t.c_millitm)) * 0.001
|
result = (float(intmask(t.c_time)) + float(intmask(t.c_millitm)) * 0.001)
|
def time_time_llimpl(): void = lltype.nullptr(rffi.VOIDP.TO) result = -1.0 if self.HAVE_GETTIMEOFDAY: t = lltype.malloc(self.TIMEVAL, flavor='raw')
|
identifier = self.code.identifier if Function._all.get(identifier, self) is not self: print "builtin code identifier %s used twice: %s and %s" % ( identifier, self, Function._all[identifier])
|
def _freeze_(self): from pypy.interpreter.gateway import BuiltinCode if isinstance(self.code, BuiltinCode): identifier = self.code.identifier if Function._all.get(identifier, self) is not self: print "builtin code identifier %s used twice: %s and %s" % ( identifier, self, Function._all[identifier]) # we have been seen by other means so rtyping should not choke # on us Function._all[identifier] = self return False
|
|
Function._all[identifier] = self
|
Function._all[self.code.identifier] = self
|
def _freeze_(self): from pypy.interpreter.gateway import BuiltinCode if isinstance(self.code, BuiltinCode): identifier = self.code.identifier if Function._all.get(identifier, self) is not self: print "builtin code identifier %s used twice: %s and %s" % ( identifier, self, Function._all[identifier]) # we have been seen by other means so rtyping should not choke # on us Function._all[identifier] = self return False
|
return w_self.space.wrap(w_self.val.__dict__)
|
try: d = w_self.val.__dict__ except AttributeError: return W_Object.getdict(w_self) return w_self.space.wrap(d)
|
def getdict(w_self): return w_self.space.wrap(w_self.val.__dict__)
|
assert info.backend_no == 1
|
assert info.backend_no == 2
|
def f(n): i = 0 while i < n: mydriver.can_enter_jit(i=i, n=n) mydriver.jit_merge_point(i=i, n=n) i += 1
|
assert info.opt_ops == 6 assert info.opt_guards == 1
|
assert info.opt_ops == 11 assert info.opt_guards == 2
|
def f(n): i = 0 while i < n: mydriver.can_enter_jit(i=i, n=n) mydriver.jit_merge_point(i=i, n=n) i += 1
|
if untag(modifier._gettagged(b2s))[0] == -2: expected = [b2new, b4new] + b4set + b2set else: expected = [b4new, b2new] + b2set + b4set for x, y in zip(expected, trace): assert x == y
|
expected = [b2new, b4new] + b4set + b2set assert len(trace) == len(expected) for x in trace: assert x in expected expected.remove(x)
|
def test_virtual_adder_make_virtual(): b2s, b3s, b4s, b5s = [BoxPtr(), BoxInt(3), BoxPtr(), BoxPtr()] c1s = ConstInt(111) storage = Storage() memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) modifier = ResumeDataVirtualAdder(storage, memo) modifier.liveboxes_from_env = {} modifier.liveboxes = {} modifier.vfieldboxes = {} v4 = VirtualValue(fakeoptimizer, ConstAddr(LLtypeMixin.node_vtable_adr2, LLtypeMixin.cpu), b4s) v4.setfield(LLtypeMixin.nextdescr, OptValue(b2s)) v4.setfield(LLtypeMixin.valuedescr, OptValue(b3s)) v4.setfield(LLtypeMixin.otherdescr, OptValue(b5s)) v4._cached_sorted_fields = [LLtypeMixin.nextdescr, LLtypeMixin.valuedescr, LLtypeMixin.otherdescr] v2 = VirtualValue(fakeoptimizer, ConstAddr(LLtypeMixin.node_vtable_adr, LLtypeMixin.cpu), b2s) v2.setfield(LLtypeMixin.nextdescr, v4) v2.setfield(LLtypeMixin.valuedescr, OptValue(c1s)) v2._cached_sorted_fields = [LLtypeMixin.nextdescr, LLtypeMixin.valuedescr] modifier.register_virtual_fields(b2s, [b4s, c1s]) modifier.register_virtual_fields(b4s, [b2s, b3s, b5s]) values = {b2s: v2, b4s: v4} liveboxes = [] modifier._number_virtuals(liveboxes, values, 0) storage.rd_consts = memo.consts[:] storage.rd_numb = None # resume b3t, b5t = [BoxInt(33), BoxPtr(demo55o)] newboxes = _resume_remap(liveboxes, [#b2s -- virtual b3s, #b4s -- virtual #b2s -- again, shared #b3s -- again, shared b5s], b3t, b5t) metainterp = MyMetaInterp() reader = ResumeDataFakeReader(storage, newboxes, metainterp) assert len(reader.virtuals_cache) == 2 b2t = reader.decode_ref(modifier._gettagged(b2s)) b4t = reader.decode_ref(modifier._gettagged(b4s)) trace = metainterp.trace b2new = (rop.NEW_WITH_VTABLE, [ConstAddr(LLtypeMixin.node_vtable_adr, LLtypeMixin.cpu)], b2t, None) b4new = (rop.NEW_WITH_VTABLE, [ConstAddr(LLtypeMixin.node_vtable_adr2, LLtypeMixin.cpu)], b4t, None) b2set = [(rop.SETFIELD_GC, [b2t, b4t], None, LLtypeMixin.nextdescr), (rop.SETFIELD_GC, [b2t, c1s], None, LLtypeMixin.valuedescr)] b4set = [(rop.SETFIELD_GC, [b4t, b2t], None, LLtypeMixin.nextdescr), (rop.SETFIELD_GC, [b4t, b3t], None, LLtypeMixin.valuedescr), (rop.SETFIELD_GC, [b4t, b5t], None, LLtypeMixin.otherdescr)] if untag(modifier._gettagged(b2s))[0] == -2: expected = [b2new, b4new] + b4set + b2set else: expected = [b4new, b2new] + b2set + b4set for x, y in zip(expected, trace): assert x == y ptr = b2t.value._obj.container._as_ptr() assert lltype.typeOf(ptr) == lltype.Ptr(LLtypeMixin.NODE) assert ptr.value == 111 ptr2 = ptr.next ptr2 = lltype.cast_pointer(lltype.Ptr(LLtypeMixin.NODE2), ptr2) assert ptr2.other == demo55 assert ptr2.parent.value == 33 assert ptr2.parent.next == ptr
|
error = lltype.nullptr(PyObject.TO)
|
error = lltype.nullptr(restype.TO)
|
def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, external=True): """ Declares a function to be exported. - `argtypes`, `restype` are lltypes and describe the function signature. - `error` is the value returned when an applevel exception is raised. The special value 'CANNOT_FAIL' (also when restype is Void) turns an eventual exception into a wrapped SystemError. Unwrapped exceptions also cause a SytemError. - set `external` to False to get a C function pointer, but not exported by the API headers. """ if error is _NOT_SPECIFIED: if restype is PyObject: error = lltype.nullptr(PyObject.TO) elif restype is lltype.Void: error = CANNOT_FAIL if type(error) is int: error = rffi.cast(restype, error) def decorate(func): func_name = func.func_name if external: c_name = None else: c_name = func_name api_function = ApiFunction(argtypes, restype, func, error, c_name=c_name) func.api_func = api_function if external: assert func_name not in FUNCTIONS, ( "%s already registered" % func_name) if error is _NOT_SPECIFIED: raise ValueError("function %s has no return value for exceptions" % func) def make_unwrapper(catch_exception): names = api_function.argnames types_names_enum_ui = unrolling_iterable(enumerate( zip(api_function.argtypes, [tp_name.startswith("w_") for tp_name in names]))) @specialize.ll() def unwrapper(space, *args): from pypy.module.cpyext.pyobject import Py_DecRef from pypy.module.cpyext.pyobject import make_ref, from_ref from pypy.module.cpyext.pyobject import BorrowPair newargs = () to_decref = [] assert len(args) == len(api_function.argtypes) for i, (ARG, is_wrapped) in types_names_enum_ui: input_arg = args[i] if is_PyObject(ARG) and not is_wrapped: # build a reference if input_arg is None: arg = lltype.nullptr(PyObject.TO) elif isinstance(input_arg, W_Root): ref = make_ref(space, input_arg) to_decref.append(ref) arg = rffi.cast(ARG, ref) else: arg = input_arg elif is_PyObject(ARG) and is_wrapped: # convert to a wrapped object if input_arg is None: arg = input_arg elif isinstance(input_arg, W_Root): arg = input_arg else: arg = from_ref(space, rffi.cast(PyObject, input_arg)) else: arg = input_arg newargs += (arg, ) try: try: res = func(space, *newargs) except OperationError, e: if not catch_exception: raise if not hasattr(api_function, "error_value"): raise state = space.fromcache(State) state.set_exception(e) if restype is PyObject: return None else: return api_function.error_value if res is None: return None elif isinstance(res, BorrowPair): return res.w_borrowed else: return res finally: for arg in to_decref: Py_DecRef(space, arg) unwrapper.func = func unwrapper.api_func = api_function unwrapper._always_inline_ = True return unwrapper unwrapper_catch = make_unwrapper(True) unwrapper_raise = make_unwrapper(False) if external: FUNCTIONS[func_name] = api_function INTERPLEVEL_API[func_name] = unwrapper_catch # used in tests return unwrapper_raise # used in 'normal' RPython code. return decorate
|
if restype is PyObject:
|
if is_PyObject(restype):
|
def unwrapper(space, *args): from pypy.module.cpyext.pyobject import Py_DecRef from pypy.module.cpyext.pyobject import make_ref, from_ref from pypy.module.cpyext.pyobject import BorrowPair newargs = () to_decref = [] assert len(args) == len(api_function.argtypes) for i, (ARG, is_wrapped) in types_names_enum_ui: input_arg = args[i] if is_PyObject(ARG) and not is_wrapped: # build a reference if input_arg is None: arg = lltype.nullptr(PyObject.TO) elif isinstance(input_arg, W_Root): ref = make_ref(space, input_arg) to_decref.append(ref) arg = rffi.cast(ARG, ref) else: arg = input_arg elif is_PyObject(ARG) and is_wrapped: # convert to a wrapped object if input_arg is None: arg = input_arg elif isinstance(input_arg, W_Root): arg = input_arg else: arg = from_ref(space, rffi.cast(PyObject, input_arg)) else: arg = input_arg newargs += (arg, ) try: try: res = func(space, *newargs) except OperationError, e: if not catch_exception: raise if not hasattr(api_function, "error_value"): raise state = space.fromcache(State) state.set_exception(e) if restype is PyObject: return None else: return api_function.error_value if res is None: return None elif isinstance(res, BorrowPair): return res.w_borrowed else: return res finally: for arg in to_decref: Py_DecRef(space, arg)
|
if typ is PyObject and is_wrapped:
|
if is_PyObject(typ) and is_wrapped:
|
def wrapper(*args): from pypy.module.cpyext.pyobject import make_ref, from_ref from pypy.module.cpyext.pyobject import BorrowPair # we hope that malloc removal removes the newtuple() that is # inserted exactly here by the varargs specializer llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py rffi.stackcounter.stacks_counter += 1 retval = fatal_value boxed_args = () try: if not we_are_translated() and DEBUG_WRAPPER: print >>sys.stderr, callable, assert len(args) == len(callable.api_func.argtypes) for i, (typ, is_wrapped) in argtypes_enum_ui: arg = args[i] if typ is PyObject and is_wrapped: if arg: arg_conv = from_ref(space, arg) else: arg_conv = None else: arg_conv = arg boxed_args += (arg_conv, ) state = space.fromcache(State) try: result = callable(space, *boxed_args) if not we_are_translated() and DEBUG_WRAPPER: print >>sys.stderr, " DONE" except OperationError, e: failed = True state.set_exception(e) except BaseException, e: failed = True if not we_are_translated(): message = repr(e) import traceback traceback.print_exc() else: message = str(e) state.set_exception(OperationError(space.w_SystemError, space.wrap(message))) else: failed = False
|
arg_conv = from_ref(space, arg)
|
arg_conv = from_ref(space, rffi.cast(PyObject, arg))
|
def wrapper(*args): from pypy.module.cpyext.pyobject import make_ref, from_ref from pypy.module.cpyext.pyobject import BorrowPair # we hope that malloc removal removes the newtuple() that is # inserted exactly here by the varargs specializer llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py rffi.stackcounter.stacks_counter += 1 retval = fatal_value boxed_args = () try: if not we_are_translated() and DEBUG_WRAPPER: print >>sys.stderr, callable, assert len(args) == len(callable.api_func.argtypes) for i, (typ, is_wrapped) in argtypes_enum_ui: arg = args[i] if typ is PyObject and is_wrapped: if arg: arg_conv = from_ref(space, arg) else: arg_conv = None else: arg_conv = arg boxed_args += (arg_conv, ) state = space.fromcache(State) try: result = callable(space, *boxed_args) if not we_are_translated() and DEBUG_WRAPPER: print >>sys.stderr, " DONE" except OperationError, e: failed = True state.set_exception(e) except BaseException, e: failed = True if not we_are_translated(): message = repr(e) import traceback traceback.print_exc() else: message = str(e) state.set_exception(OperationError(space.w_SystemError, space.wrap(message))) else: failed = False
|
assert dlog[1] == ('jit-backend-counts', [('debug_print', '0:10')])
|
assert ('jit-backend-counts', [('debug_print', '0:10')]) in dlog
|
def test_debugger_on(self): from pypy.tool.logparser import parse_log_file, extract_category from pypy.rlib import debug loop = """ [i0] debug_merge_point('xyz', 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) guard_false(i2) [] jump(i1) """ ops = parse(loop) debug._log = dlog = debug.DebugLog() try: self.cpu.assembler.set_debug(True) self.cpu.compile_loop(ops.inputargs, ops.operations, ops.token) self.cpu.set_future_value_int(0, 0) self.cpu.execute_token(ops.token) # check debugging info name, struct = self.cpu.assembler.loop_run_counters[0] assert name == 0 # 'xyz' assert struct.i == 10 self.cpu.finish_once() finally: debug._log = None assert dlog[1] == ('jit-backend-counts', [('debug_print', '0:10')])
|
fclose = rffi.llexternal('fopen', [FILEP], rffi.INT)
|
fclose = rffi.llexternal('fclose', [FILEP], rffi.INT)
|
def test_opaque_obj_2(self): FILEP = rffi.COpaquePtr('FILE') fopen = rffi.llexternal('fopen', [rffi.CCHARP, rffi.CCHARP], FILEP) fclose = rffi.llexternal('fopen', [FILEP], rffi.INT) tmppath = udir.join('test_ll2ctypes.test_opaque_obj_2') ll_file = fopen(str(tmppath), "w") assert ll_file fclose(ll_file) assert tmppath.check(file=1) #assert not ALLOCATED --- fails, because ll2ctypes misses the # fact that fclose() frees 'll_file'
|
class_name = w_arg.w_class.getname(space, '?')
|
class_name = space.type(w_arg).getname(space, '?')
|
def create_spec(space, w_arg): if isinstance(w_arg, Method): w_function = w_arg.w_function class_name = w_arg.w_class.getname(space, '?') if isinstance(w_function, Function): name = w_function.name else: name = '?' return "{method '%s' of '%s' objects}" % (name, class_name) elif isinstance(w_arg, Function): if w_arg.w_module is None: module = '' else: module = space.str_w(w_arg.w_module) if module == '__builtin__': module = '' else: module += '.' return '{%s%s}' % (module, w_arg.name) else: class_name = w_arg.w_class.getname(space, '?') return "{'%s' object}" % (class_name,)
|
descr._arm_guard_pos = self.mc.currpos()
|
def _emit_guard(self, op, arglocs, fcond, save_exc=False): descr = op.getdescr() assert isinstance(descr, AbstractFailDescr) self.guard_descrs.append(descr) if not we_are_translated() and hasattr(op, 'getfailargs'): print 'Failargs: ', op.getfailargs()
|
|
return self._emit_guard(op, locs[2:], c.EQ)
|
def _cmp_guard_class(self, op, locs, regalloc, fcond): offset = locs[2] if offset is not None: if offset.is_imm(): self.mc.LDR_ri(r.ip.value, locs[0].value, offset.value) else: assert offset.is_reg() self.mc.LDR_rr(r.ip.value, locs[0].value, offset.value) self.mc.CMP_rr(r.ip.value, locs[1].value) else: raise NotImplementedError # XXX port from x86 backend once gc support is in place
|
|
srcaddr_loc = regalloc.force_allocate_reg(srcaddr_box, forbidden_vars, selected_reg=r.r1)
|
srcaddr_loc = regalloc.force_allocate_reg(srcaddr_box, selected_reg=r.r1)
|
def _emit_copystrcontent(self, op, regalloc, fcond, is_unicode): # compute the source address args = list(op.getarglist()) base_loc, box = self._ensure_value_is_boxed(args[0], regalloc, args) args.append(box) ofs_loc, box = self._ensure_value_is_boxed(args[2], regalloc, args) args.append(box) assert args[0] is not args[1] # forbidden case of aliasing regalloc.possibly_free_var(args[0]) if args[3] is not args[2] is not args[4]: # MESS MESS MESS: don't free regalloc.possibly_free_var(args[2]) # it if ==args[3] or args[4] srcaddr_box = TempBox() forbidden_vars = [args[1], args[3], args[4], srcaddr_box] srcaddr_loc = regalloc.force_allocate_reg(srcaddr_box, forbidden_vars, selected_reg=r.r1) self._gen_address_inside_string(base_loc, ofs_loc, srcaddr_loc, is_unicode=is_unicode)
|
f.write("{\n") for sym in self.export_symbols: f.write("%s;\n" % (sym,)) f.write("};")
|
if host.name.startswith('darwin'):
    for sym in self.export_symbols:
        f.write("_%s\n" % (sym,))
    d['link_extra'] += ("-Wl,-exported_symbols_list,"+str(file_name), )
else:
    f.write("{\n")
    for sym in self.export_symbols:
        f.write("%s;\n" % (sym,))
    f.write("};")
    d['link_extra'] += ("-Wl,--dynamic-list=" + str(file_name), )
|
def convert_exportsymbols_to_file(self):
    if not self.export_symbols:
        return self
    num = 0
    while 1:
        file_name = udir.join('dynamic-symbols-%i' % num)
        num += 1
        if not file_name.check():
            break
|
d = self._copy_attributes()
d['link_extra'] += ("-Wl,--dynamic-list=" + str(file_name), )
|
def convert_exportsymbols_to_file(self):
    if not self.export_symbols:
        return self
    num = 0
    while 1:
        file_name = udir.join('dynamic-symbols-%i' % num)
        num += 1
        if not file_name.check():
            break
|
|
raises(RuntimeError, module.return_invalid_pointer)
|
def test_export_function2(self):
    import sys
    init = """
    if (Py_IsInitialized())
        Py_InitModule("foo", methods);
    """
    body = """
    static PyObject* my_objects[1];
    static PyObject* foo_cached_pi(PyObject* self, PyObject *args)
    {
        if (my_objects[0] == NULL) {
            my_objects[0] = PyFloat_FromDouble(3.14);
            Py_INCREF(my_objects[0]);
        }
        return my_objects[0];
    }
    static PyObject* foo_drop_pi(PyObject* self, PyObject *args)
    {
        if (my_objects[0] != NULL) {
            Py_DECREF(my_objects[0]);
            my_objects[0] = NULL;
        }
        Py_INCREF(Py_None);
        return Py_None;
    }
    static PyObject* foo_retinvalid(PyObject* self, PyObject *args)
    {
        return (PyObject*)0xAFFEBABE;
    }
    static PyMethodDef methods[] = {
        { "return_pi", foo_cached_pi, METH_NOARGS },
        { "drop_pi", foo_drop_pi, METH_NOARGS },
        { "return_invalid_pointer", foo_retinvalid, METH_NOARGS },
        { NULL }
    };
    """
    module = self.import_module(name='foo', init=init, body=body)
    raises(RuntimeError, module.return_invalid_pointer)
    assert module.return_pi() == 3.14
    module.drop_pi()
    module.drop_pi()
    assert module.return_pi() == 3.14
    assert module.return_pi() == 3.14
|
|
py.test.skip("Useful to see how programming errors look like")
|
skip("Useful to see how programming errors look like")
|
def test_internal_exceptions(self):
    py.test.skip("Useful to see how programming errors look like")
    import sys
    init = """
    if (Py_IsInitialized())
        Py_InitModule("foo", methods);
    """
    body = """
    static PyObject* foo_crash1(PyObject* self, PyObject *args)
    {
        return PyPy_Crash1();
    }
    static PyObject* foo_crash2(PyObject* self, PyObject *args)
    {
        int a = PyPy_Crash2();
        if (a == -1)
            return NULL;
        return PyFloat_FromDouble(a);
    }
    static PyMethodDef methods[] = {
        { "crash1", foo_crash1, METH_NOARGS },
        { "crash2", foo_crash2, METH_NOARGS },
        { NULL }
    };
    """
    module = self.import_module(name='foo', init=init, body=body)
    module.crash1()
    module.crash2()
|
if sys.platform == "win32": encoding = "mbcs" elif sys.platform == "darwin": encoding = "utf-8" else: encoding = None
|
encoding = base_encoding
|
def _getfilesystemencoding(space):
    if sys.platform == "win32":
        encoding = "mbcs"
    elif sys.platform == "darwin":
        encoding = "utf-8"
    else:
        encoding = None
    # CPython does this at startup time, I don't thing it matter that much
    if rlocale.HAVE_LANGINFO and rlocale.CODESET:
        oldlocale = rlocale.setlocale(rlocale.LC_CTYPE, None)
        rlocale.setlocale(rlocale.LC_CTYPE, "")
        loc_codeset = rlocale.nl_langinfo(rlocale.CODESET)
        if loc_codeset:
            codecmod = space.getbuiltinmodule('_codecs')
            w_res = space.call_function(space.getattr(codecmod,
                                                      space.wrap('lookup')),
                                        space.wrap(loc_codeset))
            if space.is_true(w_res):
                encoding = loc_codeset
    return encoding
|
return (self.shared_only + ['-bundle', '-undefined', 'dynamic_lookup']
|
return (self.shared_only + ['-dynamiclib', '-undefined', 'dynamic_lookup']
|
def _args_for_shared(self, args):
    return (self.shared_only + ['-bundle', '-undefined', 'dynamic_lookup']
            + args)
|
return []
|
return ['/usr/lib/']
|
def library_dirs_for_libffi(self):
    return []
|
w_subclass._version_tag = None
|
if isinstance(w_type, W_TypeObject): w_subclass._version_tag = None
|
def descr_set__bases__(space, w_type, w_value):
    # this assumes all app-level type objects are W_TypeObject
    from pypy.objspace.std.typeobject import W_TypeObject
    from pypy.objspace.std.typeobject import check_and_find_best_base
    from pypy.objspace.std.typeobject import get_parent_layout
    from pypy.objspace.std.typeobject import is_mro_purely_of_types
    w_type = _check(space, w_type)
    if not w_type.is_heaptype():
        raise operationerrfmt(space.w_TypeError,
                              "can't set %s.__bases__", w_type.name)
    if not space.is_true(space.isinstance(w_value, space.w_tuple)):
        raise operationerrfmt(space.w_TypeError,
                              "can only assign tuple to %s.__bases__, not %s",
                              w_type.name,
                              space.type(w_value).getname(space, '?'))
    newbases_w = space.fixedview(w_value)
    if len(newbases_w) == 0:
        raise operationerrfmt(space.w_TypeError,
                              "can only assign non-empty tuple to %s.__bases__, not ()",
                              w_type.name)
    for w_newbase in newbases_w:
        if isinstance(w_newbase, W_TypeObject):
            if w_type in w_newbase.compute_default_mro():
                raise OperationError(space.w_TypeError,
                                     space.wrap("a __bases__ item causes"
                                                " an inheritance cycle"))
    w_oldbestbase = check_and_find_best_base(space, w_type.bases_w)
    w_newbestbase = check_and_find_best_base(space, newbases_w)
    oldlayout = w_oldbestbase.get_full_instance_layout()
    newlayout = w_newbestbase.get_full_instance_layout()
    if oldlayout != newlayout:
        raise operationerrfmt(space.w_TypeError,
                              "__bases__ assignment: '%s' object layout"
                              " differs from '%s'",
                              w_newbestbase.getname(space, '?'),
                              w_oldbestbase.getname(space, '?'))
    # invalidate the version_tag of all the current subclasses
    w_type.mutated()
    # now we can go ahead and change 'w_type.bases_w'
    saved_bases_w = w_type.bases_w
    temp = []
    try:
        for w_oldbase in saved_bases_w:
            if isinstance(w_oldbase, W_TypeObject):
                w_oldbase.remove_subclass(w_type)
        w_type.bases_w = newbases_w
        for w_newbase in newbases_w:
            if isinstance(w_newbase, W_TypeObject):
                w_newbase.add_subclass(w_type)
        # try to recompute all MROs
        mro_subclasses(space, w_type, temp)
    except:
        for cls, old_mro in temp:
            cls.mro_w = old_mro
        w_type.bases_w = saved_bases_w
        raise
    if not is_mro_purely_of_types(w_type.mro_w):
        # Disable method cache if the hierarchy isn't pure.
        w_type._version_tag = None
        for w_subclass in w_type.get_subclasses():
            w_subclass._version_tag = None
    assert w_type.w_same_layout_as is get_parent_layout(w_type)  # invariant
|
msg = ("exceptions must be classes, or instances," "or strings (deprecated) not %s" % (instclassname,))
|
msg = ("exceptions must be classes, or instances, " "or strings (deprecated), not %s" % (instclassname,))
|
def normalize_exception(self, space):
    """Normalize the OperationError.  In other words, fix w_type and/or
    w_value to make sure that the __class__ of w_value is exactly w_type.
    """
    #
    # This method covers all ways in which the Python statement
    # "raise X, Y" can produce a valid exception type and instance.
    #
    # In the following table, 'Class' means a subclass of BaseException
    # and 'inst' is an instance of either 'Class' or a subclass of it.
    # Or 'Class' can also be an old-style class and 'inst' an old-style
    # instance of it.
    #
    # Note that 'space.full_exceptions' is set to False by the flow
    # object space; in this case we must assume that we are in a
    # non-advanced case, and ignore the advanced cases.  Old-style
    # classes and instances *are* advanced.
    #
    #  input (w_type, w_value)... becomes...                advanced case?
    # ---------------------------------------------------------------------
    #  (tuple, w_value)           (tuple[0], w_value)             yes
    #  (Class, None)              (Class, Class())                no
    #  (Class, inst)              (inst.__class__, inst)          no
    #  (Class, tuple)             (Class, Class(*tuple))          yes
    #  (Class, x)                 (Class, Class(x))               no
    #  ("string", ...)            ("string", ...)             deprecated
    #  (inst, None)               (inst.__class__, inst)          no
    #
    w_type = self.w_type
    w_value = self.w_value
    if space.full_exceptions:
        while space.is_true(space.isinstance(w_type, space.w_tuple)):
            w_type = space.getitem(w_type, space.wrap(0))
|
func._annspecialcase_ = 'args' + self._wrap(args)
|
func._annspecialcase_ = 'arg' + self._wrap(args)
|
def decorated_func(func):
    func._annspecialcase_ = 'args' + self._wrap(args)
    return func
|
func._annspecialcase_ = 'argtypes' + self._wrap(args)
|
func._annspecialcase_ = 'argtype' + self._wrap(args)
|
def decorated_func(func):
    func._annspecialcase_ = 'argtypes' + self._wrap(args)
    return func
|
ops = self.get_by_bytecode("LOAD_GLOBAL")
|
ops = self.get_by_bytecode("LOAD_GLOBAL", True)
|
def main(n):
    i = 0
    while i < n+OFFSET:
        i = f(f(i))
    return i
|
assert ops[0].get_opnames() == ["getfield_gc", "guard_value",
|
assert ops[0].get_opnames() == ["guard_value", "getfield_gc", "guard_value",
|
def main(n):
    i = 0
    while i < n+OFFSET:
        i = f(f(i))
    return i
|
ops = self.get_by_bytecode("CALL_FUNCTION")
|
ops = self.get_by_bytecode("CALL_FUNCTION", True)
|
def main(n):
    i = 0
    while i < n+OFFSET:
        i = f(f(i))
    return i
|
future_features = misc.parse_future(node)
info = pyparse.CompileInfo(filename, mode, flags, future_flags)
|
future_pos = misc.parse_future(node)
info = pyparse.CompileInfo(filename, mode, flags, future_pos)
|
def compile_ast(self, node, filename, mode, flags):
    future_features = misc.parse_future(node)
    info = pyparse.CompileInfo(filename, mode, flags, future_flags)
    return self._compile_ast(node, info)
|
if platform.machine() == 'i386':
    if sys.maxint <= 2147483647:
        host_factory = Darwin_i386
    else:
        host_factory = Darwin_x86_64
else:
    host_factory = Darwin
|
assert platform.machine() in ('i386', 'x86_64')
if sys.maxint <= 2147483647:
    host_factory = Darwin_i386
else:
    host_factory = Darwin_x86_64
|
def check___thread(self):
    return True
|