rem
stringlengths
0
322k
add
stringlengths
0
2.05M
context
stringlengths
8
228k
if w_stararg is not None and space.is_true(w_stararg):
if w_stararg is not None:
def __init__(self, space, args_w, keywords=None, keywords_w=None, w_stararg=None, w_starstararg=None): self.space = space assert isinstance(args_w, list) self.arguments_w = args_w self.keywords = keywords self.keywords_w = keywords_w if keywords is not None: assert keywords_w is not None assert len(keywords_w) == len(keywords) make_sure_not_resized(self.keywords) make_sure_not_resized(self.keywords_w)
if w_starstararg is not None and space.is_true(w_starstararg):
if w_starstararg is not None:
def __init__(self, space, args_w, keywords=None, keywords_w=None, w_stararg=None, w_starstararg=None): self.space = space assert isinstance(args_w, list) self.arguments_w = args_w self.keywords = keywords self.keywords_w = keywords_w if keywords is not None: assert keywords_w is not None assert len(keywords_w) == len(keywords) make_sure_not_resized(self.keywords) make_sure_not_resized(self.keywords_w)
for func in targets: graph = getattr(func._obj, 'graph', None)
for graph in targets:
def generic_call(self, FUNC, fnexpr, args_v, v_result, targets=None): args = [] assert len(args_v) == len(FUNC.TO.ARGS) for v, ARGTYPE in zip(args_v, FUNC.TO.ARGS): if ARGTYPE is Void: continue # skip 'void' argument args.append(self.expr(v)) # special case for rctypes: by-value container args: # XXX is this still needed now that rctypes is gone if isinstance(ARGTYPE, ContainerType): args[-1] = '*%s' % (args[-1],)
op.args[1:], op.result, [fn.value])
op.args[1:], op.result, targets)
def OP_DIRECT_CALL(self, op): fn = op.args[0] return self.generic_call(fn.concretetype, self.expr(fn), op.args[1:], op.result, [fn.value])
elif letter == "d" or letter == "g":
elif letter == "d":
def unwrap_value(space, push_func, add_arg, argdesc, letter, w_arg): w = space.wrap if letter in TYPEMAP_PTR_LETTERS: # check for NULL ptr datainstance = space.interpclass_w(w_arg) if isinstance(datainstance, W_DataInstance): ptr = datainstance.ll_buffer else: ptr = unwrap_truncate_int(rffi.VOIDP, space, w_arg) push_func(add_arg, argdesc, ptr) elif letter == "d" or letter == "g": push_func(add_arg, argdesc, space.float_w(w_arg)) elif letter == "f": push_func(add_arg, argdesc, rffi.cast(rffi.FLOAT, space.float_w(w_arg))) elif letter == "c": s = space.str_w(w_arg) if len(s) != 1: raise OperationError(space.w_TypeError, w( "Expected string of length one as character")) val = s[0] push_func(add_arg, argdesc, val) elif letter == 'u': s = space.unicode_w(w_arg) if len(s) != 1: raise OperationError(space.w_TypeError, w( "Expected unicode string of length one as wide character")) val = s[0] push_func(add_arg, argdesc, val) else: for c in unroll_letters_for_numbers: if letter == c: TP = LL_TYPEMAP[c] val = unwrap_truncate_int(TP, space, w_arg) push_func(add_arg, argdesc, val) return else: raise OperationError(space.w_TypeError, space.wrap("cannot directly write value"))
assert out.getvalue == "None"
assert out.getvalue() == "None\n"
def test_print_function(self): import __builtin__ import sys import StringIO pr = getattr(__builtin__, "print") save = sys.stdout out = sys.stdout = StringIO.StringIO() try: pr("Hello,", "person!") finally: sys.stdout = save assert out.getvalue() == "Hello, person!\n" out = StringIO.StringIO() pr("Hello,", "person!", file=out) assert out.getvalue() == "Hello, person!\n" out = StringIO.StringIO() pr("Hello,", "person!", file=out, end="") assert out.getvalue() == "Hello, person!" out = StringIO.StringIO() pr("Hello,", "person!", file=out, sep="X") assert out.getvalue() == "Hello,Xperson!\n" out = StringIO.StringIO() pr(u"Hello,", u"person!", file=out) result = out.getvalue() assert isinstance(result, unicode) assert result == u"Hello, person!\n" pr("Hello", file=None) # This works. out = StringIO.StringIO() pr(None, file=out) assert out.getvalue == "None"
self.mc.MOV_mi((loc.value, self.cpu.vtable_offset), loc_vtable.value)
self.mc.MOV(mem(loc, self.cpu.vtable_offset), loc_vtable)
def set_vtable(self, loc, loc_vtable): if self.cpu.vtable_offset is not None: assert isinstance(loc, RegLoc) assert isinstance(loc_vtable, ImmedLoc) self.mc.MOV_mi((loc.value, self.cpu.vtable_offset), loc_vtable.value)
optimizer = Optimizer(metainterp_sd, loop, optimizations, not_a_bridge=True)
optimizer = Optimizer(metainterp_sd, loop, optimizations)
def optimize_loop(self, ops, optops): loop = self.parse(ops) # self.loop = loop metainterp_sd = FakeMetaInterpStaticData(self.cpu) if hasattr(self, 'vrefinfo'): metainterp_sd.virtualref_info = self.vrefinfo # # XXX list the exact optimizations that are needed for each test from pypy.jit.metainterp.optimizeopt import (OptIntBounds, OptRewrite, OptVirtualize, OptString, OptHeap, OptFfiCall, Optimizer) optimizations = [OptIntBounds(), OptRewrite(), OptVirtualize(), OptString(), OptHeap(), OptFfiCall(), ] optimizer = Optimizer(metainterp_sd, loop, optimizations, not_a_bridge=True) optimizer.propagate_all_forward() # expected = self.parse(optops) print '\n'.join([str(o) for o in loop.operations]) self.assert_equal(loop, expected)
def test_interp2app_unwrap_spec_path(self):
def test_interp2app_unwrap_spec_path(self, monkeypatch):
def test_interp2app_unwrap_spec_path(self): space = self.space def g(space, p): return p
parts = name.split(os.path.sep)
parts_ends = [i for i in range(0, len(name)) if name[i] == os.path.sep or name[i] == ZIPSEP] parts_ends.append(len(name))
def descr_new_zipimporter(space, w_type, name): w = space.wrap w_ZipImportError = space.getattr(space.getbuiltinmodule('zipimport'), w('ZipImportError')) ok = False parts = name.split(os.path.sep) filename = "" # make annotator happy for i in range(1, len(parts) + 1): filename = os.path.sep.join(parts[:i]) if not filename: filename = os.path.sep try: s = os.stat(filename) except OSError: raise operationerrfmt(w_ZipImportError, "Cannot find name %s", filename) if not stat.S_ISDIR(s.st_mode): ok = True break if not ok: raise operationerrfmt(w_ZipImportError, "Did not find %s to be a valid zippath", name) try: w_result = zip_cache.get(filename) if w_result is None: raise operationerrfmt(w_ZipImportError, "Cannot import %s from zipfile, recursion detected or" "already tried and failed", name) return w_result except KeyError: zip_cache.cache[filename] = None try: zip_file = RZipFile(filename, 'r') except (BadZipfile, OSError): raise operationerrfmt(w_ZipImportError, "%s seems not to be a zipfile", filename) zip_file.close() prefix = name[len(filename):] if prefix.startswith(os.sep): prefix = prefix[1:] w_result = space.wrap(W_ZipImporter(space, name, filename, zip_file.NameToInfo, prefix)) zip_cache.set(filename, w_result) return w_result
for i in range(1, len(parts) + 1): filename = os.path.sep.join(parts[:i])
for i in parts_ends: filename = name[:i]
def descr_new_zipimporter(space, w_type, name): w = space.wrap w_ZipImportError = space.getattr(space.getbuiltinmodule('zipimport'), w('ZipImportError')) ok = False parts = name.split(os.path.sep) filename = "" # make annotator happy for i in range(1, len(parts) + 1): filename = os.path.sep.join(parts[:i]) if not filename: filename = os.path.sep try: s = os.stat(filename) except OSError: raise operationerrfmt(w_ZipImportError, "Cannot find name %s", filename) if not stat.S_ISDIR(s.st_mode): ok = True break if not ok: raise operationerrfmt(w_ZipImportError, "Did not find %s to be a valid zippath", name) try: w_result = zip_cache.get(filename) if w_result is None: raise operationerrfmt(w_ZipImportError, "Cannot import %s from zipfile, recursion detected or" "already tried and failed", name) return w_result except KeyError: zip_cache.cache[filename] = None try: zip_file = RZipFile(filename, 'r') except (BadZipfile, OSError): raise operationerrfmt(w_ZipImportError, "%s seems not to be a zipfile", filename) zip_file.close() prefix = name[len(filename):] if prefix.startswith(os.sep): prefix = prefix[1:] w_result = space.wrap(W_ZipImporter(space, name, filename, zip_file.NameToInfo, prefix)) zip_cache.set(filename, w_result) return w_result
)
ifdef='AF_PACKET')
'typedef unsigned __int32 uint32_t;',
('ifr_name', rffi.CFixedArray(rffi.CHAR, 8))])
('ifr_name', rffi.CFixedArray(rffi.CHAR, 8))], ifdef='AF_PACKET')
'typedef unsigned __int32 uint32_t;',
ioctl = external('ioctl', [socketfd_type, rffi.INT, lltype.Ptr(ifreq)], rffi.INT)
if ifreq is not None: ioctl = external('ioctl', [socketfd_type, rffi.INT, lltype.Ptr(ifreq)], rffi.INT)
def external_c(name, args, result): return rffi.llexternal(name, args, result, compilation_info=eci, calling_conv='c')
newsize = base.read_from_env('PYPY_GC_NURSERY')
newsize = env.read_from_env('PYPY_GC_NURSERY')
def setup(self): """Called at run-time to initialize the GC.""" # # Hack: MovingGCBase.setup() sets up stuff related to id(), which # we implement differently anyway. So directly call GCBase.setup(). GCBase.setup(self) # # A list of all raw_malloced objects (the objects too large) self.rawmalloced_objects = self.AddressStack() self.rawmalloced_total_size = r_uint(0) # # A list of all objects with finalizers (never in the nursery). self.objects_with_finalizers = self.AddressDeque() # # Two lists of the objects with weakrefs. No weakref can be an # old object weakly pointing to a young object: indeed, weakrefs # are immutable so they cannot point to an object that was # created after it. self.young_objects_with_weakrefs = self.AddressStack() self.old_objects_with_weakrefs = self.AddressStack() # # Support for id and identityhash: map nursery objects with # GCFLAG_HAS_SHADOW to their future location at the next # minor collection. self.young_objects_shadows = self.AddressDict() # # Allocate a nursery. In case of auto_nursery_size, start by # allocating a very small nursery, enough to do things like look # up the env var, which requires the GC; and then really # allocate the nursery of the final size. if not self.read_from_env: self.allocate_nursery() else: # defaultsize = self.nursery_size minsize = 2 * (self.nonlarge_gcptrs_max + 1) self.nursery_size = minsize self.allocate_nursery() # # From there on, the GC is fully initialized and the code # below can use it newsize = base.read_from_env('PYPY_GC_NURSERY') # PYPY_GC_NURSERY=1 forces a minor collect for every malloc. # Useful to debug external factors, like trackgcroot or the # handling of the write barrier. 
self.debug_always_do_minor_collect = newsize == 1 if newsize <= 0: newsize = env.estimate_best_nursery_size() if newsize <= 0: newsize = defaultsize newsize = max(newsize, minsize) # major_coll = base.read_float_from_env('PYPY_GC_MAJOR_COLLECT') if major_coll > 1.0: self.major_collection_threshold = major_coll # growth = base.read_float_from_env('PYPY_GC_GROWTH') if growth > 1.0: self.growth_rate_max = growth # min_heap_size = base.read_uint_from_env('PYPY_GC_MIN') if min_heap_size > 0: self.min_heap_size = float(min_heap_size) else: # defaults to 8 times the nursery self.min_heap_size = newsize * 8 # max_heap_size = base.read_uint_from_env('PYPY_GC_MAX') if max_heap_size > 0: self.max_heap_size = float(max_heap_size) # max_delta = base.read_uint_from_env('PYPY_GC_MAX_DELTA') if max_delta > 0: self.max_delta = float(max_delta) else: self.max_delta = 0.125 * env.get_total_memory() # self.minor_collection() # to empty the nursery llarena.arena_free(self.nursery) self.nursery_size = newsize self.allocate_nursery()
major_coll = base.read_float_from_env('PYPY_GC_MAJOR_COLLECT')
major_coll = env.read_float_from_env('PYPY_GC_MAJOR_COLLECT')
def setup(self): """Called at run-time to initialize the GC.""" # # Hack: MovingGCBase.setup() sets up stuff related to id(), which # we implement differently anyway. So directly call GCBase.setup(). GCBase.setup(self) # # A list of all raw_malloced objects (the objects too large) self.rawmalloced_objects = self.AddressStack() self.rawmalloced_total_size = r_uint(0) # # A list of all objects with finalizers (never in the nursery). self.objects_with_finalizers = self.AddressDeque() # # Two lists of the objects with weakrefs. No weakref can be an # old object weakly pointing to a young object: indeed, weakrefs # are immutable so they cannot point to an object that was # created after it. self.young_objects_with_weakrefs = self.AddressStack() self.old_objects_with_weakrefs = self.AddressStack() # # Support for id and identityhash: map nursery objects with # GCFLAG_HAS_SHADOW to their future location at the next # minor collection. self.young_objects_shadows = self.AddressDict() # # Allocate a nursery. In case of auto_nursery_size, start by # allocating a very small nursery, enough to do things like look # up the env var, which requires the GC; and then really # allocate the nursery of the final size. if not self.read_from_env: self.allocate_nursery() else: # defaultsize = self.nursery_size minsize = 2 * (self.nonlarge_gcptrs_max + 1) self.nursery_size = minsize self.allocate_nursery() # # From there on, the GC is fully initialized and the code # below can use it newsize = base.read_from_env('PYPY_GC_NURSERY') # PYPY_GC_NURSERY=1 forces a minor collect for every malloc. # Useful to debug external factors, like trackgcroot or the # handling of the write barrier. 
self.debug_always_do_minor_collect = newsize == 1 if newsize <= 0: newsize = env.estimate_best_nursery_size() if newsize <= 0: newsize = defaultsize newsize = max(newsize, minsize) # major_coll = base.read_float_from_env('PYPY_GC_MAJOR_COLLECT') if major_coll > 1.0: self.major_collection_threshold = major_coll # growth = base.read_float_from_env('PYPY_GC_GROWTH') if growth > 1.0: self.growth_rate_max = growth # min_heap_size = base.read_uint_from_env('PYPY_GC_MIN') if min_heap_size > 0: self.min_heap_size = float(min_heap_size) else: # defaults to 8 times the nursery self.min_heap_size = newsize * 8 # max_heap_size = base.read_uint_from_env('PYPY_GC_MAX') if max_heap_size > 0: self.max_heap_size = float(max_heap_size) # max_delta = base.read_uint_from_env('PYPY_GC_MAX_DELTA') if max_delta > 0: self.max_delta = float(max_delta) else: self.max_delta = 0.125 * env.get_total_memory() # self.minor_collection() # to empty the nursery llarena.arena_free(self.nursery) self.nursery_size = newsize self.allocate_nursery()
growth = base.read_float_from_env('PYPY_GC_GROWTH')
growth = env.read_float_from_env('PYPY_GC_GROWTH')
def setup(self): """Called at run-time to initialize the GC.""" # # Hack: MovingGCBase.setup() sets up stuff related to id(), which # we implement differently anyway. So directly call GCBase.setup(). GCBase.setup(self) # # A list of all raw_malloced objects (the objects too large) self.rawmalloced_objects = self.AddressStack() self.rawmalloced_total_size = r_uint(0) # # A list of all objects with finalizers (never in the nursery). self.objects_with_finalizers = self.AddressDeque() # # Two lists of the objects with weakrefs. No weakref can be an # old object weakly pointing to a young object: indeed, weakrefs # are immutable so they cannot point to an object that was # created after it. self.young_objects_with_weakrefs = self.AddressStack() self.old_objects_with_weakrefs = self.AddressStack() # # Support for id and identityhash: map nursery objects with # GCFLAG_HAS_SHADOW to their future location at the next # minor collection. self.young_objects_shadows = self.AddressDict() # # Allocate a nursery. In case of auto_nursery_size, start by # allocating a very small nursery, enough to do things like look # up the env var, which requires the GC; and then really # allocate the nursery of the final size. if not self.read_from_env: self.allocate_nursery() else: # defaultsize = self.nursery_size minsize = 2 * (self.nonlarge_gcptrs_max + 1) self.nursery_size = minsize self.allocate_nursery() # # From there on, the GC is fully initialized and the code # below can use it newsize = base.read_from_env('PYPY_GC_NURSERY') # PYPY_GC_NURSERY=1 forces a minor collect for every malloc. # Useful to debug external factors, like trackgcroot or the # handling of the write barrier. 
self.debug_always_do_minor_collect = newsize == 1 if newsize <= 0: newsize = env.estimate_best_nursery_size() if newsize <= 0: newsize = defaultsize newsize = max(newsize, minsize) # major_coll = base.read_float_from_env('PYPY_GC_MAJOR_COLLECT') if major_coll > 1.0: self.major_collection_threshold = major_coll # growth = base.read_float_from_env('PYPY_GC_GROWTH') if growth > 1.0: self.growth_rate_max = growth # min_heap_size = base.read_uint_from_env('PYPY_GC_MIN') if min_heap_size > 0: self.min_heap_size = float(min_heap_size) else: # defaults to 8 times the nursery self.min_heap_size = newsize * 8 # max_heap_size = base.read_uint_from_env('PYPY_GC_MAX') if max_heap_size > 0: self.max_heap_size = float(max_heap_size) # max_delta = base.read_uint_from_env('PYPY_GC_MAX_DELTA') if max_delta > 0: self.max_delta = float(max_delta) else: self.max_delta = 0.125 * env.get_total_memory() # self.minor_collection() # to empty the nursery llarena.arena_free(self.nursery) self.nursery_size = newsize self.allocate_nursery()
min_heap_size = base.read_uint_from_env('PYPY_GC_MIN')
min_heap_size = env.read_uint_from_env('PYPY_GC_MIN')
def setup(self): """Called at run-time to initialize the GC.""" # # Hack: MovingGCBase.setup() sets up stuff related to id(), which # we implement differently anyway. So directly call GCBase.setup(). GCBase.setup(self) # # A list of all raw_malloced objects (the objects too large) self.rawmalloced_objects = self.AddressStack() self.rawmalloced_total_size = r_uint(0) # # A list of all objects with finalizers (never in the nursery). self.objects_with_finalizers = self.AddressDeque() # # Two lists of the objects with weakrefs. No weakref can be an # old object weakly pointing to a young object: indeed, weakrefs # are immutable so they cannot point to an object that was # created after it. self.young_objects_with_weakrefs = self.AddressStack() self.old_objects_with_weakrefs = self.AddressStack() # # Support for id and identityhash: map nursery objects with # GCFLAG_HAS_SHADOW to their future location at the next # minor collection. self.young_objects_shadows = self.AddressDict() # # Allocate a nursery. In case of auto_nursery_size, start by # allocating a very small nursery, enough to do things like look # up the env var, which requires the GC; and then really # allocate the nursery of the final size. if not self.read_from_env: self.allocate_nursery() else: # defaultsize = self.nursery_size minsize = 2 * (self.nonlarge_gcptrs_max + 1) self.nursery_size = minsize self.allocate_nursery() # # From there on, the GC is fully initialized and the code # below can use it newsize = base.read_from_env('PYPY_GC_NURSERY') # PYPY_GC_NURSERY=1 forces a minor collect for every malloc. # Useful to debug external factors, like trackgcroot or the # handling of the write barrier. 
self.debug_always_do_minor_collect = newsize == 1 if newsize <= 0: newsize = env.estimate_best_nursery_size() if newsize <= 0: newsize = defaultsize newsize = max(newsize, minsize) # major_coll = base.read_float_from_env('PYPY_GC_MAJOR_COLLECT') if major_coll > 1.0: self.major_collection_threshold = major_coll # growth = base.read_float_from_env('PYPY_GC_GROWTH') if growth > 1.0: self.growth_rate_max = growth # min_heap_size = base.read_uint_from_env('PYPY_GC_MIN') if min_heap_size > 0: self.min_heap_size = float(min_heap_size) else: # defaults to 8 times the nursery self.min_heap_size = newsize * 8 # max_heap_size = base.read_uint_from_env('PYPY_GC_MAX') if max_heap_size > 0: self.max_heap_size = float(max_heap_size) # max_delta = base.read_uint_from_env('PYPY_GC_MAX_DELTA') if max_delta > 0: self.max_delta = float(max_delta) else: self.max_delta = 0.125 * env.get_total_memory() # self.minor_collection() # to empty the nursery llarena.arena_free(self.nursery) self.nursery_size = newsize self.allocate_nursery()
max_heap_size = base.read_uint_from_env('PYPY_GC_MAX')
max_heap_size = env.read_uint_from_env('PYPY_GC_MAX')
def setup(self): """Called at run-time to initialize the GC.""" # # Hack: MovingGCBase.setup() sets up stuff related to id(), which # we implement differently anyway. So directly call GCBase.setup(). GCBase.setup(self) # # A list of all raw_malloced objects (the objects too large) self.rawmalloced_objects = self.AddressStack() self.rawmalloced_total_size = r_uint(0) # # A list of all objects with finalizers (never in the nursery). self.objects_with_finalizers = self.AddressDeque() # # Two lists of the objects with weakrefs. No weakref can be an # old object weakly pointing to a young object: indeed, weakrefs # are immutable so they cannot point to an object that was # created after it. self.young_objects_with_weakrefs = self.AddressStack() self.old_objects_with_weakrefs = self.AddressStack() # # Support for id and identityhash: map nursery objects with # GCFLAG_HAS_SHADOW to their future location at the next # minor collection. self.young_objects_shadows = self.AddressDict() # # Allocate a nursery. In case of auto_nursery_size, start by # allocating a very small nursery, enough to do things like look # up the env var, which requires the GC; and then really # allocate the nursery of the final size. if not self.read_from_env: self.allocate_nursery() else: # defaultsize = self.nursery_size minsize = 2 * (self.nonlarge_gcptrs_max + 1) self.nursery_size = minsize self.allocate_nursery() # # From there on, the GC is fully initialized and the code # below can use it newsize = base.read_from_env('PYPY_GC_NURSERY') # PYPY_GC_NURSERY=1 forces a minor collect for every malloc. # Useful to debug external factors, like trackgcroot or the # handling of the write barrier. 
self.debug_always_do_minor_collect = newsize == 1 if newsize <= 0: newsize = env.estimate_best_nursery_size() if newsize <= 0: newsize = defaultsize newsize = max(newsize, minsize) # major_coll = base.read_float_from_env('PYPY_GC_MAJOR_COLLECT') if major_coll > 1.0: self.major_collection_threshold = major_coll # growth = base.read_float_from_env('PYPY_GC_GROWTH') if growth > 1.0: self.growth_rate_max = growth # min_heap_size = base.read_uint_from_env('PYPY_GC_MIN') if min_heap_size > 0: self.min_heap_size = float(min_heap_size) else: # defaults to 8 times the nursery self.min_heap_size = newsize * 8 # max_heap_size = base.read_uint_from_env('PYPY_GC_MAX') if max_heap_size > 0: self.max_heap_size = float(max_heap_size) # max_delta = base.read_uint_from_env('PYPY_GC_MAX_DELTA') if max_delta > 0: self.max_delta = float(max_delta) else: self.max_delta = 0.125 * env.get_total_memory() # self.minor_collection() # to empty the nursery llarena.arena_free(self.nursery) self.nursery_size = newsize self.allocate_nursery()
max_delta = base.read_uint_from_env('PYPY_GC_MAX_DELTA')
max_delta = env.read_uint_from_env('PYPY_GC_MAX_DELTA')
def setup(self): """Called at run-time to initialize the GC.""" # # Hack: MovingGCBase.setup() sets up stuff related to id(), which # we implement differently anyway. So directly call GCBase.setup(). GCBase.setup(self) # # A list of all raw_malloced objects (the objects too large) self.rawmalloced_objects = self.AddressStack() self.rawmalloced_total_size = r_uint(0) # # A list of all objects with finalizers (never in the nursery). self.objects_with_finalizers = self.AddressDeque() # # Two lists of the objects with weakrefs. No weakref can be an # old object weakly pointing to a young object: indeed, weakrefs # are immutable so they cannot point to an object that was # created after it. self.young_objects_with_weakrefs = self.AddressStack() self.old_objects_with_weakrefs = self.AddressStack() # # Support for id and identityhash: map nursery objects with # GCFLAG_HAS_SHADOW to their future location at the next # minor collection. self.young_objects_shadows = self.AddressDict() # # Allocate a nursery. In case of auto_nursery_size, start by # allocating a very small nursery, enough to do things like look # up the env var, which requires the GC; and then really # allocate the nursery of the final size. if not self.read_from_env: self.allocate_nursery() else: # defaultsize = self.nursery_size minsize = 2 * (self.nonlarge_gcptrs_max + 1) self.nursery_size = minsize self.allocate_nursery() # # From there on, the GC is fully initialized and the code # below can use it newsize = base.read_from_env('PYPY_GC_NURSERY') # PYPY_GC_NURSERY=1 forces a minor collect for every malloc. # Useful to debug external factors, like trackgcroot or the # handling of the write barrier. 
self.debug_always_do_minor_collect = newsize == 1 if newsize <= 0: newsize = env.estimate_best_nursery_size() if newsize <= 0: newsize = defaultsize newsize = max(newsize, minsize) # major_coll = base.read_float_from_env('PYPY_GC_MAJOR_COLLECT') if major_coll > 1.0: self.major_collection_threshold = major_coll # growth = base.read_float_from_env('PYPY_GC_GROWTH') if growth > 1.0: self.growth_rate_max = growth # min_heap_size = base.read_uint_from_env('PYPY_GC_MIN') if min_heap_size > 0: self.min_heap_size = float(min_heap_size) else: # defaults to 8 times the nursery self.min_heap_size = newsize * 8 # max_heap_size = base.read_uint_from_env('PYPY_GC_MAX') if max_heap_size > 0: self.max_heap_size = float(max_heap_size) # max_delta = base.read_uint_from_env('PYPY_GC_MAX_DELTA') if max_delta > 0: self.max_delta = float(max_delta) else: self.max_delta = 0.125 * env.get_total_memory() # self.minor_collection() # to empty the nursery llarena.arena_free(self.nursery) self.nursery_size = newsize self.allocate_nursery()
if hasattr(op, 'getfailargs'):
if not we_are_translated() and hasattr(op, 'getfailargs'):
def _emit_guard(self, op, regalloc, fcond, save_exc=False): descr = op.getdescr() assert isinstance(descr, AbstractFailDescr) if hasattr(op, 'getfailargs'): print 'Failargs: ', op.getfailargs()
self._emit_guard(op, regalloc, c.NE)
if offset is not None: self.mc.ADD_ri(r.pc.value, r.pc.value, 2*WORD, cond=c.EQ) else: raise NotImplementedError
def emit_op_guard_nonnull_class(self, op, regalloc, fcond): locs = self._prepare_guard_class(op, regalloc, fcond) self.mc.CMP_ri(locs[0].value, 0) self._emit_guard(op, regalloc, c.NE) self._cmp_guard_class(op, locs, regalloc, fcond) return fcond
imm_ofs = self._check_imm_arg(ofs_length)
ofs_box = ConstInt(ofs_length) imm_ofs = self._check_imm_arg(ofs_box)
def emit_op_strlen(self, op, regalloc, fcond): l0, box = self._ensure_value_is_boxed(op.getarg(0), regalloc) boxes = [box]
l1 = regalloc.make_sure_var_in_reg(ConstInt(ofs_length), boxes) else: l1, box1 = self._ensure_value_is_boxed(ConstInt(ofs_length), regalloc, boxes)
l1 = regalloc.make_sure_var_in_reg(ofs_box, boxes) else: l1, box1 = self._ensure_value_is_boxed(ofs_box, regalloc, boxes)
def emit_op_strlen(self, op, regalloc, fcond): l0, box = self._ensure_value_is_boxed(op.getarg(0), regalloc) boxes = [box]
imm_ofs = self._check_imm_arg(ofs_length)
ofs_box = ConstInt(ofs_length) imm_ofs = self._check_imm_arg(ofs_box)
def emit_op_unicodelen(self, op, regalloc, fcond): l0, box = self._ensure_value_is_boxed(op.getarg(0), regalloc) boxes = [box] res = regalloc.force_allocate_reg(op.result, boxes) boxes.append(op.result) basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, self.cpu.translate_support_code) imm_ofs = self._check_imm_arg(ofs_length)
l1 = regalloc.make_sure_var_in_reg(ConstInt(ofs_length), boxes) else: l1, box1 = self._ensure_value_is_boxed(ConstInt(ofs_length), regalloc, boxes)
l1 = regalloc.make_sure_var_in_reg(ofs_box, boxes) else: l1, box1 = self._ensure_value_is_boxed(ofs_box, regalloc, boxes)
def emit_op_unicodelen(self, op, regalloc, fcond): l0, box = self._ensure_value_is_boxed(op.getarg(0), regalloc) boxes = [box] res = regalloc.force_allocate_reg(op.result, boxes) boxes.append(op.result) basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, self.cpu.translate_support_code) imm_ofs = self._check_imm_arg(ofs_length)
regalloc.possibly_free_var(v) regalloc.possibly_free_var(tempbox)
def _malloc_varsize(self, ofs_items, ofs_length, scale, v, res_v, regalloc): tempbox = TempBox() size_loc = regalloc.force_allocate_reg(tempbox) self.mc.gen_load_int(size_loc.value, ofs_items + (v.getint() << scale)) self._emit_call(self.malloc_func_addr, [tempbox], regalloc, result=res_v) loc = regalloc.make_sure_var_in_reg(v, [res_v]) regalloc.possibly_free_var(v) regalloc.possibly_free_var(tempbox)
import warnings warnings.simplefilter('error', DeprecationWarning) try: raises(DeprecationWarning, f) finally: warnings.simplefilter('default', DeprecationWarning)
raises(TypeError, f)
def f(): raise "test"
def test_stringexc(self): a = "hello world" try: raise a except a, e: assert e == None try: raise a, "message" except a, e: assert e == "message"
def test_more_precise_instancearg(self): try: raise Exception, SystemError(1, 2) except SystemError, e: assert e.args[0] == 1 assert e.args[1] == 2
setfield_gc(p2, 0, descr=virtualtokendescr)
setfield_gc(p2, -2, descr=virtualtokendescr)
def test_vref_nonvirtual_escape(self): ops = """ [p1] p2 = virtual_ref(p1, 5) escape(p2) virtual_ref_finish(p2, p1) jump(p1) """ expected = """ [p1] i0 = force_token() p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i0, descr=virtualtokendescr) setfield_gc(p2, 5, descr=virtualrefindexdescr) escape(p2) setfield_gc(p2, p1, descr=virtualforceddescr) setfield_gc(p2, 0, descr=virtualtokendescr) jump(p1) """ # XXX we should optimize a bit more the case of a nonvirtual. # in theory it is enough to just do 'p2 = p1'. self.optimize_loop(ops, 'Not', expected)
setfield_gc(p2, 0, descr=virtualtokendescr)
setfield_gc(p2, -2, descr=virtualtokendescr)
def test_vref_virtual_1(self): ops = """ [p0, i1] # p1 = new_with_vtable(ConstClass(node_vtable)) p1b = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p1b, 252, descr=valuedescr) setfield_gc(p1, p1b, descr=nextdescr) # p2 = virtual_ref(p1, 3) setfield_gc(p0, p2, descr=nextdescr) call_may_force(i1, descr=mayforcevirtdescr) guard_not_forced() [i1] virtual_ref_finish(p2, p1) setfield_gc(p0, NULL, descr=nextdescr) jump(p0, i1) """ expected = """ [p0, i1] i3 = force_token() # p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) setfield_gc(p2, 3, descr=virtualrefindexdescr) setfield_gc(p0, p2, descr=nextdescr) # call_may_force(i1, descr=mayforcevirtdescr) guard_not_forced() [i1] setfield_gc(p0, NULL, descr=nextdescr) # p1 = new_with_vtable(ConstClass(node_vtable)) p1b = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p1b, 252, descr=valuedescr) setfield_gc(p1, p1b, descr=nextdescr) setfield_gc(p2, p1, descr=virtualforceddescr) setfield_gc(p2, 0, descr=virtualtokendescr) # jump(p0, i1) """ self.optimize_loop(ops, 'Not, Not', expected)
setfield_gc(p2, 0, descr=virtualtokendescr)
setfield_gc(p2, -2, descr=virtualtokendescr)
def test_vref_virtual_2(self): self.make_fail_descr() ops = """ [p0, i1] # p1 = new_with_vtable(ConstClass(node_vtable)) p1b = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p1b, i1, descr=valuedescr) setfield_gc(p1, p1b, descr=nextdescr) # p2 = virtual_ref(p1, 2) setfield_gc(p0, p2, descr=nextdescr) call_may_force(i1, descr=mayforcevirtdescr) guard_not_forced(descr=fdescr) [p2, p1] virtual_ref_finish(p2, p1) setfield_gc(p0, NULL, descr=nextdescr) jump(p0, i1) """ expected = """ [p0, i1] i3 = force_token() # p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable)) setfield_gc(p2, i3, descr=virtualtokendescr) setfield_gc(p2, 2, descr=virtualrefindexdescr) setfield_gc(p0, p2, descr=nextdescr) # call_may_force(i1, descr=mayforcevirtdescr) guard_not_forced(descr=fdescr) [p2, i1] setfield_gc(p0, NULL, descr=nextdescr) # p1 = new_with_vtable(ConstClass(node_vtable)) p1b = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p1b, i1, descr=valuedescr) setfield_gc(p1, p1b, descr=nextdescr) setfield_gc(p2, p1, descr=virtualforceddescr) setfield_gc(p2, 0, descr=virtualtokendescr) # jump(p0, i1) """ # the point of this test is that 'i1' should show up in the fail_args # of 'guard_not_forced', because it was stored in the virtual 'p1b'. self.optimize_loop(ops, 'Not, Not', expected) self.check_expanded_fail_descr('''p2, p1 where p1 is a node_vtable, nextdescr=p1b where p1b is a node_vtable, valuedescr=i1 ''')
newdata.extend(c for c in space.str_w(list_w[i]))
newdata.extend([c for c in space.str_w(list_w[i])])
def str_join__Bytearray_ANY(space, w_self, w_list): list_w = space.listview(w_list) if not list_w: return W_BytearrayObject([]) data = w_self.data reslen = 0 for i in range(len(list_w)): w_s = list_w[i] if not (space.is_true(space.isinstance(w_s, space.w_str)) or space.is_true(space.isinstance(w_s, space.w_bytearray))): raise operationerrfmt( space.w_TypeError, "sequence item %d: expected string, %s " "found", i, space.type(w_s).getname(space, '?')) reslen += len(space.str_w(w_s)) newdata = [] for i in range(len(list_w)): if data and i != 0: newdata.extend(data) newdata.extend(c for c in space.str_w(list_w[i])) return W_BytearrayObject(newdata)
op.getdescr().rd_snapshot = None
orgdescr = op.getdescr() assert isinstance(orgdescr, ResumeGuardDescr) orgdescr.rd_snapshot = None
def inline(self, loop_operations, loop_args, jump_args): self.argmap = argmap = {} assert len(loop_args) == len(jump_args) for i in range(len(loop_args)): argmap[loop_args[i]] = jump_args[i]
op.getdescr().rd_snapshot = None
descr = op.getdescr() assert isinstance(descr, ResumeGuardDescr) descr.rd_snapshot = None
def inline(self, loop_operations, loop_args, jump_args): self.argmap = argmap = {} assert len(loop_args) == len(jump_args) for i in range(len(loop_args)): argmap[loop_args[i]] = jump_args[i]
return Snapshot(self.inline_snapshot(snapshot.prev), boxes)
new_snapshot = Snapshot(self.inline_snapshot(snapshot.prev), boxes[:]) self.snapshot_map[snapshot] = new_snapshot return new_snapshot
def inline_snapshot(self, snapshot): if snapshot in self.snapshot_map: return self.snapshot_map[snapshot] boxes = [] for a in snapshot.boxes: if isinstance(a, Const): boxes.append(a) else: boxes.append(self.inline_arg(a)) return Snapshot(self.inline_snapshot(snapshot.prev), boxes)
out = py.builtin._totext(out, sys.getdefaultencoding()) err = py.builtin._totext(err, sys.getdefaultencoding())
out = py.builtin._totext(out, sys.stdout.encoding) err = py.builtin._totext(err, sys.stderr.encoding)
def cmdexec(cmd): """ return output of executing 'cmd' in a separate process. raise cmdexec.ExecutionFailed exeception if the command failed. the exception will provide an 'err' attribute containing the error-output from the command. """ process = subprocess.Popen(cmd, shell=True, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = process.communicate() out = py.builtin._totext(out, sys.getdefaultencoding()) err = py.builtin._totext(err, sys.getdefaultencoding()) status = process.poll() if status: raise ExecutionFailed(status, status, cmd, out, err) return out
printf("&p = %ld, p.x = %ld, p.y = %ld\\n", &p, p.x, p.y);
def test_byval_argument(self): """ struct Point { long x; long y; };
def fdopenstream(self, stream, fd, mode):
def fdopenstream(self, stream, fd, mode, w_name=None):
def fdopenstream(self, stream, fd, mode): self.fd = fd self.mode = mode self.stream = stream if stream.flushable(): getopenstreams(self.space)[stream] = None
assert fn(sys.maxint) == 4*sys.maxint
assert fn(2147483647) == 4*2147483647
def f(i): return 4*i
assert gn(sys.maxint) == 4*sys.maxint
assert gn(2147483647) == 4*2147483647
def g(i): return 4*i
importlist.append(os.path.join(python_std_lib, 'plat-mac')) importlist.append(os.path.join(python_std_lib, 'plat-mac', 'lib-scriptpackages'))
platmac = os.path.join(python_std_lib, 'plat-mac') importlist.append(platmac) importlist.append(os.path.join(platmac, 'lib-scriptpackages'))
def getinitialpath(prefix): from pypy.module.sys.version import CPYTHON_VERSION dirname = '%d.%d.%d' % (CPYTHON_VERSION[0], CPYTHON_VERSION[1], CPYTHON_VERSION[2]) lib_python = os.path.join(prefix, 'lib-python') python_std_lib = os.path.join(lib_python, dirname) checkdir(python_std_lib) python_std_lib_modified = os.path.join(lib_python, 'modified-' + dirname) checkdir(python_std_lib_modified) lib_pypy = os.path.join(prefix, 'lib_pypy') checkdir(lib_pypy) importlist = [] importlist.append(lib_pypy) importlist.append(python_std_lib_modified) importlist.append(python_std_lib) # # List here the extra platform-specific paths. if platform != 'win32': importlist.append(os.path.join(python_std_lib, 'plat-'+platform)) if platform == 'darwin': importlist.append(os.path.join(python_std_lib, 'plat-mac')) importlist.append(os.path.join(python_std_lib, 'plat-mac', 'lib-scriptpackages')) # return importlist
elif self.find_rewriteable_bool(op, args):
elif self.find_rewritable_bool(op, args):
def optimize_default(self, op): if op.is_always_pure(): for arg in op.args: if self.get_constant_box(arg) is None: break else: # all constant arguments: constant-fold away argboxes = [self.get_constant_box(arg) for arg in op.args] resbox = execute_nonspec(self.cpu, None, op.opnum, argboxes, op.descr) self.make_constant(op.result, resbox.constbox()) return
def find_rewriteable_bool(self, op, args):
def find_rewritable_bool(self, op, args):
def find_rewriteable_bool(self, op, args): try: oldopnum = opboolinvers[op.opnum] targs = [args[0], args[1], ConstInt(oldopnum)] if self.try_boolinvers(op, targs): return True except KeyError: pass
rstack.resume_point("CALL_METHOD_no_kwargs", f, n_args, returns=w_result)
rstack.resume_point("CALL_METHOD", f, w_self, n_args, returns=w_result)
def CALL_METHOD(f, oparg, *ignored): # opargs contains the arg, and kwarg count, excluding the implicit 'self' n_args = oparg & 0xff n_kwargs = (oparg >> 8) & 0xff w_self = f.peekvalue(n_args + (2 * n_kwargs)) w_callable = f.peekvalue(n_args + (2 * n_kwargs) + 1) n = n_args + (w_self is not None) if not n_kwargs: try: w_result = f.space.call_valuestack(w_callable, n, f) rstack.resume_point("CALL_METHOD_no_kwargs", f, n_args, returns=w_result) finally: f.dropvalues(n_args + 2) else: keywords = [None] * n_kwargs keywords_w = [None] * n_kwargs while True: n_kwargs -= 1 if n_kwargs < 0: break w_value = f.popvalue() w_key = f.popvalue() key = f.space.str_w(w_key) keywords[n_kwargs] = key keywords_w[n_kwargs] = w_value arguments = f.popvalues(n) args = f.argument_factory(arguments, keywords, keywords_w, None, None) try: w_result = f.space.call_args(w_callable, args) rstack.resume_point("CALL_METHOD", f, w_self, returns=w_result) finally: f.dropvalues(1 + (w_self is None)) f.pushvalue(w_result)
rstack.resume_point("CALL_METHOD", f, w_self, returns=w_result)
rstack.resume_point("CALL_METHOD", f, w_self, n_args, returns=w_result)
def CALL_METHOD(f, oparg, *ignored): # opargs contains the arg, and kwarg count, excluding the implicit 'self' n_args = oparg & 0xff n_kwargs = (oparg >> 8) & 0xff w_self = f.peekvalue(n_args + (2 * n_kwargs)) w_callable = f.peekvalue(n_args + (2 * n_kwargs) + 1) n = n_args + (w_self is not None) if not n_kwargs: try: w_result = f.space.call_valuestack(w_callable, n, f) rstack.resume_point("CALL_METHOD_no_kwargs", f, n_args, returns=w_result) finally: f.dropvalues(n_args + 2) else: keywords = [None] * n_kwargs keywords_w = [None] * n_kwargs while True: n_kwargs -= 1 if n_kwargs < 0: break w_value = f.popvalue() w_key = f.popvalue() key = f.space.str_w(w_key) keywords[n_kwargs] = key keywords_w[n_kwargs] = w_value arguments = f.popvalues(n) args = f.argument_factory(arguments, keywords, keywords_w, None, None) try: w_result = f.space.call_args(w_callable, args) rstack.resume_point("CALL_METHOD", f, w_self, returns=w_result) finally: f.dropvalues(1 + (w_self is None)) f.pushvalue(w_result)
if encoding == "ascii": expect = self.lowleveltype
if encoding == "ascii" and self.lowleveltype == UniChar: expect = UniChar
def rtype_method_encode(self, hop): if not hop.args_s[1].is_constant(): raise TyperError("encoding must be constant") encoding = hop.args_s[1].const if encoding == "ascii": expect = self.lowleveltype # can be a UniChar else: expect = self.repr # must be a regular unicode string v_self = hop.inputarg(expect, 0) hop.exception_is_here() if encoding == "ascii": return hop.gendirectcall(self.ll_str, v_self) elif encoding == "latin-1": return hop.gendirectcall(self.ll_encode_latin1, v_self) else: raise TyperError("encoding %s not implemented" % (encoding, ))
compilation_info=CConfig._compilation_info_)
compilation_info=CConfig._compilation_info_, sandboxsafe=True, threadsafe=True)
def external(name, args, result): return rffi.llexternal(name, args, result, compilation_info=CConfig._compilation_info_)
return rffi.llexternal(name, args, result, compilation_info=CConfig._compilation_info_, calling_conv='win')
return rffi.llexternal(name, args, result, compilation_info=CConfig._compilation_info_, calling_conv='win', sandboxsafe=True, threadsafe=True)
def winexternal(name, args, result): return rffi.llexternal(name, args, result, compilation_info=CConfig._compilation_info_, calling_conv='win')
from pypy.interpreter.gateway import interp2app, Arguments
from pypy.interpreter.gateway import interp2app, Arguments, unwrap_spec
from pypy.interpreter.typedef import TypeDef, interp_attrproperty
descr_new.unwrap_spec = [ObjSpace, W_Root, Arguments]
def descr_new(space, w_subtype, __args__): self = space.allocate_instance(W_Type, w_subtype) W_Type.__init__(self, space) return space.wrap(self)
descr_init.unwrap_spec = ['self', ObjSpace, W_Root, W_Root, int]
def descr_init(self, space, w_errno, w_strerror, written=0): W_IOError.descr_init(self, space, [w_errno, w_strerror]) self.written = written
gc_buf = lltype.nullptr(STRTYPE)
gc_buf = rgc.malloc_nonmovable(STRTYPE, count)
def alloc_buffer(count): """ Returns a (raw_buffer, gc_buffer) pair, allocated with count bytes. The raw_buffer can be safely passed to a native function which expects it to not move. Call str_from_buffer with the returned values to get a safe high-level string. When the garbage collector cooperates, this allows for the process to be performed without an extra copy. Make sure to call keep_buffer_alive_until_here on the returned values. """ str_chars_offset = (offsetof(STRTYPE, 'chars') + \ itemoffsetof(STRTYPE.chars, 0)) gc_buf = lltype.nullptr(STRTYPE) # rgc.malloc_nonmovable(STRTYPE, count) if gc_buf: realbuf = cast_ptr_to_adr(gc_buf) + str_chars_offset raw_buf = cast(TYPEP, realbuf) return raw_buf, gc_buf else: raw_buf = lltype.malloc(TYPEP.TO, count, flavor='raw') return raw_buf, lltype.nullptr(STRTYPE)
def optimize_loop(self, ops, spectext, optops, checkspecnodes=True, expected_preamble=None):
def optimize_loop(self, ops, spectext, optops, expected_preamble=None):
def optimize_loop(self, ops, spectext, optops, checkspecnodes=True, expected_preamble=None): loop = self.parse(ops) # if checkspecnodes: # verify that 'spectext' is indeed what optimizefindnode would # compute for this loop cpu = self.cpu perfect_specialization_finder = PerfectSpecializationFinder(cpu) perfect_specialization_finder.find_nodes_loop(loop) self.check_specnodes(loop.token.specnodes, spectext) else: # for cases where we want to see how optimizeopt behaves with # combinations different from the one computed by optimizefindnode loop.token.specnodes = self.unpack_specnodes(spectext) # self.loop = loop loop.preamble = TreeLoop('preamble') loop.preamble.inputargs = loop.inputargs metainterp_sd = FakeMetaInterpStaticData(self.cpu) if hasattr(self, 'vrefinfo'): metainterp_sd.virtualref_info = self.vrefinfo optimize_loop_1(metainterp_sd, loop) # expected = self.parse(optops) print print "Ops: " print '\n'.join([str(o) for o in loop.operations]) self.assert_equal(loop, expected)
if checkspecnodes: cpu = self.cpu perfect_specialization_finder = PerfectSpecializationFinder(cpu) perfect_specialization_finder.find_nodes_loop(loop) self.check_specnodes(loop.token.specnodes, spectext) else: loop.token.specnodes = self.unpack_specnodes(spectext)
loop.token.specnodes = self.unpack_specnodes(spectext)
def optimize_loop(self, ops, spectext, optops, checkspecnodes=True, expected_preamble=None): loop = self.parse(ops) # if checkspecnodes: # verify that 'spectext' is indeed what optimizefindnode would # compute for this loop cpu = self.cpu perfect_specialization_finder = PerfectSpecializationFinder(cpu) perfect_specialization_finder.find_nodes_loop(loop) self.check_specnodes(loop.token.specnodes, spectext) else: # for cases where we want to see how optimizeopt behaves with # combinations different from the one computed by optimizefindnode loop.token.specnodes = self.unpack_specnodes(spectext) # self.loop = loop loop.preamble = TreeLoop('preamble') loop.preamble.inputargs = loop.inputargs metainterp_sd = FakeMetaInterpStaticData(self.cpu) if hasattr(self, 'vrefinfo'): metainterp_sd.virtualref_info = self.vrefinfo optimize_loop_1(metainterp_sd, loop) # expected = self.parse(optops) print print "Ops: " print '\n'.join([str(o) for o in loop.operations]) self.assert_equal(loop, expected)
print "Ops: "
print loop.preamble.inputargs print '\n'.join([str(o) for o in loop.preamble.operations]) print print loop.inputargs
def optimize_loop(self, ops, spectext, optops, checkspecnodes=True, expected_preamble=None): loop = self.parse(ops) # if checkspecnodes: # verify that 'spectext' is indeed what optimizefindnode would # compute for this loop cpu = self.cpu perfect_specialization_finder = PerfectSpecializationFinder(cpu) perfect_specialization_finder.find_nodes_loop(loop) self.check_specnodes(loop.token.specnodes, spectext) else: # for cases where we want to see how optimizeopt behaves with # combinations different from the one computed by optimizefindnode loop.token.specnodes = self.unpack_specnodes(spectext) # self.loop = loop loop.preamble = TreeLoop('preamble') loop.preamble.inputargs = loop.inputargs metainterp_sd = FakeMetaInterpStaticData(self.cpu) if hasattr(self, 'vrefinfo'): metainterp_sd.virtualref_info = self.vrefinfo optimize_loop_1(metainterp_sd, loop) # expected = self.parse(optops) print print "Ops: " print '\n'.join([str(o) for o in loop.operations]) self.assert_equal(loop, expected)
expected = """
preamble = """
def test_constant_boolrewrite_lt(self): ops = """ [i0] i1 = int_lt(i0, 0) guard_true(i1) [] i2 = int_ge(i0, 0) guard_false(i2) [] jump(i0) """ expected = """ [i0] i1 = int_lt(i0, 0) guard_true(i1) [] jump(i0) """ self.optimize_loop(ops, 'Not', expected)
self.optimize_loop(ops, 'Not', expected)
expected = """ [i0] jump(i0) """ self.optimize_loop(ops, 'Not', expected, expected_preamble=preamble)
def test_constant_boolrewrite_lt(self): ops = """ [i0] i1 = int_lt(i0, 0) guard_true(i1) [] i2 = int_ge(i0, 0) guard_false(i2) [] jump(i0) """ expected = """ [i0] i1 = int_lt(i0, 0) guard_true(i1) [] jump(i0) """ self.optimize_loop(ops, 'Not', expected)
expected = """
preamble = """
def test_constant_boolrewrite_gt(self): ops = """ [i0] i1 = int_gt(i0, 0) guard_true(i1) [] i2 = int_le(i0, 0) guard_false(i2) [] jump(i0) """ expected = """ [i0] i1 = int_gt(i0, 0) guard_true(i1) [] jump(i0) """ self.optimize_loop(ops, 'Not', expected)
self.optimize_loop(ops, 'Not', expected)
expected = """ [i0] jump(i0) """ self.optimize_loop(ops, 'Not', expected, expected_preamble=preamble)
def test_constant_boolrewrite_gt(self): ops = """ [i0] i1 = int_gt(i0, 0) guard_true(i1) [] i2 = int_le(i0, 0) guard_false(i2) [] jump(i0) """ expected = """ [i0] i1 = int_gt(i0, 0) guard_true(i1) [] jump(i0) """ self.optimize_loop(ops, 'Not', expected)
expected = """
preamble = """
def test_constant_boolrewrite_reflex(self): ops = """ [i0] i1 = int_gt(i0, 0) guard_true(i1) [] i2 = int_lt(0, i0) guard_true(i2) [] jump(i0) """ expected = """ [i0] i1 = int_gt(i0, 0) guard_true(i1) [] jump(i0) """ self.optimize_loop(ops, 'Not', expected)
self.optimize_loop(ops, 'Not', expected)
expected = """ [i0] jump(i0) """ self.optimize_loop(ops, 'Not', expected, expected_preamble=preamble)
def test_constant_boolrewrite_reflex(self): ops = """ [i0] i1 = int_gt(i0, 0) guard_true(i1) [] i2 = int_lt(0, i0) guard_true(i2) [] jump(i0) """ expected = """ [i0] i1 = int_gt(i0, 0) guard_true(i1) [] jump(i0) """ self.optimize_loop(ops, 'Not', expected)
expected = """
preamble = """
def test_constant_boolrewrite_reflex_invers(self): ops = """ [i0] i1 = int_gt(i0, 0) guard_true(i1) [] i2 = int_ge(0, i0) guard_false(i2) [] jump(i0) """ expected = """ [i0] i1 = int_gt(i0, 0) guard_true(i1) [] jump(i0) """ self.optimize_loop(ops, 'Not', expected)
self.optimize_loop(ops, 'Not', expected)
expected = """ [i0] jump(i0) """ self.optimize_loop(ops, 'Not', expected, expected_preamble=preamble)
def test_constant_boolrewrite_reflex_invers(self): ops = """ [i0] i1 = int_gt(i0, 0) guard_true(i1) [] i2 = int_ge(0, i0) guard_false(i2) [] jump(i0) """ expected = """ [i0] i1 = int_gt(i0, 0) guard_true(i1) [] jump(i0) """ self.optimize_loop(ops, 'Not', expected)
jump(ConstPtr(myptr))
jump(p1)
def test_remove_guard_value_if_constant(self): ops = """ [p1] guard_value(p1, ConstPtr(myptr)) [] jump(ConstPtr(myptr)) """ expected = """ [] jump() """ self.optimize_loop(ops, 'Constant(myptr)', expected)
self.optimize_loop(ops, 'Constant(myptr)', expected)
self.optimize_loop(ops, 'Not', expected)
def test_remove_guard_value_if_constant(self): ops = """ [p1] guard_value(p1, ConstPtr(myptr)) [] jump(ConstPtr(myptr)) """ expected = """ [] jump() """ self.optimize_loop(ops, 'Constant(myptr)', expected)
expected = """
preamble = """
def test_ooisnull_oononnull_1(self): ops = """ [p0] guard_class(p0, ConstClass(node_vtable)) [] guard_nonnull(p0) [] jump(p0) """ expected = """ [p0] guard_class(p0, ConstClass(node_vtable)) [] jump(p0) """ self.optimize_loop(ops, 'Not', expected)
self.optimize_loop(ops, 'Not', expected)
expected = """ [p0] jump(p0) """ self.optimize_loop(ops, 'Not', expected, preamble)
def test_ooisnull_oononnull_1(self): ops = """ [p0] guard_class(p0, ConstClass(node_vtable)) [] guard_nonnull(p0) [] jump(p0) """ expected = """ [p0] guard_class(p0, ConstClass(node_vtable)) [] jump(p0) """ self.optimize_loop(ops, 'Not', expected)
expected = """
preamble = """
def test_int_is_true_1(self): ops = """ [i0] i1 = int_is_true(i0) guard_true(i1) [] i2 = int_is_true(i0) guard_true(i2) [] jump(i0) """ expected = """ [i0] i1 = int_is_true(i0) guard_true(i1) [] jump(i0) """ self.optimize_loop(ops, 'Not', expected)
self.optimize_loop(ops, 'Not', expected)
expected = """ [i0] jump(i0) """ self.optimize_loop(ops, 'Not', expected, preamble)
def test_int_is_true_1(self): ops = """ [i0] i1 = int_is_true(i0) guard_true(i1) [] i2 = int_is_true(i0) guard_true(i2) [] jump(i0) """ expected = """ [i0] i1 = int_is_true(i0) guard_true(i1) [] jump(i0) """ self.optimize_loop(ops, 'Not', expected)
expected = """
preamble = """
def test_ooisnull_oononnull_2(self): ops = """ [p0] guard_nonnull(p0) [] guard_nonnull(p0) [] jump(p0) """ expected = """ [p0] guard_nonnull(p0) [] jump(p0) """ self.optimize_loop(ops, 'Not', expected)
self.optimize_loop(ops, 'Not', expected)
expected = """ [p0] jump(p0) """ self.optimize_loop(ops, 'Not', expected, preamble)
def test_ooisnull_oononnull_2(self): ops = """ [p0] guard_nonnull(p0) [] guard_nonnull(p0) [] jump(p0) """ expected = """ [p0] guard_nonnull(p0) [] jump(p0) """ self.optimize_loop(ops, 'Not', expected)
expected = """
preamble = """
def test_ooisnull_oononnull_via_virtual(self): ops = """ [p0] pv = new_with_vtable(ConstClass(node_vtable)) setfield_gc(pv, p0, descr=valuedescr) guard_nonnull(p0) [] p1 = getfield_gc(pv, descr=valuedescr) guard_nonnull(p1) [] jump(p0) """ expected = """ [p0] guard_nonnull(p0) [] jump(p0) """ self.optimize_loop(ops, 'Not', expected)
self.optimize_loop(ops, 'Not', expected)
expected = """ [p0] jump(p0) """ self.optimize_loop(ops, 'Not', expected, preamble)
def test_ooisnull_oononnull_via_virtual(self): ops = """ [p0] pv = new_with_vtable(ConstClass(node_vtable)) setfield_gc(pv, p0, descr=valuedescr) guard_nonnull(p0) [] p1 = getfield_gc(pv, descr=valuedescr) guard_nonnull(p1) [] jump(p0) """ expected = """ [p0] guard_nonnull(p0) [] jump(p0) """ self.optimize_loop(ops, 'Not', expected)
expected = """
preamble = """
def test_oois_1(self): ops = """ [p0] guard_class(p0, ConstClass(node_vtable)) [] i0 = ptr_ne(p0, NULL) guard_true(i0) [] i1 = ptr_eq(p0, NULL) guard_false(i1) [] i2 = ptr_ne(NULL, p0) guard_true(i0) [] i3 = ptr_eq(NULL, p0) guard_false(i1) [] jump(p0) """ expected = """ [p0] guard_class(p0, ConstClass(node_vtable)) [] jump(p0) """ self.optimize_loop(ops, 'Not', expected)
self.optimize_loop(ops, 'Not', expected)
expected = """ [p0] jump(p0) """ self.optimize_loop(ops, 'Not', expected, preamble)
def test_oois_1(self): ops = """ [p0] guard_class(p0, ConstClass(node_vtable)) [] i0 = ptr_ne(p0, NULL) guard_true(i0) [] i1 = ptr_eq(p0, NULL) guard_false(i1) [] i2 = ptr_ne(NULL, p0) guard_true(i0) [] i3 = ptr_eq(NULL, p0) guard_false(i1) [] jump(p0) """ expected = """ [p0] guard_class(p0, ConstClass(node_vtable)) [] jump(p0) """ self.optimize_loop(ops, 'Not', expected)
expected = """
preamble = """
def test_guard_value_to_guard_true(self): ops = """ [i] i1 = int_lt(i, 3) guard_value(i1, 1) [i] jump(i) """ expected = """ [i] i1 = int_lt(i, 3) guard_true(i1) [i] jump(i) """ self.optimize_loop(ops, 'Not', expected)
self.optimize_loop(ops, 'Not', expected)
expected = """ [i] jump(i) """ self.optimize_loop(ops, 'Not', expected, preamble)
def test_guard_value_to_guard_true(self): ops = """ [i] i1 = int_lt(i, 3) guard_value(i1, 1) [i] jump(i) """ expected = """ [i] i1 = int_lt(i, 3) guard_true(i1) [i] jump(i) """ self.optimize_loop(ops, 'Not', expected)
expected = """
preamble = """
def test_guard_value_to_guard_false(self): ops = """ [i] i1 = int_is_true(i) guard_value(i1, 0) [i] jump(i) """ expected = """ [i] i1 = int_is_true(i) guard_false(i1) [i] jump(i) """ self.optimize_loop(ops, 'Not', expected)
self.optimize_loop(ops, 'Not', expected)
expected = """ [i] jump(i) """ self.optimize_loop(ops, 'Not', expected, preamble)
def test_guard_value_to_guard_false(self): ops = """ [i] i1 = int_is_true(i) guard_value(i1, 0) [i] jump(i) """ expected = """ [i] i1 = int_is_true(i) guard_false(i1) [i] jump(i) """ self.optimize_loop(ops, 'Not', expected)
expected = """
preamble = """
def test_guard_value_on_nonbool(self): ops = """ [i] i1 = int_add(i, 3) guard_value(i1, 0) [i] jump(i) """ expected = """ [i] i1 = int_add(i, 3) guard_value(i1, 0) [i] jump(-3) """ self.optimize_loop(ops, 'Not', expected)
jump(-3) """ self.optimize_loop(ops, 'Not', expected)
jump() """ expected = """ [] jump() """ self.optimize_loop(ops, 'Not', expected, preamble)
def test_guard_value_on_nonbool(self): ops = """ [i] i1 = int_add(i, 3) guard_value(i1, 0) [i] jump(i) """ expected = """ [i] i1 = int_add(i, 3) guard_value(i1, 0) [i] jump(-3) """ self.optimize_loop(ops, 'Not', expected)
expected = """
preamble = """
def test_int_is_true_of_bool(self): ops = """ [i0, i1] i2 = int_gt(i0, i1) i3 = int_is_true(i2) i4 = int_is_true(i3) guard_value(i4, 0) [i0, i1] jump(i0, i1) """ expected = """ [i0, i1] i2 = int_gt(i0, i1) guard_false(i2) [i0, i1] jump(i0, i1) """ self.optimize_loop(ops, 'Not, Not', expected)
self.optimize_loop(ops, 'Not, Not', expected)
expected = """ [i0, i1] jump(i0, i1) """ self.optimize_loop(ops, 'Not, Not', expected, preamble)
def test_int_is_true_of_bool(self): ops = """ [i0, i1] i2 = int_gt(i0, i1) i3 = int_is_true(i2) i4 = int_is_true(i3) guard_value(i4, 0) [i0, i1] jump(i0, i1) """ expected = """ [i0, i1] i2 = int_gt(i0, i1) guard_false(i2) [i0, i1] jump(i0, i1) """ self.optimize_loop(ops, 'Not, Not', expected)
self.optimize_loop(ops, 'Not, Not, Not', ops)
self.optimize_loop(ops, 'Not, Not, Not', expected, preamble)
def test_p123_simple(self): ops = """ [i1, p2, p3] i3 = getfield_gc(p3, descr=valuedescr) escape(i3) p1 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p1, i1, descr=valuedescr) jump(i1, p1, p2) """ # We cannot track virtuals that survive for more than two iterations. self.optimize_loop(ops, 'Not, Not, Not', ops)
self.optimize_loop(ops, 'Not, Not, Not', ops)
preamble = """ [i1, p2, p3] i3 = getfield_gc(p3, descr=valuedescr) escape(i3) jump(i1, p2) """ expected = """ [i1, p2] i3 = getfield_gc(p2, descr=valuedescr) escape(i3) p4 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p4, i1, descr=valuedescr) p1sub = new_with_vtable(ConstClass(node_vtable2)) setfield_gc(p1sub, i1, descr=valuedescr) setfield_gc(p4, p1sub, descr=nextdescr) jump(i1, p4) """ self.optimize_loop(ops, 'Not, Not, Not', expected, preamble)
def test_p123_nested(self): ops = """ [i1, p2, p3] i3 = getfield_gc(p3, descr=valuedescr) escape(i3) p1 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p1, i1, descr=valuedescr) p1sub = new_with_vtable(ConstClass(node_vtable2)) setfield_gc(p1sub, i1, descr=valuedescr) setfield_gc(p1, p1sub, descr=nextdescr) jump(i1, p1, p2) """ # The same as test_p123_simple, but with a virtual containing another # virtual. self.optimize_loop(ops, 'Not, Not, Not', ops)
self.optimize_loop(ops, 'Not, Not, Not', ops)
preamble = """ [i1, p2, p3] p3sub = getfield_gc(p3, descr=nextdescr) i3 = getfield_gc(p3sub, descr=valuedescr) escape(i3) jump(i1, p2) """ expected = """ [i1, p3] p2sub = new_with_vtable(ConstClass(node_vtable2)) setfield_gc(p2sub, i1, descr=valuedescr) setfield_gc(p3, p2sub, descr=nextdescr) escape(i1) p4 = new_with_vtable(ConstClass(node_vtable)) jump(i1, p4) """ self.optimize_loop(ops, 'Not, Not, Not', expected, preamble)
def test_p123_anti_nested(self): ops = """ [i1, p2, p3] p3sub = getfield_gc(p3, descr=nextdescr) i3 = getfield_gc(p3sub, descr=valuedescr) escape(i3) p1 = new_with_vtable(ConstClass(node_vtable)) p2sub = new_with_vtable(ConstClass(node_vtable2)) setfield_gc(p2sub, i1, descr=valuedescr) setfield_gc(p2, p2sub, descr=nextdescr) jump(i1, p1, p2) """ # The same as test_p123_simple, but in the end the "old" p2 contains # a "young" virtual p2sub. Make sure it is all forced. self.optimize_loop(ops, 'Not, Not, Not', ops)
" really return an PyObject?"
" really return a PyObject?"
def cfunction_descr_call(space, w_self, __args__): self = space.interp_w(W_PyCFunctionObject, w_self) args_w, kw_w = __args__.unpack() null = lltype.nullptr(PyObject.TO) # XXX for the moment # Call the C function result = self.ml.c_ml_meth(null, null) try: ret = from_ref(space, result) except RuntimeError: if not we_are_translated(): import sys print >>sys.stderr, "Calling a function failed. Did it" \ " really return an PyObject?" raise # XXX result.decref() return ret
if isinstance(self.buf, buffer.SubBuffer): offset = self.buf.offset elif isinstance(self.buf, buffer.RWSubBuffer): offset = self.buf.offset
buf = self.buf if isinstance(buf, buffer.SubBuffer): offset = buf.offset elif isinstance(buf, buffer.RWSubBuffer): offset = buf.offset
def w_get_suboffsets(space, self): if isinstance(self.buf, buffer.SubBuffer): offset = self.buf.offset elif isinstance(self.buf, buffer.RWSubBuffer): offset = self.buf.offset else: offset = 0 return space.newtuple([space.wrap(offset)])
if opcode == opcodedesc.RETURN_VALUE.index: w_returnvalue = self.popvalue() block = self.unrollstack(SReturnValue.kind) if block is None: self.pushvalue(w_returnvalue) raise Return else: unroller = SReturnValue(w_returnvalue) next_instr = block.handle(self, unroller) return next_instr if opcode == opcodedesc.YIELD_VALUE.index: raise Yield if opcode == opcodedesc.END_FINALLY.index: unroller = self.end_finally() if isinstance(unroller, SuspendedUnroller): block = self.unrollstack(unroller.kind) if block is None: w_result = unroller.nomoreblocks() self.pushvalue(w_result) raise Return else: next_instr = block.handle(self, unroller) return next_instr if opcode == opcodedesc.JUMP_ABSOLUTE.index: return self.JUMP_ABSOLUTE(oparg, next_instr, ec)
def dispatch_bytecode(self, co_code, next_instr, ec): space = self.space while True: self.last_instr = intmask(next_instr) if not jit.we_are_jitted(): ec.bytecode_trace(self) next_instr = r_uint(self.last_instr) opcode = ord(co_code[next_instr]) next_instr += 1 if space.config.objspace.logbytecodes: space.bytecodecounts[opcode] += 1 try: probs = space.bytecodetransitioncount[self.last_opcode] except KeyError: probs = space.bytecodetransitioncount[self.last_opcode] = {} probs[opcode] = probs.get(opcode, 0) + 1 self.last_opcode = opcode
def JUMP_ABSOLUTE(f, jumpto, next_instr, *ignored): return jumpto
def JUMP_IF_TRUE(f, stepby, next_instr, *ignored): w_cond = f.peekvalue() if f.space.is_true(w_cond): next_instr += stepby return next_instr
def YIELD_VALUE(f, oparg, next_instr): raise Yield def RETURN_VALUE(self, oparg, next_instr): w_returnvalue = self.popvalue() block = self.unrollstack(SReturnValue.kind) if block is None: self.pushvalue(w_returnvalue) raise Return unroller = SReturnValue(w_returnvalue) next_instr = block.handle(self, unroller) return next_instr
def WITH_CLEANUP(f, *ignored): # see comment in END_FINALLY for stack state w_exitfunc = f.popvalue() w_unroller = f.peekvalue(2) unroller = f.space.interpclass_w(w_unroller) if isinstance(unroller, SApplicationException): operr = unroller.operr w_result = f.space.call_function(w_exitfunc, operr.w_type, operr.get_w_value(f.space), operr.application_traceback) if f.space.is_true(w_result): # __exit__() returned True -> Swallow the exception. f.settopvalue(f.space.w_None, 2) else: f.space.call_function(w_exitfunc, f.space.w_None, f.space.w_None, f.space.w_None)
w_stacklevel = space.wrap(stacklevel)
w_stacklevel = space.wrap(rffi.cast(lltype.Signed, stacklevel))
def PyErr_WarnEx(space, w_category, message_ptr, stacklevel):
    """Issue a warning message (cpyext implementation of PyErr_WarnEx).

    The category argument is a warning category (a subclass of Warning)
    or NULL; the message argument is a C string.  stacklevel is a
    positive number of stack frames: the warning is attributed to the
    currently executing line of code in that frame (1 = the function
    calling PyErr_WarnEx(), 2 = the function above that, and so forth).

    This normally prints a warning message to sys.stderr; however, the
    user may have configured warnings to be turned into errors, in which
    case an app-level exception is raised.  The heavy lifting is
    delegated to the app-level 'warnings' module.

    Returns 0 if no exception is raised, or -1 if one is.  (It is not
    possible to determine whether a warning message was actually
    printed, nor the reason for an exception; this is intentional.)
    There is no C API for warning control.
    """
    if w_category is None:
        # NULL category from C: let warnings.warn pick its default.
        w_category = space.w_None
    w_message = space.wrap(rffi.charp2str(message_ptr))
    # BUG FIX: 'stacklevel' crosses the cpyext boundary as an rffi
    # integer type; cast to a native Signed before wrapping so the
    # app-level object is a plain int.
    w_stacklevel = space.wrap(rffi.cast(lltype.Signed, stacklevel))
    w_module = PyImport_Import(space, space.wrap("warnings"))
    w_warn = space.getattr(w_module, space.wrap("warn"))
    space.call_function(w_warn, w_message, w_category, w_stacklevel)
    return 0
# Force aggressive JIT parameters on the single warm-runner state:
# compile after 3 iterations, trace eagerly, with the caller-supplied
# trace limit and inlining policy.  Presumably test-harness setup --
# confirm against the enclosing function.
warmrunnerdesc.state.set_param_threshold(3)
warmrunnerdesc.state.set_param_trace_eagerness(2)
warmrunnerdesc.state.set_param_trace_limit(trace_limit)
warmrunnerdesc.state.set_param_inlining(inline)
warmrunnerdesc.state.set_param_optimizer(OPTIMIZER_FULL)
# Same aggressive JIT parameters as the single-state variant, but applied
# to every jitdriver's warm state (multiple jit_merge_points supported).
for jd in warmrunnerdesc.jitdrivers_sd:
    jd.warmstate.set_param_threshold(3)
    jd.warmstate.set_param_trace_eagerness(2)
    jd.warmstate.set_param_trace_limit(trace_limit)
    jd.warmstate.set_param_inlining(inline)
    jd.warmstate.set_param_optimizer(OPTIMIZER_FULL)
def entry_point(argv):
    # NOTE(review): '%s' is a placeholder -- this appears to be a source
    # template filled in via string formatting before compilation
    # (note the Python 2 'print' statement).  Confirm against the code
    # that instantiates it.
    args = %s
    res = function(*args)
    print res
    return 0
pass  # deliberate no-op; the enclosing definition is not visible in this chunk
# Precomputed mapping from the 'seennl' bitmask to the app-level value of
# the 'newlines' attribute: a single string for one kind of line ending,
# an app-level tuple for combinations.  Built once (presumably at init
# time) so the property getter is a plain dict lookup.
self.w_newlines_dict = {
    SEEN_CR: space.wrap("\r"),
    SEEN_LF: space.wrap("\n"),
    SEEN_CRLF: space.wrap("\r\n"),
    SEEN_CR | SEEN_LF: space.newtuple(
        [space.wrap("\r"), space.wrap("\n")]),
    SEEN_CR | SEEN_CRLF: space.newtuple(
        [space.wrap("\r"), space.wrap("\r\n")]),
    SEEN_LF | SEEN_CRLF: space.newtuple(
        [space.wrap("\n"), space.wrap("\r\n")]),
    SEEN_CR | SEEN_LF | SEEN_CRLF: space.newtuple(
        [space.wrap("\r"), space.wrap("\n"), space.wrap("\r\n")]),
}
def __init__(self, space):
    # No per-instance state is set up here; 'space' is accepted for
    # interface uniformity with sibling classes.
    pass
# Map the 'seennl' bitmask to the app-level newlines value.
# NOTE(review): .get() without a default returns interp-level None when
# seennl == 0 (no newline seen yet) -- a space getter would normally be
# expected to return space.w_None instead; verify against the caller.
return {
    SEEN_CR: space.wrap("\r"),
    SEEN_LF: space.wrap("\n"),
    SEEN_CRLF: space.wrap("\r\n"),
    SEEN_CR | SEEN_LF: space.wrap(("\r", "\n")),
    SEEN_CR | SEEN_CRLF: space.wrap(("\r", "\r\n")),
    SEEN_LF | SEEN_CRLF: space.wrap(("\n", "\r\n")),
    SEEN_CR | SEEN_LF | SEEN_CRLF: space.wrap(("\r", "\n", "\r\n")),
}.get(self.seennl)
# Look up the precomputed newlines value for the current 'seennl'
# bitmask; app-level None when no newline has been seen yet (seennl == 0).
return self.w_newlines_dict.get(self.seennl, space.w_None)
def newlines_get_w(space, self):
    """Getter for the 'newlines' attribute.

    Returns the kinds of line endings translated so far, encoded in the
    'seennl' bitmask: a single app-level string for one kind, an
    app-level tuple for combinations, and w_None when no newline has
    been seen yet (matching io.IncrementalNewlineDecoder.newlines).
    """
    return {
        SEEN_CR: space.wrap("\r"),
        SEEN_LF: space.wrap("\n"),
        SEEN_CRLF: space.wrap("\r\n"),
        SEEN_CR | SEEN_LF: space.wrap(("\r", "\n")),
        SEEN_CR | SEEN_CRLF: space.wrap(("\r", "\r\n")),
        SEEN_LF | SEEN_CRLF: space.wrap(("\n", "\r\n")),
        SEEN_CR | SEEN_LF | SEEN_CRLF: space.wrap(("\r", "\n", "\r\n")),
        # BUG FIX: without a default, .get() returned interp-level None
        # when seennl == 0, leaking a non-wrapped object to app level;
        # return the app-level None instead.
    }.get(self.seennl, space.w_None)
if not space.match(space, space.w_ImportError):
# Only swallow app-level ImportError; anything else propagates.
if not e.match(space, space.w_ImportError):
def descr_init(self, space, w_buffer, w_encoding=None,
               w_errors=None, w_newline=None, line_buffering=0):
    """App-level __init__ of the text wrapper.

    NOTE(review): the body is truncated in this chunk; only the state
    reset to STATE_ZERO (marking the object as not yet fully
    initialized) is visible here.
    """
    self.state = STATE_ZERO
# Unwrap and validate the translation table.
# NOTE(review): w_table=None (identity translation, accepted by later
# CPython versions) is not handled here -- space.str_w would fail on it.
table = space.str_w(w_table)
if len(table) != 256:
    raise OperationError(
        space.w_ValueError,
        space.wrap("translation table must be 256 characters long"))
# A table of None means the identity translation (only deletions are
# applied); otherwise unwrap and validate the 256-character table.
if space.is_w(w_table, space.w_None):
    table = DEFAULT_NOOP_TABLE
else:
    table = space.str_w(w_table)
    if len(table) != 256:
        raise OperationError(
            space.w_ValueError,
            space.wrap("translation table must be 256 characters long"))
def str_translate__Rope_ANY_ANY(space, w_string, w_table, w_deletechars=''):
    """charfilter - unicode handling is not implemented

    Return a copy of the string where all characters occurring in the
    optional argument deletechars are removed, and the remaining
    characters have been mapped through the given translation table,
    which must be a string of length 256"""
    # XXX CPython accepts buffers, too, not sure what we should do
    table = space.str_w(w_table)
    if len(table) != 256:
        raise OperationError(
            space.w_ValueError,
            space.wrap("translation table must be 256 characters long"))

    char_iter = rope.ItemIterator(w_string._node)
    translated = []
    while True:
        try:
            ch = char_iter.nextchar()
            code = ord(ch)
            # Prebuilt one-character rope objects avoid an allocation per
            # character for the deletechars membership test.
            w_ch = W_RopeObject.PREBUILT[code]
            if not space.is_true(space.contains(w_deletechars, w_ch)):
                translated.append(table[code])
        except StopIteration:
            # Iterator exhausted: the whole rope has been processed.
            break
    return W_RopeObject(rope.rope_from_charlist(translated))
# Fragment of a larger condition: the link to the return block carries
# exactly the result of the last operation.
block.exits[0].args[0] is block.operations[-1].result)):
# Fragment of a larger condition: as above, but mallocs are excluded --
# presumably because a failing malloc must still be checked even when its
# result flows straight to the return; confirm against the full condition.
block.exits[0].args[0] is block.operations[-1].result) and
block.operations[-1].opname not in ('malloc', 'malloc_nonmovable')):
def transform_block(self, graph, block):
    """Insert exception checks after the operations of one flow-graph block.

    Returns (need_exc_matching, n_gen_exc_checks).  NOTE(review): the
    body is truncated in this chunk -- the per-operation transformation
    after the can_raise filter is not visible here.
    """
    need_exc_matching = False
    n_gen_exc_checks = 0
    if block is graph.exceptblock:
        return need_exc_matching, n_gen_exc_checks
    elif block is graph.returnblock:
        return need_exc_matching, n_gen_exc_checks
    last_operation = len(block.operations) - 1
    if block.exitswitch == c_last_exception:
        # The last operation's exception is handled by the exit switch
        # itself; don't instrument it, only note that matching is needed.
        need_exc_matching = True
        last_operation -= 1
    elif (len(block.exits) == 1 and
          block.exits[0].target is graph.returnblock and
          len(block.operations) and
          (block.exits[0].args[0].concretetype is lltype.Void or
           block.exits[0].args[0] is block.operations[-1].result)):
        # Tail position: the last operation's result is returned directly,
        # so its exception can simply propagate.
        last_operation -= 1
    lastblock = block
    # Walk the operations backwards, skipping those that cannot raise.
    for i in range(last_operation, -1, -1):
        op = block.operations[i]
        if not self.raise_analyzer.can_raise(op):
            continue
WORD = rffi.sizeof(lltype.Signed)
# NOTE(review): these asserts assume the array header pads the first item
# to a full machine word for every item type -- TODO confirm this holds
# on platforms where e.g. doubles are aligned to less than a word.
assert descr1.get_base_size(False) == WORD
assert descr2.get_base_size(False) == WORD
assert descr3.get_base_size(False) == WORD
assert descr4.get_base_size(False) == WORD
def get_alignment(code):
    # Platform alignment of struct code: padded size of a long followed
    # by the type, minus the two payload sizes.
    return struct.calcsize('l' + code) - struct.calcsize(code)
# The array base size equals the alignment of the item type ('c' = char,
# 'p' = pointer, 'd' = double), not a fixed machine word.
assert descr1.get_base_size(False) == get_alignment('c')
assert descr2.get_base_size(False) == get_alignment('p')
assert descr3.get_base_size(False) == get_alignment('p')
assert descr4.get_base_size(False) == get_alignment('d')
def test_get_array_descr():
    """Check array descrs: descr classes, pointer/float flags, and layout.

    Base sizes are checked against the platform's actual alignment of the
    item type (computed via struct.calcsize) instead of assuming one
    machine word, so the test also passes on platforms where some item
    types are aligned to less than a word.
    """
    import struct
    U = lltype.Struct('U')
    T = lltype.GcStruct('T')
    A1 = lltype.GcArray(lltype.Char)
    A2 = lltype.GcArray(lltype.Ptr(T))
    A3 = lltype.GcArray(lltype.Ptr(U))
    A4 = lltype.GcArray(lltype.Float)
    assert getArrayDescrClass(A2) is GcPtrArrayDescr
    assert getArrayDescrClass(A3) is NonGcPtrArrayDescr
    cls = getArrayDescrClass(A1)
    assert cls != getArrayDescrClass(lltype.GcArray(lltype.Signed))
    assert cls == getArrayDescrClass(lltype.GcArray(lltype.Char))
    clsf = getArrayDescrClass(A4)
    assert clsf != cls
    assert clsf == getArrayDescrClass(lltype.GcArray(lltype.Float))
    #
    c0 = GcCache(False)
    descr1 = get_array_descr(c0, A1)
    descr2 = get_array_descr(c0, A2)
    descr3 = get_array_descr(c0, A3)
    descr4 = get_array_descr(c0, A4)
    assert descr1.__class__ is cls
    assert descr2.__class__ is GcPtrArrayDescr
    assert descr3.__class__ is NonGcPtrArrayDescr
    assert descr4.__class__ is clsf
    assert descr1 == get_array_descr(c0, lltype.GcArray(lltype.Char))
    assert not descr1.is_array_of_pointers()
    assert descr2.is_array_of_pointers()
    assert not descr3.is_array_of_pointers()
    assert not descr4.is_array_of_pointers()
    assert not descr1.is_array_of_floats()
    assert not descr2.is_array_of_floats()
    assert not descr3.is_array_of_floats()
    assert descr4.is_array_of_floats()
    #
    def get_alignment(code):
        # Platform alignment of struct code: padded size of a long
        # followed by the type, minus the two payload sizes.
        return struct.calcsize('l' + code) - struct.calcsize(code)
    # FIX: check the base size against the item type's real alignment
    # ('c' = char, 'p' = pointer, 'd' = double) rather than assuming it
    # is always one machine word.
    assert descr1.get_base_size(False) == get_alignment('c')
    assert descr2.get_base_size(False) == get_alignment('p')
    assert descr3.get_base_size(False) == get_alignment('p')
    assert descr4.get_base_size(False) == get_alignment('d')
    assert descr1.get_ofs_length(False) == 0
    assert descr2.get_ofs_length(False) == 0
    assert descr3.get_ofs_length(False) == 0
    assert descr4.get_ofs_length(False) == 0
    assert descr1.get_item_size(False) == rffi.sizeof(lltype.Char)
    assert descr2.get_item_size(False) == rffi.sizeof(lltype.Ptr(T))
    assert descr3.get_item_size(False) == rffi.sizeof(lltype.Ptr(U))
    assert descr4.get_item_size(False) == rffi.sizeof(lltype.Float)
    #
    # Translated mode: all sizes are symbolic, not concrete ints.
    assert isinstance(descr1.get_base_size(True), Symbolic)
    assert isinstance(descr2.get_base_size(True), Symbolic)
    assert isinstance(descr3.get_base_size(True), Symbolic)
    assert isinstance(descr4.get_base_size(True), Symbolic)
    assert isinstance(descr1.get_ofs_length(True), Symbolic)
    assert isinstance(descr2.get_ofs_length(True), Symbolic)
    assert isinstance(descr3.get_ofs_length(True), Symbolic)
    assert isinstance(descr4.get_ofs_length(True), Symbolic)
    assert isinstance(descr1.get_item_size(True), Symbolic)
    assert isinstance(descr2.get_item_size(True), Symbolic)
    assert isinstance(descr3.get_item_size(True), Symbolic)
    assert isinstance(descr4.get_item_size(True), Symbolic)
    # Raw C arrays: no GC header (base size 0) and no length field (-1).
    CA = rffi.CArray(lltype.Signed)
    descr = get_array_descr(c0, CA)
    assert not descr.is_array_of_floats()
    assert descr.get_base_size(False) == 0
    assert descr.get_ofs_length(False) == -1
    CA = rffi.CArray(lltype.Ptr(lltype.GcStruct('S')))
    descr = get_array_descr(c0, CA)
    assert descr.is_array_of_pointers()
    assert descr.get_base_size(False) == 0
    assert descr.get_ofs_length(False) == -1
    CA = rffi.CArray(lltype.Ptr(lltype.Struct('S')))
    descr = get_array_descr(c0, CA)
    assert descr.get_base_size(False) == 0
    assert descr.get_ofs_length(False) == -1
    CA = rffi.CArray(lltype.Float)
    descr = get_array_descr(c0, CA)
    assert descr.is_array_of_floats()
    assert descr.get_base_size(False) == 0
    assert descr.get_ofs_length(False) == -1