def _basicsize(t, base=0, heap=False, obj=None):
"""Get non-zero basicsize of type,
including the header sizes.
"""
s = max(getattr(t, "__basicsize__", 0), base)
# include gc header size
if t != _Type_type:
h = getattr(t, "__flags__", 0) & _Py_TPFLAGS_HAVE_GC
elif heap: # type, allocated on heap
h = True
else: # None has no __flags__ attr
h = getattr(obj, "__flags__", 0) & _Py_TPFLAGS_HEAPTYPE
if h:
s += _sizeof_CPyGC_Head
# include reference counters
return s + _sizeof_Crefcounts
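
As a rough illustration of the accounting above (a sketch; exact numbers
depend on the CPython build, pointer width and version):

import sys

# the raw type slots this helper reads, vs. what CPython itself reports
print(list.__basicsize__, list.__itemsize__)   # C-level struct sizes
print(sys.getsizeof([]))                       # CPython's own flat size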
def _classof(obj, dflt=None):
"""Return the object's class object."""
return getattr(obj, "__class__", dflt)
def _derive_typedef(typ):
"""Return single, existing super type typedef or None."""
v = [v for v in _values(_typedefs) if _issubclass(typ, v.type)]
return v[0] if len(v) == 1 else None
def _dir2(obj, pref=_NN, excl=(), slots=None, itor=_NN):
"""Return an attribute name, object 2-tuple for certain
attributes or for the ``__slots__`` attributes of the
given object, but not both. Any iterator referent
objects are returned with the given name if the
latter is non-empty.
"""
if slots: # __slots__ attrs
if hasattr(obj, slots):
# collect all inherited __slots__ attrs
# from list, tuple, or dict __slots__,
# while removing any duplicate attrs
s = {}
for c in type(obj).mro():
n = _nameof(c)
for a in getattr(c, slots, ()):
if a.startswith("__"):
a = "_" + n + a
if hasattr(obj, a):
s.setdefault(a, getattr(obj, a))
# assume __slots__ tuple-like is holding the values
# yield slots, _Slots(s) # _keys(s) ... REMOVED,
# see _Slots.__doc__ further below
for t in _items(s):
yield t # attr name, value
elif itor: # iterator referents
for o in obj: # iter(obj)
yield itor, o
else: # regular attrs
for a in dir(obj):
if a.startswith(pref) and hasattr(obj, a) and a not in excl:
yield a, getattr(obj, a)
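
A small usage sketch of the ``__slots__`` branch; the Point class below is
hypothetical and used only for illustration:

class Point:  # hypothetical class, illustration only
    __slots__ = ("x", "y")

    def __init__(self):
        self.x, self.y = 1, 2

# assuming _dir2 is in scope: yields ("x", 1) and ("y", 2)
print(list(_dir2(Point(), slots="__slots__")))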
def _infer_dict(obj):
"""Return True for likely dict object via duck typing."""
for attrs in (("items", "keys", "values"), ("iteritems", "iterkeys", "itervalues")):
attrs += "__len__", "get", "has_key" # 'update'
if all(callable(getattr(obj, a, None)) for a in attrs):
return True
return False
def _isbuiltin2(typ):
"""Return True for built-in types as in Python 2."""
    # in Python 3, range is a type rather than a built-in function,
    # so isbuiltin(range) alone would miss it
return isbuiltin(typ) or (typ is range)
def _iscell(obj):
"""Return True if obj is a cell as used in a closure."""
return isinstance(obj, _cell_type)
def _isdictype(obj):
"""Return True for known dict objects."""
c = _classof(obj)
n = _nameof(c)
return n and n in _dict_types.get(_moduleof(c), ())
def _isframe(obj):
"""Return True for a stack frame object."""
try:
return isframe(obj)
except ReferenceError:
return False
def _isignored(typ):
"""Is this a type or class to be ignored?"""
return _moduleof(typ) in _ignored_modules
def _isnamedtuple(obj):
"""Named tuples are identified via duck typing:
<http://www.Gossamer-Threads.com/lists/python/dev/1142178>
"""
return isinstance(obj, tuple) and hasattr(obj, "_fields")
def _isNULL(obj):
"""Prevent asizeof(all=True, ...) crash.
Sizing gc.get_objects() crashes in Pythonista3 with
Python 3.5.1 on iOS due to 1-tuple (<Null>,) object,
see <http://forum.omz-software.com/user/mrjean1>.
"""
return isinstance(obj, tuple) and len(obj) == 1 and repr(obj) == "(<NULL>,)"
def _issubclass(obj, Super):
"""Safe inspect.issubclass() returning None if Super is
*object* or if obj and Super are not a class or type.
"""
if Super is not object:
try:
return issubclass(obj, Super)
except TypeError:
pass
return None
def _itemsize(t, item=0):
"""Get non-zero itemsize of type."""
# replace zero value with default
return getattr(t, "__itemsize__", 0) or item
def _kwdstr(**kwds):
"""Keyword arguments as a string."""
return ", ".join(sorted("%s=%r" % kv for kv in _items(kwds)))
def _lengstr(obj):
"""Object length as a string."""
n = leng(obj)
if n is None: # no len
r = _NN
else:
x = "!" if n > _len(obj) else _NN # extended
r = " leng %d%s" % (n, x)
return r
def _moduleof(obj, dflt=_NN):
"""Return the object's module name."""
return getattr(obj, "__module__", dflt)
def _nameof(obj, dflt=_NN):
"""Return the name of an object."""
return getattr(obj, "__name__", dflt)
def _objs_opts_x(where, objs, all=None, **opts):
"""Return the given or 'all' objects plus
the remaining options and exclude flag
"""
if objs: # given objects
t, x = objs, False
elif all in (False, None):
t, x = (), True
elif all is True: # 'all' objects
t, x = _getobjects(), True
else:
raise _OptionError(where, all=all)
return t, opts, x
def _OptionError(where, Error=ValueError, **options):
"""Format an *Error* instance for invalid *option* or *options*."""
t = _plural(len(options)), _nameof(where), _kwdstr(**options)
return Error("invalid option%s: %s(%s)" % t)
def _p100(part, total, prec=1):
"""Return percentage as string."""
t = float(total)
if t > 0:
p = part * 100.0 / t
r = "%.*f%%" % (prec, p)
else:
r = "n/a"
return r
def _plural(num):
"""Return 's' if *num* is not one."""
return "s" if num != 1 else _NN
def _power_of_2(n):
"""Find the next power of 2."""
p2 = 2 ** int(log(n, 2))
while n > p2:
p2 += p2
return p2
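
A sanity check of the rounding behavior (math.log is imported by this module):

# assuming _power_of_2 is in scope
assert _power_of_2(8) == 8    # exact powers are returned unchanged
assert _power_of_2(9) == 16   # anything else rounds up to the next power of 2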
def _prepr(obj, clip=0):
"""Prettify and clip long repr() string."""
return _repr(obj, clip=clip).strip("<>").replace("'", _NN)
def _printf(fmt, *args, **print3options):
"""Formatted print to sys.stdout or given stream.
*print3options* -- some keyword arguments, like Python 3+ print.
"""
if print3options: # like Python 3+
f = print3options.get("file", None) or sys.stdout
if args:
f.write(fmt % args)
else:
f.write(fmt)
f.write(print3options.get("end", linesep))
if print3options.get("flush", False):
f.flush()
elif args:
print(fmt % args)
else:
print(fmt)
def _refs(obj, named, *attrs, **kwds):
"""Return specific attribute objects of an object."""
if named:
_N = _NamedRef
else:
def _N(unused, o):
return o
for a in attrs: # cf. inspect.getmembers()
if hasattr(obj, a):
yield _N(a, getattr(obj, a))
if kwds: # kwds are _dir2() args
for a, o in _dir2(obj, **kwds):
yield _N(a, o)
def _repr(obj, clip=80):
"""Clip long repr() string."""
try: # safe repr()
r = repr(obj).replace(linesep, "\\n")
except Exception:
r = "N/A"
if len(r) > clip > 0:
h = (clip // 2) - 2
if h > 0:
r = r[:h] + "...." + r[-h:]
return r
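
For example, assuming _repr is in scope:

long_list = list(range(100))
print(_repr(long_list, clip=32))  # head and tail joined by '....', 32 chars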
def _SI(size, K=1024, i="i"):
"""Return size as SI string."""
if 1 < K <= size:
f = float(size)
for si in iter("KMGPTE"):
f /= K
if f < K:
return " or %.1f %s%sB" % (f, si, i)
return _NN
def _SI2(size, **kwds):
"""Return size as regular plus SI string."""
return str(size) + _SI(size, **kwds)
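
A quick sketch of both helpers, assuming they are in scope:

print(_SI2(512))           # '512' -- below 1 KiB, no SI suffix appended
print(_SI2(1536))          # '1536 or 1.5 KiB'
print(_SI2(3 * 1024**4))   # '... or 3.0 TiB'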
def _class_refs(obj, named):
"""Return specific referents of a class object."""
return _refs(
obj,
named,
"__class__",
"__doc__",
"__mro__",
"__name__",
"__slots__",
"__weakref__",
"__dict__",
)
def _co_refs(obj, named):
"""Return specific referents of a code object."""
return _refs(obj, named, pref="co_")
def _dict_refs(obj, named):
"""Return key and value objects of a dict/proxy."""
try:
if named:
for k, v in _items(obj):
s = str(k)
yield _NamedRef("[K] " + s, k)
s += ": " + _repr(v)
yield _NamedRef("[V] " + s, v)
else:
for k, v in _items(obj):
yield k
yield v
except (KeyError, ReferenceError, TypeError) as x:
warnings.warn("Iterating '%s': %r" % (_classof(obj), x))
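
With named=True each key/value pair yields two labeled referents, e.g.:

# assuming _dict_refs is in scope
for ref in _dict_refs({"a": 1}, named=True):
    print(ref)  # one '[K] a' entry for the key, one '[V] a: 1' for the value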
def _enum_refs(obj, named):
"""Return specific referents of an enumerate object."""
return _refs(obj, named, "__doc__")
def _exc_refs(obj, named):
"""Return specific referents of an Exception object."""
# .message raises DeprecationWarning in Python 2.6
return _refs(
obj, named, "args", "filename", "lineno", "msg", "text"
)
def _file_refs(obj, named):
"""Return specific referents of a file object."""
return _refs(obj, named, "mode", "name")
def _frame_refs(obj, named):
"""Return specific referents of a frame object."""
return _refs(obj, named, pref="f_")
def _func_refs(obj, named):
"""Return specific referents of a function or lambda object."""
return _refs(
obj,
named,
"__doc__",
"__name__",
"__code__",
"__closure__",
pref="func_",
excl=("func_globals",),
)
def _gen_refs(obj, named):
"""Return the referent(s) of a generator (expression) object."""
# only some gi_frame attrs, but none of
# the items to keep the generator intact
f = getattr(obj, "gi_frame", None)
return _refs(f, named, "f_locals", "f_code")
def _im_refs(obj, named):
"""Return specific referents of a method object."""
return _refs(obj, named, "__doc__", "__name__", "__code__", pref="im_")
def _inst_refs(obj, named):
"""Return specific referents of a class instance."""
return _refs(obj, named, "__dict__", "__class__", slots="__slots__")
def _iter_refs(obj, named):
"""Return the referent(s) of an iterator object."""
r = _getreferents(obj) # special case
return _refs(r, named, itor=_nameof(obj) or "iteref")
def _module_refs(obj, named):
"""Return specific referents of a module object."""
n = _nameof(obj) == __name__ # i.e. this module
# ignore this very module, module is essentially a dict
return () if n else _dict_refs(obj.__dict__, named)
def _namedtuple_refs(obj, named):
"""Return specific referents of obj-as-sequence and slots but exclude dict."""
for r in _refs(obj, named, "__class__", slots="__slots__"):
yield r
for r in obj:
yield r
def _prop_refs(obj, named):
"""Return specific referents of a property object."""
return _refs(obj, named, "__doc__", pref="f")
def _seq_refs(obj, unused): # named unused for PyChecker
"""Return specific referents of a frozen/set, list, tuple and xrange object."""
return obj
def _stat_refs(obj, named):
"""Return referents of a os.stat object."""
return _refs(obj, named, pref="st_")
def _statvfs_refs(obj, named):
"""Return referents of a os.statvfs object."""
return _refs(obj, named, pref="f_")
def _tb_refs(obj, named):
"""Return specific referents of a traceback object."""
return _refs(obj, named, pref="tb_")
def _type_refs(obj, named):
"""Return specific referents of a type object."""
return _refs(
obj,
named,
"__doc__",
"__mro__",
"__name__",
"__slots__",
"__weakref__",
"__dict__",
)
def _weak_refs(obj, unused): # unused for named
"""Return weakly referent object."""
try: # ignore 'key' of KeyedRef
return (obj(),)
except Exception: # XXX ReferenceError
return ()
def _len(obj):
"""Safe len()."""
try:
return len(obj)
except TypeError: # no len() nor __len__
return 0
def _len_bytearray(obj):
"""Bytearray size."""
return obj.__alloc__()
def _len_code(obj): # see .../Lib/test/test_sys.py
"""Length of code object (stack and variables only)."""
return (
_len(obj.co_freevars)
+ obj.co_stacksize
+ _len(obj.co_cellvars)
+ obj.co_nlocals
- 1
)
def _len_dict(obj):
"""Dict length in items (estimate)."""
n = len(obj) # active items
if n < 6: # ma_smalltable ...
n = 0 # ... in basicsize
else: # at least one unused
n = _power_of_2(n + 1)
return n
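
For example, assuming _len_dict is in scope:

print(_len_dict(dict.fromkeys(range(3))))  # 0: small dicts fit the basic size
print(_len_dict(dict.fromkeys(range(8))))  # 16: next power of 2 above 8 + 1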
def _len_frame(obj):
"""Length of a frame object."""
c = getattr(obj, "f_code", None)
return _len_code(c) if c else 0
def _len_int(obj):
"""Length of *int* (multi-precision, formerly long) in Cdigits."""
n = _getsizeof(obj, 0) - int.__basicsize__
return (n // int.__itemsize__) if n > 0 else 0
def _len_iter(obj):
"""Length (hint) of an iterator."""
n = getattr(obj, "__length_hint__", None)
return n() if n and callable(n) else _len(obj)
def _len_list(obj):
"""Length of list (estimate)."""
n = len(obj)
# estimate over-allocation
if n > 8:
n += 6 + (n >> 3)
elif n:
n += 4
return n
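
For example, assuming _len_list is in scope:

print(_len_list([]))               # 0
print(_len_list([1, 2]))           # 6: 2 items + 4 over-allocated slots
print(_len_list(list(range(16))))  # 24: 16 + 6 + (16 >> 3)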
def _len_module(obj):
"""Module length."""
return _len(obj.__dict__)
def _len_set(obj):
"""Length of frozen/set (estimate)."""
n = len(obj)
if n > 8: # assume half filled
n = _power_of_2(n + n - 2)
elif n: # at least 8
n = 8
return n
def _len_slice(obj):
"""Slice length."""
    try:  # a rough estimate; may over-count by one
        return (obj.stop - obj.start + 1) // obj.step
except (AttributeError, TypeError):
return 0
def _len_struct(obj):
"""Struct length in bytes."""
try:
return obj.size
except AttributeError:
return 0
def _len_unicode(obj):
"""Unicode size."""
return len(obj) + 1
def _claskey(obj):
"""Wrap a class object."""
i = id(obj)
try:
k = _claskeys[i]
except KeyError:
_claskeys[i] = k = _Claskey(obj)
return k
def _key2tuple(obj): # PYCHOK expected
"""Return class and instance keys for a class."""
t = type(obj) is _Type_type # isclass(obj):
return (_claskey(obj), obj) if t else _NoneNone
def _objkey(obj): # PYCHOK expected
"""Return the key for any object."""
k = type(obj)
if k is _Type_type: # isclass(obj):
k = _claskey(obj)
return k
def _typedef_both(
t,
base=0,
item=0,
leng=None,
refs=None,
kind=_kind_static,
heap=False,
vari=_Not_vari,
):
"""Add new typedef for both data and code."""
v = _Typedef(
base=_basicsize(t, base=base),
item=_itemsize(t, item),
refs=refs,
leng=leng,
both=True,
kind=kind,
type=t,
vari=vari,
)
v.save(t, base=base, heap=heap)
return v
def _typedef_code(t, base=0, refs=None, kind=_kind_static, heap=False):
"""Add new typedef for code only."""
v = _Typedef(
base=_basicsize(t, base=base), refs=refs, both=False, kind=kind, type=t
)
v.save(t, base=base, heap=heap)
return v
def _len_array(obj):
"""Array length (in bytes!)."""
return len(obj) * obj.itemsize
def _typedef(obj, derive=False, frames=False, infer=False): # MCCABE 25
"""Create a new typedef for an object."""
t = type(obj)
v = _Typedef(base=_basicsize(t, obj=obj), kind=_kind_dynamic, type=t)
# _printf('new %r %r/%r %s', t, _basicsize(t), _itemsize(t), _repr(dir(obj)))
if ismodule(obj): # handle module like dict
v.dup(
item=_dict_typedef.item + _sizeof_CPyModuleObject,
leng=_len_module,
refs=_module_refs,
)
elif _isframe(obj):
v.set(
base=_basicsize(t, base=_sizeof_CPyFrameObject, obj=obj),
item=_itemsize(t),
leng=_len_frame,
refs=_frame_refs,
)
if not frames: # ignore frames
v.set(kind=_kind_ignored)
elif iscode(obj):
v.set(
base=_basicsize(t, base=_sizeof_CPyCodeObject, obj=obj),
item=_sizeof_Cvoidp,
leng=_len_code,
refs=_co_refs,
both=False,
) # code only
elif callable(obj):
if isclass(obj): # class or type
v.set(refs=_class_refs, both=False) # code only
if _isignored(obj):
v.set(kind=_kind_ignored)
elif isbuiltin(obj): # function or method
v.set(both=False, kind=_kind_ignored) # code only
elif isfunction(obj):
v.set(refs=_func_refs, both=False) # code only
elif ismethod(obj):
v.set(refs=_im_refs, both=False) # code only
elif isclass(t): # callable instance, e.g. SCons,
# handle like any other instance further below
v.set(item=_itemsize(t), safe_len=True, refs=_inst_refs) # not code only!
else:
v.set(both=False) # code only
elif _issubclass(t, dict):
v.dup(kind=_kind_derived)
elif _isdictype(obj) or (infer and _infer_dict(obj)):
v.dup(kind=_kind_inferred)
elif _iscell(obj):
v.set(item=_itemsize(t), refs=_cell_refs)
elif _isnamedtuple(obj):
v.set(refs=_namedtuple_refs)
elif _numpy and _isnumpy(obj):
v.set(**_numpy_kwds(obj))
elif isinstance(obj, _array):
v.set(**_array_kwds(obj))
elif _isignored(obj):
v.set(kind=_kind_ignored)
else: # assume an instance of some class
if derive:
p = _derive_typedef(t)
if p: # duplicate parent
v.dup(other=p, kind=_kind_derived)
return v
if _issubclass(t, Exception):
v.set(item=_itemsize(t), safe_len=True, refs=_exc_refs, kind=_kind_derived)
elif isinstance(obj, Exception):
v.set(item=_itemsize(t), safe_len=True, refs=_exc_refs)
else:
v.set(item=_itemsize(t), safe_len=True, refs=_inst_refs)
return v
def amapped(percentage=None):
"""Set/get approximate mapped memory usage as a percentage
of the mapped file size.
Sets the new percentage if not None and returns the
previously set percentage.
Applies only to *numpy.memmap* objects.
"""
global _amapped
p = _amapped * 100.0
if percentage is not None:
_amapped = max(0, min(1, percentage * 0.01))
return p
def asized(*objs, **opts):
"""Return a tuple containing an **Asized** instance for each
object passed as positional argument.
The available options and defaults are:
*above=0* -- threshold for largest objects stats
*align=8* -- size alignment
*code=False* -- incl. (byte)code size
*cutoff=10* -- limit large objects or profiles stats
*derive=False* -- derive from super type
*detail=0* -- Asized refs level
*frames=False* -- ignore stack frame objects
*ignored=True* -- ignore certain types
*infer=False* -- try to infer types
*limit=100* -- recursion limit
*stats=0* -- print statistics
If only one object is given, the return value is the **Asized**
instance for that object. Otherwise, the length of the returned
tuple matches the number of given objects.
The **Asized** size of duplicate and ignored objects will be zero.
Set *detail* to the desired referents level and *limit* to the
maximum recursion depth.
See function **asizeof** for descriptions of the other options.
"""
_asizer.reset(**opts)
if objs:
t = _asizer.asized(*objs)
_asizer.print_stats(objs, opts=opts, sized=t) # show opts as _kwdstr
_asizer._clear()
else:
t = ()
return t
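
A minimal usage sketch, assuming asized is importable from this module and
using the size/refs/name attributes of its Asized results:

sz = asized([1, "two", 3.0], detail=1)
print(sz.size)           # total size in bytes
for ref in sz.refs:      # one Asized instance per referent at detail=1
    print(ref.name, ref.size)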
def asizeof(*objs, **opts):
"""Return the combined size (in bytes) of all objects passed
as positional arguments.
The available options and defaults are:
*above=0* -- threshold for largest objects stats
*align=8* -- size alignment
*clip=80* -- clip ``repr()`` strings
*code=False* -- incl. (byte)code size
*cutoff=10* -- limit large objects or profiles stats
*derive=False* -- derive from super type
*frames=False* -- ignore stack frame objects
*ignored=True* -- ignore certain types
*infer=False* -- try to infer types
*limit=100* -- recursion limit
*stats=0* -- print statistics
Set *align* to a power of 2 to align sizes. Any value less
than 2 avoids size alignment.
    If *all* is True and no positional arguments are supplied,
    size all current gc objects, including module, global and stack
    frame objects.
A positive *clip* value truncates all repr() strings to at
most *clip* characters.
The (byte)code size of callable objects like functions,
methods, classes, etc. is included only if *code* is True.
    If *derive* is True, new types are handled like an existing
    (super) type, provided there is one and only one of those.
By default certain base types like object, super, etc. are
ignored. Set *ignored* to False to include those.
If *infer* is True, new types are inferred from attributes
(only implemented for dict types on callable attributes
as get, has_key, items, keys and values).
Set *limit* to a positive value to accumulate the sizes of
the referents of each object, recursively up to the limit.
Using *limit=0* returns the sum of the flat sizes of the
given objects. High *limit* values may cause runtime errors
and miss objects for sizing.
    A positive value for *stats* prints up to 9 statistics: (1)
    a summary of the number of objects sized and seen plus a list
    of the largest objects with size over *above* bytes, (2) a
    simple profile of the sized objects by type and (3+) up to 6
    tables showing the static, dynamic, derived, ignored, inferred
    and dict types used and installed, respectively.
    The fractional part of the *stats* value (x 100) is the number
    of largest objects shown for (*stats*=1.+) or the cutoff
    percentage for simple profiles for (*stats*=2.+).  For example,
    *stats=1.10* shows the summary and the 10 largest objects,
    which is also the default.
See this module documentation for the definition of flat size.
"""
t, p, x = _objs_opts_x(asizeof, objs, **opts)
_asizer.reset(**p)
if t:
if x: # don't size, profile or rank _getobjects tuple
_asizer.exclude_objs(t)
s = _asizer.asizeof(*t)
_asizer.print_stats(objs=t, opts=opts) # show opts as _kwdstr
_asizer._clear()
else:
s = 0
return s
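
A minimal usage sketch, assuming asizeof is importable from this module:

print(asizeof([1, 2, 3]))           # combined deep size of one object
print(asizeof(1, "ab", limit=0))    # sum of flat sizes only, no referents
print(asizeof([1, 2, 3], stats=1))  # additionally prints a summary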
def asizesof(*objs, **opts):
"""Return a tuple containing the size (in bytes) of all objects
passed as positional arguments.
The available options and defaults are:
*above=1024* -- threshold for largest objects stats
*align=8* -- size alignment
*clip=80* -- clip ``repr()`` strings
*code=False* -- incl. (byte)code size
*cutoff=10* -- limit large objects or profiles stats
*derive=False* -- derive from super type
*frames=False* -- ignore stack frame objects
*ignored=True* -- ignore certain types
*infer=False* -- try to infer types
*limit=100* -- recursion limit
*stats=0* -- print statistics
See function **asizeof** for a description of the options.
The length of the returned tuple equals the number of given
objects.
The size of duplicate and ignored objects will be zero.
"""
_asizer.reset(**opts)
if objs:
t = _asizer.asizesof(*objs)
_asizer.print_stats(objs, opts=opts, sizes=t) # show opts as _kwdstr
_asizer._clear()
else:
t = ()
return t
def _typedefof(obj, save=False, **opts):
"""Get the typedef for an object."""
k = _objkey(obj)
v = _typedefs.get(k, None)
if not v: # new typedef
v = _typedef(obj, **opts)
if save:
_typedefs[k] = v
return v
def basicsize(obj, **opts):
"""Return the basic size of an object (in bytes).
The available options and defaults are:
*derive=False* -- derive type from super type
*infer=False* -- try to infer types
*save=False* -- save the object's type definition if new
See this module documentation for the definition of *basic size*.
"""
b = t = _typedefof(obj, **opts)
if t:
b = t.base
return b
def flatsize(obj, align=0, **opts):
"""Return the flat size of an object (in bytes), optionally aligned
to the given power-of-2.
See function **basicsize** for a description of other available options.
See this module documentation for the definition of *flat size*.
"""
f = t = _typedefof(obj, **opts)
if t:
if align > 1:
m = align - 1
if m & align:
raise _OptionError(flatsize, align=align)
else:
m = 0
f = t.flat(obj, mask=m)
return f
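
For example, assuming flatsize is in scope:

print(flatsize([1, 2, 3]))            # flat size only, referents excluded
print(flatsize([1, 2, 3], align=16))  # rounded up to a multiple of 16
# flatsize(obj, align=3) raises ValueError: align must be a power of 2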
def itemsize(obj, **opts):
"""Return the item size of an object (in bytes).
See function **basicsize** for a description of the available options.
See this module documentation for the definition of *item size*.
"""
i = t = _typedefof(obj, **opts)
if t:
i, v = t.item, t.vari
if v and i == _sizeof_Cbyte:
i = getattr(obj, v, i)
return i
def leng(obj, **opts):
"""Return the length of an object, in number of *items*.
See function **basicsize** for a description of the available options.
"""
n = t = _typedefof(obj, **opts)
if t:
n = t.leng
if n and callable(n):
i, v, n = t.item, t.vari, n(obj)
if v and i == _sizeof_Cbyte:
i = getattr(obj, v, i)
if i > _sizeof_Cbyte:
n = n // i
return n
def named_refs(obj, **opts):
"""Return all named **referents** of an object (re-using
functionality from **asizeof**).
    Does not return unnamed *referents*, e.g. objects in a list.
See function **basicsize** for a description of the available options.
"""
rs = []
v = _typedefof(obj, **opts)
if v:
v = v.refs
if v and callable(v):
for r in v(obj, True):
try:
rs.append((r.name, r.ref))
except AttributeError:
pass
return rs
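
A small usage sketch; the class below is hypothetical, for illustration only:

class C:  # hypothetical class
    def __init__(self):
        self.x = 1

# assuming named_refs is in scope
for name, ref in named_refs(C()):
    print(name, ref)  # e.g. __dict__ {'x': 1} and __class__ <class 'C'>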
def refs(obj, **opts):
"""Return (a generator for) specific *referents* of an object.
See function **basicsize** for a description of the available options.
"""
v = _typedefof(obj, **opts)
if v:
v = v.refs
if v and callable(v):
v = v(obj, False)
return v
def _is_watchdog_available() -> bool:
"""Check if the watchdog module is installed."""
try:
import watchdog # noqa: F401
return True
except ImportError:
return False
def _watch_path(
path: str,
on_path_changed: Callable[[str], None],
watcher_type: str | None = None,
*, # keyword-only arguments:
glob_pattern: str | None = None,
allow_nonexistent: bool = False,
) -> bool:
"""Create a PathWatcher for the given path if we have a viable
PathWatcher class.
Parameters
----------
path
Path to watch.
on_path_changed
Function that's called when the path changes.
watcher_type
        Optional watcher_type string. If None, it defaults to the
        `server.fileWatcherType` config option.
glob_pattern
Optional glob pattern to use when watching a directory. If set, only
files matching the pattern will be counted as being created/deleted
within the watched directory.
allow_nonexistent
If True, allow the file or directory at the given path to be
nonexistent.
Returns
-------
bool
True if the path is being watched, or False if we have no
PathWatcher class.
"""
if watcher_type is None:
watcher_type = config.get_option("server.fileWatcherType")
watcher_class = get_path_watcher_class(watcher_type)
if watcher_class is NoOpPathWatcher:
return False
watcher_class(
path,
on_path_changed,
glob_pattern=glob_pattern,
allow_nonexistent=allow_nonexistent,
)
return True
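
A minimal usage sketch; the callback below is hypothetical and only needs to
match the Callable[[str], None] signature:

def _on_changed(changed_path: str) -> None:  # hypothetical callback
    print("changed:", changed_path)

# returns False when only NoOpPathWatcher is available
watching = _watch_path("app.py", _on_changed, allow_nonexistent=True)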
def get_default_path_watcher_class() -> PathWatcherType:
"""Return the class to use for path changes notifications, based on the
server.fileWatcherType config option.
"""
return get_path_watcher_class(config.get_option("server.fileWatcherType"))
def get_path_watcher_class(watcher_type: str) -> PathWatcherType:
"""Return the PathWatcher class that corresponds to the given watcher_type
string. Acceptable values are 'auto', 'watchdog', 'poll' and 'none'.
"""
if watcher_type in {"watchdog", "auto"} and _is_watchdog_available():
# Lazy-import this module to prevent unnecessary imports of the watchdog package.
from streamlit.watcher.event_based_path_watcher import EventBasedPathWatcher
return EventBasedPathWatcher
elif watcher_type == "auto":
return PollingPathWatcher
elif watcher_type == "poll":
return PollingPathWatcher
else:
return NoOpPathWatcher
def calc_md5_with_blocking_retries(
path: str,
*, # keyword-only arguments:
glob_pattern: str | None = None,
allow_nonexistent: bool = False,
) -> str:
"""Calculate the MD5 checksum of a given path.
For a file, this means calculating the md5 of the file's contents. For a
directory, we concatenate the directory's path with the names of all the
files in it and calculate the md5 of that.
IMPORTANT: This method calls time.sleep(), which blocks execution. So you
should only use this outside the main thread.
"""
if allow_nonexistent and not os.path.exists(path):
content = path.encode("UTF-8")
elif os.path.isdir(path):
glob_pattern = glob_pattern or "*"
content = _stable_dir_identifier(path, glob_pattern).encode("UTF-8")
else:
content = _get_file_content_with_blocking_retries(path)
md5 = hashlib.md5(**HASHLIB_KWARGS)
md5.update(content)
# Use hexdigest() instead of digest(), so it's easier to debug.
return md5.hexdigest()
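
The file branch reduces to a plain MD5 over the file bytes. A sketch without
the blocking retries; HASHLIB_KWARGS commonly amounts to
{"usedforsecurity": False} on Python versions that support it:

import hashlib

with open("app.py", "rb") as f:  # hypothetical path
    print(hashlib.md5(f.read()).hexdigest())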
def path_modification_time(path: str, allow_nonexistent: bool = False) -> float:
"""Return the modification time of a path (file or directory).
If allow_nonexistent is True and the path does not exist, we return 0.0 to
guarantee that any file/dir later created at the path has a later
modification time than the last time returned by this function for that
path.
If allow_nonexistent is False and no file/dir exists at the path, a
FileNotFoundError is raised (by os.stat).
For any path that does correspond to an existing file/dir, we return its
modification time.
"""
if allow_nonexistent and not os.path.exists(path):
return 0.0
return os.stat(path).st_mtime
def _stable_dir_identifier(dir_path: str, glob_pattern: str) -> str:
"""Wait for the files in a directory to look stable-ish before returning an id.
We do this to deal with problems that would otherwise arise from many tools
(e.g. git) and editors (e.g. vim) "editing" files (from the user's
perspective) by doing some combination of deleting, creating, and moving
various files under the hood.
Because of this, we're unable to rely on FileSystemEvents that we receive
from watchdog to determine when a file has been added to or removed from a
directory.
This is a bit of an unfortunate situation, but the approach we take here is
most likely fine as:
* The worst thing that can happen taking this approach is a false
positive page added/removed notification, which isn't too disastrous
and can just be ignored.
* It is impossible (that is, I'm fairly certain that the problem is
undecidable) to know whether a file created/deleted/moved event
corresponds to a legitimate file creation/deletion/move or is part of
some sequence of events that results in what the user sees as a file
"edit".
"""
dirfiles = _dirfiles(dir_path, glob_pattern)
for _ in range(_MAX_RETRIES):
time.sleep(_RETRY_WAIT_SECS)
new_dirfiles = _dirfiles(dir_path, glob_pattern)
if dirfiles == new_dirfiles:
break
dirfiles = new_dirfiles
return f"{dir_path}+{dirfiles}"
def _fix_sys_path(main_script_path: str) -> None:
"""Add the script's folder to the sys path.
Python normally does this automatically, but since we exec the script
ourselves we need to do it instead.
"""
sys.path.insert(0, os.path.dirname(main_script_path))
def _fix_tornado_crash() -> None:
"""Set default asyncio policy to be compatible with Tornado 6.
Tornado 6 (at least) is not compatible with the default
asyncio implementation on Windows. So here we
pick the older SelectorEventLoopPolicy when the OS is Windows
if the known-incompatible default policy is in use.
    This has to happen as early as possible to make it a low priority and
    overridable.
See: https://github.com/tornadoweb/tornado/issues/2608
FIXME: if/when tornado supports the defaults in asyncio,
remove and bump tornado requirement for py38
"""
if env_util.IS_WINDOWS:
try:
from asyncio import ( # type: ignore[attr-defined]
WindowsProactorEventLoopPolicy,
WindowsSelectorEventLoopPolicy,
)
except ImportError:
pass
# Not affected
else:
if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:
                # WindowsProactorEventLoopPolicy is not compatible with
                # Tornado 6; fall back to the pre-3.8 default of Selector
asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())
def _fix_sys_argv(main_script_path: str, args: list[str]) -> None:
"""sys.argv needs to exclude streamlit arguments and parameters
and be set to what a user's script may expect.
"""
import sys
sys.argv = [main_script_path] + list(args)
def _fix_pydeck_mapbox_api_warning() -> None:
"""Sets MAPBOX_API_KEY environment variable needed for PyDeck otherwise it will throw an exception"""
os.environ["MAPBOX_API_KEY"] = config.get_option("mapbox.token")
def _fix_pydantic_duplicate_validators_error():
"""Pydantic by default disallows to reuse of validators with the same name,
this combined with the Streamlit execution model leads to an error on the second
Streamlit script rerun if the Pydantic validator is registered
in the streamlit script.
It is important to note that the same issue exists for Pydantic validators inside
Jupyter notebooks, https://github.com/pydantic/pydantic/issues/312 and in order
to fix that in Pydantic they use the `in_ipython` function that checks that
Pydantic runs not in `ipython` environment.
Inside this function we patch `in_ipython` function to always return `True`.
This change will relax rules for writing Pydantic validators inside
Streamlit script a little bit, similar to how it works in jupyter,
which should not be critical.
"""
try:
from pydantic import class_validators
class_validators.in_ipython = lambda: True # type: ignore[attr-defined]
except ImportError:
pass
def _maybe_print_static_folder_warning(main_script_path: str) -> None:
"""Prints a warning if the static folder is misconfigured."""
if config.get_option("server.enableStaticServing"):
static_folder_path = file_util.get_app_static_dir(main_script_path)
if not os.path.isdir(static_folder_path):
cli_util.print_to_cli(
f"WARNING: Static file serving is enabled, but no static folder found "
f"at {static_folder_path}. To disable static file serving, "
f"set server.enableStaticServing to false.",
fg="yellow",
)
else:
# Raise warning when static folder size is larger than 1 GB
static_folder_size = file_util.get_directory_size(static_folder_path)
if static_folder_size > MAX_APP_STATIC_FOLDER_SIZE:
config.set_option("server.enableStaticServing", False)
cli_util.print_to_cli(
"WARNING: Static folder size is larger than 1GB. "
"Static file serving has been disabled.",
fg="yellow",
)
def _maybe_print_old_git_warning(main_script_path: str) -> None:
"""If our script is running in a Git repo, and we're running a very old
Git version, print a warning that Git integration will be unavailable.
"""
repo = GitRepo(main_script_path)
if (
not repo.is_valid()
and repo.git_version is not None
and repo.git_version < MIN_GIT_VERSION
):
git_version_string = ".".join(str(val) for val in repo.git_version)
min_version_string = ".".join(str(val) for val in MIN_GIT_VERSION)
cli_util.print_to_cli("")
cli_util.print_to_cli(" Git integration is disabled.", fg="yellow", bold=True)
cli_util.print_to_cli("")
cli_util.print_to_cli(
f" Streamlit requires Git {min_version_string} or later, "
f"but you have {git_version_string}.",
fg="yellow",
)
cli_util.print_to_cli(
" Git is used by Streamlit Cloud (https://streamlit.io/cloud).",
fg="yellow",
)
cli_util.print_to_cli(
" To enable this feature, please update Git.", fg="yellow"
)
def load_config_options(flag_options: dict[str, Any]) -> None:
"""Load config options from config.toml files, then overlay the ones set by
flag_options.
The "streamlit run" command supports passing Streamlit's config options
as flags. This function reads through the config options set via flag,
massages them, and passes them to get_config_options() so that they
overwrite config option defaults and those loaded from config.toml files.
Parameters
----------
flag_options : dict[str, Any]
A dict of config options where the keys are the CLI flag version of the
config option names.
"""
options_from_flags = {
name.replace("_", "."): val
for name, val in flag_options.items()
if val is not None
}
# Force a reparse of config files (if they exist). The result is cached
# for future calls.
config.get_config_options(force_reparse=True, options_from_flags=options_from_flags)
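
The key massaging is an underscore-to-dot rename with unset flags dropped:

# illustration of the dict comprehension above
flags = {"server_port": 9999, "server_headless": None}
print({k.replace("_", "."): v for k, v in flags.items() if v is not None})
# -> {'server.port': 9999}; None values are dropped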
def run(
main_script_path: str,
is_hello: bool,
args: list[str],
flag_options: dict[str, Any],
) -> None:
"""Run a script in a separate thread and start a server for the app.
This starts a blocking asyncio eventloop.
"""
_fix_sys_path(main_script_path)
_fix_tornado_crash()
_fix_sys_argv(main_script_path, args)
_fix_pydeck_mapbox_api_warning()
_fix_pydantic_duplicate_validators_error()
_install_config_watchers(flag_options)
_install_pages_watcher(main_script_path)
# Create the server. It won't start running yet.
server = Server(main_script_path, is_hello)
async def run_server() -> None:
# Start the server
await server.start()
_on_server_start(server)
# Install a signal handler that will shut down the server
# and close all our threads
_set_up_signal_handler(server)
# Wait until `Server.stop` is called, either by our signal handler, or
# by a debug websocket session.
await server.stopped
# Run the server. This function will not return until the server is shut down.
asyncio.run(run_server())
def create_default_cache_storage_manager() -> CacheStorageManager:
"""
    Get the cache storage manager.
    It is used in both server.py and cli.py so they share a unified cache storage.
Returns
-------
CacheStorageManager
The cache storage manager.
"""
return LocalDiskCacheStorageManager()
def _convert_config_option_to_click_option(
config_option: ConfigOption,
) -> dict[str, Any]:
"""Composes given config option options as options for click lib."""
option = f"--{config_option.key}"
param = config_option.key.replace(".", "_")
description = config_option.description
if config_option.deprecated:
if description is None:
description = ""
description += (
f"\n {config_option.deprecation_text} - {config_option.expiration_date}"
)
return {
"param": param,
"description": description,
"type": config_option.type,
"option": option,
"envvar": config_option.env_var,
}
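
A usage sketch with a hypothetical stand-in for a ConfigOption:

class _Opt:  # hypothetical, illustration only
    key = "server.port"
    description = "The port the server listens on."
    deprecated = False
    type = int
    env_var = "STREAMLIT_SERVER_PORT"

print(_convert_config_option_to_click_option(_Opt()))
# {'param': 'server_port', 'description': '...', 'type': <class 'int'>,
#  'option': '--server.port', 'envvar': 'STREAMLIT_SERVER_PORT'}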
def configurator_options(func):
"""Decorator that adds config param keys to click dynamically."""
for _, value in reversed(_config._config_options_template.items()):
parsed_parameter = _convert_config_option_to_click_option(value)
if value.sensitive:
# Display a warning if the user tries to set sensitive
# options using the CLI and exit with non-zero code.
click_option_kwargs = {
"expose_value": False,
"hidden": True,
"is_eager": True,
"callback": _make_sensitive_option_callback(value),
}
else:
click_option_kwargs = {
"show_envvar": True,
"envvar": parsed_parameter["envvar"],
}
config_option = click.option(
parsed_parameter["option"],
parsed_parameter["param"],
help=parsed_parameter["description"],
type=parsed_parameter["type"],
**click_option_kwargs,
)
func = config_option(func)
return func