def iterate_if_nnone(iterable: tp.Optional[tp.Iterable]) -> tp.Iterable:
    """
    Return a generator iterating over every element of iterable if it's not None.

    If it's None, return an empty generator.

    :param iterable: iterable to iterate over
    :return: an empty generator if iterable is None, else an iterator over iterable
    """
    if iterable is not None:
        return iter(iterable)
    else:
        return iter(tuple())

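A quick usage sketch of the helper above; `tp` is assumed to be `typing`, as throughout this listing:

# Usage sketch - iterating safely over a possibly-None iterable:
for item in iterate_if_nnone(None):
    print(item)          # never executes: the generator is empty
for item in iterate_if_nnone([1, 2, 3]):
    print(item)          # prints 1, 2 and 3
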
def call_if_nnone(clbl: tp.Optional[tp.Callable[..., V]], *args, **kwargs) -> tp.Optional[V]:
    """
    Call clbl with provided arguments, but only if clbl is not None.

    If it's None, then None will be returned. Else, the result of this callable will be returned.

    :param clbl: callable to call, or a None
    :param args: positional arguments to provide to clbl
    :param kwargs: keyword arguments to provide to clbl
    :return: return value or None
    """
    if clbl is not None:
        return clbl(*args, **kwargs)
    else:
        return None

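A sketch of the typical pattern this replaces, namely invoking an optional callback:

# Usage sketch - calling an optional callback only when it is set:
on_done = None
assert call_if_nnone(on_done, 42) is None      # no callback: None comes back
on_done = lambda v: v * 2
assert call_if_nnone(on_done, 21) == 42        # callback invoked normally
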
def extract_optional(v: tp.Union[T, Optional[T]]) -> T:
    """
    If v is an Optional, extract the value that it wraps.

    If it is not, return v.

    :param v: value to extract the value from
    :return: resulting value
    """
    # Optional here is this library's proxy wrapper class (hence the
    # _Proxy__obj attribute), not typing.Optional.
    if isinstance(v, Optional):
        return getattr(v, '_Proxy__obj')
    else:
        return v

def install(self) -> 'BaseExceptionHandler':
    """
    Register this handler to run upon exceptions
    """
    from .global_eh import GlobalExcepthook
    GlobalExcepthook().add_hook(self)
    return self

def uninstall(self):
    """
    Unregister this handler from running on exceptions
    """
    from .global_eh import GlobalExcepthook
    GlobalExcepthook().remove_hook(self)

def handle_exception(self, type_: tp.Callable[[type, BaseException, types.TracebackType], None],
                     value,
                     traceback: types.TracebackType) -> tp.Optional[bool]:
    """
    Return True to intercept the exception, so that it won't be propagated to other handlers.
    """
    pass

def exception_handler(priority: int = NORMAL_PRIORITY):
    """
    Convert a callable to a FunctionExceptionHandler. Usage:

    >>> @exception_handler(priority=-10)
    >>> def handle_exc(type_, val, traceback):
    >>>     ...

    You can also use:

    >>> @exception_handler
    >>> def handle_exc(type_, val, traceback):
    >>>     ...

    The default priority is 0, but this way of calling it is not recommended and will
    result in a UserWarning.

    :return: ExceptionHandler instance
    """
    if not isinstance(priority, int):
        # Called bare (@exception_handler): priority is actually the callable
        warnings.warn('Please specify priority, using default of 0', UserWarning)
        return FunctionExceptionHandler(priority, priority=NORMAL_PRIORITY)

    def outer(fun: ExceptionHandlerCallable) -> FunctionExceptionHandler:
        return FunctionExceptionHandler(fun, priority=priority)

    return outer

def unpack_dict(dct: tp.Dict[K, V], *args: K,
                map_through: tp.Callable[[V], V] = lambda y: y) -> tp.Iterator[V]:
    """
    Unpack a dictionary by accessing its given keys in parallel.

    Example:

    >>> a, b, c = unpack_dict({1: 2, 2: 3, 4: 5}, 1, 2, 4)
    >>> assert a == 2 and b == 3 and c == 5

    :param dct: dictionary to unpack
    :param args: keys in this dictionary
    :param map_through: a keyword argument, callable that will be called with
        each value returned and the result of this callable will be returned
    :return: an iterator
    :raises KeyError: a key was not found
    """
    for key in args:
        yield map_through(dct[key])

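The `map_through` hook is easiest to see in a short sketch:

# Usage sketch - unpacking with a transform applied to every value:
w, h = unpack_dict({'width': '10', 'height': '20'}, 'width', 'height',
                   map_through=int)
assert (w, h) == (10, 20)
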
def none_if_false(y: tp.Any) -> tp.Optional[tp.Any]:
    """
    Return None if y is false, else return y

    :param y: value to check
    :return: None if y is false, else y
    """
    if not y:
        return None
    return y

def pad_to_multiple_of_length(seq: Appendable[T], multiple_of: int,
                              pad_with: tp.Optional[T] = None,
                              pad_with_factory: tp.Optional[NoArgCallable[T]] = None) -> \
        Appendable[T]:
    """
    Pad the sequence, ie. append elements to it
    until its length is a multiple of multiple_of.

    :param seq: sequence to lengthen
    :param multiple_of: the length of the returned sequence will be a multiple of this
    :param pad_with: element with which to pad the sequence
    :param pad_with_factory: a callable/0 that returns an element with which to pad the sequence
    :return: the padded sequence
    """
    if pad_with is not None and pad_with_factory is not None:
        raise ValueError('Give either pad_with or pad_with_factory, not both')
    if pad_with_factory is None:
        def pad_with_factory():
            return pad_with
    while len(seq) % multiple_of:
        seq.append(pad_with_factory())
    return seq

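A minimal sketch of both padding modes; `Appendable` covers ordinary lists:

# Usage sketch - padding a list up to a multiple of 4:
assert pad_to_multiple_of_length([1, 2, 3], 4, pad_with=0) == [1, 2, 3, 0]
# pad_with_factory lets each padding element be freshly constructed:
padded = pad_to_multiple_of_length([1], 3, pad_with_factory=list)
assert padded == [1, [], []] and padded[1] is not padded[2]
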
def one_tuple(x: tp.Iterable[T]) -> tp.Iterator[tp.Tuple[T]]:
    """
    Change an iterable into a sequence of one-element tuples, each wrapping one
    element of the input. Essentially syntactic sugar for:

    >>> for z in x:
    >>>     yield z,

    :param x: sequence to tupleify
    :return: an iterator of one-element tuples
    """
    for z in x:
        yield z,

def split_shuffle_and_join(entries: tp.List[T],
                           whether_to_shuffle: Predicate[T] = lambda x: True,
                           not_shuffled_to_front: bool = True) -> tp.List[T]:
    """
    Split elements in entries into two groups - one group, called True, is the one for which
    whether_to_shuffle(elem) is True, the other is False.

    Shuffle the group True.

    If not_shuffled_to_front, elements in the group False will go at the beginning of the
    returned list, followed by the shuffled elements. If it's False, the not-shuffled elements
    will be at the back of the list.

    Order of the not shuffled elements will be preserved.

    :param entries: list of elements
    :param whether_to_shuffle: a decider determining to which group a given element belongs
    :param not_shuffled_to_front: if True then not shuffled elements will be put before shuffled,
        else the not shuffled elements will be at the back of the list
    :return: list altered to specification
    """
    not_shuffled, shuffled = [], []
    for elem in entries:
        (shuffled if whether_to_shuffle(elem) else not_shuffled).append(elem)
    random.shuffle(shuffled)
    if not_shuffled_to_front:
        return not_shuffled + shuffled
    else:
        return shuffled + not_shuffled

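A short sketch: shuffle only the even numbers, keeping the odd ones in order at the front:

# Usage sketch - odd numbers stay in order, evens get shuffled behind them:
result = split_shuffle_and_join([1, 2, 3, 4, 5],
                                whether_to_shuffle=lambda v: v % 2 == 0)
assert result[:3] == [1, 3, 5]        # unshuffled group, order preserved
assert sorted(result[3:]) == [2, 4]   # shuffled group, some random order
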
def stringify(obj: tp.Union[tp.Any], stringifier: tp.Callable[[tp.Any], str] = str,
              recursively: bool = False,
              str_none: bool = False) -> tp.Union[tp.List[str], tp.Dict[str, str], str]:
    """
    Stringify an object:
    ie. if a dict, put every item and key (if a dict is given) through stringify,
    if a list, put every item through stringify,
    else just call stringifier on it.

    Note that if you use recursively, then dicts and lists are allowed to be valid elements of
    the returned representation!

    Note that enums will be converted to their labels, eg:

    >>> class Enum(enum.Enum):
    >>>     A = 0
    >>> assert stringify(Enum.A) == 'A'

    :param obj: a list or a dict
    :param stringifier: function that accepts any argument and returns a string representation
    :param recursively: whether to recursively stringify elements, ie. stringify will be called
        on all the children
    :param str_none: whether to stringify a None as well. If True, "None" will be returned
        instead of None
    :return: stringified object
    """
    if isinstance(obj, str):
        y = obj
    elif isinstance(obj, enum.Enum):
        y = obj.name
    elif isinstance(obj, collections.abc.Mapping):
        make_str = (lambda obj2: stringify(obj2, stringifier, True, str_none)) if recursively \
            else stringifier
        y = {make_str(k): make_str(v) for k, v in obj.items()}
    elif isinstance(obj, collections.abc.Sequence):
        make_str = (lambda obj2: stringify(obj2, stringifier, True, str_none)) if recursively \
            else stringifier
        y = [make_str(v) for v in obj]
    elif obj is None:
        y = _stringify_none(str_none, stringifier)
    else:
        y = stringifier(obj)
    return y

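A sketch of the non-recursive and recursive modes:

# Usage sketch:
assert stringify([1, 2]) == ['1', '2']
assert stringify({1: [2, 3]}, recursively=True) == {'1': ['2', '3']}
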
def no_less_than(self, no_less_than: int) -> int:
    """
    Issue an int, which is no less than a given value

    :param no_less_than: the returned id will not be less than this value
    :return: an identifier, no less than no_less_than
    """
    try:
        if self.start > no_less_than:
            return self.start
        else:
            self.start = no_less_than
            return self.start
    finally:
        self.start += 1

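A sketch of the counter semantics. `SequentialIssuer` here is a hypothetical, minimal stand-in exposing the `start` attribute the method above assumes, not the library's actual class:

# Hypothetical stand-in class, for illustration only:
class SequentialIssuer:
    def __init__(self, start: int = 0):
        self.start = start
    no_less_than = no_less_than     # reuse the method defined above

iss = SequentialIssuer(5)
assert iss.no_less_than(3) == 5     # counter already past 3
assert iss.no_less_than(10) == 10   # counter bumped up to 10
assert iss.start == 11              # the finally-block advanced it
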
def allocate_int(self) -> int:
    """
    Return a previously unallocated int, and mark it as allocated

    :return: an allocated int
    :raises Empty: could not allocate an int due to top limit
    """
    if not self.free_ints:
        if self._extend_the_bound_to(self.bound + 10) == 0:
            raise Empty('No integers remaining!')
    x = self.free_ints.pop()
    self.ints_allocated.add(x)
    return x + self.start_at

def pop(self) -> T:
    """
    Return the next element from the stack

    :return: the next element
    """
    if not self.collection:
        return next(self.iterable)
    else:
        return self.collection.pop()

def push(self, item: T) -> None:
    """
    Push an item so that the next pop will retrieve this item

    :param item: item to push
    """
    self.collection.append(item)

def push_left(self, item):
    """
    Push an item so that the last pop from the internal buffer will retrieve this item

    :param item: item to push
    """
    self.collection.appendleft(item)

def install_force_gc_collect(severity_level: int = 1) -> None:
    """
    Install a default first severity level handler that forces a GC collection

    :param severity_level: severity level on which to call
    """
    MemoryPressureManager().register_on_entered_severity(severity_level)(gc.collect)

def has_keys(self, *keys) -> Predicate:
    """
    Return a predicate checking whether this value has the provided keys
    """
    return make_operation_two_args(_has_keys)(self, keys)

def one_of(self, *values) -> Predicate:
    """
    Return a predicate checking if x is amongst values
    """
    return make_operation_two_args(_one_of)(self, values)

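A sketch of how these predicate methods compose, assuming satella's standard predicate entry point `x` (from `satella.coding.predicates`) is the object these methods live on:

# Usage sketch (assumes: from satella.coding.predicates import x):
is_small = x.one_of(1, 2, 3)
assert is_small(2)
assert not is_small(7)
has_name = x.has_keys('name')
assert has_name({'name': 'satella'})
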
def type(self) -> tp.Type:
    """
    Return a predicate returning the type of its argument
    """
    def type_of(v):
        return type(self.operation(v))
    return PredicateClass(type_of)

def is_instance(self, *args):
    """
    Check if the given value is an instance of one of the given types.

    :param args: will be passed as the second argument to isinstance
    """
    def is_instance(v):
        return isinstance(self.operation(v), args)
    return PredicateClass(is_instance)

def identity(self) -> Predicate:
    """
    Spawn another object with the same operation, but a different identity.

    Used for constructing dicts keyed by predicates.
    """
    return PredicateClass(self.operation)

def is_valid_schema(self, schema: tp.Optional[tp.Union[Descriptor, tp.Dict]] = None, **kwargs):
    """
    Check if the given value has the correct schema.

    The schema is the same as in
    :py:meth:`~satella.coding.structures.DictObject.is_valid_schema`
    """
    def is_schema_correct(v):
        from satella.coding.structures import DictObject
        return DictObject(self.operation(v)).is_valid_schema(schema, **kwargs)
    return PredicateClass(is_schema_correct)

def has(self, predicate: 'PredicateClass') -> Predicate:
    """
    Check if any element of the current value (which must be an iterable)
    returns True when applied to predicate

    :param predicate: predicate that has to return True for at least one of this predicate's
        values
    """
    def op(v):
        for e in self.operation(v):
            if predicate(e):
                return True
        return False
    return PredicateClass(op)

def build_structure(struct: tp.Union[tuple, list, dict],
                    argument,
                    final_operator=lambda y: y,
                    nested_call=False) -> tp.Union[tuple, list, dict]:
    """
    Given a structure (tuple, list, dict) that contains x's as some of the elements,
    build such a structure corresponding to the given one in which all x's are replaced
    by the result of their calculation on argument.

    Just note that if you're constructing dictionaries, use the .identity() method of predicate
    to randomize its identity.

    :param struct: structure to build
    :param argument: argument
    :param final_operator: an operator to call on the result
    :param nested_call: internal, don't use
    :return: analogous structure
    """
    if not nested_call:
        v = build_structure(struct, argument, None, True)
        f = final_operator(v)
    else:
        from satella.coding.structures import HashableWrapper
        if isinstance(struct, tp.MutableMapping):
            new_dict = {}
            for key, value in struct.items():
                key = build_structure(key, argument, None, True)
                value = build_structure(value, argument, None, True)
                new_dict[key] = value
            f = new_dict
        elif isinstance(struct, tp.Sequence):
            new_seq = []
            for elem in struct:
                if isinstance(elem, PredicateClass):
                    elem = elem(argument)
                else:
                    elem = build_structure(elem, argument, None, True)
                new_seq.append(elem)
            f = struct.__class__(new_seq)
        elif isinstance(struct, HashableWrapper):
            obj = getattr(struct, '_Proxy__obj')
            f = build_structure(obj, argument, None, True)
        elif isinstance(struct, PredicateClass):
            f = struct(argument)
        else:
            f = struct
    return f

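A minimal sketch of the substitution, assuming satella's predicate entry point `x` (a bare `x` evaluates to the argument itself):

# Usage sketch (assumes: from satella.coding.predicates import x):
template = [x, 42, (x, x)]
assert build_structure(template, 5) == [5, 42, (5, 5)]
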
def trace_function(tracer, name: str, tags: tp.Optional[dict] = None,
                   tags_factory: tp.Optional[tp.Union[
                       tp.Dict[str, tp.Callable], tp.List[tp.Tuple[str, tp.Callable]]]] = None):
    """
    Return a decorator that will trace the execution of a given function
    using tracer.start_active_span.

    Can optionally construct its tags from a predicate building, example:

    >>> class Device:
    >>>     device_id = 'test'
    >>> @trace_function(tracer, 'Processing', tags_factory=[('device_id', x[0].device_id)])
    >>> def process(device: Device):
    >>>     ...

    :param tracer: tracer to use
    :param name: name of the trace
    :param tags: optional tags to use
    :param tags_factory: a list of tuples (tag name, callable that is called with the *args
        and **kwargs passed to this function). Extra tags will be generated from this.
        Can also be a dict.
    """
    if isinstance(tags_factory, dict):
        tags_factory = list(tags_factory.items())

    def outer(fun):
        @wraps(fun)
        def inner(*args, **kwargs):
            nonlocal tags, tags_factory
            my_tags = tags
            if tags_factory is not None:
                if tags is None:
                    tags = {}
                my_tags = copy.copy(tags)
                for key, value in tags_factory:
                    try:
                        v = value(*args, **kwargs)
                    except TypeError:
                        warnings.warn('You are using the deprecated single-parameter version '
                                      'of tags_factory. Please upgrade to the newer one.',
                                      DeprecationWarning)
                        v = value(args)
                    my_tags[key] = v
            with tracer.start_active_span(name, tags=my_tags):
                return fun(*args, **kwargs)
        return inner
    return outer

def trace_future(future: tp.Union[ResponseFuture, Future], span: Span):
    """
    Install a handler that will close a span upon a future completing,
    attaching the exception contents if the future ends with an exception.

    :param future: can be either a normal Future or a Cassandra's ResponseFuture
    :param span: span to close on future's completion
    """
    if isinstance(future, ResponseFuture):
        warnings.warn('Tracing Cassandra futures is deprecated. Use wrap_future() to '
                      'convert it to a standard Python future. This feature will be '
                      'deprecated in Satella 3.x', DeprecationWarning)
        future = wrap_future(future)

    def close_future(fut):
        exc = fut.exception()
        if exc is not None:
            # Derive the exception triple from the future itself;
            # sys.exc_info() is not populated inside a done-callback.
            # noinspection PyProtectedMember
            Span._on_error(span, type(exc), exc, exc.__traceback__)
        span.finish()

    future.add_done_callback(close_future)

def typednamedtuple(cls_name: str, *arg_name_type: type) -> tp.Type[tp.Tuple]:
    """
    Returns a new subclass of tuple with named fields.

    Fields will be coerced to the type passed in the pair, if they are not already
    of the given type.

    Parameters are tuples of (field name, class/constructor as callable/1).

    For example:

    >>> tnt = typednamedtuple('tnt', ('x', float), ('y', float))
    >>> a = tnt('5.0', y=2)

    a.x is float, a.y is float too
    """
    fieldnames = []
    typeops = []
    mapping = {}
    for name, type_ in arg_name_type:
        fieldnames.append(name)
        typeops.append(type_)
        mapping[name] = type_

    MyCls = namedtuple(cls_name, fieldnames)

    class Wrapper(MyCls):
        __doc__ = MyCls.__doc__
        __name__ = MyCls.__name__

        def __new__(cls, *args, **kwargs):
            nargs = list(map(_adjust, zip(args, typeops[:len(args)])))
            for next_field_name in fieldnames[len(nargs):]:
                try:
                    nargs.append(_adjust((kwargs.pop(next_field_name),
                                          mapping[next_field_name])))
                except KeyError:
                    raise TypeError('Field %s not given' % (next_field_name,))
            if len(kwargs) > 0:
                raise TypeError('Too many parameters')
            return MyCls.__new__(MyCls, *nargs)

    return Wrapper

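A short sketch of the coercion behavior described above; it relies on the module's `_adjust` helper, so it runs in the context of the library this listing comes from:

# Usage sketch - fields are coerced through their constructors:
tnt = typednamedtuple('tnt', ('x', float), ('y', float))
p = tnt('5.0', y=2)
assert isinstance(p.x, float) and p.x == 5.0
assert isinstance(p.y, float) and p.y == 2.0
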
def pop_item(self, item: T) -> T:
    """
    Pop a given item off the heap, maintaining the heap invariant

    :raise ValueError: element not found
    """
    self.data.remove(item)  # raises: ValueError
    heapq.heapify(self.data)
    return item

def pop(self) -> T:
    """
    Return the smallest element of the heap, removing it from the heap.

    :raises IndexError: on empty heap
    """
    return heapq.heappop(self.data)

def filter_map(self, filter_fun: tp.Optional[Predicate[T]] = None,
               map_fun: tp.Optional[tp.Callable[[T], tp.Any]] = None):
    """
    Keep only the items for which filter_fun(item) returns True, then apply
    map_fun to each of the remaining items. Maintain the heap invariant.
    """
    heap = filter(filter_fun, self.data) if filter_fun else self.data
    heap = map(map_fun, heap) if map_fun else heap
    heap = list(heap) if not isinstance(heap, list) else heap
    self.data = heap
    heapq.heapify(self.data)

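A sketch of these heap methods working together, assuming they live on a heap class holding its elements in a `data` list, as the bodies above imply (in satella this is `satella.coding.structures.Heap`; the constructor accepting an iterable is an assumption):

# Usage sketch (assumes: from satella.coding.structures import Heap):
h = Heap([5, 1, 4, 2])
assert h.pop() == 1                         # smallest element comes off first
h.filter_map(filter_fun=lambda v: v > 2,
             map_fun=lambda v: v * 10)      # keep >2, then scale
assert list(h.iter_ascending()) == [40, 50]
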
def iter_ascending(self) -> tp.Iterable[T]:
    """
    Return an iterator returning all elements in this heap sorted ascending.

    State of the heap is not changed.
    """
    heap = copy.copy(self.data)
    while heap:
        yield heapq.heappop(heap)

def iter_descending(self) -> tp.Iterable[T]:
    """
    Return an iterator returning all elements in this heap sorted descending.

    State of the heap is not changed.

    This loads all elements of the heap into memory at once, so be careful.
    """
    return reversed(list(self.iter_ascending()))

def trace_exception(span: tp.Optional[Span], exc_type: tp.Optional[ExceptionClassType] = None,
                    exc_val: tp.Optional[Exception] = None,
                    exc_tb: tp.Optional[types.TracebackType] = None) -> None:
    """
    Log an exception's information to the chosen span, as logs and tags.

    :param span: span to log into, or None for a no-op
    :param exc_type: exception type. If None this will be taken from sys.exc_info.
    :param exc_val: exception value. If None this will be taken from sys.exc_info.
    :param exc_tb: exception traceback. If None this will be taken from sys.exc_info.
    """
    if span is None:
        return
    if exc_type is None:
        exc_type, exc_val, exc_tb = sys.exc_info()
    if exc_type is None:
        return
    span.set_tag('error', True)
    span.log_kv({'event': 'error',
                 'message': str(exc_val),
                 'error.object': exc_val,
                 'error.kind': exc_type,
                 'stack': Traceback(exc_tb.tb_frame).pretty_format()})

def CopyDocsFrom(target_cls: tp.Type):
    """
    A metaclass to copy documentation from some other class for respective methods.

    >>> class Source:
    >>>     def test(self):
    >>>         'docstring'
    >>> class Target(metaclass=CopyDocsFrom(Source)):
    >>>     def test(self):
    >>>         ...
    >>> assert Target.test.__doc__ == Source.test.__doc__

    :param target_cls: class from which to copy the docs
    """
    def inner(name, bases, dictionary):
        if '__doc__' not in dictionary:
            if hasattr(target_cls, '__doc__'):
                if target_cls.__doc__:
                    dictionary['__doc__'] = target_cls.__doc__
        for key, value in dictionary.items():
            if not value.__doc__ and callable(value):
                if hasattr(target_cls, key):
                    if getattr(target_cls, key).__doc__:
                        value.__doc__ = getattr(target_cls, key).__doc__
                        dictionary[key] = value
        return type(name, bases, dictionary)
    return inner

def DocsFromParent(name: str, bases: tp.Tuple[type], dictionary: dict) -> tp.Type:
    """
    A metaclass that fetches missing docstrings for methods from the classes' bases,
    looked up BFS. This will fetch the class's docstring itself, if available and not
    present in the child.

    >>> class Father:
    >>>     def test(self):
    >>>         '''my docstring'''
    >>> class Child(Father, metaclass=DocsFromParent):
    >>>     def test(self):
    >>>         ...
    >>> assert Child.test.__doc__ == 'my docstring'
    """
    if '__doc__' not in dictionary:
        for base in walk(bases, _extract_bases, deep_first=False):
            if hasattr(base, '__doc__'):
                if base.__doc__:
                    dictionary['__doc__'] = base.__doc__
                    break
    for key, value in dictionary.items():
        if not value.__doc__ and callable(value):
            for base in walk(bases, _extract_bases, deep_first=False):
                if hasattr(base, key):
                    if getattr(base, key).__doc__:
                        value.__doc__ = getattr(base, key).__doc__
                        dictionary[key] = value
                        break
    return type(name, bases, dictionary)

def skip_redundant(iterable, skip_set=None):
    """Redundant items are repeated items or items in the original skip_set."""
    if skip_set is None:
        skip_set = set()
    for item in iterable:
        if item not in skip_set:
            skip_set.add(item)
            yield item

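A one-line sketch of deduplication with and without a pre-seeded skip set:

# Usage sketch - drop repeats and anything already in skip_set:
assert list(skip_redundant([1, 2, 1, 3, 2])) == [1, 2, 3]
assert list(skip_redundant([1, 2, 3], skip_set={2})) == [1, 3]
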
def metaclass_maker(name: str, bases: tuple, a_dict: dict) -> tp.Type:
    """
    Automatically construct a compatible meta-class like interface. Use like:

    >>> class C(A, B, metaclass=metaclass_maker):
    >>>     pass
    """
    metaclass = get_noconflict_metaclass(bases, (), ())
    return metaclass(name, bases, a_dict)

def wrap_property(getter: tp.Callable[[GetterDefinition], GetterDefinition] = lambda x: x,
                  setter: tp.Callable[[SetterDefinition], SetterDefinition] = lambda x: x,
                  deleter: tp.Callable[[DeleterDefinition], DeleterDefinition] = lambda x: x):
    """
    Construct a property wrapper.

    This will return a function that, if given a property, will wrap its getter, setter and
    deleter with the provided functions.

    Getter, setter and deleter are extracted from fget, fset and fdel, so only native
    properties, please, not descriptor objects.

    :param getter: callable that accepts a callable(instance) -> value, and returns the same.
        Getter will be wrapped by this
    :param setter: callable that accepts a callable(instance, value) and returns the same.
        Setter will be wrapped by this
    :param deleter: callable that accepts a callable(instance), and returns the same.
        Deleter will be wrapped by this
    """
    def inner(prop):
        return wraps(prop)(property(getter(prop.fget), setter(prop.fset), deleter(prop.fdel)))
    return inner

def wrap_with(callables: tp.Callable[[tp.Callable], tp.Callable] = lambda x: x,
              properties: tp.Callable[[property], property] = lambda x: x,
              selector_callables: Predicate[tp.Callable] = lambda clb: True,
              selector_properties: Predicate[property] = lambda clb: True):
    """
    A metaclass that wraps all elements discovered in this class with something.

    Example:

    >>> def make_double(fun):
    >>>     return lambda self, x: fun(x)*2
    >>> class Doubles(metaclass=wrap_all_methods_with(make_double)):
    >>>     def return_four(self, x):
    >>>         return 2
    >>> assert Doubles().return_four(4) == 4

    Note that every callable that appears in the class namespace, ie. object that has __call__,
    will be considered for wrapping.

    This is compatible with the abc.ABCMeta metaclass.

    :param callables: function with which to wrap all callables found in the class
    :param properties: function with which to wrap all properties found in the class
    :param selector_callables: additional criterion to be run on a given callable before
        deciding to wrap it. It must return True for wrapping to proceed.
    :param selector_properties: additional criterion to be run on a given property before
        deciding to wrap it. It must return True for wrapping to proceed.
    """
    @wraps(ABCMeta)
    def WrapAllMethodsWithMetaclass(name, bases, dct):
        new_dct = {}
        for key, value in dct.items():
            if not hasattr(value, '_dont_wrap'):
                if callable(value) and selector_callables(value):
                    value = callables(value)
                elif isinstance(value, property) and selector_properties(value):
                    value = properties(value)
            new_dct[key] = value
        return ABCMeta(name, bases, new_dct)
    return WrapAllMethodsWithMetaclass

# Note: the call site below treats read_nowait's return value as a Future,
# so in the source library this function is decorated to run in a separate
# thread (satella's @call_in_separate_thread()); the decorator is restored
# here on that basis.
@call_in_separate_thread()
def read_nowait(process: subprocess.Popen, output_list: tp.List[str]):
    """
    This spawns a thread to read the given process' stdout and append it to a list, in
    order to prevent the buffer from filling up completely.

    To retrieve the entire stdout after the process finishes, do

    >>> ''.join(output_list)

    This thread will terminate automatically after the process closes its stdout or finishes.
    """
    while True:
        with silence_excs(subprocess.TimeoutExpired):
            process.wait(timeout=0.1)
        line = process.stdout.read(2048)
        if line:
            output_list.append(line)
        else:
            break

def call_and_return_stdout(args: tp.Union[str, tp.List[str]],
                           timeout: tp.Optional[tp.Union[str, int]] = None,
                           encoding: tp.Optional[str] = None,
                           expected_return_code: tp.Optional[int] = None,
                           **kwargs) -> tp.Union[bytes, str]:
    """
    Call a process and return its stdout.

    Everything in kwargs will be passed to subprocess.Popen.

    A bytes object will be returned if encoding is not defined, else stdout will be decoded
    according to the specified encoding.

    .. deprecated:: Use :code:`subprocess.check_output` instead.

    :param args: arguments to run the program with. Can be either a string or a list of strings.
    :param timeout: amount of seconds to wait for the process result. If the process does not
        complete within this time, it will be sent a SIGKILL. Can also be a time string. If left
        at the default, ie. None, timeout won't be considered at all.
    :param encoding: encoding with which to decode stdout. If none is passed, it will be
        returned as a bytes object
    :param expected_return_code: an expected return code of this process. 0 is the default. If
        the process returns anything else, ProcessFailed will be raised. If left at the default
        (None), the return code won't be checked at all
    :raises ProcessFailed: process' result code was different from the requested
    :raises TimeoutError: timeout was specified and the process didn't complete
    """
    warnings.warn('This is deprecated, use subprocess.check_output instead', DeprecationWarning)
    kwargs['stdout'] = subprocess.PIPE
    stdout_list = []
    proc = subprocess.Popen(args, **kwargs)
    fut = read_nowait(proc, stdout_list)
    if timeout is not None:
        timeout = parse_time_string(timeout)
    try:
        proc.wait(timeout=timeout)
    except subprocess.TimeoutExpired:
        proc.kill()
        proc.wait()
        raise TimeoutError('Process did not complete within %s seconds' % (timeout,))
    finally:
        fut.result()
    if encoding is None:
        result = b''.join(stdout_list)
    else:
        result = ''.join(row.decode(encoding) for row in stdout_list)
    if expected_return_code is not None:
        if proc.returncode != expected_return_code:
            raise ProcessFailed(proc.returncode, result)
    return result

def dump_frames_on(sig_no: tp.Optional[SIG_TYPE] = None,
                   stack_frame: tp.Optional[types.FrameType] = None,
                   output: tp.TextIO = sys.stderr):
    """
    Dump all stack frames of all threads, including the values of all the local variables.

    :param sig_no: signal received. Default is None.
    :param stack_frame: stack frame. Default is None.
    :param output: output to print to. Default is stderr.
    """
    from satella.instrumentation import Traceback
    output.write("Stack frame dump requested in response to signal %s\n" % (sig_no,))
    # noinspection PyProtectedMember
    for frame_no, frame in sys._current_frames().items():
        output.write("For stack frame %s" % (frame_no,))
        tb = Traceback(frame)
        tb.pretty_print(output=output)
    output.write("End of stack frame dump\n")

def install_dump_frames_on(signal_number: SIG_TYPE, output: tp.TextIO = sys.stderr):
    """
    Instruct Python to dump all frames onto output, along with their local variables,
    upon receiving the given signal
    """
    signal.signal(signal_number,
                  lambda sig_no, stack_frame: dump_frames_on(sig_no, stack_frame, output))

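A typical installation sketch (SIGUSR1 is POSIX-only, so this particular signal choice is an assumption about the platform):

# Usage sketch - dump all threads' frames when SIGUSR1 arrives (POSIX only):
import signal
install_dump_frames_on(signal.SIGUSR1)
# ...then, from a shell:  kill -USR1 <pid>
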
def queue_get(queue_getter: tp.Union[str, tp.Callable[[object], Queue]],
              timeout: tp.Optional[float] = None,
              exception_empty: tp.Union[
                  ExceptionClassType, tp.Tuple[ExceptionClassType, ...]] = queue.Empty,
              queue_get_method: tp.Callable[[Queue, tp.Optional[float]], tp.Any] =
              lambda x, timeout: x.get(timeout=timeout),
              method_to_execute_on_empty: tp.Optional[tp.Union[str, tp.Callable]] = None):
    """
    A decorator for class methods that consume from a queue.

    Timeout of None means block forever.

    The decorated function must be a normal instance method accepting an element taken
    from the queue, so it must accept two arguments - the first is self, the second is
    the element from the queue.

    :param queue_getter: a callable that will render us the queue, or a string, which will be
        translated to a property name
    :param timeout: a timeout to wait. If a timeout happens, a simple no-op will be done and
        None will be returned.
    :param exception_empty: exception (or a tuple of exceptions) that are raised on the queue
        being empty
    :param queue_get_method: a method to invoke on this queue. Accepts two arguments - the first
        is the queue, the second is the timeout. It has to follow the type signature given.
    :param method_to_execute_on_empty: a callable, or a name of a method, to be executed (with
        no arguments other than self) in case queue.Empty was raised. Can be a callable - in
        that case it should expect no arguments - or can be a string, which will be assumed to
        be a method name

    Use instead of:

    >>> class QueueProcessor:
    >>>     def __init__(self, queue):
    >>>         self.queue = queue
    >>>     def do(self):
    >>>         try:
    >>>             msg = self.queue.get(timeout=TIMEOUT)
    >>>         except queue.Empty:
    >>>             return

    Instead of the aforementioned code, please use:

    >>> class QueueProcessor:
    >>>     def __init__(self, queue):
    >>>         self.queue = queue
    >>>     @queue_get(lambda self: self.queue, timeout=TIMEOUT)
    >>>     def do(self, msg):
    >>>         ...
    """
    if isinstance(queue_getter, str):
        def my_queue_getter(x):
            return getattr(x, queue_getter)
    else:
        my_queue_getter = queue_getter

    def outer(fun):
        @wraps(fun)
        def inner(self):
            try:
                que = my_queue_getter(self)
                item = queue_get_method(que, timeout)
                return fun(self, item)
            except exception_empty:
                if method_to_execute_on_empty is not None:
                    if callable(method_to_execute_on_empty):
                        method_to_execute_on_empty()
                    elif isinstance(method_to_execute_on_empty, str):
                        method = getattr(self, method_to_execute_on_empty)
                        method()
        return inner
    return outer

Python | def loop_while(pred: tp.Union[Predicate, NoArgCallable[bool]] = lambda: True):
"""
    Decorator to loop the decorated function for as long as the predicate,
    called on its first argument, returns True.
    Mostly used to loop class methods, like:
>>> from satella.coding.predicates import x
>>> class Terminable:
>>> terminated = False
>>> @loop_while(x.terminated == False)
>>> def run(self):
>>> ...
You can also loop standard functions, like this:
>>> a = {'terminating': False}
>>> @loop_while(lambda: not a['terminating'])
>>> def execute_while():
>>> ...
    :param pred: predicate to evaluate. Can accept a single argument, in which
        case it will be fed the class instance, or no arguments, in which case
        it will be considered to annotate a plain function.
    Note that the function you decorate may only take arguments if it's a method.
    If it's a standard function, it should not take any arguments.
"""
def outer(fun):
@wraps(fun)
def inner(*args, **kwargs):
pred_f = pred
if len(args):
def pred_f():
return pred(args[0])
while pred_f():
fun(*args, **kwargs)
return inner
    return outer

def locked(blocking=True, timeout=-1) -> tp.Callable[[tp.Callable], tp.Callable]:
"""
    Decorator for methods that should execute while holding their object's lock.
    The object itself is expected to be usable as a context manager accepting
    blocking and timeout arguments.
    :param blocking: whether to block at all
    :param timeout: optional timeout. The default of -1 means "return ASAP"
"""
def inner(f):
@wraps(f)
def in_ner(self, *args, **kwargs):
with self(blocking=blocking, timeout=timeout):
return f(self, *args, **kwargs)
return in_ner
    return inner

def mock_env(env: tp.Optional[str] = None, val: tp.Optional[str] = None) -> \
tp.Callable[[tp.Callable], tp.Callable]:
"""
Set an env and then clear it out
:param env:
:param val:
:return:
"""
def outer(fun: tp.Callable):
@wraps(fun)
def inner(*args, **kwargs):
try:
if env is not None:
os.environ[env] = val
return fun(*args, **kwargs)
finally:
if env is not None:
del os.environ[env]
return inner
    return outer
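
# A usage sketch for mock_env; MY_VAR and test_feature are hypothetical
# names used purely for illustration.
@mock_env('MY_VAR', '1')
def test_feature():
    assert os.environ['MY_VAR'] == '1'
# After test_feature() returns, MY_VAR is deleted from os.environ again.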

def wait(self, timeout: tp.Optional[float] = None, throw_exception: bool = True):
"""
    Block until the atomic number changes its value.
:param timeout: maximum time to wait. None means wait indefinitely
:param throw_exception: whether to throw WouldWaitMore on timeout
:raises WouldWaitMore: the value hasn't changed within the timeout
"""
try:
self.condition.wait(timeout)
except WouldWaitMore:
if throw_exception:
            raise

def wait_until_equal(self, v: Number, timeout: tp.Optional[float] = None) -> None:
"""
Wait until the value of this number equals v.
:param v: value to compare this number against
:param timeout: maximum time to wait
:raise WouldWaitMore: timeout expired without the value becoming equal to target
"""
if timeout is None:
while True:
if self == v:
break
self.wait()
else:
with measure(timeout=timeout) as measurement:
while not measurement.timeouted:
if self == v:
break
self.wait(measurement.time_remaining, throw_exception=False)
with Monitor.acquire(self):
if self.value != v:
                raise WouldWaitMore()
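
# A usage sketch, assuming this method belongs to an atomic-number class
# such as satella's AtomicNumber; counter is a hypothetical instance:
# counter = AtomicNumber(0)
# ... another thread keeps incrementing counter ...
# counter.wait_until_equal(5, timeout=10.0)  # raises WouldWaitMore on timeout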

def precondition(*t_ops: Condition, **kw_opts: Condition):
"""
Check that a precondition happens for given parameter.
You can do it like this:
>>> @precondition(lambda x: x == 1)
>>> def return_two(x):
>>> return x*2
or
>>> @precondition('x == 1')
>>> def return_two(x):
>>> ..
You can use all standard locals in precondition.
    Your function call will raise a PreconditionError (a subclass of
    ValueError) if a precondition fails.
    A precondition of None will always be true.
    Keyword arguments are supported as well. Note that their preconditions will
    be checked only if the arguments are actually passed, so make your default
    arguments obey their preconditions - they won't be checked if the default
    value is used.
"""
tn_ops = []
for t_op in t_ops:
if t_op is None:
precond_ = _TRUE
else:
precond_ = source_to_function(t_op)
tn_ops.append(precond_)
kw_ops = {}
for kwarg_, value in kw_opts.items():
if value is None:
precond_ = _TRUE
elif isinstance(value, str):
precond_ = source_to_function(value)
else:
precond_ = value
kw_ops[kwarg_] = precond_
from satella.coding.recast_exceptions import rethrow_as
def outer(fun):
@wraps(fun)
def inner(*args, **kwargs):
assert len(args) >= len(tn_ops), 'More preconditions than positional arguments!'
for kwarg in kwargs:
if kwarg in kw_ops:
if not kw_ops[kwarg](kwargs[kwarg]):
raise PreconditionError('Argument %s failed precondition check' %
(kwarg,))
with rethrow_as(TypeError, PreconditionError):
for arg, precond in itertools.zip_longest(args, tn_ops, fillvalue=_TRUE):
if precond(arg) is False:
raise PreconditionError(
'Argument of value %s failed precondition check' % (arg,))
return fun(*args, **kwargs)
return inner
    return outer

def postcondition(condition: Condition):
"""
    Return a decorator, asserting that the provided condition, called on the
    result of the decorated function, does not return False; otherwise the call
    will raise PreconditionError.
:param condition: callable that accepts a single argument, the return value of the function.
Can be also a string, in which case it is an expression about the value x of return
"""
def outer(fun):
@wraps(fun)
def inner(*args, **kwargs):
v = fun(*args, **kwargs)
if condition(v) is False:
raise PreconditionError('Condition not true')
return v
return inner
    return outer
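
# A usage sketch for postcondition: guarantee a non-negative result
# (diff is an illustrative function, not part of the library).
@postcondition(lambda v: v >= 0)
def diff(a, b):
    return a - b
# diff(5, 3) returns 2; diff(3, 5) raises PreconditionError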

def synchronize_on_attribute(attr_name: str):
"""
    When a Monitor is an attribute of a class, and you have a method
    that you would like to secure by acquiring that monitor, use this.
    The first argument taken by that method must be self.
:param attr_name: name of the attribute that is the monitor
"""
def outer(fun):
@wraps(fun)
def method(self, *args, **kwargs):
# noinspection PyProtectedMember
with getattr(self, attr_name)._monitor_lock:
return fun(self, *args, **kwargs)
return method
    return outer
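
# A usage sketch: a class that keeps a Monitor in an attribute; Archiver is
# a hypothetical class and Monitor is assumed to be satella's Monitor.
# class Archiver:
#     def __init__(self):
#         self.monitor = Monitor()
#     @synchronize_on_attribute('monitor')
#     def archive(self):
#         ...  # runs while holding self.monitor's lock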

def synchronized(fun: tp.Callable) -> tp.Callable:
"""
    This is a decorator. A class method decorated with it will lock the
    global lock of the given instance, making it thread-safe. Depending on
    the usage pattern of your class and its data semantics, your performance
    may vary
"""
@wraps(fun)
def monitored(*args, **kwargs):
# noinspection PyProtectedMember
with args[0]._monitor_lock:
return fun(*args, **kwargs)
    return monitored
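
# A usage sketch, assuming this decorator is exposed as Monitor.synchronized,
# as in satella's Monitor class:
# class Counter(Monitor):
#     def __init__(self):
#         super().__init__()
#         self.value = 0
#     @Monitor.synchronized
#     def increment(self):
#         self.value += 1  # runs under the instance's monitor lock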

def synchronize_on(cls, monitor: 'Monitor') -> tp.Callable[[tp.Callable], tp.Callable]:
"""
A decorator for locking on non-self Monitor objects
Use it like:
>>> class MasterClass(Monitor):
>>> def get_object(self):
>>> class SlaveClass:
>>> @Monitor.synchronize_on(self)
>>> def get_object(self2):
>>> ...
>>> return SlaveClass
"""
def outer(fun):
@wraps(fun)
def inner(*args, **kwargs):
with cls.acquire(monitor):
return fun(*args, **kwargs)
return inner
    return outer

def insert_and_check(self, item) -> bool:
"""
Perform an atomic insert if not already in set
:param item: item to insert
:return: whether the item was successfully inserted
"""
with Monitor.acquire(self):
if item in self:
return False
self.add(item)
        return True
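
# A usage sketch, assuming self is both a set and a Monitor (a monitored
# set); seen, process and item_id are hypothetical names:
# if seen.insert_and_check(item_id):
#     process(item_id)  # runs only the first time item_id is encountered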

def percentile(percent: float) -> float:
"""
Return given percentile of current CPU time's profile
:param percent: float between 0 and 1
:return: the value of the percentile
"""
    return CPUProfileBuilderThread().percentile(percent)

def sleep_cpu_aware(seconds: tp.Union[str, float], of_below: tp.Optional[float] = None,
of_above: tp.Optional[float] = None,
check_each: float = 1) -> bool:
"""
Sleep for specified number of seconds.
Quit earlier if the occupancy factor goes below of_below or above of_above
:param seconds: time to sleep in seconds, or a time string
:param of_below: occupancy factor below which the sleep will return
:param of_above: occupancy factor above which the sleep will return
:param check_each: amount of seconds to sleep at once
:return: whether was awoken due to CPU time condition
"""
    if isinstance(seconds, str):
        # accept time strings such as '30m'; parse_time_string is assumed to
        # be available from satella.time
        seconds = parse_time_string(seconds)
    v = False
if of_below is None and of_above is None:
time.sleep(seconds)
else:
calculate_occupancy_factor() # prime the counter
while seconds > 0:
time_to_sleep = min(seconds, check_each)
time.sleep(time_to_sleep)
of = calculate_occupancy_factor()
if of_above is not None:
if of > of_above:
v = True
break
if of_below is not None:
if of < of_below:
v = True
break
seconds -= time_to_sleep
if seconds <= 0:
break
    return v
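
# A usage sketch: sleep for up to 30 seconds, but return early if the
# machine becomes more than 90% occupied:
# woke_early = sleep_cpu_aware(30, of_above=0.9)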

def calculate_occupancy_factor() -> float:
"""
Get the average load between now and the time it was last called as a float,
where 0.0 is LA=0 and 1.0 is LA=max_cores.
This will be the average between now and the time it was last called.
.. warning:: This in rare cases (being called the first or the second time) may block for
up to 0.1 seconds
:return: a float between 0 and 1 telling you how occupied CPU-wise is your system.
"""
c = _calculate_occupancy_factor()
while c is None:
time.sleep(0.1)
c = _calculate_occupancy_factor()
    return c

def json_encode(x: tp.Any) -> str:
"""
Convert an object to JSON. Will properly handle subclasses of JSONAble
:param x: object to convert
"""
    return JSONEncoder().encode(x)

def read_json_from_file(path: str) -> JSONAble:
"""
Load a JSON from a provided file, as UTF-8 encoded plain text.
:param path: path to the file
:return: JSON content
:raises ValueError: the file contained an invalid JSON
:raises OSError: the file was not readable or did not exist
"""
try:
import ujson
with open(path, 'r') as f_in:
v = ujson.load(f_in)
except ImportError:
with open(path, 'r') as f_in:
try:
v = json.load(f_in)
except json.decoder.JSONDecodeError as e:
raise ValueError(str(e))
    return v

def measure_future(self, future: Future, logging_level: MetricLevel = MetricLevel.RUNTIME,
value_getter: NoArgCallable[float] = time.monotonic, **labels):
"""
    Measure the difference of some value between the moment this function is
    called and the moment the given future completes (with or without an
    exception).
:param future: future that is considered
:param logging_level: one of RUNTIME or DEBUG
:param value_getter: a callable that takes no arguments and returns a float, which is
the value
:param labels: extra labels to call handle() with
"""
future.old_value = value_getter()
def on_future_done(fut: Future):
self.handle(logging_level, value_getter() - fut.old_value, **labels)
    future.add_done_callback(on_future_done)
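
# A usage sketch; executor (a concurrent.futures executor), expensive_call
# and metric (an object exposing this method) are hypothetical names:
# fut = executor.submit(expensive_call)
# metric.measure_future(fut, label='value')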

def measure(self, include_exceptions: bool = True,
logging_level: MetricLevel = MetricLevel.RUNTIME,
value_getter: NoArgCallable[float] = time.monotonic, **labels):
"""
A decorator to measure a difference between some value after the method call
and before it.
By default, it will measure the execution time.
Use like:
>>> call_time = getMetric('root.metric_name.execution_time', 'summary')
>>> @call_time.measure()
>>> def measure_my_execution(args):
>>> ...
    If wrapped around a generator, it will time it from the first element to the
    last, so beware that the measurement will depend on the speed of the consumer.
It also can be used as a context manager:
>>> with call_time.measure(logging_level=MetricLevel.DEBUG, label='key'):
>>> ...
:param include_exceptions: whether to include exceptions
:param logging_level: one of RUNTIME or DEBUG
:param value_getter: a callable that takes no arguments and returns a float, which is
the value
:param labels: extra labels to call handle() with
"""
class MeasurableMixinInternal:
def __init__(self, metric_class, include_exceptions, value_getter,
logging_level, labels):
self.metric_class = metric_class
self.value_getter = value_getter
self.logging_level = logging_level
self.include_exceptions = include_exceptions
self.labels = labels
self.value = None
def __call__(self, fun):
@wraps(fun)
def inner_normal(*args, **kwargs):
start_value = value_getter()
excepted = None
try:
return fun(*args, **kwargs)
except Exception as e:
excepted = e
finally:
value_taken = self.value_getter() - start_value
if excepted is not None and not self.include_exceptions:
raise excepted
self.metric_class.handle(logging_level, value_taken, **labels)
if excepted is not None:
raise excepted
@wraps(fun)
def inner_generator(*args, **kwargs):
start_value = value_getter()
excepted = None
try:
yield from fun(*args, **kwargs)
except Exception as e:
excepted = e
finally:
value_taken = value_getter() - start_value
if excepted is not None and not self.include_exceptions:
raise excepted
self.metric_class.handle(self.logging_level, value_taken, **self.labels)
if excepted is not None:
raise excepted
if inspect.isgeneratorfunction(fun):
return inner_generator
else:
return inner_normal
def __enter__(self):
self.value = self.value_getter()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.include_exceptions and exc_type is not None:
return False
elapsed = self.value_getter() - self.value
self.metric_class.handle(self.logging_level, elapsed, **self.labels)
return False
return MeasurableMixinInternal(self, include_exceptions, value_getter,
                                   logging_level, labels)

def load_value(self):
"""
Return the value that this represents.
WARNING! This may result in importing things from environment, as
pickle.loads will be called.
:return: stored value - if picklable and was pickled
:raises ValueError: value has failed to be pickled or was never pickled
"""
if self.pickle_type is None:
raise ValueError('value was never pickled')
elif self.pickle_type == 'failed':
raise ValueError(
'MemoryCondition has failed to be pickled, reason is %s' % (self.pickle,))
elif self.pickle_type == 'pickle/gzip':
pickle_ = zlib.decompress(self.pickle)
elif self.pickle_type == 'pickle':
pickle_ = self.pickle
else:
raise ValueError('unknown pickle type of %s' % (self.pickle_type,))
try:
return pickle.loads(pickle_)
except pickle.UnpicklingError:
raise ValueError(
            'object picklable, but cannot load in this environment')

def iterate_callable(clbl: tp.Callable[[int], V], start_from: int = 0,
exc_classes=(IndexError, ValueError)) -> tp.Iterator[V]:
"""
Given a callable that accepts an integer and returns the n-th entry, iterate over
it until it starts to throw some exception.
:param clbl: callable to call
:param start_from: number to start from
:param exc_classes: exceptions that being thrown show that the list was exhausted
:return: an iterator
"""
for i in itertools.count(start_from):
try:
yield clbl(i)
except exc_classes:
            return
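
# A small illustration: view a list through its __getitem__, which raises
# IndexError past the end.
assert list(iterate_callable([10, 20, 30].__getitem__)) == [10, 20, 30]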

def length(iterator: Iteratable) -> int:
"""
    Return the length of an iterator, exhausting it along the way
"""
i = 0
for _ in iterator:
i += 1
    return i

def append_sequence(seq: tp.Iterator[tuple], *elems_to_append) -> tp.Iterator[tuple]:
"""
    Return an iterator which appends elems_to_append to every tuple in seq.
Example:
>>> a = [(1, ), (2, ), (3, )]
>>> assert list(append_sequence(a, 1, 2)) == [(1, 1, 2), (2, 1, 2), (3, 1, 2)]
    Any element of seq that is not a tuple will be cast to one.
:param seq: sequence to append
:param elems_to_append: element(s) to append
:return: an iterator
"""
for tpl in seq:
if not isinstance(tpl, tuple):
tpl = tuple(tpl)
        yield tpl + elems_to_append

def walk(obj: T, child_getter: tp.Callable[[T], tp.Optional[tp.List[T]]] = list,
deep_first: bool = True,
leaves_only: bool = False) -> tp.Iterator[T]:
"""
Return every node of a nested structure.
    :param obj: structure to traverse; it will not itself appear in the generator
    :param child_getter: a callable to return a list of children of T.
        Should return an empty list or None if there are no more children.
    :param deep_first: if True, nodes will be returned depth-first, else
        breadth-first
:param leaves_only: if True, only leaf nodes (having no children) will be returned
"""
a = ConstruableIterator(child_getter(obj))
for o in a:
children = child_getter(o)
if children is not None:
try:
child_len = len(children)
except TypeError:
child_len = 0
if child_len:
if deep_first:
a.add_many_immediate(children)
else:
a.add_many(children)
if leaves_only:
continue
        yield o
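
# A small illustration: walk a nested list, treating non-lists as leaves.
# children_of is an illustrative helper, not part of the API.
def children_of(node):
    return node if isinstance(node, list) else None

assert sorted(walk([[1, [2]], [3]], children_of, leaves_only=True)) == [1, 2, 3]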

def add_immediate(self, t: T) -> None:
"""
Schedule given value to be iterated over during the next __next__ call
:param t: value to iterate over
"""
    self.entries.appendleft(t)

def add_many(self, t: tp.Iterable[T]) -> None:
"""
Schedule given values to be iterated over after current items
:param t: iterable of values
"""
    self.entries.extend(t)

def is_unique(self, key: K) -> bool:
"""
    Has the element been seen for the first time?
    Adds it to the set of elements seen so far.
:param key: element to check
:return: whether the element was seen for the first time
"""
try:
hash(key)
if self.set is None:
self.set = set()
if key in self.set:
return False
self.set.add(key)
return True
except TypeError:
warnings.warn('Passed argument is not hashable, you pay with '
'O(n^2) complexity!', RuntimeWarning)
if self.set is None:
self.set = []
if key in self.set:
return False
self.set.append(key)
        return True

def unique(lst: Iteratable) -> tp.Iterator[T]:
"""
    Return each element from lst, but return every element only once.
    Take care for the elements to be hashable and __eq__-able!
    This will internally keep a set of the elements encountered, and skip an
    element if it has already been seen
:param lst: iterable to process
:return: a generator yielding unique items from lst
"""
already_seen = set()
for elem in lst:
if elem not in already_seen:
already_seen.add(elem)
            yield elem
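
# A small illustration: order is preserved, duplicates are dropped.
assert list(unique([1, 2, 1, 3, 2])) == [1, 2, 3]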

def even(sq: Iteratable) -> tp.Iterator[T]:
"""
Return only elements with even indices in this iterable (first element will be returned,
as indices are counted from 0)
"""
    sq = iter(sq)    # accept both iterators and plain iterables
    while True:
try:
yield next(sq)
next(sq)
except StopIteration:
            return

def odd(sq: Iteratable) -> tp.Iterator[T]:
"""
Return only elements with odd indices in this iterable.
"""
    sq = iter(sq)    # accept both iterators and plain iterables
    while True:
try:
next(sq)
yield next(sq)
except StopIteration:
            return
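
# A small illustration of even() and odd():
assert list(even(iter('abcde'))) == ['a', 'c', 'e']
assert list(odd(iter('abcde'))) == ['b', 'd']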

def iter_dict_of_list(dct: tp.Dict[T, tp.List[U]]) -> tp.Generator[tp.Tuple[T, U], None, None]:
"""
Presents a simple way to iterate over a dictionary whose values are lists.
This will return the dictionary key and each of the value contained in the list attached to
the key.
"""
for key, items in dct.items():
for item in items:
            yield key, item
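
# A small illustration:
assert list(iter_dict_of_list({'a': [1, 2], 'b': [3]})) == \
    [('a', 1), ('a', 2), ('b', 3)]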

def other_sequence_no_longer_than(base_sequence: Iteratable,
other_sequence: Iteratable) -> tp.Iterator[T]:
"""
    Return every item in other_sequence, but limit its length to that of
    base_sequence.
    If other_sequence is shorter than base_sequence, the shorter one will be
    returned.
    :param base_sequence: sequence whose length should be taken
    :param other_sequence: sequence to output values from
"""
    base_sequence = iter(base_sequence)
    other_sequence = iter(other_sequence)
    while True:
try:
next(base_sequence)
yield next(other_sequence)
except StopIteration:
            return

def shift(iterable_: tp.Union[tp.Reversible[T], Iteratable],
shift_factor: int) -> tp.Iterator[T]:
"""
Return this sequence, but shifted by factor elements, so that elements will appear
sooner by factor.
Eg:
>>> assert list(shift([1,2, 3], 1)) == [2, 3, 1]
    Note that a negative shift will cause iterators to be read entirely into
    memory (converted internally to lists). This can be avoided by passing in a
    Reversible iterable.
:param iterable_: iterable to shift
    :param shift_factor: factor by which to shift elements
:return: shifted sequence
"""
if shift_factor >= 0:
iterator = iter(iterable_)
elements = []
for i in range(shift_factor):
elements.append(next(iterator))
return itertools.chain(iterator, elements)
else:
if hasattr(iterable_, '__reversed__'):
elements = take_n(reversed(iterable_), -shift_factor)
elements = reversed(elements)
return other_sequence_no_longer_than(iterable_, itertools.chain(elements, iterable_))
else:
iterator = list(iterable_)
iterator = iterator[shift_factor:] + iterator[:shift_factor] # shift's already negative
        return iterator

def zip_shifted(*args: tp.Union[Iteratable, tp.Tuple[Iteratable, int]]) -> \
tp.Iterator[tp.Tuple[T, ...]]:
"""
    Construct an iterator, just like zip, but first cycling each iterable by its
    shift factor. Elements will be shifted by a certain factor, meaning that they
    will appear earlier.
Example:
>>> zip_shifted(([1, 2, 3, 4], 1), ([1, 2, 3, 4], 0)) == [(2, 1), (3, 2), (4, 3), (1, 4)]
This will work on arbitrary iterators and iterables.
Shift can be negative, in which case the last elements will appear sooner, eg.
>>> zip_shifted(([1, 2, 3, 4], -1), ([1, 2, 3, 4], 0)) == [(4, 1), (1, 2), (2, 3), (3, 4)]
Same memory considerations as :func:`shift` apply.
The resulting iterator will be as long as the shortest sequence.
.. deprecated:: 2.14.22
Use `zip(shift(...))` instead
:param args: a tuple with the iterator/iterable and amount of shift. If a non-tuple is given,
it is assumed that the shift is zero.
"""
warnings.warn('This is deprecated and will be removed in Satella 3.0. '
'Use zip(shift(...)) instead!', DeprecationWarning)
    iterators = []  # type: tp.List[tp.Union[tp.Tuple[tp.Iterator[T], tp.List[T]], tp.Iterator[T]]]
for row in args:
if not isinstance(row, tuple):
iterators.append(row)
else:
iterable, shift_factor = row
iterators.append(shift(iterable, shift_factor))
    return zip(*iterators)

def skip_first(iterator: Iteratable, n: int) -> tp.Iterator[T]:
"""
Skip first n elements from given iterator.
Returned iterator may be empty, if source iterator is shorter or equal to n.
.. deprecated:: 2.14.22
Use `itertools.islice` instead
"""
warnings.warn('This is deprecated and will be removed in Satella 3.0. '
'Please use itertools.islice instead', DeprecationWarning)
for i in range(n):
next(iterator)
    yield from iterator

def exhaust(self) -> None:
"""
Load all elements of this iterator into memory.
"""
if self.exhausted:
return
for elem in self.iterator:
self.list.append(elem)
    self.exhausted = True

def advance_to_item(self, i: int) -> None:
"""
    Make the internal list be at least i elements in size
"""
if self.exhausted:
return
while len(self.list) < i:
try:
self.list.append(next(self.iterator))
except StopIteration:
self.exhausted = True
                return

def stop_after(iterator: Iteratable, n: int) -> tp.Iterator[T]:
"""
    Stop this iterator after returning n elements, even if it's longer than that.
    The resulting iterator may be shorter than n, if the source iterator is so.
.. deprecated:: 2.14.22
Use `itertools.islice` instead
:param iterator: iterator or iterable to examine
:param n: elements to return
"""
warnings.warn('This is deprecated and will be removed in Satella 3.0. '
'Please use itertools.islice instead', DeprecationWarning)
for i in range(n):
        yield next(iterator)

def n_th(iterator: Iteratable, n: int = 0) -> T:
"""
Obtain n-th element (counting from 0) of an iterable
:param iterator: iterable to process
:param n: element to return. Note that we're counting from 0
:raises IndexError: iterable was too short
"""
    iterator = iter(iterator)    # accept both iterators and plain iterables
    try:
for i in range(n):
next(iterator)
return next(iterator)
except (StopIteration, GeneratorExit):
        raise IndexError('Iterable was too short')
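
# A small illustration - indices count from zero:
assert n_th(iter('abc'), 1) == 'b'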

def is_empty(iterable: Iteratable, exhaust: bool = True) -> bool:
"""
Checks whether an iterator is empty.
This will exhaust the iterator if exhaust is left at default, or True
:param iterable: iterator to check
:param exhaust: if set to False, at most a single element will be consumed
from the iterator
:return: whether the iterator is empty
"""
iterator = iter(iterable)
if exhaust:
i = 0
for _ in iterator:
i += 1
return i == 0
    else:
        try:
            next(iterator)
        except StopIteration:
            return True
        return False
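
# A small illustration:
assert is_empty(iter(()))
assert not is_empty([1, 2], exhaust=False)  # consumes at most one element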

def to_iterator(fun):
"""
Convert function to an iterator. You can replace the following code:
>>> def iterator(x):
>>> for y in x:
>>> yield fun(y)
with
>>> @to_iterator
>>> def fun(y):
>>> ...
    and now call fun instead of iterator. The wrapped fun will accept a single
    argument - the iterable - and it is assumed that the function you decorate
    also takes a single argument - the item
@wraps(fun)
def inner(iterable):
for item in iterable:
yield fun(item)
    return inner

def smart_zip(*iterators: Iteratable) -> tp.Iterator[tp.Tuple[T, ...]]:
"""
    Zip in such a way that the resulting tuples are automatically flattened.
Ie:
>>> b = list(smart_zip([(1, 1), (1, 2)], [1, 2]))
>>> assert b == [(1, 1, 1), (1, 2, 2)]
    Note that an element of a zipped iterator must be a tuple (i.e. isinstance
    of tuple) in order to be flattened into the resulting element; any other
    element is appended as-is!
:param iterators: list of iterators to zip together
:return: an iterator zipping the arguments in a smart way
"""
for row in zip(*iterators):
a = []
for elem in row:
if isinstance(elem, tuple):
a.extend(elem)
else:
a.append(elem)
        yield tuple(a)

def smart_enumerate(iterator: Iteratable, start: int = 0,
step: int = 1) -> tp.Iterator[tp.Tuple]:
"""
An enumerate that talks pretty with lists of tuples. Consider
>>> a = [(1, 2), (3, 4), (5, 6)]
>>> for i, b in enumerate(a):
>>> c, d = b
>>> ...
This function allows you just to write:
    >>> for i, c, d in smart_enumerate(a):
>>> ...
    Note that elements in your iterable must be either a list or a tuple for that
    to work, or must be able to be coerced to a tuple. Otherwise, TypeError will
    be thrown.
:param iterator: iterator to enumerate
:param start: value to start counting at
:param step: step to advance the enumeration with
:raise TypeError: could not coerce the elements in your iterable to a tuple
"""
i = start
for row in iterator:
if isinstance(row, tuple):
yield (i,) + row
else:
yield (i,) + tuple(row)
        i += step

def take_n(iterator: Iteratable, n: int, skip: int = 0) -> tp.List[T]:
"""
Take (first) n elements of an iterator, or the entire iterator, whichever comes first
:param iterator: iterator to take from
:param n: amount of elements to take
:param skip: elements from the start to skip
    :return: a list of length n (or shorter)
"""
for i in range(skip):
next(iterator)
output = []
for i in range(n):
try:
output.append(next(iterator))
except StopIteration:
return output
    return output
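
# A small illustration:
assert take_n(iter(range(10)), 3) == [0, 1, 2]
assert take_n(iter(range(10)), 3, skip=8) == [8, 9]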

def register_custom_descriptor(name: str, is_plain: bool = True):
"""
A decorator used for registering custom descriptors in order to be loadable via
descriptor_from_dict
Use like:
>>> @register_custom_descriptor('ipv6')
>>> class IPv6(Regexp):
>>> REGEXP = '(([0-9a-f]{1,4}:)' ...
    :param name: the name under which it is supposed to be invokable
    :param is_plain: whether this is a plain entry (as opposed to a nested
        structure)
"""
def inner(cls):
global BASE_LOOKUP_TABLE, PLAIN_ENTRIES
if is_plain:
PLAIN_ENTRIES.add(name)
BASE_LOOKUP_TABLE[name] = cls
return cls
    return inner

def is_valid_schema(self, schema: tp.Optional[tp.Union[Descriptor, tp.Dict]] = None,
**kwarg_schema) -> bool:
"""
Check if this dictionary conforms to particular schema.
Schema is either a Descriptor, or a JSON-based schema. See satella.configuration.schema
for details.
Schema can be passed as well using kwargs. Note that the schema argument will be ignored
if kwargs are passed.
:param schema: schema to verify against
    :return: whether this dictionary conforms to the schema
"""
if kwarg_schema:
schema = kwarg_schema
if isinstance(schema, Descriptor):
descriptor = schema
else:
descriptor = descriptor_from_dict(schema)
try:
descriptor(self)
except ConfigurationValidationError:
return False
else:
        return True
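
# A usage sketch, assuming this is a method of satella's DictObject and that
# plain descriptors may be named by strings, as in satella.configuration.schema:
# dct = DictObject({'a': 5, 'b': 'text'})
# assert dct.is_valid_schema(a='int', b='str')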

def apply_dict_object(v: tp.Union[tp.Any, tp.Dict]) -> tp.Union[DictObject, tp.Any]:
"""
Apply DictObject() to every dict inside v.
    This assumes that the only things that will be touched are nested dicts and
    lists. A value that is neither a dict nor a list will be returned as is.
"""
if isinstance(v, DictObject):
return v
elif isinstance(v, list):
return [apply_dict_object(x) for x in v]
elif isinstance(v, dict):
return DictObject({
k: apply_dict_object(val) for k, val in v.items()
})
else:
        return v
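
# A small illustration, assuming DictObject permits attribute access to keys
# (as satella's DictObject does):
# obj = apply_dict_object({'a': [{'b': 5}]})
# assert obj.a[0].b == 5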

def assert_not_timeouted(self) -> None:
"""
    If the time elapsed exceeded the timeout, throw WouldWaitMore.
    Always returns normally if the timeout was not given
"""
if self.timeout is None:
return
if self.timeouted:
        raise WouldWaitMore('timeout exceeded')

def reset_and_start(self) -> None:
"""
Syntactic sugar for calling reset() and then start()
"""
self.reset()
    self.start()

def has_exceeded(self, value: float) -> bool:
"""
Return whether the timer has exceeded provided value.
.. deprecated:: 2.14.22
"""
warnings.warn('Use timeout parameter and timeouted property instead.',
PendingDeprecationWarning)
    return self() > value

def raise_if_exceeded(self, value: float, exc_class: tp.Type[Exception] = WouldWaitMore):
"""
Raise provided exception, with no arguments, if timer has clocked more than provided value.
If no exc_class is provided, WouldWaitMore will be raised by default.
.. deprecated:: 2.14.22
"""
warnings.warn('Use timeout parameter and assert_not_timeouted property instead.',
PendingDeprecationWarning)
if self.has_exceeded(value):
        raise exc_class()

def reset(self) -> None:
"""
Reset the counter, enabling it to start counting after a .stop() call.
This will put the counter in a STOPPED mode if it's running already.
"""
self.stopped_on = self.started_on = self.time_getter_callable()
    self.elapsed = 0

def start(self) -> None:
"""Start measuring time or resume measuring it"""
if not self.stop_on_stop:
raise TypeError('stop_on_stop is disabled for this counter!')
    if self.stopped_on is None:
        raise TypeError('the counter is already running!')
    # resume measuring: credit the time that had elapsed before stop()
    self.started_on = self.time_getter_callable() - self.elapsed
    self.stopped_on = None
    self.elapsed = None

def percentile(n: tp.List[float], percent: float) -> float:
"""
Find the percentile of a list of values.
:param n: - is a list of values. Note this MUST BE already sorted.
:param percent: - a float value from 0.0 to 1.0.
:return: the percentile of the values
"""
k = (len(n) - 1) * percent
f = math.floor(k)
c = math.ceil(k)
if f == c:
return n[int(k)]
d0 = n[int(f)] * (c - k)
d1 = n[int(c)] * (k - f)
    return d0 + d1
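
# A small illustration: the median of an even-length sorted list interpolates.
assert percentile([1.0, 2.0, 3.0, 4.0], 0.5) == 2.5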