index (int64, 0-731k) | package (string, 2-98, nullable) | name (string, 1-76) | docstring (string, 0-281k, nullable) | code (string, 4-1.07M, nullable) | signature (string, 2-42.8k, nullable)
---|---|---|---|---|---|
24,767 | schedula.utils.dsp | __rfloordiv__ | null | def __rfloordiv__(self, other):
other = isinstance(other, self.__class__) and other or (other, other)
return inf(*(x // y for x, y in zip(other, self)))
| (self, other) |
24,768 | schedula.utils.dsp | __rmod__ | null | def __rmod__(self, other):
other = isinstance(other, self.__class__) and other or (other, other)
return inf(*(x % y for x, y in zip(other, self)))
| (self, other) |
24,770 | schedula.utils.dsp | __round__ | null | def __round__(self, n=None):
return inf(*(round(x, n) for x in self))
| (self, n=None) |
24,771 | schedula.utils.dsp | __rpow__ | null | def __rpow__(self, other):
other = isinstance(other, self.__class__) and other or (other, other)
return inf(*(x ** y for x, y in zip(other, self)))
| (self, other) |
24,772 | schedula.utils.dsp | __rsub__ | null | def __rsub__(self, other):
other = isinstance(other, self.__class__) and other or (0, other)
return inf(*(x - y for x, y in zip(other, self)))
| (self, other) |
24,773 | schedula.utils.dsp | __rtruediv__ | null | def __rtruediv__(self, other):
other = isinstance(other, self.__class__) and other or (other, other)
return inf(*(x / y for x, y in zip(other, self)))
| (self, other) |
24,775 | schedula.utils.dsp | __sub__ | null | def __sub__(self, other):
other = isinstance(other, self.__class__) and other or (0, other)
return inf(*(x - y for x, y in zip(self, other)))
| (self, other) |
24,776 | schedula.utils.dsp | __truediv__ | null | def __truediv__(self, other):
other = isinstance(other, self.__class__) and other or (other, other)
return inf(*(x / y for x, y in zip(self, other)))
| (self, other) |
24,777 | schedula.utils.dsp | __trunc__ | null | def __trunc__(self):
return inf(*(map(math.trunc, self)))
| (self) |
24,778 | schedula.utils.dsp | format | null | @staticmethod
def format(val):
if not isinstance(val, tuple):
val = 0, val
return inf(*val)
| (val) |
24,779 | schedula.utils.dsp | kk_dict |
Merges keys and dictionaries into one dict, mapping bare keys to themselves.
:param kk:
A sequence of keys and/or dictionaries.
:type kk: object | dict, optional
:param adict:
A dictionary.
:type adict: dict, optional
:return:
Merged dictionary.
:rtype: dict
Example::
>>> sorted(kk_dict('a', 'b', 'c').items())
[('a', 'a'), ('b', 'b'), ('c', 'c')]
>>> sorted(kk_dict('a', 'b', **{'a-c': 'c'}).items())
[('a', 'a'), ('a-c', 'c'), ('b', 'b')]
>>> sorted(kk_dict('a', {'b': 'c'}, 'c').items())
[('a', 'a'), ('b', 'c'), ('c', 'c')]
>>> sorted(kk_dict('a', 'b', **{'b': 'c'}).items())
Traceback (most recent call last):
...
ValueError: keyword argument repeated (b)
>>> sorted(kk_dict({'a': 0, 'b': 1}, **{'b': 2, 'a': 3}).items())
Traceback (most recent call last):
...
ValueError: keyword argument repeated (a, b)
| def kk_dict(*kk, **adict):
"""
Merges keys and dictionaries into one dict, mapping bare keys to themselves.
:param kk:
A sequence of keys and/or dictionaries.
:type kk: object | dict, optional
:param adict:
A dictionary.
:type adict: dict, optional
:return:
Merged dictionary.
:rtype: dict
Example::
>>> sorted(kk_dict('a', 'b', 'c').items())
[('a', 'a'), ('b', 'b'), ('c', 'c')]
>>> sorted(kk_dict('a', 'b', **{'a-c': 'c'}).items())
[('a', 'a'), ('a-c', 'c'), ('b', 'b')]
>>> sorted(kk_dict('a', {'b': 'c'}, 'c').items())
[('a', 'a'), ('b', 'c'), ('c', 'c')]
>>> sorted(kk_dict('a', 'b', **{'b': 'c'}).items())
Traceback (most recent call last):
...
ValueError: keyword argument repeated (b)
>>> sorted(kk_dict({'a': 0, 'b': 1}, **{'b': 2, 'a': 3}).items())
Traceback (most recent call last):
...
ValueError: keyword argument repeated (a, b)
"""
for k in kk:
if isinstance(k, dict):
if any(i in adict for i in k):
k = ', '.join(sorted(set(k).intersection(adict)))
raise ValueError('keyword argument repeated ({})'.format(k))
adict.update(k)
elif k in adict:
raise ValueError('keyword argument repeated ({})'.format(k))
else:
adict[k] = k
return adict
| (*kk, **adict) |
24,780 | schedula.utils.io | load_default_values |
Load Dispatcher default values in Python pickle format.
Pickles are a serialized byte stream of a Python object.
This format will preserve Python objects used as nodes or edges.
:param dsp:
A dispatcher that identifies the model adopted.
:type dsp: schedula.Dispatcher
:param path:
File or filename to read.
File names ending in .gz or .bz2 will be uncompressed.
:type path: str, file
.. testsetup::
>>> from tempfile import mkstemp
>>> file_name = mkstemp()[1]
Example::
>>> from schedula import Dispatcher
>>> dsp = Dispatcher()
>>> dsp.add_data('a', default_value=1)
'a'
>>> dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> save_default_values(dsp, file_name)
>>> dsp = Dispatcher(dmap=dsp.dmap)
>>> load_default_values(dsp, file_name)
>>> dsp.dispatch(inputs={'b': 3})['c']
3
| def load_default_values(dsp, path):
"""
Load Dispatcher default values in Python pickle format.
Pickles are a serialized byte stream of a Python object.
This format will preserve Python objects used as nodes or edges.
:param dsp:
A dispatcher that identifies the model adopted.
:type dsp: schedula.Dispatcher
:param path:
File or filename to read.
File names ending in .gz or .bz2 will be uncompressed.
:type path: str, file
.. testsetup::
>>> from tempfile import mkstemp
>>> file_name = mkstemp()[1]
Example::
>>> from schedula import Dispatcher
>>> dsp = Dispatcher()
>>> dsp.add_data('a', default_value=1)
'a'
>>> dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> save_default_values(dsp, file_name)
>>> dsp = Dispatcher(dmap=dsp.dmap)
>>> load_default_values(dsp, file_name)
>>> dsp.dispatch(inputs={'b': 3})['c']
3
"""
import dill
# noinspection PyArgumentList
with open(path, 'rb') as f:
dsp.__init__(dmap=dsp.dmap, default_values=dill.load(f))
| (dsp, path) |
24,781 | schedula.utils.io | load_dispatcher |
Load Dispatcher object in Python pickle format.
Pickles are a serialized byte stream of a Python object.
This format will preserve Python objects used as nodes or edges.
:param path:
File or filename to read.
File names ending in .gz or .bz2 will be uncompressed.
:type path: str, file
:return:
A dispatcher that identifies the model adopted.
:rtype: schedula.Dispatcher
.. testsetup::
>>> from tempfile import mkstemp
>>> file_name = mkstemp()[1]
Example::
>>> from schedula import Dispatcher
>>> dsp = Dispatcher()
>>> dsp.add_data('a', default_value=1)
'a'
>>> dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> save_dispatcher(dsp, file_name)
>>> dsp = load_dispatcher(file_name)
>>> dsp.dispatch(inputs={'b': 3})['c']
3
| def load_dispatcher(path):
"""
Load Dispatcher object in Python pickle format.
Pickles are a serialized byte stream of a Python object.
This format will preserve Python objects used as nodes or edges.
:param path:
File or filename to read.
File names ending in .gz or .bz2 will be uncompressed.
:type path: str, file
:return:
A dispatcher that identifies the model adopted.
:rtype: schedula.Dispatcher
.. testsetup::
>>> from tempfile import mkstemp
>>> file_name = mkstemp()[1]
Example::
>>> from schedula import Dispatcher
>>> dsp = Dispatcher()
>>> dsp.add_data('a', default_value=1)
'a'
>>> dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> save_dispatcher(dsp, file_name)
>>> dsp = load_dispatcher(file_name)
>>> dsp.dispatch(inputs={'b': 3})['c']
3
"""
import dill
# noinspection PyArgumentList
with open(path, 'rb') as f:
return dill.load(f)
| (path) |
24,782 | schedula.utils.io | load_map |
Load Dispatcher map in Python pickle format.
:param dsp:
A dispatcher that identifies the model to be upgraded.
:type dsp: schedula.schedula.Dispatcher
:param path:
File or filename to read.
File names ending in .gz or .bz2 will be uncompressed.
:type path: str, file
.. testsetup::
>>> from tempfile import mkstemp
>>> file_name = mkstemp()[1]
Example::
>>> from schedula import Dispatcher
>>> dsp = Dispatcher()
>>> dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> save_map(dsp, file_name)
>>> dsp = Dispatcher()
>>> load_map(dsp, file_name)
>>> dsp.dispatch(inputs={'a': 1, 'b': 3})['c']
3
| def load_map(dsp, path):
"""
Load Dispatcher map in Python pickle format.
:param dsp:
A dispatcher that identifies the model to be upgraded.
:type dsp: schedula.schedula.Dispatcher
:param path:
File or filename to read.
File names ending in .gz or .bz2 will be uncompressed.
:type path: str, file
.. testsetup::
>>> from tempfile import mkstemp
>>> file_name = mkstemp()[1]
Example::
>>> from schedula import Dispatcher
>>> dsp = Dispatcher()
>>> dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> save_map(dsp, file_name)
>>> dsp = Dispatcher()
>>> load_map(dsp, file_name)
>>> dsp.dispatch(inputs={'a': 1, 'b': 3})['c']
3
"""
import dill
with open(path, 'rb') as f:
dsp.__init__(dmap=dill.load(f), default_values=dsp.default_values)
| (dsp, path) |
24,783 | schedula.utils.dsp | map_dict |
Returns a dict with new key values.
:param key_map:
A dictionary that maps the dict keys ({old key: new key}).
:type key_map: dict
:param dicts:
A sequence of dicts.
:type dicts: dict
:param copy:
If True, it returns a deepcopy of input values.
:type copy: bool, optional
:param base:
Base dict into which multiple dicts are combined.
:type base: dict, optional
:return:
A unique dict with new key values.
:rtype: dict
Example::
>>> d = map_dict({'a': 'c', 'b': 'd'}, {'a': 1, 'b': 1}, {'b': 2})
>>> sorted(d.items())
[('c', 1), ('d', 2)]
| def map_dict(key_map, *dicts, copy=False, base=None):
"""
Returns a dict with new key values.
:param key_map:
A dictionary that maps the dict keys ({old key: new key}).
:type key_map: dict
:param dicts:
A sequence of dicts.
:type dicts: dict
:param copy:
If True, it returns a deepcopy of input values.
:type copy: bool, optional
:param base:
Base dict into which multiple dicts are combined.
:type base: dict, optional
:return:
A unique dict with new key values.
:rtype: dict
Example::
>>> d = map_dict({'a': 'c', 'b': 'd'}, {'a': 1, 'b': 1}, {'b': 2})
>>> sorted(d.items())
[('c', 1), ('d', 2)]
"""
it = combine_dicts(*dicts).items() # Combine dicts.
get = key_map.get # Namespace shortcut.
# Return mapped dict.
return combine_dicts({get(k, k): v for k, v in it}, copy=copy, base=base)
| (key_map, *dicts, copy=False, base=None) |
24,784 | schedula.utils.dsp | map_list |
Returns a new dict.
:param key_map:
A list that maps the dict keys ({old key: new key}).
:type key_map: list[str | dict | list]
:param inputs:
A sequence of data.
:type inputs: iterable | dict | int | float | list | tuple
:param copy:
If True, it returns a deepcopy of input values.
:type copy: bool, optional
:param base:
Base dict into which multiple dicts are combined.
:type base: dict, optional
:return:
A unique dict with new values.
:rtype: dict
Example::
>>> key_map = [
... 'a',
... {'a': 'c'},
... [
... 'a',
... {'a': 'd'}
... ]
... ]
>>> inputs = (
... 2,
... {'a': 3, 'b': 2},
... [
... 1,
... {'a': 4}
... ]
... )
>>> d = map_list(key_map, *inputs)
>>> sorted(d.items())
[('a', 1), ('b', 2), ('c', 3), ('d', 4)]
| def map_list(key_map, *inputs, copy=False, base=None):
"""
Returns a new dict.
:param key_map:
A list that maps the dict keys ({old key: new key}).
:type key_map: list[str | dict | list]
:param inputs:
A sequence of data.
:type inputs: iterable | dict | int | float | list | tuple
:param copy:
If True, it returns a deepcopy of input values.
:type copy: bool, optional
:param base:
Base dict into which multiple dicts are combined.
:type base: dict, optional
:return:
A unique dict with new values.
:rtype: dict
Example::
>>> key_map = [
... 'a',
... {'a': 'c'},
... [
... 'a',
... {'a': 'd'}
... ]
... ]
>>> inputs = (
... 2,
... {'a': 3, 'b': 2},
... [
... 1,
... {'a': 4}
... ]
... )
>>> d = map_list(key_map, *inputs)
>>> sorted(d.items())
[('a', 1), ('b', 2), ('c', 3), ('d', 4)]
"""
d = {} if base is None else base # Initialize empty dict.
for m, v in zip(key_map, inputs):
if isinstance(m, dict):
map_dict(m, v, base=d) # Apply a map dict.
elif isinstance(m, list):
map_list(m, *v, base=d) # Apply a map list.
else:
d[m] = v # Apply map.
return combine_dicts(copy=copy, base=d) # Return dict.
| (key_map, *inputs, copy=False, base=None) |
24,785 | schedula.utils.dsp | parent_func |
Return the parent function of a wrapped function (wrapped with
:class:`functools.partial` and :class:`add_args`).
:param func:
Wrapped function.
:type func: callable
:param input_id:
Index of the first input of the wrapped function.
:type input_id: int
:return:
Parent function.
:rtype: callable
| def parent_func(func, input_id=None):
"""
Return the parent function of a wrapped function (wrapped with
:class:`functools.partial` and :class:`add_args`).
:param func:
Wrapped function.
:type func: callable
:param input_id:
Index of the first input of the wrapped function.
:type input_id: int
:return:
Parent function.
:rtype: callable
"""
if isinstance(func, add_args):
if input_id is not None:
input_id -= func.n
return parent_func(func.func, input_id=input_id)
elif isinstance(func, partial):
if input_id is not None:
# noinspection PyTypeChecker
input_id += len(func.args)
return parent_func(func.func, input_id=input_id)
if input_id is None:
return func
else:
return func, input_id
| (func, input_id=None) |
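A minimal doctest-style sketch (assuming only :class:`functools.partial` wrapping, with no `add_args` layer)::
    >>> from functools import partial
    >>> wrapped = partial(partial(max, 2), 3)
    >>> parent_func(wrapped) is max
    True
    >>> parent_func(wrapped, input_id=0)
    (<built-in function max>, 2)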
24,787 | schedula.utils.asy | register_executor |
Register a new executor type.
:param name:
Executor name.
:type name: str
:param init:
Function to initialize the executor.
:type init: callable
:param executors:
Executor factory.
:type executors: ExecutorFactory
| def register_executor(name, init, executors=None):
"""
Register a new executor type.
:param name:
Executor name.
:type name: str
:param init:
Function to initialize the executor.
:type init: callable
:param executors:
Executor factory.
:type executors: ExecutorFactory
"""
if executors is None:
executors = EXECUTORS
executors[name] = init
| (name, init, executors=None) |
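A minimal sketch of the registration mechanics; a plain dict stands in for the `ExecutorFactory` mapping here, which works only because registration is a bare item assignment::
    >>> factory = {}
    >>> register_executor('my_pool', lambda *a, **kw: None, executors=factory)
    >>> 'my_pool' in factory
    True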
24,788 | schedula.utils.dsp | replicate_value |
Replicates the input value `n` times.
:param n:
Number of replications.
:type n: int
:param value:
Value to be replicated.
:type value: T
:param copy:
If True the list contains deep-copies of the value.
:type copy: bool
:return:
A tuple with the value replicated `n` times (as in the example below).
:rtype: tuple
Example::
>>> import schedula as sh
>>> fun = sh.partial(replicate_value, n=5)
>>> fun({'a': 3})
({'a': 3}, {'a': 3}, {'a': 3}, {'a': 3}, {'a': 3})
| def replicate_value(value, n=2, copy=True):
"""
Replicates the input value `n` times.
:param n:
Number of replications.
:type n: int
:param value:
Value to be replicated.
:type value: T
:param copy:
If True the list contains deep-copies of the value.
:type copy: bool
:return:
A tuple with the value replicated `n` times (as in the example below).
:rtype: tuple
Example::
>>> import schedula as sh
>>> fun = sh.partial(replicate_value, n=5)
>>> fun({'a': 3})
({'a': 3}, {'a': 3}, {'a': 3}, {'a': 3}, {'a': 3})
"""
return bypass(*[value] * n, copy=copy) # Return replicated values.
| (value, n=2, copy=True) |
24,789 | schedula.utils.dsp | run_model |
A utility to execute dynamically generated functions/models and, if
Dispatcher based, add their workflows to the parent solution.
:return:
A function that executes the dispatch of the given `dsp`.
:rtype: callable
**Example**:
A simple example of how to use
:func:`~schedula.utils.dsp.run_model`:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> from schedula import Dispatcher
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function(
... function_id='execute_dsp', function=run_model,
... inputs=['dsp_model', 'inputs'], outputs=['outputs']
... )
'execute_dsp'
>>> dsp_model = Dispatcher(name='Model')
>>> dsp_model.add_function('max', max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> sol = dsp({'dsp_model': dsp_model, 'inputs': {'b': 1, 'a': 2}})
>>> sol['outputs']
Solution([('a', 2), ('b', 1), ('c', 2)])
>>> sol.workflow.nodes['execute_dsp']['solution']
Solution([('a', 2), ('b', 1), ('c', 2)])
Moreover, it can also be used with all
:func:`~schedula.utils.dsp.SubDispatcher`-like objects::
>>> sub_dsp = SubDispatch(dsp_model, outputs=['c'], output_type='list')
>>> sol = dsp({'dsp_model': sub_dsp, 'inputs': {'b': 1, 'a': 2}})
>>> sol['outputs']
[2]
>>> sol.workflow.nodes['execute_dsp']['solution']
Solution([('a', 2), ('b', 1), ('c', 2)])
| class run_model:
"""
A utility to execute dynamically generated functions/models and, if
Dispatcher based, add their workflows to the parent solution.
:return:
A function that executes the dispatch of the given `dsp`.
:rtype: callable
**Example**:
A simple example of how to use
:func:`~schedula.utils.dsp.run_model`:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> from schedula import Dispatcher
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function(
... function_id='execute_dsp', function=run_model,
... inputs=['dsp_model', 'inputs'], outputs=['outputs']
... )
'execute_dsp'
>>> dsp_model = Dispatcher(name='Model')
>>> dsp_model.add_function('max', max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> sol = dsp({'dsp_model': dsp_model, 'inputs': {'b': 1, 'a': 2}})
>>> sol['outputs']
Solution([('a', 2), ('b', 1), ('c', 2)])
>>> sol.workflow.nodes['execute_dsp']['solution']
Solution([('a', 2), ('b', 1), ('c', 2)])
Moreover, it can also be used with all
:func:`~schedula.utils.dsp.SubDispatcher`-like objects::
>>> sub_dsp = SubDispatch(dsp_model, outputs=['c'], output_type='list')
>>> sol = dsp({'dsp_model': sub_dsp, 'inputs': {'b': 1, 'a': 2}})
>>> sol['outputs']
[2]
>>> sol.workflow.nodes['execute_dsp']['solution']
Solution([('a', 2), ('b', 1), ('c', 2)])
"""
def __init__(self, func, *args, _init=None, **kwargs):
from .blue import Blueprint
if isinstance(func, Blueprint):
func = func.register(memo={})
self.func = func
if _init:
args, kwargs = _init(*args, **kwargs)
self.args = args
self.kwargs = kwargs
def __call__(self, **kwargs):
return self.func(*self.args, **self.kwargs, **kwargs)
| (func, *args, _init=None, **kwargs) |
24,790 | schedula.utils.dsp | __call__ | null | def __call__(self, **kwargs):
return self.func(*self.args, **self.kwargs, **kwargs)
| (self, **kwargs) |
24,791 | schedula.utils.dsp | __init__ | null | def __init__(self, func, *args, _init=None, **kwargs):
from .blue import Blueprint
if isinstance(func, Blueprint):
func = func.register(memo={})
self.func = func
if _init:
args, kwargs = _init(*args, **kwargs)
self.args = args
self.kwargs = kwargs
| (self, func, *args, _init=None, **kwargs) |
24,792 | schedula.utils.io | save_default_values |
Write Dispatcher default values in Python pickle format.
Pickles are a serialized byte stream of a Python object.
This format will preserve Python objects used as nodes or edges.
:param dsp:
A dispatcher that identifies the model adopted.
:type dsp: schedula.Dispatcher
:param path:
File or filename to write.
File names ending in .gz or .bz2 will be compressed.
:type path: str, file
.. testsetup::
>>> from tempfile import mkstemp
>>> file_name = mkstemp()[1]
Example::
>>> from schedula import Dispatcher
>>> dsp = Dispatcher()
>>> dsp.add_data('a', default_value=1)
'a'
>>> dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> save_default_values(dsp, file_name)
| def save_default_values(dsp, path):
"""
Write Dispatcher default values in Python pickle format.
Pickles are a serialized byte stream of a Python object.
This format will preserve Python objects used as nodes or edges.
:param dsp:
A dispatcher that identifies the model adopted.
:type dsp: schedula.Dispatcher
:param path:
File or filename to write.
File names ending in .gz or .bz2 will be compressed.
:type path: str, file
.. testsetup::
>>> from tempfile import mkstemp
>>> file_name = mkstemp()[1]
Example::
>>> from schedula import Dispatcher
>>> dsp = Dispatcher()
>>> dsp.add_data('a', default_value=1)
'a'
>>> dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> save_default_values(dsp, file_name)
"""
import dill
with open(path, 'wb') as f:
dill.dump(dsp.default_values, f)
| (dsp, path) |
24,793 | schedula.utils.io | save_dispatcher |
Write Dispatcher object in Python pickle format.
Pickles are a serialized byte stream of a Python object.
This format will preserve Python objects used as nodes or edges.
:param dsp:
A dispatcher that identifies the model adopted.
:type dsp: schedula.Dispatcher
:param path:
File or filename to write.
File names ending in .gz or .bz2 will be compressed.
:type path: str, file
.. testsetup::
>>> from tempfile import mkstemp
>>> file_name = mkstemp()[1]
Example::
>>> from schedula import Dispatcher
>>> dsp = Dispatcher()
>>> dsp.add_data('a', default_value=1)
'a'
>>> dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> save_dispatcher(dsp, file_name)
| def save_dispatcher(dsp, path):
"""
Write Dispatcher object in Python pickle format.
Pickles are a serialized byte stream of a Python object.
This format will preserve Python objects used as nodes or edges.
:param dsp:
A dispatcher that identifies the model adopted.
:type dsp: schedula.Dispatcher
:param path:
File or filename to write.
File names ending in .gz or .bz2 will be compressed.
:type path: str, file
.. testsetup::
>>> from tempfile import mkstemp
>>> file_name = mkstemp()[1]
Example::
>>> from schedula import Dispatcher
>>> dsp = Dispatcher()
>>> dsp.add_data('a', default_value=1)
'a'
>>> dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> save_dispatcher(dsp, file_name)
"""
import dill
with open(path, 'wb') as f:
dill.dump(dsp, f)
| (dsp, path) |
24,794 | schedula.utils.io | save_map |
Write Dispatcher graph object in Python pickle format.
Pickles are a serialized byte stream of a Python object.
This format will preserve Python objects used as nodes or edges.
:param dsp:
A dispatcher that identifies the model adopted.
:type dsp: schedula.Dispatcher
:param path:
File or filename to write.
File names ending in .gz or .bz2 will be compressed.
:type path: str, file
.. testsetup::
>>> from tempfile import mkstemp
>>> file_name = mkstemp()[1]
Example::
>>> from schedula import Dispatcher
>>> dsp = Dispatcher()
>>> dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> save_map(dsp, file_name)
| def save_map(dsp, path):
"""
Write Dispatcher graph object in Python pickle format.
Pickles are a serialized byte stream of a Python object.
This format will preserve Python objects used as nodes or edges.
:param dsp:
A dispatcher that identifies the model adopted.
:type dsp: schedula.Dispatcher
:param path:
File or filename to write.
File names ending in .gz or .bz2 will be compressed.
:type path: str, file
.. testsetup::
>>> from tempfile import mkstemp
>>> file_name = mkstemp()[1]
Example::
>>> from schedula import Dispatcher
>>> dsp = Dispatcher()
>>> dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> save_map(dsp, file_name)
"""
import dill
with open(path, 'wb') as f:
dill.dump(dsp.dmap, f)
| (dsp, path) |
24,795 | schedula.utils.dsp | selector |
Selects the chosen keys from the given dictionary.
:param keys:
Keys to select.
:type keys: list, tuple, set
:param dictionary:
A dictionary.
:type dictionary: dict
:param copy:
If True the output contains deep-copies of the values.
:type copy: bool
:param output_type:
Type of function output:
+ 'list': a list with all values listed in `keys`.
+ 'dict': a dictionary with any outputs listed in `keys`.
+ 'values': if output length == 1 return a single value otherwise a
tuple with all values listed in `keys`.
:type output_type: str, optional
:param allow_miss:
If True it does not raise when some key is missing in the dictionary.
:type allow_miss: bool
:return:
The selected keys and their values, in the format specified by
`output_type`.
:rtype: dict
Example::
>>> import schedula as sh
>>> fun = sh.partial(selector, ['a', 'b'])
>>> sorted(fun({'a': 1, 'b': 2, 'c': 3}).items())
[('a', 1), ('b', 2)]
| def selector(keys, dictionary, copy=False, output_type='dict',
allow_miss=False):
"""
Selects the chosen keys from the given dictionary.
:param keys:
Keys to select.
:type keys: list, tuple, set
:param dictionary:
A dictionary.
:type dictionary: dict
:param copy:
If True the output contains deep-copies of the values.
:type copy: bool
:param output_type:
Type of function output:
+ 'list': a list with all values listed in `keys`.
+ 'dict': a dictionary with any outputs listed in `keys`.
+ 'values': if output length == 1 return a single value otherwise a
tuple with all values listed in `keys`.
:type output_type: str, optional
:param allow_miss:
If True it does not raise when some key is missing in the dictionary.
:type allow_miss: bool
:return:
The selected keys and their values, in the format specified by
`output_type`.
:rtype: dict
Example::
>>> import schedula as sh
>>> fun = sh.partial(selector, ['a', 'b'])
>>> sorted(fun({'a': 1, 'b': 2, 'c': 3}).items())
[('a', 1), ('b', 2)]
"""
if not allow_miss:
# noinspection PyUnusedLocal
def check(key):
return True
else:
def check(key):
return key in dictionary
if output_type == 'list': # Select as list.
res = [dictionary[k] for k in keys if check(k)]
return _copy.deepcopy(res) if copy else res
elif output_type == 'values':
return bypass(*[dictionary[k] for k in keys if check(k)], copy=copy)
# Select as dict.
return bypass({k: dictionary[k] for k in keys if check(k)}, copy=copy)
| (keys, dictionary, copy=False, output_type='dict', allow_miss=False) |
24,796 | schedula.utils.asy | shutdown_executor |
Clean up the resources associated with the Executor.
:param name:
Executor name.
:type name: str
:param sol_id:
Solution id.
:type sol_id: int
:param wait:
If True then shutdown will not return until all running futures have
finished executing and the resources used by the executor have been
reclaimed.
:type wait: bool
:param executors:
Executor factory.
:type executors: ExecutorFactory
:return:
Shutdown pool executor.
:rtype: dict[concurrent.futures.Future,Thread|Process]
| def shutdown_executor(name=EMPTY, sol_id=EMPTY, wait=True, executors=None):
"""
Clean up the resources associated with the Executor.
:param name:
Executor name.
:type name: str
:param sol_id:
Solution id.
:type sol_id: int
:param wait:
If True then shutdown will not return until all running futures have
finished executing and the resources used by the executor have been
reclaimed.
:type wait: bool
:param executors:
Executor factory.
:type executors: ExecutorFactory
:return:
Shutdown pool executor.
:rtype: dict[concurrent.futures.Future,Thread|Process]
"""
if executors is None:
executors = EXECUTORS
return executors.shutdown_executor(name, sol_id, wait)
| (name=empty, sol_id=empty, wait=True, executors=None) |
24,797 | schedula.utils.asy | shutdown_executors |
Clean up the resources of all initialized executors.
:param wait:
If True then shutdown will not return until all running futures have
finished executing and the resources used by the executors have been
reclaimed.
:type wait: bool
:param executors:
Executor factory.
:type executors: ExecutorFactory
:return:
Shutdown pool executor.
:rtype: dict[str,dict]
| def shutdown_executors(wait=True, executors=None):
"""
Clean up the resources of all initialized executors.
:param wait:
If True then shutdown will not return until all running futures have
finished executing and the resources used by the executors have been
reclaimed.
:type wait: bool
:param executors:
Executor factory.
:type executors: ExecutorFactory
:return:
Shutdown pool executor.
:rtype: dict[str,dict]
"""
return shutdown_executor(wait=wait, executors=executors)
| (wait=True, executors=None) |
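A hedged usage sketch; it assumes `shutdown_executors` is re-exported at the top-level `schedula` namespace, which should be verified against the installed version::
    import schedula as sh
    # ... dispatches that spawned executors have finished ...
    sh.shutdown_executors(wait=True)  # blocks until all executor resources are reclaimed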
24,798 | schedula.utils.dsp | stack_nested_keys |
Stacks the keys of nested dictionaries into tuples and yields
key-value pairs.
:param nested_dict:
Nested dictionary.
:type nested_dict: dict
:param key:
Initial keys.
:type key: tuple, optional
:param depth:
Maximum keys depth.
:type depth: int, optional
:return:
Key-value pairs.
:rtype: generator
| def stack_nested_keys(nested_dict, key=(), depth=-1):
"""
Stacks the keys of nested dictionaries into tuples and yields
key-value pairs.
:param nested_dict:
Nested dictionary.
:type nested_dict: dict
:param key:
Initial keys.
:type key: tuple, optional
:param depth:
Maximum keys depth.
:type depth: int, optional
:return:
Key-value pairs.
:rtype: generator
"""
if depth != 0 and hasattr(nested_dict, 'items'):
for k, v in nested_dict.items():
yield from stack_nested_keys(v, key=key + (k,), depth=depth - 1)
else:
yield key, nested_dict
| (nested_dict, key=(), depth=-1) |
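A short doctest derived directly from the implementation above::
    >>> d = {'a': {'b': 1, 'c': {'d': 2}}}
    >>> sorted(stack_nested_keys(d))
    [(('a', 'b'), 1), (('a', 'c', 'd'), 2)]
    >>> list(stack_nested_keys(d, depth=1))
    [(('a',), {'b': 1, 'c': {'d': 2}})]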
24,799 | schedula.utils.dsp | stlp |
Converts a string into a tuple; other values are returned unchanged.
| def stlp(s):
"""
Converts a string into a tuple; other values are returned unchanged.
"""
if isinstance(s, str):
return s,
return s
| (s) |
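Two doctest cases that follow directly from the branch above::
    >>> stlp('a')
    ('a',)
    >>> stlp(('a', 'b'))
    ('a', 'b')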
24,800 | schedula.utils.dsp | summation |
Sums input values.
:param inputs:
Input values.
:type inputs: int, float
:return:
Sum of the input values.
:rtype: int, float
Example::
>>> summation(1, 3.0, 4, 2)
10.0
| def summation(*inputs):
"""
Sums input values.
:param inputs:
Input values.
:type inputs: int, float
:return:
Sum of the input values.
:rtype: int, float
Example::
>>> summation(1, 3.0, 4, 2)
10.0
"""
# Return the sum of the input values.
return functools.reduce(lambda x, y: x + y, inputs)
| (*inputs) |
24,801 | frozenlist._frozenlist | FrozenList | FrozenList(items=None) | from frozenlist._frozenlist import FrozenList
| null |
24,821 | frozenlist | FrozenList | null | class FrozenList(MutableSequence):
__slots__ = ("_frozen", "_items")
if sys.version_info >= (3, 9):
__class_getitem__ = classmethod(types.GenericAlias)
else:
@classmethod
def __class_getitem__(cls: Type["FrozenList"]) -> Type["FrozenList"]:
return cls
def __init__(self, items=None):
self._frozen = False
if items is not None:
items = list(items)
else:
items = []
self._items = items
@property
def frozen(self):
return self._frozen
def freeze(self):
self._frozen = True
def __getitem__(self, index):
return self._items[index]
def __setitem__(self, index, value):
if self._frozen:
raise RuntimeError("Cannot modify frozen list.")
self._items[index] = value
def __delitem__(self, index):
if self._frozen:
raise RuntimeError("Cannot modify frozen list.")
del self._items[index]
def __len__(self):
return self._items.__len__()
def __iter__(self):
return self._items.__iter__()
def __reversed__(self):
return self._items.__reversed__()
def __eq__(self, other):
return list(self) == other
def __le__(self, other):
return list(self) <= other
def insert(self, pos, item):
if self._frozen:
raise RuntimeError("Cannot modify frozen list.")
self._items.insert(pos, item)
def __repr__(self):
return f"<FrozenList(frozen={self._frozen}, {self._items!r})>"
def __hash__(self):
if self._frozen:
return hash(tuple(self))
else:
raise RuntimeError("Cannot hash unfrozen list.")
| (items=None) |
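A short sketch of the freeze lifecycle implied by the class above (``append`` comes from :class:`MutableSequence`, which builds it on ``insert``)::
    >>> fl = FrozenList([1, 2])
    >>> fl.append(3)
    >>> fl.freeze()
    >>> fl.frozen
    True
    >>> hash(fl) == hash((1, 2, 3))
    True
    >>> fl.append(4)
    Traceback (most recent call last):
    ...
    RuntimeError: Cannot modify frozen list.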
24,823 | frozenlist | __delitem__ | null | def __delitem__(self, index):
if self._frozen:
raise RuntimeError("Cannot modify frozen list.")
del self._items[index]
| (self, index) |
24,824 | frozenlist | __eq__ | null | def __eq__(self, other):
return list(self) == other
| (self, other) |
24,825 | functools | __ge__ | Return a >= b. Computed by @total_ordering from (not a <= b) or (a == b). | def _ge_from_le(self, other, NotImplemented=NotImplemented):
'Return a >= b. Computed by @total_ordering from (not a <= b) or (a == b).'
op_result = type(self).__le__(self, other)
if op_result is NotImplemented:
return op_result
return not op_result or self == other
| (self, other, NotImplemented=NotImplemented) |
24,826 | frozenlist | __getitem__ | null | def __getitem__(self, index):
return self._items[index]
| (self, index) |
24,827 | functools | __gt__ | Return a > b. Computed by @total_ordering from (not a <= b). | def _gt_from_le(self, other, NotImplemented=NotImplemented):
'Return a > b. Computed by @total_ordering from (not a <= b).'
op_result = type(self).__le__(self, other)
if op_result is NotImplemented:
return op_result
return not op_result
| (self, other, NotImplemented=NotImplemented) |
24,828 | frozenlist | __hash__ | null | def __hash__(self):
if self._frozen:
return hash(tuple(self))
else:
raise RuntimeError("Cannot hash unfrozen list.")
| (self) |
24,830 | frozenlist | __init__ | null | def __init__(self, items=None):
self._frozen = False
if items is not None:
items = list(items)
else:
items = []
self._items = items
| (self, items=None) |
24,831 | frozenlist | __iter__ | null | def __iter__(self):
return self._items.__iter__()
| (self) |
24,832 | frozenlist | __le__ | null | def __le__(self, other):
return list(self) <= other
| (self, other) |
24,833 | frozenlist | __len__ | null | def __len__(self):
return self._items.__len__()
| (self) |
24,834 | functools | __lt__ | Return a < b. Computed by @total_ordering from (a <= b) and (a != b). | def _lt_from_le(self, other, NotImplemented=NotImplemented):
'Return a < b. Computed by @total_ordering from (a <= b) and (a != b).'
op_result = type(self).__le__(self, other)
if op_result is NotImplemented:
return op_result
return op_result and self != other
| (self, other, NotImplemented=NotImplemented) |
24,835 | frozenlist | __repr__ | null | def __repr__(self):
return f"<FrozenList(frozen={self._frozen}, {self._items!r})>"
| (self) |
24,836 | frozenlist | __reversed__ | null | def __reversed__(self):
return self._items.__reversed__()
| (self) |
24,837 | frozenlist | __setitem__ | null | def __setitem__(self, index, value):
if self._frozen:
raise RuntimeError("Cannot modify frozen list.")
self._items[index] = value
| (self, index, value) |
24,842 | frozenlist | freeze | null | def freeze(self):
self._frozen = True
| (self) |
24,844 | frozenlist | insert | null | def insert(self, pos, item):
if self._frozen:
raise RuntimeError("Cannot modify frozen list.")
self._items.insert(pos, item)
| (self, pos, item) |
24,851 | functools | total_ordering | Class decorator that fills in missing ordering methods | def total_ordering(cls):
"""Class decorator that fills in missing ordering methods"""
# Find user-defined comparisons (not those inherited from object).
roots = {op for op in _convert if getattr(cls, op, None) is not getattr(object, op, None)}
if not roots:
raise ValueError('must define at least one ordering operation: < > <= >=')
root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__
for opname, opfunc in _convert[root]:
if opname not in roots:
opfunc.__name__ = opname
setattr(cls, opname, opfunc)
return cls
| (cls) |
24,853 | aiohappyeyeballs.utils | addr_to_addr_infos | Convert an address tuple to a list of addr_info tuples. | def addr_to_addr_infos(
addr: Optional[
Union[Tuple[str, int, int, int], Tuple[str, int, int], Tuple[str, int]]
]
) -> Optional[List[AddrInfoType]]:
"""Convert an address tuple to a list of addr_info tuples."""
if addr is None:
return None
host = addr[0]
port = addr[1]
is_ipv6 = ":" in host
if is_ipv6:
flowinfo = 0
scopeid = 0
addr_len = len(addr)
if addr_len >= 4:
scopeid = addr[3] # type: ignore[misc]
if addr_len >= 3:
flowinfo = addr[2] # type: ignore[misc]
addr = (host, port, flowinfo, scopeid)
family = socket.AF_INET6
else:
addr = (host, port)
family = socket.AF_INET
return [(family, socket.SOCK_STREAM, socket.IPPROTO_TCP, "", addr)]
| (addr: Union[Tuple[str, int, int, int], Tuple[str, int, int], Tuple[str, int], NoneType]) -> Optional[List[Tuple[Union[int, socket.AddressFamily], Union[int, socket.SocketKind], int, str, Tuple]]] |
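A quick sketch of both branches of the function above (enum reprs abbreviated in the comments)::
    addr_to_addr_infos(("127.0.0.1", 80))
    # -> [(AF_INET, SOCK_STREAM, IPPROTO_TCP, '', ('127.0.0.1', 80))]
    addr_to_addr_infos(("::1", 443))
    # -> [(AF_INET6, SOCK_STREAM, IPPROTO_TCP, '', ('::1', 443, 0, 0))]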
24,855 | aiohappyeyeballs.utils | pop_addr_infos_interleave |
Pop addr_info from the list of addr_infos by family up to interleave times.
The interleave parameter is used to know how many addr_infos for
each family should be popped off the top of the list.
| def pop_addr_infos_interleave(
addr_infos: List[AddrInfoType], interleave: Optional[int] = None
) -> None:
"""
Pop addr_info from the list of addr_infos by family up to interleave times.
The interleave parameter is used to know how many addr_infos for
each family should be popped off the top of the list.
"""
seen: Dict[int, int] = {}
if interleave is None:
interleave = 1
to_remove: List[AddrInfoType] = []
for addr_info in addr_infos:
family = addr_info[0]
if family not in seen:
seen[family] = 0
if seen[family] < interleave:
to_remove.append(addr_info)
seen[family] += 1
for addr_info in to_remove:
addr_infos.remove(addr_info)
| (addr_infos: List[Tuple[Union[int, socket.AddressFamily], Union[int, socket.SocketKind], int, str, Tuple]], interleave: Optional[int] = None) -> NoneType |
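A small worked example; since only the address-family slot (index 0) is inspected, the tuples are abbreviated for brevity::
    import socket
    infos = [(socket.AF_INET6,), (socket.AF_INET6,), (socket.AF_INET,)]
    pop_addr_infos_interleave(infos, interleave=1)
    # one AF_INET6 entry and the AF_INET entry are popped in place;
    # infos == [(socket.AF_INET6,)]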
24,856 | aiohappyeyeballs.utils | remove_addr_infos |
Remove an address from the list of addr_infos.
The addr value is typically the return value of
sock.getpeername().
| def remove_addr_infos(
addr_infos: List[AddrInfoType],
addr: Union[Tuple[str, int], Tuple[str, int, int, int]],
) -> None:
"""
Remove an address from the list of addr_infos.
The addr value is typically the return value of
sock.getpeername().
"""
bad_addrs_infos: List[AddrInfoType] = []
for addr_info in addr_infos:
if addr_info[-1] == addr:
bad_addrs_infos.append(addr_info)
if bad_addrs_infos:
for bad_addr_info in bad_addrs_infos:
addr_infos.remove(bad_addr_info)
return
# Slow path in case addr is formatted differently
match_addr = _addr_tuple_to_ip_address(addr)
for addr_info in addr_infos:
if match_addr == _addr_tuple_to_ip_address(addr_info[-1]):
bad_addrs_infos.append(addr_info)
if bad_addrs_infos:
for bad_addr_info in bad_addrs_infos:
addr_infos.remove(bad_addr_info)
return
raise ValueError(f"Address {addr} not found in addr_infos")
| (addr_infos: List[Tuple[Union[int, socket.AddressFamily], Union[int, socket.SocketKind], int, str, Tuple]], addr: Union[Tuple[str, int], Tuple[str, int, int, int]]) -> NoneType |
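A short sketch of the fast path above, using full 5-tuples as returned by ``getaddrinfo``::
    import socket
    infos = [
        (socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, "", ("10.0.0.1", 80)),
        (socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, "", ("10.0.0.2", 80)),
    ]
    remove_addr_infos(infos, ("10.0.0.1", 80))
    # infos now holds only the 10.0.0.2 entry; an unmatched addr raises ValueError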
24,857 | aiohappyeyeballs.impl | start_connection |
Connect to a TCP server.
Create a socket connection to a specified destination. The
destination is specified as a list of AddrInfoType tuples as
returned from getaddrinfo().
The arguments are, in order:
* ``family``: the address family, e.g. ``socket.AF_INET`` or
``socket.AF_INET6``.
* ``type``: the socket type, e.g. ``socket.SOCK_STREAM`` or
``socket.SOCK_DGRAM``.
* ``proto``: the protocol, e.g. ``socket.IPPROTO_TCP`` or
``socket.IPPROTO_UDP``.
* ``canonname``: the canonical name of the address, e.g.
``"www.python.org"``.
* ``sockaddr``: the socket address
This method is a coroutine which will try to establish the connection
in the background. When successful, the coroutine returns a
socket.
The expected use case is to use this method in conjunction with
loop.create_connection() to establish a connection to a server::
socket = await start_connection(addr_infos)
transport, protocol = await loop.create_connection(
MyProtocol, sock=socket, ...)
| """Base implementation."""
import asyncio
import collections
import functools
import itertools
import socket
from asyncio import staggered
from typing import List, Optional, Sequence
from .types import AddrInfoType
async def start_connection(
addr_infos: Sequence[AddrInfoType],
*,
local_addr_infos: Optional[Sequence[AddrInfoType]] = None,
happy_eyeballs_delay: Optional[float] = None,
interleave: Optional[int] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
) -> socket.socket:
"""
Connect to a TCP server.
Create a socket connection to a specified destination. The
destination is specified as a list of AddrInfoType tuples as
returned from getaddrinfo().
The arguments are, in order:
* ``family``: the address family, e.g. ``socket.AF_INET`` or
``socket.AF_INET6``.
* ``type``: the socket type, e.g. ``socket.SOCK_STREAM`` or
``socket.SOCK_DGRAM``.
* ``proto``: the protocol, e.g. ``socket.IPPROTO_TCP`` or
``socket.IPPROTO_UDP``.
* ``canonname``: the canonical name of the address, e.g.
``"www.python.org"``.
* ``sockaddr``: the socket address
This method is a coroutine which will try to establish the connection
in the background. When successful, the coroutine returns a
socket.
The expected use case is to use this method in conjunction with
loop.create_connection() to establish a connection to a server::
socket = await start_connection(addr_infos)
transport, protocol = await loop.create_connection(
MyProtocol, sock=socket, ...)
"""
if not (current_loop := loop):
current_loop = asyncio.get_running_loop()
single_addr_info = len(addr_infos) == 1
if happy_eyeballs_delay is not None and interleave is None:
# If using happy eyeballs, default to interleave addresses by family
interleave = 1
if interleave and not single_addr_info:
addr_infos = _interleave_addrinfos(addr_infos, interleave)
sock: Optional[socket.socket] = None
exceptions: List[List[Exception]] = []
if happy_eyeballs_delay is None or single_addr_info:
# not using happy eyeballs
for addrinfo in addr_infos:
try:
sock = await _connect_sock(
current_loop, exceptions, addrinfo, local_addr_infos
)
break
except OSError:
continue
else: # using happy eyeballs
sock, _, _ = await staggered.staggered_race(
(
functools.partial(
_connect_sock, current_loop, exceptions, addrinfo, local_addr_infos
)
for addrinfo in addr_infos
),
happy_eyeballs_delay,
loop=current_loop,
)
if sock is None:
all_exceptions = [exc for sub in exceptions for exc in sub]
try:
if len(all_exceptions) == 1:
raise all_exceptions[0]
else:
# If they all have the same str(), raise one.
model = str(all_exceptions[0])
if all(str(exc) == model for exc in all_exceptions):
raise all_exceptions[0]
# Raise a combined exception so the user can see all
# the various error messages.
raise OSError(
"Multiple exceptions: {}".format(
", ".join(str(exc) for exc in all_exceptions)
)
)
finally:
all_exceptions = None # type: ignore[assignment]
exceptions = None # type: ignore[assignment]
return sock
| (addr_infos: Sequence[Tuple[Union[int, socket.AddressFamily], Union[int, socket.SocketKind], int, str, Tuple]], *, local_addr_infos: Optional[Sequence[Tuple[Union[int, socket.AddressFamily], Union[int, socket.SocketKind], int, str, Tuple]]] = None, happy_eyeballs_delay: Optional[float] = None, interleave: Optional[int] = None, loop: Optional[asyncio.events.AbstractEventLoop] = None) -> socket.socket |
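A more complete, hedged sketch of the documented pattern: resolve with the event loop, connect with Happy Eyeballs, then hand the socket to ``loop.create_connection`` (the host name and delay value are illustrative)::
    import asyncio
    import socket
    from aiohappyeyeballs import start_connection

    async def main():
        loop = asyncio.get_running_loop()
        # resolve first; start_connection expects getaddrinfo()-style tuples
        infos = await loop.getaddrinfo("example.com", 80, type=socket.SOCK_STREAM)
        sock = await start_connection(infos, happy_eyeballs_delay=0.25)
        transport, _ = await loop.create_connection(asyncio.Protocol, sock=sock)
        transport.close()

    asyncio.run(main())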
24,862 | lm_eval.evaluator | evaluate | Instantiate and evaluate a model on a list of tasks.
:param lm: obj
Language Model
:param task_dict: dict[str, Task]
Dictionary of tasks. Tasks will be taken to have name type(task).config.task .
:param limit: int, optional
Limit the number of examples per task (only use this for testing)
:param bootstrap_iters:
Number of iterations for bootstrap statistics
:param write_out: bool
If True, write out an example document and model input for checking task integrity
:param log_samples: bool
If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis
:return
Dictionary of results
| def evaluate(
lm: "LM",
task_dict,
limit: Optional[int] = None,
cache_requests: bool = False,
rewrite_requests_cache: bool = False,
bootstrap_iters: Optional[int] = 100000,
write_out: bool = False,
log_samples: bool = True,
verbosity: str = "INFO",
):
"""Instantiate and evaluate a model on a list of tasks.
:param lm: obj
Language Model
:param task_dict: dict[str, Task]
Dictionary of tasks. Tasks will be taken to have name type(task).config.task .
:param limit: int, optional
Limit the number of examples per task (only use this for testing)
:param bootstrap_iters:
Number of iterations for bootstrap statistics
:param write_out: bool
If True, write out an example document and model input for checking task integrity
:param log_samples: bool
If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis
:return
Dictionary of results
"""
eval_logger.setLevel(getattr(logging, f"{verbosity}"))
# tracks all Instances/requests a model must generate output on.
requests = defaultdict(list)
# stores the amount to pad out reqs per req. type so that
# number of fwd passes per distributed rank is equal
padding_requests = defaultdict(int)
# get lists of group hierarchy and each type of request
task_hierarchy, eval_tasks = get_task_list(task_dict)
if not log_samples:
if not all(
"bypass" not in getattr(task_output.task, "_metric_fn_list", {}).keys()
for task_output in eval_tasks
):
raise ValueError("log_samples must be True for 'bypass' metric-only tasks")
for task_output in eval_tasks:
task: Task = task_output.task
limit = get_sample_size(task, limit)
task.build_all_requests(
limit=limit,
rank=lm.rank,
world_size=lm.world_size,
cache_requests=cache_requests,
rewrite_requests_cache=rewrite_requests_cache,
)
eval_logger.debug(
f"Task: {task_output.task_name}; number of requests on this rank: {len(task.instances)}"
)
if write_out:
print_writeout(task)
# aggregate Instances by LM method requested to get output.
for instance in task.instances:
reqtype = instance.request_type
requests[reqtype].append(instance)
if lm.world_size > 1:
instances_rnk = torch.tensor(len(task._instances), device=lm.device)
gathered_item = (
lm.accelerator.gather(instances_rnk).cpu().detach().numpy().tolist()
)
# "multiple_choice" task types dispatch (several) "loglikelihood" request types
reqtype = (
"loglikelihood"
if task.OUTPUT_TYPE == "multiple_choice"
else task.OUTPUT_TYPE
)
# compute number of pseudo-batches to pad with (FSDP/DDP require even batches among ranks)
numpad = max(gathered_item) - gathered_item[lm.rank]
# todo: may not account for padding in cases like SquadV2 which has multiple req types
padding_requests[reqtype] += numpad
### Run LM on inputs, get all outputs ###
# execute each type of request
for reqtype, reqs in requests.items():
eval_logger.info(f"Running {reqtype} requests")
# create `K` copies of each request `req` based off `K = req.repeats`
cloned_reqs = []
for req in reqs:
cloned_reqs.extend([req] * req.repeats)
if (lm.world_size > 1) and (padding_requests[reqtype] > 0):
for _ in range(padding_requests[reqtype]):
cloned_reqs.extend([req] * req.repeats)
# run requests through model
resps = getattr(lm, reqtype)(cloned_reqs)
# put responses from model into a list of length K for each request.
for x, req in zip(resps, cloned_reqs):
req.resps.append(x)
if lm.world_size > 1:
lm.accelerator.wait_for_everyone()
RANK = lm.rank
WORLD_SIZE = lm.world_size
### Postprocess outputs ###
# TODO: del model here, maybe (idea: allow user to specify device of e.g. reward model separately)
for task_output in eval_tasks:
task = task_output.task
task.apply_filters()
### Collect values of metrics on all datapoints ###
# # unpack results and sort back in order and return control to Task
# TODO: make it possible to use a different metric per filter
# Pre-process task.instances to group by doc_id
instances_by_doc_id = defaultdict(list)
for instance in task.instances:
instances_by_doc_id[instance.doc_id].append(instance)
# Sort instances within each group
for instances in instances_by_doc_id.values():
instances.sort(key=lambda x: x.idx)
# iterate over different filters used
for filter_key in task.instances[0].filtered_resps.keys():
doc_iterator = task.doc_iterator(
rank=RANK, limit=limit, world_size=WORLD_SIZE
)
for doc_id, doc in doc_iterator:
requests = instances_by_doc_id[doc_id]
metrics = task.process_results(
doc, [req.filtered_resps[filter_key] for req in requests]
)
if log_samples:
target = task.doc_to_target(doc)
example = {
"doc_id": doc_id,
"doc": doc,
"target": target,
"arguments": [req.args for req in requests],
"resps": [req.resps for req in requests],
"filtered_resps": [
req.filtered_resps[filter_key] for req in requests
],
}
example.update(metrics)
task_output.logged_samples.append(example)
for metric, value in metrics.items():
task_output.sample_metrics[(metric, filter_key)].append(value)
if WORLD_SIZE > 1:
# if multigpu, then gather data across all ranks to rank 0
# first gather logged samples across all ranks
for task_output in eval_tasks:
if log_samples:
# for task_name, task_samples in list(samples.items()):
full_samples = [None] * WORLD_SIZE if RANK == 0 else None
torch.distributed.gather_object(
obj=task_output.logged_samples,
object_gather_list=full_samples,
dst=0,
)
if RANK == 0:
task_output.logged_samples = list(
itertools.chain.from_iterable(full_samples)
)
# then collect metrics across all ranks
for metrics in task_output.sample_metrics:
metric_list = [None] * WORLD_SIZE if RANK == 0 else None
torch.distributed.gather_object(
obj=task_output.sample_metrics[metrics],
object_gather_list=metric_list,
dst=0,
)
if RANK == 0:
task_output.sample_metrics[metrics] = list(
itertools.chain.from_iterable(metric_list)
)
if RANK == 0:
### Aggregate results over all datapoints ###
# aggregate results ; run bootstrap CIs
for task_output in eval_tasks:
task_output.calculate_aggregate_metric(bootstrap_iters=bootstrap_iters)
results, samples, configs, versions, num_fewshot = consolidate_results(
eval_tasks
)
### Calculate group metrics ###
if bool(results):
for group, task_list in reversed(task_hierarchy.items()):
if len(task_list) == 0:
# task_hierarchy entries are either
# `group_name: [subtask1, subtask2, ...]`
# or `task_name: []`.
# we only want to operate on groups here.
continue
metric_list = list(
{
key
for task in task_list
for key in results[task].keys()
if "_stderr" not in key and key not in ["alias", "samples"]
}
)
for metric in metric_list:
stderr = "_stderr,".join(metric.split(","))
# gather metrics, sizes, and stderrs from subtasks
metrics = [
results[task][metric]
for task in task_list
if metric in results[task]
] # TODO: copy?
stderrs = [
results[task][stderr]
for task in task_list
if stderr in results[task]
]
sizes = [
results[task]["samples"]
for task in task_list
if metric in results[task]
]
# compute group's pooled metric and stderr
results[group][
metric
] = lm_eval.api.metrics.aggregate_subtask_metrics(metrics, sizes)
# TODO: calculate grouped metric using aggregation fn
if "N/A" in stderrs:
results[group][stderr] = "N/A"
else:
results[group][
stderr
] = lm_eval.api.metrics.pooled_sample_stderr(stderrs, sizes)
# TODO: allow GroupConfigs to choose which variance formula is used, for back-compatibility
# To use the old (likely incorrect) variance formula, comment out the above and uncomment this line:
# results[group][stderr] = lm_eval.api.metrics.combined_sample_stderr(stderrs, sizes, metrics=metrics)
results[group]["samples"] = sum(sizes)
results_agg = defaultdict(dict)
groups_agg = defaultdict(dict)
all_tasks_list = list(task_hierarchy.keys())
while True:
add_tasks_list = list(k for k in results_agg.keys())
left_tasks_list = sorted(list(set(all_tasks_list) - set(add_tasks_list)))
if len(left_tasks_list) == 0:
break
_task_hierarchy = {
k: v for k, v in task_hierarchy.items() if k in left_tasks_list
}
_results_agg, _groups_agg = prepare_print_tasks(_task_hierarchy, results)
results_agg = {**results_agg, **_results_agg}
groups_agg = {**groups_agg, **_groups_agg}
for group_name, task_list in task_hierarchy.items():
if task_list:
num_fewshot[group_name] = num_fewshot[
task_list[0]
] # TODO: validate this
results_dict = {
"results": dict(results_agg.items()),
**({"groups": dict(groups_agg.items())} if bool(groups_agg) else {}),
"group_subtasks": dict(reversed(task_hierarchy.items())),
"configs": dict(sorted(configs.items())),
"versions": dict(sorted(versions.items())),
"n-shot": dict(sorted(num_fewshot.items())),
}
if log_samples:
results_dict["samples"] = dict(samples)
return results_dict
else:
return None
| (lm: 'LM', task_dict, limit: Optional[int] = None, cache_requests: bool = False, rewrite_requests_cache: bool = False, bootstrap_iters: Optional[int] = 100000, write_out: bool = False, log_samples: bool = True, verbosity: str = 'INFO') |
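For orientation, a hedged sketch of driving this through the higher-level entry point (task and model names are illustrative; `simple_evaluate` builds the LM and task_dict before delegating to `evaluate`)::
    import lm_eval
    results = lm_eval.simple_evaluate(
        model="hf",
        model_args="pretrained=gpt2",
        tasks=["lambada_openai"],
        limit=10,  # small smoke test, per the `limit` docstring above
    )
    print(results["results"])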
24,869 | lm_eval.evaluator | simple_evaluate | Instantiate and evaluate a model on a list of tasks.
:param model: Union[str, LM]
Name of model or LM object, see lm_eval.models.get_model
:param model_args: Optional[str, dict]
String or dict arguments for each model class, see LM.create_from_arg_string and LM.create_from_arg_object.
Ignored if `model` argument is a LM object.
:param tasks: list[Union[str, dict, Task]]
List of task names or Task objects. Task objects will be taken to have name task.EVAL_HARNESS_NAME if defined and type(task).__name__ otherwise.
:param num_fewshot: int
Number of examples in few-shot context
:param batch_size: int or str, optional
Batch size for model
:param max_batch_size: int, optional
Maximal batch size to try with automatic batch size detection
:param device: str, optional
PyTorch device (e.g. "cpu" or "cuda:0") for running models
:param use_cache: str, optional
A path to a sqlite db file for caching model responses. `None` if not caching.
:param cache_requests: bool, optional
Speed up evaluation by caching the building of dataset requests. `None` if not caching.
:param rewrite_requests_cache: bool, optional
Rewrites all of the request cache if set to `True`. `None` if not desired.
:param delete_requests_cache: bool, optional
Deletes all of the request cache if set to `True`. `None` if not desired.
:param limit: int or float, optional
Limit the number of examples per task (only use this for testing), If <1, limit is a percentage of the total number of examples.
:param bootstrap_iters:
Number of iterations for bootstrap statistics
:param check_integrity: bool
Whether to run the relevant part of the test suite for the tasks
:param write_out: bool
If True, write out an example document and model input for checking task integrity
:param log_samples: bool
If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis
:param gen_kwargs: str
String arguments for model generation
Ignored for all tasks with loglikelihood output_type
:param predict_only: bool
If true only model outputs will be generated and returned. Metrics will not be evaluated
:param random_seed: int
Random seed for python's random module. If set to None, the seed will not be set.
:param numpy_random_seed: int
Random seed for numpy. If set to None, the seed will not be set.
:param torch_random_seed: int
Random seed for torch. If set to None, the seed will not be set.
:return
Dictionary of results
| def evaluate(
lm: "LM",
task_dict,
limit: Optional[int] = None,
cache_requests: bool = False,
rewrite_requests_cache: bool = False,
bootstrap_iters: Optional[int] = 100000,
write_out: bool = False,
log_samples: bool = True,
verbosity: str = "INFO",
):
"""Instantiate and evaluate a model on a list of tasks.
:param lm: obj
Language Model
:param task_dict: dict[str, Task]
Dictionary of tasks. Tasks will be taken to have name type(task).config.task .
:param limit: int, optional
Limit the number of examples per task (only use this for testing)
:param bootstrap_iters:
Number of iterations for bootstrap statistics
:param write_out: bool
If True, write out an example document and model input for checking task integrity
:param log_samples: bool
If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis
:return
Dictionary of results
"""
eval_logger.setLevel(getattr(logging, f"{verbosity}"))
# tracks all Instances/requests a model must generate output on.
requests = defaultdict(list)
# stores the amount to pad out reqs per req. type so that
# number of fwd passes per distributed rank is equal
padding_requests = defaultdict(int)
# get lists of group hierarchy and each type of request
task_hierarchy, eval_tasks = get_task_list(task_dict)
if not log_samples:
if not all(
"bypass" not in getattr(task_output.task, "_metric_fn_list", {}).keys()
for task_output in eval_tasks
):
raise ValueError("log_samples must be True for 'bypass' metric-only tasks")
for task_output in eval_tasks:
task: Task = task_output.task
limit = get_sample_size(task, limit)
task.build_all_requests(
limit=limit,
rank=lm.rank,
world_size=lm.world_size,
cache_requests=cache_requests,
rewrite_requests_cache=rewrite_requests_cache,
)
eval_logger.debug(
f"Task: {task_output.task_name}; number of requests on this rank: {len(task.instances)}"
)
if write_out:
print_writeout(task)
# aggregate Instances by LM method requested to get output.
for instance in task.instances:
reqtype = instance.request_type
requests[reqtype].append(instance)
if lm.world_size > 1:
instances_rnk = torch.tensor(len(task._instances), device=lm.device)
gathered_item = (
lm.accelerator.gather(instances_rnk).cpu().detach().numpy().tolist()
)
# "multiple_choice" task types dispatch (several) "loglikelihood" request types
reqtype = (
"loglikelihood"
if task.OUTPUT_TYPE == "multiple_choice"
else task.OUTPUT_TYPE
)
# compute number of pseudo-batches to pad with (FSDP/DDP require even batches among ranks)
numpad = max(gathered_item) - gathered_item[lm.rank]
# todo: may not account for padding in cases like SquadV2 which has multiple req types
padding_requests[reqtype] += numpad
### Run LM on inputs, get all outputs ###
# execute each type of request
for reqtype, reqs in requests.items():
eval_logger.info(f"Running {reqtype} requests")
# create `K` copies of each request `req` based off `K = req.repeats`
cloned_reqs = []
for req in reqs:
cloned_reqs.extend([req] * req.repeats)
if (lm.world_size > 1) and (padding_requests[reqtype] > 0):
# pad with copies of the final request so every rank runs the same number of forward passes (FSDP/DDP need equal batch counts across ranks)
for _ in range(padding_requests[reqtype]):
cloned_reqs.extend([req] * req.repeats)
# run requests through model
resps = getattr(lm, reqtype)(cloned_reqs)
# put responses from model into a list of length K for each request.
for x, req in zip(resps, cloned_reqs):
req.resps.append(x)
if lm.world_size > 1:
lm.accelerator.wait_for_everyone()
RANK = lm.rank
WORLD_SIZE = lm.world_size
### Postprocess outputs ###
# TODO: del model here, maybe (idea: allow user to specify device of e.g. reward model separately)
for task_output in eval_tasks:
task = task_output.task
task.apply_filters()
### Collect values of metrics on all datapoints ###
# # unpack results and sort back in order and return control to Task
# TODO: make it possible to use a different metric per filter
# Pre-process task.instances to group by doc_id
instances_by_doc_id = defaultdict(list)
for instance in task.instances:
instances_by_doc_id[instance.doc_id].append(instance)
# Sort instances within each group
for instances in instances_by_doc_id.values():
instances.sort(key=lambda x: x.idx)
# iterate over different filters used
for filter_key in task.instances[0].filtered_resps.keys():
doc_iterator = task.doc_iterator(
rank=RANK, limit=limit, world_size=WORLD_SIZE
)
for doc_id, doc in doc_iterator:
requests = instances_by_doc_id[doc_id]
metrics = task.process_results(
doc, [req.filtered_resps[filter_key] for req in requests]
)
if log_samples:
target = task.doc_to_target(doc)
example = {
"doc_id": doc_id,
"doc": doc,
"target": target,
"arguments": [req.args for req in requests],
"resps": [req.resps for req in requests],
"filtered_resps": [
req.filtered_resps[filter_key] for req in requests
],
}
example.update(metrics)
task_output.logged_samples.append(example)
for metric, value in metrics.items():
task_output.sample_metrics[(metric, filter_key)].append(value)
if WORLD_SIZE > 1:
# if multigpu, then gather data across all ranks to rank 0
# first gather logged samples across all ranks
for task_output in eval_tasks:
if log_samples:
# for task_name, task_samples in list(samples.items()):
full_samples = [None] * WORLD_SIZE if RANK == 0 else None
torch.distributed.gather_object(
obj=task_output.logged_samples,
object_gather_list=full_samples,
dst=0,
)
if RANK == 0:
task_output.logged_samples = list(
itertools.chain.from_iterable(full_samples)
)
# then collect metrics across all ranks
for metrics in task_output.sample_metrics:
metric_list = [None] * WORLD_SIZE if RANK == 0 else None
torch.distributed.gather_object(
obj=task_output.sample_metrics[metrics],
object_gather_list=metric_list,
dst=0,
)
if RANK == 0:
task_output.sample_metrics[metrics] = list(
itertools.chain.from_iterable(metric_list)
)
if RANK == 0:
### Aggregate results over all datapoints ###
# aggregate results ; run bootstrap CIs
for task_output in eval_tasks:
task_output.calculate_aggregate_metric(bootstrap_iters=bootstrap_iters)
results, samples, configs, versions, num_fewshot = consolidate_results(
eval_tasks
)
### Calculate group metrics ###
if bool(results):
for group, task_list in reversed(task_hierarchy.items()):
if len(task_list) == 0:
# task_hierarchy entries are either
# `group_name: [subtask1, subtask2, ...]`
# or `task_name: []`.
# we only want to operate on groups here.
continue
metric_list = list(
{
key
for task in task_list
for key in results[task].keys()
if "_stderr" not in key and key not in ["alias", "samples"]
}
)
for metric in metric_list:
stderr = "_stderr,".join(metric.split(","))
# gather metrics, sizes, and stderrs from subtasks
metrics = [
results[task][metric]
for task in task_list
if metric in results[task]
] # TODO: copy?
stderrs = [
results[task][stderr]
for task in task_list
if stderr in results[task]
]
sizes = [
results[task]["samples"]
for task in task_list
if metric in results[task]
]
# compute group's pooled metric and stderr
results[group][
metric
] = lm_eval.api.metrics.aggregate_subtask_metrics(metrics, sizes)
# TODO: calculate grouped metric using aggregation fn
if "N/A" in stderrs:
results[group][stderr] = "N/A"
else:
results[group][
stderr
] = lm_eval.api.metrics.pooled_sample_stderr(stderrs, sizes)
# TODO: allow GroupConfigs to choose which variance formula is used, for back-compatibility
# To use the old (likely incorrect) variance formula, comment out the above and uncomment this line:
# results[group][stderr] = lm_eval.api.metrics.combined_sample_stderr(stderrs, sizes, metrics=metrics)
results[group]["samples"] = sum(sizes)
results_agg = defaultdict(dict)
groups_agg = defaultdict(dict)
all_tasks_list = list(task_hierarchy.keys())
while True:
add_tasks_list = list(k for k in results_agg.keys())
left_tasks_list = sorted(list(set(all_tasks_list) - set(add_tasks_list)))
if len(left_tasks_list) == 0:
break
_task_hierarchy = {
k: v for k, v in task_hierarchy.items() if k in left_tasks_list
}
_results_agg, _groups_agg = prepare_print_tasks(_task_hierarchy, results)
results_agg = {**results_agg, **_results_agg}
groups_agg = {**groups_agg, **_groups_agg}
for group_name, task_list in task_hierarchy.items():
if task_list:
num_fewshot[group_name] = num_fewshot[
task_list[0]
] # TODO: validate this
results_dict = {
"results": dict(results_agg.items()),
**({"groups": dict(groups_agg.items())} if bool(groups_agg) else {}),
"group_subtasks": dict(reversed(task_hierarchy.items())),
"configs": dict(sorted(configs.items())),
"versions": dict(sorted(versions.items())),
"n-shot": dict(sorted(num_fewshot.items())),
}
if log_samples:
results_dict["samples"] = dict(samples)
return results_dict
else:
return None
| (model, model_args: Union[str, dict, NoneType] = None, tasks: Optional[List[Union[str, dict, object]]] = None, num_fewshot: Optional[int] = None, batch_size: Optional[int] = None, max_batch_size: Optional[int] = None, device: Optional[str] = None, use_cache: Optional[str] = None, cache_requests: bool = False, rewrite_requests_cache: bool = False, delete_requests_cache: bool = False, limit: Union[int, float, NoneType] = None, bootstrap_iters: int = 100000, check_integrity: bool = False, write_out: bool = False, log_samples: bool = True, gen_kwargs: Optional[str] = None, task_manager: Optional[lm_eval.tasks.TaskManager] = None, verbosity: str = 'INFO', predict_only: bool = False, random_seed: int = 0, numpy_random_seed: int = 1234, torch_random_seed: int = 1234) |
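A minimal sketch of driving the loop above, assuming the lm-evaluation-harness 0.4.x entry point simple_evaluate (which builds the LM and task_dict before delegating to evaluate); the backend name, checkpoint, and task below are illustrative assumptions:

import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",                    # assumed: HuggingFace backend registry name
    model_args="pretrained=gpt2",  # assumed checkpoint; any HF model id works
    tasks=["hellaswag"],
    limit=10,                      # per the docstring: only for testing
    bootstrap_iters=1000,
    log_samples=True,
)
if results is not None:            # evaluate() returns None on non-zero ranks
    print(results["results"])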
24,872 | niloofar | main | Entry point for the application script | def main():
"""Entry point for the application script"""
print("Call your main application code here")
| () |
24,873 | pymisp.abstract | AbstractMISP | null | class AbstractMISP(MutableMapping, MISPFileCache, metaclass=ABCMeta): # type: ignore[type-arg]
__resources_path = resources_path
__misp_objects_path = misp_objects_path
__describe_types = describe_types
def __init__(self, **kwargs) -> None: # type: ignore[no-untyped-def]
"""Abstract class for all the MISP objects.
NOTE: Every method in every classes inheriting this one are doing
changes in memory and do not modify data on a remote MISP instance.
To do so, you need to call the respective add_* or update_*
methods in PyMISP.
"""
super().__init__()
self.__edited: bool = True # As we create a new object, we assume it is edited
self.__not_jsonable: list[str] = []
self._fields_for_feed: set[str]
self.__self_defined_describe_types: dict[str, Any] | None = None
self.uuid: str
if kwargs.get('force_timestamps') is not None:
# Ignore the edited objects and keep the timestamps.
self.__force_timestamps: bool = True
else:
self.__force_timestamps = False
@property
def describe_types(self) -> dict[str, Any]:
if self.__self_defined_describe_types:
return self.__self_defined_describe_types
return self.__describe_types
@describe_types.setter
def describe_types(self, describe_types: dict[str, Any]) -> None:
self.__self_defined_describe_types = describe_types
@property
def resources_path(self) -> Path:
return self.__resources_path
@property
def misp_objects_path(self) -> Path:
return self.__misp_objects_path
@misp_objects_path.setter
def misp_objects_path(self, misp_objects_path: str | Path) -> None:
if isinstance(misp_objects_path, str):
misp_objects_path = Path(misp_objects_path)
self.__misp_objects_path = misp_objects_path
def from_dict(self, **kwargs) -> None: # type: ignore[no-untyped-def]
"""Loading all the parameters as class properties, if they aren't `None`.
This method aims to be called when all the properties requiring a special
treatment are processed.
Note: This method is used when you initialize an object with existing data so by default,
the class is flagged as not edited."""
for prop, value in kwargs.items():
if value is None:
continue
setattr(self, prop, value)
# We load an existing dictionary, marking it as not-edited
self.__edited = False
def update_not_jsonable(self, *args) -> None: # type: ignore[no-untyped-def]
"""Add entries to the __not_jsonable list"""
self.__not_jsonable += args
def set_not_jsonable(self, args: list[str]) -> None:
"""Set __not_jsonable to a new list"""
self.__not_jsonable = args
def _remove_from_not_jsonable(self, *args) -> None: # type: ignore[no-untyped-def]
"""Remove the entries that are in the __not_jsonable list"""
for entry in args:
try:
self.__not_jsonable.remove(entry)
except ValueError:
pass
def from_json(self, json_string: str) -> None:
"""Load a JSON string"""
self.from_dict(**loads(json_string))
def to_dict(self, json_format: bool = False) -> dict[str, Any]:
"""Dump the class to a dictionary.
This method automatically removes the timestamp recursively in every object
that has been edited in order to let MISP update the event accordingly."""
is_edited = self.edited
to_return = {}
for attribute, val in self.items():
if val is None:
continue
elif isinstance(val, list) and len(val) == 0:
continue
elif isinstance(val, str):
val = val.strip()
elif json_format:
if isinstance(val, AbstractMISP):
val = val.to_json(True)
elif isinstance(val, (datetime, date)):
val = val.isoformat()
elif isinstance(val, Enum):
val = val.value
elif isinstance(val, UUID):
val = str(val)
if attribute == 'timestamp':
if not self.__force_timestamps and is_edited:
# In order to be accepted by MISP, the timestamp of an object
# needs to be either newer, or None.
# If the current object is marked as edited, the easiest is to
# skip the timestamp and let MISP deal with it
continue
else:
val = self._datetime_to_timestamp(val)
if (attribute in ('first_seen', 'last_seen', 'datetime')
and isinstance(val, datetime)
and not val.tzinfo):
# Need to make sure the timezone is set. Otherwise, it will be processed as UTC on the server
val = val.astimezone()
to_return[attribute] = val
to_return = _int_to_str(to_return)
return to_return
def jsonable(self) -> dict[str, Any]:
"""This method is used by the JSON encoder"""
return self.to_dict()
def _to_feed(self) -> dict[str, Any]:
if not hasattr(self, '_fields_for_feed') or not self._fields_for_feed:
raise PyMISPError('Unable to export in the feed format, _fields_for_feed is missing.')
if hasattr(self, '_set_default') and callable(self._set_default):
self._set_default()
to_return = {}
for field in sorted(self._fields_for_feed):
if getattr(self, field, None) is not None:
if field in ['timestamp', 'publish_timestamp']:
to_return[field] = self._datetime_to_timestamp(getattr(self, field))
elif isinstance(getattr(self, field), (datetime, date)):
to_return[field] = getattr(self, field).isoformat()
else:
to_return[field] = getattr(self, field)
else:
if field in ['data', 'first_seen', 'last_seen', 'deleted']:
# special fields
continue
raise PyMISPError(f'The field {field} is required in {self.__class__.__name__} when generating a feed.')
to_return = _int_to_str(to_return)
return to_return
def to_json(self, sort_keys: bool = False, indent: int | None = None) -> str:
"""Dump recursively any class of type MISPAbstract to a json string"""
if HAS_ORJSON:
option = 0
if sort_keys:
option |= orjson.OPT_SORT_KEYS
if indent:
option |= orjson.OPT_INDENT_2
# orjson's dumps method returns bytes instead of str; to keep compatibility with json
# we have to convert output to str
return dumps(self, default=pymisp_json_default, option=option).decode()
return dumps(self, default=pymisp_json_default, sort_keys=sort_keys, indent=indent)
def __getitem__(self, key: str) -> Any:
try:
if key[0] != '_' and key not in self.__not_jsonable:
return self.__dict__[key]
raise KeyError
except AttributeError:
# Expected by pop and other dict-related methods
raise KeyError
def __setitem__(self, key: str, value: Any) -> None:
setattr(self, key, value)
def __delitem__(self, key: str) -> None:
delattr(self, key)
def __iter__(self) -> Any:
'''When we call **self, skip keys:
* starting with _
* in __not_jsonable
* timestamp if the object is edited *unless* it is forced
'''
return iter({k: v for k, v in self.__dict__.items()
if not (k[0] == '_'
or k in self.__not_jsonable
or (not self.__force_timestamps and (k == 'timestamp' and self.__edited)))})
def __len__(self) -> int:
return len([k for k in self.__dict__.keys() if not (k[0] == '_' or k in self.__not_jsonable)])
@property
def force_timestamp(self) -> bool:
return self.__force_timestamps
@force_timestamp.setter
def force_timestamp(self, force: bool) -> None:
self.__force_timestamps = force
@property
def edited(self) -> bool:
"""Recursively check if an object has been edited and update the flag accordingly
to the parent objects"""
if self.__edited:
return self.__edited
for p, val in self.items():
if isinstance(val, AbstractMISP) and val.edited:
self.__edited = True
break
elif isinstance(val, list) and all(isinstance(a, AbstractMISP) for a in val):
if any(a.edited for a in val):
self.__edited = True
break
return self.__edited
@edited.setter
def edited(self, val: bool) -> None:
"""Set the edit flag"""
if isinstance(val, bool):
self.__edited = val
else:
raise PyMISPError('edited can only be True or False')
def __setattr__(self, name: str, value: Any) -> None:
if name[0] != '_' and not self.__edited and name in self:
# The private members don't matter
# If we already have a key with that name, we're modifying it.
self.__edited = True
super().__setattr__(name, value)
def _datetime_to_timestamp(self, d: int | float | str | datetime) -> int:
"""Convert a datetime object to a timestamp (int)"""
if isinstance(d, (int, float, str)):
# Assume we already have a timestamp
return int(d)
return int(d.timestamp())
def _add_tag(self, tag: str | MISPTag | Mapping[str, Any] | None = None, **kwargs): # type: ignore[no-untyped-def]
"""Add a tag to the attribute (by name or a MISPTag object)"""
if isinstance(tag, str):
misp_tag = MISPTag()
misp_tag.from_dict(name=tag)
elif isinstance(tag, MISPTag):
misp_tag = tag
elif isinstance(tag, dict):
misp_tag = MISPTag()
misp_tag.from_dict(**tag)
elif kwargs:
misp_tag = MISPTag()
misp_tag.from_dict(**kwargs)
else:
raise PyMISPInvalidFormat(f"The tag is in an invalid format (can be either string, MISPTag, or an expanded dict): {tag}")
if misp_tag not in self.tags: # type: ignore
self.Tag.append(misp_tag)
self.edited = True
return misp_tag
def _set_tags(self, tags: list[MISPTag]) -> None:
"""Set a list of prepared MISPTag."""
if all(isinstance(x, MISPTag) for x in tags):
self.Tag = tags
else:
raise PyMISPInvalidFormat('All the attributes have to be of type MISPTag.')
def __eq__(self, other: object) -> bool:
if isinstance(other, AbstractMISP):
return self.to_dict() == other.to_dict()
elif isinstance(other, dict):
return self.to_dict() == other
else:
return False
def __repr__(self) -> str:
return f'<{self.__class__.__name__} - please define me>'
| (**kwargs) -> 'None' |
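A short sketch of the MutableMapping behaviour defined above, using MISPEvent (a concrete subclass from the same package) since AbstractMISP itself is abstract:

from pymisp import MISPEvent

event = MISPEvent()
event.info = 'demo event'                      # attribute access ...
print(event['info'])                           # ... mirrors item access via __getitem__
print(event.edited)                            # True: new objects start as edited
print(event.to_json(sort_keys=True, indent=2))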
24,875 | pymisp.abstract | __delitem__ | null | def __delitem__(self, key: str) -> None:
delattr(self, key)
| (self, key: str) -> NoneType |
24,876 | pymisp.abstract | __eq__ | null | def __eq__(self, other: object) -> bool:
if isinstance(other, AbstractMISP):
return self.to_dict() == other.to_dict()
elif isinstance(other, dict):
return self.to_dict() == other
else:
return False
| (self, other: object) -> bool |
24,877 | pymisp.abstract | __getitem__ | null | def __getitem__(self, key: str) -> Any:
try:
if key[0] != '_' and key not in self.__not_jsonable:
return self.__dict__[key]
raise KeyError
except AttributeError:
# Expected by pop and other dict-related methods
raise KeyError
| (self, key: str) -> Any |
24,878 | pymisp.abstract | __init__ | Abstract class for all the MISP objects.
NOTE: Every method in every class inheriting this one makes
changes in memory and does not modify data on a remote MISP instance.
To do so, you need to call the respective add_* or update_*
methods in PyMISP.
| def __init__(self, **kwargs) -> None: # type: ignore[no-untyped-def]
"""Abstract class for all the MISP objects.
NOTE: Every method in every class inheriting this one makes
changes in memory and does not modify data on a remote MISP instance.
To do so, you need to call the respective add_* or update_*
methods in PyMISP.
"""
super().__init__()
self.__edited: bool = True # As we create a new object, we assume it is edited
self.__not_jsonable: list[str] = []
self._fields_for_feed: set[str]
self.__self_defined_describe_types: dict[str, Any] | None = None
self.uuid: str
if kwargs.get('force_timestamps') is not None:
# Ignore the edited objects and keep the timestamps.
self.__force_timestamps: bool = True
else:
self.__force_timestamps = False
| (self, **kwargs) -> NoneType |
24,879 | pymisp.abstract | __iter__ | When we call **self, skip keys:
* starting with _
* in __not_jsonable
* timestamp if the object is edited *unless* it is forced
| def __iter__(self) -> Any:
'''When we call **self, skip keys:
* starting with _
* in __not_jsonable
* timestamp if the object is edited *unless* it is forced
'''
return iter({k: v for k, v in self.__dict__.items()
if not (k[0] == '_'
or k in self.__not_jsonable
or (not self.__force_timestamps and (k == 'timestamp' and self.__edited)))})
| (self) -> Any |
24,880 | pymisp.abstract | __len__ | null | def __len__(self) -> int:
return len([k for k in self.__dict__.keys() if not (k[0] == '_' or k in self.__not_jsonable)])
| (self) -> int |
24,881 | pymisp.abstract | __repr__ | null | def __repr__(self) -> str:
return f'<{self.__class__.__name__} - please define me>'
| (self) -> str |
24,882 | pymisp.abstract | __setattr__ | null | def __setattr__(self, name: str, value: Any) -> None:
if name[0] != '_' and not self.__edited and name in self:
# The private members don't matter
# If we already have a key with that name, we're modifying it.
self.__edited = True
super().__setattr__(name, value)
| (self, name: str, value: Any) -> NoneType |
24,883 | pymisp.abstract | __setitem__ | null | def __setitem__(self, key: str, value: Any) -> None:
setattr(self, key, value)
| (self, key: str, value: Any) -> NoneType |
24,884 | pymisp.abstract | _add_tag | Add a tag to the attribute (by name or a MISPTag object) | def _add_tag(self, tag: str | MISPTag | Mapping[str, Any] | None = None, **kwargs): # type: ignore[no-untyped-def]
"""Add a tag to the attribute (by name or a MISPTag object)"""
if isinstance(tag, str):
misp_tag = MISPTag()
misp_tag.from_dict(name=tag)
elif isinstance(tag, MISPTag):
misp_tag = tag
elif isinstance(tag, dict):
misp_tag = MISPTag()
misp_tag.from_dict(**tag)
elif kwargs:
misp_tag = MISPTag()
misp_tag.from_dict(**kwargs)
else:
raise PyMISPInvalidFormat(f"The tag is in an invalid format (can be either string, MISPTag, or an expanded dict): {tag}")
if misp_tag not in self.tags: # type: ignore
self.Tag.append(misp_tag)
self.edited = True
return misp_tag
| (self, tag: Union[str, pymisp.abstract.MISPTag, Mapping[str, Any], NoneType] = None, **kwargs) |
24,885 | pymisp.abstract | _datetime_to_timestamp | Convert a datetime object to a timestamp (int) | def _datetime_to_timestamp(self, d: int | float | str | datetime) -> int:
"""Convert a datetime object to a timestamp (int)"""
if isinstance(d, (int, float, str)):
# Assume we already have a timestamp
return int(d)
return int(d.timestamp())
| (self, d: int | float | str | datetime.datetime) -> int |
24,886 | pymisp.abstract | _remove_from_not_jsonable | Remove the entries that are in the __not_jsonable list | def _remove_from_not_jsonable(self, *args) -> None: # type: ignore[no-untyped-def]
"""Remove the entries that are in the __not_jsonable list"""
for entry in args:
try:
self.__not_jsonable.remove(entry)
except ValueError:
pass
| (self, *args) -> NoneType |
24,887 | pymisp.abstract | _set_tags | Set a list of prepared MISPTag. | def _set_tags(self, tags: list[MISPTag]) -> None:
"""Set a list of prepared MISPTag."""
if all(isinstance(x, MISPTag) for x in tags):
self.Tag = tags
else:
raise PyMISPInvalidFormat('All the attributes have to be of type MISPTag.')
| (self, tags: list[pymisp.abstract.MISPTag]) -> NoneType |
24,888 | pymisp.abstract | _to_feed | null | def _to_feed(self) -> dict[str, Any]:
if not hasattr(self, '_fields_for_feed') or not self._fields_for_feed:
raise PyMISPError('Unable to export in the feed format, _fields_for_feed is missing.')
if hasattr(self, '_set_default') and callable(self._set_default):
self._set_default()
to_return = {}
for field in sorted(self._fields_for_feed):
if getattr(self, field, None) is not None:
if field in ['timestamp', 'publish_timestamp']:
to_return[field] = self._datetime_to_timestamp(getattr(self, field))
elif isinstance(getattr(self, field), (datetime, date)):
to_return[field] = getattr(self, field).isoformat()
else:
to_return[field] = getattr(self, field)
else:
if field in ['data', 'first_seen', 'last_seen', 'deleted']:
# special fields
continue
raise PyMISPError(f'The field {field} is required in {self.__class__.__name__} when generating a feed.')
to_return = _int_to_str(to_return)
return to_return
| (self) -> dict[str, typing.Any] |
24,890 | pymisp.abstract | from_dict | Loading all the parameters as class properties, if they aren't `None`.
This method aims to be called when all the properties requiring a special
treatment are processed.
Note: This method is used when you initialize an object with existing data so by default,
the class is flagged as not edited. | def from_dict(self, **kwargs) -> None: # type: ignore[no-untyped-def]
"""Loading all the parameters as class properties, if they aren't `None`.
This method aims to be called when all the properties requiring a special
treatment are processed.
Note: This method is used when you initialize an object with existing data so by default,
the class is flagged as not edited."""
for prop, value in kwargs.items():
if value is None:
continue
setattr(self, prop, value)
# We load an existing dictionary, marking it as not-edited
self.__edited = False
| (self, **kwargs) -> NoneType |
24,891 | pymisp.abstract | from_json | Load a JSON string | def from_json(self, json_string: str) -> None:
"""Load a JSON string"""
self.from_dict(**loads(json_string))
| (self, json_string: str) -> NoneType |
24,894 | pymisp.abstract | jsonable | This method is used by the JSON encoder | def jsonable(self) -> dict[str, Any]:
"""This method is used by the JSON encoder"""
return self.to_dict()
| (self) -> dict[str, typing.Any] |
24,898 | pymisp.abstract | set_not_jsonable | Set __not_jsonable to a new list | def set_not_jsonable(self, args: list[str]) -> None:
"""Set __not_jsonable to a new list"""
self.__not_jsonable = args
| (self, args: list[str]) -> NoneType |
24,900 | pymisp.abstract | to_dict | Dump the class to a dictionary.
This method automatically removes the timestamp recursively in every object
that has been edited in order to let MISP update the event accordingly. | def to_dict(self, json_format: bool = False) -> dict[str, Any]:
"""Dump the class to a dictionary.
This method automatically removes the timestamp recursively in every object
that has been edited in order to let MISP update the event accordingly."""
is_edited = self.edited
to_return = {}
for attribute, val in self.items():
if val is None:
continue
elif isinstance(val, list) and len(val) == 0:
continue
elif isinstance(val, str):
val = val.strip()
elif json_format:
if isinstance(val, AbstractMISP):
val = val.to_json(True)
elif isinstance(val, (datetime, date)):
val = val.isoformat()
elif isinstance(val, Enum):
val = val.value
elif isinstance(val, UUID):
val = str(val)
if attribute == 'timestamp':
if not self.__force_timestamps and is_edited:
# In order to be accepted by MISP, the timestamp of an object
# needs to be either newer, or None.
# If the current object is marked as edited, the easiest is to
# skip the timestamp and let MISP deal with it
continue
else:
val = self._datetime_to_timestamp(val)
if (attribute in ('first_seen', 'last_seen', 'datetime')
and isinstance(val, datetime)
and not val.tzinfo):
# Need to make sure the timezone is set. Otherwise, it will be processed as UTC on the server
val = val.astimezone()
to_return[attribute] = val
to_return = _int_to_str(to_return)
return to_return
| (self, json_format: bool = False) -> dict[str, typing.Any] |
24,901 | pymisp.abstract | to_json | Dump recursively any class of type MISPAbstract to a json string | def to_json(self, sort_keys: bool = False, indent: int | None = None) -> str:
"""Dump recursively any class of type MISPAbstract to a json string"""
if HAS_ORJSON:
option = 0
if sort_keys:
option |= orjson.OPT_SORT_KEYS
if indent:
option |= orjson.OPT_INDENT_2
# orjson's dumps method returns bytes instead of str; to keep compatibility with json
# we have to convert output to str
return dumps(self, default=pymisp_json_default, option=option).decode()
return dumps(self, default=pymisp_json_default, sort_keys=sort_keys, indent=indent)
| (self, sort_keys: bool = False, indent: Optional[int] = None) -> str |
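A round-trip sketch pairing to_json and from_json above, again with MISPEvent as the concrete class:

from pymisp import MISPEvent

src = MISPEvent()
src.info = 'round-trip demo'
blob = src.to_json()

dst = MISPEvent()
dst.from_json(blob)    # loads(...) then from_dict(**...), as shown above
print(dst.info)        # 'round-trip demo'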
24,903 | pymisp.abstract | update_not_jsonable | Add entries to the __not_jsonable list | def update_not_jsonable(self, *args) -> None: # type: ignore[no-untyped-def]
"""Add entries to the __not_jsonable list"""
self.__not_jsonable += args
| (self, *args) -> NoneType |
24,905 | pymisp.tools.abstractgenerator | AbstractMISPObjectGenerator | null | class AbstractMISPObjectGenerator(MISPObject):
def _detect_epoch(self, timestamp: str | int | float) -> bool:
try:
tmp = float(timestamp)
if tmp < 30000000:
# Assuming the user doesn't want to report anything before datetime(1970, 12, 14, 6, 20)
# The date is most probably in the format 20180301
return False
return True
except ValueError:
return False
def _sanitize_timestamp(self, timestamp: datetime | date | dict[str, Any] | str | int | float | None = None) -> datetime:
if not timestamp:
return datetime.now()
if isinstance(timestamp, datetime):
return timestamp
elif isinstance(timestamp, date):
return datetime.combine(timestamp, datetime.min.time())
elif isinstance(timestamp, dict):
if not isinstance(timestamp['value'], datetime):
timestamp['value'] = parse(timestamp['value'])
return timestamp['value']
else: # Supported: float/int/string
if isinstance(timestamp, (str, int, float)) and self._detect_epoch(timestamp):
# It converts to the *local* datetime, which is consistent with the rest of the code.
return datetime.fromtimestamp(float(timestamp))
elif isinstance(timestamp, str):
return parse(timestamp)
else:
raise Exception(f'Unable to convert {timestamp} to a datetime.')
def generate_attributes(self) -> None:
"""Contains the logic where all the values of the object are gathered"""
if hasattr(self, '_parameters') and self._definition is not None:
for object_relation in self._definition['attributes']:
value = self._parameters.pop(object_relation, None)
if not value:
continue
if isinstance(value, dict):
self.add_attribute(object_relation, **value)
elif isinstance(value, list):
self.add_attributes(object_relation, *value)
else:
# Assume it is the value only
self.add_attribute(object_relation, value=value)
if self._strict and self._known_template and self._parameters:
raise InvalidMISPObject('Some object relations are unknown in the template and could not be attached: {}'.format(', '.join(self._parameters)))
| (name: 'str', strict: 'bool' = False, standalone: 'bool' = True, default_attributes_parameters: 'dict[str, Any]' = {}, **kwargs) -> 'None' |
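A hedged sketch of a concrete generator: subclasses conventionally stash the raw input in self._parameters and call generate_attributes() to map it onto the template. The 'domain-ip' template and its 'domain'/'ip' relations exist upstream, but any template would do:

from pymisp.tools import AbstractMISPObjectGenerator

class DomainIPGenerator(AbstractMISPObjectGenerator):
    def __init__(self, parameters: dict, strict: bool = True, **kwargs):
        super().__init__('domain-ip', strict=strict, **kwargs)
        self._parameters = parameters
        self.generate_attributes()    # consumes self._parameters

obj = DomainIPGenerator({'domain': 'example.com', 'ip': ['198.51.100.1']})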
24,910 | pymisp.mispevent | __init__ | Master class representing a generic MISP object
:param name: Name of the object
:param strict: Enforce validation with the object templates
:param standalone: The object will be pushed directly to MISP, not as part of an event.
In this case the ObjectReference needs to be pushed manually and cannot be in the JSON dump.
:param default_attributes_parameters: Used as template for the attributes if they are not overwritten in add_attribute
:param misp_objects_path_custom: Path to custom object templates
:param misp_objects_template_custom: Template of the object. Expects the content (dict, loaded with json.load or json.loads) of a template definition file, see repository MISP/misp-objects.
| def __init__(self, name: str, strict: bool = False, standalone: bool = True, # type: ignore[no-untyped-def]
default_attributes_parameters: dict[str, Any] = {}, **kwargs) -> None:
''' Master class representing a generic MISP object
:param name: Name of the object
:param strict: Enforce validation with the object templates
:param standalone: The object will be pushed directly to MISP, not as part of an event.
In this case the ObjectReference needs to be pushed manually and cannot be in the JSON dump.
:param default_attributes_parameters: Used as template for the attributes if they are not overwritten in add_attribute
:param misp_objects_path_custom: Path to custom object templates
:param misp_objects_template_custom: Template of the object. Expects the content (dict, loaded with json.load or json.loads) of a template definition file, see repository MISP/misp-objects.
'''
super().__init__(**kwargs)
self._strict: bool = strict
self.name: str = name
self._known_template: bool = False
self.id: int
self._definition: dict[str, Any] | None
self.timestamp: float | int | datetime
misp_objects_template_custom = kwargs.pop('misp_objects_template_custom', None)
misp_objects_path_custom = kwargs.pop('misp_objects_path_custom', None)
if misp_objects_template_custom:
self._set_template(misp_objects_template_custom=misp_objects_template_custom)
else:
# Fall back to default path if None
self._set_template(misp_objects_path_custom=misp_objects_path_custom)
self.uuid: str = str(uuid.uuid4())
self.first_seen: datetime
self.last_seen: datetime
self.__fast_attribute_access: dict[str, Any] = defaultdict(list) # Hashtable object_relation: [attributes]
self.ObjectReference: list[MISPObjectReference] = []
self._standalone: bool = False
self.Attribute: list[MISPObjectAttribute] = []
self.SharingGroup: MISPSharingGroup
self._default_attributes_parameters: dict[str, Any]
if isinstance(default_attributes_parameters, MISPAttribute):
# Just make sure we're not modifying an existing MISPAttribute
self._default_attributes_parameters = default_attributes_parameters.to_dict()
else:
self._default_attributes_parameters = copy.copy(default_attributes_parameters)
if self._default_attributes_parameters:
# Let's clean that up
self._default_attributes_parameters.pop('value', None) # duh
self._default_attributes_parameters.pop('uuid', None) # duh
self._default_attributes_parameters.pop('id', None) # duh
self._default_attributes_parameters.pop('object_id', None) # duh
self._default_attributes_parameters.pop('type', None) # depends on the value
self._default_attributes_parameters.pop('object_relation', None) # depends on the value
self._default_attributes_parameters.pop('disable_correlation', None) # depends on the value
self._default_attributes_parameters.pop('to_ids', None) # depends on the value
self._default_attributes_parameters.pop('deleted', None) # doesn't make sense to pre-set it
self._default_attributes_parameters.pop('data', None) # in case the original is a sample or an attachment
# Those values are set for the current object, if they exist, but not pop'd because they are still useful for the attributes
self.distribution: int = self._default_attributes_parameters.get('distribution', 5)
self.sharing_group_id: int = self._default_attributes_parameters.get('sharing_group_id', 0)
else:
self.distribution = 5 # Default to inherit
self.sharing_group_id = 0
self.standalone = standalone
| (self, name: str, strict: bool = False, standalone: bool = True, default_attributes_parameters: dict[str, typing.Any] = {}, **kwargs) -> NoneType |
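A minimal usage sketch of the constructor above: build an object from a template that ships with pymisp and attach it to an event (standalone=False because it lives inside the event):

from pymisp import MISPEvent, MISPObject

obj = MISPObject(name='file', standalone=False)
obj.add_attribute('filename', value='dropper.exe')
obj.add_attribute('md5', value='9e107d9d372bb6826bd81d3542a419d6')

event = MISPEvent()
event.info = 'object demo'
event.add_object(obj)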
24,913 | pymisp.mispevent | __repr__ | null | def __repr__(self) -> str:
if hasattr(self, 'name'):
return '<{self.__class__.__name__}(name={self.name})'.format(self=self)
return f'<{self.__class__.__name__}(NotInitialized)'
| (self) -> str |
24,914 | pymisp.mispevent | __setattr__ | null | def __setattr__(self, name: str, value: Any) -> None:
if name in ['first_seen', 'last_seen']:
value = _make_datetime(value)
if name == 'last_seen' and hasattr(self, 'first_seen') and self.first_seen > value:
logger.warning(f'last_seen ({value}) has to be after first_seen ({self.first_seen})')
if name == 'first_seen' and hasattr(self, 'last_seen') and self.last_seen < value:
logger.warning(f'first_seen ({value}) has to be before last_seen ({self.last_seen})')
super().__setattr__(name, value)
| (self, name: str, value: Any) -> NoneType |
24,918 | pymisp.tools.abstractgenerator | _detect_epoch | null | def _detect_epoch(self, timestamp: str | int | float) -> bool:
try:
tmp = float(timestamp)
if tmp < 30000000:
# Assuming the user doesn't want to report anything before datetime(1970, 12, 14, 6, 20)
# The date is most probably in the format 20180301
return False
return True
except ValueError:
return False
| (self, timestamp: str | int | float) -> bool |
24,919 | pymisp.mispevent | _load_template | null | def _load_template(self, template: dict[str, Any]) -> None:
self._definition = template
setattr(self, 'meta-category', self._definition['meta-category'])
self.template_uuid = self._definition['uuid']
self.description = self._definition['description']
self.template_version = self._definition['version']
| (self, template: dict[str, typing.Any]) -> NoneType |
24,920 | pymisp.mispevent | _load_template_path | null | def _load_template_path(self, template_path: Path | str) -> bool:
template = self._load_json(template_path)
if not template:
self._definition = None
return False
self._load_template(template)
return True
| (self, template_path: pathlib.Path | str) -> bool |
24,922 | pymisp.tools.abstractgenerator | _sanitize_timestamp | null | def _sanitize_timestamp(self, timestamp: datetime | date | dict[str, Any] | str | int | float | None = None) -> datetime:
if not timestamp:
return datetime.now()
if isinstance(timestamp, datetime):
return timestamp
elif isinstance(timestamp, date):
return datetime.combine(timestamp, datetime.min.time())
elif isinstance(timestamp, dict):
if not isinstance(timestamp['value'], datetime):
timestamp['value'] = parse(timestamp['value'])
return timestamp['value']
else: # Supported: float/int/string
if isinstance(timestamp, (str, int, float)) and self._detect_epoch(timestamp):
# It converts to the *local* datetime, which is consistent with the rest of the code.
return datetime.fromtimestamp(float(timestamp))
elif isinstance(timestamp, str):
return parse(timestamp)
else:
raise Exception(f'Unable to convert {timestamp} to a datetime.')
| (self, timestamp: Union[datetime.datetime, datetime.date, dict[str, Any], str, int, float, NoneType] = None) -> datetime.datetime |
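A sketch exercising each coercion branch above; the method is private, so this is illustration only, with the generator instance merely providing access to it:

from datetime import date, datetime
from pymisp.tools import AbstractMISPObjectGenerator

gen = AbstractMISPObjectGenerator('domain-ip')
assert isinstance(gen._sanitize_timestamp(1704067200), datetime)        # epoch branch
assert isinstance(gen._sanitize_timestamp('20180301'), datetime)        # parsed date string
assert isinstance(gen._sanitize_timestamp(date(2024, 1, 1)), datetime)  # date -> midnight
assert isinstance(gen._sanitize_timestamp(None), datetime)              # falls back to now()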
24,923 | pymisp.mispevent | _set_default | null | def _set_default(self) -> None:
if not hasattr(self, 'comment'):
self.comment = ''
if not hasattr(self, 'timestamp'):
self.timestamp = datetime.timestamp(datetime.now())
| (self) -> NoneType |
24,925 | pymisp.mispevent | _set_template | null | def _set_template(self, misp_objects_path_custom: Path | str | None = None, misp_objects_template_custom: dict[str, Any] | None = None) -> None:
if misp_objects_template_custom:
# A complete template was given to the constructor
self._load_template(misp_objects_template_custom)
self._known_template = True
else:
if misp_objects_path_custom:
# If misp_objects_path_custom is given, and an object with the given name exists, use that.
if isinstance(misp_objects_path_custom, str):
self.misp_objects_path = Path(misp_objects_path_custom)
else:
self.misp_objects_path = misp_objects_path_custom
# Try to get the template
self._known_template = self._load_template_path(self.misp_objects_path / self.name / 'definition.json')
if not self._known_template and self._strict:
raise UnknownMISPObjectTemplate(f'{self.name} is unknown in the MISP object directory.')
else:
# Then we have no meta-category, template_uuid, description and template_version
pass
| (self, misp_objects_path_custom: Union[pathlib.Path, str, NoneType] = None, misp_objects_template_custom: Optional[dict[str, Any]] = None) -> NoneType |
24,926 | pymisp.mispevent | _to_feed | null | def _to_feed(self, with_distribution: bool=False) -> dict[str, Any]:
if with_distribution:
self._fields_for_feed.add('distribution')
if not hasattr(self, 'template_uuid'): # workaround for old events where the template_uuid was not yet mandatory
self.template_uuid = str(uuid.uuid5(uuid.UUID("9319371e-2504-4128-8410-3741cebbcfd3"), self.name))
if not hasattr(self, 'description'): # workaround for old events where description is not always set
self.description = '<unknown>'
if not hasattr(self, 'meta-category'): # workaround for old events where meta-category is not always set
setattr(self, 'meta-category', 'misc')
to_return = super()._to_feed()
if self.references:
to_return['ObjectReference'] = [reference._to_feed() for reference in self.references]
if with_distribution:
try:
to_return['SharingGroup'] = self.SharingGroup._to_feed()
except AttributeError:
pass
return to_return
| (self, with_distribution: bool = False) -> dict[str, typing.Any] |
24,927 | pymisp.mispevent | _validate | null | def _validate(self) -> bool:
"""Make sure the object we're creating has the required fields"""
if not self._definition:
raise PyMISPError('No object definition available, unable to validate.')
if self._definition.get('required'):
required_missing = set(self._definition['required']) - set(self._fast_attribute_access.keys())
if required_missing:
raise InvalidMISPObject(f'{required_missing} are required.')
if self._definition.get('requiredOneOf'):
if not set(self._definition['requiredOneOf']) & set(self._fast_attribute_access.keys()):
# We expect at least one of the object_relations in requiredOneOf, and none is present
raise InvalidMISPObject('At least one of the following attributes is required: {}'.format(', '.join(self._definition['requiredOneOf'])))
for rel, attrs in self._fast_attribute_access.items():
if len(attrs) == 1:
# object_relation's here only once, everything's cool, moving on
continue
if not self._definition['attributes'][rel].get('multiple'):
# object_relation's here more than once, but it isn't allowed in the template.
raise InvalidMISPObject(f'Multiple occurrences of {rel} are not allowed')
return True
| (self) -> bool |
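A hedged sketch of the validation path: with strict=True and none of the template's required relations filled in, _validate raises InvalidMISPObject (that the 'file' template declares a requiredOneOf set is an assumption based on the upstream misp-objects repository):

from pymisp import MISPObject
from pymisp.exceptions import InvalidMISPObject

obj = MISPObject('file', strict=True)
try:
    obj._validate()
except InvalidMISPObject as exc:
    print(exc)    # at least one of the requiredOneOf relations is missing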
24,928 | pymisp.mispevent | add_attribute | Add an attribute.
:param object_relation: The object relation of the attribute you're adding to the object
:param simple_value: The value
:param value: dictionary with all the keys supported by MISPAttribute
Note: as long as PyMISP knows about the object template, only the object_relation and the simple_value are required.
If PyMISP doesn't know the template, you also **must** pass a type.
All the other options that can be passed along when creating an attribute (comment, IDS flag, ...)
will be either taken out of the template, or out of the default setting for the type as defined on the MISP instance.
| def add_attribute(self, object_relation: str, simple_value: str | int | float | None = None, **value) -> MISPAttribute | None: # type: ignore[no-untyped-def]
"""Add an attribute.
:param object_relation: The object relation of the attribute you're adding to the object
:param simple_value: The value
:param value: dictionary with all the keys supported by MISPAttribute
Note: as long as PyMISP knows about the object template, only the object_relation and the simple_value are required.
If PyMISP doesn't know the template, you also **must** pass a type.
All the other options that can be passed along when creating an attribute (comment, IDS flag, ...)
will be either taken out of the template, or out of the default setting for the type as defined on the MISP instance.
"""
if simple_value is not None: # /!\ The value *can* be 0
value['value'] = simple_value
if value.get('value') is None:
logger.warning(f"The value of the attribute you're trying to add is None, skipping it. Object relation: {object_relation}")
return None
else:
if isinstance(value['value'], bytes):
# That shouldn't happen, but we live in the real world, and it does.
# So we try to decode (otherwise, MISP barf), and raise a warning if needed.
try:
value['value'] = value['value'].decode()
except Exception:
logger.warning("The value of the attribute you're trying to add is a bytestream ({!r}), and we're unable to make it a string.".format(value['value']))
return None
# Make sure we're not adding an empty value.
if isinstance(value['value'], str):
value['value'] = value['value'].strip().strip('\x00')
if value['value'] == '':
logger.warning(f"The value of the attribute you're trying to add is an empty string, skipping it. Object relation: {object_relation}")
return None
if self._known_template and self._definition:
if object_relation in self._definition['attributes']:
attribute = MISPObjectAttribute(self._definition['attributes'][object_relation])
else:
# Woopsie, this object_relation is unknown, no sane defaults for you.
logger.warning(f"The template ({self.name}) doesn't have the object_relation ({object_relation}) you're trying to add. If you are creating a new event to push to MISP, please review your code so it matches the template.")
attribute = MISPObjectAttribute({})
else:
attribute = MISPObjectAttribute({})
# Overwrite the parameters of self._default_attributes_parameters with the ones of value
attribute.from_dict(object_relation=object_relation, **{**self._default_attributes_parameters, **value})
self.__fast_attribute_access[object_relation].append(attribute)
self.Attribute.append(attribute)
self.edited = True
return attribute
| (self, object_relation: str, simple_value: Union[str, int, float, NoneType] = None, **value) -> pymisp.mispevent.MISPAttribute | None |
24,929 | pymisp.mispevent | add_attributes | Add multiple attributes with the same object_relation.
Helper for object_relation when multiple is True in the template.
It is the same as calling multiple times add_attribute with the same object_relation.
| def add_attributes(self, object_relation: str, *attributes: Sequence[str | dict[str, Any] | MISPAttribute]) -> list[MISPAttribute | None]:
'''Add multiple attributes with the same object_relation.
Helper for object_relation when multiple is True in the template.
It is the same as calling multiple times add_attribute with the same object_relation.
'''
to_return = []
for attribute in attributes:
if isinstance(attribute, MISPAttribute):
a = self.add_attribute(object_relation, **attribute.to_dict())
elif isinstance(attribute, dict):
a = self.add_attribute(object_relation, **attribute) # type: ignore[misc]
else:
a = self.add_attribute(object_relation, value=attribute)
to_return.append(a)
return to_return
| (self, object_relation: str, *attributes: Sequence[str | dict[str, Any] | pymisp.mispevent.MISPAttribute]) -> list[pymisp.mispevent.MISPAttribute | None] |
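A sketch pairing the two helpers above on one object; it assumes, as in the upstream 'domain-ip' template, that the 'ip' relation allows multiple values:

from pymisp import MISPObject

obj = MISPObject('domain-ip')
obj.add_attribute('domain', value='example.com', comment='seen in campaign X')
obj.add_attributes('ip', '198.51.100.1', '203.0.113.7')    # one call, two attributes
print(len(obj.Attribute))                                  # 3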
24,930 | pymisp.mispevent | add_reference | Add a link (uuid) to another object | def add_reference(self, referenced_uuid: AbstractMISP | str, relationship_type: str, comment: str | None = None, **kwargs) -> MISPObjectReference: # type: ignore[no-untyped-def]
"""Add a link (uuid) to another object"""
if isinstance(referenced_uuid, AbstractMISP):
# Allow to pass an object or an attribute instead of its UUID
referenced_uuid = referenced_uuid.uuid
if 'object_uuid' in kwargs and not kwargs.get('object_uuid'):
# Unexplained None in object_uuid key -> https://github.com/MISP/PyMISP/issues/640
kwargs.pop('object_uuid')
object_uuid = self.uuid
elif kwargs.get('object_uuid'):
# Load existing object
object_uuid = kwargs.pop('object_uuid')
else:
# New reference
object_uuid = self.uuid
reference = MISPObjectReference()
reference.from_dict(object_uuid=object_uuid, referenced_uuid=referenced_uuid,
relationship_type=relationship_type, comment=comment, **kwargs)
self.ObjectReference.append(reference)
self.edited = True
return reference
| (self, referenced_uuid: pymisp.abstract.AbstractMISP | str, relationship_type: str, comment: Optional[str] = None, **kwargs) -> pymisp.mispevent.MISPObjectReference |
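A sketch linking two objects via add_reference: passing the target object itself works because AbstractMISP instances are reduced to their uuid, and the relationship type is a free-form string:

from pymisp import MISPObject

a = MISPObject('domain-ip')
a.add_attribute('domain', value='example.com')
b = MISPObject('domain-ip')
b.add_attribute('domain', value='example.org')
a.add_reference(b, 'related-to', comment='same infrastructure')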
24,932 | pymisp.mispevent | delete | Mark the object as deleted (soft delete) | def delete(self) -> None:
"""Mark the object as deleted (soft delete)"""
self.deleted = True
for a in self.attributes:
a.delete()
| (self) -> NoneType |
24,933 | pymisp.mispevent | force_misp_objects_path_custom | null | def force_misp_objects_path_custom(self, misp_objects_path_custom: Path | str, object_name: str | None = None) -> None:
if object_name:
self.name = object_name
self._set_template(misp_objects_path_custom)
| (self, misp_objects_path_custom: pathlib.Path | str, object_name: Optional[str] = None) -> NoneType |
24,934 | pymisp.mispevent | from_dict | null | def from_dict(self, **kwargs) -> None: # type: ignore[no-untyped-def]
if 'Object' in kwargs:
kwargs = kwargs['Object']
if self._known_template:
if kwargs.get('template_uuid') and kwargs['template_uuid'] != self.template_uuid:
if self._strict:
raise UnknownMISPObjectTemplate('UUID of the object is different from the one of the template.')
else:
self._known_template = False
if kwargs.get('template_version') and int(kwargs['template_version']) != self.template_version:
if self._strict:
raise UnknownMISPObjectTemplate('Version of the object ({}) is different from the one of the template ({}).'.format(kwargs['template_version'], self.template_version))
else:
self._known_template = False
# depending on how the object is initialized, we may have a few keys to pop
kwargs.pop('misp_objects_template_custom', None)
kwargs.pop('misp_objects_path_custom', None)
if 'distribution' in kwargs and kwargs['distribution'] is not None:
self.distribution = kwargs.pop('distribution')
self.distribution = int(self.distribution)
if self.distribution not in [0, 1, 2, 3, 4, 5]:
raise NewAttributeError(f'{self.distribution} is invalid, the distribution has to be in 0, 1, 2, 3, 4, 5')
if kwargs.get('timestamp'):
ts = kwargs.pop('timestamp')
if isinstance(ts, datetime):
self.timestamp = ts
else:
self.timestamp = datetime.fromtimestamp(int(ts), timezone.utc)
if kwargs.get('first_seen'):
fs = kwargs.pop('first_seen')
try:
# Faster
self.first_seen = datetime.fromisoformat(fs)
except Exception:
# Use __setattr__
self.first_seen = fs
if kwargs.get('last_seen'):
ls = kwargs.pop('last_seen')
try:
# Faster
self.last_seen = datetime.fromisoformat(ls)
except Exception:
# Use __setattr__
self.last_seen = ls
if kwargs.get('Attribute'):
[self.add_attribute(**a) for a in kwargs.pop('Attribute')]
if kwargs.get('ObjectReference'):
[self.add_reference(**r) for r in kwargs.pop('ObjectReference')]
if kwargs.get('SharingGroup'):
self.SharingGroup = MISPSharingGroup()
self.SharingGroup.from_dict(**kwargs.pop('SharingGroup'))
# Not supported yet - https://github.com/MISP/PyMISP/issues/168
# if kwargs.get('Tag'):
# for tag in kwargs.pop('Tag'):
# self.add_tag(tag)
super().from_dict(**kwargs)
| (self, **kwargs) -> NoneType |
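A sketch of re-hydrating an object from a dict, including the optional top-level 'Object' wrapper the method unwraps first:

from pymisp import MISPObject

obj = MISPObject('domain-ip')
obj.from_dict(**{'Object': {
    'name': 'domain-ip',
    'distribution': 5,
    'Attribute': [{'object_relation': 'domain',
                   'type': 'domain',
                   'value': 'example.com'}],
}})
print(obj.Attribute[0].value)    # 'example.com'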
24,936 | pymisp.tools.abstractgenerator | generate_attributes | Contains the logic where all the values of the object are gathered | def generate_attributes(self) -> None:
"""Contains the logic where all the values of the object are gathered"""
if hasattr(self, '_parameters') and self._definition is not None:
for object_relation in self._definition['attributes']:
value = self._parameters.pop(object_relation, None)
if not value:
continue
if isinstance(value, dict):
self.add_attribute(object_relation, **value)
elif isinstance(value, list):
self.add_attributes(object_relation, *value)
else:
# Assume it is the value only
self.add_attribute(object_relation, value=value)
if self._strict and self._known_template and self._parameters:
raise InvalidMISPObject('Some object relations are unknown in the template and could not be attached: {}'.format(', '.join(self._parameters)))
| (self) -> NoneType |
24,938 | pymisp.mispevent | get_attributes_by_relation | Returns the list of attributes with the given object relation in the object | def get_attributes_by_relation(self, object_relation: str) -> list[MISPAttribute]:
'''Returns the list of attributes with the given object relation in the object'''
return self._fast_attribute_access.get(object_relation, [])
| (self, object_relation: str) -> list[pymisp.mispevent.MISPAttribute] |
24,939 | pymisp.mispevent | has_attributes_by_relation | True if all the relations in the list are defined in the object | def has_attributes_by_relation(self, list_of_relations: list[str]) -> bool:
'''True if all the relations in the list are defined in the object'''
return all(relation in self._fast_attribute_access for relation in list_of_relations)
| (self, list_of_relations: list[str]) -> bool |
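A short sketch of the relation lookups above:

from pymisp import MISPObject

obj = MISPObject('domain-ip')
obj.add_attribute('domain', value='example.com')
print(obj.has_attributes_by_relation(['domain']))          # True
print(obj.has_attributes_by_relation(['domain', 'ip']))    # False: 'ip' not set
print(len(obj.get_attributes_by_relation('domain')))       # 1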