Dataset schema (per-column value type and string-length range):

repository_name: string, length 5-67
func_path_in_repository: string, length 4-234
func_name: string, length 0-314
whole_func_string: string, length 52-3.87M
language: categorical, 6 classes
func_code_string: string, length 52-3.87M
func_documentation_string: string, length 1-47.2k
func_code_url: string, length 85-339
ml31415/numpy-groupies
numpy_groupies/utils.py
get_func
def get_func(func, aliasing, implementations):
    """ Return the key of a found implementation or the func itself """
    try:
        func_str = aliasing[func]
    except KeyError:
        if callable(func):
            return func
    else:
        if func_str in implementations:
            return func_str
        if func_str.startswith('nan') and \
                func_str[3:] in funcs_no_separate_nan:
            raise ValueError("%s does not have a nan-version" % func_str[3:])
        else:
            raise NotImplementedError("No such function available")
    raise ValueError("func %s is neither a valid function string nor a "
                     "callable object" % func)
python
Return the key of a found implementation or the func itself
https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/utils.py#L118-L134
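A minimal usage sketch, with toy aliasing/implementations tables standing in for the module-level dicts that numpy_groupies actually passes in:

import numpy as np
from numpy_groupies.utils import get_func

aliasing = {'sum': 'sum', 'add': 'sum', np.sum: 'sum'}  # toy alias table
implementations = {'sum': np.add}                       # toy implementation table

print(get_func('add', aliasing, implementations))  # known alias -> 'sum'
custom = lambda grp: grp[0]
print(get_func(custom, aliasing, implementations) is custom)  # unknown callable returned as-is -> True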
ml31415/numpy-groupies
numpy_groupies/utils_numpy.py
minimum_dtype
def minimum_dtype(x, dtype=np.bool_):
    """Returns the "most basic" dtype which represents `x` properly and
    provides at least the same value range as the specified dtype."""

    def check_type(x, dtype):
        try:
            converted = dtype.type(x)
        except (ValueError, OverflowError):
            return False  # False if some overflow has happened
        return converted == x or np.isnan(x)

    def type_loop(x, dtype, dtype_dict, default=None):
        while True:
            try:
                dtype = np.dtype(dtype_dict[dtype.name])
                if check_type(x, dtype):
                    return np.dtype(dtype)
            except KeyError:
                if default is not None:
                    return np.dtype(default)
                raise ValueError("Cannot determine dtype of %r" % x)

    dtype = np.dtype(dtype)
    if check_type(x, dtype):
        return dtype

    if np.issubdtype(dtype, np.inexact):
        return type_loop(x, dtype, _next_float_dtype)
    else:
        return type_loop(x, dtype, _next_int_dtype, default=np.float32)
python
Returns the "most basic" dtype which represents `x` properly and provides at least the same value range as the specified dtype.
https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/utils_numpy.py#L60-L90
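A quick sketch of the promotion behaviour (the exact dtype returned depends on the module's _next_int_dtype/_next_float_dtype chains, so the comments are indicative rather than guaranteed):

import numpy as np
from numpy_groupies.utils_numpy import minimum_dtype

print(minimum_dtype(1, np.bool_))    # bool already represents 1 exactly
print(minimum_dtype(300, np.bool_))  # promoted along the int chain until 300 fits
print(minimum_dtype(0.5, np.bool_))  # no int type matches 0.5 -> falls back to the float default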
ml31415/numpy-groupies
numpy_groupies/utils_numpy.py
input_validation
def input_validation(group_idx, a, size=None, order='C', axis=None,
                     ravel_group_idx=True, check_bounds=True):
    """ Do some fairly extensive checking of group_idx and a, trying to
    give the user as much help as possible with what is wrong. Also,
    convert ndim-indexing to 1d indexing.
    """
    if not isinstance(a, (int, float, complex)):
        a = np.asanyarray(a)
    group_idx = np.asanyarray(group_idx)

    if not np.issubdtype(group_idx.dtype, np.integer):
        raise TypeError("group_idx must be of integer type")

    # This check works for multidimensional indexing as well
    if check_bounds and np.any(group_idx < 0):
        raise ValueError("negative indices not supported")

    ndim_idx = np.ndim(group_idx)
    ndim_a = np.ndim(a)

    # Deal with the axis arg: if present, then turn 1d indexing into
    # multi-dimensional indexing along the specified axis.
    if axis is None:
        if ndim_a > 1:
            raise ValueError("a must be scalar or 1 dimensional, use .ravel to"
                             " flatten. Alternatively specify axis.")
    elif axis >= ndim_a or axis < -ndim_a:
        raise ValueError("axis arg too large for np.ndim(a)")
    else:
        axis = axis if axis >= 0 else ndim_a + axis  # negative indexing
        if ndim_idx > 1:
            # TODO: we could support a sequence of axis values for multiple
            # dimensions of group_idx.
            raise NotImplementedError("only 1d indexing currently "
                                      "supported with axis arg.")
        elif a.shape[axis] != len(group_idx):
            raise ValueError("a.shape[axis] doesn't match length of group_idx.")
        elif size is not None and not np.isscalar(size):
            raise NotImplementedError("when using axis arg, size must be "
                                      "None or scalar.")
        else:
            # Create the broadcast-ready multidimensional indexing.
            # Note the user could do this themselves, so this is
            # very much just a convenience.
            size_in = np.max(group_idx) + 1 if size is None else size
            group_idx_in = group_idx
            group_idx = []
            size = []
            for ii, s in enumerate(a.shape):
                ii_idx = group_idx_in if ii == axis else np.arange(s)
                ii_shape = [1] * ndim_a
                ii_shape[ii] = s
                group_idx.append(ii_idx.reshape(ii_shape))
                size.append(size_in if ii == axis else s)
            # Use the indexing, and return. It's a bit simpler than
            # trying to keep all the logic below happy.
            group_idx = np.ravel_multi_index(group_idx, size, order=order,
                                             mode='raise')
            flat_size = np.prod(size)
            ndim_idx = ndim_a
            return group_idx.ravel(), a.ravel(), flat_size, ndim_idx, size

    if ndim_idx == 1:
        if size is None:
            size = np.max(group_idx) + 1
        else:
            if not np.isscalar(size):
                raise ValueError("output size must be scalar or None")
            if check_bounds and np.any(group_idx > size - 1):
                raise ValueError("one or more indices are too large for "
                                 "size %d" % size)
        flat_size = size
    else:
        if size is None:
            size = np.max(group_idx, axis=1) + 1
        elif np.isscalar(size):
            raise ValueError("output size must be of length %d"
                             % len(group_idx))
        elif len(size) != len(group_idx):
            raise ValueError("%d sizes given, but %d output dimensions "
                             "specified in index"
                             % (len(size), len(group_idx)))
        if ravel_group_idx:
            group_idx = np.ravel_multi_index(group_idx, size, order=order,
                                             mode='raise')
        flat_size = np.prod(size)

    if not (np.ndim(a) == 0 or len(a) == group_idx.size):
        raise ValueError("group_idx and a must be of the same length, or a"
                         " can be scalar")

    return group_idx, a, flat_size, ndim_idx, size
python
Do some fairly extensive checking of group_idx and a, trying to give the user as much help as possible with what is wrong. Also, convert ndim-indexing to 1d indexing.
https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/utils_numpy.py#L189-L280
ml31415/numpy-groupies
numpy_groupies/utils_numpy.py
multi_arange
def multi_arange(n):
    """
    By example:

        #     0  1  2  3  4  5  6  7  8
        n =  [0, 0, 3, 0, 0, 2, 0, 2, 1]
        res = [0, 1, 2, 0, 1, 0, 1, 0]

    That is, it is equivalent to something like:

        hstack(arange(n_i) for n_i in n)

    This version seems quite a bit faster, at least for some possible
    inputs, and at any rate it encapsulates a task in a function.
    """
    if n.ndim != 1:
        raise ValueError("n is supposed to be 1d array.")

    n_mask = n.astype(bool)
    n_cumsum = np.cumsum(n)
    ret = np.ones(n_cumsum[-1] + 1, dtype=int)
    ret[n_cumsum[n_mask]] -= n[n_mask]
    ret[0] -= 1
    return np.cumsum(ret)[:-1]
python
By example: # 0 1 2 3 4 5 6 7 8 n = [0, 0, 3, 0, 0, 2, 0, 2, 1] res = [0, 1, 2, 0, 1, 0, 1, 0] That is, it is equivalent to something like: hstack(arange(n_i) for n_i in n) This version seems quite a bit faster, at least for some possible inputs, and at any rate it encapsulates a task in a function.
https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/utils_numpy.py#L309-L332
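The docstring example, checked against the naive hstack construction it replaces:

import numpy as np
from numpy_groupies.utils_numpy import multi_arange

n = np.array([0, 0, 3, 0, 0, 2, 0, 2, 1])
naive = np.hstack([np.arange(n_i) for n_i in n])
print(multi_arange(n))                         # [0 1 2 0 1 0 1 0]
print(np.array_equal(multi_arange(n), naive))  # True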
ml31415/numpy-groupies
numpy_groupies/utils_numpy.py
label_contiguous_1d
def label_contiguous_1d(X):
    """
    WARNING: API for this function is liable to change!!!

    By example:

        X =      [F T T F F T F F F T T T]
        result = [0 1 1 0 0 2 0 0 0 3 3 3]

    Or:

        X =      [0 3 3 0 0 5 5 5 1 1 0 2]
        result = [0 1 1 0 0 2 2 2 3 3 0 4]

    The ``0`` or ``False`` elements of ``X`` are labeled as ``0`` in the
    output. If ``X`` is a boolean array, each contiguous block of ``True``
    is given an integer label; if ``X`` is not boolean, then each
    contiguous block of identical values is given an integer label.
    Integer labels are 1, 2, 3, ... (i.e. they start at 1 and increase by
    1 for each block, with no skipped numbers).
    """
    if X.ndim != 1:
        raise ValueError("this is for 1d masks only.")

    is_start = np.empty(len(X), dtype=bool)
    is_start[0] = X[0]  # True if X[0] is True or non-zero

    if X.dtype.kind == 'b':
        is_start[1:] = ~X[:-1] & X[1:]
        M = X
    else:
        M = X.astype(bool)
        is_start[1:] = X[:-1] != X[1:]
        is_start[~M] = False

    L = np.cumsum(is_start)
    L[~M] = 0
    return L
python
WARNING: API for this function is liable to change!!! By example: X = [F T T F F T F F F T T T] result = [0 1 1 0 0 2 0 0 0 3 3 3] Or: X = [0 3 3 0 0 5 5 5 1 1 0 2] result = [0 1 1 0 0 2 2 2 3 3 0 4] The ``0`` or ``False`` elements of ``X`` are labeled as ``0`` in the output. If ``X`` is a boolean array, each contiguous block of ``True`` is given an integer label; if ``X`` is not boolean, then each contiguous block of identical values is given an integer label. Integer labels are 1, 2, 3, ... (i.e. they start at 1 and increase by 1 for each block, with no skipped numbers).
https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/utils_numpy.py#L335-L372
ml31415/numpy-groupies
numpy_groupies/utils_numpy.py
relabel_groups_unique
def relabel_groups_unique(group_idx):
    """
    See also ``relabel_groups_masked``.

        group_idx: [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5]
        ret:       [0 3 3 3 0 2 4 2 0 1 1 0 3 4 4]

    Description of above: unique groups in input were ``1, 2, 3, 5``, i.e.
    ``4`` was missing, so group 5 was relabeled to be ``4``. Relabeling
    maintains order, just "compressing" the higher numbers to fill gaps.
    """
    keep_group = np.zeros(np.max(group_idx) + 1, dtype=bool)
    keep_group[0] = True
    keep_group[group_idx] = True
    return relabel_groups_masked(group_idx, keep_group)
python
See also ``relabel_groups_masked``. group_idx: [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5] ret: [0 3 3 3 0 2 4 2 0 1 1 0 3 4 4] Description of above: unique groups in input were ``1, 2, 3, 5``, i.e. ``4`` was missing, so group 5 was relabeled to be ``4``. Relabeling maintains order, just "compressing" the higher numbers to fill gaps.
https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/utils_numpy.py#L375-L391
ml31415/numpy-groupies
numpy_groupies/utils_numpy.py
relabel_groups_masked
def relabel_groups_masked(group_idx, keep_group):
    """
        group_idx:  [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5]

                     0 1 2 3 4 5
        keep_group: [0 1 0 1 1 1]

        ret:        [0 2 2 2 0 0 4 0 0 1 1 0 2 4 4]

    Description of above in words: remove group 2, and relabel groups 3, 4,
    and 5 to be 2, 3 and 4 respectively, in order to fill the gap. Note that
    group 4 was never used in the input group_idx, but the user-supplied
    mask said to keep group 4, so group 5 is only moved up by one place to
    fill the gap created by removing group 2.

    That is, the mask describes which groups to remove; the remaining groups
    are relabeled to remove the gaps created by the falsy elements in
    ``keep_group``. Note that ``keep_group[0]`` has no particular meaning
    because it refers to the zero group, which cannot be "removed".

    ``keep_group`` should be bool and ``group_idx`` int. Values in
    ``group_idx`` can be in any order.
    """
    keep_group = keep_group.astype(bool, copy=not keep_group[0])
    if not keep_group[0]:  # ensuring keep_group[0] is True makes life easier
        keep_group[0] = True

    relabel = np.zeros(keep_group.size, dtype=group_idx.dtype)
    relabel[keep_group] = np.arange(np.count_nonzero(keep_group))
    return relabel[group_idx]
python
group_idx: [0 3 3 3 0 2 5 2 0 1 1 0 3 5 5] 0 1 2 3 4 5 keep_group: [0 1 0 1 1 1] ret: [0 2 2 2 0 0 4 0 0 1 1 0 2 4 4] Description of above in words: remove group 2, and relabel groups 3, 4, and 5 to be 2, 3 and 4 respectively, in order to fill the gap. Note that group 4 was never used in the input group_idx, but the user-supplied mask said to keep group 4, so group 5 is only moved up by one place to fill the gap created by removing group 2. That is, the mask describes which groups to remove; the remaining groups are relabeled to remove the gaps created by the falsy elements in ``keep_group``. Note that ``keep_group[0]`` has no particular meaning because it refers to the zero group, which cannot be "removed". ``keep_group`` should be bool and ``group_idx`` int. Values in ``group_idx`` can be in any order.
https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/utils_numpy.py#L394-L423
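Replaying the docstring example to confirm the relabeling:

import numpy as np
from numpy_groupies.utils_numpy import relabel_groups_masked

group_idx = np.array([0, 3, 3, 3, 0, 2, 5, 2, 0, 1, 1, 0, 3, 5, 5])
keep_group = np.array([0, 1, 0, 1, 1, 1], dtype=bool)
print(relabel_groups_masked(group_idx, keep_group))
# [0 2 2 2 0 0 4 0 0 1 1 0 2 4 4] -- group 2 removed, 3/4/5 shifted down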
ml31415/numpy-groupies
numpy_groupies/aggregate_numpy.py
_array
def _array(group_idx, a, size, fill_value, dtype=None):
    """groups a into separate arrays, keeping the order intact."""
    if fill_value is not None and not (np.isscalar(fill_value) or
                                       len(fill_value) == 0):
        raise ValueError("fill_value must be None, a scalar or an empty "
                         "sequence")
    order_group_idx = np.argsort(group_idx, kind='mergesort')
    counts = np.bincount(group_idx, minlength=size)
    ret = np.split(a[order_group_idx], np.cumsum(counts)[:-1])
    ret = np.asanyarray(ret)
    if fill_value is None or np.isscalar(fill_value):
        _fill_untouched(group_idx, ret, fill_value)
    return ret
python
groups a into separate arrays, keeping the order intact.
https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numpy.py#L188-L200
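_array is private, but it backs the public func='array' aggregation, so a rough usage sketch goes through npg.aggregate (output layout shown as expected from the code above):

import numpy as np
import numpy_groupies as npg

group_idx = np.array([0, 0, 2, 2, 2])
a = np.array([10, 20, 30, 40, 50])

# one sub-array per group, order preserved; untouched group 1 gets fill_value
print(npg.aggregate(group_idx, a, func='array', fill_value=None))
# roughly: [array([10, 20]) None array([30, 40, 50])]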
ml31415/numpy-groupies
numpy_groupies/aggregate_numpy.py
_generic_callable
def _generic_callable(group_idx, a, size, fill_value, dtype=None,
                      func=lambda g: g, **kwargs):
    """groups a by group_idx, and then applies func to each group in turn,
    placing the results in an array."""
    groups = _array(group_idx, a, size, ())
    ret = np.full(size, fill_value, dtype=dtype or np.float64)

    for i, grp in enumerate(groups):
        if np.ndim(grp) == 1 and len(grp) > 0:
            ret[i] = func(grp)
    return ret
python
groups a by group_idx, and then applies func to each group in turn, placing the results in an array.
https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numpy.py#L203-L213
ml31415/numpy-groupies
numpy_groupies/aggregate_numpy.py
_cumsum
def _cumsum(group_idx, a, size, fill_value=None, dtype=None):
    """
    N to N aggregate operation of cumsum. Perform cumulative sum for each group.

    >>> group_idx = np.array([4, 3, 3, 4, 4, 1, 1, 1, 7, 8, 7, 4, 3, 3, 1, 1])
    >>> a = np.array([3, 4, 1, 3, 9, 9, 6, 7, 7, 0, 8, 2, 1, 8, 9, 8])
    >>> _cumsum(group_idx, a, np.max(group_idx) + 1)
    array([ 3,  4,  5,  6, 15,  9, 15, 22,  7,  0, 15, 17,  6, 14, 31, 39])
    """
    sortidx = np.argsort(group_idx, kind='mergesort')
    invsortidx = np.argsort(sortidx, kind='mergesort')
    group_idx_srt = group_idx[sortidx]

    a_srt = a[sortidx]
    a_srt_cumsum = np.cumsum(a_srt, dtype=dtype)

    increasing = np.arange(len(a), dtype=int)
    group_starts = _min(group_idx_srt, increasing, size, fill_value=0)[group_idx_srt]
    a_srt_cumsum += -a_srt_cumsum[group_starts] + a_srt[group_starts]
    return a_srt_cumsum[invsortidx]
python
N to N aggregate operation of cumsum. Perform cumulative sum for each group. >>> group_idx = np.array([4, 3, 3, 4, 4, 1, 1, 1, 7, 8, 7, 4, 3, 3, 1, 1]) >>> a = np.array([3, 4, 1, 3, 9, 9, 6, 7, 7, 0, 8, 2, 1, 8, 9, 8]) >>> _cumsum(group_idx, a, np.max(group_idx) + 1) array([ 3, 4, 5, 6, 15, 9, 15, 22, 7, 0, 15, 17, 6, 14, 31, 39])
https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numpy.py#L216-L235
ml31415/numpy-groupies
numpy_groupies/aggregate_numpy.py
_fill_untouched
def _fill_untouched(idx, ret, fill_value):
    """any elements of ret not indexed by idx are set to fill_value."""
    untouched = np.ones_like(ret, dtype=bool)
    untouched[idx] = False
    ret[untouched] = fill_value
python
any elements of ret not indexed by idx are set to fill_value.
https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numpy.py#L296-L300
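The masking trick from _fill_untouched, replayed inline on a toy array:

import numpy as np

ret = np.array([7., 0., 9., 0., 0.])
idx = np.array([0, 2, 2])            # only positions 0 and 2 were ever written
untouched = np.ones_like(ret, dtype=bool)
untouched[idx] = False               # mark written positions
ret[untouched] = -1.                 # everything else gets the fill value
print(ret)                           # [ 7. -1.  9. -1. -1.]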
ml31415/numpy-groupies
numpy_groupies/benchmarks/generic.py
aggregate_grouploop
def aggregate_grouploop(*args, **kwargs):
    """wraps func in lambda which prevents aggregate_numpy from recognising
    and optimising it. Instead it groups and loops."""
    extrafuncs = {'allnan': allnan, 'anynan': anynan,
                  'first': itemgetter(0), 'last': itemgetter(-1),
                  'nanfirst': nanfirst, 'nanlast': nanlast}
    func = kwargs.pop('func')
    func = extrafuncs.get(func, func)
    if isinstance(func, str):
        raise NotImplementedError("Grouploop needs to be called with a function")
    return aggregate_numpy.aggregate(*args, func=lambda x: func(x), **kwargs)
python
wraps func in lambda which prevents aggregate_numpy from recognising and optimising it. Instead it groups and loops.
https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/benchmarks/generic.py#L13-L23
ml31415/numpy-groupies
numpy_groupies/aggregate_numpy_ufunc.py
_prod
def _prod(group_idx, a, size, fill_value, dtype=None):
    """Same as aggregate_numpy.py"""
    dtype = minimum_dtype_scalar(fill_value, dtype, a)
    ret = np.full(size, fill_value, dtype=dtype)
    if fill_value != 1:
        ret[group_idx] = 1  # product should start from 1
    np.multiply.at(ret, group_idx, a)
    return ret
python
Same as aggregate_numpy.py
https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numpy_ufunc.py#L50-L57
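The core np.multiply.at pattern, standalone: unlike ret[group_idx] *= a, the unbuffered ufunc applies every (index, value) pair, so repeated indices multiply in correctly:

import numpy as np

group_idx = np.array([0, 0, 1, 2, 2, 2])
a = np.array([2, 3, 5, 2, 2, 2])

ret = np.ones(3, dtype=a.dtype)   # product accumulator starts at 1
np.multiply.at(ret, group_idx, a)
print(ret)                        # [6 5 8]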
ml31415/numpy-groupies
numpy_groupies/aggregate_weave.py
c_func
def c_func(funcname, reverse=False, nans=False, scalar=False):
    """ Fill c_funcs with constructed code from the templates """
    varnames = ['group_idx', 'a', 'ret', 'counter']
    codebase = c_base_reverse if reverse else c_base
    iteration = c_iter_scalar[funcname] if scalar else c_iter[funcname]
    if scalar:
        varnames.remove('a')
    return codebase % dict(init=c_init(varnames), iter=iteration,
                           finish=c_finish.get(funcname, ''),
                           ri_redir=(c_ri_redir if nans else c_ri))
python
Fill c_funcs with constructed code from the templates
https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_weave.py#L154-L163
ml31415/numpy-groupies
numpy_groupies/aggregate_weave.py
step_indices
def step_indices(group_idx):
    """ Get the edges of areas within group_idx, which are filled with the
    same value """
    ilen = step_count(group_idx) + 1
    indices = np.empty(ilen, int)
    indices[0] = 0
    indices[-1] = group_idx.size
    inline(c_step_indices, ['group_idx', 'indices'],
           define_macros=c_macros, extra_compile_args=c_args)
    return indices
python
Get the edges of areas within group_idx, which are filled with the same value
https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_weave.py#L212-L221
takuti/flurs
flurs/utils/projection.py
RandomProjection.__create_proj_mat
def __create_proj_mat(self, size):
    """Create a random projection matrix

    [1] D. Achlioptas. Database-friendly random projections:
        Johnson-Lindenstrauss with binary coins.
    [2] P. Li, et al. Very sparse random projections.

    http://scikit-learn.org/stable/modules/random_projection.html#sparse-random-projection
    """
    # [1]
    # return np.random.choice([-np.sqrt(3), 0, np.sqrt(3)],
    #                         size=size, p=[1 / 6, 2 / 3, 1 / 6])

    # [2]
    s = 1 / self.density
    return np.random.choice([-np.sqrt(s / self.k), 0, np.sqrt(s / self.k)],
                            size=size,
                            p=[1 / (2 * s), 1 - 1 / s, 1 / (2 * s)])
python
Create a random projection matrix [1] D. Achlioptas. Database-friendly random projections: Johnson-Lindenstrauss with binary coins. [2] P. Li, et al. Very sparse random projections. http://scikit-learn.org/stable/modules/random_projection.html#sparse-random-projection
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/utils/projection.py#L72-L88
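Scheme [2] replayed standalone, assuming density = 1/sqrt(k) as suggested by Li et al. (self.density and self.k are instance attributes in the real class):

import numpy as np

k = 64
density = 1 / np.sqrt(k)
s = 1 / density

mat = np.random.choice([-np.sqrt(s / k), 0, np.sqrt(s / k)],
                       size=(1000, k),
                       p=[1 / (2 * s), 1 - 1 / s, 1 / (2 * s)])

# roughly a (1 - 1/s) fraction of entries should be exactly zero
print((mat == 0).mean(), 1 - 1 / s)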
takuti/flurs
flurs/datasets/movielens.py
load_movies
def load_movies(data_home, size):
    """Load movie genres as a context.

    Returns:
        dict of movie vectors: item_id -> numpy array (n_genre,)
    """
    all_genres = ['Action', 'Adventure', 'Animation', "Children's", 'Comedy',
                  'Crime', 'Documentary', 'Drama', 'Fantasy', 'Film-Noir',
                  'Horror', 'Musical', 'Mystery', 'Romance', 'Sci-Fi',
                  'Thriller', 'War', 'Western']
    n_genre = len(all_genres)

    movies = {}

    if size == '100k':
        with open(os.path.join(data_home, 'u.item'), encoding='ISO-8859-1') as f:
            lines = list(map(lambda l: l.rstrip().split('|'), f.readlines()))

        for line in lines:
            movie_vec = np.zeros(n_genre)
            for i, flg_chr in enumerate(line[-n_genre:]):
                if flg_chr == '1':
                    movie_vec[i] = 1.
            movie_id = int(line[0])
            movies[movie_id] = movie_vec
    elif size == '1m':
        with open(os.path.join(data_home, 'movies.dat'), encoding='ISO-8859-1') as f:
            lines = list(map(lambda l: l.rstrip().split('::'), f.readlines()))

        for item_id_str, title, genres in lines:
            movie_vec = np.zeros(n_genre)
            for genre in genres.split('|'):
                i = all_genres.index(genre)
                movie_vec[i] = 1.
            item_id = int(item_id_str)
            movies[item_id] = movie_vec

    return movies
python
Load movie genres as a context. Returns: dict of movie vectors: item_id -> numpy array (n_genre,)
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/datasets/movielens.py#L12-L62
takuti/flurs
flurs/datasets/movielens.py
load_users
def load_users(data_home, size):
    """Load user demographics as contexts.
    User ID -> {sex (M/F), age (7 groups), occupation (0-20; 21 types)}

    Returns:
        dict of user vectors: user_id -> numpy array (1+1+21,);
        (sex_flg + age_group + n_occupation,)
    """
    ages = [1, 18, 25, 35, 45, 50, 56, 999]

    users = {}

    if size == '100k':
        all_occupations = ['administrator', 'artist', 'doctor', 'educator',
                           'engineer', 'entertainment', 'executive',
                           'healthcare', 'homemaker', 'lawyer', 'librarian',
                           'marketing', 'none', 'other', 'programmer',
                           'retired', 'salesman', 'scientist', 'student',
                           'technician', 'writer']

        with open(os.path.join(data_home, 'u.user'), encoding='ISO-8859-1') as f:
            lines = list(map(lambda l: l.rstrip().split('|'), f.readlines()))

        for user_id_str, age_str, sex_str, occupation_str, zip_code in lines:
            user_vec = np.zeros(1 + 1 + 21)  # 1 categorical, 1 value, 21 categorical
            user_vec[0] = 0 if sex_str == 'M' else 1  # sex

            # age (ML1M is "age group", but 100k has actual "age")
            age = int(age_str)
            for i in range(7):
                if age >= ages[i] and age < ages[i + 1]:
                    user_vec[1] = i
                    break

            user_vec[2 + all_occupations.index(occupation_str)] = 1  # occupation (1-of-21)
            users[int(user_id_str)] = user_vec
    elif size == '1m':
        with open(os.path.join(data_home, 'users.dat'), encoding='ISO-8859-1') as f:
            lines = list(map(lambda l: l.rstrip().split('::'), f.readlines()))

        for user_id_str, sex_str, age_str, occupation_str, zip_code in lines:
            user_vec = np.zeros(1 + 1 + 21)  # 1 categorical, 1 value, 21 categorical
            user_vec[0] = 0 if sex_str == 'M' else 1  # sex
            user_vec[1] = ages.index(int(age_str))  # age group (1, 18, ...)
            user_vec[2 + int(occupation_str)] = 1  # occupation (1-of-21)
            users[int(user_id_str)] = user_vec

    return users
python
Load user demographics as contexts. User ID -> {sex (M/F), age (7 groups), occupation (0-20; 21 types)} Returns: dict of user vectors: user_id -> numpy array (1+1+21,); (sex_flg + age_group + n_occupation,)
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/datasets/movielens.py#L65-L124
takuti/flurs
flurs/datasets/movielens.py
load_ratings
def load_ratings(data_home, size):
    """Load all samples in the dataset.
    """
    if size == '100k':
        with open(os.path.join(data_home, 'u.data'), encoding='ISO-8859-1') as f:
            lines = list(map(lambda l: list(map(int, l.rstrip().split('\t'))),
                             f.readlines()))
    elif size == '1m':
        with open(os.path.join(data_home, 'ratings.dat'), encoding='ISO-8859-1') as f:
            lines = list(map(lambda l: list(map(int, l.rstrip().split('::'))),
                             f.readlines()))

    ratings = []

    for l in lines:
        # Since we consider positive-only feedback setting,
        # ratings < 5 will be excluded.
        if l[2] == 5:
            ratings.append(l)

    ratings = np.asarray(ratings)

    # sorted by timestamp
    return ratings[np.argsort(ratings[:, 3])]
python
Load all samples in the dataset.
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/datasets/movielens.py#L127-L148
takuti/flurs
flurs/datasets/movielens.py
delta
def delta(d1, d2, opt='d'):
    """Compute the difference between two given dates, in months ('m') or days ('d').
    """
    delta = 0

    if opt == 'm':
        while True:
            mdays = monthrange(d1.year, d1.month)[1]
            d1 += timedelta(days=mdays)
            if d1 <= d2:
                delta += 1
            else:
                break
    else:
        delta = (d2 - d1).days

    return delta
python
Compute the difference between two given dates, in months ('m') or days ('d').
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/datasets/movielens.py#L151-L167
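A short check of both modes (values verified by hand for these dates):

from datetime import datetime
from flurs.datasets.movielens import delta

d1 = datetime(1997, 9, 20)
d2 = datetime(1998, 4, 22)
print(delta(d1, d2))           # 214 (days)
print(delta(d1, d2, opt='m'))  # 7 (full months fit between the two dates)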
takuti/flurs
flurs/utils/feature_hash.py
n_feature_hash
def n_feature_hash(feature, dims, seeds):
    """N-hot-encoded feature hashing.

    Args:
        feature (str): Target feature represented as string.
        dims (list of int): Number of dimensions for each hash value.
        seeds (list of int): Seed of each hash function (mmh3).

    Returns:
        numpy 1d array: n-hot-encoded feature vector for `feature`.
    """
    vec = np.zeros(sum(dims))
    offset = 0

    for seed, dim in zip(seeds, dims):
        vec[offset:(offset + dim)] = feature_hash(feature, dim, seed)
        offset += dim

    return vec
python
N-hot-encoded feature hashing. Args: feature (str): Target feature represented as string. dims (list of int): Number of dimensions for each hash value. seeds (list of int): Seed of each hash function (mmh3). Returns: numpy 1d array: n-hot-encoded feature vector for `feature`.
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/utils/feature_hash.py#L5-L24
takuti/flurs
flurs/utils/feature_hash.py
feature_hash
def feature_hash(feature, dim, seed=123):
    """Feature hashing.

    Args:
        feature (str): Target feature represented as string.
        dim (int): Number of dimensions for a hash value.
        seed (int): Seed of a MurmurHash3 hash function.

    Returns:
        numpy 1d array: one-hot-encoded feature vector for `feature`.
    """
    vec = np.zeros(dim)
    i = mmh3.hash(feature, seed) % dim
    vec[i] = 1
    return vec
python
Feature hashing. Args: feature (str): Target feature represented as string. dim (int): Number of dimensions for a hash value. seed (int): Seed of a MurmurHash3 hash function. Returns: numpy 1d array: one-hot-encoded feature vector for `feature`.
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/utils/feature_hash.py#L27-L42
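The hashing trick replayed standalone (mmh3 is the MurmurHash3 binding the module imports; feature_hash_demo is an illustrative copy, not the library function):

import numpy as np
import mmh3

def feature_hash_demo(feature, dim, seed=123):
    vec = np.zeros(dim)
    vec[mmh3.hash(feature, seed) % dim] = 1  # Python's % keeps the index non-negative
    return vec

print(np.argmax(feature_hash_demo('user=42', 8)))          # deterministic bucket
print(np.argmax(feature_hash_demo('user=42', 8, seed=7)))  # another seed, likely another bucket
# n_feature_hash simply concatenates one such block per (dim, seed) pair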
takuti/flurs
flurs/utils/metric.py
count_true_positive
def count_true_positive(truth, recommend):
    """Count number of true positives from given sets of samples.

    Args:
        truth (numpy 1d array): Set of truth samples.
        recommend (numpy 1d array): Ordered set of recommended samples.

    Returns:
        int: Number of true positives.
    """
    tp = 0
    for r in recommend:
        if r in truth:
            tp += 1
    return tp
python
Count number of true positives from given sets of samples. Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. Returns: int: Number of true positives.
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/utils/metric.py#L4-L19
takuti/flurs
flurs/utils/metric.py
recall
def recall(truth, recommend, k=None):
    """Recall@k.

    Args:
        truth (numpy 1d array): Set of truth samples.
        recommend (numpy 1d array): Ordered set of recommended samples.
        k (int): Top-k items in `recommend` will be recommended.

    Returns:
        float: Recall@k.
    """
    if len(truth) == 0:
        if len(recommend) == 0:
            return 1.
        return 0.

    if k is None:
        k = len(recommend)

    return count_true_positive(truth, recommend[:k]) / float(truth.size)
python
Recall@k. Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. k (int): Top-k items in `recommend` will be recommended. Returns: float: Recall@k.
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/utils/metric.py#L22-L41
takuti/flurs
flurs/utils/metric.py
precision
def precision(truth, recommend, k=None):
    """Precision@k.

    Args:
        truth (numpy 1d array): Set of truth samples.
        recommend (numpy 1d array): Ordered set of recommended samples.
        k (int): Top-k items in `recommend` will be recommended.

    Returns:
        float: Precision@k.
    """
    if len(recommend) == 0:
        if len(truth) == 0:
            return 1.
        return 0.

    if k is None:
        k = len(recommend)

    return count_true_positive(truth, recommend[:k]) / float(k)
python
Precision@k. Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. k (int): Top-k items in `recommend` will be recommended. Returns: float: Precision@k.
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/utils/metric.py#L44-L63
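A toy run of the two top-k metrics (hand-checked: only item 5 from truth appears in the top-2):

import numpy as np
from flurs.utils.metric import precision, recall

truth = np.array([1, 3, 5])
recommend = np.array([5, 2, 3, 7])

print(precision(truth, recommend, k=2))  # 1 hit / k=2 -> 0.5
print(recall(truth, recommend, k=2))     # 1 hit / 3 truths -> 0.333...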
takuti/flurs
flurs/utils/metric.py
average_precision
def average_precision(truth, recommend):
    """Average Precision (AP).

    Args:
        truth (numpy 1d array): Set of truth samples.
        recommend (numpy 1d array): Ordered set of recommended samples.

    Returns:
        float: AP.
    """
    if len(truth) == 0:
        if len(recommend) == 0:
            return 1.
        return 0.

    tp = accum = 0.
    for n in range(recommend.size):
        if recommend[n] in truth:
            tp += 1.
            accum += (tp / (n + 1.))
    return accum / truth.size
python
Average Precision (AP). Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. Returns: float: AP.
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/utils/metric.py#L66-L87
takuti/flurs
flurs/utils/metric.py
auc
def auc(truth, recommend):
    """Area under the ROC curve (AUC).

    Args:
        truth (numpy 1d array): Set of truth samples.
        recommend (numpy 1d array): Ordered set of recommended samples.

    Returns:
        float: AUC.
    """
    tp = correct = 0.
    for r in recommend:
        if r in truth:
            # keep track of the number of true positives placed before
            tp += 1.
        else:
            correct += tp

    # number of all possible tp-fp pairs
    pairs = tp * (recommend.size - tp)

    # if there is no TP (or no FP), it's meaningless for this metric (i.e., AUC=0.5)
    if pairs == 0:
        return 0.5

    return correct / pairs
python
Area under the ROC curve (AUC). Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. Returns: float: AUC.
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/utils/metric.py#L90-L115
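Hand-checked corner cases for this pairwise AUC:

import numpy as np
from flurs.utils.metric import auc

truth = np.array([1, 2])
print(auc(truth, np.array([1, 2, 3, 4])))  # all TPs ranked first -> 1.0
print(auc(truth, np.array([3, 4, 1, 2])))  # all TPs ranked last  -> 0.0
print(auc(truth, np.array([1, 3, 2, 4])))  # 3 of 4 tp-fp pairs correct -> 0.75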
takuti/flurs
flurs/utils/metric.py
reciprocal_rank
def reciprocal_rank(truth, recommend):
    """Reciprocal Rank (RR).

    Args:
        truth (numpy 1d array): Set of truth samples.
        recommend (numpy 1d array): Ordered set of recommended samples.

    Returns:
        float: RR.
    """
    for n in range(recommend.size):
        if recommend[n] in truth:
            return 1. / (n + 1)
    return 0.
python
Reciprocal Rank (RR). Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. Returns: float: RR.
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/utils/metric.py#L118-L132
takuti/flurs
flurs/utils/metric.py
mpr
def mpr(truth, recommend):
    """Mean Percentile Rank (MPR).

    Args:
        truth (numpy 1d array): Set of truth samples.
        recommend (numpy 1d array): Ordered set of recommended samples.

    Returns:
        float: MPR.
    """
    if len(recommend) == 0 and len(truth) == 0:
        return 0.  # best
    elif len(recommend) == 0 or len(truth) == 0:
        return 100.  # worst

    accum = 0.
    n_recommend = recommend.size
    for t in truth:
        r = np.where(recommend == t)[0][0] / float(n_recommend)
        accum += r

    return accum * 100. / truth.size
python
Mean Percentile Rank (MPR). Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. Returns: float: MPR.
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/utils/metric.py#L135-L156
takuti/flurs
flurs/utils/metric.py
ndcg
def ndcg(truth, recommend, k=None):
    """Normalized Discounted Cumulative Gain (NDCG).

    Args:
        truth (numpy 1d array): Set of truth samples.
        recommend (numpy 1d array): Ordered set of recommended samples.
        k (int): Top-k items in `recommend` will be recommended.

    Returns:
        float: NDCG.
    """
    if k is None:
        k = len(recommend)

    def idcg(n_possible_truth):
        res = 0.
        for n in range(n_possible_truth):
            res += 1. / np.log2(n + 2)
        return res

    dcg = 0.
    for n, r in enumerate(recommend[:k]):
        if r not in truth:
            continue
        dcg += 1. / np.log2(n + 2)

    res_idcg = idcg(np.min([truth.size, k]))
    if res_idcg == 0.:
        return 0.
    return dcg / res_idcg
python
Normalized Discounted Cumulative Gain (NDCG). Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. k (int): Top-k items in `recommend` will be recommended. Returns: float: NDCG.
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/utils/metric.py#L159-L189
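A toy run (the second value is 1/log2(3) divided by the ideal DCG for two truths, about 0.387):

import numpy as np
from flurs.utils.metric import ndcg

truth = np.array([1, 2])
print(ndcg(truth, np.array([1, 2, 3]), k=2))  # perfect ranking -> 1.0
print(ndcg(truth, np.array([3, 1, 2]), k=2))  # one truth at rank 2 -> ~0.387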
takuti/flurs
flurs/base.py
RecommenderMixin.initialize
def initialize(self, *args):
    """Initialize a recommender by resetting stored users and items.
    """
    # number of observed users
    self.n_user = 0

    # store user data
    self.users = {}

    # number of observed items
    self.n_item = 0

    # store item data
    self.items = {}
python
Initialize a recommender by resetting stored users and items.
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/base.py#L11-L24
takuti/flurs
flurs/base.py
RecommenderMixin.register_user
def register_user(self, user):
    """For new users, append their information into the dictionaries.

    Args:
        user (User): User.
    """
    self.users[user.index] = {'known_items': set()}
    self.n_user += 1
python
For new users, append their information into the dictionaries. Args: user (User): User.
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/base.py#L45-L53
takuti/flurs
flurs/base.py
RecommenderMixin.scores2recos
def scores2recos(self, scores, candidates, rev=False):
    """Get recommendation list for a user u_index based on scores.

    Args:
        scores (numpy array; (n_target_items,)):
            Scores for the target items. Smaller score indicates a
            promising item.
        candidates (numpy array; (n_target_items,)):
            Target items' indices. Only these items are considered as the
            recommendation candidates.
        rev (bool): If true, return items in a descending order. An
            ascending order (i.e., smaller scores are more promising) is
            the default.

    Returns:
        (numpy array, numpy array): (Sorted list of items, Sorted scores).
    """
    sorted_indices = np.argsort(scores)

    if rev:
        sorted_indices = sorted_indices[::-1]

    return candidates[sorted_indices], scores[sorted_indices]
python
Get recommendation list for a user u_index based on scores. Args: scores (numpy array; (n_target_items,)): Scores for the target items. Smaller score indicates a promising item. candidates (numpy array; (n_target_items,)): Target items' indices. Only these items are considered as the recommendation candidates. rev (bool): If true, return items in a descending order. An ascending order (i.e., smaller scores are more promising) is the default. Returns: (numpy array, numpy array): (Sorted list of items, Sorted scores).
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/base.py#L115-L133
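The sorting logic replayed on toy data (the method itself just wraps this argsort):

import numpy as np

scores = np.array([0.9, 0.1, 0.5])
candidates = np.array([10, 20, 30])

order = np.argsort(scores)  # ascending: smaller score = more promising
print(candidates[order], scores[order])              # [20 30 10] [0.1 0.5 0.9]
print(candidates[order[::-1]], scores[order[::-1]])  # what rev=True returns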
takuti/flurs
flurs/evaluator.py
Evaluator.fit
def fit(self, train_events, test_events, n_epoch=1):
    """Train a model using the first 30% positive events to avoid cold-start.

    Evaluation of this batch training is done by using the next 20% positive
    events. After the batch SGD training, the models are incrementally
    updated by using the 20% test events.

    Args:
        train_events (list of Event): Positive training events (0-30%).
        test_events (list of Event): Test events (30-50%).
        n_epoch (int): Number of epochs for the batch training.
    """
    # make initial status for batch training
    for e in train_events:
        self.__validate(e)
        self.rec.users[e.user.index]['known_items'].add(e.item.index)
        self.item_buffer.append(e.item.index)

    # for batch evaluation, temporarily save new users info
    for e in test_events:
        self.__validate(e)
        self.item_buffer.append(e.item.index)

    self.__batch_update(train_events, test_events, n_epoch)

    # batch test events are considered as new observations;
    # the model is incrementally updated based on them before the
    # incremental evaluation step
    for e in test_events:
        self.rec.users[e.user.index]['known_items'].add(e.item.index)
        self.rec.update(e)
python
Train a model using the first 30% positive events to avoid cold-start. Evaluation of this batch training is done by using the next 20% positive events. After the batch SGD training, the models are incrementally updated by using the 20% test events. Args: train_events (list of Event): Positive training events (0-30%). test_events (list of Event): Test events (30-50%). n_epoch (int): Number of epochs for the batch training.
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/evaluator.py#L35-L64
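The 30%/20% split that fit() expects can be sketched standalone; the split_events helper below is hypothetical scaffolding for illustration, not part of flurs:

def split_events(events):
    """Split chronologically ordered positive events per fit()'s protocol."""
    n = len(events)
    train = events[:int(n * 0.3)]              # batch training (0-30%)
    test = events[int(n * 0.3):int(n * 0.5)]   # batch evaluation (30-50%)
    rest = events[int(n * 0.5):]               # incremental evaluation (50-100%)
    return train, test, rest

train, test, rest = split_events(list(range(100)))  # ints stand in for Event objects
print(len(train), len(test), len(rest))  # 30 20 50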
takuti/flurs
flurs/evaluator.py
Evaluator.evaluate
def evaluate(self, test_events): """Iterate recommend/update procedure and compute incremental recall. Args: test_events (list of Event): Positive test events. Yields: tuple: (top-1 score, rank of the true item, recommend time, update time) """ for i, e in enumerate(test_events): self.__validate(e) # target items (all or unobserved, depending on the dataset) unobserved = set(self.item_buffer) if not self.repeat: unobserved -= self.rec.users[e.user.index]['known_items'] # item i interacted by user u must be in the recommendation candidates # even if it is a new item unobserved.add(e.item.index) candidates = np.asarray(list(unobserved)) # make a ranked recommendation list over the candidates # (time.perf_counter replaces time.clock, which was removed in Python 3.8) start = time.perf_counter() recos, scores = self.__recommend(e, candidates) recommend_time = (time.perf_counter() - start) rank = np.where(recos == e.item.index)[0][0] # Step 2: update the model with the observed event self.rec.users[e.user.index]['known_items'].add(e.item.index) start = time.perf_counter() self.rec.update(e) update_time = (time.perf_counter() - start) self.item_buffer.append(e.item.index) # (top-1 score, where the correct item is ranked, rec time, update time) yield scores[0], rank, recommend_time, update_time
python
def evaluate(self, test_events): """Iterate recommend/update procedure and compute incremental recall. Args: test_events (list of Event): Positive test events. Yields: tuple: (top-1 score, rank of the true item, recommend time, update time) """ for i, e in enumerate(test_events): self.__validate(e) # target items (all or unobserved, depending on the dataset) unobserved = set(self.item_buffer) if not self.repeat: unobserved -= self.rec.users[e.user.index]['known_items'] # item i interacted by user u must be in the recommendation candidates # even if it is a new item unobserved.add(e.item.index) candidates = np.asarray(list(unobserved)) # make a ranked recommendation list over the candidates # (time.perf_counter replaces time.clock, which was removed in Python 3.8) start = time.perf_counter() recos, scores = self.__recommend(e, candidates) recommend_time = (time.perf_counter() - start) rank = np.where(recos == e.item.index)[0][0] # Step 2: update the model with the observed event self.rec.users[e.user.index]['known_items'].add(e.item.index) start = time.perf_counter() self.rec.update(e) update_time = (time.perf_counter() - start) self.item_buffer.append(e.item.index) # (top-1 score, where the correct item is ranked, rec time, update time) yield scores[0], rank, recommend_time, update_time
Iterate recommend/update procedure and compute incremental recall. Args: test_events (list of Event): Positive test events. Yields: tuple: (top-1 score, rank of the true item, recommend time, update time)
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/evaluator.py#L66-L106
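Since evaluate() yields 4-tuples, incremental recall@N reduces to counting how often the 0-based rank lands inside the top N. A self-contained consumer, with fabricated sample tuples standing in for the generator output:

def incremental_recall(results, at=10):
    ranks = [rank for _score, rank, _t_rec, _t_up in results]
    hits = sum(1 for r in ranks if r < at)  # rank is 0-based, so r < at means top-at
    return hits / len(ranks)

sample = [(0.9, 3, 0.01, 0.02), (0.8, 15, 0.01, 0.02), (0.7, 0, 0.01, 0.02)]
print(incremental_recall(sample, at=10))  # 0.666... (2 of 3 ranks inside the top-10)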
takuti/flurs
flurs/evaluator.py
Evaluator.__batch_update
def __batch_update(self, train_events, test_events, n_epoch): """Batch update called by the fitting method. Args: train_events (list of Event): Positive training events. test_events (list of Event): Test events. n_epoch (int): Number of epochs for the batch training. """ for epoch in range(n_epoch): # SGD requires us to shuffle events in each iteration # * if n_epoch == 1 # => shuffle is not required because training is deterministic (e.g., matrix sketching) if n_epoch != 1: np.random.shuffle(train_events) # train for e in train_events: self.rec.update(e, batch_train=True) # test MPR = self.__batch_evaluate(test_events) if self.debug: logger.debug('epoch %2d: MPR = %f' % (epoch + 1, MPR))
python
def __batch_update(self, train_events, test_events, n_epoch): """Batch update called by the fitting method. Args: train_events (list of Event): Positive training events. test_events (list of Event): Test events. n_epoch (int): Number of epochs for the batch training. """ for epoch in range(n_epoch): # SGD requires us to shuffle events in each iteration # * if n_epoch == 1 # => shuffle is not required because training is deterministic (e.g., matrix sketching) if n_epoch != 1: np.random.shuffle(train_events) # train for e in train_events: self.rec.update(e, batch_train=True) # test MPR = self.__batch_evaluate(test_events) if self.debug: logger.debug('epoch %2d: MPR = %f' % (epoch + 1, MPR))
Batch update called by the fitting method. Args: train_events (list of Event): Positive training events. test_events (list of Event): Test events. n_epoch (int): Number of epochs for the batch training.
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/evaluator.py#L126-L149
takuti/flurs
flurs/evaluator.py
Evaluator.__batch_evaluate
def __batch_evaluate(self, test_events): """Evaluate the current model by using the given test events. Args: test_events (list of Event): Current model is evaluated by these events. Returns: float: Mean Percentile Rank for the test set. """ percentiles = np.zeros(len(test_events)) all_items = set(self.item_buffer) for i, e in enumerate(test_events): # copy the candidate set so the in-place updates below do not mutate all_items unobserved = set(all_items) # check if the data allows users to interact with the same items repeatedly if not self.repeat: # make recommendation for all unobserved items unobserved -= self.rec.users[e.user.index]['known_items'] # the true item itself must be in the recommendation candidates unobserved.add(e.item.index) candidates = np.asarray(list(unobserved)) recos, scores = self.__recommend(e, candidates) pos = np.where(recos == e.item.index)[0][0] percentiles[i] = pos / (len(recos) - 1) * 100 return np.mean(percentiles)
python
def __batch_evaluate(self, test_events): """Evaluate the current model by using the given test events. Args: test_events (list of Event): Current model is evaluated by these events. Returns: float: Mean Percentile Rank for the test set. """ percentiles = np.zeros(len(test_events)) all_items = set(self.item_buffer) for i, e in enumerate(test_events): # copy the candidate set so the in-place updates below do not mutate all_items unobserved = set(all_items) # check if the data allows users to interact with the same items repeatedly if not self.repeat: # make recommendation for all unobserved items unobserved -= self.rec.users[e.user.index]['known_items'] # the true item itself must be in the recommendation candidates unobserved.add(e.item.index) candidates = np.asarray(list(unobserved)) recos, scores = self.__recommend(e, candidates) pos = np.where(recos == e.item.index)[0][0] percentiles[i] = pos / (len(recos) - 1) * 100 return np.mean(percentiles)
Evaluate the current model by using the given test events. Args: test_events (list of Event): Current model is evaluated by these events. Returns: float: Mean Percentile Rank for the test set.
https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/evaluator.py#L151-L180
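The percentile computed per event is just the true item's 0-based position normalized to 0-100; averaging over events gives MPR (lower is better). A standalone sketch with toy positions:

import numpy as np

def mean_percentile_rank(positions, n_candidates):
    positions = np.asarray(positions, dtype=float)
    return float(np.mean(positions / (n_candidates - 1) * 100))

print(mean_percentile_rank([0, 49, 99], n_candidates=100))  # ~49.83; 0.0 would be perfect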
linkedin/asciietch
asciietch/graph.py
Grapher._scale_x_values
def _scale_x_values(self, values, max_width): '''Scale X values to new width''' if isinstance(values, dict): # convert to a sorted list of (timestamp, value) pairs before bucketing values = self._scale_x_values_timestamps(values=sorted(values.items()), max_width=max_width) adjusted_values = list(values) if len(adjusted_values) > max_width: def get_position(current_pos): return len(adjusted_values) * current_pos // max_width adjusted_values = [statistics.mean(adjusted_values[get_position(i):get_position(i + 1)]) for i in range(max_width)] return adjusted_values
python
def _scale_x_values(self, values, max_width): '''Scale X values to new width''' if isinstance(values, dict): # convert to a sorted list of (timestamp, value) pairs before bucketing values = self._scale_x_values_timestamps(values=sorted(values.items()), max_width=max_width) adjusted_values = list(values) if len(adjusted_values) > max_width: def get_position(current_pos): return len(adjusted_values) * current_pos // max_width adjusted_values = [statistics.mean(adjusted_values[get_position(i):get_position(i + 1)]) for i in range(max_width)] return adjusted_values
Scale X values to new width
https://github.com/linkedin/asciietch/blob/33499e9b1c5226c04078d08a210ef657c630291c/asciietch/graph.py#L11-L25
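The downsampling step is a bucketed mean: each of the max_width output columns averages a contiguous slice of the input. Reproduced standalone with toy data:

import statistics

values = list(range(10))  # 10 input points
max_width = 5             # squeeze into 5 columns

def bucket(i):
    return len(values) * i // max_width

scaled = [statistics.mean(values[bucket(i):bucket(i + 1)]) for i in range(max_width)]
print(scaled)  # [0.5, 2.5, 4.5, 6.5, 8.5]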
linkedin/asciietch
asciietch/graph.py
Grapher._scale_x_values_timestamps
def _scale_x_values_timestamps(self, values, max_width): '''Scale X values to new width based on timestamps''' first_timestamp = float(values[0][0]) last_timestamp = float(values[-1][0]) step_size = (last_timestamp - first_timestamp) / max_width values_by_column = [[] for i in range(max_width)] for timestamp, value in values: if value is None: continue timestamp = float(timestamp) column = (timestamp - first_timestamp) // step_size column = int(min(column, max_width - 1)) # Don't go beyond the last column values_by_column[column].append(value) adjusted_values = [statistics.mean(values) if values else 0 for values in values_by_column] # Average each column, 0 if no values return adjusted_values
python
def _scale_x_values_timestamps(self, values, max_width): '''Scale X values to new width based on timestamps''' first_timestamp = float(values[0][0]) last_timestamp = float(values[-1][0]) step_size = (last_timestamp - first_timestamp) / max_width values_by_column = [[] for i in range(max_width)] for timestamp, value in values: if value is None: continue timestamp = float(timestamp) column = (timestamp - first_timestamp) // step_size column = int(min(column, max_width - 1)) # Don't go beyond the last column values_by_column[column].append(value) adjusted_values = [statistics.mean(values) if values else 0 for values in values_by_column] # Average each column, 0 if no values return adjusted_values
Scale X values to new width based on timestamps
https://github.com/linkedin/asciietch/blob/33499e9b1c5226c04078d08a210ef657c630291c/asciietch/graph.py#L27-L44
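The timestamp variant assigns each point to a column by elapsed time rather than by index, skipping None gaps and clamping to the last column. The (timestamp, value) pairs below are fabricated:

import statistics

points = [(0.0, 1), (1.0, 2), (2.0, None), (9.0, 10)]
max_width = 3
first, last = points[0][0], points[-1][0]
step = (last - first) / max_width

columns = [[] for _ in range(max_width)]
for ts, v in points:
    if v is None:
        continue  # gaps are skipped, as in the method above
    col = int(min((ts - first) // step, max_width - 1))  # clamp to the last column
    columns[col].append(v)

print([statistics.mean(c) if c else 0 for c in columns])  # [1.5, 0, 10]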
linkedin/asciietch
asciietch/graph.py
Grapher._scale_y_values
def _scale_y_values(self, values, new_min, new_max, scale_old_from_zero=True): ''' Take values and transmute them into a new range ''' # Scale Y values - Create a scaled list of values to use for the visual graph scaled_values = [] y_min_value = min(values) if scale_old_from_zero: y_min_value = 0 y_max_value = max(values) new_min = 0 OldRange = (y_max_value - y_min_value) or 1 # Prevents division by zero if all values are the same NewRange = (new_max - new_min) # max_height is new_max for old_value in values: new_value = (((old_value - y_min_value) * NewRange) / OldRange) + new_min scaled_values.append(new_value) return scaled_values
python
def _scale_y_values(self, values, new_min, new_max, scale_old_from_zero=True): ''' Take values and transmute them into a new range ''' # Scale Y values - Create a scaled list of values to use for the visual graph scaled_values = [] y_min_value = min(values) if scale_old_from_zero: y_min_value = 0 y_max_value = max(values) new_min = 0 OldRange = (y_max_value - y_min_value) or 1 # Prevents division by zero if all values are the same NewRange = (new_max - new_min) # max_height is new_max for old_value in values: new_value = (((old_value - y_min_value) * NewRange) / OldRange) + new_min scaled_values.append(new_value) return scaled_values
Take values and transmute them into a new range
https://github.com/linkedin/asciietch/blob/33499e9b1c5226c04078d08a210ef657c630291c/asciietch/graph.py#L46-L62
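_scale_y_values is min-max rescaling; with new_min fixed at 0 (which is how asciigraph calls it), it reduces to a few lines:

def scale_y(values, new_max, scale_old_from_zero=True):
    old_min = 0 if scale_old_from_zero else min(values)
    old_range = (max(values) - old_min) or 1  # guard against a flat series
    return [(v - old_min) * new_max / old_range for v in values]

print(scale_y([5, 10, 20], new_max=4))                             # [1.0, 2.0, 4.0]
print(scale_y([5, 10, 20], new_max=4, scale_old_from_zero=False))  # [0.0, 1.33.., 4.0]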
linkedin/asciietch
asciietch/graph.py
Grapher._get_ascii_field
def _get_ascii_field(self, values): '''Create a representation of an ascii graph using two lists in this format: field[x][y] = "char"''' empty_space = ' ' # This formats as field[x][y] field = [[empty_space for y in range(max(values) + 1)] for x in range(len(values))] # Draw graph into field for x in range(len(values)): y = values[x] y_prev = values[x - 1] if x > 0 else y y_next = values[x + 1] if x + 1 < len(values) else y # Fill empty space if abs(y_prev - y) > 1: # Fill space between y and y_prev step = 1 if y_prev - y > 0 else -1 # We don't want the first item to be inclusive, so we use step instead of y+1 since step can be negative for h in range(y + step, y_prev, step): if field[x][h] == empty_space: field[x][h] = '|' # Assign the character to be placed into the graph char = self._assign_ascii_character(y_prev, y, y_next) field[x][y] = char return field
python
def _get_ascii_field(self, values): '''Create a representation of an ascii graph using two lists in this format: field[x][y] = "char"''' empty_space = ' ' # This formats as field[x][y] field = [[empty_space for y in range(max(values) + 1)] for x in range(len(values))] # Draw graph into field for x in range(len(values)): y = values[x] y_prev = values[x - 1] if x > 0 else y y_next = values[x + 1] if x + 1 < len(values) else y # Fill empty space if abs(y_prev - y) > 1: # Fill space between y and y_prev step = 1 if y_prev - y > 0 else -1 # We don't want the first item to be inclusive, so we use step instead of y+1 since step can be negative for h in range(y + step, y_prev, step): if field[x][h] == empty_space: field[x][h] = '|' # Assign the character to be placed into the graph char = self._assign_ascii_character(y_prev, y, y_next) field[x][y] = char return field
Create a representation of an ascii graph using two lists in this format: field[x][y] = "char"
https://github.com/linkedin/asciietch/blob/33499e9b1c5226c04078d08a210ef657c630291c/asciietch/graph.py#L68-L95
linkedin/asciietch
asciietch/graph.py
Grapher._assign_ascii_character
def _assign_ascii_character(self, y_prev, y, y_next): # noqa for complexity '''Assign the character to be placed into the graph''' char = '?' if y_next > y and y_prev > y: char = '-' elif y_next < y and y_prev < y: char = '-' elif y_prev < y and y == y_next: char = '-' elif y_prev == y and y_next < y: char = '-' elif y_next > y: char = '/' elif y_next < y: char = '\\' elif y_prev < y: char = '/' elif y_prev > y: char = '\\' elif y_next == y: char = '-' elif y == y_prev: char = '-' return char
python
def _assign_ascii_character(self, y_prev, y, y_next): # noqa for complexity '''Assign the character to be placed into the graph''' char = '?' if y_next > y and y_prev > y: char = '-' elif y_next < y and y_prev < y: char = '-' elif y_prev < y and y == y_next: char = '-' elif y_prev == y and y_next < y: char = '-' elif y_next > y: char = '/' elif y_next < y: char = '\\' elif y_prev < y: char = '/' elif y_prev > y: char = '\\' elif y_next == y: char = '-' elif y == y_prev: char = '-' return char
Assign the character to be placed into the graph
https://github.com/linkedin/asciietch/blob/33499e9b1c5226c04078d08a210ef657c630291c/asciietch/graph.py#L97-L120
linkedin/asciietch
asciietch/graph.py
Grapher._draw_ascii_graph
def _draw_ascii_graph(self, field): '''Draw graph from field double nested list, format field[x][y] = char''' row_strings = [] for y in range(len(field[0])): row = '' for x in range(len(field)): row += field[x][y] row_strings.insert(0, row) graph_string = '\n'.join(row_strings) return graph_string
python
def _draw_ascii_graph(self, field): '''Draw graph from field double nested list, format field[x][y] = char''' row_strings = [] for y in range(len(field[0])): row = '' for x in range(len(field)): row += field[x][y] row_strings.insert(0, row) graph_string = '\n'.join(row_strings) return graph_string
Draw graph from field double nested list, format field[x][y] = char
https://github.com/linkedin/asciietch/blob/33499e9b1c5226c04078d08a210ef657c630291c/asciietch/graph.py#L122-L131
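_draw_ascii_graph transposes the column-major field into printable rows; y grows upward, so each row is prepended. The mechanics standalone:

field = [['a', '1'], ['b', '2'], ['c', '3']]  # field[x][y]: 3 columns, 2 rows
rows = []
for y in range(len(field[0])):
    rows.insert(0, ''.join(col[y] for col in field))  # prepend: higher y prints first
print('\n'.join(rows))  # "123" on top, "abc" below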
linkedin/asciietch
asciietch/graph.py
Grapher.asciigraph
def asciigraph(self, values=None, max_height=None, max_width=None, label=False): ''' Accepts a list of y values and returns an ascii graph. Optionally, values can instead be a dictionary mapping timestamps to values; InGraphs, for example, returns data in this format. ''' result = '' border_fill_char = '*' start_ctime = None end_ctime = None if not max_width: max_width = 180 # If this is a dict of timestamp -> value, sort the data, store the start/end time, and convert values to a list of values if isinstance(values, dict): time_series_sorted = sorted(list(values.items()), key=lambda x: x[0]) # Sort timestamp/value dict by the timestamps start_timestamp = time_series_sorted[0][0] end_timestamp = time_series_sorted[-1][0] start_ctime = datetime.fromtimestamp(float(start_timestamp)).ctime() end_ctime = datetime.fromtimestamp(float(end_timestamp)).ctime() values = self._scale_x_values_timestamps(values=time_series_sorted, max_width=max_width) values = [value for value in values if value is not None] if not max_height: max_height = min(20, max(values)) stdev = statistics.stdev(values) mean = statistics.mean(values) # Do value adjustments adjusted_values = list(values) adjusted_values = self._scale_x_values(values=values, max_width=max_width) upper_value = max(adjusted_values) # Getting upper/lower after scaling x values so we don't label a spike we can't see lower_value = min(adjusted_values) adjusted_values = self._scale_y_values(values=adjusted_values, new_min=0, new_max=max_height, scale_old_from_zero=False) adjusted_values = self._round_floats_to_ints(values=adjusted_values) # Obtain Ascii Graph String field = self._get_ascii_field(adjusted_values) graph_string = self._draw_ascii_graph(field=field) # Label the graph if label: top_label = 'Upper value: {upper_value:.2f} '.format(upper_value=upper_value).ljust(max_width, border_fill_char) result += top_label + '\n' result += '{graph_string}\n'.format(graph_string=graph_string) if label: lower = f'Lower value: {lower_value:.2f} ' stats = f' Mean: {mean:.2f} *** Std Dev: {stdev:.2f} ******' fill_length = max_width - len(lower) - len(stats) stat_label = f'{lower}{"*" * fill_length}{stats}\n' result += stat_label if start_ctime and end_ctime: fill_length = max_width - len(start_ctime) - len(end_ctime) result += f'{start_ctime}{" " * fill_length}{end_ctime}\n' return result
python
def asciigraph(self, values=None, max_height=None, max_width=None, label=False): ''' Accepts a list of y values and returns an ascii graph. Optionally, values can instead be a dictionary mapping timestamps to values; InGraphs, for example, returns data in this format. ''' result = '' border_fill_char = '*' start_ctime = None end_ctime = None if not max_width: max_width = 180 # If this is a dict of timestamp -> value, sort the data, store the start/end time, and convert values to a list of values if isinstance(values, dict): time_series_sorted = sorted(list(values.items()), key=lambda x: x[0]) # Sort timestamp/value dict by the timestamps start_timestamp = time_series_sorted[0][0] end_timestamp = time_series_sorted[-1][0] start_ctime = datetime.fromtimestamp(float(start_timestamp)).ctime() end_ctime = datetime.fromtimestamp(float(end_timestamp)).ctime() values = self._scale_x_values_timestamps(values=time_series_sorted, max_width=max_width) values = [value for value in values if value is not None] if not max_height: max_height = min(20, max(values)) stdev = statistics.stdev(values) mean = statistics.mean(values) # Do value adjustments adjusted_values = list(values) adjusted_values = self._scale_x_values(values=values, max_width=max_width) upper_value = max(adjusted_values) # Getting upper/lower after scaling x values so we don't label a spike we can't see lower_value = min(adjusted_values) adjusted_values = self._scale_y_values(values=adjusted_values, new_min=0, new_max=max_height, scale_old_from_zero=False) adjusted_values = self._round_floats_to_ints(values=adjusted_values) # Obtain Ascii Graph String field = self._get_ascii_field(adjusted_values) graph_string = self._draw_ascii_graph(field=field) # Label the graph if label: top_label = 'Upper value: {upper_value:.2f} '.format(upper_value=upper_value).ljust(max_width, border_fill_char) result += top_label + '\n' result += '{graph_string}\n'.format(graph_string=graph_string) if label: lower = f'Lower value: {lower_value:.2f} ' stats = f' Mean: {mean:.2f} *** Std Dev: {stdev:.2f} ******' fill_length = max_width - len(lower) - len(stats) stat_label = f'{lower}{"*" * fill_length}{stats}\n' result += stat_label if start_ctime and end_ctime: fill_length = max_width - len(start_ctime) - len(end_ctime) result += f'{start_ctime}{" " * fill_length}{end_ctime}\n' return result
Accepts a list of y values and returns an ascii graph. Optionally, values can instead be a dictionary mapping timestamps to values; InGraphs, for example, returns data in this format.
https://github.com/linkedin/asciietch/blob/33499e9b1c5226c04078d08a210ef657c630291c/asciietch/graph.py#L133-L192
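A hedged usage sketch: it assumes asciietch is installed and that Grapher takes no constructor arguments; only the asciigraph signature itself comes from the source above.

from asciietch.graph import Grapher

grapher = Grapher()  # assumed no-arg constructor
print(grapher.asciigraph(values=[0, 1, 4, 2, 6, 3, 3, 1], max_height=5, max_width=40, label=True))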
HPAC/matchpy
matchpy/functions.py
substitute
def substitute(expression: Union[Expression, Pattern], substitution: Substitution) -> Replacement: """Replaces variables in the given *expression* using the given *substitution*. >>> print(substitute(f(x_), {'x': a})) f(a) If nothing was substituted, the original expression is returned: >>> expression = f(x_) >>> result = substitute(expression, {'y': a}) >>> print(result) f(x_) >>> expression is result True Note that this function returns a list of expressions iff the expression is a variable and its substitution is a list of expressions. In other cases where the substitution is a list of expressions, the expressions will be integrated as operands in the surrounding operation: >>> print(substitute(f(x_, c), {'x': [a, b]})) f(a, b, c) If you substitute with a `Multiset` of values, they will be sorted: >>> replacement = Multiset([b, a, b]) >>> print(substitute(f(x_, c), {'x': replacement})) f(a, b, b, c) Parameters: expression: An expression in which variables are substituted. substitution: A substitution dictionary. The key is the name of the variable, the value either an expression or a list of expressions to use as a replacement for the variable. Returns: The expression resulting from applying the substitution. """ if isinstance(expression, Pattern): expression = expression.expression return _substitute(expression, substitution)[0]
python
def substitute(expression: Union[Expression, Pattern], substitution: Substitution) -> Replacement: """Replaces variables in the given *expression* using the given *substitution*. >>> print(substitute(f(x_), {'x': a})) f(a) If nothing was substituted, the original expression is returned: >>> expression = f(x_) >>> result = substitute(expression, {'y': a}) >>> print(result) f(x_) >>> expression is result True Note that this function returns a list of expressions iff the expression is a variable and its substitution is a list of expressions. In other cases where the substitution is a list of expressions, the expressions will be integrated as operands in the surrounding operation: >>> print(substitute(f(x_, c), {'x': [a, b]})) f(a, b, c) If you substitute with a `Multiset` of values, they will be sorted: >>> replacement = Multiset([b, a, b]) >>> print(substitute(f(x_, c), {'x': replacement})) f(a, b, b, c) Parameters: expression: An expression in which variables are substituted. substitution: A substitution dictionary. The key is the name of the variable, the value either an expression or a list of expressions to use as a replacement for the variable. Returns: The expression resulting from applying the substitution. """ if isinstance(expression, Pattern): expression = expression.expression return _substitute(expression, substitution)[0]
Replaces variables in the given *expression* using the given *substitution*. >>> print(substitute(f(x_), {'x': a})) f(a) If nothing was substituted, the original expression is returned: >>> expression = f(x_) >>> result = substitute(expression, {'y': a}) >>> print(result) f(x_) >>> expression is result True Note that this function returns a list of expressions iff the expression is a variable and its substitution is a list of expressions. In other cases where the substitution is a list of expressions, the expressions will be integrated as operands in the surrounding operation: >>> print(substitute(f(x_, c), {'x': [a, b]})) f(a, b, c) If you substitute with a `Multiset` of values, they will be sorted: >>> replacement = Multiset([b, a, b]) >>> print(substitute(f(x_, c), {'x': replacement})) f(a, b, b, c) Parameters: expression: An expression in which variables are substituted. substitution: A substitution dictionary. The key is the name of the variable, the value either an expression or a list of expressions to use as a replacement for the variable. Returns: The expression resulting from applying the substitution.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/functions.py#L30-L71
HPAC/matchpy
matchpy/functions.py
replace
def replace(expression: Expression, position: Sequence[int], replacement: Replacement) -> Replacement: r"""Replaces the subexpression of `expression` at the given `position` with the given `replacement`. The original `expression` itself is not modified, but a modified copy is returned. If the replacement is a list of expressions, it will be expanded into the list of operands of the respective operation: >>> print(replace(f(a), (0, ), [b, c])) f(b, c) Parameters: expression: An :class:`Expression` where a (sub)expression is to be replaced. position: A tuple of indices, e.g. the empty tuple refers to the `expression` itself, `(0, )` refers to the first child (operand) of the `expression`, `(0, 0)` to the first child of the first child etc. replacement: Either an :class:`Expression` or a list of :class:`Expression`\s to be inserted into the `expression` instead of the original expression at that `position`. Returns: The resulting expression from the replacement. Raises: IndexError: If the position is invalid or out of range. """ if len(position) == 0: return replacement if not isinstance(expression, Operation): raise IndexError("Invalid position {!r} for expression {!s}".format(position, expression)) if position[0] >= op_len(expression): raise IndexError("Position {!r} out of range for expression {!s}".format(position, expression)) pos = position[0] operands = list(op_iter(expression)) subexpr = replace(operands[pos], position[1:], replacement) if isinstance(subexpr, Sequence): new_operands = tuple(operands[:pos]) + tuple(subexpr) + tuple(operands[pos + 1:]) return create_operation_expression(expression, new_operands) operands[pos] = subexpr return create_operation_expression(expression, operands)
python
def replace(expression: Expression, position: Sequence[int], replacement: Replacement) -> Replacement: r"""Replaces the subexpression of `expression` at the given `position` with the given `replacement`. The original `expression` itself is not modified, but a modified copy is returned. If the replacement is a list of expressions, it will be expanded into the list of operands of the respective operation: >>> print(replace(f(a), (0, ), [b, c])) f(b, c) Parameters: expression: An :class:`Expression` where a (sub)expression is to be replaced. position: A tuple of indices, e.g. the empty tuple refers to the `expression` itself, `(0, )` refers to the first child (operand) of the `expression`, `(0, 0)` to the first child of the first child etc. replacement: Either an :class:`Expression` or a list of :class:`Expression`\s to be inserted into the `expression` instead of the original expression at that `position`. Returns: The resulting expression from the replacement. Raises: IndexError: If the position is invalid or out of range. """ if len(position) == 0: return replacement if not isinstance(expression, Operation): raise IndexError("Invalid position {!r} for expression {!s}".format(position, expression)) if position[0] >= op_len(expression): raise IndexError("Position {!r} out of range for expression {!s}".format(position, expression)) pos = position[0] operands = list(op_iter(expression)) subexpr = replace(operands[pos], position[1:], replacement) if isinstance(subexpr, Sequence): new_operands = tuple(operands[:pos]) + tuple(subexpr) + tuple(operands[pos + 1:]) return create_operation_expression(expression, new_operands) operands[pos] = subexpr return create_operation_expression(expression, operands)
r"""Replaces the subexpression of `expression` at the given `position` with the given `replacement`. The original `expression` itself is not modified, but a modified copy is returned. If the replacement is a list of expressions, it will be expanded into the list of operands of the respective operation: >>> print(replace(f(a), (0, ), [b, c])) f(b, c) Parameters: expression: An :class:`Expression` where a (sub)expression is to be replaced. position: A tuple of indices, e.g. the empty tuple refers to the `expression` itself, `(0, )` refers to the first child (operand) of the `expression`, `(0, 0)` to the first child of the first child etc. replacement: Either an :class:`Expression` or a list of :class:`Expression`\s to be inserted into the `expression` instead of the original expression at that `position`. Returns: The resulting expression from the replacement. Raises: IndexError: If the position is invalid or out of range.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/functions.py#L96-L135
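A runnable sketch of replace, assuming matchpy is installed; Operation.new, Symbol, Arity, and replace are all part of its public API:

from matchpy import Operation, Symbol, Arity, replace

f = Operation.new('f', Arity.variadic)
a, b, c = Symbol('a'), Symbol('b'), Symbol('c')

print(replace(f(a, b), (1,), c))       # f(a, c)
print(replace(f(a, b), (0,), [c, c]))  # f(c, c, b) -- a sequence splices into the operands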
HPAC/matchpy
matchpy/functions.py
replace_many
def replace_many(expression: Expression, replacements: Sequence[Tuple[Sequence[int], Replacement]]) -> Replacement: r"""Replaces the subexpressions of *expression* at the given positions with the given replacements. The original *expression* itself is not modified, but a modified copy is returned. If the replacement is a sequence of expressions, it will be expanded into the list of operands of the respective operation. This function works the same as `replace`, but allows multiple positions to be replaced at the same time. However, compared to just replacing each position individually with `replace`, this does work when positions are modified due to replacing a position with a sequence: >>> expr = f(a, b) >>> expected_result = replace_many(expr, [((0, ), [c, c]), ((1, ), a)]) >>> print(expected_result) f(c, c, a) However, using `replace` for one position at a time gives the wrong result: >>> step1 = replace(expr, (0, ), [c, c]) >>> print(step1) f(c, c, b) >>> step2 = replace(step1, (1, ), a) >>> print(step2) f(c, a, b) Parameters: expression: An :class:`Expression` where a (sub)expression is to be replaced. replacements: A collection of tuples consisting of a position in the expression and a replacement for that position. With just a single replacement pair, this is equivalent to using `replace`: >>> replace(a, (), b) == replace_many(a, [((), b)]) True Returns: The resulting expression from the replacements. Raises: IndexError: If a position is invalid or out of range or if you try to replace a subterm of a term you are already replacing. """ if len(replacements) == 0: return expression replacements = sorted(replacements) if len(replacements[0][0]) == 0: if len(replacements) > 1: raise IndexError( "Cannot replace child positions for expression {}, got {!r}".format(expression, replacements[1:]) ) return replacements[0][1] if len(replacements) == 1: return replace(expression, replacements[0][0], replacements[0][1]) if not isinstance(expression, Operation): raise IndexError("Invalid replacements {!r} for expression {!s}".format(replacements, expression)) operands = list(op_iter(expression)) new_operands = [] last_index = 0 for index, group in itertools.groupby(replacements, lambda r: r[0][0]): new_operands.extend(operands[last_index:index]) replacements = [(pos[1:], r) for pos, r in group] if len(replacements) == 1: replacement = replace(operands[index], replacements[0][0], replacements[0][1]) else: replacement = replace_many(operands[index], replacements) if isinstance(replacement, (list, tuple, Multiset)): new_operands.extend(replacement) else: new_operands.append(replacement) last_index = index + 1 new_operands.extend(operands[last_index:len(operands)]) return create_operation_expression(expression, new_operands)
python
def replace_many(expression: Expression, replacements: Sequence[Tuple[Sequence[int], Replacement]]) -> Replacement: r"""Replaces the subexpressions of *expression* at the given positions with the given replacements. The original *expression* itself is not modified, but a modified copy is returned. If the replacement is a sequence of expressions, it will be expanded into the list of operands of the respective operation. This function works the same as `replace`, but allows multiple positions to be replaced at the same time. However, compared to just replacing each position individually with `replace`, this does work when positions are modified due to replacing a position with a sequence: >>> expr = f(a, b) >>> expected_result = replace_many(expr, [((0, ), [c, c]), ((1, ), a)]) >>> print(expected_result) f(c, c, a) However, using `replace` for one position at a time gives the wrong result: >>> step1 = replace(expr, (0, ), [c, c]) >>> print(step1) f(c, c, b) >>> step2 = replace(step1, (1, ), a) >>> print(step2) f(c, a, b) Parameters: expression: An :class:`Expression` where a (sub)expression is to be replaced. replacements: A collection of tuples consisting of a position in the expression and a replacement for that position. With just a single replacement pair, this is equivalent to using `replace`: >>> replace(a, (), b) == replace_many(a, [((), b)]) True Returns: The resulting expression from the replacements. Raises: IndexError: If a position is invalid or out of range or if you try to replace a subterm of a term you are already replacing. """ if len(replacements) == 0: return expression replacements = sorted(replacements) if len(replacements[0][0]) == 0: if len(replacements) > 1: raise IndexError( "Cannot replace child positions for expression {}, got {!r}".format(expression, replacements[1:]) ) return replacements[0][1] if len(replacements) == 1: return replace(expression, replacements[0][0], replacements[0][1]) if not isinstance(expression, Operation): raise IndexError("Invalid replacements {!r} for expression {!s}".format(replacements, expression)) operands = list(op_iter(expression)) new_operands = [] last_index = 0 for index, group in itertools.groupby(replacements, lambda r: r[0][0]): new_operands.extend(operands[last_index:index]) replacements = [(pos[1:], r) for pos, r in group] if len(replacements) == 1: replacement = replace(operands[index], replacements[0][0], replacements[0][1]) else: replacement = replace_many(operands[index], replacements) if isinstance(replacement, (list, tuple, Multiset)): new_operands.extend(replacement) else: new_operands.append(replacement) last_index = index + 1 new_operands.extend(operands[last_index:len(operands)]) return create_operation_expression(expression, new_operands)
r"""Replaces the subexpressions of *expression* at the given positions with the given replacements. The original *expression* itself is not modified, but a modified copy is returned. If the replacement is a sequence of expressions, it will be expanded into the list of operands of the respective operation. This function works the same as `replace`, but allows multiple positions to be replaced at the same time. However, compared to just replacing each position individually with `replace`, this does work when positions are modified due to replacing a position with a sequence: >>> expr = f(a, b) >>> expected_result = replace_many(expr, [((0, ), [c, c]), ((1, ), a)]) >>> print(expected_result) f(c, c, a) However, using `replace` for one position at a time gives the wrong result: >>> step1 = replace(expr, (0, ), [c, c]) >>> print(step1) f(c, c, b) >>> step2 = replace(step1, (1, ), a) >>> print(step2) f(c, a, b) Parameters: expression: An :class:`Expression` where a (sub)expression is to be replaced. replacements: A collection of tuples consisting of a position in the expression and a replacement for that position. With just a single replacement pair, this is equivalent to using `replace`: >>> replace(a, (), b) == replace_many(a, [((), b)]) True Returns: The resulting expression from the replacements. Raises: IndexError: If a position is invalid or out of range or if you try to replace a subterm of a term you are already replacing.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/functions.py#L138-L208
HPAC/matchpy
matchpy/functions.py
replace_all
def replace_all(expression: Expression, rules: Iterable[ReplacementRule], max_count: int=math.inf) \ -> Union[Expression, Sequence[Expression]]: """Replace all occurrences of the patterns according to the replacement rules. A replacement rule consists of a *pattern* that is matched against any subexpression of the expression. If a match is found, the *replacement* callback of the rule is called with the variables from the match substitution. Whatever the callback returns is used as a replacement for the matched subexpression. This can either be a single expression or a sequence of expressions, which is then integrated into the surrounding operation in place of the subexpression. Note that the pattern can therefore not be a single sequence variable/wildcard, because only single expressions will be matched. Args: expression: The expression to which the replacement rules are applied. rules: A collection of replacement rules that are applied to the expression. max_count: If given, at most *max_count* applications of the rules are performed. Otherwise, the rules are applied until there is no more match. If the set of replacement rules is not confluent, the replacement might not terminate without a *max_count* set. Returns: The resulting expression after the application of the replacement rules. This can also be a sequence of expressions, if the root expression is replaced with a sequence of expressions by a rule. """ rules = [ReplacementRule(pattern, replacement) for pattern, replacement in rules] replaced = True replace_count = 0 while replaced and replace_count < max_count: replaced = False for subexpr, pos in preorder_iter_with_position(expression): for pattern, replacement in rules: try: subst = next(match(subexpr, pattern)) result = replacement(**subst) expression = replace(expression, pos, result) replaced = True break except StopIteration: pass if replaced: break replace_count += 1 return expression
python
def replace_all(expression: Expression, rules: Iterable[ReplacementRule], max_count: int=math.inf) \ -> Union[Expression, Sequence[Expression]]: """Replace all occurrences of the patterns according to the replacement rules. A replacement rule consists of a *pattern* that is matched against any subexpression of the expression. If a match is found, the *replacement* callback of the rule is called with the variables from the match substitution. Whatever the callback returns is used as a replacement for the matched subexpression. This can either be a single expression or a sequence of expressions, which is then integrated into the surrounding operation in place of the subexpression. Note that the pattern can therefore not be a single sequence variable/wildcard, because only single expressions will be matched. Args: expression: The expression to which the replacement rules are applied. rules: A collection of replacement rules that are applied to the expression. max_count: If given, at most *max_count* applications of the rules are performed. Otherwise, the rules are applied until there is no more match. If the set of replacement rules is not confluent, the replacement might not terminate without a *max_count* set. Returns: The resulting expression after the application of the replacement rules. This can also be a sequence of expressions, if the root expression is replaced with a sequence of expressions by a rule. """ rules = [ReplacementRule(pattern, replacement) for pattern, replacement in rules] replaced = True replace_count = 0 while replaced and replace_count < max_count: replaced = False for subexpr, pos in preorder_iter_with_position(expression): for pattern, replacement in rules: try: subst = next(match(subexpr, pattern)) result = replacement(**subst) expression = replace(expression, pos, result) replaced = True break except StopIteration: pass if replaced: break replace_count += 1 return expression
Replace all occurrences of the patterns according to the replacement rules. A replacement rule consists of a *pattern* that is matched against any subexpression of the expression. If a match is found, the *replacement* callback of the rule is called with the variables from the match substitution. Whatever the callback returns is used as a replacement for the matched subexpression. This can either be a single expression or a sequence of expressions, which is then integrated into the surrounding operation in place of the subexpression. Note that the pattern can therefore not be a single sequence variable/wildcard, because only single expressions will be matched. Args: expression: The expression to which the replacement rules are applied. rules: A collection of replacement rules that are applied to the expression. max_count: If given, at most *max_count* applications of the rules are performed. Otherwise, the rules are applied until there is no more match. If the set of replacement rules is not confluent, the replacement might not terminate without a *max_count* set. Returns: The resulting expression after the application of the replacement rules. This can also be a sequence of expressions, if the root expression is replaced with a sequence of expressions by a rule.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/functions.py#L214-L261
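A runnable rule-rewriting sketch under the same assumption that matchpy is installed; Pattern, Wildcard, and ReplacementRule are part of its documented API:

from matchpy import Operation, Symbol, Arity, Wildcard, Pattern, ReplacementRule, replace_all

f = Operation.new('f', Arity.variadic)
a, b = Symbol('a'), Symbol('b')
x_ = Wildcard.dot('x')

# Rewrite f(x, x) -> x wherever both operands are equal.
rule = ReplacementRule(Pattern(f(x_, x_)), lambda x: x)
print(replace_all(f(f(a, a), b), [rule]))  # f(a, b)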
HPAC/matchpy
matchpy/functions.py
replace_all_post_order
def replace_all_post_order(expression: Expression, rules: Iterable[ReplacementRule]) \ -> Union[Expression, Sequence[Expression]]: """Replace all occurrences of the patterns according to the replacement rules. A replacement rule consists of a *pattern* that is matched against any subexpression of the expression. If a match is found, the *replacement* callback of the rule is called with the variables from the match substitution. Whatever the callback returns is used as a replacement for the matched subexpression. This can either be a single expression or a sequence of expressions, which is then integrated into the surrounding operation in place of the subexpression. Note that the pattern can therefore not be a single sequence variable/wildcard, because only single expressions will be matched. Args: expression: The expression to which the replacement rules are applied. rules: A collection of replacement rules that are applied to the expression. Returns: The resulting expression after the application of the replacement rules. This can also be a sequence of expressions, if the root expression is replaced with a sequence of expressions by a rule. """ return _replace_all_post_order(expression, rules)[0]
python
def replace_all_post_order(expression: Expression, rules: Iterable[ReplacementRule]) \ -> Union[Expression, Sequence[Expression]]: """Replace all occurrences of the patterns according to the replacement rules. A replacement rule consists of a *pattern* that is matched against any subexpression of the expression. If a match is found, the *replacement* callback of the rule is called with the variables from the match substitution. Whatever the callback returns is used as a replacement for the matched subexpression. This can either be a single expression or a sequence of expressions, which is then integrated into the surrounding operation in place of the subexpression. Note that the pattern can therefore not be a single sequence variable/wildcard, because only single expressions will be matched. Args: expression: The expression to which the replacement rules are applied. rules: A collection of replacement rules that are applied to the expression. Returns: The resulting expression after the application of the replacement rules. This can also be a sequence of expressions, if the root expression is replaced with a sequence of expressions by a rule. """ return _replace_all_post_order(expression, rules)[0]
Replace all occurrences of the patterns according to the replacement rules. A replacement rule consists of a *pattern* that is matched against any subexpression of the expression. If a match is found, the *replacement* callback of the rule is called with the variables from the match substitution. Whatever the callback returns is used as a replacement for the matched subexpression. This can either be a single expression or a sequence of expressions, which is then integrated into the surrounding operation in place of the subexpression. Note that the pattern can therefore not be a single sequence variable/wildcard, because only single expressions will be matched. Args: expression: The expression to which the replacement rules are applied. rules: A collection of replacement rules that are applied to the expression. Returns: The resulting expression after the application of the replacement rules. This can also be a sequence of expressions, if the root expression is replaced with a sequence of expressions by a rule.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/functions.py#L264-L291
HPAC/matchpy
matchpy/functions.py
is_match
def is_match(subject: Expression, pattern: Expression) -> bool: """ Check whether the given *subject* matches the given *pattern*. Args: subject: The subject. pattern: The pattern. Returns: True iff the subject matches the pattern. """ return any(True for _ in match(subject, pattern))
python
def is_match(subject: Expression, pattern: Expression) -> bool: """ Check whether the given *subject* matches the given *pattern*. Args: subject: The subject. pattern: The pattern. Returns: True iff the subject matches the pattern. """ return any(True for _ in match(subject, pattern))
Check whether the given *subject* matches the given *pattern*. Args: subject: The subject. pattern: The pattern. Returns: True iff the subject matches the pattern.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/functions.py#L315-L328
HPAC/matchpy
matchpy/matching/bipartite.py
BipartiteGraph.as_graph
def as_graph(self) -> Graph: # pragma: no cover """Returns a :class:`graphviz.Graph` representation of this bipartite graph.""" if Graph is None: raise ImportError('The graphviz package is required to draw the graph.') graph = Graph() nodes_left = {} # type: Dict[TLeft, str] nodes_right = {} # type: Dict[TRight, str] node_id = 0 for (left, right), value in self._edges.items(): if left not in nodes_left: name = 'node{:d}'.format(node_id) nodes_left[left] = name graph.node(name, label=str(left)) node_id += 1 if right not in nodes_right: name = 'node{:d}'.format(node_id) nodes_right[right] = name graph.node(name, label=str(right)) node_id += 1 edge_label = value is not True and str(value) or '' graph.edge(nodes_left[left], nodes_right[right], edge_label) return graph
python
def as_graph(self) -> Graph: # pragma: no cover """Returns a :class:`graphviz.Graph` representation of this bipartite graph.""" if Graph is None: raise ImportError('The graphviz package is required to draw the graph.') graph = Graph() nodes_left = {} # type: Dict[TLeft, str] nodes_right = {} # type: Dict[TRight, str] node_id = 0 for (left, right), value in self._edges.items(): if left not in nodes_left: name = 'node{:d}'.format(node_id) nodes_left[left] = name graph.node(name, label=str(left)) node_id += 1 if right not in nodes_right: name = 'node{:d}'.format(node_id) nodes_right[right] = name graph.node(name, label=str(right)) node_id += 1 edge_label = value is not True and str(value) or '' graph.edge(nodes_left[left], nodes_right[right], edge_label) return graph
Returns a :class:`graphviz.Graph` representation of this bipartite graph.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/bipartite.py#L121-L142
HPAC/matchpy
matchpy/matching/bipartite.py
BipartiteGraph.find_matching
def find_matching(self) -> Dict[TLeft, TRight]: """Finds a matching in the bipartite graph. This is done using the Hopcroft-Karp algorithm with an implementation from the `hopcroftkarp` package. Returns: A dictionary where each edge of the matching is represented by a key-value pair with the key being from the left part of the graph and the value from the right part. """ # The directed graph is represented as a dictionary of edges # The key is the tail of all edges which are represented by the value # The value is a set of heads for all the edges originating from the tail (key) # In addition, the graph stores which part of the bipartite graph a node originated from # to avoid problems when a value exists in both halves. # Only one direction of the undirected edge is needed for the HopcroftKarp class directed_graph = {} # type: Dict[Tuple[int, TLeft], Set[Tuple[int, TRight]]] for (left, right) in self._edges: tail = (LEFT, left) head = (RIGHT, right) if tail not in directed_graph: directed_graph[tail] = {head} else: directed_graph[tail].add(head) matching = HopcroftKarp(directed_graph).maximum_matching() # Filter out the partitions (LEFT and RIGHT) and only return the matching edges # that go from LEFT to RIGHT return dict((tail[1], head[1]) for tail, head in matching.items() if tail[0] == LEFT)
python
def find_matching(self) -> Dict[TLeft, TRight]: """Finds a matching in the bipartite graph. This is done using the Hopcroft-Karp algorithm with an implementation from the `hopcroftkarp` package. Returns: A dictionary where each edge of the matching is represented by a key-value pair with the key being from the left part of the graph and the value from the right part. """ # The directed graph is represented as a dictionary of edges # The key is the tail of all edges which are represented by the value # The value is a set of heads for all the edges originating from the tail (key) # In addition, the graph stores which part of the bipartite graph a node originated from # to avoid problems when a value exists in both halves. # Only one direction of the undirected edge is needed for the HopcroftKarp class directed_graph = {} # type: Dict[Tuple[int, TLeft], Set[Tuple[int, TRight]]] for (left, right) in self._edges: tail = (LEFT, left) head = (RIGHT, right) if tail not in directed_graph: directed_graph[tail] = {head} else: directed_graph[tail].add(head) matching = HopcroftKarp(directed_graph).maximum_matching() # Filter out the partitions (LEFT and RIGHT) and only return the matching edges # that go from LEFT to RIGHT return dict((tail[1], head[1]) for tail, head in matching.items() if tail[0] == LEFT)
Finds a matching in the bipartite graph. This is done using the Hopcroft-Karp algorithm with an implementation from the `hopcroftkarp` package. Returns: A dictionary where each edge of the matching is represented by a key-value pair with the key being from the left part of the graph and the value from the right part.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/bipartite.py#L144-L174
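A small usage sketch; the assumption that BipartiteGraph accepts an iterable of ((left, right), value) pairs is grounded in how its own without_edge method constructs copies above:

from matchpy.matching.bipartite import BipartiteGraph

graph = BipartiteGraph([((0, 'a'), True), ((0, 'b'), True), ((1, 'a'), True)])
# Node 1 can only take 'a', which forces 0 onto 'b' in the maximum matching.
print(graph.find_matching())  # {0: 'b', 1: 'a'}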
HPAC/matchpy
matchpy/matching/bipartite.py
BipartiteGraph.without_nodes
def without_nodes(self, edge: Edge) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]': """Returns a copy of this bipartite graph with the given edge and its adjacent nodes removed.""" return BipartiteGraph(((n1, n2), v) for (n1, n2), v in self._edges.items() if n1 != edge[0] and n2 != edge[1])
python
def without_nodes(self, edge: Edge) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]': """Returns a copy of this bipartite graph with the given edge and its adjacent nodes removed.""" return BipartiteGraph(((n1, n2), v) for (n1, n2), v in self._edges.items() if n1 != edge[0] and n2 != edge[1])
Returns a copy of this bipartite graph with the given edge and its adjacent nodes removed.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/bipartite.py#L176-L178
HPAC/matchpy
matchpy/matching/bipartite.py
BipartiteGraph.without_edge
def without_edge(self, edge: Edge) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]': """Returns a copy of this bipartite graph with the given edge removed.""" return BipartiteGraph((e2, v) for e2, v in self._edges.items() if edge != e2)
python
def without_edge(self, edge: Edge) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]': """Returns a copy of this bipartite graph with the given edge removed.""" return BipartiteGraph((e2, v) for e2, v in self._edges.items() if edge != e2)
Returns a copy of this bipartite graph with the given edge removed.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/bipartite.py#L180-L182
HPAC/matchpy
matchpy/matching/bipartite.py
BipartiteGraph.limited_to
def limited_to(self, left: Set[TLeft], right: Set[TRight]) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]': """Returns the induced subgraph where only the nodes from the given sets are included.""" return BipartiteGraph(((n1, n2), v) for (n1, n2), v in self._edges.items() if n1 in left and n2 in right)
python
def limited_to(self, left: Set[TLeft], right: Set[TRight]) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]': """Returns the induced subgraph where only the nodes from the given sets are included.""" return BipartiteGraph(((n1, n2), v) for (n1, n2), v in self._edges.items() if n1 in left and n2 in right)
Returns the induced subgraph where only the nodes from the given sets are included.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/bipartite.py#L184-L186
HPAC/matchpy
matchpy/matching/bipartite.py
_DirectedMatchGraph.as_graph
def as_graph(self) -> Digraph: # pragma: no cover """Returns a :class:`graphviz.Digraph` representation of this directed match graph.""" if Digraph is None: raise ImportError('The graphviz package is required to draw the graph.') graph = Digraph() subgraphs = [Digraph(graph_attr={'rank': 'same'}), Digraph(graph_attr={'rank': 'same'})] nodes = [{}, {}] # type: List[Dict[Union[TLeft, TRight], str]] edges = [] # type: List[Tuple[str, str]] node_id = 0 for (tail_part, tail), head_set in self.items(): if tail not in nodes[tail_part]: name = 'node{:d}'.format(node_id) nodes[tail_part][tail] = name subgraphs[tail_part].node(name, label=str(tail)) node_id += 1 for head_part, head in head_set: if head not in nodes[head_part]: name = 'node{:d}'.format(node_id) nodes[head_part][head] = name subgraphs[head_part].node(name, label=str(head)) node_id += 1 edges.append((nodes[tail_part][tail], nodes[head_part][head])) graph.subgraph(subgraphs[0]) graph.subgraph(subgraphs[1]) for tail_node, head_node in edges: graph.edge(tail_node, head_node) return graph
python
def as_graph(self) -> Digraph: # pragma: no cover """Returns a :class:`graphviz.Digraph` representation of this directed match graph.""" if Digraph is None: raise ImportError('The graphviz package is required to draw the graph.') graph = Digraph() subgraphs = [Digraph(graph_attr={'rank': 'same'}), Digraph(graph_attr={'rank': 'same'})] nodes = [{}, {}] # type: List[Dict[Union[TLeft, TRight], str]] edges = [] # type: List[Tuple[str, str]] node_id = 0 for (tail_part, tail), head_set in self.items(): if tail not in nodes[tail_part]: name = 'node{:d}'.format(node_id) nodes[tail_part][tail] = name subgraphs[tail_part].node(name, label=str(tail)) node_id += 1 for head_part, head in head_set: if head not in nodes[head_part]: name = 'node{:d}'.format(node_id) nodes[head_part][head] = name subgraphs[head_part].node(name, label=str(head)) node_id += 1 edges.append((nodes[tail_part][tail], nodes[head_part][head])) graph.subgraph(subgraphs[0]) graph.subgraph(subgraphs[1]) for tail_node, head_node in edges: graph.edge(tail_node, head_node) return graph
Returns a :class:`graphviz.Digraph` representation of this directed match graph.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/bipartite.py#L203-L230
HPAC/matchpy
matchpy/expressions/functions.py
is_constant
def is_constant(expression):
    """Check if the given expression is constant, i.e. it does not contain Wildcards."""
    if isinstance(expression, Wildcard):
        return False
    if isinstance(expression, Expression):
        return expression.is_constant
    if isinstance(expression, Operation):
        return all(is_constant(o) for o in op_iter(expression))
    return True
python
def is_constant(expression):
    """Check if the given expression is constant, i.e. it does not contain Wildcards."""
    if isinstance(expression, Wildcard):
        return False
    if isinstance(expression, Expression):
        return expression.is_constant
    if isinstance(expression, Operation):
        return all(is_constant(o) for o in op_iter(expression))
    return True
Check if the given expression is constant, i.e. it does not contain Wildcards.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/functions.py#L15-L23
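A short sketch of the predicate in action. It assumes matchpy's top-level API (Symbol, Wildcard, Operation, Arity) and that is_constant is importable from matchpy.expressions.functions, as the path above indicates.

from matchpy import Arity, Operation, Symbol, Wildcard
from matchpy.expressions.functions import is_constant

f = Operation.new('f', Arity.binary)
a = Symbol('a')

print(is_constant(a))                     # True: a plain symbol contains no wildcards
print(is_constant(f(a, a)))               # True: all operands are constant
print(is_constant(f(a, Wildcard.dot())))  # False: a wildcard makes it non-constant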
HPAC/matchpy
matchpy/expressions/functions.py
is_syntactic
def is_syntactic(expression):
    """
    Check if the given expression is syntactic, i.e. it does not contain sequence wildcards or
    associative/commutative operations.
    """
    if isinstance(expression, Wildcard):
        return expression.fixed_size
    if isinstance(expression, Expression):
        return expression.is_syntactic
    if isinstance(expression, (AssociativeOperation, CommutativeOperation)):
        return False
    if isinstance(expression, Operation):
        return all(is_syntactic(o) for o in op_iter(expression))
    return True
python
def is_syntactic(expression):
    """
    Check if the given expression is syntactic, i.e. it does not contain sequence wildcards or
    associative/commutative operations.
    """
    if isinstance(expression, Wildcard):
        return expression.fixed_size
    if isinstance(expression, Expression):
        return expression.is_syntactic
    if isinstance(expression, (AssociativeOperation, CommutativeOperation)):
        return False
    if isinstance(expression, Operation):
        return all(is_syntactic(o) for o in op_iter(expression))
    return True
Check if the given expression is syntactic, i.e. it does not contain sequence wildcards or associative/commutative operations.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/functions.py#L26-L39
HPAC/matchpy
matchpy/expressions/functions.py
get_head
def get_head(expression):
    """Returns the given expression's head."""
    if isinstance(expression, Wildcard):
        if isinstance(expression, SymbolWildcard):
            return expression.symbol_type
        return None
    return type(expression)
python
def get_head(expression):
    """Returns the given expression's head."""
    if isinstance(expression, Wildcard):
        if isinstance(expression, SymbolWildcard):
            return expression.symbol_type
        return None
    return type(expression)
Returns the given expression's head.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/functions.py#L42-L48
HPAC/matchpy
matchpy/expressions/functions.py
match_head
def match_head(subject, pattern):
    """Checks if the head of subject matches the pattern's head."""
    if isinstance(pattern, Pattern):
        pattern = pattern.expression
    pattern_head = get_head(pattern)
    if pattern_head is None:
        return True
    if issubclass(pattern_head, OneIdentityOperation):
        return True
    subject_head = get_head(subject)
    assert subject_head is not None
    return issubclass(subject_head, pattern_head)
python
def match_head(subject, pattern):
    """Checks if the head of subject matches the pattern's head."""
    if isinstance(pattern, Pattern):
        pattern = pattern.expression
    pattern_head = get_head(pattern)
    if pattern_head is None:
        return True
    if issubclass(pattern_head, OneIdentityOperation):
        return True
    subject_head = get_head(subject)
    assert subject_head is not None
    return issubclass(subject_head, pattern_head)
Checks if the head of subject matches the pattern's head.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/functions.py#L51-L62
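A sketch of head-based prefiltering with these helpers, under the same import assumptions as the example above.

from matchpy import Arity, Operation, Symbol, Wildcard
from matchpy.expressions.functions import match_head

f = Operation.new('f', Arity.binary)
g = Operation.new('g', Arity.binary)
a, b = Symbol('a'), Symbol('b')

subject = f(a, b)
print(match_head(subject, f(Wildcard.dot(), Wildcard.dot())))  # True: same operation head
print(match_head(subject, g(Wildcard.dot(), Wildcard.dot())))  # False: heads differ
print(match_head(subject, Wildcard.dot()))                     # True: a wildcard head matches anything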
HPAC/matchpy
matchpy/expressions/functions.py
preorder_iter
def preorder_iter(expression):
    """Iterate over the expression in preorder."""
    yield expression
    if isinstance(expression, Operation):
        for operand in op_iter(expression):
            yield from preorder_iter(operand)
python
def preorder_iter(expression):
    """Iterate over the expression in preorder."""
    yield expression
    if isinstance(expression, Operation):
        for operand in op_iter(expression):
            yield from preorder_iter(operand)
Iterate over the expression in preorder.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/functions.py#L65-L70
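A small traversal sketch, again assuming matchpy's top-level API is available:

from matchpy import Arity, Operation, Symbol
from matchpy.expressions.functions import preorder_iter

f = Operation.new('f', Arity.binary)
g = Operation.new('g', Arity.unary)
a, b = Symbol('a'), Symbol('b')

for subexpr in preorder_iter(f(a, g(b))):
    print(subexpr)  # f(a, g(b)), then a, then g(b), then b -- parents before children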
HPAC/matchpy
matchpy/expressions/functions.py
preorder_iter_with_position
def preorder_iter_with_position(expression):
    """Iterate over the expression in preorder.

    Also yields the position of each subexpression.
    """
    yield expression, ()
    if isinstance(expression, Operation):
        for i, operand in enumerate(op_iter(expression)):
            for child, pos in preorder_iter_with_position(operand):
                yield child, (i, ) + pos
python
def preorder_iter_with_position(expression):
    """Iterate over the expression in preorder.

    Also yields the position of each subexpression.
    """
    yield expression, ()
    if isinstance(expression, Operation):
        for i, operand in enumerate(op_iter(expression)):
            for child, pos in preorder_iter_with_position(operand):
                yield child, (i, ) + pos
Iterate over the expression in preorder. Also yields the position of each subexpression.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/functions.py#L73-L82
HPAC/matchpy
matchpy/expressions/functions.py
is_anonymous
def is_anonymous(expression):
    """Returns True iff the expression does not contain any variables."""
    if hasattr(expression, 'variable_name') and expression.variable_name:
        return False
    if isinstance(expression, Operation):
        return all(is_anonymous(o) for o in op_iter(expression))
    return True
python
def is_anonymous(expression):
    """Returns True iff the expression does not contain any variables."""
    if hasattr(expression, 'variable_name') and expression.variable_name:
        return False
    if isinstance(expression, Operation):
        return all(is_anonymous(o) for o in op_iter(expression))
    return True
Returns True iff the expression does not contain any variables.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/functions.py#L85-L91
HPAC/matchpy
matchpy/expressions/functions.py
contains_variables_from_set
def contains_variables_from_set(expression, variables):
    """Returns True iff the expression contains any of the variables from the given set."""
    if hasattr(expression, 'variable_name') and expression.variable_name in variables:
        return True
    if isinstance(expression, Operation):
        return any(contains_variables_from_set(o, variables) for o in op_iter(expression))
    return False
python
def contains_variables_from_set(expression, variables):
    """Returns True iff the expression contains any of the variables from the given set."""
    if hasattr(expression, 'variable_name') and expression.variable_name in variables:
        return True
    if isinstance(expression, Operation):
        return any(contains_variables_from_set(o, variables) for o in op_iter(expression))
    return False
Returns True iff the expression contains any of the variables from the given set.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/functions.py#L94-L100
HPAC/matchpy
matchpy/expressions/functions.py
get_variables
def get_variables(expression, variables=None):
    """Returns the set of variable names in the given expression."""
    if variables is None:
        variables = set()
    if hasattr(expression, 'variable_name') and expression.variable_name is not None:
        variables.add(expression.variable_name)
    if isinstance(expression, Operation):
        for operand in op_iter(expression):
            get_variables(operand, variables)
    return variables
python
def get_variables(expression, variables=None):
    """Returns the set of variable names in the given expression."""
    if variables is None:
        variables = set()
    if hasattr(expression, 'variable_name') and expression.variable_name is not None:
        variables.add(expression.variable_name)
    if isinstance(expression, Operation):
        for operand in op_iter(expression):
            get_variables(operand, variables)
    return variables
Returns the set of variable names in the given expression.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/functions.py#L103-L112
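A quick sketch of collecting variable names, with named wildcards standing in for variables (same import assumptions as above):

from matchpy import Arity, Operation, Symbol, Wildcard
from matchpy.expressions.functions import get_variables

f = Operation.new('f', Arity.binary)
x, y = Wildcard.dot('x'), Wildcard.dot('y')

print(get_variables(f(x, f(y, Symbol('a')))))  # {'x', 'y'}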
HPAC/matchpy
matchpy/expressions/functions.py
rename_variables
def rename_variables(expression: Expression, renaming: Dict[str, str]) -> Expression:
    """Rename the variables in the expression according to the given dictionary.

    Args:
        expression:
            The expression in which the variables are renamed.
        renaming:
            The renaming dictionary. Maps old variable names to new ones.
            Variable names not occurring in the dictionary are left unchanged.

    Returns:
        The expression with renamed variables.
    """
    if isinstance(expression, Operation):
        if hasattr(expression, 'variable_name'):
            variable_name = renaming.get(expression.variable_name, expression.variable_name)
            return create_operation_expression(
                expression, [rename_variables(o, renaming) for o in op_iter(expression)],
                variable_name=variable_name
            )
        operands = [rename_variables(o, renaming) for o in op_iter(expression)]
        return create_operation_expression(expression, operands)
    elif isinstance(expression, Expression):
        expression = expression.__copy__()
        expression.variable_name = renaming.get(expression.variable_name, expression.variable_name)
    return expression
python
def rename_variables(expression: Expression, renaming: Dict[str, str]) -> Expression:
    """Rename the variables in the expression according to the given dictionary.

    Args:
        expression:
            The expression in which the variables are renamed.
        renaming:
            The renaming dictionary. Maps old variable names to new ones.
            Variable names not occurring in the dictionary are left unchanged.

    Returns:
        The expression with renamed variables.
    """
    if isinstance(expression, Operation):
        if hasattr(expression, 'variable_name'):
            variable_name = renaming.get(expression.variable_name, expression.variable_name)
            return create_operation_expression(
                expression, [rename_variables(o, renaming) for o in op_iter(expression)],
                variable_name=variable_name
            )
        operands = [rename_variables(o, renaming) for o in op_iter(expression)]
        return create_operation_expression(expression, operands)
    elif isinstance(expression, Expression):
        expression = expression.__copy__()
        expression.variable_name = renaming.get(expression.variable_name, expression.variable_name)
    return expression
Rename the variables in the expression according to the given dictionary. Args: expression: The expression in which the variables are renamed. renaming: The renaming dictionary. Maps old variable names to new ones. Variable names not occurring in the dictionary are left unchanged. Returns: The expression with renamed variables.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/functions.py#L115-L139
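A renaming sketch combining the two helpers above (same import assumptions as the earlier examples):

from matchpy import Arity, Operation, Wildcard
from matchpy.expressions.functions import get_variables, rename_variables

f = Operation.new('f', Arity.binary)
x = Wildcard.dot('x')

renamed = rename_variables(f(x, x), {'x': 'y'})
print(get_variables(renamed))  # {'y'}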
HPAC/matchpy
matchpy/utils.py
fixed_integer_vector_iter
def fixed_integer_vector_iter(max_vector: Tuple[int, ...], vector_sum: int) -> Iterator[Tuple[int, ...]]:
    """
    Return an iterator over the integer vectors which

    - are componentwise less than or equal to *max_vector*, and
    - are non-negative, and where
    - the sum of their components is exactly *vector_sum*.

    The iterator yields the vectors in lexicographical order.

    Examples:

        List all vectors that are between ``(0, 0)`` and ``(2, 2)`` componentwise, where the sum of components is 2:

        >>> vectors = list(fixed_integer_vector_iter([2, 2], 2))
        >>> vectors
        [(0, 2), (1, 1), (2, 0)]
        >>> list(map(sum, vectors))
        [2, 2, 2]

    Args:
        max_vector:
            Maximum vector for the iteration. Every yielded result will be less than or equal to this componentwise.
        vector_sum:
            Every iterated vector will have a component sum equal to this value.

    Yields:
        All non-negative vectors that have the given sum and are not larger than the given maximum.

    Raises:
        ValueError:
            If *vector_sum* is negative.
    """
    if vector_sum < 0:
        raise ValueError("Vector sum must not be negative")
    if len(max_vector) == 0:
        if vector_sum == 0:
            yield tuple()
        return
    total = sum(max_vector)
    if vector_sum <= total:
        start = max(max_vector[0] + vector_sum - total, 0)
        end = min(max_vector[0], vector_sum)
        for j in range(start, end + 1):
            for vec in fixed_integer_vector_iter(max_vector[1:], vector_sum - j):
                yield (j, ) + vec
python
def fixed_integer_vector_iter(max_vector: Tuple[int, ...], vector_sum: int) -> Iterator[Tuple[int, ...]]:
    """
    Return an iterator over the integer vectors which

    - are componentwise less than or equal to *max_vector*, and
    - are non-negative, and where
    - the sum of their components is exactly *vector_sum*.

    The iterator yields the vectors in lexicographical order.

    Examples:

        List all vectors that are between ``(0, 0)`` and ``(2, 2)`` componentwise, where the sum of components is 2:

        >>> vectors = list(fixed_integer_vector_iter([2, 2], 2))
        >>> vectors
        [(0, 2), (1, 1), (2, 0)]
        >>> list(map(sum, vectors))
        [2, 2, 2]

    Args:
        max_vector:
            Maximum vector for the iteration. Every yielded result will be less than or equal to this componentwise.
        vector_sum:
            Every iterated vector will have a component sum equal to this value.

    Yields:
        All non-negative vectors that have the given sum and are not larger than the given maximum.

    Raises:
        ValueError:
            If *vector_sum* is negative.
    """
    if vector_sum < 0:
        raise ValueError("Vector sum must not be negative")
    if len(max_vector) == 0:
        if vector_sum == 0:
            yield tuple()
        return
    total = sum(max_vector)
    if vector_sum <= total:
        start = max(max_vector[0] + vector_sum - total, 0)
        end = min(max_vector[0], vector_sum)
        for j in range(start, end + 1):
            for vec in fixed_integer_vector_iter(max_vector[1:], vector_sum - j):
                yield (j, ) + vec
Return an iterator over the integer vectors which - are componentwise less than or equal to *max_vector*, and - are non-negative, and where - the sum of their components is exactly *vector_sum*. The iterator yields the vectors in lexicographical order. Examples: List all vectors that are between ``(0, 0)`` and ``(2, 2)`` componentwise, where the sum of components is 2: >>> vectors = list(fixed_integer_vector_iter([2, 2], 2)) >>> vectors [(0, 2), (1, 1), (2, 0)] >>> list(map(sum, vectors)) [2, 2, 2] Args: max_vector: Maximum vector for the iteration. Every yielded result will be less than or equal to this componentwise. vector_sum: Every iterated vector will have a component sum equal to this value. Yields: All non-negative vectors that have the given sum and are not larger than the given maximum. Raises: ValueError: If *vector_sum* is negative.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/utils.py#L30-L75
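Since this helper lives in matchpy.utils and needs nothing beyond the standard library, its two invariants are easy to sanity-check directly:

from matchpy.utils import fixed_integer_vector_iter

max_vector, total = (3, 1, 2), 4
for vec in fixed_integer_vector_iter(max_vector, total):
    # Every vector sums to `total` and stays within the componentwise bound.
    assert sum(vec) == total
    assert all(0 <= v <= m for v, m in zip(vec, max_vector))
    print(vec)  # (1, 1, 2), (2, 0, 2), (2, 1, 1), (3, 0, 1), (3, 1, 0)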
HPAC/matchpy
matchpy/utils.py
weak_composition_iter
def weak_composition_iter(n: int, num_parts: int) -> Iterator[Tuple[int, ...]]:
    """Yield all weak compositions of integer *n* into *num_parts* parts.

    Each composition is yielded as a tuple. The generated partitions are order-dependent and not unique when
    ignoring the order of the components. The partitions are yielded in lexicographical order.

    Example:

        >>> compositions = list(weak_composition_iter(5, 2))
        >>> compositions
        [(0, 5), (1, 4), (2, 3), (3, 2), (4, 1), (5, 0)]

        We can easily verify that all compositions are indeed valid:

        >>> list(map(sum, compositions))
        [5, 5, 5, 5, 5, 5]

    The algorithm was adapted from an answer to this `Stackoverflow question`_.

    Args:
        n:
            The integer to partition.
        num_parts:
            The number of parts for the combination.

    Yields:
        All non-negative tuples that have the given sum and size.

    Raises:
        ValueError:
            If *n* or *num_parts* are negative.

    .. _Stackoverflow question: http://stackoverflow.com/questions/40538923/40540014#40540014
    """
    if n < 0:
        raise ValueError("Total must not be negative")
    if num_parts < 0:
        raise ValueError("Number of parts must not be negative")
    if num_parts == 0:
        if n == 0:
            yield tuple()
        return
    m = n + num_parts - 1
    last = (m, )
    first = (-1, )
    for t in itertools.combinations(range(m), num_parts - 1):
        yield tuple(v - u - 1 for u, v in zip(first + t, t + last))
python
def weak_composition_iter(n: int, num_parts: int) -> Iterator[Tuple[int, ...]]:
    """Yield all weak compositions of integer *n* into *num_parts* parts.

    Each composition is yielded as a tuple. The generated partitions are order-dependent and not unique when
    ignoring the order of the components. The partitions are yielded in lexicographical order.

    Example:

        >>> compositions = list(weak_composition_iter(5, 2))
        >>> compositions
        [(0, 5), (1, 4), (2, 3), (3, 2), (4, 1), (5, 0)]

        We can easily verify that all compositions are indeed valid:

        >>> list(map(sum, compositions))
        [5, 5, 5, 5, 5, 5]

    The algorithm was adapted from an answer to this `Stackoverflow question`_.

    Args:
        n:
            The integer to partition.
        num_parts:
            The number of parts for the combination.

    Yields:
        All non-negative tuples that have the given sum and size.

    Raises:
        ValueError:
            If *n* or *num_parts* are negative.

    .. _Stackoverflow question: http://stackoverflow.com/questions/40538923/40540014#40540014
    """
    if n < 0:
        raise ValueError("Total must not be negative")
    if num_parts < 0:
        raise ValueError("Number of parts must not be negative")
    if num_parts == 0:
        if n == 0:
            yield tuple()
        return
    m = n + num_parts - 1
    last = (m, )
    first = (-1, )
    for t in itertools.combinations(range(m), num_parts - 1):
        yield tuple(v - u - 1 for u, v in zip(first + t, t + last))
Yield all weak compositions of integer *n* into *num_parts* parts. Each composition is yielded as a tuple. The generated partitions are order-dependent and not unique when ignoring the order of the components. The partitions are yielded in lexicographical order. Example: >>> compositions = list(weak_composition_iter(5, 2)) >>> compositions [(0, 5), (1, 4), (2, 3), (3, 2), (4, 1), (5, 0)] We can easily verify that all compositions are indeed valid: >>> list(map(sum, compositions)) [5, 5, 5, 5, 5, 5] The algorithm was adapted from an answer to this `Stackoverflow question`_. Args: n: The integer to partition. num_parts: The number of parts for the combination. Yields: All non-negative tuples that have the given sum and size. Raises: ValueError: If *n* or *num_parts* are negative. .. _Stackoverflow question: http://stackoverflow.com/questions/40538923/40540014#40540014
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/utils.py#L78-L124
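The number of weak compositions is given by the stars-and-bars formula, which makes a quick consistency check possible (math.comb requires Python 3.8+):

import math
from matchpy.utils import weak_composition_iter

n, k = 6, 3
comps = list(weak_composition_iter(n, k))
# Stars and bars: there are C(n + k - 1, k - 1) weak compositions of n into k parts.
assert len(comps) == math.comb(n + k - 1, k - 1)  # 28
assert all(len(c) == k and sum(c) == n for c in comps)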
HPAC/matchpy
matchpy/utils.py
commutative_sequence_variable_partition_iter
def commutative_sequence_variable_partition_iter(values: Multiset, variables: List[VariableWithCount]
                                                 ) -> Iterator[Dict[str, Multiset]]:
    """Yield all possible variable substitutions for given values and variables.

    .. note::

        The results are not yielded in any particular order because the algorithm uses dictionaries.
        Dictionaries until Python 3.6 do not keep track of the insertion order.

    Example:

        For a subject like ``fc(a, a, a, b, b, c)`` and a pattern like ``f(x__, y___, y___)`` one can define the
        following input parameters for the partitioning:

        >>> x = VariableWithCount(name='x', count=1, minimum=1, default=None)
        >>> y = VariableWithCount(name='y', count=2, minimum=0, default=None)
        >>> values = Multiset('aaabbc')

        Then the solutions are found (and sorted to get a unique output):

        >>> substitutions = commutative_sequence_variable_partition_iter(values, [x, y])
        >>> as_strings = list(str(Substitution(substitution)) for substitution in substitutions)
        >>> for substitution in sorted(as_strings):
        ...     print(substitution)
        {x ↦ {a, a, a, b, b, c}, y ↦ {}}
        {x ↦ {a, a, a, c}, y ↦ {b}}
        {x ↦ {a, b, b, c}, y ↦ {a}}
        {x ↦ {a, c}, y ↦ {a, b}}

    Args:
        values:
            The multiset of values which are partitioned and distributed among the variables.
        variables:
            A list of the variables to distribute the values among. Each variable has a name, a count of how many
            times it occurs and a minimum number of values it needs.

    Yields:
        Each possible substitution that is a valid partitioning of the values among the variables.
    """
    if len(variables) == 1:
        yield from _commutative_single_variable_partiton_iter(values, variables[0])
        return
    generators = []
    for value, count in values.items():
        generators.append(_make_variable_generator_factory(value, count, variables))
    initial = dict((var.name, Multiset()) for var in variables)  # type: Dict[str, 'Multiset[T]']
    for subst in generator_chain(initial, *generators):
        valid = True
        for var in variables:
            if var.default is not None and len(subst[var.name]) == 0:
                subst[var.name] = var.default
            elif len(subst[var.name]) < var.minimum:
                valid = False
                break
        if valid:
            if None in subst:
                del subst[None]
            yield subst
python
def commutative_sequence_variable_partition_iter(values: Multiset, variables: List[VariableWithCount]
                                                 ) -> Iterator[Dict[str, Multiset]]:
    """Yield all possible variable substitutions for given values and variables.

    .. note::

        The results are not yielded in any particular order because the algorithm uses dictionaries.
        Dictionaries until Python 3.6 do not keep track of the insertion order.

    Example:

        For a subject like ``fc(a, a, a, b, b, c)`` and a pattern like ``f(x__, y___, y___)`` one can define the
        following input parameters for the partitioning:

        >>> x = VariableWithCount(name='x', count=1, minimum=1, default=None)
        >>> y = VariableWithCount(name='y', count=2, minimum=0, default=None)
        >>> values = Multiset('aaabbc')

        Then the solutions are found (and sorted to get a unique output):

        >>> substitutions = commutative_sequence_variable_partition_iter(values, [x, y])
        >>> as_strings = list(str(Substitution(substitution)) for substitution in substitutions)
        >>> for substitution in sorted(as_strings):
        ...     print(substitution)
        {x ↦ {a, a, a, b, b, c}, y ↦ {}}
        {x ↦ {a, a, a, c}, y ↦ {b}}
        {x ↦ {a, b, b, c}, y ↦ {a}}
        {x ↦ {a, c}, y ↦ {a, b}}

    Args:
        values:
            The multiset of values which are partitioned and distributed among the variables.
        variables:
            A list of the variables to distribute the values among. Each variable has a name, a count of how many
            times it occurs and a minimum number of values it needs.

    Yields:
        Each possible substitution that is a valid partitioning of the values among the variables.
    """
    if len(variables) == 1:
        yield from _commutative_single_variable_partiton_iter(values, variables[0])
        return
    generators = []
    for value, count in values.items():
        generators.append(_make_variable_generator_factory(value, count, variables))
    initial = dict((var.name, Multiset()) for var in variables)  # type: Dict[str, 'Multiset[T]']
    for subst in generator_chain(initial, *generators):
        valid = True
        for var in variables:
            if var.default is not None and len(subst[var.name]) == 0:
                subst[var.name] = var.default
            elif len(subst[var.name]) < var.minimum:
                valid = False
                break
        if valid:
            if None in subst:
                del subst[None]
            yield subst
Yield all possible variable substitutions for given values and variables. .. note:: The results are not yielded in any particular order because the algorithm uses dictionaries. Dictionaries until Python 3.6 do not keep track of the insertion order. Example: For a subject like ``fc(a, a, a, b, b, c)`` and a pattern like ``f(x__, y___, y___)`` one can define the following input parameters for the partitioning: >>> x = VariableWithCount(name='x', count=1, minimum=1, default=None) >>> y = VariableWithCount(name='y', count=2, minimum=0, default=None) >>> values = Multiset('aaabbc') Then the solutions are found (and sorted to get a unique output): >>> substitutions = commutative_sequence_variable_partition_iter(values, [x, y]) >>> as_strings = list(str(Substitution(substitution)) for substitution in substitutions) >>> for substitution in sorted(as_strings): ... print(substitution) {x ↦ {a, a, a, b, b, c}, y ↦ {}} {x ↦ {a, a, a, c}, y ↦ {b}} {x ↦ {a, b, b, c}, y ↦ {a}} {x ↦ {a, c}, y ↦ {a, b}} Args: values: The multiset of values which are partitioned and distributed among the variables. variables: A list of the variables to distribute the values among. Each variable has a name, a count of how many times it occurs and a minimum number of values it needs. Yields: Each possible substitution that is a valid partitioning of the values among the variables.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/utils.py#L173-L232
HPAC/matchpy
matchpy/utils.py
get_short_lambda_source
def get_short_lambda_source(lambda_func: LambdaType) -> Optional[str]:
    """Return the source of a (short) lambda function.

    If it's impossible to obtain, return ``None``. The source is returned without the ``lambda`` and signature parts:

    >>> get_short_lambda_source(lambda x, y: x < y)
    'x < y'

    This should work well for most lambda definitions, however for multi-line or highly nested lambdas,
    the source extraction might not succeed.

    Args:
        lambda_func:
            The lambda function.

    Returns:
        The source of the lambda function without its signature.
    """
    try:
        all_source_lines, lnum = inspect.findsource(lambda_func)
        source_lines, _ = inspect.getsourcelines(lambda_func)
    except (IOError, TypeError):
        return None
    all_source_lines = [l.rstrip('\r\n') for l in all_source_lines]
    block_end = lnum + len(source_lines)
    source_ast = None
    for i in range(lnum, -1, -1):
        try:
            block = all_source_lines[i:block_end]
            if block[0].startswith(' ') or block[0].startswith('\t'):
                block.insert(0, 'with 0:')
            source_ast = ast.parse(os.linesep.join(block))
        except (SyntaxError, tokenize.TokenError):
            pass
        else:
            break
    nv = LambdaNodeVisitor(block)
    nv.visit(source_ast)
    lambda_code = lambda_func.__code__
    for candidate_code, lambda_text in nv.lambdas:
        candidate_code = candidate_code.co_consts[0]
        # We don't check for direct equivalence since the flags can be different
        if (candidate_code.co_code == lambda_code.co_code and candidate_code.co_consts == lambda_code.co_consts
                and candidate_code.co_names == lambda_code.co_names
                and candidate_code.co_varnames == lambda_code.co_varnames
                and candidate_code.co_cellvars == lambda_code.co_cellvars
                and candidate_code.co_freevars == lambda_code.co_freevars):
            return lambda_text[lambda_text.index(':')+1:].strip()
    return None
python
def get_short_lambda_source(lambda_func: LambdaType) -> Optional[str]:
    """Return the source of a (short) lambda function.

    If it's impossible to obtain, return ``None``. The source is returned without the ``lambda`` and signature parts:

    >>> get_short_lambda_source(lambda x, y: x < y)
    'x < y'

    This should work well for most lambda definitions, however for multi-line or highly nested lambdas,
    the source extraction might not succeed.

    Args:
        lambda_func:
            The lambda function.

    Returns:
        The source of the lambda function without its signature.
    """
    try:
        all_source_lines, lnum = inspect.findsource(lambda_func)
        source_lines, _ = inspect.getsourcelines(lambda_func)
    except (IOError, TypeError):
        return None
    all_source_lines = [l.rstrip('\r\n') for l in all_source_lines]
    block_end = lnum + len(source_lines)
    source_ast = None
    for i in range(lnum, -1, -1):
        try:
            block = all_source_lines[i:block_end]
            if block[0].startswith(' ') or block[0].startswith('\t'):
                block.insert(0, 'with 0:')
            source_ast = ast.parse(os.linesep.join(block))
        except (SyntaxError, tokenize.TokenError):
            pass
        else:
            break
    nv = LambdaNodeVisitor(block)
    nv.visit(source_ast)
    lambda_code = lambda_func.__code__
    for candidate_code, lambda_text in nv.lambdas:
        candidate_code = candidate_code.co_consts[0]
        # We don't check for direct equivalence since the flags can be different
        if (candidate_code.co_code == lambda_code.co_code and candidate_code.co_consts == lambda_code.co_consts
                and candidate_code.co_names == lambda_code.co_names
                and candidate_code.co_varnames == lambda_code.co_varnames
                and candidate_code.co_cellvars == lambda_code.co_cellvars
                and candidate_code.co_freevars == lambda_code.co_freevars):
            return lambda_text[lambda_text.index(':')+1:].strip()
    return None
Return the source of a (short) lambda function. If it's impossible to obtain, return ``None``. The source is returned without the ``lambda`` and signature parts: >>> get_short_lambda_source(lambda x, y: x < y) 'x < y' This should work well for most lambda definitions, however for multi-line or highly nested lambdas, the source extraction might not succeed. Args: lambda_func: The lambda function. Returns: The source of the lambda function without its signature.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/utils.py#L270-L321
HPAC/matchpy
matchpy/utils.py
extended_euclid
def extended_euclid(a: int, b: int) -> Tuple[int, int, int]:
    """Extended Euclidean algorithm that computes the Bézout coefficients as well as :math:`gcd(a, b)`

    Returns ``x, y, d`` where *x* and *y* are a solution to :math:`ax + by = d` and :math:`d = gcd(a, b)`.
    *x* and *y* are a minimal pair of Bézout's coefficients.

    See `Extended Euclidean algorithm <https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm>`_ or
    `Bézout's identity <https://en.wikipedia.org/wiki/B%C3%A9zout%27s_identity>`_ for more information.

    Example:

        Compute the Bézout coefficients and GCD of 42 and 12:

        >>> a, b = 42, 12
        >>> x, y, d = extended_euclid(a, b)
        >>> x, y, d
        (1, -3, 6)

        Verify the results:

        >>> import math
        >>> d == math.gcd(a, b)
        True
        >>> a * x + b * y == d
        True

    Args:
        a:
            The first integer.
        b:
            The second integer.

    Returns:
        A tuple with the Bézout coefficients and the greatest common divisor of the arguments.
    """
    if b == 0:
        return (1, 0, a)
    x0, y0, d = extended_euclid(b, a % b)
    x, y = y0, x0 - (a // b) * y0
    return (x, y, d)
python
def extended_euclid(a: int, b: int) -> Tuple[int, int, int]:
    """Extended Euclidean algorithm that computes the Bézout coefficients as well as :math:`gcd(a, b)`

    Returns ``x, y, d`` where *x* and *y* are a solution to :math:`ax + by = d` and :math:`d = gcd(a, b)`.
    *x* and *y* are a minimal pair of Bézout's coefficients.

    See `Extended Euclidean algorithm <https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm>`_ or
    `Bézout's identity <https://en.wikipedia.org/wiki/B%C3%A9zout%27s_identity>`_ for more information.

    Example:

        Compute the Bézout coefficients and GCD of 42 and 12:

        >>> a, b = 42, 12
        >>> x, y, d = extended_euclid(a, b)
        >>> x, y, d
        (1, -3, 6)

        Verify the results:

        >>> import math
        >>> d == math.gcd(a, b)
        True
        >>> a * x + b * y == d
        True

    Args:
        a:
            The first integer.
        b:
            The second integer.

    Returns:
        A tuple with the Bézout coefficients and the greatest common divisor of the arguments.
    """
    if b == 0:
        return (1, 0, a)
    x0, y0, d = extended_euclid(b, a % b)
    x, y = y0, x0 - (a // b) * y0
    return (x, y, d)
Extended Euclidean algorithm that computes the Bézout coefficients as well as :math:`gcd(a, b)` Returns ``x, y, d`` where *x* and *y* are a solution to :math:`ax + by = d` and :math:`d = gcd(a, b)`. *x* and *y* are a minimal pair of Bézout's coefficients. See `Extended Euclidean algorithm <https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm>`_ or `Bézout's identity <https://en.wikipedia.org/wiki/B%C3%A9zout%27s_identity>`_ for more information. Example: Compute the Bézout coefficients and GCD of 42 and 12: >>> a, b = 42, 12 >>> x, y, d = extended_euclid(a, b) >>> x, y, d (1, -3, 6) Verify the results: >>> import math >>> d == math.gcd(a, b) True >>> a * x + b * y == d True Args: a: The first integer. b: The second integer. Returns: A tuple with the Bézout coefficients and the greatest common divisor of the arguments.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/utils.py#L323-L364
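Another worked instance beyond the doctest, checking the Bézout identity for a larger pair:

import math
from matchpy.utils import extended_euclid

a, b = 240, 46
x, y, d = extended_euclid(a, b)
assert d == math.gcd(a, b)  # d == 2
assert a * x + b * y == d   # Bezout identity: 240*(-9) + 46*47 == 2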
HPAC/matchpy
matchpy/utils.py
base_solution_linear
def base_solution_linear(a: int, b: int, c: int) -> Iterator[Tuple[int, int]]:
    r"""Yield solutions for a basic linear Diophantine equation of the form :math:`ax + by = c`.

    First, the equation is normalized by dividing :math:`a, b, c` by their gcd. Then, the extended
    Euclidean algorithm (:func:`extended_euclid`) is used to find a base solution :math:`(x_0, y_0)`.

    All non-negative solutions are generated by using that the general solution is :math:`(x_0 + b t, y_0 - a t)`.
    Because the base solution is one of the minimal pairs of Bézout's coefficients, for all non-negative solutions
    either :math:`t \geq 0` or :math:`t \leq 0` must hold. Also, all the non-negative solutions are consecutive with
    respect to :math:`t`.

    Hence, by adding or subtracting :math:`a` resp. :math:`b` from the base solution, all non-negative solutions can
    be efficiently generated.

    Args:
        a:
            The first coefficient of the equation.
        b:
            The second coefficient of the equation.
        c:
            The constant of the equation.

    Yields:
        Each non-negative integer solution of the equation as a tuple ``(x, y)``.

    Raises:
        ValueError:
            If any of the coefficients is not a positive integer.
    """
    if a <= 0 or b <= 0:
        raise ValueError('Coefficients a and b must be positive integers.')
    if c < 0:
        raise ValueError('Constant c must not be negative.')
    d = math.gcd(a, math.gcd(b, c))
    a = a // d
    b = b // d
    c = c // d
    if c == 0:
        yield (0, 0)
    else:
        x0, y0, d = extended_euclid(a, b)
        # If c is not divisible by gcd(a, b), then there is no solution
        if c % d != 0:
            return
        x, y = c * x0, c * y0
        if x <= 0:
            while y >= 0:
                if x >= 0:
                    yield (x, y)
                x += b
                y -= a
        else:
            while x >= 0:
                if y >= 0:
                    yield (x, y)
                x -= b
                y += a
python
def base_solution_linear(a: int, b: int, c: int) -> Iterator[Tuple[int, int]]:
    r"""Yield solutions for a basic linear Diophantine equation of the form :math:`ax + by = c`.

    First, the equation is normalized by dividing :math:`a, b, c` by their gcd. Then, the extended
    Euclidean algorithm (:func:`extended_euclid`) is used to find a base solution :math:`(x_0, y_0)`.

    All non-negative solutions are generated by using that the general solution is :math:`(x_0 + b t, y_0 - a t)`.
    Because the base solution is one of the minimal pairs of Bézout's coefficients, for all non-negative solutions
    either :math:`t \geq 0` or :math:`t \leq 0` must hold. Also, all the non-negative solutions are consecutive with
    respect to :math:`t`.

    Hence, by adding or subtracting :math:`a` resp. :math:`b` from the base solution, all non-negative solutions can
    be efficiently generated.

    Args:
        a:
            The first coefficient of the equation.
        b:
            The second coefficient of the equation.
        c:
            The constant of the equation.

    Yields:
        Each non-negative integer solution of the equation as a tuple ``(x, y)``.

    Raises:
        ValueError:
            If any of the coefficients is not a positive integer.
    """
    if a <= 0 or b <= 0:
        raise ValueError('Coefficients a and b must be positive integers.')
    if c < 0:
        raise ValueError('Constant c must not be negative.')
    d = math.gcd(a, math.gcd(b, c))
    a = a // d
    b = b // d
    c = c // d
    if c == 0:
        yield (0, 0)
    else:
        x0, y0, d = extended_euclid(a, b)
        # If c is not divisible by gcd(a, b), then there is no solution
        if c % d != 0:
            return
        x, y = c * x0, c * y0
        if x <= 0:
            while y >= 0:
                if x >= 0:
                    yield (x, y)
                x += b
                y -= a
        else:
            while x >= 0:
                if y >= 0:
                    yield (x, y)
                x -= b
                y += a
r"""Yield solutions for a basic linear Diophantine equation of the form :math:`ax + by = c`. First, the equation is normalized by dividing :math:`a, b, c` by their gcd. Then, the extended Euclidean algorithm (:func:`extended_euclid`) is used to find a base solution :math:`(x_0, y_0)`. All non-negative solutions are generated by using that the general solution is :math:`(x_0 + b t, y_0 - a t)`. Because the base solution is one of the minimal pairs of Bézout's coefficients, for all non-negative solutions either :math:`t \geq 0` or :math:`t \leq 0` must hold. Also, all the non-negative solutions are consecutive with respect to :math:`t`. Hence, by adding or subtracting :math:`a` resp. :math:`b` from the base solution, all non-negative solutions can be efficiently generated. Args: a: The first coefficient of the equation. b: The second coefficient of the equation. c: The constant of the equation. Yields: Each non-negative integer solution of the equation as a tuple ``(x, y)``. Raises: ValueError: If any of the coefficients is not a positive integer.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/utils.py#L367-L428
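For example, the non-negative solutions of 3x + 5y = 30:

from matchpy.utils import base_solution_linear

solutions = list(base_solution_linear(3, 5, 30))
assert all(3 * x + 5 * y == 30 for x, y in solutions)
print(solutions)  # [(10, 0), (5, 3), (0, 6)]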
HPAC/matchpy
matchpy/utils.py
solve_linear_diop
def solve_linear_diop(total: int, *coeffs: int) -> Iterator[Tuple[int, ...]]:
    r"""Yield non-negative integer solutions of a linear Diophantine equation of the format
    :math:`c_1 x_1 + \dots + c_n x_n = total`.

    If there are at most two coefficients, :func:`base_solution_linear()` is used to find the solutions.
    Otherwise, the solutions are found recursively, by reducing the number of variables in each recursion:

    1. Compute :math:`d := gcd(c_2, \dots , c_n)`
    2. Solve :math:`c_1 x + d y = total`
    3. Recursively solve :math:`c_2 x_2 + \dots + c_n x_n = y` for each solution for :math:`y`
    4. Combine these solutions to form a solution for the whole equation

    Args:
        total:
            The constant of the equation.
        *coeffs:
            The coefficients :math:`c_i` of the equation.

    Yields:
        The non-negative integer solutions of the equation as a tuple :math:`(x_1, \dots, x_n)`.
    """
    if len(coeffs) == 0:
        if total == 0:
            yield tuple()
        return
    if len(coeffs) == 1:
        if total % coeffs[0] == 0:
            yield (total // coeffs[0], )
        return
    if len(coeffs) == 2:
        yield from base_solution_linear(coeffs[0], coeffs[1], total)
        return
    # calculate gcd(coeffs[1:])
    remainder_gcd = math.gcd(coeffs[1], coeffs[2])
    for coeff in coeffs[3:]:
        remainder_gcd = math.gcd(remainder_gcd, coeff)
    # solve coeffs[0] * x + remainder_gcd * y = total
    for coeff0_solution, remainder_gcd_solution in base_solution_linear(coeffs[0], remainder_gcd, total):
        new_coeffs = [c // remainder_gcd for c in coeffs[1:]]
        # use the solutions for y to solve the remaining variables recursively
        for remainder_solution in solve_linear_diop(remainder_gcd_solution, *new_coeffs):
            yield (coeff0_solution, ) + remainder_solution
python
def solve_linear_diop(total: int, *coeffs: int) -> Iterator[Tuple[int, ...]]:
    r"""Yield non-negative integer solutions of a linear Diophantine equation of the format
    :math:`c_1 x_1 + \dots + c_n x_n = total`.

    If there are at most two coefficients, :func:`base_solution_linear()` is used to find the solutions.
    Otherwise, the solutions are found recursively, by reducing the number of variables in each recursion:

    1. Compute :math:`d := gcd(c_2, \dots , c_n)`
    2. Solve :math:`c_1 x + d y = total`
    3. Recursively solve :math:`c_2 x_2 + \dots + c_n x_n = y` for each solution for :math:`y`
    4. Combine these solutions to form a solution for the whole equation

    Args:
        total:
            The constant of the equation.
        *coeffs:
            The coefficients :math:`c_i` of the equation.

    Yields:
        The non-negative integer solutions of the equation as a tuple :math:`(x_1, \dots, x_n)`.
    """
    if len(coeffs) == 0:
        if total == 0:
            yield tuple()
        return
    if len(coeffs) == 1:
        if total % coeffs[0] == 0:
            yield (total // coeffs[0], )
        return
    if len(coeffs) == 2:
        yield from base_solution_linear(coeffs[0], coeffs[1], total)
        return
    # calculate gcd(coeffs[1:])
    remainder_gcd = math.gcd(coeffs[1], coeffs[2])
    for coeff in coeffs[3:]:
        remainder_gcd = math.gcd(remainder_gcd, coeff)
    # solve coeffs[0] * x + remainder_gcd * y = total
    for coeff0_solution, remainder_gcd_solution in base_solution_linear(coeffs[0], remainder_gcd, total):
        new_coeffs = [c // remainder_gcd for c in coeffs[1:]]
        # use the solutions for y to solve the remaining variables recursively
        for remainder_solution in solve_linear_diop(remainder_gcd_solution, *new_coeffs):
            yield (coeff0_solution, ) + remainder_solution
r"""Yield non-negative integer solutions of a linear Diophantine equation of the format :math:`c_1 x_1 + \dots + c_n x_n = total`. If there are at most two coefficients, :func:`base_solution_linear()` is used to find the solutions. Otherwise, the solutions are found recursively, by reducing the number of variables in each recursion: 1. Compute :math:`d := gcd(c_2, \dots , c_n)` 2. Solve :math:`c_1 x + d y = total` 3. Recursively solve :math:`c_2 x_2 + \dots + c_n x_n = y` for each solution for :math:`y` 4. Combine these solutions to form a solution for the whole equation Args: total: The constant of the equation. *coeffs: The coefficients :math:`c_i` of the equation. Yields: The non-negative integer solutions of the equation as a tuple :math:`(x_1, \dots, x_n)`.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/utils.py#L431-L474
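A quick check with three coefficients, which exercises the recursive branch:

from matchpy.utils import solve_linear_diop

coeffs = (2, 3, 5)
for sol in solve_linear_diop(12, *coeffs):
    # Each solution satisfies 2*x1 + 3*x2 + 5*x3 == 12.
    assert sum(c * v for c, v in zip(coeffs, sol)) == 12
    print(sol)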
HPAC/matchpy
matchpy/utils.py
generator_chain
def generator_chain(initial_data: T, *factories: Callable[[T], Iterator[T]]) -> Iterator[T]:
    """Chain multiple generators together by passing results from one to the next.

    This helper function lets you create a chain of generators where each generator is constructed by a factory
    that gets the data yielded by the previous generator. So each generator can generate new data dependent on the
    data yielded by the previous one. For each data item yielded by a generator, a new generator is constructed by
    the next factory.

    Example:

        Let's say for every number from 1 to 4, we want to count up to that number. Then we can do
        something like this using list comprehensions:

        >>> [i for n in range(1, 5) for i in range(1, n + 1)]
        [1, 1, 2, 1, 2, 3, 1, 2, 3, 4]

        You can use this function to achieve the same thing:

        >>> list(generator_chain(5, lambda n: iter(range(1, n)), lambda i: iter(range(1, i + 1))))
        [1, 1, 2, 1, 2, 3, 1, 2, 3, 4]

        The advantage is that this is independent of the number of dependent generators you have. Also, this
        function does not use recursion so it is safe to use even with large generator counts.

    Args:
        initial_data:
            The initial data that is passed to the first generator factory.
        *factories:
            The generator factories. Each of them gets passed its predecessor's data and has to return an iterable.
            The data from this iterable is passed to the next factory.

    Yields:
        Every data item yielded by the generators of the final factory.
    """
    generator_count = len(factories)
    if generator_count == 0:
        yield initial_data
        return
    generators = [None] * generator_count  # type: List[Optional[Iterator[T]]]
    next_data = initial_data
    generator_index = 0
    while True:
        try:
            while generator_index < generator_count:
                if generators[generator_index] is None:
                    generators[generator_index] = factories[generator_index](next_data)
                next_data = next(generators[generator_index])
                generator_index += 1
            yield next_data
            generator_index -= 1
        except StopIteration:
            generators[generator_index] = None
            generator_index -= 1
            if generator_index < 0:
                break
python
def generator_chain(initial_data: T, *factories: Callable[[T], Iterator[T]]) -> Iterator[T]:
    """Chain multiple generators together by passing results from one to the next.

    This helper function lets you create a chain of generators where each generator is constructed by a factory
    that gets the data yielded by the previous generator. So each generator can generate new data dependent on the
    data yielded by the previous one. For each data item yielded by a generator, a new generator is constructed by
    the next factory.

    Example:

        Let's say for every number from 1 to 4, we want to count up to that number. Then we can do
        something like this using list comprehensions:

        >>> [i for n in range(1, 5) for i in range(1, n + 1)]
        [1, 1, 2, 1, 2, 3, 1, 2, 3, 4]

        You can use this function to achieve the same thing:

        >>> list(generator_chain(5, lambda n: iter(range(1, n)), lambda i: iter(range(1, i + 1))))
        [1, 1, 2, 1, 2, 3, 1, 2, 3, 4]

        The advantage is that this is independent of the number of dependent generators you have. Also, this
        function does not use recursion so it is safe to use even with large generator counts.

    Args:
        initial_data:
            The initial data that is passed to the first generator factory.
        *factories:
            The generator factories. Each of them gets passed its predecessor's data and has to return an iterable.
            The data from this iterable is passed to the next factory.

    Yields:
        Every data item yielded by the generators of the final factory.
    """
    generator_count = len(factories)
    if generator_count == 0:
        yield initial_data
        return
    generators = [None] * generator_count  # type: List[Optional[Iterator[T]]]
    next_data = initial_data
    generator_index = 0
    while True:
        try:
            while generator_index < generator_count:
                if generators[generator_index] is None:
                    generators[generator_index] = factories[generator_index](next_data)
                next_data = next(generators[generator_index])
                generator_index += 1
            yield next_data
            generator_index -= 1
        except StopIteration:
            generators[generator_index] = None
            generator_index -= 1
            if generator_index < 0:
                break
Chain multiple generators together by passing results from one to the next. This helper function lets you create a chain of generators where each generator is constructed by a factory that gets the data yielded by the previous generator. So each generator can generate new data dependent on the data yielded by the previous one. For each data item yielded by a generator, a new generator is constructed by the next factory. Example: Let's say for every number from 1 to 4, we want to count up to that number. Then we can do something like this using list comprehensions: >>> [i for n in range(1, 5) for i in range(1, n + 1)] [1, 1, 2, 1, 2, 3, 1, 2, 3, 4] You can use this function to achieve the same thing: >>> list(generator_chain(5, lambda n: iter(range(1, n)), lambda i: iter(range(1, i + 1)))) [1, 1, 2, 1, 2, 3, 1, 2, 3, 4] The advantage is that this is independent of the number of dependent generators you have. Also, this function does not use recursion so it is safe to use even with large generator counts. Args: initial_data: The initial data that is passed to the first generator factory. *factories: The generator factories. Each of them gets passed its predecessor's data and has to return an iterable. The data from this iterable is passed to the next factory. Yields: Every data item yielded by the generators of the final factory.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/utils.py#L477-L532
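One way to see the "each factory sees its predecessor's data" behavior is to let every stage extend a growing tuple; this builds all strictly increasing triples without hand-written nested loops (the extend helper here is made up for this sketch):

from matchpy.utils import generator_chain

def extend(prefix):
    # Each stage appends one component that is larger than the last one.
    last = prefix[-1] if prefix else -1
    return ((prefix + (i,)) for i in range(last + 1, 5))

triples = list(generator_chain((), extend, extend, extend))
assert len(triples) == 10  # C(5, 3) strictly increasing triples over range(5)
print(triples[0], triples[-1])  # (0, 1, 2) (2, 3, 4)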
HPAC/matchpy
matchpy/expressions/substitution.py
Substitution.try_add_variable
def try_add_variable(self, variable_name: str, replacement: VariableReplacement) -> None:
    """Try to add the variable with its replacement to the substitution.

    This considers an existing replacement and will only succeed if the new replacement
    can be merged with the old replacement. Merging can occur if the two replacements
    are equivalent. Replacements can also be merged if the old replacement for the variable_name was
    unordered (i.e. a :class:`~.Multiset`) and the new one is an equivalent ordered version of it:

    >>> subst = Substitution({'x': Multiset(['a', 'b'])})
    >>> subst.try_add_variable('x', ('a', 'b'))
    >>> print(subst)
    {x ↦ (a, b)}

    Args:
        variable_name:
            The name of the variable to add.
        replacement:
            The replacement for the variable.

    Raises:
        ValueError:
            if the variable cannot be merged because it conflicts with the existing
            substitution for the variable_name.
    """
    if variable_name not in self:
        self[variable_name] = replacement.copy() if isinstance(replacement, Multiset) else replacement
    else:
        existing_value = self[variable_name]
        if isinstance(existing_value, tuple):
            if isinstance(replacement, Multiset):
                if Multiset(existing_value) != replacement:
                    raise ValueError
            elif replacement != existing_value:
                raise ValueError
        elif isinstance(existing_value, Multiset):
            if not isinstance(replacement, (tuple, list, Multiset)):
                raise ValueError
            compare_value = Multiset(replacement)
            if existing_value == compare_value:
                if not isinstance(replacement, Multiset):
                    self[variable_name] = replacement
            else:
                raise ValueError
        elif replacement != existing_value:
            raise ValueError
python
def try_add_variable(self, variable_name: str, replacement: VariableReplacement) -> None:
    """Try to add the variable with its replacement to the substitution.

    This considers an existing replacement and will only succeed if the new replacement
    can be merged with the old replacement. Merging can occur if the two replacements
    are equivalent. Replacements can also be merged if the old replacement for the variable_name was
    unordered (i.e. a :class:`~.Multiset`) and the new one is an equivalent ordered version of it:

    >>> subst = Substitution({'x': Multiset(['a', 'b'])})
    >>> subst.try_add_variable('x', ('a', 'b'))
    >>> print(subst)
    {x ↦ (a, b)}

    Args:
        variable_name:
            The name of the variable to add.
        replacement:
            The replacement for the variable.

    Raises:
        ValueError:
            if the variable cannot be merged because it conflicts with the existing
            substitution for the variable_name.
    """
    if variable_name not in self:
        self[variable_name] = replacement.copy() if isinstance(replacement, Multiset) else replacement
    else:
        existing_value = self[variable_name]
        if isinstance(existing_value, tuple):
            if isinstance(replacement, Multiset):
                if Multiset(existing_value) != replacement:
                    raise ValueError
            elif replacement != existing_value:
                raise ValueError
        elif isinstance(existing_value, Multiset):
            if not isinstance(replacement, (tuple, list, Multiset)):
                raise ValueError
            compare_value = Multiset(replacement)
            if existing_value == compare_value:
                if not isinstance(replacement, Multiset):
                    self[variable_name] = replacement
            else:
                raise ValueError
        elif replacement != existing_value:
            raise ValueError
Try to add the variable with its replacement to the substitution. This considers an existing replacement and will only succeed if the new replacement can be merged with the old replacement. Merging can occur if the two replacements are equivalent. Replacements can also be merged if the old replacement for the variable_name was unordered (i.e. a :class:`~.Multiset`) and the new one is an equivalent ordered version of it: >>> subst = Substitution({'x': Multiset(['a', 'b'])}) >>> subst.try_add_variable('x', ('a', 'b')) >>> print(subst) {x ↦ (a, b)} Args: variable_name: The name of the variable to add. replacement: The replacement for the variable. Raises: ValueError: if the variable cannot be merged because it conflicts with the existing substitution for the variable_name.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/substitution.py#L32-L77
HPAC/matchpy
matchpy/expressions/substitution.py
Substitution.union_with_variable
def union_with_variable(self, variable: str, replacement: VariableReplacement) -> 'Substitution':
    """Try to create a new substitution with the given variable added.

    See :meth:`try_add_variable` for a version of this method that modifies the substitution
    in place.

    Args:
        variable:
            The name of the variable to add.
        replacement:
            The substitution for the variable.

    Returns:
        The new substitution with the variable added or merged.

    Raises:
        ValueError:
            if the variable cannot be merged because it conflicts with the existing
            substitution for the variable.
    """
    new_subst = Substitution(self)
    new_subst.try_add_variable(variable, replacement)
    return new_subst
python
def union_with_variable(self, variable: str, replacement: VariableReplacement) -> 'Substitution':
    """Try to create a new substitution with the given variable added.

    See :meth:`try_add_variable` for a version of this method that modifies the substitution
    in place.

    Args:
        variable:
            The name of the variable to add.
        replacement:
            The substitution for the variable.

    Returns:
        The new substitution with the variable added or merged.

    Raises:
        ValueError:
            if the variable cannot be merged because it conflicts with the existing
            substitution for the variable.
    """
    new_subst = Substitution(self)
    new_subst.try_add_variable(variable, replacement)
    return new_subst
Try to create a new substitution with the given variable added. See :meth:`try_add_variable` for a version of this method that modifies the substitution in place. Args: variable: The name of the variable to add. replacement: The substitution for the variable. Returns: The new substitution with the variable added or merged. Raises: ValueError: if the variable cannot be merged because it conflicts with the existing substitution for the variable.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/substitution.py#L79-L101
HPAC/matchpy
matchpy/expressions/substitution.py
Substitution.extract_substitution
def extract_substitution(self, subject: 'expressions.Expression', pattern: 'expressions.Expression') -> bool:
    """Extract the variable substitution for the given pattern and subject.

    This assumes that subject and pattern already match when being considered as linear.
    Also, they both must be :term:`syntactic`, as sequence variables cannot be handled here.
    All that this method does is checking whether all the substitutions for the variables can be unified.
    So, in case it returns ``False``, the substitution is invalid for the match.

    .. warning::

        This method mutates the substitution and will even do so in case the extraction fails.

        Create a copy before using this method if you need to preserve the original substitution.

    Example:

        With an empty initial substitution and a linear pattern, the extraction will always succeed:

        >>> subst = Substitution()
        >>> subst.extract_substitution(f(a, b), f(x_, y_))
        True
        >>> print(subst)
        {x ↦ a, y ↦ b}

        Clashing values for existing variables will fail:

        >>> subst.extract_substitution(b, x_)
        False

        For non-linear patterns, the extraction can also fail with an empty substitution:

        >>> subst = Substitution()
        >>> subst.extract_substitution(f(a, b), f(x_, x_))
        False
        >>> print(subst)
        {x ↦ a}

        Note that the initial substitution got mutated even though the extraction failed!

    Args:
        subject:
            A :term:`syntactic` subject that matches the pattern.
        pattern:
            A :term:`syntactic` pattern that matches the subject.

    Returns:
        ``True`` iff the substitution could be extracted successfully.
    """
    if getattr(pattern, 'variable_name', False):
        try:
            self.try_add_variable(pattern.variable_name, subject)
        except ValueError:
            return False
        return True
    elif isinstance(pattern, expressions.Operation):
        assert isinstance(subject, type(pattern))
        assert op_len(subject) == op_len(pattern)
        op_expression = cast(expressions.Operation, subject)
        for subj, patt in zip(op_iter(op_expression), op_iter(pattern)):
            if not self.extract_substitution(subj, patt):
                return False
    return True
python
def extract_substitution(self, subject: 'expressions.Expression', pattern: 'expressions.Expression') -> bool:
    """Extract the variable substitution for the given pattern and subject.

    This assumes that subject and pattern already match when being considered as linear.
    Also, they both must be :term:`syntactic`, as sequence variables cannot be handled here.
    All that this method does is checking whether all the substitutions for the variables can be unified.
    So, in case it returns ``False``, the substitution is invalid for the match.

    .. warning::

        This method mutates the substitution and will even do so in case the extraction fails.

        Create a copy before using this method if you need to preserve the original substitution.

    Example:

        With an empty initial substitution and a linear pattern, the extraction will always succeed:

        >>> subst = Substitution()
        >>> subst.extract_substitution(f(a, b), f(x_, y_))
        True
        >>> print(subst)
        {x ↦ a, y ↦ b}

        Clashing values for existing variables will fail:

        >>> subst.extract_substitution(b, x_)
        False

        For non-linear patterns, the extraction can also fail with an empty substitution:

        >>> subst = Substitution()
        >>> subst.extract_substitution(f(a, b), f(x_, x_))
        False
        >>> print(subst)
        {x ↦ a}

        Note that the initial substitution got mutated even though the extraction failed!

    Args:
        subject:
            A :term:`syntactic` subject that matches the pattern.
        pattern:
            A :term:`syntactic` pattern that matches the subject.

    Returns:
        ``True`` iff the substitution could be extracted successfully.
    """
    if getattr(pattern, 'variable_name', False):
        try:
            self.try_add_variable(pattern.variable_name, subject)
        except ValueError:
            return False
        return True
    elif isinstance(pattern, expressions.Operation):
        assert isinstance(subject, type(pattern))
        assert op_len(subject) == op_len(pattern)
        op_expression = cast(expressions.Operation, subject)
        for subj, patt in zip(op_iter(op_expression), op_iter(pattern)):
            if not self.extract_substitution(subj, patt):
                return False
    return True
Extract the variable substitution for the given pattern and subject. This assumes that subject and pattern already match when being considered as linear. Also, they both must be :term:`syntactic`, as sequence variables cannot be handled here. All that this method does is checking whether all the substitutions for the variables can be unified. So, in case it returns ``False``, the substitution is invalid for the match. .. warning:: This method mutates the substitution and will even do so in case the extraction fails. Create a copy before using this method if you need to preserve the original substitution. Example: With an empty initial substitution and a linear pattern, the extraction will always succeed: >>> subst = Substitution() >>> subst.extract_substitution(f(a, b), f(x_, y_)) True >>> print(subst) {x ↦ a, y ↦ b} Clashing values for existing variables will fail: >>> subst.extract_substitution(b, x_) False For non-linear patterns, the extraction can also fail with an empty substitution: >>> subst = Substitution() >>> subst.extract_substitution(f(a, b), f(x_, x_)) False >>> print(subst) {x ↦ a} Note that the initial substitution got mutated even though the extraction failed! Args: subject: A :term:`syntactic` subject that matches the pattern. pattern: A :term:`syntactic` pattern that matches the subject. Returns: ``True`` iff the substitution could be extracted successfully.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/substitution.py#L103-L164
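For readers who want to reproduce the doctest above outside of matchpy's doctest setup, here is a minimal standalone sketch. It assumes matchpy is installed; the names f, a, b, x_ and y_ are defined locally rather than taken from the doctest environment.

# Standalone sketch of the doctest above; assumes matchpy is installed.
from matchpy import Operation, Symbol, Arity, Wildcard, Substitution

f = Operation.new('f', Arity.variadic)
a, b = Symbol('a'), Symbol('b')
x_, y_ = Wildcard.dot('x'), Wildcard.dot('y')

subst = Substitution()
print(subst.extract_substitution(f(a, b), f(x_, y_)))  # True
print(subst)                                           # {x ↦ a, y ↦ b}
print(subst.extract_substitution(b, x_))               # False: x is already bound to a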
HPAC/matchpy
matchpy/expressions/substitution.py
Substitution.union
def union(self, *others: 'Substitution') -> 'Substitution':
    """Try to merge the substitutions.

    If a variable occurs in multiple substitutions, try to merge the replacements.
    See :meth:`union_with_variable` to see how replacements are merged.

    Does not modify any of the original substitutions.

    Example:

        >>> subst1 = Substitution({'x': Multiset(['a', 'b']), 'z': a})
        >>> subst2 = Substitution({'x': ('a', 'b'), 'y': ('c', )})
        >>> print(subst1.union(subst2))
        {x ↦ (a, b), y ↦ (c), z ↦ a}

    Args:
        others:
            The other substitutions to merge with this one.

    Returns:
        The new substitution with the other substitutions merged.

    Raises:
        ValueError:
            if a variable occurs in multiple substitutions but cannot be merged because the
            substitutions conflict.
    """
    new_subst = Substitution(self)
    for other in others:
        for variable_name, replacement in other.items():
            new_subst.try_add_variable(variable_name, replacement)
    return new_subst
python
def union(self, *others: 'Substitution') -> 'Substitution':
    """Try to merge the substitutions.

    If a variable occurs in multiple substitutions, try to merge the replacements.
    See :meth:`union_with_variable` to see how replacements are merged.

    Does not modify any of the original substitutions.

    Example:

        >>> subst1 = Substitution({'x': Multiset(['a', 'b']), 'z': a})
        >>> subst2 = Substitution({'x': ('a', 'b'), 'y': ('c', )})
        >>> print(subst1.union(subst2))
        {x ↦ (a, b), y ↦ (c), z ↦ a}

    Args:
        others:
            The other substitutions to merge with this one.

    Returns:
        The new substitution with the other substitutions merged.

    Raises:
        ValueError:
            if a variable occurs in multiple substitutions but cannot be merged because the
            substitutions conflict.
    """
    new_subst = Substitution(self)
    for other in others:
        for variable_name, replacement in other.items():
            new_subst.try_add_variable(variable_name, replacement)
    return new_subst
Try to merge the substitutions.

If a variable occurs in multiple substitutions, try to merge the replacements.
See :meth:`union_with_variable` to see how replacements are merged.

Does not modify any of the original substitutions.

Example:

    >>> subst1 = Substitution({'x': Multiset(['a', 'b']), 'z': a})
    >>> subst2 = Substitution({'x': ('a', 'b'), 'y': ('c', )})
    >>> print(subst1.union(subst2))
    {x ↦ (a, b), y ↦ (c), z ↦ a}

Args:
    others:
        The other substitutions to merge with this one.

Returns:
    The new substitution with the other substitutions merged.

Raises:
    ValueError:
        if a variable occurs in multiple substitutions but cannot be merged because the
        substitutions conflict.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/substitution.py#L166-L197
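A short sketch of merging and of a conflicting merge (assuming matchpy is installed):

from matchpy import Substitution, Symbol

a, b = Symbol('a'), Symbol('b')
s1 = Substitution({'x': a})
s2 = Substitution({'y': b})
print(s1.union(s2))  # {x ↦ a, y ↦ b}; neither s1 nor s2 is modified
try:
    s1.union(Substitution({'x': b}))  # 'x' already maps to a different value
except ValueError as err:
    print('conflict:', err)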
HPAC/matchpy
matchpy/expressions/substitution.py
Substitution.rename
def rename(self, renaming: Dict[str, str]) -> 'Substitution':
    """Return a copy of the substitution with renamed variables.

    Example:

        Rename the variable *x* to *y*:

        >>> subst = Substitution({'x': a})
        >>> subst.rename({'x': 'y'})
        {'y': Symbol('a')}

    Args:
        renaming:
            A dictionary mapping old variable names to new ones.

    Returns:
        A copy of the substitution where variable names have been replaced according to the given
        renaming dictionary. Names that are not contained in the dictionary are left unchanged.
    """
    return Substitution((renaming.get(name, name), value) for name, value in self.items())
python
def rename(self, renaming: Dict[str, str]) -> 'Substitution':
    """Return a copy of the substitution with renamed variables.

    Example:

        Rename the variable *x* to *y*:

        >>> subst = Substitution({'x': a})
        >>> subst.rename({'x': 'y'})
        {'y': Symbol('a')}

    Args:
        renaming:
            A dictionary mapping old variable names to new ones.

    Returns:
        A copy of the substitution where variable names have been replaced according to the given
        renaming dictionary. Names that are not contained in the dictionary are left unchanged.
    """
    return Substitution((renaming.get(name, name), value) for name, value in self.items())
Return a copy of the substitution with renamed variables.

Example:

    Rename the variable *x* to *y*:

    >>> subst = Substitution({'x': a})
    >>> subst.rename({'x': 'y'})
    {'y': Symbol('a')}

Args:
    renaming:
        A dictionary mapping old variable names to new ones.

Returns:
    A copy of the substitution where variable names have been replaced according to the given
    renaming dictionary. Names that are not contained in the dictionary are left unchanged.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/substitution.py#L199-L218
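A quick sketch showing that renaming is not in-place and that names missing from the dictionary are kept (assuming matchpy is installed; printing a Substitution uses the {x ↦ a} notation seen in the doctests above):

from matchpy import Substitution, Symbol

subst = Substitution({'x': Symbol('a')})
print(subst.rename({'x': 'y'}))  # {y ↦ a}
print(subst.rename({'z': 'w'}))  # {x ↦ a} -- 'x' is not in the renaming, so it is kept
print(subst)                     # {x ↦ a} -- the original is untouched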
HPAC/matchpy
matchpy/matching/syntactic.py
is_operation
def is_operation(term: Any) -> bool:
    """Return True iff the given term is a subclass of :class:`.Operation`."""
    return isinstance(term, type) and issubclass(term, Operation)
python
def is_operation(term: Any) -> bool:
    """Return True iff the given term is a subclass of :class:`.Operation`."""
    return isinstance(term, type) and issubclass(term, Operation)
Return True iff the given term is a subclass of :class:`.Operation`.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/syntactic.py#L40-L42
HPAC/matchpy
matchpy/matching/syntactic.py
is_symbol_wildcard
def is_symbol_wildcard(term: Any) -> bool:
    """Return True iff the given term is a subclass of :class:`.Symbol`."""
    return isinstance(term, type) and issubclass(term, Symbol)
python
def is_symbol_wildcard(term: Any) -> bool:
    """Return True iff the given term is a subclass of :class:`.Symbol`."""
    return isinstance(term, type) and issubclass(term, Symbol)
Return True iff the given term is a subclass of :class:`.Symbol`.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/syntactic.py#L45-L47
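Both helpers test for classes rather than instances, which is how flatterms encode operation heads and symbol wildcards. A sketch (assuming matchpy is installed and that both helpers are importable from matchpy.matching.syntactic, the module they are defined in):

from matchpy import Operation, Symbol, Arity
from matchpy.matching.syntactic import is_operation, is_symbol_wildcard

f = Operation.new('f', Arity.binary)
a = Symbol('a')
print(is_operation(f))             # True: f is an Operation subclass
print(is_operation(f(a, a)))       # False: an instance, not a class
print(is_symbol_wildcard(Symbol))  # True: Symbol is (trivially) a Symbol subclass
print(is_symbol_wildcard(a))       # False: an instance, not a class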
HPAC/matchpy
matchpy/matching/syntactic.py
_get_symbol_wildcard_label
def _get_symbol_wildcard_label(state: '_State', symbol: Symbol) -> Type[Symbol]:
    """Return the transition target for the given symbol type from the given state or None if it does not exist."""
    return next((t for t in state.keys() if is_symbol_wildcard(t) and isinstance(symbol, t)), None)
python
def _get_symbol_wildcard_label(state: '_State', symbol: Symbol) -> Type[Symbol]:
    """Return the transition target for the given symbol type from the given state or None if it does not exist."""
    return next((t for t in state.keys() if is_symbol_wildcard(t) and isinstance(symbol, t)), None)
Return the transition target for the given symbol type from the given state or None if it does not exist.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/syntactic.py#L50-L52
HPAC/matchpy
matchpy/matching/syntactic.py
_term_str
def _term_str(term: TermAtom) -> str:  # pragma: no cover
    """Return a string representation of a term atom."""
    if is_operation(term):
        return term.name + '('
    elif is_symbol_wildcard(term):
        return '*{!s}'.format(term.__name__)
    elif isinstance(term, Wildcard):
        return '*{!s}{!s}'.format(term.min_count, (not term.fixed_size) and '+' or '')
    elif term == Wildcard:
        return '*'
    else:
        return str(term)
python
def _term_str(term: TermAtom) -> str:  # pragma: no cover
    """Return a string representation of a term atom."""
    if is_operation(term):
        return term.name + '('
    elif is_symbol_wildcard(term):
        return '*{!s}'.format(term.__name__)
    elif isinstance(term, Wildcard):
        return '*{!s}{!s}'.format(term.min_count, (not term.fixed_size) and '+' or '')
    elif term == Wildcard:
        return '*'
    else:
        return str(term)
Return a string representation of a term atom.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/syntactic.py#L200-L211
HPAC/matchpy
matchpy/matching/syntactic.py
FlatTerm.is_syntactic
def is_syntactic(self):
    """True, iff the flatterm is :term:`syntactic`."""
    for term in self._terms:
        if isinstance(term, Wildcard) and not term.fixed_size:
            return False
        if is_operation(term) and issubclass(term, (AssociativeOperation, CommutativeOperation)):
            return False
    return True
python
def is_syntactic(self):
    """True, iff the flatterm is :term:`syntactic`."""
    for term in self._terms:
        if isinstance(term, Wildcard) and not term.fixed_size:
            return False
        if is_operation(term) and issubclass(term, (AssociativeOperation, CommutativeOperation)):
            return False
    return True
True, iff the flatterm is :term:`syntactic`.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/syntactic.py#L131-L138
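A sketch of the distinction (assuming matchpy is installed and that is_syntactic is exposed as a property, as it is in the matchpy sources):

from matchpy import Operation, Symbol, Arity, Wildcard
from matchpy.matching.syntactic import FlatTerm

f = Operation.new('f', Arity.variadic)
a = Symbol('a')
print(FlatTerm(f(a, Wildcard.dot())).is_syntactic)   # True: only a fixed-size wildcard
print(FlatTerm(f(a, Wildcard.star())).is_syntactic)  # False: contains a sequence wildcard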
HPAC/matchpy
matchpy/matching/syntactic.py
FlatTerm.merged
def merged(cls, *flatterms: 'FlatTerm') -> 'FlatTerm':
    """Concatenate the given flatterms to a single flatterm.

    Args:
        *flatterms:
            The flatterms which are concatenated.

    Returns:
        The concatenated flatterms.
    """
    return cls(cls._combined_wildcards_iter(sum(flatterms, cls.empty())))
python
def merged(cls, *flatterms: 'FlatTerm') -> 'FlatTerm':
    """Concatenate the given flatterms to a single flatterm.

    Args:
        *flatterms:
            The flatterms which are concatenated.

    Returns:
        The concatenated flatterms.
    """
    return cls(cls._combined_wildcards_iter(sum(flatterms, cls.empty())))
Concatenate the given flatterms to a single flatterm.

Args:
    *flatterms:
        The flatterms which are concatenated.

Returns:
    The concatenated flatterms.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/syntactic.py#L146-L156
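A sketch of concatenation (assuming matchpy is installed; FlatTerm supports len(), which the DiscriminationNet code above relies on):

from matchpy import Symbol, Wildcard
from matchpy.matching.syntactic import FlatTerm

a = Symbol('a')
merged = FlatTerm.merged(FlatTerm(a), FlatTerm(Wildcard.dot()))
print(len(merged))  # 2: Symbol('a') followed by a single fixed-size wildcard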
HPAC/matchpy
matchpy/matching/syntactic.py
FlatTerm._flatterm_iter
def _flatterm_iter(cls, expression: Expression) -> Iterator[TermAtom]:
    """Generator that yields the atoms of the expressions in prefix notation with operation end markers."""
    if isinstance(expression, Operation):
        yield type(expression)
        for operand in op_iter(expression):
            yield from cls._flatterm_iter(operand)
        yield OPERATION_END
    elif isinstance(expression, SymbolWildcard):
        yield expression.symbol_type
    elif isinstance(expression, (Symbol, Wildcard)):
        yield expression
    else:
        assert False, "Unreachable unless a new unsupported expression type is added."
python
def _flatterm_iter(cls, expression: Expression) -> Iterator[TermAtom]:
    """Generator that yields the atoms of the expressions in prefix notation with operation end markers."""
    if isinstance(expression, Operation):
        yield type(expression)
        for operand in op_iter(expression):
            yield from cls._flatterm_iter(operand)
        yield OPERATION_END
    elif isinstance(expression, SymbolWildcard):
        yield expression.symbol_type
    elif isinstance(expression, (Symbol, Wildcard)):
        yield expression
    else:
        assert False, "Unreachable unless a new unsupported expression type is added."
Generator that yields the atoms of the expressions in prefix notation with operation end markers.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/syntactic.py#L159-L171
HPAC/matchpy
matchpy/matching/syntactic.py
FlatTerm._combined_wildcards_iter
def _combined_wildcards_iter(flatterm: Iterator[TermAtom]) -> Iterator[TermAtom]:
    """Combine consecutive wildcards in a flatterm into a single one."""
    last_wildcard = None  # type: Optional[Wildcard]
    for term in flatterm:
        if isinstance(term, Wildcard) and not isinstance(term, SymbolWildcard):
            if last_wildcard is not None:
                new_min_count = last_wildcard.min_count + term.min_count
                new_fixed_size = last_wildcard.fixed_size and term.fixed_size
                last_wildcard = Wildcard(new_min_count, new_fixed_size)
            else:
                last_wildcard = Wildcard(term.min_count, term.fixed_size)
        else:
            if last_wildcard is not None:
                yield last_wildcard
                last_wildcard = None
            yield term
    if last_wildcard is not None:
        yield last_wildcard
python
def _combined_wildcards_iter(flatterm: Iterator[TermAtom]) -> Iterator[TermAtom]:
    """Combine consecutive wildcards in a flatterm into a single one."""
    last_wildcard = None  # type: Optional[Wildcard]
    for term in flatterm:
        if isinstance(term, Wildcard) and not isinstance(term, SymbolWildcard):
            if last_wildcard is not None:
                new_min_count = last_wildcard.min_count + term.min_count
                new_fixed_size = last_wildcard.fixed_size and term.fixed_size
                last_wildcard = Wildcard(new_min_count, new_fixed_size)
            else:
                last_wildcard = Wildcard(term.min_count, term.fixed_size)
        else:
            if last_wildcard is not None:
                yield last_wildcard
                last_wildcard = None
            yield term
    if last_wildcard is not None:
        yield last_wildcard
Combine consecutive wildcards in a flatterm into a single one.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/syntactic.py#L174-L191
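The effect is observable when building a flatterm. A sketch, assuming matchpy is installed and that the FlatTerm constructor applies this combination, as the matchpy sources do:

from matchpy import Operation, Arity, Wildcard
from matchpy.matching.syntactic import FlatTerm

f = Operation.new('f', Arity.variadic)
# A dot (exactly one) next to a plus (one or more) collapses into one
# wildcard that requires at least two operands and has no upper bound.
ft = FlatTerm(f(Wildcard.dot(), Wildcard.plus()))
print(len(ft))  # 3: the operation head, one combined wildcard, OPERATION_END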
HPAC/matchpy
matchpy/matching/syntactic.py
_StateQueueItem.labels
def labels(self) -> Set[TransitionLabel]:
    """Return the set of transition labels to examine for this queue state.

    This is the union of the transition label sets for both states.
    However, if one of the states is fixed, it is excluded from this union and a wildcard transition
    is included instead.
    Also, when already in a failed state (one of the states is ``None``), the :const:`OPERATION_END`
    is also included.
    """
    labels = set()  # type: Set[TransitionLabel]
    if self.state1 is not None and self.fixed != 1:
        labels.update(self.state1.keys())
    if self.state2 is not None and self.fixed != 2:
        labels.update(self.state2.keys())
    if self.fixed != 0:
        if self.fixed == 1 and self.state2 is None:
            labels.add(OPERATION_END)
        elif self.fixed == 2 and self.state1 is None:
            labels.add(OPERATION_END)
        labels.add(Wildcard)
    return labels
python
def labels(self) -> Set[TransitionLabel]:
    """Return the set of transition labels to examine for this queue state.

    This is the union of the transition label sets for both states.
    However, if one of the states is fixed, it is excluded from this union and a wildcard transition
    is included instead.
    Also, when already in a failed state (one of the states is ``None``), the :const:`OPERATION_END`
    is also included.
    """
    labels = set()  # type: Set[TransitionLabel]
    if self.state1 is not None and self.fixed != 1:
        labels.update(self.state1.keys())
    if self.state2 is not None and self.fixed != 2:
        labels.update(self.state2.keys())
    if self.fixed != 0:
        if self.fixed == 1 and self.state2 is None:
            labels.add(OPERATION_END)
        elif self.fixed == 2 and self.state1 is None:
            labels.add(OPERATION_END)
        labels.add(Wildcard)
    return labels
Return the set of transition labels to examine for this queue state.

This is the union of the transition label sets for both states.
However, if one of the states is fixed, it is excluded from this union and a wildcard transition
is included instead.
Also, when already in a failed state (one of the states is ``None``), the :const:`OPERATION_END`
is also included.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/syntactic.py#L280-L299
HPAC/matchpy
matchpy/matching/syntactic.py
DiscriminationNet.add
def add(self, pattern: Union[Pattern, FlatTerm], final_label: T=None) -> int:
    """Add a pattern to the discrimination net.

    Args:
        pattern:
            The pattern which is added to the DiscriminationNet. If an expression is given, it will be
            converted to a `FlatTerm` for internal processing. You can also pass a `FlatTerm` directly.
        final_label:
            A label that is returned if the pattern matches when using :meth:`match`. This will default
            to the pattern itself.

    Returns:
        The index of the newly added pattern. This is used internally to later get the pattern and its
        final label once a match is found.
    """
    index = len(self._patterns)
    self._patterns.append((pattern, final_label))
    flatterm = FlatTerm(pattern.expression) if not isinstance(pattern, FlatTerm) else pattern
    if flatterm.is_syntactic or len(flatterm) == 1:
        net = self._generate_syntactic_net(flatterm, index)
    else:
        net = self._generate_net(flatterm, index)
    if self._root:
        self._root = self._product_net(self._root, net)
    else:
        self._root = net
    return index
python
def add(self, pattern: Union[Pattern, FlatTerm], final_label: T=None) -> int:
    """Add a pattern to the discrimination net.

    Args:
        pattern:
            The pattern which is added to the DiscriminationNet. If an expression is given, it will be
            converted to a `FlatTerm` for internal processing. You can also pass a `FlatTerm` directly.
        final_label:
            A label that is returned if the pattern matches when using :meth:`match`. This will default
            to the pattern itself.

    Returns:
        The index of the newly added pattern. This is used internally to later get the pattern and its
        final label once a match is found.
    """
    index = len(self._patterns)
    self._patterns.append((pattern, final_label))
    flatterm = FlatTerm(pattern.expression) if not isinstance(pattern, FlatTerm) else pattern
    if flatterm.is_syntactic or len(flatterm) == 1:
        net = self._generate_syntactic_net(flatterm, index)
    else:
        net = self._generate_net(flatterm, index)
    if self._root:
        self._root = self._product_net(self._root, net)
    else:
        self._root = net
    return index
Add a pattern to the discrimination net.

Args:
    pattern:
        The pattern which is added to the DiscriminationNet. If an expression is given, it will be
        converted to a `FlatTerm` for internal processing. You can also pass a `FlatTerm` directly.
    final_label:
        A label that is returned if the pattern matches when using :meth:`match`. This will default
        to the pattern itself.

Returns:
    The index of the newly added pattern. This is used internally to later get the pattern and its
    final label once a match is found.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/syntactic.py#L329-L356
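A sketch of adding patterns and keeping the returned indices (assuming matchpy is installed):

from matchpy import Operation, Symbol, Arity, Wildcard, Pattern
from matchpy.matching.syntactic import DiscriminationNet

f = Operation.new('f', Arity.variadic)
a, b = Symbol('a'), Symbol('b')
x_ = Wildcard.dot('x')

net = DiscriminationNet()
i1 = net.add(Pattern(f(x_, b)))                    # final label defaults to the pattern
i2 = net.add(Pattern(f(a, x_)), final_label='p2')  # custom final label
print(i1, i2)  # 0 1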
HPAC/matchpy
matchpy/matching/syntactic.py
DiscriminationNet._generate_net
def _generate_net(cls, flatterm: FlatTerm, final_label: T) -> _State[T]:
    """Generates a DFA matching the given pattern."""
    # Capture the last sequence wildcard for every level of operation nesting on a stack
    # Used to add backtracking edges in case the "match" fails later
    last_wildcards = [None]
    # Generate a fail state for every level of nesting to backtrack to a sequence wildcard in a parent
    # Expression in case no match can be found
    fail_states = [None]
    operand_counts = [0]
    root = state = _State()
    states = {root.id: root}
    for term in flatterm:
        if operand_counts[-1] >= 0:
            operand_counts[-1] += 1
        # For wildcards, generate a chain of #min_count Wildcard edges
        # If the wildcard is unbounded (fixed_size = False),
        # add a wildcard self loop at the end
        if isinstance(term, Wildcard):
            # Generate a chain of #min_count Wildcard edges
            for _ in range(term.min_count):
                state = cls._create_child_state(state, Wildcard)
                states[state.id] = state
            # If it is a sequence wildcard, add a self loop
            if not term.fixed_size:
                state[Wildcard] = state
                last_wildcards[-1] = state
                operand_counts[-1] = -1
        else:
            state = cls._create_child_state(state, term)
            states[state.id] = state
            if is_operation(term):
                fail_state = None
                if last_wildcards[-1] or fail_states[-1]:
                    last_fail_state = (
                        fail_states[-1]
                        if not isinstance(fail_states[-1], list) else fail_states[-1][operand_counts[-1]]
                    )
                    if term.arity.fixed_size:
                        fail_state = _State()
                        states[fail_state.id] = fail_state
                        new_fail_states = [fail_state]
                        for _ in range(term.arity.min_count):
                            new_fail_state = _State()
                            states[new_fail_state.id] = new_fail_state
                            fail_state[Wildcard] = new_fail_state
                            fail_state = new_fail_state
                            new_fail_states.append(new_fail_state)
                        fail_state[OPERATION_END] = last_wildcards[-1] or last_fail_state
                        fail_state = new_fail_states
                    else:
                        fail_state = _State()
                        states[fail_state.id] = fail_state
                        fail_state[OPERATION_END] = last_wildcards[-1] or last_fail_state
                        fail_state[Wildcard] = fail_state
                fail_states.append(fail_state)
                last_wildcards.append(None)
                operand_counts.append(0)
            elif term == OPERATION_END:
                fail_states.pop()
                last_wildcards.pop()
                operand_counts.pop()
        if last_wildcards[-1] != state:
            if last_wildcards[-1]:
                state[EPSILON] = last_wildcards[-1]
            elif fail_states[-1]:
                last_fail_state = (
                    fail_states[-1]
                    if not isinstance(fail_states[-1], list) else fail_states[-1][operand_counts[-1]]
                )
                state[EPSILON] = last_fail_state
    state.payload = [final_label]
    return cls._convert_nfa_to_dfa(root, states)
python
def _generate_net(cls, flatterm: FlatTerm, final_label: T) -> _State[T]:
    """Generates a DFA matching the given pattern."""
    # Capture the last sequence wildcard for every level of operation nesting on a stack
    # Used to add backtracking edges in case the "match" fails later
    last_wildcards = [None]
    # Generate a fail state for every level of nesting to backtrack to a sequence wildcard in a parent
    # Expression in case no match can be found
    fail_states = [None]
    operand_counts = [0]
    root = state = _State()
    states = {root.id: root}
    for term in flatterm:
        if operand_counts[-1] >= 0:
            operand_counts[-1] += 1
        # For wildcards, generate a chain of #min_count Wildcard edges
        # If the wildcard is unbounded (fixed_size = False),
        # add a wildcard self loop at the end
        if isinstance(term, Wildcard):
            # Generate a chain of #min_count Wildcard edges
            for _ in range(term.min_count):
                state = cls._create_child_state(state, Wildcard)
                states[state.id] = state
            # If it is a sequence wildcard, add a self loop
            if not term.fixed_size:
                state[Wildcard] = state
                last_wildcards[-1] = state
                operand_counts[-1] = -1
        else:
            state = cls._create_child_state(state, term)
            states[state.id] = state
            if is_operation(term):
                fail_state = None
                if last_wildcards[-1] or fail_states[-1]:
                    last_fail_state = (
                        fail_states[-1]
                        if not isinstance(fail_states[-1], list) else fail_states[-1][operand_counts[-1]]
                    )
                    if term.arity.fixed_size:
                        fail_state = _State()
                        states[fail_state.id] = fail_state
                        new_fail_states = [fail_state]
                        for _ in range(term.arity.min_count):
                            new_fail_state = _State()
                            states[new_fail_state.id] = new_fail_state
                            fail_state[Wildcard] = new_fail_state
                            fail_state = new_fail_state
                            new_fail_states.append(new_fail_state)
                        fail_state[OPERATION_END] = last_wildcards[-1] or last_fail_state
                        fail_state = new_fail_states
                    else:
                        fail_state = _State()
                        states[fail_state.id] = fail_state
                        fail_state[OPERATION_END] = last_wildcards[-1] or last_fail_state
                        fail_state[Wildcard] = fail_state
                fail_states.append(fail_state)
                last_wildcards.append(None)
                operand_counts.append(0)
            elif term == OPERATION_END:
                fail_states.pop()
                last_wildcards.pop()
                operand_counts.pop()
        if last_wildcards[-1] != state:
            if last_wildcards[-1]:
                state[EPSILON] = last_wildcards[-1]
            elif fail_states[-1]:
                last_fail_state = (
                    fail_states[-1]
                    if not isinstance(fail_states[-1], list) else fail_states[-1][operand_counts[-1]]
                )
                state[EPSILON] = last_fail_state
    state.payload = [final_label]
    return cls._convert_nfa_to_dfa(root, states)
Generates a DFA matching the given pattern.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/syntactic.py#L385-L461
HPAC/matchpy
matchpy/matching/syntactic.py
DiscriminationNet.match
def match(self, subject: Union[Expression, FlatTerm]) -> Iterator[Tuple[T, Substitution]]:
    """Match the given subject against all patterns in the net.

    Args:
        subject:
            The subject that is matched. Must be constant.

    Yields:
        A tuple :code:`(final label, substitution)`, where the first component is the final label
        associated with the pattern as given when using :meth:`add()` and the second one is the match
        substitution.
    """
    for index in self._match(subject):
        pattern, label = self._patterns[index]
        subst = Substitution()
        if subst.extract_substitution(subject, pattern.expression):
            for constraint in pattern.constraints:
                if not constraint(subst):
                    break
            else:
                yield label, subst
python
def match(self, subject: Union[Expression, FlatTerm]) -> Iterator[Tuple[T, Substitution]]:
    """Match the given subject against all patterns in the net.

    Args:
        subject:
            The subject that is matched. Must be constant.

    Yields:
        A tuple :code:`(final label, substitution)`, where the first component is the final label
        associated with the pattern as given when using :meth:`add()` and the second one is the match
        substitution.
    """
    for index in self._match(subject):
        pattern, label = self._patterns[index]
        subst = Substitution()
        if subst.extract_substitution(subject, pattern.expression):
            for constraint in pattern.constraints:
                if not constraint(subst):
                    break
            else:
                yield label, subst
Match the given subject against all patterns in the net.

Args:
    subject:
        The subject that is matched. Must be constant.

Yields:
    A tuple :code:`(final label, substitution)`, where the first component is the final label
    associated with the pattern as given when using :meth:`add()` and the second one is the match
    substitution.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/syntactic.py#L645-L664
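A sketch of matching and the is_match shortcut defined below (assuming matchpy is installed; the constructor accepting patterns directly is taken from the matchpy sources):

from matchpy import Operation, Symbol, Arity, Wildcard, Pattern
from matchpy.matching.syntactic import DiscriminationNet

f = Operation.new('f', Arity.variadic)
a, b = Symbol('a'), Symbol('b')
x_ = Wildcard.dot('x')

net = DiscriminationNet(Pattern(f(x_, b)))
for label, subst in net.match(f(a, b)):
    print(label, subst)       # f(x_, b) {x ↦ a}
print(net.is_match(f(a, a)))  # False: no pattern ends in a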
HPAC/matchpy
matchpy/matching/syntactic.py
DiscriminationNet.is_match
def is_match(self, subject: Union[Expression, FlatTerm]) -> bool:
    """Check if the given subject matches any pattern in the net.

    Args:
        subject:
            The subject that is matched. Must be constant.

    Returns:
        True, if any pattern matches the subject.
    """
    try:
        next(self.match(subject))
    except StopIteration:
        return False
    return True
python
def is_match(self, subject: Union[Expression, FlatTerm]) -> bool:
    """Check if the given subject matches any pattern in the net.

    Args:
        subject:
            The subject that is matched. Must be constant.

    Returns:
        True, if any pattern matches the subject.
    """
    try:
        next(self.match(subject))
    except StopIteration:
        return False
    return True
Check if the given subject matches any pattern in the net.

Args:
    subject:
        The subject that is matched. Must be constant.

Returns:
    True, if any pattern matches the subject.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/syntactic.py#L666-L680
HPAC/matchpy
matchpy/matching/syntactic.py
DiscriminationNet.as_graph
def as_graph(self) -> Digraph:  # pragma: no cover
    """Renders the discrimination net as graphviz digraph."""
    if Digraph is None:
        raise ImportError('The graphviz package is required to draw the graph.')
    dot = Digraph()
    nodes = set()
    queue = [self._root]
    while queue:
        state = queue.pop(0)
        if not state.payload:
            dot.node('n{!s}'.format(state.id), '', {'shape': ('circle' if state else 'doublecircle')})
        else:
            dot.node('n{!s}'.format(state.id), '\n'.join(map(str, state.payload)), {'shape': 'box'})
        for next_state in state.values():
            if next_state.id not in nodes:
                queue.append(next_state)
        nodes.add(state.id)
    nodes = set()
    queue = [self._root]
    while queue:
        state = queue.pop(0)
        if state.id in nodes:
            continue
        nodes.add(state.id)
        for (label, other) in state.items():
            dot.edge('n{!s}'.format(state.id), 'n{!s}'.format(other.id), _term_str(label))
            if other.id not in nodes:
                queue.append(other)
    return dot
python
def as_graph(self) -> Digraph:  # pragma: no cover
    """Renders the discrimination net as graphviz digraph."""
    if Digraph is None:
        raise ImportError('The graphviz package is required to draw the graph.')
    dot = Digraph()
    nodes = set()
    queue = [self._root]
    while queue:
        state = queue.pop(0)
        if not state.payload:
            dot.node('n{!s}'.format(state.id), '', {'shape': ('circle' if state else 'doublecircle')})
        else:
            dot.node('n{!s}'.format(state.id), '\n'.join(map(str, state.payload)), {'shape': 'box'})
        for next_state in state.values():
            if next_state.id not in nodes:
                queue.append(next_state)
        nodes.add(state.id)
    nodes = set()
    queue = [self._root]
    while queue:
        state = queue.pop(0)
        if state.id in nodes:
            continue
        nodes.add(state.id)
        for (label, other) in state.items():
            dot.edge('n{!s}'.format(state.id), 'n{!s}'.format(other.id), _term_str(label))
            if other.id not in nodes:
                queue.append(other)
    return dot
Renders the discrimination net as graphviz digraph.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/syntactic.py#L682-L715
HPAC/matchpy
matchpy/matching/syntactic.py
SequenceMatcher.add
def add(self, pattern: Pattern) -> int:
    """Add a pattern that will be recognized by the matcher.

    Args:
        pattern:
            The pattern to add.

    Returns:
        An internal index for the pattern.

    Raises:
        ValueError:
            If the pattern does not have the correct form.
        TypeError:
            If the pattern is not a non-commutative operation.
    """
    inner = pattern.expression
    if self.operation is None:
        if not isinstance(inner, Operation) or isinstance(inner, CommutativeOperation):
            raise TypeError("Pattern must be a non-commutative operation.")
        self.operation = type(inner)
    elif not isinstance(inner, self.operation):
        raise TypeError(
            "All patterns must be the same operation, expected {} but got {}".format(self.operation, type(inner))
        )
    if op_len(inner) < 3:
        raise ValueError("Pattern has not enough operands.")
    operands = list(op_iter(inner))
    first_name = self._check_wildcard_and_get_name(operands[0])
    last_name = self._check_wildcard_and_get_name(operands[-1])
    index = len(self._patterns)
    self._patterns.append((pattern, first_name, last_name))
    flatterm = FlatTerm.merged(*(FlatTerm(o) for o in operands[1:-1]))
    self._net.add(flatterm, index)
    return index
python
def add(self, pattern: Pattern) -> int:
    """Add a pattern that will be recognized by the matcher.

    Args:
        pattern:
            The pattern to add.

    Returns:
        An internal index for the pattern.

    Raises:
        ValueError:
            If the pattern does not have the correct form.
        TypeError:
            If the pattern is not a non-commutative operation.
    """
    inner = pattern.expression
    if self.operation is None:
        if not isinstance(inner, Operation) or isinstance(inner, CommutativeOperation):
            raise TypeError("Pattern must be a non-commutative operation.")
        self.operation = type(inner)
    elif not isinstance(inner, self.operation):
        raise TypeError(
            "All patterns must be the same operation, expected {} but got {}".format(self.operation, type(inner))
        )
    if op_len(inner) < 3:
        raise ValueError("Pattern has not enough operands.")
    operands = list(op_iter(inner))
    first_name = self._check_wildcard_and_get_name(operands[0])
    last_name = self._check_wildcard_and_get_name(operands[-1])
    index = len(self._patterns)
    self._patterns.append((pattern, first_name, last_name))
    flatterm = FlatTerm.merged(*(FlatTerm(o) for o in operands[1:-1]))
    self._net.add(flatterm, index)
    return index
Add a pattern that will be recognized by the matcher.

Args:
    pattern:
        The pattern to add.

Returns:
    An internal index for the pattern.

Raises:
    ValueError:
        If the pattern does not have the correct form.
    TypeError:
        If the pattern is not a non-commutative operation.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/syntactic.py#L750-L790
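A sketch of the required pattern shape (assuming matchpy is installed): the first and last operands must be sequence wildcards, and there must be at least one operand in between, so this pattern is accepted while, say, Pattern(f(a, b)) would raise.

from matchpy import Operation, Symbol, Arity, Wildcard, Pattern
from matchpy.matching.syntactic import SequenceMatcher

f = Operation.new('f', Arity.variadic)
a, b = Symbol('a'), Symbol('b')
matcher = SequenceMatcher()
matcher.add(Pattern(f(Wildcard.star('h'), a, b, Wildcard.star('t'))))
for pattern, subst in matcher.match(f(b, a, b, a)):
    print(subst)  # h and t capture the operands before and after the a, b core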
HPAC/matchpy
matchpy/matching/syntactic.py
SequenceMatcher.can_match
def can_match(cls, pattern: Pattern) -> bool:
    """Check if a pattern can be matched with a sequence matcher.

    Args:
        pattern:
            The pattern to check.

    Returns:
        True, iff the pattern can be matched with a sequence matcher.
    """
    if not isinstance(pattern.expression, Operation) or isinstance(pattern.expression, CommutativeOperation):
        return False
    if op_len(pattern.expression) < 3:
        return False
    first, *_, last = op_iter(pattern.expression)
    try:
        cls._check_wildcard_and_get_name(first)
        cls._check_wildcard_and_get_name(last)
    except ValueError:
        return False
    return True
python
def can_match(cls, pattern: Pattern) -> bool:
    """Check if a pattern can be matched with a sequence matcher.

    Args:
        pattern:
            The pattern to check.

    Returns:
        True, iff the pattern can be matched with a sequence matcher.
    """
    if not isinstance(pattern.expression, Operation) or isinstance(pattern.expression, CommutativeOperation):
        return False
    if op_len(pattern.expression) < 3:
        return False
    first, *_, last = op_iter(pattern.expression)
    try:
        cls._check_wildcard_and_get_name(first)
        cls._check_wildcard_and_get_name(last)
    except ValueError:
        return False
    return True
Check if a pattern can be matched with a sequence matcher.

Args:
    pattern:
        The pattern to check.

Returns:
    True, iff the pattern can be matched with a sequence matcher.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/syntactic.py#L800-L824
HPAC/matchpy
matchpy/matching/syntactic.py
SequenceMatcher.match
def match(self, subject: Expression) -> Iterator[Tuple[Pattern, Substitution]]:
    """Match the given subject against all patterns in the sequence matcher.

    Args:
        subject:
            The subject that is matched. Must be constant.

    Yields:
        A tuple :code:`(pattern, substitution)` for every matching pattern.
    """
    if not isinstance(subject, self.operation):
        return
    subjects = list(op_iter(subject))
    flatterms = [FlatTerm(o) for o in subjects]
    for i in range(len(flatterms)):
        flatterm = FlatTerm.merged(*flatterms[i:])
        for index in self._net._match(flatterm, collect=True):
            match_index = self._net._patterns[index][1]
            pattern, first_name, last_name = self._patterns[match_index]
            operand_count = op_len(pattern.expression) - 2
            expr_operands = subjects[i:i + operand_count]
            patt_operands = list(op_iter(pattern.expression))[1:-1]
            substitution = Substitution()
            if not all(itertools.starmap(substitution.extract_substitution, zip(expr_operands, patt_operands))):
                continue
            try:
                if first_name is not None:
                    substitution.try_add_variable(first_name, tuple(subjects[:i]))
                if last_name is not None:
                    substitution.try_add_variable(last_name, tuple(subjects[i + operand_count:]))
            except ValueError:
                continue
            for constraint in pattern.constraints:
                if not constraint(substitution):
                    break
            else:
                yield pattern, substitution
python
def match(self, subject: Expression) -> Iterator[Tuple[Pattern, Substitution]]:
    """Match the given subject against all patterns in the sequence matcher.

    Args:
        subject:
            The subject that is matched. Must be constant.

    Yields:
        A tuple :code:`(pattern, substitution)` for every matching pattern.
    """
    if not isinstance(subject, self.operation):
        return
    subjects = list(op_iter(subject))
    flatterms = [FlatTerm(o) for o in subjects]
    for i in range(len(flatterms)):
        flatterm = FlatTerm.merged(*flatterms[i:])
        for index in self._net._match(flatterm, collect=True):
            match_index = self._net._patterns[index][1]
            pattern, first_name, last_name = self._patterns[match_index]
            operand_count = op_len(pattern.expression) - 2
            expr_operands = subjects[i:i + operand_count]
            patt_operands = list(op_iter(pattern.expression))[1:-1]
            substitution = Substitution()
            if not all(itertools.starmap(substitution.extract_substitution, zip(expr_operands, patt_operands))):
                continue
            try:
                if first_name is not None:
                    substitution.try_add_variable(first_name, tuple(subjects[:i]))
                if last_name is not None:
                    substitution.try_add_variable(last_name, tuple(subjects[i + operand_count:]))
            except ValueError:
                continue
            for constraint in pattern.constraints:
                if not constraint(substitution):
                    break
            else:
                yield pattern, substitution
Match the given subject against all patterns in the sequence matcher.

Args:
    subject:
        The subject that is matched. Must be constant.

Yields:
    A tuple :code:`(pattern, substitution)` for every matching pattern.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/syntactic.py#L826-L868
HPAC/matchpy
matchpy/matching/one_to_one.py
match
def match(subject: Expression, pattern: Pattern) -> Iterator[Substitution]:
    r"""Tries to match the given *pattern* to the given *subject*.

    Yields each match in form of a substitution.

    Parameters:
        subject:
            A subject to match.
        pattern:
            The pattern to match.

    Yields:
        All possible match substitutions.

    Raises:
        ValueError:
            If the subject is not constant.
    """
    if not is_constant(subject):
        raise ValueError("The subject for matching must be constant.")
    global_constraints = [c for c in pattern.constraints if not c.variables]
    local_constraints = set(c for c in pattern.constraints if c.variables)
    for subst in _match([subject], pattern.expression, Substitution(), local_constraints):
        for constraint in global_constraints:
            if not constraint(subst):
                break
        else:
            yield subst
python
def match(subject: Expression, pattern: Pattern) -> Iterator[Substitution]:
    r"""Tries to match the given *pattern* to the given *subject*.

    Yields each match in form of a substitution.

    Parameters:
        subject:
            A subject to match.
        pattern:
            The pattern to match.

    Yields:
        All possible match substitutions.

    Raises:
        ValueError:
            If the subject is not constant.
    """
    if not is_constant(subject):
        raise ValueError("The subject for matching must be constant.")
    global_constraints = [c for c in pattern.constraints if not c.variables]
    local_constraints = set(c for c in pattern.constraints if c.variables)
    for subst in _match([subject], pattern.expression, Substitution(), local_constraints):
        for constraint in global_constraints:
            if not constraint(subst):
                break
        else:
            yield subst
r"""Tries to match the given *pattern* to the given *subject*. Yields each match in form of a substitution. Parameters: subject: An subject to match. pattern: The pattern to match. Yields: All possible match substitutions. Raises: ValueError: If the subject is not constant.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/one_to_one.py#L23-L50
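A minimal usage sketch (assuming matchpy is installed and that the top-level matchpy.match re-exports this one-to-one implementation):

from matchpy import Operation, Symbol, Arity, Wildcard, Pattern, match

f = Operation.new('f', Arity.variadic)
a, b = Symbol('a'), Symbol('b')
x_ = Wildcard.dot('x')

for substitution in match(f(a, b), Pattern(f(x_, b))):
    print(substitution)  # {x ↦ a}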
HPAC/matchpy
matchpy/matching/one_to_one.py
match_anywhere
def match_anywhere(subject: Expression, pattern: Pattern) -> Iterator[Tuple[Substitution, Tuple[int, ...]]]:
    """Tries to match the given *pattern* to any subexpression of the given *subject*.

    Yields each match in form of a substitution and a position tuple.
    The position is a tuple of indices, e.g. the empty tuple refers to the *subject* itself,
    :code:`(0, )` refers to the first child (operand) of the subject, :code:`(0, 0)` to the first child
    of the first child etc.

    Parameters:
        subject:
            A subject to match.
        pattern:
            The pattern to match.

    Yields:
        All possible substitution and position pairs.

    Raises:
        ValueError:
            If the subject is not constant.
    """
    if not is_constant(subject):
        raise ValueError("The subject for matching must be constant.")
    for child, pos in preorder_iter_with_position(subject):
        if match_head(child, pattern):
            for subst in match(child, pattern):
                yield subst, pos
python
def match_anywhere(subject: Expression, pattern: Pattern) -> Iterator[Tuple[Substitution, Tuple[int, ...]]]:
    """Tries to match the given *pattern* to any subexpression of the given *subject*.

    Yields each match in form of a substitution and a position tuple.
    The position is a tuple of indices, e.g. the empty tuple refers to the *subject* itself,
    :code:`(0, )` refers to the first child (operand) of the subject, :code:`(0, 0)` to the first child
    of the first child etc.

    Parameters:
        subject:
            A subject to match.
        pattern:
            The pattern to match.

    Yields:
        All possible substitution and position pairs.

    Raises:
        ValueError:
            If the subject is not constant.
    """
    if not is_constant(subject):
        raise ValueError("The subject for matching must be constant.")
    for child, pos in preorder_iter_with_position(subject):
        if match_head(child, pattern):
            for subst in match(child, pattern):
                yield subst, pos
Tries to match the given *pattern* to any subexpression of the given *subject*.

Yields each match in form of a substitution and a position tuple.
The position is a tuple of indices, e.g. the empty tuple refers to the *subject* itself,
:code:`(0, )` refers to the first child (operand) of the subject, :code:`(0, 0)` to the first child
of the first child etc.

Parameters:
    subject:
        A subject to match.
    pattern:
        The pattern to match.

Yields:
    All possible substitution and position pairs.

Raises:
    ValueError:
        If the subject is not constant.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/one_to_one.py#L53-L79
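A sketch of the position tuples (assuming matchpy is installed): here the pattern matches the first operand of the outer operation, so the reported position is (0,).

from matchpy import Operation, Symbol, Arity, Wildcard, Pattern, match_anywhere

f = Operation.new('f', Arity.variadic)
g = Operation.new('g', Arity.unary)
a = Symbol('a')
x_ = Wildcard.dot('x')

for subst, position in match_anywhere(g(f(a, a)), Pattern(f(x_, x_))):
    print(subst, position)  # {x ↦ a} (0,)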
HPAC/matchpy
matchpy/matching/one_to_one.py
_build_full_partition
def _build_full_partition(
        optional_parts, sequence_var_partition: Sequence[int], subjects: Sequence[Expression],
        operation: Operation
) -> List[Sequence[Expression]]:
    """Distribute subject operands among pattern operands.

    Given a partitioning for the variable part of the operands (i.e. a list of how many extra operands
    each sequence variable gets assigned), this builds the full partition that assigns every subject
    operand to a pattern operand.
    """
    i = 0
    var_index = 0
    opt_index = 0
    result = []
    for operand in op_iter(operation):
        wrap_associative = False
        if isinstance(operand, Wildcard):
            count = operand.min_count if operand.optional is None else 0
            if not operand.fixed_size or isinstance(operation, AssociativeOperation):
                count += sequence_var_partition[var_index]
                var_index += 1
                wrap_associative = operand.fixed_size and operand.min_count
        elif operand.optional is not None:
            count = optional_parts[opt_index]
            opt_index += 1
        else:
            count = 1
        operand_expressions = list(op_iter(subjects))[i:i + count]
        i += count
        if wrap_associative and len(operand_expressions) > wrap_associative:
            fixed = wrap_associative - 1
            operand_expressions = tuple(operand_expressions[:fixed]) + (
                create_operation_expression(operation, operand_expressions[fixed:]),
            )
        result.append(operand_expressions)
    return result
python
def _build_full_partition(
        optional_parts, sequence_var_partition: Sequence[int], subjects: Sequence[Expression],
        operation: Operation
) -> List[Sequence[Expression]]:
    """Distribute subject operands among pattern operands.

    Given a partitioning for the variable part of the operands (i.e. a list of how many extra operands
    each sequence variable gets assigned), this builds the full partition that assigns every subject
    operand to a pattern operand.
    """
    i = 0
    var_index = 0
    opt_index = 0
    result = []
    for operand in op_iter(operation):
        wrap_associative = False
        if isinstance(operand, Wildcard):
            count = operand.min_count if operand.optional is None else 0
            if not operand.fixed_size or isinstance(operation, AssociativeOperation):
                count += sequence_var_partition[var_index]
                var_index += 1
                wrap_associative = operand.fixed_size and operand.min_count
        elif operand.optional is not None:
            count = optional_parts[opt_index]
            opt_index += 1
        else:
            count = 1
        operand_expressions = list(op_iter(subjects))[i:i + count]
        i += count
        if wrap_associative and len(operand_expressions) > wrap_associative:
            fixed = wrap_associative - 1
            operand_expressions = tuple(operand_expressions[:fixed]) + (
                create_operation_expression(operation, operand_expressions[fixed:]),
            )
        result.append(operand_expressions)
    return result
Distribute subject operands among pattern operands.

Given a partitioning for the variable part of the operands (i.e. a list of how many extra operands
each sequence variable gets assigned), this builds the full partition that assigns every subject
operand to a pattern operand.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/one_to_one.py#L179-L216
HPAC/matchpy
matchpy/matching/many_to_one.py
_MatchIter.grouped
def grouped(self):
    """
    Yield the matches grouped by their final state in the automaton, i.e. structurally identical
    patterns only differing in constraints will be yielded together. Each group is yielded as a list
    of tuples consisting of a pattern and a match substitution.

    Yields:
        The grouped matches.
    """
    for _ in self._match(self.matcher.root):
        yield list(self._internal_iter())
python
def grouped(self):
    """
    Yield the matches grouped by their final state in the automaton, i.e. structurally identical
    patterns only differing in constraints will be yielded together. Each group is yielded as a list
    of tuples consisting of a pattern and a match substitution.

    Yields:
        The grouped matches.
    """
    for _ in self._match(self.matcher.root):
        yield list(self._internal_iter())
Yield the matches grouped by their final state in the automaton, i.e. structurally identical
patterns only differing in constraints will be yielded together. Each group is yielded as a list
of tuples consisting of a pattern and a match substitution.

Yields:
    The grouped matches.
https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/many_to_one.py#L102-L112
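A sketch of grouped matching (assuming matchpy is installed and that ManyToOneMatcher.match returns a _MatchIter exposing grouped(), as in the matchpy sources):

from matchpy import Operation, Symbol, Arity, Wildcard, Pattern, ManyToOneMatcher

f = Operation.new('f', Arity.variadic)
a, b = Symbol('a'), Symbol('b')
x_ = Wildcard.dot('x')

matcher = ManyToOneMatcher(Pattern(f(x_, b)), Pattern(f(a, x_)))
for group in matcher.match(f(a, b)).grouped():
    for pattern, substitution in group:
        print(pattern, substitution)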